repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---
Azure/azure-cli-extensions | src/express-route/azext_express_route/vendored_sdks/network_management_client.py | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/express-route/azext_express_route/vendored_sdks/network_management_client.py#L1779-L1825 | def route_filter_rules(self):
"""Instance depends on the API version:
* 2016-12-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2016_12_01.operations.RouteFilterRulesOperations>`
* 2017-03-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_03_01.operations.RouteFilterRulesOperations>`
* 2017-06-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_06_01.operations.RouteFilterRulesOperations>`
* 2017-08-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_08_01.operations.RouteFilterRulesOperations>`
* 2017-09-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_09_01.operations.RouteFilterRulesOperations>`
* 2017-10-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_10_01.operations.RouteFilterRulesOperations>`
* 2017-11-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_11_01.operations.RouteFilterRulesOperations>`
* 2018-01-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_01_01.operations.RouteFilterRulesOperations>`
* 2018-02-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_02_01.operations.RouteFilterRulesOperations>`
* 2018-04-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_04_01.operations.RouteFilterRulesOperations>`
* 2018-06-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_06_01.operations.RouteFilterRulesOperations>`
* 2018-07-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_07_01.operations.RouteFilterRulesOperations>`
* 2018-08-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_08_01.operations.RouteFilterRulesOperations>`
"""
api_version = self._get_api_version('route_filter_rules')
if api_version == '2016-12-01':
from .v2016_12_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2017-03-01':
from .v2017_03_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2017-08-01':
from .v2017_08_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2017-09-01':
from .v2017_09_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2017-11-01':
from .v2017_11_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2018-01-01':
from .v2018_01_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import RouteFilterRulesOperations as OperationClass
elif api_version == '2018-08-01':
from .v2018_08_01.operations import RouteFilterRulesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | [
"def",
"route_filter_rules",
"(",
"self",
")",
":",
"api_version",
"=",
"self",
".",
"_get_api_version",
"(",
"'route_filter_rules'",
")",
"if",
"api_version",
"==",
"'2016-12-01'",
":",
"from",
".",
"v2016_12_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-03-01'",
":",
"from",
".",
"v2017_03_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-06-01'",
":",
"from",
".",
"v2017_06_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-08-01'",
":",
"from",
".",
"v2017_08_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-09-01'",
":",
"from",
".",
"v2017_09_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-10-01'",
":",
"from",
".",
"v2017_10_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-11-01'",
":",
"from",
".",
"v2017_11_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-01-01'",
":",
"from",
".",
"v2018_01_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-02-01'",
":",
"from",
".",
"v2018_02_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-04-01'",
":",
"from",
".",
"v2018_04_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-06-01'",
":",
"from",
".",
"v2018_06_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-07-01'",
":",
"from",
".",
"v2018_07_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-08-01'",
":",
"from",
".",
"v2018_08_01",
".",
"operations",
"import",
"RouteFilterRulesOperations",
"as",
"OperationClass",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"APIVersion {} is not available\"",
".",
"format",
"(",
"api_version",
")",
")",
"return",
"OperationClass",
"(",
"self",
".",
"_client",
",",
"self",
".",
"config",
",",
"Serializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
",",
"Deserializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
")"
] | Instance depends on the API version:
* 2016-12-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2016_12_01.operations.RouteFilterRulesOperations>`
* 2017-03-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_03_01.operations.RouteFilterRulesOperations>`
* 2017-06-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_06_01.operations.RouteFilterRulesOperations>`
* 2017-08-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_08_01.operations.RouteFilterRulesOperations>`
* 2017-09-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_09_01.operations.RouteFilterRulesOperations>`
* 2017-10-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_10_01.operations.RouteFilterRulesOperations>`
* 2017-11-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2017_11_01.operations.RouteFilterRulesOperations>`
* 2018-01-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_01_01.operations.RouteFilterRulesOperations>`
* 2018-02-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_02_01.operations.RouteFilterRulesOperations>`
* 2018-04-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_04_01.operations.RouteFilterRulesOperations>`
* 2018-06-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_06_01.operations.RouteFilterRulesOperations>`
* 2018-07-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_07_01.operations.RouteFilterRulesOperations>`
* 2018-08-01: :class:`RouteFilterRulesOperations<azure.mgmt.network.v2018_08_01.operations.RouteFilterRulesOperations>` | [
"Instance",
"depends",
"on",
"the",
"API",
"version",
":"
] | python | train |
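Editorial aside (not part of the dataset row above): the version-dispatch pattern in that sample reduces to a lookup table plus a dynamic import. The sketch below is illustrative only; the package name is a placeholder, not the real `azure.mgmt.network` layout, and the real client deliberately hard-codes one import branch per supported API version.

```python
import importlib

# Hypothetical version -> submodule table standing in for the if/elif chain.
_SUPPORTED = {"2016-12-01": "v2016_12_01", "2018-08-01": "v2018_08_01"}

def load_operations(api_version, package="example_sdk"):
    try:
        suffix = _SUPPORTED[api_version]
    except KeyError:
        raise NotImplementedError(
            "APIVersion {} is not available".format(api_version))
    module = importlib.import_module("{}.{}.operations".format(package, suffix))
    return module.RouteFilterRulesOperations
```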
tylerbutler/engineer | engineer/util.py | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/util.py#L214-L225 | def ensure_exists(p, assume_dirs=False):
"""
Ensures a given path *p* exists.
If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a
value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files.
"""
if path(p).ext and not assume_dirs:
path(p).dirname().makedirs_p()
else:
path(p).makedirs_p()
return p | [
"def",
"ensure_exists",
"(",
"p",
",",
"assume_dirs",
"=",
"False",
")",
":",
"if",
"path",
"(",
"p",
")",
".",
"ext",
"and",
"not",
"assume_dirs",
":",
"path",
"(",
"p",
")",
".",
"dirname",
"(",
")",
".",
"makedirs_p",
"(",
")",
"else",
":",
"path",
"(",
"p",
")",
".",
"makedirs_p",
"(",
")",
"return",
"p"
] | Ensures a given path *p* exists.
If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a
value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files. | [
"Ensures",
"a",
"given",
"path",
"*",
"p",
"*",
"exists",
"."
] | python | train |
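A quick standard-library equivalent of `ensure_exists`, shown only to clarify the file-vs-directory heuristic (a path with an extension is treated as a file unless `assume_dirs` says otherwise):

```python
import os

def ensure_exists_stdlib(p, assume_dirs=False):
    # Create the parent for a file-looking path, or the path itself.
    has_ext = bool(os.path.splitext(p)[1])
    target = os.path.dirname(p) if (has_ext and not assume_dirs) else p
    if target:
        os.makedirs(target, exist_ok=True)
    return p

ensure_exists_stdlib("build/report.txt")  # creates build/
ensure_exists_stdlib("build/cache")       # creates build/cache/
```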
JNRowe/upoints | upoints/baken.py | https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/baken.py#L123-L205 | def import_locations(self, baken_file):
"""Import baken data files.
``import_locations()`` returns a dictionary with keys containing the
section title, and values consisting of a collection :class:`Baken`
objects.
It expects data files in the format used by the baken_ amateur radio
package, which is Windows INI style files such as:
.. code-block:: ini
[Abeche, Chad]
latitude=14.460000
longitude=20.680000
height=0.000000
[GB3BUX]
frequency=50.000
locator=IO93BF
power=25 TX
antenna=2 x Turnstile
height=460
mode=A1A
The reader uses the :mod:`configparser` module, so should be reasonably
robust against encodings and such. The above file processed by
``import_locations()`` will return the following ``dict`` object::
{"Abeche, Chad": Baken(14.460, 20.680, None, None, None, 0.000,
None, None, None, None, None),
"GB3BUX": : Baken(None, None, "2 x Turnstile", None, 50.000,
460.000, "IO93BF", "A1A", None, 25, None)}
    Args:
baken_file (iter): Baken data to read
Returns:
dict: Named locations and their associated values
.. _baken: http://www.qsl.net:80/g4klx/
"""
self._baken_file = baken_file
data = ConfigParser()
if hasattr(baken_file, 'readlines'):
data.readfp(baken_file)
elif isinstance(baken_file, list):
data.read(baken_file)
elif isinstance(baken_file, basestring):
data.readfp(open(baken_file))
else:
raise TypeError('Unable to handle data of type %r'
% type(baken_file))
valid_locator = re.compile(r"[A-Z]{2}\d{2}[A-Z]{2}")
for name in data.sections():
elements = {}
for item in ('latitude', 'longitude', 'antenna', 'direction',
'frequency', 'height', 'locator', 'mode', 'operator',
'power', 'qth'):
if data.has_option(name, item):
if item in ('antenna', 'locator', 'mode', 'power', 'qth'):
elements[item] = data.get(name, item)
elif item == 'operator':
elements[item] = elements[item].split(',')
elif item == 'direction':
elements[item] = data.get(name, item).split(',')
else:
try:
elements[item] = data.getfloat(name, item)
except ValueError:
logging.debug('Multiple frequency workaround for '
'%r entry' % name)
elements[item] = \
map(float, data.get(name, item).split(','))
else:
elements[item] = None
if elements['latitude'] is None \
and not valid_locator.match(elements['locator']):
logging.info('Skipping %r entry, as it contains no location '
'data' % name)
continue
self[name] = Baken(**elements) | [
"def",
"import_locations",
"(",
"self",
",",
"baken_file",
")",
":",
"self",
".",
"_baken_file",
"=",
"baken_file",
"data",
"=",
"ConfigParser",
"(",
")",
"if",
"hasattr",
"(",
"baken_file",
",",
"'readlines'",
")",
":",
"data",
".",
"readfp",
"(",
"baken_file",
")",
"elif",
"isinstance",
"(",
"baken_file",
",",
"list",
")",
":",
"data",
".",
"read",
"(",
"baken_file",
")",
"elif",
"isinstance",
"(",
"baken_file",
",",
"basestring",
")",
":",
"data",
".",
"readfp",
"(",
"open",
"(",
"baken_file",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unable to handle data of type %r'",
"%",
"type",
"(",
"baken_file",
")",
")",
"valid_locator",
"=",
"re",
".",
"compile",
"(",
"r\"[A-Z]{2}\\d{2}[A-Z]{2}\"",
")",
"for",
"name",
"in",
"data",
".",
"sections",
"(",
")",
":",
"elements",
"=",
"{",
"}",
"for",
"item",
"in",
"(",
"'latitude'",
",",
"'longitude'",
",",
"'antenna'",
",",
"'direction'",
",",
"'frequency'",
",",
"'height'",
",",
"'locator'",
",",
"'mode'",
",",
"'operator'",
",",
"'power'",
",",
"'qth'",
")",
":",
"if",
"data",
".",
"has_option",
"(",
"name",
",",
"item",
")",
":",
"if",
"item",
"in",
"(",
"'antenna'",
",",
"'locator'",
",",
"'mode'",
",",
"'power'",
",",
"'qth'",
")",
":",
"elements",
"[",
"item",
"]",
"=",
"data",
".",
"get",
"(",
"name",
",",
"item",
")",
"elif",
"item",
"==",
"'operator'",
":",
"elements",
"[",
"item",
"]",
"=",
"elements",
"[",
"item",
"]",
".",
"split",
"(",
"','",
")",
"elif",
"item",
"==",
"'direction'",
":",
"elements",
"[",
"item",
"]",
"=",
"data",
".",
"get",
"(",
"name",
",",
"item",
")",
".",
"split",
"(",
"','",
")",
"else",
":",
"try",
":",
"elements",
"[",
"item",
"]",
"=",
"data",
".",
"getfloat",
"(",
"name",
",",
"item",
")",
"except",
"ValueError",
":",
"logging",
".",
"debug",
"(",
"'Multiple frequency workaround for '",
"'%r entry'",
"%",
"name",
")",
"elements",
"[",
"item",
"]",
"=",
"map",
"(",
"float",
",",
"data",
".",
"get",
"(",
"name",
",",
"item",
")",
".",
"split",
"(",
"','",
")",
")",
"else",
":",
"elements",
"[",
"item",
"]",
"=",
"None",
"if",
"elements",
"[",
"'latitude'",
"]",
"is",
"None",
"and",
"not",
"valid_locator",
".",
"match",
"(",
"elements",
"[",
"'locator'",
"]",
")",
":",
"logging",
".",
"info",
"(",
"'Skipping %r entry, as it contains no location '",
"'data'",
"%",
"name",
")",
"continue",
"self",
"[",
"name",
"]",
"=",
"Baken",
"(",
"*",
"*",
"elements",
")"
] | Import baken data files.
``import_locations()`` returns a dictionary with keys containing the
section title, and values consisting of a collection :class:`Baken`
objects.
It expects data files in the format used by the baken_ amateur radio
package, which is Windows INI style files such as:
.. code-block:: ini
[Abeche, Chad]
latitude=14.460000
longitude=20.680000
height=0.000000
[GB3BUX]
frequency=50.000
locator=IO93BF
power=25 TX
antenna=2 x Turnstile
height=460
mode=A1A
The reader uses the :mod:`configparser` module, so should be reasonably
robust against encodings and such. The above file processed by
``import_locations()`` will return the following ``dict`` object::
{"Abeche, Chad": Baken(14.460, 20.680, None, None, None, 0.000,
None, None, None, None, None),
"GB3BUX": : Baken(None, None, "2 x Turnstile", None, 50.000,
460.000, "IO93BF", "A1A", None, 25, None)}
    Args:
baken_file (iter): Baken data to read
Returns:
dict: Named locations and their associated values
.. _baken: http://www.qsl.net:80/g4klx/ | [
"Import",
"baken",
"data",
"files",
"."
] | python | train |
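Two editorial notes on the sample above. First, the `operator` branch reads `elements[item]` before anything has assigned it, so it would raise a `KeyError` whenever an `operator` option is present; this looks like a latent bug in the source. Second, the parsing itself is plain `configparser` work. A minimal modern-Python sketch of reading one baken-style section (`read_file` is the Python 3 spelling of `readfp`):

```python
from configparser import ConfigParser
from io import StringIO

sample = """\
[GB3BUX]
frequency=50.000
locator=IO93BF
height=460
"""

cfg = ConfigParser()
cfg.read_file(StringIO(sample))
for name in cfg.sections():
    print(name, cfg.getfloat(name, "frequency"), cfg.get(name, "locator"))
# GB3BUX 50.0 IO93BF
```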
Parsl/libsubmit | libsubmit/utils.py | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/utils.py#L4-L17 | def wtime_to_minutes(time_string):
''' wtime_to_minutes
Convert standard wallclock time string to minutes.
Args:
- Time_string in HH:MM:SS format
Returns:
(int) minutes
'''
hours, mins, seconds = time_string.split(':')
return int(hours) * 60 + int(mins) + 1 | [
"def",
"wtime_to_minutes",
"(",
"time_string",
")",
":",
"hours",
",",
"mins",
",",
"seconds",
"=",
"time_string",
".",
"split",
"(",
"':'",
")",
"return",
"int",
"(",
"hours",
")",
"*",
"60",
"+",
"int",
"(",
"mins",
")",
"+",
"1"
] | wtime_to_minutes
Convert standard wallclock time string to minutes.
Args:
- Time_string in HH:MM:SS format
Returns:
(int) minutes | [
"wtime_to_minutes"
] | python | train |
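Note that the trailing `+ 1` rounds the conversion up: any seconds field, even `00`, costs a full extra minute, which is the safe direction when requesting scheduler walltime. For example (restating the one-liner so the snippet runs standalone):

```python
def wtime_to_minutes(time_string):
    hours, mins, seconds = time_string.split(':')
    return int(hours) * 60 + int(mins) + 1

print(wtime_to_minutes("01:30:00"))  # 91, not 90
print(wtime_to_minutes("00:00:30"))  # 1
```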
cloudendpoints/endpoints-python | endpoints/util.py | https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/util.py#L262-L287 | def check_list_type(objects, allowed_type, name, allow_none=True):
"""Verify that objects in list are of the allowed type or raise TypeError.
Args:
objects: The list of objects to check.
allowed_type: The allowed type of items in 'settings'.
name: Name of the list of objects, added to the exception.
allow_none: If set, None is also allowed.
Raises:
TypeError: if object is not of the allowed type.
Returns:
The list of objects, for convenient use in assignment.
"""
if objects is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed.' % name)
return objects
if not isinstance(objects, (tuple, list)):
raise TypeError('%s is not a list.' % name)
if not all(isinstance(i, allowed_type) for i in objects):
type_list = sorted(list(set(type(obj) for obj in objects)))
raise TypeError('%s contains types that don\'t match %s: %s' %
(name, allowed_type.__name__, type_list))
return objects | [
"def",
"check_list_type",
"(",
"objects",
",",
"allowed_type",
",",
"name",
",",
"allow_none",
"=",
"True",
")",
":",
"if",
"objects",
"is",
"None",
":",
"if",
"not",
"allow_none",
":",
"raise",
"TypeError",
"(",
"'%s is None, which is not allowed.'",
"%",
"name",
")",
"return",
"objects",
"if",
"not",
"isinstance",
"(",
"objects",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"raise",
"TypeError",
"(",
"'%s is not a list.'",
"%",
"name",
")",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"i",
",",
"allowed_type",
")",
"for",
"i",
"in",
"objects",
")",
":",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"type",
"(",
"obj",
")",
"for",
"obj",
"in",
"objects",
")",
")",
")",
"raise",
"TypeError",
"(",
"'%s contains types that don\\'t match %s: %s'",
"%",
"(",
"name",
",",
"allowed_type",
".",
"__name__",
",",
"type_list",
")",
")",
"return",
"objects"
] | Verify that objects in list are of the allowed type or raise TypeError.
Args:
objects: The list of objects to check.
allowed_type: The allowed type of items in 'settings'.
name: Name of the list of objects, added to the exception.
allow_none: If set, None is also allowed.
Raises:
TypeError: if object is not of the allowed type.
Returns:
The list of objects, for convenient use in assignment. | [
"Verify",
"that",
"objects",
"in",
"list",
"are",
"of",
"the",
"allowed",
"type",
"or",
"raise",
"TypeError",
"."
] | python | train |
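Usage sketch, assuming `check_list_type` from the row above is in scope; note that `None` passes by default and tuples are accepted as lists:

```python
paths = check_list_type(["a", "b"], str, "paths")  # returns ["a", "b"]
check_list_type(None, str, "paths")                # returns None (allow_none=True)
try:
    check_list_type(["a", 1], str, "paths")
except TypeError as exc:
    print(exc)  # paths contains types that don't match str: [...]
```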
google/apitools | apitools/base/protorpclite/protojson.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L256-L303 | def __decode_dictionary(self, message_type, dictionary):
"""Merge dictionary in to message.
Args:
message: Message to merge dictionary in to.
dictionary: Dictionary to extract information from. Dictionary
is as parsed from JSON. Nested objects will also be dictionaries.
"""
message = message_type()
for key, value in six.iteritems(dictionary):
if value is None:
try:
message.reset(key)
except AttributeError:
pass # This is an unrecognized field, skip it.
continue
try:
field = message.field_by_name(key)
except KeyError:
# Save unknown values.
variant = self.__find_variant(value)
if variant:
message.set_unrecognized_field(key, value, variant)
continue
if field.repeated:
# This should be unnecessary? Or in fact become an error.
if not isinstance(value, list):
value = [value]
valid_value = [self.decode_field(field, item)
for item in value]
setattr(message, field.name, valid_value)
continue
# This is just for consistency with the old behavior.
if value == []:
continue
try:
setattr(message, field.name, self.decode_field(field, value))
except messages.DecodeError:
# Save unknown enum values.
if not isinstance(field, messages.EnumField):
raise
variant = self.__find_variant(value)
if variant:
message.set_unrecognized_field(key, value, variant)
return message | [
"def",
"__decode_dictionary",
"(",
"self",
",",
"message_type",
",",
"dictionary",
")",
":",
"message",
"=",
"message_type",
"(",
")",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"dictionary",
")",
":",
"if",
"value",
"is",
"None",
":",
"try",
":",
"message",
".",
"reset",
"(",
"key",
")",
"except",
"AttributeError",
":",
"pass",
"# This is an unrecognized field, skip it.",
"continue",
"try",
":",
"field",
"=",
"message",
".",
"field_by_name",
"(",
"key",
")",
"except",
"KeyError",
":",
"# Save unknown values.",
"variant",
"=",
"self",
".",
"__find_variant",
"(",
"value",
")",
"if",
"variant",
":",
"message",
".",
"set_unrecognized_field",
"(",
"key",
",",
"value",
",",
"variant",
")",
"continue",
"if",
"field",
".",
"repeated",
":",
"# This should be unnecessary? Or in fact become an error.",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"[",
"value",
"]",
"valid_value",
"=",
"[",
"self",
".",
"decode_field",
"(",
"field",
",",
"item",
")",
"for",
"item",
"in",
"value",
"]",
"setattr",
"(",
"message",
",",
"field",
".",
"name",
",",
"valid_value",
")",
"continue",
"# This is just for consistency with the old behavior.",
"if",
"value",
"==",
"[",
"]",
":",
"continue",
"try",
":",
"setattr",
"(",
"message",
",",
"field",
".",
"name",
",",
"self",
".",
"decode_field",
"(",
"field",
",",
"value",
")",
")",
"except",
"messages",
".",
"DecodeError",
":",
"# Save unknown enum values.",
"if",
"not",
"isinstance",
"(",
"field",
",",
"messages",
".",
"EnumField",
")",
":",
"raise",
"variant",
"=",
"self",
".",
"__find_variant",
"(",
"value",
")",
"if",
"variant",
":",
"message",
".",
"set_unrecognized_field",
"(",
"key",
",",
"value",
",",
"variant",
")",
"return",
"message"
] | Merge dictionary in to message.
Args:
message: Message to merge dictionary in to.
dictionary: Dictionary to extract information from. Dictionary
is as parsed from JSON. Nested objects will also be dictionaries. | [
"Merge",
"dictionary",
"in",
"to",
"message",
"."
] | python | train |
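The shape of that merge loop (known keys become attributes, unknown keys are preserved rather than dropped) is easy to see on a plain dataclass. This is a simplified analogue only, not the protorpc API:

```python
from dataclasses import dataclass, fields

@dataclass
class Point:
    x: int = 0
    y: int = 0

def decode_dict(cls, data):
    obj, unknown = cls(), {}
    names = {f.name for f in fields(cls)}
    for key, value in data.items():
        if key in names:
            setattr(obj, key, value)   # known field: merge in
        else:
            unknown[key] = value       # unknown field: keep, don't drop
    return obj, unknown

print(decode_dict(Point, {"x": 1, "z": 9}))  # (Point(x=1, y=0), {'z': 9})
```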
Duke-GCB/DukeDSClient | ddsc/core/d4s2.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/d4s2.py#L151-L160 | def check_response(self, response):
"""
Raises error if the response isn't successful.
:param response: requests.Response response to be checked
"""
if response.status_code == 401:
raise D4S2Error(UNAUTHORIZED_MESSAGE)
if not 200 <= response.status_code < 300:
raise D4S2Error("Request to {} failed with {}:\n{}.".format(response.url, response.status_code,
response.text)) | [
"def",
"check_response",
"(",
"self",
",",
"response",
")",
":",
"if",
"response",
".",
"status_code",
"==",
"401",
":",
"raise",
"D4S2Error",
"(",
"UNAUTHORIZED_MESSAGE",
")",
"if",
"not",
"200",
"<=",
"response",
".",
"status_code",
"<",
"300",
":",
"raise",
"D4S2Error",
"(",
"\"Request to {} failed with {}:\\n{}.\"",
".",
"format",
"(",
"response",
".",
"url",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")"
] | Raises error if the response isn't successful.
:param response: requests.Response response to be checked | [
"Raises",
"error",
"if",
"the",
"response",
"isn",
"t",
"successful",
".",
":",
"param",
"response",
":",
"requests",
".",
"Response",
"response",
"to",
"be",
"checked"
] | python | train |
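For comparison, `requests` ships a close built-in: `Response.raise_for_status()` raises `requests.HTTPError` on any 4xx/5xx status. The helper above differs in two ways: it gives 401 a dedicated message, and it also rejects 3xx responses, since it accepts only the 200-299 window. Network permitting:

```python
import requests

resp = requests.get("https://httpbin.org/status/404")
try:
    resp.raise_for_status()
except requests.HTTPError as err:
    print(err)  # 404 Client Error: NOT FOUND for url: ...
```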
CloverHealth/temple | temple/ls.py | https://github.com/CloverHealth/temple/blob/d7b75da2459f72ba74d6f3b6e1ab95c3d1b92ccd/temple/ls.py#L32-L74 | def _code_search(query, github_user=None):
"""Performs a Github API code search
Args:
query (str): The query sent to Github's code search
github_user (str, optional): The Github user being searched in the query string
Returns:
dict: A dictionary of repository information keyed on the git SSH url
Raises:
`InvalidGithubUserError`: When ``github_user`` is invalid
"""
github_client = temple.utils.GithubClient()
headers = {'Accept': 'application/vnd.github.v3.text-match+json'}
resp = github_client.get('/search/code',
params={'q': query, 'per_page': 100},
headers=headers)
if resp.status_code == requests.codes.unprocessable_entity and github_user:
raise temple.exceptions.InvalidGithubUserError(
'Invalid Github user or org - "{}"'.format(github_user))
resp.raise_for_status()
resp_data = resp.json()
repositories = collections.defaultdict(dict)
while True:
repositories.update({
'[email protected]:{}.git'.format(repo['repository']['full_name']): repo['repository']
for repo in resp_data['items']
})
next_url = _parse_link_header(resp.headers).get('next')
if next_url:
resp = requests.get(next_url, headers=headers)
resp.raise_for_status()
resp_data = resp.json()
else:
break
return repositories | [
"def",
"_code_search",
"(",
"query",
",",
"github_user",
"=",
"None",
")",
":",
"github_client",
"=",
"temple",
".",
"utils",
".",
"GithubClient",
"(",
")",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.github.v3.text-match+json'",
"}",
"resp",
"=",
"github_client",
".",
"get",
"(",
"'/search/code'",
",",
"params",
"=",
"{",
"'q'",
":",
"query",
",",
"'per_page'",
":",
"100",
"}",
",",
"headers",
"=",
"headers",
")",
"if",
"resp",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"unprocessable_entity",
"and",
"github_user",
":",
"raise",
"temple",
".",
"exceptions",
".",
"InvalidGithubUserError",
"(",
"'Invalid Github user or org - \"{}\"'",
".",
"format",
"(",
"github_user",
")",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"resp_data",
"=",
"resp",
".",
"json",
"(",
")",
"repositories",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"while",
"True",
":",
"repositories",
".",
"update",
"(",
"{",
"'[email protected]:{}.git'",
".",
"format",
"(",
"repo",
"[",
"'repository'",
"]",
"[",
"'full_name'",
"]",
")",
":",
"repo",
"[",
"'repository'",
"]",
"for",
"repo",
"in",
"resp_data",
"[",
"'items'",
"]",
"}",
")",
"next_url",
"=",
"_parse_link_header",
"(",
"resp",
".",
"headers",
")",
".",
"get",
"(",
"'next'",
")",
"if",
"next_url",
":",
"resp",
"=",
"requests",
".",
"get",
"(",
"next_url",
",",
"headers",
"=",
"headers",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"resp_data",
"=",
"resp",
".",
"json",
"(",
")",
"else",
":",
"break",
"return",
"repositories"
] | Performs a Github API code search
Args:
query (str): The query sent to Github's code search
github_user (str, optional): The Github user being searched in the query string
Returns:
dict: A dictionary of repository information keyed on the git SSH url
Raises:
`InvalidGithubUserError`: When ``github_user`` is invalid | [
"Performs",
"a",
"Github",
"API",
"code",
"search"
] | python | valid |
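One simplification worth knowing here: `requests` already parses RFC 5988 `Link` headers into `resp.links`, so a generic paginator does not need a hand-rolled `_parse_link_header` helper. A sketch:

```python
import requests

def iter_pages(url, **kwargs):
    # Yield each page of a Link-header-paginated API (e.g. GitHub's).
    while url:
        resp = requests.get(url, **kwargs)
        resp.raise_for_status()
        yield resp.json()
        url = resp.links.get("next", {}).get("url")
```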
tomnor/channelpack | channelpack/pullxl.py | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L435-L446 | def fromxldate(xldate, datemode=1):
"""Return a python datetime object
xldate: float
The xl number.
datemode: int
0: 1900-based, 1: 1904-based. See xlrd documentation.
"""
t = xlrd.xldate_as_tuple(xldate, datemode)
return datetime.datetime(*t) | [
"def",
"fromxldate",
"(",
"xldate",
",",
"datemode",
"=",
"1",
")",
":",
"t",
"=",
"xlrd",
".",
"xldate_as_tuple",
"(",
"xldate",
",",
"datemode",
")",
"return",
"datetime",
".",
"datetime",
"(",
"*",
"t",
")"
] | Return a python datetime object
xldate: float
The xl number.
datemode: int
0: 1900-based, 1: 1904-based. See xlrd documentation. | [
"Return",
"a",
"python",
"datetime",
"object"
] | python | train |
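Example, assuming `xlrd` is installed. Note the sample's default is `datemode=1` (the 1904-based system used by old Mac Excel), while most Windows workbooks need `datemode=0`:

```python
import datetime
import xlrd

t = xlrd.xldate_as_tuple(43466.0, 0)  # 1900-based workbook
print(datetime.datetime(*t))          # 2019-01-01 00:00:00
```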
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/context.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L145-L167 | def flush(self):
"""Force a flush."""
if not self.items:
return
retry = 0
options = {"deadline": DATASTORE_DEADLINE}
while retry <= self.__timeout_retries:
try:
self.__flush_function(self.items, options)
self.clear()
break
except db.Timeout, e:
logging.warning(e)
logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
self, retry)
retry += 1
options["deadline"] *= 2
except apiproxy_errors.RequestTooLargeError:
self._log_largest_items()
raise
else:
raise | [
"def",
"flush",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"items",
":",
"return",
"retry",
"=",
"0",
"options",
"=",
"{",
"\"deadline\"",
":",
"DATASTORE_DEADLINE",
"}",
"while",
"retry",
"<=",
"self",
".",
"__timeout_retries",
":",
"try",
":",
"self",
".",
"__flush_function",
"(",
"self",
".",
"items",
",",
"options",
")",
"self",
".",
"clear",
"(",
")",
"break",
"except",
"db",
".",
"Timeout",
",",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"logging",
".",
"warning",
"(",
"\"Flushing '%s' timed out. Will retry for the %s time.\"",
",",
"self",
",",
"retry",
")",
"retry",
"+=",
"1",
"options",
"[",
"\"deadline\"",
"]",
"*=",
"2",
"except",
"apiproxy_errors",
".",
"RequestTooLargeError",
":",
"self",
".",
"_log_largest_items",
"(",
")",
"raise",
"else",
":",
"raise"
] | Force a flush. | [
"Force",
"a",
"flush",
"."
] | python | train |
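Outside App Engine, the same retry shape (retry only the transient error class, double the deadline each attempt, re-raise everything else) looks like this minimal sketch:

```python
def with_retries(call, retries=2, deadline=5.0, transient=(TimeoutError,)):
    for _ in range(retries + 1):
        try:
            return call(deadline)
        except transient:
            deadline *= 2      # back off by doubling the deadline
    raise TimeoutError("gave up after %d retries" % retries)
```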
pyroscope/pyrobase | src/pyrobase/bencode.py | https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L124-L155 | def encode(self, obj):
""" Add the given object to the result.
"""
if isinstance(obj, int_like_types):
self.result.append("i%de" % obj)
elif isinstance(obj, string_types):
self.result.extend([str(len(obj)), ':', str(obj)])
elif hasattr(obj, "__bencode__"):
self.encode(obj.__bencode__())
elif hasattr(obj, "items"):
# Dictionary
self.result.append('d')
for key, val in sorted(obj.items()):
key = str(key)
self.result.extend([str(len(key)), ':', key])
self.encode(val)
self.result.append('e')
else:
# Treat as iterable
try:
items = iter(obj)
except TypeError as exc:
raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % (
obj, type(obj), exc
))
else:
self.result.append('l')
for item in items:
self.encode(item)
self.result.append('e')
return self.result | [
"def",
"encode",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"int_like_types",
")",
":",
"self",
".",
"result",
".",
"append",
"(",
"\"i%de\"",
"%",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
":",
"self",
".",
"result",
".",
"extend",
"(",
"[",
"str",
"(",
"len",
"(",
"obj",
")",
")",
",",
"':'",
",",
"str",
"(",
"obj",
")",
"]",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"\"__bencode__\"",
")",
":",
"self",
".",
"encode",
"(",
"obj",
".",
"__bencode__",
"(",
")",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"\"items\"",
")",
":",
"# Dictionary",
"self",
".",
"result",
".",
"append",
"(",
"'d'",
")",
"for",
"key",
",",
"val",
"in",
"sorted",
"(",
"obj",
".",
"items",
"(",
")",
")",
":",
"key",
"=",
"str",
"(",
"key",
")",
"self",
".",
"result",
".",
"extend",
"(",
"[",
"str",
"(",
"len",
"(",
"key",
")",
")",
",",
"':'",
",",
"key",
"]",
")",
"self",
".",
"encode",
"(",
"val",
")",
"self",
".",
"result",
".",
"append",
"(",
"'e'",
")",
"else",
":",
"# Treat as iterable",
"try",
":",
"items",
"=",
"iter",
"(",
"obj",
")",
"except",
"TypeError",
"as",
"exc",
":",
"raise",
"BencodeError",
"(",
"\"Unsupported non-iterable object %r of type %s (%s)\"",
"%",
"(",
"obj",
",",
"type",
"(",
"obj",
")",
",",
"exc",
")",
")",
"else",
":",
"self",
".",
"result",
".",
"append",
"(",
"'l'",
")",
"for",
"item",
"in",
"items",
":",
"self",
".",
"encode",
"(",
"item",
")",
"self",
".",
"result",
".",
"append",
"(",
"'e'",
")",
"return",
"self",
".",
"result"
] | Add the given object to the result. | [
"Add",
"the",
"given",
"object",
"to",
"the",
"result",
"."
] | python | train |
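For reference, the bencoding (BitTorrent BEP 3) of `{"foo": 42, "bar": "spam"}` is the string below; the `sorted(obj.items())` call above is what guarantees the key order the format requires:

```python
# ints:    i<digits>e      -> 42     => "i42e"
# strings: <len>:<bytes>   -> "spam" => "4:spam"
# dicts:   d<sorted key/value pairs>e
assert "d3:bar4:spam3:fooi42ee" == "d" + "3:bar" + "4:spam" + "3:foo" + "i42e" + "e"
```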
boriel/zxbasic | symbols/vararray.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/symbols/vararray.py#L38-L41 | def count(self):
""" Total number of array cells
"""
return functools.reduce(lambda x, y: x * y, (x.count for x in self.bounds)) | [
"def",
"count",
"(",
"self",
")",
":",
"return",
"functools",
".",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"*",
"y",
",",
"(",
"x",
".",
"count",
"for",
"x",
"in",
"self",
".",
"bounds",
")",
")"
] | Total number of array cells | [
"Total",
"number",
"of",
"array",
"cells"
] | python | train |
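That reduce is just a product over per-dimension counts; since Python 3.8, `math.prod` does the same thing:

```python
from functools import reduce
from math import prod

bounds = [10, 5, 3]  # cells per dimension
assert reduce(lambda x, y: x * y, bounds) == prod(bounds) == 150
```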
gitpython-developers/GitPython | git/index/typ.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/typ.py#L172-L176 | def from_blob(cls, blob, stage=0):
""":return: Minimal entry resembling the given blob object"""
time = pack(">LL", 0, 0)
return IndexEntry((blob.mode, blob.binsha, stage << CE_STAGESHIFT, blob.path,
time, time, 0, 0, 0, 0, blob.size)) | [
"def",
"from_blob",
"(",
"cls",
",",
"blob",
",",
"stage",
"=",
"0",
")",
":",
"time",
"=",
"pack",
"(",
"\">LL\"",
",",
"0",
",",
"0",
")",
"return",
"IndexEntry",
"(",
"(",
"blob",
".",
"mode",
",",
"blob",
".",
"binsha",
",",
"stage",
"<<",
"CE_STAGESHIFT",
",",
"blob",
".",
"path",
",",
"time",
",",
"time",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"blob",
".",
"size",
")",
")"
] | :return: Minimal entry resembling the given blob object | [
":",
"return",
":",
"Minimal",
"entry",
"resembling",
"the",
"given",
"blob",
"object"
] | python | train |
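The two packed fields are zeroed ctime/mtime stamps, and the stage is shifted into bits 12-13 of the index-entry flags (`CE_STAGESHIFT == 12` in the git index format, so a stage-2 conflict entry yields flag value 8192):

```python
from struct import pack

CE_STAGESHIFT = 12
time = pack(">LL", 0, 0)         # 8 zero bytes: ctime + mtime
print(time, 2 << CE_STAGESHIFT)  # b'\x00...\x00' 8192
```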
wavefrontHQ/python-client | wavefront_api_client/models/message.py | https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/models/message.py#L157-L174 | def display(self, display):
"""Sets the display of this Message.
The form of display for this message # noqa: E501
:param display: The display of this Message. # noqa: E501
:type: str
"""
if display is None:
raise ValueError("Invalid value for `display`, must not be `None`") # noqa: E501
allowed_values = ["BANNER", "TOASTER"] # noqa: E501
if display not in allowed_values:
raise ValueError(
"Invalid value for `display` ({0}), must be one of {1}" # noqa: E501
.format(display, allowed_values)
)
self._display = display | [
"def",
"display",
"(",
"self",
",",
"display",
")",
":",
"if",
"display",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `display`, must not be `None`\"",
")",
"# noqa: E501",
"allowed_values",
"=",
"[",
"\"BANNER\"",
",",
"\"TOASTER\"",
"]",
"# noqa: E501",
"if",
"display",
"not",
"in",
"allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `display` ({0}), must be one of {1}\"",
"# noqa: E501",
".",
"format",
"(",
"display",
",",
"allowed_values",
")",
")",
"self",
".",
"_display",
"=",
"display"
] | Sets the display of this Message.
The form of display for this message # noqa: E501
:param display: The display of this Message. # noqa: E501
:type: str | [
"Sets",
"the",
"display",
"of",
"this",
"Message",
"."
] | python | train |
portfors-lab/sparkle | sparkle/gui/stim/qstimulus.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/qstimulus.py#L196-L206 | def showEditor(self):
"""Creates and shows an editor for this Stimulus"""
if self.editor is not None:
editor = self.editor()
editor.setModel(self)
factory = get_stimulus_factory(self._stim.stimType())
editor.editingFinished.connect(factory.update)
return editor
else:
logger = logging.getLogger('main')
logger.warning('Erm, no editor available :(') | [
"def",
"showEditor",
"(",
"self",
")",
":",
"if",
"self",
".",
"editor",
"is",
"not",
"None",
":",
"editor",
"=",
"self",
".",
"editor",
"(",
")",
"editor",
".",
"setModel",
"(",
"self",
")",
"factory",
"=",
"get_stimulus_factory",
"(",
"self",
".",
"_stim",
".",
"stimType",
"(",
")",
")",
"editor",
".",
"editingFinished",
".",
"connect",
"(",
"factory",
".",
"update",
")",
"return",
"editor",
"else",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"logger",
".",
"warning",
"(",
"'Erm, no editor available :('",
")"
] | Creates and shows an editor for this Stimulus | [
"Creates",
"and",
"shows",
"an",
"editor",
"for",
"this",
"Stimulus"
] | python | train |
mitsei/dlkit | dlkit/json_/learning/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L6241-L6262 | def is_child_of_objective_bank(self, id_, objective_bank_id):
"""Tests if an objective bank is a direct child of another.
arg: id (osid.id.Id): an ``Id``
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
return: (boolean) - ``true`` if the ``id`` is a child of
``objective_bank_id,`` ``false`` otherwise
raise: NotFound - ``objective_bank_id`` is not found
raise: NullArgument - ``id`` or ``objective_bank_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_child_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=objective_bank_id)
return self._hierarchy_session.is_child(id_=objective_bank_id, child_id=id_) | [
"def",
"is_child_of_objective_bank",
"(",
"self",
",",
"id_",
",",
"objective_bank_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchySession.is_child_of_bin",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"is_child_of_catalog",
"(",
"id_",
"=",
"id_",
",",
"catalog_id",
"=",
"objective_bank_id",
")",
"return",
"self",
".",
"_hierarchy_session",
".",
"is_child",
"(",
"id_",
"=",
"objective_bank_id",
",",
"child_id",
"=",
"id_",
")"
] | Tests if an objective bank is a direct child of another.
arg: id (osid.id.Id): an ``Id``
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
return: (boolean) - ``true`` if the ``id`` is a child of
``objective_bank_id,`` ``false`` otherwise
raise: NotFound - ``objective_bank_id`` is not found
raise: NullArgument - ``id`` or ``objective_bank_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. | [
"Tests",
"if",
"an",
"objective",
"bank",
"is",
"a",
"direct",
"child",
"of",
"another",
"."
] | python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L232-L402 | def make_pdf_from_html(
# Mandatory parameters:
on_disk: bool,
html: str,
# Disk options:
output_path: str = None,
# Shared options:
header_html: str = None,
footer_html: str = None,
wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
wkhtmltopdf_options: Dict[str, Any] = None,
file_encoding: str = "utf-8",
debug_options: bool = False,
debug_content: bool = False,
debug_wkhtmltopdf_args: bool = True,
fix_pdfkit_encoding_bug: bool = None,
processor: str = _DEFAULT_PROCESSOR) -> Union[bytes, bool]:
"""
Takes HTML and either returns a PDF in memory or makes one on disk.
For preference, uses ``wkhtmltopdf`` (with ``pdfkit``):
- faster than ``xhtml2pdf``
- tables not buggy like ``Weasyprint``
- however, doesn't support CSS Paged Media, so we have the
``header_html`` and ``footer_html`` options to allow you to pass
appropriate HTML content to serve as the header/footer (rather than
passing it within the main HTML).
Args:
on_disk: make file on disk (rather than returning it in memory)?
html: main HTML
output_path: if ``on_disk``, the output filename
header_html: optional page header, as HTML
footer_html: optional page footer, as HTML
wkhtmltopdf_filename: filename of the ``wkhtmltopdf`` executable
wkhtmltopdf_options: options for ``wkhtmltopdf``
file_encoding: encoding to use when writing the header/footer to disk
debug_options: log ``wkhtmltopdf`` config/options passed to ``pdfkit``?
debug_content: log the main/header/footer HTML?
debug_wkhtmltopdf_args: log the final command-line arguments to
that will be used by ``pdfkit`` when it calls ``wkhtmltopdf``?
fix_pdfkit_encoding_bug: attempt to work around bug in e.g.
``pdfkit==0.5.0`` by encoding ``wkhtmltopdf_filename`` to UTF-8
before passing it to ``pdfkit``? If you pass ``None`` here, then
a default value is used, from
:func:`get_default_fix_pdfkit_encoding_bug`.
processor: a PDF processor type from :class:`Processors`
Returns:
the PDF binary as a ``bytes`` object
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
"""
wkhtmltopdf_options = wkhtmltopdf_options or {} # type: Dict[str, Any]
assert_processor_available(processor)
if debug_content:
log.debug("html: {}", html)
log.debug("header_html: {}", header_html)
log.debug("footer_html: {}", footer_html)
if fix_pdfkit_encoding_bug is None:
fix_pdfkit_encoding_bug = get_default_fix_pdfkit_encoding_bug()
if processor == Processors.XHTML2PDF:
if on_disk:
with open(output_path, mode='wb') as outfile:
# noinspection PyUnresolvedReferences
xhtml2pdf.document.pisaDocument(html, outfile)
return True
else:
memfile = io.BytesIO()
# noinspection PyUnresolvedReferences
xhtml2pdf.document.pisaDocument(html, memfile)
# ... returns a document, but we don't use it, so we don't store it
# to stop pychecker complaining
# http://xhtml2pdf.appspot.com/static/pisa-en.html
memfile.seek(0)
return memfile.read()
# http://stackoverflow.com/questions/3310584
elif processor == Processors.WEASYPRINT:
if on_disk:
return weasyprint.HTML(string=html).write_pdf(output_path)
else:
# http://ampad.de/blog/generating-pdfs-django/
return weasyprint.HTML(string=html).write_pdf()
elif processor == Processors.PDFKIT:
# Config:
if not wkhtmltopdf_filename:
config = None
else:
if fix_pdfkit_encoding_bug: # needs to be True for pdfkit==0.5.0
log.debug("Attempting to fix bug in pdfkit (e.g. version 0.5.0)"
" by encoding wkhtmltopdf_filename to UTF-8")
config = pdfkit.configuration(
wkhtmltopdf=wkhtmltopdf_filename.encode('utf-8'))
# the bug is that pdfkit.pdfkit.PDFKit.__init__ will attempt to
# decode the string in its configuration object;
# https://github.com/JazzCore/python-pdfkit/issues/32
else:
config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_filename)
# Temporary files that a subprocess can read:
# http://stackoverflow.com/questions/15169101
# wkhtmltopdf requires its HTML files to have ".html" extensions:
# http://stackoverflow.com/questions/5776125
h_filename = None
f_filename = None
try:
if header_html:
h_fd, h_filename = tempfile.mkstemp(suffix='.html')
os.write(h_fd, header_html.encode(file_encoding))
os.close(h_fd)
wkhtmltopdf_options["header-html"] = h_filename
if footer_html:
f_fd, f_filename = tempfile.mkstemp(suffix='.html')
os.write(f_fd, footer_html.encode(file_encoding))
os.close(f_fd)
wkhtmltopdf_options["footer-html"] = f_filename
if debug_options:
log.debug("wkhtmltopdf config: {!r}", config)
log.debug("wkhtmltopdf_options: {}",
pformat(wkhtmltopdf_options))
kit = pdfkit.pdfkit.PDFKit(html, 'string', configuration=config,
options=wkhtmltopdf_options)
if on_disk:
path = output_path
else:
path = None
# With "path=None", the to_pdf() function directly returns
# stdout from a subprocess.Popen().communicate() call (see
# pdfkit.py). Since universal_newlines is not set, stdout will
# be bytes in Python 3.
if debug_wkhtmltopdf_args:
log.debug("Probable current user: {!r}", getpass.getuser())
log.debug("wkhtmltopdf arguments will be: {!r}",
kit.command(path=path))
return kit.to_pdf(path=path)
finally:
if h_filename:
os.remove(h_filename)
if f_filename:
os.remove(f_filename)
else:
raise AssertionError("Unknown PDF engine") | [
"def",
"make_pdf_from_html",
"(",
"# Mandatory parameters:",
"on_disk",
":",
"bool",
",",
"html",
":",
"str",
",",
"# Disk options:",
"output_path",
":",
"str",
"=",
"None",
",",
"# Shared options:",
"header_html",
":",
"str",
"=",
"None",
",",
"footer_html",
":",
"str",
"=",
"None",
",",
"wkhtmltopdf_filename",
":",
"str",
"=",
"_WKHTMLTOPDF_FILENAME",
",",
"wkhtmltopdf_options",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"file_encoding",
":",
"str",
"=",
"\"utf-8\"",
",",
"debug_options",
":",
"bool",
"=",
"False",
",",
"debug_content",
":",
"bool",
"=",
"False",
",",
"debug_wkhtmltopdf_args",
":",
"bool",
"=",
"True",
",",
"fix_pdfkit_encoding_bug",
":",
"bool",
"=",
"None",
",",
"processor",
":",
"str",
"=",
"_DEFAULT_PROCESSOR",
")",
"->",
"Union",
"[",
"bytes",
",",
"bool",
"]",
":",
"wkhtmltopdf_options",
"=",
"wkhtmltopdf_options",
"or",
"{",
"}",
"# type: Dict[str, Any]",
"assert_processor_available",
"(",
"processor",
")",
"if",
"debug_content",
":",
"log",
".",
"debug",
"(",
"\"html: {}\"",
",",
"html",
")",
"log",
".",
"debug",
"(",
"\"header_html: {}\"",
",",
"header_html",
")",
"log",
".",
"debug",
"(",
"\"footer_html: {}\"",
",",
"footer_html",
")",
"if",
"fix_pdfkit_encoding_bug",
"is",
"None",
":",
"fix_pdfkit_encoding_bug",
"=",
"get_default_fix_pdfkit_encoding_bug",
"(",
")",
"if",
"processor",
"==",
"Processors",
".",
"XHTML2PDF",
":",
"if",
"on_disk",
":",
"with",
"open",
"(",
"output_path",
",",
"mode",
"=",
"'wb'",
")",
"as",
"outfile",
":",
"# noinspection PyUnresolvedReferences",
"xhtml2pdf",
".",
"document",
".",
"pisaDocument",
"(",
"html",
",",
"outfile",
")",
"return",
"True",
"else",
":",
"memfile",
"=",
"io",
".",
"BytesIO",
"(",
")",
"# noinspection PyUnresolvedReferences",
"xhtml2pdf",
".",
"document",
".",
"pisaDocument",
"(",
"html",
",",
"memfile",
")",
"# ... returns a document, but we don't use it, so we don't store it",
"# to stop pychecker complaining",
"# http://xhtml2pdf.appspot.com/static/pisa-en.html",
"memfile",
".",
"seek",
"(",
"0",
")",
"return",
"memfile",
".",
"read",
"(",
")",
"# http://stackoverflow.com/questions/3310584",
"elif",
"processor",
"==",
"Processors",
".",
"WEASYPRINT",
":",
"if",
"on_disk",
":",
"return",
"weasyprint",
".",
"HTML",
"(",
"string",
"=",
"html",
")",
".",
"write_pdf",
"(",
"output_path",
")",
"else",
":",
"# http://ampad.de/blog/generating-pdfs-django/",
"return",
"weasyprint",
".",
"HTML",
"(",
"string",
"=",
"html",
")",
".",
"write_pdf",
"(",
")",
"elif",
"processor",
"==",
"Processors",
".",
"PDFKIT",
":",
"# Config:",
"if",
"not",
"wkhtmltopdf_filename",
":",
"config",
"=",
"None",
"else",
":",
"if",
"fix_pdfkit_encoding_bug",
":",
"# needs to be True for pdfkit==0.5.0",
"log",
".",
"debug",
"(",
"\"Attempting to fix bug in pdfkit (e.g. version 0.5.0)\"",
"\" by encoding wkhtmltopdf_filename to UTF-8\"",
")",
"config",
"=",
"pdfkit",
".",
"configuration",
"(",
"wkhtmltopdf",
"=",
"wkhtmltopdf_filename",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"# the bug is that pdfkit.pdfkit.PDFKit.__init__ will attempt to",
"# decode the string in its configuration object;",
"# https://github.com/JazzCore/python-pdfkit/issues/32",
"else",
":",
"config",
"=",
"pdfkit",
".",
"configuration",
"(",
"wkhtmltopdf",
"=",
"wkhtmltopdf_filename",
")",
"# Temporary files that a subprocess can read:",
"# http://stackoverflow.com/questions/15169101",
"# wkhtmltopdf requires its HTML files to have \".html\" extensions:",
"# http://stackoverflow.com/questions/5776125",
"h_filename",
"=",
"None",
"f_filename",
"=",
"None",
"try",
":",
"if",
"header_html",
":",
"h_fd",
",",
"h_filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.html'",
")",
"os",
".",
"write",
"(",
"h_fd",
",",
"header_html",
".",
"encode",
"(",
"file_encoding",
")",
")",
"os",
".",
"close",
"(",
"h_fd",
")",
"wkhtmltopdf_options",
"[",
"\"header-html\"",
"]",
"=",
"h_filename",
"if",
"footer_html",
":",
"f_fd",
",",
"f_filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.html'",
")",
"os",
".",
"write",
"(",
"f_fd",
",",
"footer_html",
".",
"encode",
"(",
"file_encoding",
")",
")",
"os",
".",
"close",
"(",
"f_fd",
")",
"wkhtmltopdf_options",
"[",
"\"footer-html\"",
"]",
"=",
"f_filename",
"if",
"debug_options",
":",
"log",
".",
"debug",
"(",
"\"wkhtmltopdf config: {!r}\"",
",",
"config",
")",
"log",
".",
"debug",
"(",
"\"wkhtmltopdf_options: {}\"",
",",
"pformat",
"(",
"wkhtmltopdf_options",
")",
")",
"kit",
"=",
"pdfkit",
".",
"pdfkit",
".",
"PDFKit",
"(",
"html",
",",
"'string'",
",",
"configuration",
"=",
"config",
",",
"options",
"=",
"wkhtmltopdf_options",
")",
"if",
"on_disk",
":",
"path",
"=",
"output_path",
"else",
":",
"path",
"=",
"None",
"# With \"path=None\", the to_pdf() function directly returns",
"# stdout from a subprocess.Popen().communicate() call (see",
"# pdfkit.py). Since universal_newlines is not set, stdout will",
"# be bytes in Python 3.",
"if",
"debug_wkhtmltopdf_args",
":",
"log",
".",
"debug",
"(",
"\"Probable current user: {!r}\"",
",",
"getpass",
".",
"getuser",
"(",
")",
")",
"log",
".",
"debug",
"(",
"\"wkhtmltopdf arguments will be: {!r}\"",
",",
"kit",
".",
"command",
"(",
"path",
"=",
"path",
")",
")",
"return",
"kit",
".",
"to_pdf",
"(",
"path",
"=",
"path",
")",
"finally",
":",
"if",
"h_filename",
":",
"os",
".",
"remove",
"(",
"h_filename",
")",
"if",
"f_filename",
":",
"os",
".",
"remove",
"(",
"f_filename",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Unknown PDF engine\"",
")"
] | Takes HTML and either returns a PDF in memory or makes one on disk.
For preference, uses ``wkhtmltopdf`` (with ``pdfkit``):
- faster than ``xhtml2pdf``
- tables not buggy like ``Weasyprint``
- however, doesn't support CSS Paged Media, so we have the
``header_html`` and ``footer_html`` options to allow you to pass
appropriate HTML content to serve as the header/footer (rather than
passing it within the main HTML).
Args:
on_disk: make file on disk (rather than returning it in memory)?
html: main HTML
output_path: if ``on_disk``, the output filename
header_html: optional page header, as HTML
footer_html: optional page footer, as HTML
wkhtmltopdf_filename: filename of the ``wkhtmltopdf`` executable
wkhtmltopdf_options: options for ``wkhtmltopdf``
file_encoding: encoding to use when writing the header/footer to disk
debug_options: log ``wkhtmltopdf`` config/options passed to ``pdfkit``?
debug_content: log the main/header/footer HTML?
debug_wkhtmltopdf_args: log the final command-line arguments to
that will be used by ``pdfkit`` when it calls ``wkhtmltopdf``?
fix_pdfkit_encoding_bug: attempt to work around bug in e.g.
``pdfkit==0.5.0`` by encoding ``wkhtmltopdf_filename`` to UTF-8
before passing it to ``pdfkit``? If you pass ``None`` here, then
a default value is used, from
:func:`get_default_fix_pdfkit_encoding_bug`.
processor: a PDF processor type from :class:`Processors`
Returns:
the PDF binary as a ``bytes`` object
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable | [
"Takes",
"HTML",
"and",
"either",
"returns",
"a",
"PDF",
"in",
"memory",
"or",
"makes",
"one",
"on",
"disk",
"."
] | python | train |
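Illustrative call, assuming the function above is importable and `wkhtmltopdf` is installed at the default path; the header/footer HTML is written to temp files and rendered per page by `wkhtmltopdf` itself:

```python
pdf_bytes = make_pdf_from_html(
    on_disk=False,
    html="<h1>Hello</h1>",
    header_html="<div>Report header</div>",
    processor=Processors.PDFKIT,
)
with open("out.pdf", "wb") as f:
    f.write(pdf_bytes)
```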
wilson-eft/wilson | wilson/translate/wet.py | https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/wet.py#L1462-L1478 | def Fierz_to_EOS_chrom(C, dd, parameters):
"""From Fierz to chromomagnetic EOS basis for Class V.
dd should be of the form 'sb', 'ds' etc."""
p = parameters
V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
Vtb = V[2,2]
Vts = V[2,1]
e = sqrt(4 * pi * parameters['alpha_e'])
gs = sqrt(4 * pi * parameters['alpha_s'])
mb = parameters['m_b']
dic = {"b->s::c7": 16 * pi**2 / mb / e * C["F7gamma" + dd],
"b->s::c7'": 16 * pi**2 / mb / e * C["F7pgamma" + dd],
"b->s::c8": 16 * pi**2 / mb / gs * C["F8g" + dd],
"b->s::c8'": 16 * pi**2 / mb / gs * C["F8pg" + dd]
}
prefactor = sqrt(2)/p['GF']/Vtb/Vts.conj()/4
return {k: prefactor * v for k,v in dic.items()} | [
"def",
"Fierz_to_EOS_chrom",
"(",
"C",
",",
"dd",
",",
"parameters",
")",
":",
"p",
"=",
"parameters",
"V",
"=",
"ckmutil",
".",
"ckm",
".",
"ckm_tree",
"(",
"p",
"[",
"\"Vus\"",
"]",
",",
"p",
"[",
"\"Vub\"",
"]",
",",
"p",
"[",
"\"Vcb\"",
"]",
",",
"p",
"[",
"\"delta\"",
"]",
")",
"Vtb",
"=",
"V",
"[",
"2",
",",
"2",
"]",
"Vts",
"=",
"V",
"[",
"2",
",",
"1",
"]",
"e",
"=",
"sqrt",
"(",
"4",
"*",
"pi",
"*",
"parameters",
"[",
"'alpha_e'",
"]",
")",
"gs",
"=",
"sqrt",
"(",
"4",
"*",
"pi",
"*",
"parameters",
"[",
"'alpha_s'",
"]",
")",
"mb",
"=",
"parameters",
"[",
"'m_b'",
"]",
"dic",
"=",
"{",
"\"b->s::c7\"",
":",
"16",
"*",
"pi",
"**",
"2",
"/",
"mb",
"/",
"e",
"*",
"C",
"[",
"\"F7gamma\"",
"+",
"dd",
"]",
",",
"\"b->s::c7'\"",
":",
"16",
"*",
"pi",
"**",
"2",
"/",
"mb",
"/",
"e",
"*",
"C",
"[",
"\"F7pgamma\"",
"+",
"dd",
"]",
",",
"\"b->s::c8\"",
":",
"16",
"*",
"pi",
"**",
"2",
"/",
"mb",
"/",
"gs",
"*",
"C",
"[",
"\"F8g\"",
"+",
"dd",
"]",
",",
"\"b->s::c8'\"",
":",
"16",
"*",
"pi",
"**",
"2",
"/",
"mb",
"/",
"gs",
"*",
"C",
"[",
"\"F8pg\"",
"+",
"dd",
"]",
"}",
"prefactor",
"=",
"sqrt",
"(",
"2",
")",
"/",
"p",
"[",
"'GF'",
"]",
"/",
"Vtb",
"/",
"Vts",
".",
"conj",
"(",
")",
"/",
"4",
"return",
"{",
"k",
":",
"prefactor",
"*",
"v",
"for",
"k",
",",
"v",
"in",
"dic",
".",
"items",
"(",
")",
"}"
] | From Fierz to chromomagnetic EOS basis for Class V.
dd should be of the form 'sb', 'ds' etc. | [
"From",
"Fierz",
"to",
"chromomagnetic",
"EOS",
"basis",
"for",
"Class",
"V",
".",
"dd",
"should",
"be",
"of",
"the",
"form",
"sb",
"ds",
"etc",
"."
] | python | train |
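To see the shape of the normalisation, here is the `c7` conversion factor evaluated with toy inputs; these numbers are placeholders only, since the real values come from the `parameters` dict (`alpha_e`, `m_b`, CKM inputs):

```python
from math import pi, sqrt

alpha_e, mb = 1 / 137.036, 4.18  # toy values, not a physics claim
e = sqrt(4 * pi * alpha_e)
print(16 * pi**2 / mb / e)       # ratio "b->s::c7" / F7gamma, before the CKM prefactor
```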
micha030201/aionationstates | aionationstates/world_.py | https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/world_.py#L124-L150 | def regionsbytag(self, *tags):
"""All regions with any of the named tags.
Parameters
----------
*tags : str
Regional tags. Can be preceded by a ``-`` to select regions
without that tag.
Returns
-------
an :class:`ApiQuery` of a list of :class:`Region`
"""
if len(tags) > 10:
raise ValueError('You can specify up to 10 tags')
if not tags:
raise ValueError('No tags specified')
# We don't check for invalid tags here because the behaviour is
        # fairly intuitive - querying for a non-existent tag returns no
# regions, excluding it returns all of them.
@api_query('regionsbytag', tags=','.join(tags))
async def result(_, root):
text = root.find('REGIONS').text
return ([aionationstates.Region(r) for r in text.split(',')]
if text else [])
return result(self) | [
"def",
"regionsbytag",
"(",
"self",
",",
"*",
"tags",
")",
":",
"if",
"len",
"(",
"tags",
")",
">",
"10",
":",
"raise",
"ValueError",
"(",
"'You can specify up to 10 tags'",
")",
"if",
"not",
"tags",
":",
"raise",
"ValueError",
"(",
"'No tags specified'",
")",
"# We don't check for invalid tags here because the behaviour is",
"# fairly intuitive - quering for a non-existent tag returns no",
"# regions, excluding it returns all of them.",
"@",
"api_query",
"(",
"'regionsbytag'",
",",
"tags",
"=",
"','",
".",
"join",
"(",
"tags",
")",
")",
"async",
"def",
"result",
"(",
"_",
",",
"root",
")",
":",
"text",
"=",
"root",
".",
"find",
"(",
"'REGIONS'",
")",
".",
"text",
"return",
"(",
"[",
"aionationstates",
".",
"Region",
"(",
"r",
")",
"for",
"r",
"in",
"text",
".",
"split",
"(",
"','",
")",
"]",
"if",
"text",
"else",
"[",
"]",
")",
"return",
"result",
"(",
"self",
")"
] | All regions with any of the named tags.
Parameters
----------
*tags : str
Regional tags. Can be preceded by a ``-`` to select regions
without that tag.
Returns
-------
an :class:`ApiQuery` of a list of :class:`Region` | [
"All",
"regions",
"with",
"any",
"of",
"the",
"named",
"tags",
"."
] | python | train |
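Illustrative usage per the docstring above (`-tag` excludes, at most 10 tags). The `world` entry point is assumed here for the sketch; check the library's docs for the real object this query lives on:

```python
import asyncio
import aionationstates

async def main():
    regions = await aionationstates.world.regionsbytag("fandom", "-founderless")
    print(len(regions))

asyncio.run(main())
```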
awacha/sastool | sastool/io/credo_saxsctrl/header.py | https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_saxsctrl/header.py#L114-L126 | def distance(self) -> ErrorValue:
"""Sample-to-detector distance"""
if 'DistCalibrated' in self._data:
dist = self._data['DistCalibrated']
else:
dist = self._data["Dist"]
if 'DistCalibratedError' in self._data:
disterr = self._data['DistCalibratedError']
elif 'DistError' in self._data:
disterr = self._data['DistError']
else:
disterr = 0.0
return ErrorValue(dist, disterr) | [
"def",
"distance",
"(",
"self",
")",
"->",
"ErrorValue",
":",
"if",
"'DistCalibrated'",
"in",
"self",
".",
"_data",
":",
"dist",
"=",
"self",
".",
"_data",
"[",
"'DistCalibrated'",
"]",
"else",
":",
"dist",
"=",
"self",
".",
"_data",
"[",
"\"Dist\"",
"]",
"if",
"'DistCalibratedError'",
"in",
"self",
".",
"_data",
":",
"disterr",
"=",
"self",
".",
"_data",
"[",
"'DistCalibratedError'",
"]",
"elif",
"'DistError'",
"in",
"self",
".",
"_data",
":",
"disterr",
"=",
"self",
".",
"_data",
"[",
"'DistError'",
"]",
"else",
":",
"disterr",
"=",
"0.0",
"return",
"ErrorValue",
"(",
"dist",
",",
"disterr",
")"
] | Sample-to-detector distance | [
"Sample",
"-",
"to",
"-",
"detector",
"distance"
] | python | train |
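The same calibrated-first, raw-fallback precedence, written with plain `dict.get` to make the order explicit:

```python
data = {"Dist": 1250.0, "DistError": 3.0}  # no calibrated values present
dist = data.get("DistCalibrated", data["Dist"])
err = data.get("DistCalibratedError", data.get("DistError", 0.0))
print(dist, "+/-", err)                    # 1250.0 +/- 3.0
```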
apache/spark | python/pyspark/ml/clustering.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L522-L531 | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"BisectingKMeansSummary",
"(",
"super",
"(",
"BisectingKMeansModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summary available for this %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")"
] | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"cluster",
"assignments",
"cluster",
"sizes",
")",
"of",
"the",
"model",
"trained",
"on",
"the",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"no",
"summary",
"exists",
"."
] | python | train |
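A hedged usage sketch for the guarded summary accessor above; it assumes a running SparkSession named spark and uses only documented pyspark.ml calls.

from pyspark.ml.clustering import BisectingKMeans
from pyspark.ml.linalg import Vectors

df = spark.createDataFrame(
    [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([10.0, 10.0]),)],
    ["features"])
model = BisectingKMeans(k=2, seed=1).fit(df)
if model.hasSummary:            # avoids the RuntimeError path above
    print(model.summary.clusterSizes)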
HewlettPackard/python-hpOneView | hpOneView/resources/servers/enclosures.py | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/enclosures.py#L197-L209 | def get_sso(self, role):
"""
Builds the SSO (Single Sign-On) URL parameters for the specified enclosure. This allows the user to
log in to the enclosure without providing credentials. This API is currently only supported by C7000 enclosures.
Args:
role: Role
Returns:
SSO (Single Sign-On) URL parameters.
"""
uri = "{}/sso?role={}".format(self.data['uri'], role)
return self._helper.do_get(uri) | [
"def",
"get_sso",
"(",
"self",
",",
"role",
")",
":",
"uri",
"=",
"\"{}/sso?role={}\"",
".",
"format",
"(",
"self",
".",
"data",
"[",
"'uri'",
"]",
",",
"role",
")",
"return",
"self",
".",
"_helper",
".",
"do_get",
"(",
"uri",
")"
] | Builds the SSO (Single Sign-On) URL parameters for the specified enclosure. This allows the user to
log in to the enclosure without providing credentials. This API is currently only supported by C7000 enclosures.
Args:
role: Role
Returns:
SSO (Single Sign-On) URL parameters. | [
"Builds",
"the",
"SSO",
"(",
"Single",
"Sign",
"-",
"On",
")",
"URL",
"parameters",
"for",
"the",
"specified",
"enclosure",
".",
"This",
"allows",
"the",
"user",
"to",
"log",
"in",
"to",
"the",
"enclosure",
"without",
"providing",
"credentials",
".",
"This",
"API",
"is",
"currently",
"only",
"supported",
"by",
"C7000",
"enclosures",
"."
] | python | train |
WebarchivCZ/WA-KAT | src/wa_kat/db/request_info.py | https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/db/request_info.py#L177-L194 | def to_dict(self):
"""
This method is used in with connection to REST API. It basically
converts all important properties to dictionary, which may be used by
frontend.
Returns:
dict: ``{"all_set": bool, "progress": [int(done), int(how_many)], \
"values": {"property": [values], ..}}``
"""
return {
"all_set": self._is_all_set(),
"progress": self.progress(),
"values": {
property_name: getattr(self, property_name) or []
for property_name in worker_mapping().keys()
}
} | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"\"all_set\"",
":",
"self",
".",
"_is_all_set",
"(",
")",
",",
"\"progress\"",
":",
"self",
".",
"progress",
"(",
")",
",",
"\"values\"",
":",
"{",
"property_name",
":",
"getattr",
"(",
"self",
",",
"property_name",
")",
"or",
"[",
"]",
"for",
"property_name",
"in",
"worker_mapping",
"(",
")",
".",
"keys",
"(",
")",
"}",
"}"
This method is used in connection with the REST API. It basically
converts all important properties to dictionary, which may be used by
frontend.
Returns:
dict: ``{"all_set": bool, "progress": [int(done), int(how_many)], \
"values": {"property": [values], ..}}`` | [
"This",
"method",
"is",
"used",
"in",
"with",
"connection",
"to",
"REST",
"API",
".",
"It",
"basically",
"converts",
"all",
"important",
"properties",
"to",
"dictionary",
"which",
"may",
"be",
"used",
"by",
"frontend",
"."
] | python | train |
fastai/fastai | docs_src/nbval/plugin.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/plugin.py#L248-L259 | def get_sanitize_files(self):
"""
Return list of all sanitize files provided by the user on the command line.
N.B.: We only support one sanitize file at the moment, but
this is likely to change in the future
"""
if self.parent.config.option.sanitize_with is not None:
return [self.parent.config.option.sanitize_with]
else:
return [] | [
"def",
"get_sanitize_files",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent",
".",
"config",
".",
"option",
".",
"sanitize_with",
"is",
"not",
"None",
":",
"return",
"[",
"self",
".",
"parent",
".",
"config",
".",
"option",
".",
"sanitize_with",
"]",
"else",
":",
"return",
"[",
"]"
] | Return list of all sanitize files provided by the user on the command line.
N.B.: We only support one sanitize file at the moment, but
this is likely to change in the future | [
"Return",
"list",
"of",
"all",
"sanitize",
"files",
"provided",
"by",
"the",
"user",
"on",
"the",
"command",
"line",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/model.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L121-L128 | def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx) | [
"def",
"_initialize_kvstore",
"(",
"kvstore",
",",
"param_arrays",
",",
"arg_params",
",",
"param_names",
",",
"update_on_kvstore",
")",
":",
"for",
"idx",
",",
"param_on_devs",
"in",
"enumerate",
"(",
"param_arrays",
")",
":",
"name",
"=",
"param_names",
"[",
"idx",
"]",
"kvstore",
".",
"init",
"(",
"name",
",",
"arg_params",
"[",
"name",
"]",
")",
"if",
"update_on_kvstore",
":",
"kvstore",
".",
"pull",
"(",
"name",
",",
"param_on_devs",
",",
"priority",
"=",
"-",
"idx",
")"
] | Initialize kvstore | [
"Initialize",
"kvstore"
] | python | train |
geopy/geopy | geopy/geocoders/ignfrance.py | https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/ignfrance.py#L548-L577 | def _parse_place(place, is_freeform=None):
"""
Get the location, lat, lng and place from a single json place.
"""
# When freeform already so full address
if is_freeform == 'true':
location = place.get('freeformaddress')
else:
# For parcelle
if place.get('numero'):
location = place.get('street')
else:
# When classic geocoding
# or when reverse geocoding
location = "%s %s" % (
place.get('postal_code', ''),
place.get('commune', ''),
)
if place.get('street'):
location = "%s, %s" % (
place.get('street', ''),
location,
)
if place.get('building'):
location = "%s %s" % (
place.get('building', ''),
location,
)
return Location(location, (place.get('lat'), place.get('lng')), place) | [
"def",
"_parse_place",
"(",
"place",
",",
"is_freeform",
"=",
"None",
")",
":",
"# When freeform already so full address",
"if",
"is_freeform",
"==",
"'true'",
":",
"location",
"=",
"place",
".",
"get",
"(",
"'freeformaddress'",
")",
"else",
":",
"# For parcelle",
"if",
"place",
".",
"get",
"(",
"'numero'",
")",
":",
"location",
"=",
"place",
".",
"get",
"(",
"'street'",
")",
"else",
":",
"# When classic geocoding",
"# or when reverse geocoding",
"location",
"=",
"\"%s %s\"",
"%",
"(",
"place",
".",
"get",
"(",
"'postal_code'",
",",
"''",
")",
",",
"place",
".",
"get",
"(",
"'commune'",
",",
"''",
")",
",",
")",
"if",
"place",
".",
"get",
"(",
"'street'",
")",
":",
"location",
"=",
"\"%s, %s\"",
"%",
"(",
"place",
".",
"get",
"(",
"'street'",
",",
"''",
")",
",",
"location",
",",
")",
"if",
"place",
".",
"get",
"(",
"'building'",
")",
":",
"location",
"=",
"\"%s %s\"",
"%",
"(",
"place",
".",
"get",
"(",
"'building'",
",",
"''",
")",
",",
"location",
",",
")",
"return",
"Location",
"(",
"location",
",",
"(",
"place",
".",
"get",
"(",
"'lat'",
")",
",",
"place",
".",
"get",
"(",
"'lng'",
")",
")",
",",
"place",
")"
] | Get the location, lat, lng and place from a single json place. | [
"Get",
"the",
"location",
"lat",
"lng",
"and",
"place",
"from",
"a",
"single",
"json",
"place",
"."
] | python | train |
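A standalone illustration of the address assembly in _parse_place above, using a hypothetical place dict; the Location wrapper is omitted so the snippet runs on its own.

place = {'postal_code': '75007', 'commune': 'Paris',
         'street': 'rue de Grenelle', 'building': '12'}
location = "%s %s" % (place.get('postal_code', ''), place.get('commune', ''))
if place.get('street'):
    location = "%s, %s" % (place['street'], location)
if place.get('building'):
    location = "%s %s" % (place['building'], location)
print(location)  # 12 rue de Grenelle, 75007 Paris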
bitshares/uptick | uptick/callorders.py | https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/callorders.py#L20-L57 | def calls(ctx, obj, limit):
""" List call/short positions of an account or an asset
"""
if obj.upper() == obj:
# Asset
from bitshares.asset import Asset
asset = Asset(obj, full=True)
calls = asset.get_call_orders(limit)
t = [["acount", "debt", "collateral", "call price", "ratio"]]
for call in calls:
t.append(
[
str(call["account"]["name"]),
str(call["debt"]),
str(call["collateral"]),
str(call["call_price"]),
"%.2f" % (call["ratio"]),
]
)
print_table(t)
else:
# Account
from bitshares.dex import Dex
dex = Dex(bitshares_instance=ctx.bitshares)
calls = dex.list_debt_positions(account=obj)
t = [["debt", "collateral", "call price", "ratio"]]
for symbol in calls:
t.append(
[
str(calls[symbol]["debt"]),
str(calls[symbol]["collateral"]),
str(calls[symbol]["call_price"]),
"%.2f" % (calls[symbol]["ratio"]),
]
)
print_table(t) | [
"def",
"calls",
"(",
"ctx",
",",
"obj",
",",
"limit",
")",
":",
"if",
"obj",
".",
"upper",
"(",
")",
"==",
"obj",
":",
"# Asset",
"from",
"bitshares",
".",
"asset",
"import",
"Asset",
"asset",
"=",
"Asset",
"(",
"obj",
",",
"full",
"=",
"True",
")",
"calls",
"=",
"asset",
".",
"get_call_orders",
"(",
"limit",
")",
"t",
"=",
"[",
"[",
"\"acount\"",
",",
"\"debt\"",
",",
"\"collateral\"",
",",
"\"call price\"",
",",
"\"ratio\"",
"]",
"]",
"for",
"call",
"in",
"calls",
":",
"t",
".",
"append",
"(",
"[",
"str",
"(",
"call",
"[",
"\"account\"",
"]",
"[",
"\"name\"",
"]",
")",
",",
"str",
"(",
"call",
"[",
"\"debt\"",
"]",
")",
",",
"str",
"(",
"call",
"[",
"\"collateral\"",
"]",
")",
",",
"str",
"(",
"call",
"[",
"\"call_price\"",
"]",
")",
",",
"\"%.2f\"",
"%",
"(",
"call",
"[",
"\"ratio\"",
"]",
")",
",",
"]",
")",
"print_table",
"(",
"t",
")",
"else",
":",
"# Account",
"from",
"bitshares",
".",
"dex",
"import",
"Dex",
"dex",
"=",
"Dex",
"(",
"bitshares_instance",
"=",
"ctx",
".",
"bitshares",
")",
"calls",
"=",
"dex",
".",
"list_debt_positions",
"(",
"account",
"=",
"obj",
")",
"t",
"=",
"[",
"[",
"\"debt\"",
",",
"\"collateral\"",
",",
"\"call price\"",
",",
"\"ratio\"",
"]",
"]",
"for",
"symbol",
"in",
"calls",
":",
"t",
".",
"append",
"(",
"[",
"str",
"(",
"calls",
"[",
"symbol",
"]",
"[",
"\"debt\"",
"]",
")",
",",
"str",
"(",
"calls",
"[",
"symbol",
"]",
"[",
"\"collateral\"",
"]",
")",
",",
"str",
"(",
"calls",
"[",
"symbol",
"]",
"[",
"\"call_price\"",
"]",
")",
",",
"\"%.2f\"",
"%",
"(",
"calls",
"[",
"symbol",
"]",
"[",
"\"ratio\"",
"]",
")",
",",
"]",
")",
"print_table",
"(",
"t",
")"
] | List call/short positions of an account or an asset | [
"List",
"call",
"/",
"short",
"positions",
"of",
"an",
"account",
"or",
"an",
"asset"
] | python | train |
mamrhein/specification | specification/_extd_ast_expr.py | https://github.com/mamrhein/specification/blob/a4c09a0d286cda7a04e8a189f12e23edd97f64ea/specification/_extd_ast_expr.py#L214-L218 | def visit_Dict(self, node: AST, dfltChaining: bool = True) -> str:
"""Return dict representation of `node`s elements."""
items = (': '.join((self.visit(key), self.visit(value)))
for key, value in zip(node.keys, node.values))
return f"{{{', '.join(items)}}}" | [
"def",
"visit_Dict",
"(",
"self",
",",
"node",
":",
"AST",
",",
"dfltChaining",
":",
"bool",
"=",
"True",
")",
"->",
"str",
":",
"items",
"=",
"(",
"': '",
".",
"join",
"(",
"(",
"self",
".",
"visit",
"(",
"key",
")",
",",
"self",
".",
"visit",
"(",
"value",
")",
")",
")",
"for",
"key",
",",
"value",
"in",
"zip",
"(",
"node",
".",
"keys",
",",
"node",
".",
"values",
")",
")",
"return",
"f\"{{{', '.join(items)}}}\""
] | Return dict representation of `node`s elements. | [
"Return",
"dict",
"representation",
"of",
"node",
"s",
"elements",
"."
] | python | train |
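The f-string in visit_Dict above relies on doubled braces to emit literal { and }; a minimal sketch of the same zip-and-join pattern, with plain strings standing in for visited AST nodes:

keys, values = ["'a'", "'b'"], ["1", "2"]
items = (': '.join((k, v)) for k, v in zip(keys, values))
print(f"{{{', '.join(items)}}}")  # {'a': 1, 'b': 2}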
Crypto-toolbox/btfxwss | btfxwss/client.py | https://github.com/Crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/client.py#L210-L218 | def subscribe_to_order_book(self, pair, **kwargs):
"""Subscribe to the passed pair's order book channel.
:param pair: str, Symbol pair to request data for
:param kwargs:
:return:
"""
identifier = ('book', pair)
self._subscribe('book', identifier, symbol=pair, **kwargs) | [
"def",
"subscribe_to_order_book",
"(",
"self",
",",
"pair",
",",
"*",
"*",
"kwargs",
")",
":",
"identifier",
"=",
"(",
"'book'",
",",
"pair",
")",
"self",
".",
"_subscribe",
"(",
"'book'",
",",
"identifier",
",",
"symbol",
"=",
"pair",
",",
"*",
"*",
"kwargs",
")"
] | Subscribe to the passed pair's order book channel.
:param pair: str, Symbol pair to request data for
:param kwargs:
:return: | [
"Subscribe",
"to",
"the",
"passed",
"pair",
"s",
"order",
"book",
"channel",
"."
] | python | test |
xeroc/python-graphenelib | grapheneapi/api.py | https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/grapheneapi/api.py#L116-L121 | def reset_counter(self):
""" reset the failed connection counters
"""
self._cnt_retries = 0
for i in self._url_counter:
self._url_counter[i] = 0 | [
"def",
"reset_counter",
"(",
"self",
")",
":",
"self",
".",
"_cnt_retries",
"=",
"0",
"for",
"i",
"in",
"self",
".",
"_url_counter",
":",
"self",
".",
"_url_counter",
"[",
"i",
"]",
"=",
"0"
] | reset the failed connection counters | [
"reset",
"the",
"failed",
"connection",
"counters"
] | python | valid |
DeV1doR/aioethereum | aioethereum/management/eth.py | https://github.com/DeV1doR/aioethereum/blob/85eb46550d862b3ccc309914ea871ca1c7b42157/aioethereum/management/eth.py#L81-L95 | def eth_getBalance(self, address, block=BLOCK_TAG_LATEST):
"""https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getbalance
:param address: Account address
:type address: str
:param block: Block tag or number (optional)
:type block: int or BLOCK_TAGS
:return: wei
:rtype: int
"""
block = validate_block(block)
return hex_to_dec((yield from self.rpc_call('eth_getBalance',
[address, block]))) | [
"def",
"eth_getBalance",
"(",
"self",
",",
"address",
",",
"block",
"=",
"BLOCK_TAG_LATEST",
")",
":",
"block",
"=",
"validate_block",
"(",
"block",
")",
"return",
"hex_to_dec",
"(",
"(",
"yield",
"from",
"self",
".",
"rpc_call",
"(",
"'eth_getBalance'",
",",
"[",
"address",
",",
"block",
"]",
")",
")",
")"
] | https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getbalance
:param address: Account address
:type address: str
:param block: Block tag or number (optional)
:type block: int or BLOCK_TAGS
:return: wei
:rtype: int | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"ethereum",
"/",
"wiki",
"/",
"wiki",
"/",
"JSON",
"-",
"RPC#eth_getbalance"
] | python | train |
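A hypothetical usage sketch for eth_getBalance; the client factory and node URL are assumptions, and the zero address is only a placeholder, not a real account.

import asyncio
from aioethereum import create_ethereum_client

async def main():
    client = await create_ethereum_client('http://localhost:8545')
    wei = await client.eth_getBalance('0x' + '0' * 40)  # placeholder address
    print(wei)

asyncio.get_event_loop().run_until_complete(main())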
pylp/pylp | pylp/cli/colors.py | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/colors.py#L69-L78 | def foreground(color):
"""Set the foreground color."""
if color not in foreground_colors:
return
if is_win32:
last_fg = foreground_colors[color][1]
set_color_win32(last_fg | last_bg)
else:
set_color_ansi(foreground_colors[color][0]) | [
"def",
"foreground",
"(",
"color",
")",
":",
"if",
"color",
"not",
"in",
"foreground_colors",
":",
"return",
"if",
"is_win32",
":",
"last_fg",
"=",
"foreground_colors",
"[",
"color",
"]",
"[",
"1",
"]",
"set_color_win32",
"(",
"last_fg",
"|",
"last_bg",
")",
"else",
":",
"set_color_ansi",
"(",
"foreground_colors",
"[",
"color",
"]",
"[",
"0",
"]",
")"
] | Set the foreground color. | [
"Set",
"the",
"foreground",
"color",
"."
] | python | train |
timkpaine/pyEX | pyEX/stocks.py | https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L794-L809 | def financials(symbol, token='', version=''):
    '''Pulls income statement, balance sheet, and cash flow data from the four most recent reported quarters.
    https://iexcloud.io/docs/api/#financials
    Updates at 8am, 9am UTC daily
    Args:
        symbol (string); Ticker to request
        token (string); Access token
        version (string); API version
    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/financials', token, version) | [
"def",
"financials",
"(",
"symbol",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"_raiseIfNotStr",
"(",
"symbol",
")",
"return",
"_getJson",
"(",
"'stock/'",
"+",
"symbol",
"+",
"'/financials'",
",",
"token",
",",
"version",
")"
] | Pulls income statement, balance sheet, and cash flow data from the four most recent reported quarters.
https://iexcloud.io/docs/api/#financials
Updates at 8am, 9am UTC daily
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result | [
"Pulls",
"income",
"statement",
"balance",
"sheet",
"and",
"cash",
"flow",
"data",
"from",
"the",
"four",
"most",
"recent",
"reported",
"quarters",
"."
] | python | valid |
mariano/pyfire | pyfire/message.py | https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/message.py#L121-L131 | def is_text(self):
""" Tells if this message is a text message.
Returns:
bool. Success
"""
return self.type in [
self._TYPE_PASTE,
self._TYPE_TEXT,
self._TYPE_TWEET
] | [
"def",
"is_text",
"(",
"self",
")",
":",
"return",
"self",
".",
"type",
"in",
"[",
"self",
".",
"_TYPE_PASTE",
",",
"self",
".",
"_TYPE_TEXT",
",",
"self",
".",
"_TYPE_TWEET",
"]"
] | Tells if this message is a text message.
Returns:
bool. Success | [
"Tells",
"if",
"this",
"message",
"is",
"a",
"text",
"message",
"."
] | python | valid |
Tygs/ww | src/ww/wrappers/tuples.py | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/tuples.py#L33-L48 | def index(self, value):
"""
Args:
value: index
Returns: index of the values
Raises:
ValueError: value is not in list
"""
for i, x in enumerate(self):
if x == value:
return i
raise ValueError("{} is not in list".format(value)) | [
"def",
"index",
"(",
"self",
",",
"value",
")",
":",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"x",
"==",
"value",
":",
"return",
"i",
"raise",
"ValueError",
"(",
"\"{} is not in list\"",
".",
"format",
"(",
"value",
")",
")"
] | Args:
value: index
Returns: index of the value
Raises:
ValueError: value is not in list | [
"Args",
":",
"value",
":",
"index"
] | python | train |
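The same linear scan shown standalone; enumerate pairs each element with its position, and a miss raises ValueError just like list.index.

def index_of(seq, value):
    for i, x in enumerate(seq):
        if x == value:
            return i
    raise ValueError("{} is not in list".format(value))

print(index_of(['a', 'b', 'c'], 'b'))  # 1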
kubernetes-client/python | kubernetes/client/apis/extensions_v1beta1_api.py | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/extensions_v1beta1_api.py#L7730-L7754 | def replace_namespaced_network_policy(self, name, namespace, body, **kwargs):
"""
replace the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_network_policy(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1NetworkPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
return data | [
"def",
"replace_namespaced_network_policy",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"replace_namespaced_network_policy_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"replace_namespaced_network_policy_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | replace the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_network_policy(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1NetworkPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1NetworkPolicy
If the method is called asynchronously,
returns the request thread. | [
"replace",
"the",
"specified",
"NetworkPolicy",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"pass",
"async_req",
"=",
"True",
">>>",
"thread",
"=",
"api",
".",
"replace_namespaced_network_policy",
"(",
"name",
"namespace",
"body",
"async_req",
"=",
"True",
")",
">>>",
"result",
"=",
"thread",
".",
"get",
"()"
] | python | train |
wbond/oscrypto | oscrypto/_win/asymmetric.py | https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/asymmetric.py#L2997-L3051 | def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = 0
if rsa_oaep_padding:
flags = Advapi32Const.CRYPT_OAEP
out_len = new(advapi32, 'DWORD *', len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
null(),
out_len,
0
)
handle_error(res)
buffer_len = deref(out_len)
buffer = buffer_from_bytes(buffer_len)
write_to_buffer(buffer, data)
pointer_set(out_len, len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
buffer,
out_len,
buffer_len
)
handle_error(res)
return bytes_from_buffer(buffer, deref(out_len))[::-1] | [
"def",
"_advapi32_encrypt",
"(",
"certificate_or_public_key",
",",
"data",
",",
"rsa_oaep_padding",
"=",
"False",
")",
":",
"flags",
"=",
"0",
"if",
"rsa_oaep_padding",
":",
"flags",
"=",
"Advapi32Const",
".",
"CRYPT_OAEP",
"out_len",
"=",
"new",
"(",
"advapi32",
",",
"'DWORD *'",
",",
"len",
"(",
"data",
")",
")",
"res",
"=",
"advapi32",
".",
"CryptEncrypt",
"(",
"certificate_or_public_key",
".",
"ex_key_handle",
",",
"null",
"(",
")",
",",
"True",
",",
"flags",
",",
"null",
"(",
")",
",",
"out_len",
",",
"0",
")",
"handle_error",
"(",
"res",
")",
"buffer_len",
"=",
"deref",
"(",
"out_len",
")",
"buffer",
"=",
"buffer_from_bytes",
"(",
"buffer_len",
")",
"write_to_buffer",
"(",
"buffer",
",",
"data",
")",
"pointer_set",
"(",
"out_len",
",",
"len",
"(",
"data",
")",
")",
"res",
"=",
"advapi32",
".",
"CryptEncrypt",
"(",
"certificate_or_public_key",
".",
"ex_key_handle",
",",
"null",
"(",
")",
",",
"True",
",",
"flags",
",",
"buffer",
",",
"out_len",
",",
"buffer_len",
")",
"handle_error",
"(",
"res",
")",
"return",
"bytes_from_buffer",
"(",
"buffer",
",",
"deref",
"(",
"out_len",
")",
")",
"[",
":",
":",
"-",
"1",
"]"
] | Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext | [
"Encrypts",
"a",
"value",
"using",
"an",
"RSA",
"public",
"key",
"via",
"CryptoAPI"
] | python | valid |
the01/paps-settings | paps_settings/settable_plugin.py | https://github.com/the01/paps-settings/blob/48fb65eb0fa7929a0bb381c6dad28d0197b44c83/paps_settings/settable_plugin.py#L136-L187 | def resource_update_list(self, reset=False):
"""
Update internal struct of resource, hash list and get diff
(Warning: Resource names have to be unique!!)
:param reset: Should resources be rebuild from scratch (default: False)
:type reset: bool
:return: List of resources and hashes that changed
:rtype: list[(unicode, unicode)]
"""
if not self._resource_path:
raise PluginException("No resource path set")
if not os.path.isdir(self._resource_path):
raise PluginException(
u"Resource path directory '{}' not found".format(
self._resource_path
)
)
res = []
with self._resource_lock:
if reset:
self._resources = {}
old = dict(self._resources)
for dirname, dirnames, filenames in os.walk(self._resource_path):
for file_name in filenames:
file_ext = os.path.splitext(file_name)[1].lower()[1:]
if file_ext not in self._resource_file_types:
self.debug(u"Skipping '{}'".format(file_name))
continue
file_path = os.path.join(dirname, file_name)
try:
file_hash = get_file_hash(file_path)
except:
self.exception(
u"Failed to hash '{}'".format(file_path)
)
continue
self._resources[file_name] = {
'name': file_name,
'path': file_path,
'hash': file_hash,
'checked': datetime.datetime.utcnow()
}
# generate diff
for key in self._resources:
resource = self._resources[key]
if key not in old or old[key]['hash'] != resource['hash']:
# new file or hash changed
res.append((key, resource['hash']))
return res | [
"def",
"resource_update_list",
"(",
"self",
",",
"reset",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_resource_path",
":",
"raise",
"PluginException",
"(",
"\"No resource path set\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"_resource_path",
")",
":",
"raise",
"PluginException",
"(",
"u\"Resource path directory '{}' not found\"",
".",
"format",
"(",
"self",
".",
"_resource_path",
")",
")",
"res",
"=",
"[",
"]",
"with",
"self",
".",
"_resource_lock",
":",
"if",
"reset",
":",
"self",
".",
"_resources",
"=",
"{",
"}",
"old",
"=",
"dict",
"(",
"self",
".",
"_resources",
")",
"for",
"dirname",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"_resource_path",
")",
":",
"for",
"file_name",
"in",
"filenames",
":",
"file_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"[",
"1",
":",
"]",
"if",
"file_ext",
"not",
"in",
"self",
".",
"_resource_file_types",
":",
"self",
".",
"debug",
"(",
"u\"Skipping '{}'\"",
".",
"format",
"(",
"file_name",
")",
")",
"continue",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"file_name",
")",
"try",
":",
"file_hash",
"=",
"get_file_hash",
"(",
"file_path",
")",
"except",
":",
"self",
".",
"exception",
"(",
"u\"Failed to hash '{}'\"",
".",
"format",
"(",
"file_path",
")",
")",
"continue",
"self",
".",
"_resources",
"[",
"file_name",
"]",
"=",
"{",
"'name'",
":",
"file_name",
",",
"'path'",
":",
"file_path",
",",
"'hash'",
":",
"file_hash",
",",
"'checked'",
":",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"}",
"# generate diff",
"for",
"key",
"in",
"self",
".",
"_resources",
":",
"resource",
"=",
"self",
".",
"_resources",
"[",
"key",
"]",
"if",
"key",
"not",
"in",
"old",
"or",
"old",
"[",
"key",
"]",
"[",
"'hash'",
"]",
"!=",
"resource",
"[",
"'hash'",
"]",
":",
"# new file or hash changed",
"res",
".",
"append",
"(",
"(",
"key",
",",
"resource",
"[",
"'hash'",
"]",
")",
")",
"return",
"res"
] | Update internal struct of resource, hash list and get diff
(Warning: Resource names have to be unique!!)
:param reset: Should resources be rebuilt from scratch (default: False)
:type reset: bool
:return: List of resources and hashes that changed
:rtype: list[(unicode, unicode)] | [
"Update",
"internal",
"struct",
"of",
"resource",
"hash",
"list",
"and",
"get",
"diff"
] | python | train |
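get_file_hash above is a project helper; a plausible hashlib-based stand-in, followed by the same "new or changed" diff over two hash maps.

import hashlib

def file_hash(path, chunk=65536):
    h = hashlib.md5()
    with open(path, 'rb') as fp:
        for block in iter(lambda: fp.read(chunk), b''):
            h.update(block)
    return h.hexdigest()

old = {'a.js': '111', 'b.js': '222'}
new = {'a.js': '111', 'b.js': '333', 'c.js': '444'}
print([(k, h) for k, h in new.items() if k not in old or old[k] != h])
# [('b.js', '333'), ('c.js', '444')]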
oz123/blogit | blogit/blogit.py | https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L399-L413 | def update_index(entries):
"""find the last 10 entries in the database and create the main
page.
Each entry in has an doc_id, so we only get the last 10 doc_ids.
This method also updates the ATOM feed.
"""
context = GLOBAL_TEMPLATE_CONTEXT.copy()
context['entries'] = entries
context['last_build'] = datetime.datetime.now().strftime(
"%Y-%m-%dT%H:%M:%SZ")
list(map(lambda x: _render(context, x[0],
os.path.join(CONFIG['output_to'], x[1])),
(('entry_index.html', 'index.html'), ('atom.xml', 'atom.xml')))) | [
"def",
"update_index",
"(",
"entries",
")",
":",
"context",
"=",
"GLOBAL_TEMPLATE_CONTEXT",
".",
"copy",
"(",
")",
"context",
"[",
"'entries'",
"]",
"=",
"entries",
"context",
"[",
"'last_build'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"_render",
"(",
"context",
",",
"x",
"[",
"0",
"]",
",",
"os",
".",
"path",
".",
"join",
"(",
"CONFIG",
"[",
"'output_to'",
"]",
",",
"x",
"[",
"1",
"]",
")",
")",
",",
"(",
"(",
"'entry_index.html'",
",",
"'index.html'",
")",
",",
"(",
"'atom.xml'",
",",
"'atom.xml'",
")",
")",
")",
")"
] | find the last 10 entries in the database and create the main
page.
Each entry has a doc_id, so we only get the last 10 doc_ids.
This method also updates the ATOM feed. | [
"find",
"the",
"last",
"10",
"entries",
"in",
"the",
"database",
"and",
"create",
"the",
"main",
"page",
".",
"Each",
"entry",
"in",
"has",
"an",
"doc_id",
"so",
"we",
"only",
"get",
"the",
"last",
"10",
"doc_ids",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/chemenv/coordination_environments/chemenv_strategies.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/chemenv_strategies.py#L779-L790 | def as_dict(self):
"""
Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
:return: Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"max_nabundant": self.max_nabundant,
"target_environments": self.target_environments,
"target_penalty_type": self.target_penalty_type,
"max_csm": self.max_csm} | [
"def",
"as_dict",
"(",
"self",
")",
":",
"return",
"{",
"\"@module\"",
":",
"self",
".",
"__class__",
".",
"__module__",
",",
"\"@class\"",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"additional_condition\"",
":",
"self",
".",
"_additional_condition",
",",
"\"max_nabundant\"",
":",
"self",
".",
"max_nabundant",
",",
"\"target_environments\"",
":",
"self",
".",
"target_environments",
",",
"\"target_penalty_type\"",
":",
"self",
".",
"target_penalty_type",
",",
"\"max_csm\"",
":",
"self",
".",
"max_csm",
"}"
] | Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
:return: Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object. | [
"Bson",
"-",
"serializable",
"dict",
"representation",
"of",
"the",
"TargettedPenaltiedAbundanceChemenvStrategy",
"object",
".",
":",
"return",
":",
"Bson",
"-",
"serializable",
"dict",
"representation",
"of",
"the",
"TargettedPenaltiedAbundanceChemenvStrategy",
"object",
"."
] | python | train |
erdc/RAPIDpy | RAPIDpy/dataset.py | https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/dataset.py#L296-L412 | def get_time_array(self,
                   datetime_simulation_start=None,
                   simulation_time_step_seconds=None,
                   return_datetime=False,
                   time_index_array=None):
    """
    This method extracts or generates an array of time.
    The new version of RAPID output has the time array stored.
    However, the old version requires the user to know when the
    simulation began and the time step of the output.
    Parameters
    ----------
    datetime_simulation_start: :obj:`datetime.datetime`, optional
        The start datetime of the simulation. Only required if the time
        variable is not included in the file.
    simulation_time_step_seconds: int, optional
        The time step of the file in seconds. Only required if the time
        variable is not included in the file.
    return_datetime: bool, optional
        If true, it converts the data to a list of datetime objects.
        Default is False.
    time_index_array: list or :obj:`numpy.array`, optional
        This is used to extract the datetime values by index from the main
        list. This can be from the *get_time_index_range* function.
    Returns
    -------
    list:
        An array of integers representing seconds since Jan 1, 1970 UTC
        or datetime objects if *return_datetime* is set to True.
    These examples demonstrate how to retrieve or generate a time array
    to go along with your RAPID streamflow series.
    CF-Compliant Qout File Example:
    .. code:: python
        from RAPIDpy import RAPIDDataset
        path_to_rapid_qout = '/path/to/Qout.nc'
        with RAPIDDataset(path_to_rapid_qout) as qout_nc:
            #retrieve integer timestamp array
            time_array = qout_nc.get_time_array()
            #or, to get datetime array
            time_datetime = qout_nc.get_time_array(return_datetime=True)
    Legacy Qout File Example:
    .. code:: python
        from RAPIDpy import RAPIDDataset
        path_to_rapid_qout = '/path/to/Qout.nc'
        with RAPIDDataset(path_to_rapid_qout,
                          datetime_simulation_start=datetime(1980, 1, 1),
                          simulation_time_step_seconds=3 * 3600)\
                as qout_nc:
            #retrieve integer timestamp array
            time_array = qout_nc.get_time_array()
            #or, to get datetime array
            time_datetime = qout_nc.get_time_array(return_datetime=True)
    """
    # Original Qout file
    if datetime_simulation_start is not None:
        self.datetime_simulation_start = datetime_simulation_start
    if simulation_time_step_seconds is not None:
        self.simulation_time_step_seconds = simulation_time_step_seconds
    epoch = datetime.datetime(1970, 1, 1, tzinfo=utc)
    time_units = "seconds since {0}".format(epoch)
    # CF-1.6 compliant file
    if self.is_time_variable_valid():
        time_array = self.qout_nc.variables['time'][:]
        if self.qout_nc.variables['time'].units:
            time_units = self.qout_nc.variables['time'].units
    # Original Qout file
    elif self._is_legacy_time_valid():
        initial_time_seconds = ((self.datetime_simulation_start
                                 .replace(tzinfo=utc) - epoch)
                                .total_seconds() +
                                self.simulation_time_step_seconds)
        final_time_seconds = (initial_time_seconds +
                              self.size_time *
                              self.simulation_time_step_seconds)
        time_array = np.arange(initial_time_seconds,
                               final_time_seconds,
                               self.simulation_time_step_seconds)
    else:
        raise ValueError("This file does not contain the time"
                         " variable. To get time array, add"
                         " datetime_simulation_start and"
                         " simulation_time_step_seconds")
    if time_index_array is not None:
        time_array = time_array[time_index_array]
    if return_datetime:
        time_array = num2date(time_array, time_units)
        if self.out_tzinfo is not None:
            for i in xrange(len(time_array)):
                # convert time to output timezone
                time_array[i] = utc.localize(time_array[i]) \
                    .astimezone(self.out_tzinfo) \
                    .replace(tzinfo=None)
    return time_array | [
"def",
"get_time_array",
"(",
"self",
",",
"datetime_simulation_start",
"=",
"None",
",",
"simulation_time_step_seconds",
"=",
"None",
",",
"return_datetime",
"=",
"False",
",",
"time_index_array",
"=",
"None",
")",
":",
"# Original Qout file",
"if",
"datetime_simulation_start",
"is",
"not",
"None",
":",
"self",
".",
"datetime_simulation_start",
"=",
"datetime_simulation_start",
"if",
"simulation_time_step_seconds",
"is",
"not",
"None",
":",
"self",
".",
"simulation_time_step_seconds",
"=",
"simulation_time_step_seconds",
"epoch",
"=",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
",",
"tzinfo",
"=",
"utc",
")",
"time_units",
"=",
"\"seconds since {0}\"",
".",
"format",
"(",
"epoch",
")",
"# CF-1.6 compliant file",
"if",
"self",
".",
"is_time_variable_valid",
"(",
")",
":",
"time_array",
"=",
"self",
".",
"qout_nc",
".",
"variables",
"[",
"'time'",
"]",
"[",
":",
"]",
"if",
"self",
".",
"qout_nc",
".",
"variables",
"[",
"'time'",
"]",
".",
"units",
":",
"time_units",
"=",
"self",
".",
"qout_nc",
".",
"variables",
"[",
"'time'",
"]",
".",
"units",
"# Original Qout file",
"elif",
"self",
".",
"_is_legacy_time_valid",
"(",
")",
":",
"initial_time_seconds",
"=",
"(",
"(",
"self",
".",
"datetime_simulation_start",
".",
"replace",
"(",
"tzinfo",
"=",
"utc",
")",
"-",
"epoch",
")",
".",
"total_seconds",
"(",
")",
"+",
"self",
".",
"simulation_time_step_seconds",
")",
"final_time_seconds",
"=",
"(",
"initial_time_seconds",
"+",
"self",
".",
"size_time",
"*",
"self",
".",
"simulation_time_step_seconds",
")",
"time_array",
"=",
"np",
".",
"arange",
"(",
"initial_time_seconds",
",",
"final_time_seconds",
",",
"self",
".",
"simulation_time_step_seconds",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"This file does not contain the time\"",
"\" variable. To get time array, add\"",
"\" datetime_simulation_start and\"",
"\" simulation_time_step_seconds\"",
")",
"if",
"time_index_array",
"is",
"not",
"None",
":",
"time_array",
"=",
"time_array",
"[",
"time_index_array",
"]",
"if",
"return_datetime",
":",
"time_array",
"=",
"num2date",
"(",
"time_array",
",",
"time_units",
")",
"if",
"self",
".",
"out_tzinfo",
"is",
"not",
"None",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"time_array",
")",
")",
":",
"# convert time to output timezone",
"time_array",
"[",
"i",
"]",
"=",
"utc",
".",
"localize",
"(",
"time_array",
"[",
"i",
"]",
")",
".",
"astimezone",
"(",
"self",
".",
"out_tzinfo",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"return",
"time_array"
] | This method extracts or generates an array of time.
The new version of RAPID output has the time array stored.
However, the old version requires the user to know when the
simulation began and the time step of the output.
Parameters
----------
datetime_simulation_start: :obj:`datetime.datetime`, optional
The start datetime of the simulation. Only required if the time
variable is not included in the file.
simulation_time_step_seconds: int, optional
The time step of the file in seconds. Only required if the time
variable is not included in the file.
return_datetime: bool, optional
If true, it converts the data to a list of datetime objects.
Default is False.
time_index_array: list or :obj:`numpy.array`, optional
This is used to extract the datetime values by index from the main
list. This can be from the *get_time_index_range* function.
Returns
-------
list:
An array of integers representing seconds since Jan 1, 1970 UTC
or datetime objects if *return_datetime* is set to True.
These examples demonstrate how to retrieve or generate a time array
to go along with your RAPID streamflow series.
CF-Compliant Qout File Example:
.. code:: python
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
#retrieve integer timestamp array
time_array = qout_nc.get_time_array()
#or, to get datetime array
time_datetime = qout_nc.get_time_array(return_datetime=True)
Legacy Qout File Example:
.. code:: python
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout,
datetime_simulation_start=datetime(1980, 1, 1),
simulation_time_step_seconds=3 * 3600)\
as qout_nc:
#retrieve integer timestamp array
time_array = qout_nc.get_time_array()
#or, to get datetime array
time_datetime = qout_nc.get_time_array(return_datetime=True) | [
"This",
"method",
"extracts",
"or",
"generates",
"an",
"array",
"of",
"time",
".",
"The",
"new",
"version",
"of",
"RAPID",
"output",
"has",
"the",
"time",
"array",
"stored",
".",
"However",
"the",
"old",
"version",
"requires",
"the",
"user",
"to",
"know",
"when",
"the",
"simulation",
"began",
"and",
"the",
"time",
"step",
"of",
"the",
"output",
"."
] | python | train |
klmitch/turnstile | turnstile/tools.py | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L141-L177 | def get_kwargs(self, args):
"""
Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse.
"""
# Now we need to figure out which arguments the final function
# actually needs
kwargs = {}
argspec = inspect.getargspec(self._func)
required = set(argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
for arg_name in argspec.args:
try:
kwargs[arg_name] = getattr(args, arg_name)
except AttributeError:
if arg_name in required:
# If this happens, that's a programming failure
raise
# If the function accepts any keyword argument, add whatever
# remains
if argspec.keywords:
for key, value in args.__dict__.items():
if key in kwargs:
# Already handled
continue
kwargs[key] = value
return kwargs | [
"def",
"get_kwargs",
"(",
"self",
",",
"args",
")",
":",
"# Now we need to figure out which arguments the final function",
"# actually needs",
"kwargs",
"=",
"{",
"}",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"self",
".",
"_func",
")",
"required",
"=",
"set",
"(",
"argspec",
".",
"args",
"[",
":",
"-",
"len",
"(",
"argspec",
".",
"defaults",
")",
"]",
"if",
"argspec",
".",
"defaults",
"else",
"argspec",
".",
"args",
")",
"for",
"arg_name",
"in",
"argspec",
".",
"args",
":",
"try",
":",
"kwargs",
"[",
"arg_name",
"]",
"=",
"getattr",
"(",
"args",
",",
"arg_name",
")",
"except",
"AttributeError",
":",
"if",
"arg_name",
"in",
"required",
":",
"# If this happens, that's a programming failure",
"raise",
"# If the function accepts any keyword argument, add whatever",
"# remains",
"if",
"argspec",
".",
"keywords",
":",
"for",
"key",
",",
"value",
"in",
"args",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"kwargs",
":",
"# Already handled",
"continue",
"kwargs",
"[",
"key",
"]",
"=",
"value",
"return",
"kwargs"
] | Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse. | [
"Given",
"a",
"Namespace",
"object",
"drawn",
"from",
"argparse",
"determines",
"the",
"keyword",
"arguments",
"to",
"pass",
"to",
"the",
"underlying",
"function",
".",
"Note",
"that",
"if",
"the",
"underlying",
"function",
"accepts",
"all",
"keyword",
"arguments",
"the",
"dictionary",
"returned",
"will",
"contain",
"the",
"entire",
"contents",
"of",
"the",
"Namespace",
"object",
".",
"Also",
"note",
"that",
"an",
"AttributeError",
"will",
"be",
"raised",
"if",
"any",
"argument",
"required",
"by",
"the",
"function",
"is",
"not",
"set",
"in",
"the",
"Namespace",
"object",
"."
] | python | train |
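A compact sketch of the same Namespace-to-kwargs mapping using the modern inspect.signature API (getargspec is deprecated); the target function here is hypothetical.

import argparse
import inspect

def target(host, port=8080, **extra):
    return host, port, extra

args = argparse.Namespace(host='example.org', port=9090, verbose=True)
params = inspect.signature(target).parameters
kwargs = {n: getattr(args, n) for n in params if n in vars(args)}
# target accepts **extra, so pass through whatever remains
kwargs.update({k: v for k, v in vars(args).items() if k not in kwargs})
print(target(**kwargs))  # ('example.org', 9090, {'verbose': True})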
trailofbits/manticore | manticore/ethereum/abi.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/ethereum/abi.py#L24-L40 | def _type_size(ty):
""" Calculate `static` type size """
if ty[0] in ('int', 'uint', 'bytesM', 'function'):
return 32
elif ty[0] in ('tuple'):
result = 0
for ty_i in ty[1]:
result += ABI._type_size(ty_i)
return result
elif ty[0] in ('array'):
rep = ty[1]
result = 32 # offset link
return result
elif ty[0] in ('bytes', 'string'):
result = 32 # offset link
return result
raise ValueError | [
"def",
"_type_size",
"(",
"ty",
")",
":",
"if",
"ty",
"[",
"0",
"]",
"in",
"(",
"'int'",
",",
"'uint'",
",",
"'bytesM'",
",",
"'function'",
")",
":",
"return",
"32",
"elif",
"ty",
"[",
"0",
"]",
"in",
"(",
"'tuple'",
")",
":",
"result",
"=",
"0",
"for",
"ty_i",
"in",
"ty",
"[",
"1",
"]",
":",
"result",
"+=",
"ABI",
".",
"_type_size",
"(",
"ty_i",
")",
"return",
"result",
"elif",
"ty",
"[",
"0",
"]",
"in",
"(",
"'array'",
")",
":",
"rep",
"=",
"ty",
"[",
"1",
"]",
"result",
"=",
"32",
"# offset link",
"return",
"result",
"elif",
"ty",
"[",
"0",
"]",
"in",
"(",
"'bytes'",
",",
"'string'",
")",
":",
"result",
"=",
"32",
"# offset link",
"return",
"result",
"raise",
"ValueError"
] | Calculate `static` type size | [
"Calculate",
"static",
"type",
"size"
] | python | valid |
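A worked example of the static head sizes above, assuming the ('tuple', [subtypes]) encoding this module uses; dynamic types each occupy one 32-byte offset slot in the head.

def type_size(ty):
    if ty[0] in ('int', 'uint', 'bytesM', 'function'):
        return 32
    if ty[0] == 'tuple':
        return sum(type_size(t) for t in ty[1])
    if ty[0] in ('array', 'bytes', 'string'):
        return 32  # offset link only; payload lives in the tail
    raise ValueError(ty)

print(type_size(('tuple', [('uint',), ('bytes',), ('uint',)])))  # 96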
PyCQA/astroid | astroid/node_classes.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L56-L79 | def unpack_infer(stmt, context=None):
"""recursively generate nodes inferred by the given statement.
If the inferred value is a list or a tuple, recurse on the elements
"""
if isinstance(stmt, (List, Tuple)):
for elt in stmt.elts:
if elt is util.Uninferable:
yield elt
continue
yield from unpack_infer(elt, context)
return dict(node=stmt, context=context)
# if inferred is a final node, return it and stop
inferred = next(stmt.infer(context))
if inferred is stmt:
yield inferred
return dict(node=stmt, context=context)
# else, infer recursively, except Uninferable object that should be returned as is
for inferred in stmt.infer(context):
if inferred is util.Uninferable:
yield inferred
else:
yield from unpack_infer(inferred, context)
return dict(node=stmt, context=context) | [
"def",
"unpack_infer",
"(",
"stmt",
",",
"context",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"(",
"List",
",",
"Tuple",
")",
")",
":",
"for",
"elt",
"in",
"stmt",
".",
"elts",
":",
"if",
"elt",
"is",
"util",
".",
"Uninferable",
":",
"yield",
"elt",
"continue",
"yield",
"from",
"unpack_infer",
"(",
"elt",
",",
"context",
")",
"return",
"dict",
"(",
"node",
"=",
"stmt",
",",
"context",
"=",
"context",
")",
"# if inferred is a final node, return it and stop",
"inferred",
"=",
"next",
"(",
"stmt",
".",
"infer",
"(",
"context",
")",
")",
"if",
"inferred",
"is",
"stmt",
":",
"yield",
"inferred",
"return",
"dict",
"(",
"node",
"=",
"stmt",
",",
"context",
"=",
"context",
")",
"# else, infer recursively, except Uninferable object that should be returned as is",
"for",
"inferred",
"in",
"stmt",
".",
"infer",
"(",
"context",
")",
":",
"if",
"inferred",
"is",
"util",
".",
"Uninferable",
":",
"yield",
"inferred",
"else",
":",
"yield",
"from",
"unpack_infer",
"(",
"inferred",
",",
"context",
")",
"return",
"dict",
"(",
"node",
"=",
"stmt",
",",
"context",
"=",
"context",
")"
] | recursively generate nodes inferred by the given statement.
If the inferred value is a list or a tuple, recurse on the elements | [
"recursively",
"generate",
"nodes",
"inferred",
"by",
"the",
"given",
"statement",
".",
"If",
"the",
"inferred",
"value",
"is",
"a",
"list",
"or",
"a",
"tuple",
"recurse",
"on",
"the",
"elements"
] | python | train |
juju/charm-helpers | charmhelpers/core/hookenv.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L496-L504 | def relation_ids(reltype=None):
"""A list of relation_ids"""
reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(
subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
return [] | [
"def",
"relation_ids",
"(",
"reltype",
"=",
"None",
")",
":",
"reltype",
"=",
"reltype",
"or",
"relation_type",
"(",
")",
"relid_cmd_line",
"=",
"[",
"'relation-ids'",
",",
"'--format=json'",
"]",
"if",
"reltype",
"is",
"not",
"None",
":",
"relid_cmd_line",
".",
"append",
"(",
"reltype",
")",
"return",
"json",
".",
"loads",
"(",
"subprocess",
".",
"check_output",
"(",
"relid_cmd_line",
")",
".",
"decode",
"(",
"'UTF-8'",
")",
")",
"or",
"[",
"]",
"return",
"[",
"]"
] | A list of relation_ids | [
"A",
"list",
"of",
"relation_ids"
] | python | train |
cjdrake/pyeda | pyeda/boolalg/bdd.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bdd.py#L371-L397 | def to_dot(self, name='BDD'): # pragma: no cover
"""Convert to DOT language representation.
See the
`DOT language reference <http://www.graphviz.org/content/dot-language>`_
for details.
"""
parts = ['graph', name, '{']
for node in self.dfs_postorder():
if node is BDDNODEZERO:
parts += ['n' + str(id(node)), '[label=0,shape=box];']
elif node is BDDNODEONE:
parts += ['n' + str(id(node)), '[label=1,shape=box];']
else:
v = _VARS[node.root]
parts.append('n' + str(id(node)))
parts.append('[label="{}",shape=circle];'.format(v))
for node in self.dfs_postorder():
if node is not BDDNODEZERO and node is not BDDNODEONE:
parts += ['n' + str(id(node)), '--',
'n' + str(id(node.lo)),
'[label=0,style=dashed];']
parts += ['n' + str(id(node)), '--',
'n' + str(id(node.hi)),
'[label=1];']
parts.append('}')
return " ".join(parts) | [
"def",
"to_dot",
"(",
"self",
",",
"name",
"=",
"'BDD'",
")",
":",
"# pragma: no cover",
"parts",
"=",
"[",
"'graph'",
",",
"name",
",",
"'{'",
"]",
"for",
"node",
"in",
"self",
".",
"dfs_postorder",
"(",
")",
":",
"if",
"node",
"is",
"BDDNODEZERO",
":",
"parts",
"+=",
"[",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
")",
")",
",",
"'[label=0,shape=box];'",
"]",
"elif",
"node",
"is",
"BDDNODEONE",
":",
"parts",
"+=",
"[",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
")",
")",
",",
"'[label=1,shape=box];'",
"]",
"else",
":",
"v",
"=",
"_VARS",
"[",
"node",
".",
"root",
"]",
"parts",
".",
"append",
"(",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
")",
")",
")",
"parts",
".",
"append",
"(",
"'[label=\"{}\",shape=circle];'",
".",
"format",
"(",
"v",
")",
")",
"for",
"node",
"in",
"self",
".",
"dfs_postorder",
"(",
")",
":",
"if",
"node",
"is",
"not",
"BDDNODEZERO",
"and",
"node",
"is",
"not",
"BDDNODEONE",
":",
"parts",
"+=",
"[",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
")",
")",
",",
"'--'",
",",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
".",
"lo",
")",
")",
",",
"'[label=0,style=dashed];'",
"]",
"parts",
"+=",
"[",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
")",
")",
",",
"'--'",
",",
"'n'",
"+",
"str",
"(",
"id",
"(",
"node",
".",
"hi",
")",
")",
",",
"'[label=1];'",
"]",
"parts",
".",
"append",
"(",
"'}'",
")",
"return",
"\" \"",
".",
"join",
"(",
"parts",
")"
] | Convert to DOT language representation.
See the
`DOT language reference <http://www.graphviz.org/content/dot-language>`_
for details. | [
"Convert",
"to",
"DOT",
"language",
"representation",
"."
] | python | train |
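A rendering sketch for DOT text like the output above; it assumes the graphviz Python package and binary are installed, and the node ids here are made up.

from graphviz import Source

dot = ('graph BDD { n1 [label="x",shape=circle]; '
       'n0 [label=0,shape=box]; n2 [label=1,shape=box]; '
       'n1 -- n0 [label=0,style=dashed]; n1 -- n2 [label=1]; }')
Source(dot).render('bdd', format='png', cleanup=True)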
Microsoft/nni | src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py#L154-L187 | def next_hyperparameter_lowest_mu(fun_prediction,
                                  fun_prediction_args,
                                  x_bounds, x_types,
                                  minimize_starting_points,
                                  minimize_constraints_fun=None):
    '''
    "Lowest Mu" acquisition function
    '''
    best_x = None
    best_acquisition_value = None
    x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
    x_bounds_minmax = numpy.array(x_bounds_minmax)
    for starting_point in numpy.array(minimize_starting_points):
        res = minimize(fun=_lowest_mu,
                       x0=starting_point.reshape(1, -1),
                       bounds=x_bounds_minmax,
                       method="L-BFGS-B",
                       args=(fun_prediction, fun_prediction_args, \
                             x_bounds, x_types, minimize_constraints_fun))
        if (best_acquisition_value is None) or (res.fun < best_acquisition_value):
            res.x = numpy.ndarray.tolist(res.x)
            res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
            if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True):
                best_acquisition_value = res.fun
                best_x = res.x
    outputs = None
    if best_x is not None:
        mu, sigma = fun_prediction(best_x, *fun_prediction_args)
        outputs = {'hyperparameter': best_x, 'expected_mu': mu,
                   'expected_sigma': sigma, 'acquisition_func': "lm"}
    return outputs | [
"def",
"next_hyperparameter_lowest_mu",
"(",
"fun_prediction",
",",
"fun_prediction_args",
",",
"x_bounds",
",",
"x_types",
",",
"minimize_starting_points",
",",
"minimize_constraints_fun",
"=",
"None",
")",
":",
"best_x",
"=",
"None",
"best_acquisition_value",
"=",
"None",
"x_bounds_minmax",
"=",
"[",
"[",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"-",
"1",
"]",
"]",
"for",
"i",
"in",
"x_bounds",
"]",
"x_bounds_minmax",
"=",
"numpy",
".",
"array",
"(",
"x_bounds_minmax",
")",
"for",
"starting_point",
"in",
"numpy",
".",
"array",
"(",
"minimize_starting_points",
")",
":",
"res",
"=",
"minimize",
"(",
"fun",
"=",
"_lowest_mu",
",",
"x0",
"=",
"starting_point",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
",",
"bounds",
"=",
"x_bounds_minmax",
",",
"method",
"=",
"\"L-BFGS-B\"",
",",
"args",
"=",
"(",
"fun_prediction",
",",
"fun_prediction_args",
",",
"x_bounds",
",",
"x_types",
",",
"minimize_constraints_fun",
")",
")",
"if",
"(",
"best_acquisition_value",
"is",
"None",
")",
"or",
"(",
"res",
".",
"fun",
"<",
"best_acquisition_value",
")",
":",
"res",
".",
"x",
"=",
"numpy",
".",
"ndarray",
".",
"tolist",
"(",
"res",
".",
"x",
")",
"res",
".",
"x",
"=",
"lib_data",
".",
"match_val_type",
"(",
"res",
".",
"x",
",",
"x_bounds",
",",
"x_types",
")",
"if",
"(",
"minimize_constraints_fun",
"is",
"None",
")",
"or",
"(",
"minimize_constraints_fun",
"(",
"res",
".",
"x",
")",
"is",
"True",
")",
":",
"best_acquisition_value",
"=",
"res",
".",
"fun",
"best_x",
"=",
"res",
".",
"x",
"outputs",
"=",
"None",
"if",
"best_x",
"is",
"not",
"None",
":",
"mu",
",",
"sigma",
"=",
"fun_prediction",
"(",
"best_x",
",",
"*",
"fun_prediction_args",
")",
"outputs",
"=",
"{",
"'hyperparameter'",
":",
"best_x",
",",
"'expected_mu'",
":",
"mu",
",",
"'expected_sigma'",
":",
"sigma",
",",
"'acquisition_func'",
":",
"\"lm\"",
"}",
"return",
"outputs"
] | "Lowest Mu" acquisition function | [
"Lowest",
"Mu",
"acquisition",
"function"
] | python | train |
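A self-contained illustration of the multi-start L-BFGS-B pattern above, run on a toy quadratic instead of the GP posterior mean.

import numpy
from scipy.optimize import minimize

def objective(x):
    return float(((x - 3.0) ** 2).sum())

best = None
for start in numpy.array([[0.0], [10.0], [-5.0]]):
    res = minimize(fun=objective, x0=start,
                   bounds=[(-10.0, 10.0)], method="L-BFGS-B")
    if best is None or res.fun < best.fun:
        best = res
print(best.x)  # close to [3.]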
callowayproject/Transmogrify | transmogrify/contrib/django/templatetags/transmogrifiers.py | https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/contrib/django/templatetags/transmogrifiers.py#L22-L29 | def resolve(var, context):
"""
Resolve the variable, or return the value passed to it in the first place
"""
try:
return var.resolve(context)
except template.VariableDoesNotExist:
return var.var | [
"def",
"resolve",
"(",
"var",
",",
"context",
")",
":",
"try",
":",
"return",
"var",
".",
"resolve",
"(",
"context",
")",
"except",
"template",
".",
"VariableDoesNotExist",
":",
"return",
"var",
".",
"var"
] | Resolve the variable, or return the value passed to it in the first place | [
"Resolve",
"the",
"variable",
"or",
"return",
"the",
"value",
"passed",
"to",
"it",
"in",
"the",
"first",
"place"
] | python | train |
singularityhub/sregistry-cli | sregistry/logger/message.py | https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L274-L290 | def table(self, rows, col_width=2):
    '''table will print a table of entries. If the rows is
    a dictionary, the keys are interpreted as column names. If
    not, a numbered list is used.
    '''
    labels = [str(x) for x in range(1, len(rows) + 1)]
    if isinstance(rows, dict):
        labels = list(rows.keys())
        rows = list(rows.values())
    for row in rows:
        label = labels.pop(0)
        label = label.ljust(col_width)
        message = "\t".join(row)
        self.custom(prefix=label,
                    message=message) | [
"def",
"table",
"(",
"self",
",",
"rows",
",",
"col_width",
"=",
"2",
")",
":",
"labels",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"rows",
")",
"+",
"1",
")",
"]",
"if",
"isinstance",
"(",
"rows",
",",
"dict",
")",
":",
"labels",
"=",
"list",
"(",
"rows",
".",
"keys",
"(",
")",
")",
"rows",
"=",
"list",
"(",
"rows",
".",
"values",
"(",
")",
")",
"for",
"row",
"in",
"rows",
":",
"label",
"=",
"labels",
".",
"pop",
"(",
"0",
")",
"label",
"=",
"label",
".",
"ljust",
"(",
"col_width",
")",
"message",
"=",
"\"\\t\"",
".",
"join",
"(",
"row",
")",
"self",
".",
"custom",
"(",
"prefix",
"=",
"label",
",",
"message",
"=",
"message",
")"
] | table will print a table of entries. If rows is
        a dictionary, the keys are interpreted as column names. If
not, a numbered list is used. | [
"table",
"will",
"print",
"a",
"table",
"of",
"entries",
".",
"If",
"the",
"rows",
"is",
"a",
"dictionary",
"the",
"keys",
"are",
"interpreted",
"as",
"column",
"names",
".",
"if",
"not",
"a",
"numbered",
"list",
"is",
"used",
"."
] | python | test |
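A short usage sketch for the table printer above: with a dict the keys become row labels, and each row value must be an iterable of strings because it is tab-joined before printing (the `bot` message instance is assumed):

    rows = {'name':    ['sregistry', 'client'],
            'version': ['0.2', 'stable']}

    # Prints, roughly:
    #   name     sregistry<TAB>client
    #   version  0.2<TAB>stable
    bot.table(rows, col_width=8)

    # With a plain list of rows, labels fall back to '1'..'N'.
    bot.table([['a', 'b'], ['c', 'd']])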
ourway/auth | auth/CAS/authorization.py | https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L30-L36 | def get_permissions(self, role):
"""gets permissions of role"""
target_role = AuthGroup.objects(role=role, creator=self.client).first()
if not target_role:
return '[]'
targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
return json.loads(targets.to_json()) | [
"def",
"get_permissions",
"(",
"self",
",",
"role",
")",
":",
"target_role",
"=",
"AuthGroup",
".",
"objects",
"(",
"role",
"=",
"role",
",",
"creator",
"=",
"self",
".",
"client",
")",
".",
"first",
"(",
")",
"if",
"not",
"target_role",
":",
"return",
"'[]'",
"targets",
"=",
"AuthPermission",
".",
"objects",
"(",
"groups",
"=",
"target_role",
",",
"creator",
"=",
"self",
".",
"client",
")",
".",
"only",
"(",
"'name'",
")",
"return",
"json",
".",
"loads",
"(",
"targets",
".",
"to_json",
"(",
")",
")"
] | gets permissions of role | [
"gets",
"permissions",
"of",
"role"
] | python | train |
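A hedged usage sketch, assuming a live MongoEngine connection and the module's authorization wrapper (the class name and client value are assumptions, not confirmed by this record):

    authz = Authorization(client='my_app')     # hypothetical wrapper

    perms = authz.get_permissions('editor')
    # Note the asymmetry in the source: a missing role yields the
    # *string* '[]', while an existing role yields a decoded list of
    # {'name': ...} permission documents.
    if isinstance(perms, list):
        for p in perms:
            print(p['name'])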
KennethWilke/PingdomLib | pingdomlib/check.py | https://github.com/KennethWilke/PingdomLib/blob/3ed1e481f9c9d16b032558d62fb05c2166e162ed/pingdomlib/check.py#L87-L143 | def getAnalyses(self, **kwargs):
"""Returns a list of the latest root cause analysis results for a
specified check.
Optional Parameters:
* limit -- Limits the number of returned results to the
specified quantity.
Type: Integer
Default: 100
* offset -- Offset for listing. (Requires limit.)
Type: Integer
Default: 0
* time_from -- Return only results with timestamp of first test greater
or equal to this value. Format is UNIX timestamp.
Type: Integer
Default: 0
* time_to -- Return only results with timestamp of first test less or
equal to this value. Format is UNIX timestamp.
Type: Integer
Default: Current Time
Returned structure:
[
{
'id' : <Integer> Analysis id
'timefirsttest' : <Integer> Time of test that initiated the
confirmation test
'timeconfrimtest' : <Integer> Time of the confirmation test
                                    that performed the error
analysis
},
...
]
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
# Warn user about unhandled kwargs
for key in kwargs:
if key not in ['limit', 'offset', 'from', 'to']:
sys.stderr.write('%s not a valid argument for analysis()\n'
% key)
response = self.pingdom.request('GET', 'analysis/%s' % self.id,
kwargs)
return [PingdomAnalysis(self, x) for x in response.json()['analysis']] | [
"def",
"getAnalyses",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# 'from' is a reserved word, use time_from instead",
"if",
"kwargs",
".",
"get",
"(",
"'time_from'",
")",
":",
"kwargs",
"[",
"'from'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'time_from'",
")",
"del",
"kwargs",
"[",
"'time_from'",
"]",
"if",
"kwargs",
".",
"get",
"(",
"'time_to'",
")",
":",
"kwargs",
"[",
"'to'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'time_to'",
")",
"del",
"kwargs",
"[",
"'time_to'",
"]",
"# Warn user about unhandled kwargs",
"for",
"key",
"in",
"kwargs",
":",
"if",
"key",
"not",
"in",
"[",
"'limit'",
",",
"'offset'",
",",
"'from'",
",",
"'to'",
"]",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'%s not a valid argument for analysis()\\n'",
"%",
"key",
")",
"response",
"=",
"self",
".",
"pingdom",
".",
"request",
"(",
"'GET'",
",",
"'analysis/%s'",
"%",
"self",
".",
"id",
",",
"kwargs",
")",
"return",
"[",
"PingdomAnalysis",
"(",
"self",
",",
"x",
")",
"for",
"x",
"in",
"response",
".",
"json",
"(",
")",
"[",
"'analysis'",
"]",
"]"
] | Returns a list of the latest root cause analysis results for a
specified check.
Optional Parameters:
* limit -- Limits the number of returned results to the
specified quantity.
Type: Integer
Default: 100
* offset -- Offset for listing. (Requires limit.)
Type: Integer
Default: 0
* time_from -- Return only results with timestamp of first test greater
or equal to this value. Format is UNIX timestamp.
Type: Integer
Default: 0
* time_to -- Return only results with timestamp of first test less or
equal to this value. Format is UNIX timestamp.
Type: Integer
Default: Current Time
Returned structure:
[
{
'id' : <Integer> Analysis id
'timefirsttest' : <Integer> Time of test that initiated the
confirmation test
'timeconfrimtest' : <Integer> Time of the confirmation test
                            that performed the error
analysis
},
...
] | [
"Returns",
"a",
"list",
"of",
"the",
"latest",
"root",
"cause",
"analysis",
"results",
"for",
"a",
"specified",
"check",
"."
] | python | train |
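A usage sketch for the method above, assuming a connected PingdomLib check object (ids and attribute names echo the documented return structure):

    import time

    day_ago = int(time.time()) - 86400
    for analysis in check.getAnalyses(limit=10, time_from=day_ago):
        # PingdomAnalysis objects built from the 'analysis' JSON array.
        print(analysis.id, analysis.timefirsttest)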
django-crispy-forms/django-crispy-forms | crispy_forms/templatetags/crispy_forms_tags.py | https://github.com/django-crispy-forms/django-crispy-forms/blob/cd476927a756133c667c199bb12120f877bf6b7e/crispy_forms/templatetags/crispy_forms_tags.py#L135-L184 | def get_response_dict(self, helper, context, is_formset):
"""
Returns a dictionary with all the parameters necessary to render the form/formset in a template.
:param context: `django.template.Context` for the node
:param is_formset: Boolean value. If set to True, indicates we are working with a formset.
"""
if not isinstance(helper, FormHelper):
raise TypeError('helper object provided to {% crispy %} tag must be a crispy.helper.FormHelper object.')
attrs = helper.get_attributes(template_pack=self.template_pack)
form_type = "form"
if is_formset:
form_type = "formset"
# We take form/formset parameters from attrs if they are set, otherwise we use defaults
response_dict = {
'template_pack': self.template_pack,
'%s_action' % form_type: attrs['attrs'].get("action", ''),
'%s_method' % form_type: attrs.get("form_method", 'post'),
'%s_tag' % form_type: attrs.get("form_tag", True),
'%s_class' % form_type: attrs['attrs'].get("class", ''),
'%s_id' % form_type: attrs['attrs'].get("id", ""),
'%s_style' % form_type: attrs.get("form_style", None),
'form_error_title': attrs.get("form_error_title", None),
'formset_error_title': attrs.get("formset_error_title", None),
'form_show_errors': attrs.get("form_show_errors", True),
'help_text_inline': attrs.get("help_text_inline", False),
'html5_required': attrs.get("html5_required", False),
'form_show_labels': attrs.get("form_show_labels", True),
'disable_csrf': attrs.get("disable_csrf", False),
'inputs': attrs.get('inputs', []),
'is_formset': is_formset,
'%s_attrs' % form_type: attrs.get('attrs', ''),
'flat_attrs': attrs.get('flat_attrs', ''),
'error_text_inline': attrs.get('error_text_inline', True),
'label_class': attrs.get('label_class', ''),
'field_class': attrs.get('field_class', ''),
'include_media': attrs.get('include_media', True),
}
# Handles custom attributes added to helpers
for attribute_name, value in attrs.items():
if attribute_name not in response_dict:
response_dict[attribute_name] = value
if 'csrf_token' in context:
response_dict['csrf_token'] = context['csrf_token']
return response_dict | [
"def",
"get_response_dict",
"(",
"self",
",",
"helper",
",",
"context",
",",
"is_formset",
")",
":",
"if",
"not",
"isinstance",
"(",
"helper",
",",
"FormHelper",
")",
":",
"raise",
"TypeError",
"(",
"'helper object provided to {% crispy %} tag must be a crispy.helper.FormHelper object.'",
")",
"attrs",
"=",
"helper",
".",
"get_attributes",
"(",
"template_pack",
"=",
"self",
".",
"template_pack",
")",
"form_type",
"=",
"\"form\"",
"if",
"is_formset",
":",
"form_type",
"=",
"\"formset\"",
"# We take form/formset parameters from attrs if they are set, otherwise we use defaults",
"response_dict",
"=",
"{",
"'template_pack'",
":",
"self",
".",
"template_pack",
",",
"'%s_action'",
"%",
"form_type",
":",
"attrs",
"[",
"'attrs'",
"]",
".",
"get",
"(",
"\"action\"",
",",
"''",
")",
",",
"'%s_method'",
"%",
"form_type",
":",
"attrs",
".",
"get",
"(",
"\"form_method\"",
",",
"'post'",
")",
",",
"'%s_tag'",
"%",
"form_type",
":",
"attrs",
".",
"get",
"(",
"\"form_tag\"",
",",
"True",
")",
",",
"'%s_class'",
"%",
"form_type",
":",
"attrs",
"[",
"'attrs'",
"]",
".",
"get",
"(",
"\"class\"",
",",
"''",
")",
",",
"'%s_id'",
"%",
"form_type",
":",
"attrs",
"[",
"'attrs'",
"]",
".",
"get",
"(",
"\"id\"",
",",
"\"\"",
")",
",",
"'%s_style'",
"%",
"form_type",
":",
"attrs",
".",
"get",
"(",
"\"form_style\"",
",",
"None",
")",
",",
"'form_error_title'",
":",
"attrs",
".",
"get",
"(",
"\"form_error_title\"",
",",
"None",
")",
",",
"'formset_error_title'",
":",
"attrs",
".",
"get",
"(",
"\"formset_error_title\"",
",",
"None",
")",
",",
"'form_show_errors'",
":",
"attrs",
".",
"get",
"(",
"\"form_show_errors\"",
",",
"True",
")",
",",
"'help_text_inline'",
":",
"attrs",
".",
"get",
"(",
"\"help_text_inline\"",
",",
"False",
")",
",",
"'html5_required'",
":",
"attrs",
".",
"get",
"(",
"\"html5_required\"",
",",
"False",
")",
",",
"'form_show_labels'",
":",
"attrs",
".",
"get",
"(",
"\"form_show_labels\"",
",",
"True",
")",
",",
"'disable_csrf'",
":",
"attrs",
".",
"get",
"(",
"\"disable_csrf\"",
",",
"False",
")",
",",
"'inputs'",
":",
"attrs",
".",
"get",
"(",
"'inputs'",
",",
"[",
"]",
")",
",",
"'is_formset'",
":",
"is_formset",
",",
"'%s_attrs'",
"%",
"form_type",
":",
"attrs",
".",
"get",
"(",
"'attrs'",
",",
"''",
")",
",",
"'flat_attrs'",
":",
"attrs",
".",
"get",
"(",
"'flat_attrs'",
",",
"''",
")",
",",
"'error_text_inline'",
":",
"attrs",
".",
"get",
"(",
"'error_text_inline'",
",",
"True",
")",
",",
"'label_class'",
":",
"attrs",
".",
"get",
"(",
"'label_class'",
",",
"''",
")",
",",
"'field_class'",
":",
"attrs",
".",
"get",
"(",
"'field_class'",
",",
"''",
")",
",",
"'include_media'",
":",
"attrs",
".",
"get",
"(",
"'include_media'",
",",
"True",
")",
",",
"}",
"# Handles custom attributes added to helpers",
"for",
"attribute_name",
",",
"value",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"if",
"attribute_name",
"not",
"in",
"response_dict",
":",
"response_dict",
"[",
"attribute_name",
"]",
"=",
"value",
"if",
"'csrf_token'",
"in",
"context",
":",
"response_dict",
"[",
"'csrf_token'",
"]",
"=",
"context",
"[",
"'csrf_token'",
"]",
"return",
"response_dict"
] | Returns a dictionary with all the parameters necessary to render the form/formset in a template.
:param context: `django.template.Context` for the node
:param is_formset: Boolean value. If set to True, indicates we are working with a formset. | [
"Returns",
"a",
"dictionary",
"with",
"all",
"the",
"parameters",
"necessary",
"to",
"render",
"the",
"form",
"/",
"formset",
"in",
"a",
"template",
"."
] | python | train |
glormph/msstitch | src/app/actions/headers/peptable.py | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/headers/peptable.py#L81-L94 | def get_proteininfo_fields(poolnames=False, genecentric=False):
"""Returns header fields for protein (group) information."""
allfields = OrderedDict()
basefields = [peptabledata.HEADER_PROTEINS,
peptabledata.HEADER_GENES,
peptabledata.HEADER_ASSOCIATED,
peptabledata.HEADER_DESCRIPTIONS,
peptabledata.HEADER_COVERAGES,
peptabledata.HEADER_NO_CONTENTPROTEINS,
]
for field in basefields:
allfields[field] = False
allfields[peptabledata.HEADER_NO_PSM] = poolnames
return allfields | [
"def",
"get_proteininfo_fields",
"(",
"poolnames",
"=",
"False",
",",
"genecentric",
"=",
"False",
")",
":",
"allfields",
"=",
"OrderedDict",
"(",
")",
"basefields",
"=",
"[",
"peptabledata",
".",
"HEADER_PROTEINS",
",",
"peptabledata",
".",
"HEADER_GENES",
",",
"peptabledata",
".",
"HEADER_ASSOCIATED",
",",
"peptabledata",
".",
"HEADER_DESCRIPTIONS",
",",
"peptabledata",
".",
"HEADER_COVERAGES",
",",
"peptabledata",
".",
"HEADER_NO_CONTENTPROTEINS",
",",
"]",
"for",
"field",
"in",
"basefields",
":",
"allfields",
"[",
"field",
"]",
"=",
"False",
"allfields",
"[",
"peptabledata",
".",
"HEADER_NO_PSM",
"]",
"=",
"poolnames",
"return",
"allfields"
] | Returns header fields for protein (group) information. | [
"Returns",
"header",
"fields",
"for",
"protein",
"(",
"group",
")",
"information",
"."
] | python | train |
webrecorder/warcio | warcio/statusandheaders.py | https://github.com/webrecorder/warcio/blob/c64c4394805e13256695f51af072c95389397ee9/warcio/statusandheaders.py#L176-L195 | def percent_encode_non_ascii_headers(self, encoding='UTF-8'):
""" Encode any headers that are not plain ascii
as UTF-8 as per:
https://tools.ietf.org/html/rfc8187#section-3.2.3
https://tools.ietf.org/html/rfc5987#section-3.2.2
"""
def do_encode(m):
return "*={0}''".format(encoding) + quote(to_native_str(m.group(1)))
for index in range(len(self.headers) - 1, -1, -1):
curr_name, curr_value = self.headers[index]
try:
# test if header is ascii encodable, no action needed
curr_value.encode('ascii')
except:
new_value = self.ENCODE_HEADER_RX.sub(do_encode, curr_value)
if new_value == curr_value:
new_value = quote(curr_value)
self.headers[index] = (curr_name, new_value) | [
"def",
"percent_encode_non_ascii_headers",
"(",
"self",
",",
"encoding",
"=",
"'UTF-8'",
")",
":",
"def",
"do_encode",
"(",
"m",
")",
":",
"return",
"\"*={0}''\"",
".",
"format",
"(",
"encoding",
")",
"+",
"quote",
"(",
"to_native_str",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
")",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"headers",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"curr_name",
",",
"curr_value",
"=",
"self",
".",
"headers",
"[",
"index",
"]",
"try",
":",
"# test if header is ascii encodable, no action needed",
"curr_value",
".",
"encode",
"(",
"'ascii'",
")",
"except",
":",
"new_value",
"=",
"self",
".",
"ENCODE_HEADER_RX",
".",
"sub",
"(",
"do_encode",
",",
"curr_value",
")",
"if",
"new_value",
"==",
"curr_value",
":",
"new_value",
"=",
"quote",
"(",
"curr_value",
")",
"self",
".",
"headers",
"[",
"index",
"]",
"=",
"(",
"curr_name",
",",
"new_value",
")"
] | Encode any headers that are not plain ascii
as UTF-8 as per:
https://tools.ietf.org/html/rfc8187#section-3.2.3
https://tools.ietf.org/html/rfc5987#section-3.2.2 | [
"Encode",
"any",
"headers",
"that",
"are",
"not",
"plain",
"ascii",
"as",
"UTF",
"-",
"8",
"as",
"per",
":",
"https",
":",
"//",
"tools",
".",
"ietf",
".",
"org",
"/",
"html",
"/",
"rfc8187#section",
"-",
"3",
".",
"2",
".",
"3",
"https",
":",
"//",
"tools",
".",
"ietf",
".",
"org",
"/",
"html",
"/",
"rfc5987#section",
"-",
"3",
".",
"2",
".",
"2"
] | python | train |
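A small sketch of the method above with warcio's `StatusAndHeaders`; the header value is illustrative, and the `*=UTF-8''` form follows the RFCs the docstring cites:

    from warcio.statusandheaders import StatusAndHeaders

    headers = StatusAndHeaders('200 OK', [
        ('Content-Disposition', 'attachment; filename="naïve.txt"')])
    headers.percent_encode_non_ascii_headers()

    # The non-ascii value is rewritten roughly as:
    #   attachment; filename*=UTF-8''na%C3%AFve.txt
    print(headers.get_header('Content-Disposition'))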
lincolnloop/goodconf | goodconf/values.py | https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/values.py#L96-L102 | def cast(self, val: str):
"""converts string to type requested by `cast_as`"""
try:
return getattr(self, 'cast_as_{}'.format(
self.cast_as.__name__.lower()))(val)
except AttributeError:
return self.cast_as(val) | [
"def",
"cast",
"(",
"self",
",",
"val",
":",
"str",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"self",
",",
"'cast_as_{}'",
".",
"format",
"(",
"self",
".",
"cast_as",
".",
"__name__",
".",
"lower",
"(",
")",
")",
")",
"(",
"val",
")",
"except",
"AttributeError",
":",
"return",
"self",
".",
"cast_as",
"(",
"val",
")"
] | converts string to type requested by `cast_as` | [
"converts",
"string",
"to",
"type",
"requested",
"by",
"cast_as"
] | python | test |
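The dispatch above prefers a `cast_as_<typename>` method and falls back to calling `cast_as` directly; a self-contained sketch of the same pattern (this toy class is illustrative, not goodconf's real `Value`):

    class Value:
        def __init__(self, cast_as):
            self.cast_as = cast_as

        def cast_as_bool(self, val):
            # 'true'/'yes'/'1'/'on' -> True, everything else -> False
            return val.lower() in ('true', 'yes', '1', 'on')

        def cast(self, val):
            try:
                return getattr(self, 'cast_as_{}'.format(
                    self.cast_as.__name__.lower()))(val)
            except AttributeError:
                return self.cast_as(val)

    print(Value(bool).cast('yes'))   # True, via cast_as_bool
    print(Value(int).cast('42'))     # 42, via the int() fallback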
Chilipp/psyplot | psyplot/plotter.py | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L2145-L2168 | def _set_sharing_keys(self, keys):
"""
Set the keys to share or unshare
Parameters
----------
keys: string or iterable of strings
The iterable may contain formatoptions that shall be shared (or
unshared), or group names of formatoptions to share all
formatoptions of that group (see the :attr:`fmt_groups` property).
If None, all formatoptions of this plotter are inserted.
Returns
-------
set
The set of formatoptions to share (or unshare)"""
if isinstance(keys, str):
keys = {keys}
keys = set(self) if keys is None else set(keys)
fmto_groups = self._fmto_groups
keys.update(chain(*(map(lambda fmto: fmto.key, fmto_groups[key])
for key in keys.intersection(fmto_groups))))
keys.difference_update(fmto_groups)
return keys | [
"def",
"_set_sharing_keys",
"(",
"self",
",",
"keys",
")",
":",
"if",
"isinstance",
"(",
"keys",
",",
"str",
")",
":",
"keys",
"=",
"{",
"keys",
"}",
"keys",
"=",
"set",
"(",
"self",
")",
"if",
"keys",
"is",
"None",
"else",
"set",
"(",
"keys",
")",
"fmto_groups",
"=",
"self",
".",
"_fmto_groups",
"keys",
".",
"update",
"(",
"chain",
"(",
"*",
"(",
"map",
"(",
"lambda",
"fmto",
":",
"fmto",
".",
"key",
",",
"fmto_groups",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"keys",
".",
"intersection",
"(",
"fmto_groups",
")",
")",
")",
")",
"keys",
".",
"difference_update",
"(",
"fmto_groups",
")",
"return",
"keys"
] | Set the keys to share or unshare
Parameters
----------
keys: string or iterable of strings
The iterable may contain formatoptions that shall be shared (or
unshared), or group names of formatoptions to share all
formatoptions of that group (see the :attr:`fmt_groups` property).
If None, all formatoptions of this plotter are inserted.
Returns
-------
set
The set of formatoptions to share (or unshare) | [
"Set",
"the",
"keys",
"to",
"share",
"or",
"unshare"
] | python | train |
Azure/azure-event-hubs-python | azure/eventhub/client.py | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/client.py#L147-L170 | def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs):
"""Create an EventHubClient from an existing auth token or token generator.
:param address: The Event Hub address URL
:type address: str
:param sas_token: A SAS token or function that returns a SAS token. If a function is supplied,
it will be used to retrieve subsequent tokens in the case of token expiry. The function should
take no arguments.
:type sas_token: str or callable
:param eventhub: The name of the EventHub, if not already included in the address URL.
:type eventhub: str
:param debug: Whether to output network trace logs to the logger. Default
is `False`.
:type debug: bool
:param http_proxy: HTTP proxy settings. This must be a dictionary with the following
keys: 'proxy_hostname' (str value) and 'proxy_port' (int value).
Additionally the following keys may also be present: 'username', 'password'.
:type http_proxy: dict[str, Any]
:param auth_timeout: The time in seconds to wait for a token to be authorized by the service.
The default value is 60 seconds. If set to 0, no timeout will be enforced from the client.
:type auth_timeout: int
"""
address = _build_uri(address, eventhub)
return cls(address, sas_token=sas_token, **kwargs) | [
"def",
"from_sas_token",
"(",
"cls",
",",
"address",
",",
"sas_token",
",",
"eventhub",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"address",
"=",
"_build_uri",
"(",
"address",
",",
"eventhub",
")",
"return",
"cls",
"(",
"address",
",",
"sas_token",
"=",
"sas_token",
",",
"*",
"*",
"kwargs",
")"
] | Create an EventHubClient from an existing auth token or token generator.
:param address: The Event Hub address URL
:type address: str
:param sas_token: A SAS token or function that returns a SAS token. If a function is supplied,
it will be used to retrieve subsequent tokens in the case of token expiry. The function should
take no arguments.
:type sas_token: str or callable
:param eventhub: The name of the EventHub, if not already included in the address URL.
:type eventhub: str
:param debug: Whether to output network trace logs to the logger. Default
is `False`.
:type debug: bool
:param http_proxy: HTTP proxy settings. This must be a dictionary with the following
keys: 'proxy_hostname' (str value) and 'proxy_port' (int value).
Additionally the following keys may also be present: 'username', 'password'.
:type http_proxy: dict[str, Any]
:param auth_timeout: The time in seconds to wait for a token to be authorized by the service.
The default value is 60 seconds. If set to 0, no timeout will be enforced from the client.
:type auth_timeout: int | [
"Create",
"an",
"EventHubClient",
"from",
"an",
"existing",
"auth",
"token",
"or",
"token",
"generator",
"."
] | python | train |
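A usage sketch for the classmethod above (namespace, hub, and token contents are placeholders):

    from azure.eventhub import EventHubClient

    address = 'amqps://my-namespace.servicebus.windows.net'

    def get_token():
        # Zero-argument callable so the client can renew on expiry.
        return 'SharedAccessSignature sr=...&sig=...&se=...&skn=...'

    client = EventHubClient.from_sas_token(
        address, get_token, eventhub='my-hub', auth_timeout=60)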
ctuning/ck | ck/repo/module/repo/module.py | https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/repo/module/repo/module.py#L1591-L1627 | def unzip(i):
"""
Input: {
(data_uoa) - repo UOA where to unzip (default, if not specified)
zip - path to zipfile (local or remote http/ftp)
(overwrite) - if 'yes', overwrite files when unarchiving
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
duoa=i.get('data_uoa','')
if duoa=='': duoa='local'
overwrite=i.get('overwrite','')
zip=i.get('zip','')
if zip=='': zip='ckr.zip'
# Find path to repo
r=ck.find_path_to_repo({'repo_uoa':duoa})
if r['return']>0: return r
path=r['path']
# Unzipping archive
rz=get_and_unzip_archive({'zip':zip, 'path':path, 'overwrite':overwrite, 'out':o})
if rz['return']>0: return rz
return {'return':0} | [
"def",
"unzip",
"(",
"i",
")",
":",
"o",
"=",
"i",
".",
"get",
"(",
"'out'",
",",
"''",
")",
"duoa",
"=",
"i",
".",
"get",
"(",
"'data_uoa'",
",",
"''",
")",
"if",
"duoa",
"==",
"''",
":",
"duoa",
"=",
"'local'",
"overwrite",
"=",
"i",
".",
"get",
"(",
"'overwrite'",
",",
"''",
")",
"zip",
"=",
"i",
".",
"get",
"(",
"'zip'",
",",
"''",
")",
"if",
"zip",
"==",
"''",
":",
"zip",
"=",
"'ckr.zip'",
"# Find path to repo",
"r",
"=",
"ck",
".",
"find_path_to_repo",
"(",
"{",
"'repo_uoa'",
":",
"duoa",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"path",
"=",
"r",
"[",
"'path'",
"]",
"# Unzipping archive",
"rz",
"=",
"get_and_unzip_archive",
"(",
"{",
"'zip'",
":",
"zip",
",",
"'path'",
":",
"path",
",",
"'overwrite'",
":",
"overwrite",
",",
"'out'",
":",
"o",
"}",
")",
"if",
"rz",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"rz",
"return",
"{",
"'return'",
":",
"0",
"}"
] | Input: {
(data_uoa) - repo UOA where to unzip (default, if not specified)
zip - path to zipfile (local or remote http/ftp)
(overwrite) - if 'yes', overwrite files when unarchiving
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
} | [
"Input",
":",
"{",
"(",
"data_uoa",
")",
"-",
"repo",
"UOA",
"where",
"to",
"unzip",
"(",
"default",
"if",
"not",
"specified",
")",
"zip",
"-",
"path",
"to",
"zipfile",
"(",
"local",
"or",
"remote",
"http",
"/",
"ftp",
")",
"(",
"overwrite",
")",
"-",
"if",
"yes",
"overwrite",
"files",
"when",
"unarchiving",
"}"
] | python | train |
agoragames/chai | chai/stub.py | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L576-L587 | def _teardown(self):
'''
Overload so that we can clear out the cache after a test run.
'''
# __new__ is a super-special case in that even when stubbing a class
# which implements its own __new__ and subclasses object, the
# "Class.__new__" reference is a staticmethod and not a method (or
# function). That confuses the "was_object_method" logic in
# StubFunction which then fails to delattr and from then on the class
# is corrupted. So skip that teardown and use a __new__-specific case.
setattr(self._instance, self._attr, staticmethod(self._new))
StubNew._cache.pop(self._type) | [
"def",
"_teardown",
"(",
"self",
")",
":",
"# __new__ is a super-special case in that even when stubbing a class",
"# which implements its own __new__ and subclasses object, the",
"# \"Class.__new__\" reference is a staticmethod and not a method (or",
"# function). That confuses the \"was_object_method\" logic in",
"# StubFunction which then fails to delattr and from then on the class",
"# is corrupted. So skip that teardown and use a __new__-specific case.",
"setattr",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_attr",
",",
"staticmethod",
"(",
"self",
".",
"_new",
")",
")",
"StubNew",
".",
"_cache",
".",
"pop",
"(",
"self",
".",
"_type",
")"
] | Overload so that we can clear out the cache after a test run. | [
"Overload",
"so",
"that",
"we",
"can",
"clear",
"out",
"the",
"cache",
"after",
"a",
"test",
"run",
"."
] | python | train |
JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/treemodel.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/treemodel.py#L241-L254 | def set_model(self, model):
"""Set the model the item belongs to
A TreeItem can only belong to one model.
:param model: the model the item belongs to
:type model: :class:`Treemodel`
:returns: None
:rtype: None
:raises: None
"""
self._model = model
for c in self.childItems:
c.set_model(model) | [
"def",
"set_model",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"_model",
"=",
"model",
"for",
"c",
"in",
"self",
".",
"childItems",
":",
"c",
".",
"set_model",
"(",
"model",
")"
] | Set the model the item belongs to
A TreeItem can only belong to one model.
:param model: the model the item belongs to
:type model: :class:`Treemodel`
:returns: None
:rtype: None
:raises: None | [
"Set",
"the",
"model",
"the",
"item",
"belongs",
"to"
] | python | train |
PythonCharmers/python-future | src/future/backports/http/server.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/server.py#L441-L452 | def send_response(self, code, message=None):
"""Add the response header to the headers buffer and log the
response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string()) | [
"def",
"send_response",
"(",
"self",
",",
"code",
",",
"message",
"=",
"None",
")",
":",
"self",
".",
"log_request",
"(",
"code",
")",
"self",
".",
"send_response_only",
"(",
"code",
",",
"message",
")",
"self",
".",
"send_header",
"(",
"'Server'",
",",
"self",
".",
"version_string",
"(",
")",
")",
"self",
".",
"send_header",
"(",
"'Date'",
",",
"self",
".",
"date_time_string",
"(",
")",
")"
] | Add the response header to the headers buffer and log the
response code.
Also send two standard headers with the server software
version and the current date. | [
"Add",
"the",
"response",
"header",
"to",
"the",
"headers",
"buffer",
"and",
"log",
"the",
"response",
"code",
"."
] | python | train |
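The method above is the standard BaseHTTPRequestHandler hook; a minimal handler exercising it, written against the stdlib `http.server` that this backport mirrors:

    from http.server import BaseHTTPRequestHandler, HTTPServer

    class Handler(BaseHTTPRequestHandler):
        def do_GET(self):
            # Logs the request and queues Server/Date headers for us.
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write(b'hello\n')

    # HTTPServer(('127.0.0.1', 8000), Handler).serve_forever()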
keon/algorithms | algorithms/set/set_covering.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/set/set_covering.py#L37-L58 | def optimal_set_cover(universe, subsets, costs):
""" Optimal algorithm - DONT USE ON BIG INPUTS - O(2^n) complexity!
    Finds the minimum cost subcollection of S that covers all elements of U
Args:
universe (list): Universe of elements
subsets (dict): Subsets of U {S1:elements,S2:elements}
costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}
"""
pset = powerset(subsets.keys())
best_set = None
best_cost = float("inf")
for subset in pset:
covered = set()
cost = 0
for s in subset:
covered.update(subsets[s])
cost += costs[s]
if len(covered) == len(universe) and cost < best_cost:
best_set = subset
best_cost = cost
return best_set | [
"def",
"optimal_set_cover",
"(",
"universe",
",",
"subsets",
",",
"costs",
")",
":",
"pset",
"=",
"powerset",
"(",
"subsets",
".",
"keys",
"(",
")",
")",
"best_set",
"=",
"None",
"best_cost",
"=",
"float",
"(",
"\"inf\"",
")",
"for",
"subset",
"in",
"pset",
":",
"covered",
"=",
"set",
"(",
")",
"cost",
"=",
"0",
"for",
"s",
"in",
"subset",
":",
"covered",
".",
"update",
"(",
"subsets",
"[",
"s",
"]",
")",
"cost",
"+=",
"costs",
"[",
"s",
"]",
"if",
"len",
"(",
"covered",
")",
"==",
"len",
"(",
"universe",
")",
"and",
"cost",
"<",
"best_cost",
":",
"best_set",
"=",
"subset",
"best_cost",
"=",
"cost",
"return",
"best_set"
] | Optimal algorithm - DONT USE ON BIG INPUTS - O(2^n) complexity!
    Finds the minimum cost subcollection of S that covers all elements of U
Args:
universe (list): Universe of elements
subsets (dict): Subsets of U {S1:elements,S2:elements}
costs (dict): Costs of each subset in S - {S1:cost, S2:cost...} | [
"Optimal",
"algorithm",
"-",
"DONT",
"USE",
"ON",
"BIG",
"INPUTS",
"-",
"O",
"(",
"2^n",
")",
"complexity!",
"Finds",
"the",
"minimum",
"cost",
"subcollection",
"os",
"S",
"that",
"covers",
"all",
"elements",
"of",
"U"
] | python | train |
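A worked call for the brute-force cover above; it enumerates the whole powerset, so inputs must stay tiny (sets and costs are illustrative):

    universe = [1, 2, 3, 4, 5]
    subsets = {'S1': [1, 2, 3], 'S2': [2, 4], 'S3': [3, 4], 'S4': [4, 5]}
    costs = {'S1': 5, 'S2': 10, 'S3': 3, 'S4': 1}

    # Tries all 2^4 subcollections; ('S1', 'S4') covers {1..5} at cost 6.
    print(optimal_set_cover(universe, subsets, costs))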
synw/dataswim | dataswim/data/__init__.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/__init__.py#L55-L65 | def clone_(self, quiet=False):
"""
Clone the DataSwim instance
"""
ds2 = self._duplicate_(quiet=True)
if ds2 is None:
self.err("Can not clone instance")
else:
if quiet is False:
self.ok("Instance cloned")
return ds2 | [
"def",
"clone_",
"(",
"self",
",",
"quiet",
"=",
"False",
")",
":",
"ds2",
"=",
"self",
".",
"_duplicate_",
"(",
"quiet",
"=",
"True",
")",
"if",
"ds2",
"is",
"None",
":",
"self",
".",
"err",
"(",
"\"Can not clone instance\"",
")",
"else",
":",
"if",
"quiet",
"is",
"False",
":",
"self",
".",
"ok",
"(",
"\"Instance cloned\"",
")",
"return",
"ds2"
] | Clone the DataSwim instance | [
"Clone",
"the",
"DataSwim",
"instance"
] | python | train |
tensorflow/probability | tensorflow_probability/examples/disentangled_vae.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L421-L441 | def call(self, inputs):
"""Runs the model to generate an intermediate representation of x_t.
Args:
inputs: A batch of image sequences `x_{1:T}` of shape
`[sample_shape, batch_size, timesteps, height, width,
channels]`.
Returns:
A batch of intermediate representations of shape [sample_shape,
batch_size, timesteps, hidden_size].
"""
image_shape = tf.shape(input=inputs)[-3:]
collapsed_shape = tf.concat(([-1], image_shape), axis=0)
out = tf.reshape(inputs, collapsed_shape) # (sample*batch*T, h, w, c)
out = self.conv1(out)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
expanded_shape = tf.concat((tf.shape(input=inputs)[:-3], [-1]), axis=0)
return tf.reshape(out, expanded_shape) | [
"def",
"call",
"(",
"self",
",",
"inputs",
")",
":",
"image_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"inputs",
")",
"[",
"-",
"3",
":",
"]",
"collapsed_shape",
"=",
"tf",
".",
"concat",
"(",
"(",
"[",
"-",
"1",
"]",
",",
"image_shape",
")",
",",
"axis",
"=",
"0",
")",
"out",
"=",
"tf",
".",
"reshape",
"(",
"inputs",
",",
"collapsed_shape",
")",
"# (sample*batch*T, h, w, c)",
"out",
"=",
"self",
".",
"conv1",
"(",
"out",
")",
"out",
"=",
"self",
".",
"conv2",
"(",
"out",
")",
"out",
"=",
"self",
".",
"conv3",
"(",
"out",
")",
"out",
"=",
"self",
".",
"conv4",
"(",
"out",
")",
"expanded_shape",
"=",
"tf",
".",
"concat",
"(",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"inputs",
")",
"[",
":",
"-",
"3",
"]",
",",
"[",
"-",
"1",
"]",
")",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"reshape",
"(",
"out",
",",
"expanded_shape",
")"
] | Runs the model to generate an intermediate representation of x_t.
Args:
inputs: A batch of image sequences `x_{1:T}` of shape
`[sample_shape, batch_size, timesteps, height, width,
channels]`.
Returns:
A batch of intermediate representations of shape [sample_shape,
batch_size, timesteps, hidden_size]. | [
"Runs",
"the",
"model",
"to",
"generate",
"an",
"intermediate",
"representation",
"of",
"x_t",
"."
] | python | test |
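The collapse/expand trick above folds the leading sample, batch, and time axes into one so the 2-D conv stack sees plain image batches, then restores them; a shape-only sketch of the same reshapes (TensorFlow 2 eager, with a trivial per-frame reduction standing in for the convolutions):

    import tensorflow as tf

    x = tf.zeros([3, 8, 16, 64, 64, 1])   # [sample, batch, T, h, w, c]

    collapsed = tf.reshape(x, tf.concat(([-1], tf.shape(x)[-3:]), axis=0))
    print(collapsed.shape)                 # (384, 64, 64, 1)

    frames = tf.reduce_mean(collapsed, axis=[1, 2, 3])   # stand-in "conv"
    expanded = tf.reshape(frames,
                          tf.concat((tf.shape(x)[:-3], [-1]), axis=0))
    print(expanded.shape)                  # (3, 8, 16, 1)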
ruipgil/TrackToTrip | tracktotrip/transportation_mode.py | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/transportation_mode.py#L142-L167 | def detect_changepoints(points, min_time, data_processor=acc_difference):
""" Detects changepoints on points that have at least a specific duration
Args:
points (:obj:`Point`)
        min_time (float): Min time that a sub-segment, bounded by two changepoints, must have
        data_processor (function): Function to extract data to feed to the changepoint algorithm.
            Defaults to `acc_difference`
Returns:
:obj:`list` of int: Indexes of changepoints
"""
data = data_processor(points)
changepoints = pelt(normal_mean(data, np.std(data)), len(data))
changepoints.append(len(points) - 1)
result = []
for start, end in pairwise(changepoints):
time_diff = points[end].time_difference(points[start])
if time_diff > min_time:
result.append(start)
# adds the first point
result.append(0)
# adds the last changepoint detected
result.append(len(points) - 1)
return sorted(list(set(result))) | [
"def",
"detect_changepoints",
"(",
"points",
",",
"min_time",
",",
"data_processor",
"=",
"acc_difference",
")",
":",
"data",
"=",
"data_processor",
"(",
"points",
")",
"changepoints",
"=",
"pelt",
"(",
"normal_mean",
"(",
"data",
",",
"np",
".",
"std",
"(",
"data",
")",
")",
",",
"len",
"(",
"data",
")",
")",
"changepoints",
".",
"append",
"(",
"len",
"(",
"points",
")",
"-",
"1",
")",
"result",
"=",
"[",
"]",
"for",
"start",
",",
"end",
"in",
"pairwise",
"(",
"changepoints",
")",
":",
"time_diff",
"=",
"points",
"[",
"end",
"]",
".",
"time_difference",
"(",
"points",
"[",
"start",
"]",
")",
"if",
"time_diff",
">",
"min_time",
":",
"result",
".",
"append",
"(",
"start",
")",
"# adds the first point",
"result",
".",
"append",
"(",
"0",
")",
"# adds the last changepoint detected",
"result",
".",
"append",
"(",
"len",
"(",
"points",
")",
"-",
"1",
")",
"return",
"sorted",
"(",
"list",
"(",
"set",
"(",
"result",
")",
")",
")"
] | Detects changepoints on points that have at least a specific duration
Args:
points (:obj:`Point`)
        min_time (float): Min time that a sub-segment, bounded by two changepoints, must have
        data_processor (function): Function to extract data to feed to the changepoint algorithm.
            Defaults to `acc_difference`
Returns:
:obj:`list` of int: Indexes of changepoints | [
"Detects",
"changepoints",
"on",
"points",
"that",
"have",
"at",
"least",
"a",
"specific",
"duration"
] | python | train |
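A hedged sketch of feeding the detector above, assuming tracktotrip's `Point(lat, lon, time)` constructor (coordinates and spacing are illustrative):

    from datetime import datetime, timedelta
    from tracktotrip import Point
    from tracktotrip.transportation_mode import detect_changepoints

    t0 = datetime(2019, 1, 1)
    points = [Point(38.71 + 0.0001 * i, -9.14, t0 + timedelta(seconds=5 * i))
              for i in range(100)]

    # Indexes bounding sub-segments that last at least 60 seconds.
    print(detect_changepoints(points, min_time=60))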
pantsbuild/pants | src/python/pants/base/deprecated.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/deprecated.py#L132-L202 | def warn_or_error(removal_version, deprecated_entity_description, hint=None,
deprecation_start_version=None,
stacklevel=3, frame_info=None, context=1, ensure_stderr=False):
"""Check the removal_version against the current pants version.
Issues a warning if the removal version is > current pants version, or an error otherwise.
:param string removal_version: The pantsbuild.pants version at which the deprecated entity
will be/was removed.
:param string deprecated_entity_description: A short description of the deprecated entity, that
we can embed in warning/error messages.
:param string hint: A message describing how to migrate from the removed entity.
:param string deprecation_start_version: The pantsbuild.pants version at which the entity will
begin to display a deprecation warning. This must be less
than the `removal_version`. If not provided, the
deprecation warning is always displayed.
:param int stacklevel: The stacklevel to pass to warnings.warn.
:param FrameInfo frame_info: If provided, use this frame info instead of getting one from
`stacklevel`.
:param int context: The number of lines of source code surrounding the selected frame to display
in a warning message.
    :param bool ensure_stderr: Whether to use warnings.warn, or use warnings.showwarning to print
directly to stderr.
:raises DeprecationApplicationError: if the removal_version parameter is invalid.
:raises CodeRemovedError: if the current version is later than the version marked for removal.
"""
removal_semver = validate_deprecation_semver(removal_version, 'removal version')
if deprecation_start_version:
deprecation_start_semver = validate_deprecation_semver(
deprecation_start_version, 'deprecation start version')
if deprecation_start_semver >= removal_semver:
raise InvalidSemanticVersionOrderingError(
'The deprecation start version {} must be less than the end version {}.'
.format(deprecation_start_version, removal_version))
elif PANTS_SEMVER < deprecation_start_semver:
return
msg = 'DEPRECATED: {} {} removed in version {}.'.format(deprecated_entity_description,
get_deprecated_tense(removal_version), removal_version)
if hint:
msg += '\n {}'.format(hint)
# We need to have filename and line_number for warnings.formatwarning, which appears to be the only
# way to get a warning message to display to stderr. We get that from frame_info -- it's too bad
# we have to reconstruct the `stacklevel` logic ourselves, but we do also gain the ability to have
# multiple lines of context, which is neat.
if frame_info is None:
frame_info = _get_frame_info(stacklevel, context=context)
_, filename, line_number, _, code_context, _ = frame_info
if code_context:
context_lines = ''.join(code_context)
else:
context_lines = '<no code context available>'
if removal_semver > PANTS_SEMVER:
if ensure_stderr:
# No warning filters can stop us from printing this message directly to stderr.
warning_msg = warnings.formatwarning(
msg, DeprecationWarning, filename, line_number, line=context_lines)
print(warning_msg, file=sys.stderr)
else:
# This output is filtered by warning filters.
with _greater_warnings_context(context_lines):
warnings.warn_explicit(
message=DeprecationWarning(msg) if PY2 else msg,
category=DeprecationWarning,
filename=filename,
lineno=line_number)
return msg
else:
raise CodeRemovedError(msg) | [
"def",
"warn_or_error",
"(",
"removal_version",
",",
"deprecated_entity_description",
",",
"hint",
"=",
"None",
",",
"deprecation_start_version",
"=",
"None",
",",
"stacklevel",
"=",
"3",
",",
"frame_info",
"=",
"None",
",",
"context",
"=",
"1",
",",
"ensure_stderr",
"=",
"False",
")",
":",
"removal_semver",
"=",
"validate_deprecation_semver",
"(",
"removal_version",
",",
"'removal version'",
")",
"if",
"deprecation_start_version",
":",
"deprecation_start_semver",
"=",
"validate_deprecation_semver",
"(",
"deprecation_start_version",
",",
"'deprecation start version'",
")",
"if",
"deprecation_start_semver",
">=",
"removal_semver",
":",
"raise",
"InvalidSemanticVersionOrderingError",
"(",
"'The deprecation start version {} must be less than the end version {}.'",
".",
"format",
"(",
"deprecation_start_version",
",",
"removal_version",
")",
")",
"elif",
"PANTS_SEMVER",
"<",
"deprecation_start_semver",
":",
"return",
"msg",
"=",
"'DEPRECATED: {} {} removed in version {}.'",
".",
"format",
"(",
"deprecated_entity_description",
",",
"get_deprecated_tense",
"(",
"removal_version",
")",
",",
"removal_version",
")",
"if",
"hint",
":",
"msg",
"+=",
"'\\n {}'",
".",
"format",
"(",
"hint",
")",
"# We need to have filename and line_number for warnings.formatwarning, which appears to be the only",
"# way to get a warning message to display to stderr. We get that from frame_info -- it's too bad",
"# we have to reconstruct the `stacklevel` logic ourselves, but we do also gain the ability to have",
"# multiple lines of context, which is neat.",
"if",
"frame_info",
"is",
"None",
":",
"frame_info",
"=",
"_get_frame_info",
"(",
"stacklevel",
",",
"context",
"=",
"context",
")",
"_",
",",
"filename",
",",
"line_number",
",",
"_",
",",
"code_context",
",",
"_",
"=",
"frame_info",
"if",
"code_context",
":",
"context_lines",
"=",
"''",
".",
"join",
"(",
"code_context",
")",
"else",
":",
"context_lines",
"=",
"'<no code context available>'",
"if",
"removal_semver",
">",
"PANTS_SEMVER",
":",
"if",
"ensure_stderr",
":",
"# No warning filters can stop us from printing this message directly to stderr.",
"warning_msg",
"=",
"warnings",
".",
"formatwarning",
"(",
"msg",
",",
"DeprecationWarning",
",",
"filename",
",",
"line_number",
",",
"line",
"=",
"context_lines",
")",
"print",
"(",
"warning_msg",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"else",
":",
"# This output is filtered by warning filters.",
"with",
"_greater_warnings_context",
"(",
"context_lines",
")",
":",
"warnings",
".",
"warn_explicit",
"(",
"message",
"=",
"DeprecationWarning",
"(",
"msg",
")",
"if",
"PY2",
"else",
"msg",
",",
"category",
"=",
"DeprecationWarning",
",",
"filename",
"=",
"filename",
",",
"lineno",
"=",
"line_number",
")",
"return",
"msg",
"else",
":",
"raise",
"CodeRemovedError",
"(",
"msg",
")"
] | Check the removal_version against the current pants version.
Issues a warning if the removal version is > current pants version, or an error otherwise.
:param string removal_version: The pantsbuild.pants version at which the deprecated entity
will be/was removed.
:param string deprecated_entity_description: A short description of the deprecated entity, that
we can embed in warning/error messages.
:param string hint: A message describing how to migrate from the removed entity.
:param string deprecation_start_version: The pantsbuild.pants version at which the entity will
begin to display a deprecation warning. This must be less
than the `removal_version`. If not provided, the
deprecation warning is always displayed.
:param int stacklevel: The stacklevel to pass to warnings.warn.
:param FrameInfo frame_info: If provided, use this frame info instead of getting one from
`stacklevel`.
:param int context: The number of lines of source code surrounding the selected frame to display
in a warning message.
    :param bool ensure_stderr: Whether to use warnings.warn, or use warnings.showwarning to print
directly to stderr.
:raises DeprecationApplicationError: if the removal_version parameter is invalid.
:raises CodeRemovedError: if the current version is later than the version marked for removal. | [
"Check",
"the",
"removal_version",
"against",
"the",
"current",
"pants",
"version",
"."
] | python | train |
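A usage sketch for the helper above, as deprecating code inside pants would call it (the version string and entity are placeholders):

    from pants.base.deprecated import warn_or_error

    # Removal version in the future -> DeprecationWarning, message returned.
    warn_or_error(
        removal_version='999.999.999.dev0',
        deprecated_entity_description='the `--foo` flag',
        hint='Use `--bar` instead.',
    )
    # A removal version at or below the running version raises
    # CodeRemovedError instead.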
fermiPy/fermipy | fermipy/gtutils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L490-L497 | def nFreeParams(self):
"""Count the number of free parameters in the active model."""
nF = 0
pars = self.params()
for par in pars:
if par.isFree():
nF += 1
return nF | [
"def",
"nFreeParams",
"(",
"self",
")",
":",
"nF",
"=",
"0",
"pars",
"=",
"self",
".",
"params",
"(",
")",
"for",
"par",
"in",
"pars",
":",
"if",
"par",
".",
"isFree",
"(",
")",
":",
"nF",
"+=",
"1",
"return",
"nF"
] | Count the number of free parameters in the active model. | [
"Count",
"the",
"number",
"of",
"free",
"parameters",
"in",
"the",
"active",
"model",
"."
] | python | train |
sprockets/sprockets | sprockets/cli.py | https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L189-L202 | def _get_application_module(self, controller, application):
"""Return the module for an application. If it's a entry-point
registered application name, return the module name from the entry
points data. If not, the passed in application name is returned.
:param str controller: The controller type
:param str application: The application name or module
:rtype: str
"""
for pkg in self._get_applications(controller):
if pkg.name == application:
return pkg.module_name
return application | [
"def",
"_get_application_module",
"(",
"self",
",",
"controller",
",",
"application",
")",
":",
"for",
"pkg",
"in",
"self",
".",
"_get_applications",
"(",
"controller",
")",
":",
"if",
"pkg",
".",
"name",
"==",
"application",
":",
"return",
"pkg",
".",
"module_name",
"return",
"application"
] | Return the module for an application. If it's an entry-point
registered application name, return the module name from the entry
points data. If not, the passed in application name is returned.
:param str controller: The controller type
:param str application: The application name or module
:rtype: str | [
"Return",
"the",
"module",
"for",
"an",
"application",
".",
"If",
"it",
"s",
"a",
"entry",
"-",
"point",
"registered",
"application",
"name",
"return",
"the",
"module",
"name",
"from",
"the",
"entry",
"points",
"data",
".",
"If",
"not",
"the",
"passed",
"in",
"application",
"name",
"is",
"returned",
"."
] | python | train |
pjmark/NIMPA | resources/resources.py | https://github.com/pjmark/NIMPA/blob/3f4231fed2934a1d92e4cd8e9e153b0118e29d86/resources/resources.py#L281-L376 | def get_mmr_constants():
'''
Put all the constants together in a dictionary
'''
Cnt = {
'ISOTOPE':'F18',
'DCYCRR':DCYCRR,
'ALPHA':ALPHA,
'NRNG':NRNG,
'NSRNG':NSRNG,
'NCRS':NCRS,
'NCRSR':NCRSR,
'NBCKT':224,
'NSANGLES':A,
'NSBINS':W,
'Naw':-1, # number of total active bins per 2D sino
'NSN11': NSN11, # number of sinos in span-11
'NSN1': NSN1, # number of sinos in span-1
'NSN64': NSN64, # number of sinos in span-1 with no MRD limit
'MRD': MRD, # maximum ring difference RD
'SPN':SPAN, # span-1 (1), span-11 (11), ssrb (0)
'TFOV2':TFOV2, # squared radius of TFOV
'RNG_STRT':RNG_STRT, # limit axial extension by defining start and end ring
'RNG_END' :RNG_END, # this feature only works with span-1 processing (Cnt['SPN']=1)
        'SS_IMZ':SS_IMZ, #Scatter mu-map image size
'SS_IMY':SS_IMY,
'SS_IMX':SS_IMX,
'SS_VXZ':SS_VXZ,
'SS_VXY':SS_VXY,
'IS_VXZ':IS_VXZ,
'SSE_IMZ':SSE_IMZ, #Scatter emission image size
'SSE_IMY':SSE_IMY,
'SSE_IMX':SSE_IMX,
'SSE_VXZ':SSE_VXZ,
'SSE_VXY':SSE_VXY,
'SZ_IMZ':SZ_IMZ, #GPU optimised image size
'SZ_IMY':SZ_IMY,
'SZ_IMX':SZ_IMX,
'SZ_VOXZ':SZ_VOXZ,
'SZ_VOXY':SZ_VOXY,
'SZ_VOXZi':SZ_VOXZi,
'SO_IMZ':SO_IMZ, #Original image size (from Siemens)
'SO_IMY':SO_IMY,
'SO_IMX':SO_IMX,
'SO_VXZ':SO_VXZ,
'SO_VXY':SO_VXY,
'SO_VXX':SO_VXX,
'NSEG0':SEG0,
'RE':RE, #effective ring radius
'R':R,
'SEG':seg,
'MNRD':minrd,
'MXRD':maxrd,
'SCTRNG':sct_irng,
'TGAP':TGAP,
'OFFGAP':OFFGAP,
'AXR':AXR,
'R02':R02, #squared electron radius
        'LLD':LLD, #lower energy threshold
'E511':E511,
'ER':ER, #energy resolution
'COSUPSMX':COSUPSMX, #cosine of max allowed scatter angle
'NCOS':NCOS, #number of cos samples for LUT
'COSSTP':COSSTP, #cosine step
'ICOSSTP':ICOSSTP, #inverse of cosine step
'ETHRLD':ETHRLD, #intensity emission image threshold (used in scatter modelling)
'CLGHT':CLGHT, #speed of light [cm/s]
'CWND':CWND, #coincidence time window [ps]
'TOFBINN':TOFBINN, #number of TOF bins
'TOFBINS':TOFBINS, #TOF bin width [ps]
'TOFBIND':TOFBIND,
'ITOFBIND':ITOFBIND,
# affine and image size for the reconstructed image, assuming the centre of voxels in mm
'AFFINE':np.array([ [-10*SO_VXX, 0., 0., 5.*SO_IMX*SO_VXX ], #+5.*SO_VXX
[0., 10*SO_VXY, 0., -5.*SO_IMY*SO_VXY ], #+5.*SO_VXY
[0., 0., 10*SO_VXZ, -5.*SO_IMZ*SO_VXZ ], #-5.*SO_VXZ
[0., 0., 0., 1.]]),
'IMSIZE':np.array([SO_IMZ, SO_IMY, SO_IMX]),
'BTP':0, #1:non parametric bootstrap, 2: parametric bootstrap (recommended)
'BTPRT':1.0, # Ratio of bootstrapped/original events (enables downsampling)
'VERBOSE':False,
'SCTSCLEM':SCTSCLEM,
'SCTSCLMU':SCTSCLMU,
}
# get the setup for GPU and third party apps
Cnt = get_setup(Cnt=Cnt)
return Cnt | [
"def",
"get_mmr_constants",
"(",
")",
":",
"Cnt",
"=",
"{",
"'ISOTOPE'",
":",
"'F18'",
",",
"'DCYCRR'",
":",
"DCYCRR",
",",
"'ALPHA'",
":",
"ALPHA",
",",
"'NRNG'",
":",
"NRNG",
",",
"'NSRNG'",
":",
"NSRNG",
",",
"'NCRS'",
":",
"NCRS",
",",
"'NCRSR'",
":",
"NCRSR",
",",
"'NBCKT'",
":",
"224",
",",
"'NSANGLES'",
":",
"A",
",",
"'NSBINS'",
":",
"W",
",",
"'Naw'",
":",
"-",
"1",
",",
"# number of total active bins per 2D sino",
"'NSN11'",
":",
"NSN11",
",",
"# number of sinos in span-11",
"'NSN1'",
":",
"NSN1",
",",
"# number of sinos in span-1",
"'NSN64'",
":",
"NSN64",
",",
"# number of sinos in span-1 with no MRD limit",
"'MRD'",
":",
"MRD",
",",
"# maximum ring difference RD",
"'SPN'",
":",
"SPAN",
",",
"# span-1 (1), span-11 (11), ssrb (0)",
"'TFOV2'",
":",
"TFOV2",
",",
"# squared radius of TFOV",
"'RNG_STRT'",
":",
"RNG_STRT",
",",
"# limit axial extension by defining start and end ring",
"'RNG_END'",
":",
"RNG_END",
",",
"# this feature only works with span-1 processing (Cnt['SPN']=1)",
"'SS_IMZ'",
":",
"SS_IMZ",
",",
"#Scatter mu-map iamge size ",
"'SS_IMY'",
":",
"SS_IMY",
",",
"'SS_IMX'",
":",
"SS_IMX",
",",
"'SS_VXZ'",
":",
"SS_VXZ",
",",
"'SS_VXY'",
":",
"SS_VXY",
",",
"'IS_VXZ'",
":",
"IS_VXZ",
",",
"'SSE_IMZ'",
":",
"SSE_IMZ",
",",
"#Scatter emission image size",
"'SSE_IMY'",
":",
"SSE_IMY",
",",
"'SSE_IMX'",
":",
"SSE_IMX",
",",
"'SSE_VXZ'",
":",
"SSE_VXZ",
",",
"'SSE_VXY'",
":",
"SSE_VXY",
",",
"'SZ_IMZ'",
":",
"SZ_IMZ",
",",
"#GPU optimised image size",
"'SZ_IMY'",
":",
"SZ_IMY",
",",
"'SZ_IMX'",
":",
"SZ_IMX",
",",
"'SZ_VOXZ'",
":",
"SZ_VOXZ",
",",
"'SZ_VOXY'",
":",
"SZ_VOXY",
",",
"'SZ_VOXZi'",
":",
"SZ_VOXZi",
",",
"'SO_IMZ'",
":",
"SO_IMZ",
",",
"#Original image size (from Siemens)",
"'SO_IMY'",
":",
"SO_IMY",
",",
"'SO_IMX'",
":",
"SO_IMX",
",",
"'SO_VXZ'",
":",
"SO_VXZ",
",",
"'SO_VXY'",
":",
"SO_VXY",
",",
"'SO_VXX'",
":",
"SO_VXX",
",",
"'NSEG0'",
":",
"SEG0",
",",
"'RE'",
":",
"RE",
",",
"#effective ring radius",
"'R'",
":",
"R",
",",
"'SEG'",
":",
"seg",
",",
"'MNRD'",
":",
"minrd",
",",
"'MXRD'",
":",
"maxrd",
",",
"'SCTRNG'",
":",
"sct_irng",
",",
"'TGAP'",
":",
"TGAP",
",",
"'OFFGAP'",
":",
"OFFGAP",
",",
"'AXR'",
":",
"AXR",
",",
"'R02'",
":",
"R02",
",",
"#squared electron radius",
"'LLD'",
":",
"LLD",
",",
"#lower energy threashold",
"'E511'",
":",
"E511",
",",
"'ER'",
":",
"ER",
",",
"#energy resolution",
"'COSUPSMX'",
":",
"COSUPSMX",
",",
"#cosine of max allowed scatter angle",
"'NCOS'",
":",
"NCOS",
",",
"#number of cos samples for LUT",
"'COSSTP'",
":",
"COSSTP",
",",
"#cosine step",
"'ICOSSTP'",
":",
"ICOSSTP",
",",
"#inverse of cosine step",
"'ETHRLD'",
":",
"ETHRLD",
",",
"#intensity emission image threshold (used in scatter modelling)",
"'CLGHT'",
":",
"CLGHT",
",",
"#speed of light [cm/s]",
"'CWND'",
":",
"CWND",
",",
"#coincidence time window [ps]",
"'TOFBINN'",
":",
"TOFBINN",
",",
"#number of TOF bins",
"'TOFBINS'",
":",
"TOFBINS",
",",
"#TOF bin width [ps]",
"'TOFBIND'",
":",
"TOFBIND",
",",
"'ITOFBIND'",
":",
"ITOFBIND",
",",
"# affine and image size for the reconstructed image, assuming the centre of voxels in mm",
"'AFFINE'",
":",
"np",
".",
"array",
"(",
"[",
"[",
"-",
"10",
"*",
"SO_VXX",
",",
"0.",
",",
"0.",
",",
"5.",
"*",
"SO_IMX",
"*",
"SO_VXX",
"]",
",",
"#+5.*SO_VXX",
"[",
"0.",
",",
"10",
"*",
"SO_VXY",
",",
"0.",
",",
"-",
"5.",
"*",
"SO_IMY",
"*",
"SO_VXY",
"]",
",",
"#+5.*SO_VXY",
"[",
"0.",
",",
"0.",
",",
"10",
"*",
"SO_VXZ",
",",
"-",
"5.",
"*",
"SO_IMZ",
"*",
"SO_VXZ",
"]",
",",
"#-5.*SO_VXZ",
"[",
"0.",
",",
"0.",
",",
"0.",
",",
"1.",
"]",
"]",
")",
",",
"'IMSIZE'",
":",
"np",
".",
"array",
"(",
"[",
"SO_IMZ",
",",
"SO_IMY",
",",
"SO_IMX",
"]",
")",
",",
"'BTP'",
":",
"0",
",",
"#1:non parametric bootstrap, 2: parametric bootstrap (recommended)",
"'BTPRT'",
":",
"1.0",
",",
"# Ratio of bootstrapped/original events (enables downsampling)",
"'VERBOSE'",
":",
"False",
",",
"'SCTSCLEM'",
":",
"SCTSCLEM",
",",
"'SCTSCLMU'",
":",
"SCTSCLMU",
",",
"}",
"# get the setup for GPU and third party apps",
"Cnt",
"=",
"get_setup",
"(",
"Cnt",
"=",
"Cnt",
")",
"return",
"Cnt"
] | Put all the constants together in a dictionary | [
"Put",
"all",
"the",
"constants",
"together",
"in",
"a",
"dictionary"
] | python | train |
mitsei/dlkit | dlkit/runtime/impls/configuration/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/runtime/impls/configuration/objects.py#L247-L274 | def get_next_parameters(self, n=None):
"""Gets the next set of ``Parameters`` in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``Parameter`` elements
requested which must be less than or equal to
``available()``
return: (osid.configuration.Parameter) - an array of
``Parameter`` elements.The length of the array is less
than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
if n > self.available():
# !!! This is not quite as specified (see method docs) !!!
raise IllegalState('not enough elements available in this list')
else:
next_list = []
x = 0
while x < n:
try:
next_list.append(self.next())
except: # Need to specify exceptions here
raise OperationFailed()
x = x + 1
return next_list | [
"def",
"get_next_parameters",
"(",
"self",
",",
"n",
"=",
"None",
")",
":",
"# Implemented from template for osid.resource.ResourceList.get_next_resources",
"if",
"n",
">",
"self",
".",
"available",
"(",
")",
":",
"# !!! This is not quite as specified (see method docs) !!!",
"raise",
"IllegalState",
"(",
"'not enough elements available in this list'",
")",
"else",
":",
"next_list",
"=",
"[",
"]",
"x",
"=",
"0",
"while",
"x",
"<",
"n",
":",
"try",
":",
"next_list",
".",
"append",
"(",
"self",
".",
"next",
"(",
")",
")",
"except",
":",
"# Need to specify exceptions here",
"raise",
"OperationFailed",
"(",
")",
"x",
"=",
"x",
"+",
"1",
"return",
"next_list"
] | Gets the next set of ``Parameters`` in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``Parameter`` elements
requested which must be less than or equal to
``available()``
return: (osid.configuration.Parameter) - an array of
                ``Parameter`` elements. The length of the array is less
than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"next",
"set",
"of",
"Parameters",
"in",
"this",
"list",
"which",
"must",
"be",
"less",
"than",
"or",
"equal",
"to",
"the",
"return",
"from",
"available",
"()",
"."
] | python | train |
broadinstitute/fiss | firecloud/api.py | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/api.py#L1023-L1056 | def create_submission(wnamespace, workspace, cnamespace, config,
entity, etype, expression=None, use_callcache=True):
"""Submit job in FireCloud workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Method configuration namespace
config (str): Method configuration name
entity (str): Entity to submit job on. Should be the same type as
the root entity type of the method config, unless an
expression is used
etype (str): Entity type of root_entity
expression (str): Instead of using entity as the root entity,
evaluate the root entity from this expression.
use_callcache (bool): use call cache if applicable (default: true)
Swagger:
https://api.firecloud.org/#!/Submissions/createSubmission
"""
uri = "workspaces/{0}/{1}/submissions".format(wnamespace, workspace)
body = {
"methodConfigurationNamespace" : cnamespace,
"methodConfigurationName" : config,
"entityType" : etype,
"entityName" : entity,
"useCallCache" : use_callcache
}
if expression:
body['expression'] = expression
return __post(uri, json=body) | [
"def",
"create_submission",
"(",
"wnamespace",
",",
"workspace",
",",
"cnamespace",
",",
"config",
",",
"entity",
",",
"etype",
",",
"expression",
"=",
"None",
",",
"use_callcache",
"=",
"True",
")",
":",
"uri",
"=",
"\"workspaces/{0}/{1}/submissions\"",
".",
"format",
"(",
"wnamespace",
",",
"workspace",
")",
"body",
"=",
"{",
"\"methodConfigurationNamespace\"",
":",
"cnamespace",
",",
"\"methodConfigurationName\"",
":",
"config",
",",
"\"entityType\"",
":",
"etype",
",",
"\"entityName\"",
":",
"entity",
",",
"\"useCallCache\"",
":",
"use_callcache",
"}",
"if",
"expression",
":",
"body",
"[",
"'expression'",
"]",
"=",
"expression",
"return",
"__post",
"(",
"uri",
",",
"json",
"=",
"body",
")"
] | Submit job in FireCloud workspace.
Args:
        wnamespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Method configuration namespace
config (str): Method configuration name
entity (str): Entity to submit job on. Should be the same type as
the root entity type of the method config, unless an
expression is used
etype (str): Entity type of root_entity
expression (str): Instead of using entity as the root entity,
evaluate the root entity from this expression.
use_callcache (bool): use call cache if applicable (default: true)
Swagger:
https://api.firecloud.org/#!/Submissions/createSubmission | [
"Submit",
"job",
"in",
"FireCloud",
"workspace",
"."
] | python | train |
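A hedged usage sketch for the `create_submission` row above: the function and its signature come from the row itself, but every name below (project, workspace, config, entity) is an invented placeholder, and a real call needs valid FireCloud credentials.

from firecloud import api as fapi  # module given in the row's path

# All identifiers below are hypothetical.
resp = fapi.create_submission(
    wnamespace="my-billing-project",
    workspace="my-workspace",
    cnamespace="my-config-namespace",
    config="my-method-config",
    entity="sample_001",
    etype="sample",
)
print(resp.status_code)  # assumed: the underlying POST helper returns an HTTP response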
raiden-network/raiden | raiden/transfer/channel.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/channel.py#L973-L1007 | def get_batch_unlock(
end_state: NettingChannelEndState,
) -> Optional[MerkleTreeLeaves]:
""" Unlock proof for an entire merkle tree of pending locks
The unlock proof contains all the merkle tree data, tightly packed, needed by the token
network contract to verify the secret expiry and calculate the token amounts to transfer.
"""
if len(end_state.merkletree.layers[LEAVES]) == 0: # pylint: disable=len-as-condition
return None
lockhashes_to_locks = dict()
lockhashes_to_locks.update({
lock.lockhash: lock
for secrethash, lock in end_state.secrethashes_to_lockedlocks.items()
})
lockhashes_to_locks.update({
proof.lock.lockhash: proof.lock
for secrethash, proof in end_state.secrethashes_to_unlockedlocks.items()
})
lockhashes_to_locks.update({
proof.lock.lockhash: proof.lock
for secrethash, proof in end_state.secrethashes_to_onchain_unlockedlocks.items()
})
ordered_locks = [
lockhashes_to_locks[LockHash(lockhash)]
for lockhash in end_state.merkletree.layers[LEAVES]
]
# Not sure why the cast is needed here. The error was:
# Incompatible return value type
# (got "List[HashTimeLockState]", expected "Optional[MerkleTreeLeaves]")
return cast(MerkleTreeLeaves, ordered_locks) | [
"def",
"get_batch_unlock",
"(",
"end_state",
":",
"NettingChannelEndState",
",",
")",
"->",
"Optional",
"[",
"MerkleTreeLeaves",
"]",
":",
"if",
"len",
"(",
"end_state",
".",
"merkletree",
".",
"layers",
"[",
"LEAVES",
"]",
")",
"==",
"0",
":",
"# pylint: disable=len-as-condition",
"return",
"None",
"lockhashes_to_locks",
"=",
"dict",
"(",
")",
"lockhashes_to_locks",
".",
"update",
"(",
"{",
"lock",
".",
"lockhash",
":",
"lock",
"for",
"secrethash",
",",
"lock",
"in",
"end_state",
".",
"secrethashes_to_lockedlocks",
".",
"items",
"(",
")",
"}",
")",
"lockhashes_to_locks",
".",
"update",
"(",
"{",
"proof",
".",
"lock",
".",
"lockhash",
":",
"proof",
".",
"lock",
"for",
"secrethash",
",",
"proof",
"in",
"end_state",
".",
"secrethashes_to_unlockedlocks",
".",
"items",
"(",
")",
"}",
")",
"lockhashes_to_locks",
".",
"update",
"(",
"{",
"proof",
".",
"lock",
".",
"lockhash",
":",
"proof",
".",
"lock",
"for",
"secrethash",
",",
"proof",
"in",
"end_state",
".",
"secrethashes_to_onchain_unlockedlocks",
".",
"items",
"(",
")",
"}",
")",
"ordered_locks",
"=",
"[",
"lockhashes_to_locks",
"[",
"LockHash",
"(",
"lockhash",
")",
"]",
"for",
"lockhash",
"in",
"end_state",
".",
"merkletree",
".",
"layers",
"[",
"LEAVES",
"]",
"]",
"# Not sure why the cast is needed here. The error was:",
"# Incompatible return value type",
"# (got \"List[HashTimeLockState]\", expected \"Optional[MerkleTreeLeaves]\")",
"return",
"cast",
"(",
"MerkleTreeLeaves",
",",
"ordered_locks",
")"
] | Unlock proof for an entire merkle tree of pending locks
The unlock proof contains all the merkle tree data, tightly packed, needed by the token
network contract to verify the secret expiry and calculate the token amounts to transfer. | [
"Unlock",
"proof",
"for",
"an",
"entire",
"merkle",
"tree",
"of",
"pending",
"locks"
] | python | train |
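`get_batch_unlock` operates on Raiden's channel-state objects, so here is only a minimal, self-contained sketch of its ordering idea: merge several secrethash-to-lock mappings into one lockhash-keyed dict, then emit locks in the order the tree leaves dictate. All data below is invented.

# Hypothetical stand-ins for the three lock mappings in the row above.
locked = {"h1": "lock-1"}
unlocked = {"h2": "lock-2"}
onchain_unlocked = {"h3": "lock-3"}

lockhashes_to_locks = {}
for mapping in (locked, unlocked, onchain_unlocked):
    lockhashes_to_locks.update(mapping)

leaves = ["h2", "h3", "h1"]  # leaf order decides the final ordering
ordered_locks = [lockhashes_to_locks[h] for h in leaves]
print(ordered_locks)  # ['lock-2', 'lock-3', 'lock-1']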
cjdrake/pyeda | pyeda/boolalg/table.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/table.py#L176-L191 | def iter_zeros(self):
"""Iterate through the indices of all zero items."""
num = quotient = 0
while num < self._len:
chunk = self.data[quotient]
if chunk & self.zero_mask:
remainder = 0
while remainder < self.width and num < self._len:
item = (chunk >> remainder) & 3
if item == PC_ZERO:
yield num
remainder += 2
num += 1
else:
num += (self.width >> 1)
quotient += 1 | [
"def",
"iter_zeros",
"(",
"self",
")",
":",
"num",
"=",
"quotient",
"=",
"0",
"while",
"num",
"<",
"self",
".",
"_len",
":",
"chunk",
"=",
"self",
".",
"data",
"[",
"quotient",
"]",
"if",
"chunk",
"&",
"self",
".",
"zero_mask",
":",
"remainder",
"=",
"0",
"while",
"remainder",
"<",
"self",
".",
"width",
"and",
"num",
"<",
"self",
".",
"_len",
":",
"item",
"=",
"(",
"chunk",
">>",
"remainder",
")",
"&",
"3",
"if",
"item",
"==",
"PC_ZERO",
":",
"yield",
"num",
"remainder",
"+=",
"2",
"num",
"+=",
"1",
"else",
":",
"num",
"+=",
"(",
"self",
".",
"width",
">>",
"1",
")",
"quotient",
"+=",
"1"
] | Iterate through the indices of all zero items. | [
"Iterate",
"through",
"the",
"indices",
"of",
"all",
"zero",
"items",
"."
] | python | train |
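A self-contained sketch of the bit-packing trick behind `iter_zeros` above: items live two bits apiece inside integer chunks, and `(chunk >> offset) & 3` extracts one item. The 2-bit encoding below is an assumption made for illustration.

PC_ZERO = 0b01  # assumed 2-bit encoding of a "zero" item

chunk = 0b11_01_10_01  # four 2-bit items, least significant item first
for index in range(4):
    item = (chunk >> (2 * index)) & 3
    if item == PC_ZERO:
        print("zero item at index", index)  # prints indices 0 and 2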
lincolnloop/goodconf | goodconf/__init__.py | https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/__init__.py#L18-L32 | def _load_config(path: str) -> dict:
"""
Given a file path, parse it based on its extension (YAML or JSON)
and return the values as a Python dictionary. JSON is the default if an
extension can't be determined.
"""
__, ext = os.path.splitext(path)
if ext in ['.yaml', '.yml']:
import ruamel.yaml
loader = ruamel.yaml.safe_load
else:
loader = json.load
with open(path) as f:
config = loader(f)
return config | [
"def",
"_load_config",
"(",
"path",
":",
"str",
")",
"->",
"dict",
":",
"__",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"if",
"ext",
"in",
"[",
"'.yaml'",
",",
"'.yml'",
"]",
":",
"import",
"ruamel",
".",
"yaml",
"loader",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"else",
":",
"loader",
"=",
"json",
".",
"load",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"config",
"=",
"loader",
"(",
"f",
")",
"return",
"config"
] | Given a file path, parse it based on its extension (YAML or JSON)
and return the values as a Python dictionary. JSON is the default if an
extension can't be determined. | [
"Given",
"a",
"file",
"path",
"parse",
"it",
"based",
"on",
"its",
"extension",
"(",
"YAML",
"or",
"JSON",
")",
"and",
"return",
"the",
"values",
"as",
"a",
"Python",
"dictionary",
".",
"JSON",
"is",
"the",
"default",
"if",
"an",
"extension",
"can",
"t",
"be",
"determined",
"."
] | python | test |
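A usage sketch for `_load_config` above. It assumes the helper stays importable from the package root, as the row's path suggests; the file contents are invented.

import json
import tempfile

from goodconf import _load_config  # private helper; the import is an assumption

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"DEBUG": True}, f)

print(_load_config(f.name))  # {'DEBUG': True}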
ethpm/py-ethpm | ethpm/deployments.py | https://github.com/ethpm/py-ethpm/blob/81ed58d7c636fe00c6770edeb0401812b1a5e8fc/ethpm/deployments.py#L45-L61 | def get_instance(self, contract_name: str) -> None:
"""
Fetches a contract instance belonging to deployment
after validating contract name.
"""
self._validate_name_and_references(contract_name)
# Use a deployment's "contract_type" to lookup contract factory
# in case the deployment uses a contract alias
contract_type = self.deployment_data[contract_name]["contract_type"]
factory = self.contract_factories[contract_type]
address = to_canonical_address(self.deployment_data[contract_name]["address"])
contract_kwargs = {
"abi": factory.abi,
"bytecode": factory.bytecode,
"bytecode_runtime": factory.bytecode_runtime,
}
return self.w3.eth.contract(address=address, **contract_kwargs) | [
"def",
"get_instance",
"(",
"self",
",",
"contract_name",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"_validate_name_and_references",
"(",
"contract_name",
")",
"# Use a deployment's \"contract_type\" to lookup contract factory",
"# in case the deployment uses a contract alias",
"contract_type",
"=",
"self",
".",
"deployment_data",
"[",
"contract_name",
"]",
"[",
"\"contract_type\"",
"]",
"factory",
"=",
"self",
".",
"contract_factories",
"[",
"contract_type",
"]",
"address",
"=",
"to_canonical_address",
"(",
"self",
".",
"deployment_data",
"[",
"contract_name",
"]",
"[",
"\"address\"",
"]",
")",
"contract_kwargs",
"=",
"{",
"\"abi\"",
":",
"factory",
".",
"abi",
",",
"\"bytecode\"",
":",
"factory",
".",
"bytecode",
",",
"\"bytecode_runtime\"",
":",
"factory",
".",
"bytecode_runtime",
",",
"}",
"return",
"self",
".",
"w3",
".",
"eth",
".",
"contract",
"(",
"address",
"=",
"address",
",",
"*",
"*",
"contract_kwargs",
")"
] | Fetches a contract instance belonging to deployment
after validating contract name. | [
"Fetches",
"a",
"contract",
"instance",
"belonging",
"to",
"deployment",
"after",
"validating",
"contract",
"name",
"."
] | python | train |
gwpy/gwpy | gwpy/signal/spectral/_ui.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L246-L263 | def psd(timeseries, method_func, *args, **kwargs):
"""Generate a PSD using a method function
All arguments are presumed to be given in physical units
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`, `tuple`
the data to process, or a 2-tuple of series to correlate
method_func : `callable`
the function that will be called to perform the signal processing
*args, **kwargs
other arguments to pass to ``method_func`` when calling
"""
# decorator has translated the arguments for us, so just call psdn()
return _psdn(timeseries, method_func, *args, **kwargs) | [
"def",
"psd",
"(",
"timeseries",
",",
"method_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# decorator has translated the arguments for us, so just call psdn()",
"return",
"_psdn",
"(",
"timeseries",
",",
"method_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Generate a PSD using a method function
All arguments are presumed to be given in physical units
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`, `tuple`
the data to process, or a 2-tuple of series to correlate
method_func : `callable`
the function that will be called to perform the signal processing
*args, **kwargs
other arguments to pass to ``method_func`` when calling | [
"Generate",
"a",
"PSD",
"using",
"a",
"method",
"function"
] | python | train |
serge-sans-paille/pythran | pythran/types/types.py | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L349-L359 | def visit_BoolOp(self, node):
"""
Merge BoolOp operand type.
BoolOp are "and" and "or" and may return any of these results so all
operands should have the combinable type.
"""
# Visit subnodes
self.generic_visit(node)
# Merge all operands types.
[self.combine(node, value) for value in node.values] | [
"def",
"visit_BoolOp",
"(",
"self",
",",
"node",
")",
":",
"# Visit subnodes",
"self",
".",
"generic_visit",
"(",
"node",
")",
"# Merge all operands types.",
"[",
"self",
".",
"combine",
"(",
"node",
",",
"value",
")",
"for",
"value",
"in",
"node",
".",
"values",
"]"
] | Merge BoolOp operand type.
BoolOp are "and" and "or" and may return any of these results so all
operands should have the combinable type. | [
"Merge",
"BoolOp",
"operand",
"type",
"."
] | python | train |
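To make the visitor shape concrete, here is a self-contained `ast.NodeVisitor` in the same spirit as `visit_BoolOp` above: visit sub-nodes first, then touch every operand. It only mimics the structure; pythran's real `combine` performs type unification.

import ast

class BoolOpLister(ast.NodeVisitor):
    def visit_BoolOp(self, node):
        self.generic_visit(node)   # visit subnodes, as above
        for value in node.values:  # stand-in for combine(node, value)
            print(ast.dump(value))

BoolOpLister().visit(ast.parse("a and b or c"))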
paramiko/paramiko | paramiko/win_pageant.py | https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/win_pageant.py#L79-L114 | def _query_pageant(msg):
"""
Communication with the Pageant process is done through a shared
memory-mapped file.
"""
hwnd = _get_pageant_window_object()
if not hwnd:
# Pageant isn't running anymore; report failure by returning None.
return None
# create a name for the mmap
map_name = "PageantRequest%08x" % thread.get_ident()
pymap = _winapi.MemoryMap(
map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user()
)
with pymap:
pymap.write(msg)
# Create an array buffer containing the mapped filename
char_buffer = array.array("b", b(map_name) + zero_byte) # noqa
char_buffer_address, char_buffer_size = char_buffer.buffer_info()
# Create a string to use for the SendMessage function call
cds = COPYDATASTRUCT(
_AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address
)
response = ctypes.windll.user32.SendMessageA(
hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds)
)
if response > 0:
pymap.seek(0)
datalen = pymap.read(4)
retlen = struct.unpack(">I", datalen)[0]
return datalen + pymap.read(retlen)
return None | [
"def",
"_query_pageant",
"(",
"msg",
")",
":",
"hwnd",
"=",
"_get_pageant_window_object",
"(",
")",
"if",
"not",
"hwnd",
":",
"# Raise a failure to connect exception, pageant isn't running anymore!",
"return",
"None",
"# create a name for the mmap",
"map_name",
"=",
"\"PageantRequest%08x\"",
"%",
"thread",
".",
"get_ident",
"(",
")",
"pymap",
"=",
"_winapi",
".",
"MemoryMap",
"(",
"map_name",
",",
"_AGENT_MAX_MSGLEN",
",",
"_winapi",
".",
"get_security_attributes_for_user",
"(",
")",
")",
"with",
"pymap",
":",
"pymap",
".",
"write",
"(",
"msg",
")",
"# Create an array buffer containing the mapped filename",
"char_buffer",
"=",
"array",
".",
"array",
"(",
"\"b\"",
",",
"b",
"(",
"map_name",
")",
"+",
"zero_byte",
")",
"# noqa",
"char_buffer_address",
",",
"char_buffer_size",
"=",
"char_buffer",
".",
"buffer_info",
"(",
")",
"# Create a string to use for the SendMessage function call",
"cds",
"=",
"COPYDATASTRUCT",
"(",
"_AGENT_COPYDATA_ID",
",",
"char_buffer_size",
",",
"char_buffer_address",
")",
"response",
"=",
"ctypes",
".",
"windll",
".",
"user32",
".",
"SendMessageA",
"(",
"hwnd",
",",
"win32con_WM_COPYDATA",
",",
"ctypes",
".",
"sizeof",
"(",
"cds",
")",
",",
"ctypes",
".",
"byref",
"(",
"cds",
")",
")",
"if",
"response",
">",
"0",
":",
"pymap",
".",
"seek",
"(",
"0",
")",
"datalen",
"=",
"pymap",
".",
"read",
"(",
"4",
")",
"retlen",
"=",
"struct",
".",
"unpack",
"(",
"\">I\"",
",",
"datalen",
")",
"[",
"0",
"]",
"return",
"datalen",
"+",
"pymap",
".",
"read",
"(",
"retlen",
")",
"return",
"None"
] | Communication with the Pageant process is done through a shared
memory-mapped file. | [
"Communication",
"with",
"the",
"Pageant",
"process",
"is",
"done",
"through",
"a",
"shared",
"memory",
"-",
"mapped",
"file",
"."
] | python | train |
google/grr | grr/server/grr_response_server/data_store.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/data_store.py#L1238-L1273 | def IndexReadPostingLists(self,
index_urn,
keywords,
start_time,
end_time,
last_seen_map=None):
"""Finds all objects associated with any of the keywords.
Args:
index_urn: The base urn of the index.
keywords: A collection of keywords that we are interested in.
start_time: Only considers keywords added at or after this point in time.
end_time: Only considers keywords at or before this point in time.
last_seen_map: If present, is treated as a dict and populated to map pairs
(keyword, name) to the timestamp of the latest connection found.
Returns:
A dict mapping each keyword to a set of relevant names.
"""
keyword_urns = {self._KeywordToURN(index_urn, k): k for k in keywords}
result = {}
for kw in keywords:
result[kw] = set()
for keyword_urn, value in self.MultiResolvePrefix(
list(iterkeys(keyword_urns)),
self._INDEX_PREFIX,
timestamp=(start_time, end_time + 1)):
for column, _, ts in value:
kw = keyword_urns[keyword_urn]
name = column[self._INDEX_PREFIX_LEN:]
result[kw].add(name)
if last_seen_map is not None:
last_seen_map[(kw, name)] = max(last_seen_map.get((kw, name), -1), ts)
return result | [
"def",
"IndexReadPostingLists",
"(",
"self",
",",
"index_urn",
",",
"keywords",
",",
"start_time",
",",
"end_time",
",",
"last_seen_map",
"=",
"None",
")",
":",
"keyword_urns",
"=",
"{",
"self",
".",
"_KeywordToURN",
"(",
"index_urn",
",",
"k",
")",
":",
"k",
"for",
"k",
"in",
"keywords",
"}",
"result",
"=",
"{",
"}",
"for",
"kw",
"in",
"keywords",
":",
"result",
"[",
"kw",
"]",
"=",
"set",
"(",
")",
"for",
"keyword_urn",
",",
"value",
"in",
"self",
".",
"MultiResolvePrefix",
"(",
"list",
"(",
"iterkeys",
"(",
"keyword_urns",
")",
")",
",",
"self",
".",
"_INDEX_PREFIX",
",",
"timestamp",
"=",
"(",
"start_time",
",",
"end_time",
"+",
"1",
")",
")",
":",
"for",
"column",
",",
"_",
",",
"ts",
"in",
"value",
":",
"kw",
"=",
"keyword_urns",
"[",
"keyword_urn",
"]",
"name",
"=",
"column",
"[",
"self",
".",
"_INDEX_PREFIX_LEN",
":",
"]",
"result",
"[",
"kw",
"]",
".",
"add",
"(",
"name",
")",
"if",
"last_seen_map",
"is",
"not",
"None",
":",
"last_seen_map",
"[",
"(",
"kw",
",",
"name",
")",
"]",
"=",
"max",
"(",
"last_seen_map",
".",
"get",
"(",
"(",
"kw",
",",
"name",
")",
",",
"-",
"1",
")",
",",
"ts",
")",
"return",
"result"
] | Finds all objects associated with any of the keywords.
Args:
index_urn: The base urn of the index.
keywords: A collection of keywords that we are interested in.
start_time: Only considers keywords added at or after this point in time.
end_time: Only considers keywords at or before this point in time.
last_seen_map: If present, is treated as a dict and populated to map pairs
(keyword, name) to the timestamp of the latest connection found.
Returns:
A dict mapping each keyword to a set of relevant names. | [
"Finds",
"all",
"objects",
"associated",
"with",
"any",
"of",
"the",
"keywords",
"."
] | python | train |
spacetelescope/acstools | acstools/acszpt.py | https://github.com/spacetelescope/acstools/blob/bbf8dd080cefcbf88529ec87c420f9e1b8002554/acstools/acszpt.py#L250-L283 | def _check_date(self, fmt='%Y-%m-%d'):
"""Convenience method for determining if the input date is valid.
Parameters
----------
fmt : str
The format of the date string. The default is ``%Y-%m-%d``, which
corresponds to ``YYYY-MM-DD``.
Returns
-------
status : str or `None`
If the date is valid, returns `None`. If the date is invalid,
returns a message explaining the issue.
"""
result = None
try:
dt_obj = dt.datetime.strptime(self.date, fmt)
except ValueError:
result = '{} does not match YYYY-MM-DD format'.format(self.date)
else:
if dt_obj < self._acs_installation_date:
result = ('The observation date cannot occur '
'before ACS was installed ({})'
.format(self._acs_installation_date.strftime(fmt)))
elif dt_obj > self._extrapolation_date:
result = ('The observation date cannot occur after the '
'maximum allowable date, {}. Extrapolations of the '
'instrument throughput after this date lead to '
'high uncertainties and are therefore invalid.'
.format(self._extrapolation_date.strftime(fmt)))
finally:
return result | [
"def",
"_check_date",
"(",
"self",
",",
"fmt",
"=",
"'%Y-%m-%d'",
")",
":",
"result",
"=",
"None",
"try",
":",
"dt_obj",
"=",
"dt",
".",
"datetime",
".",
"strptime",
"(",
"self",
".",
"date",
",",
"fmt",
")",
"except",
"ValueError",
":",
"result",
"=",
"'{} does not match YYYY-MM-DD format'",
".",
"format",
"(",
"self",
".",
"date",
")",
"else",
":",
"if",
"dt_obj",
"<",
"self",
".",
"_acs_installation_date",
":",
"result",
"=",
"(",
"'The observation date cannot occur '",
"'before ACS was installed ({})'",
".",
"format",
"(",
"self",
".",
"_acs_installation_date",
".",
"strftime",
"(",
"fmt",
")",
")",
")",
"elif",
"dt_obj",
">",
"self",
".",
"_extrapolation_date",
":",
"result",
"=",
"(",
"'The observation date cannot occur after the '",
"'maximum allowable date, {}. Extrapolations of the '",
"'instrument throughput after this date lead to '",
"'high uncertainties and are therefore invalid.'",
".",
"format",
"(",
"self",
".",
"_extrapolation_date",
".",
"strftime",
"(",
"fmt",
")",
")",
")",
"finally",
":",
"return",
"result"
] | Convenience method for determining if the input date is valid.
Parameters
----------
fmt : str
The format of the date string. The default is ``%Y-%m-%d``, which
corresponds to ``YYYY-MM-DD``.
Returns
-------
status : str or `None`
If the date is valid, returns `None`. If the date is invalid,
returns a message explaining the issue. | [
"Convenience",
"method",
"for",
"determining",
"if",
"the",
"input",
"date",
"is",
"valid",
"."
] | python | train |
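A standalone sketch of the validation idea in `_check_date` above: parse the string, then bound-check the result and return a message rather than raising. ACS was installed in March 2002; the upper cutoff below is an invented placeholder.

import datetime as dt

def check_date(date, fmt="%Y-%m-%d"):
    installed = dt.datetime(2002, 3, 7)  # ACS installation
    cutoff = dt.datetime(2030, 1, 1)     # hypothetical extrapolation limit
    try:
        obj = dt.datetime.strptime(date, fmt)
    except ValueError:
        return "{} does not match YYYY-MM-DD format".format(date)
    if obj < installed:
        return "date precedes ACS installation"
    if obj > cutoff:
        return "date exceeds the extrapolation limit"
    return None  # valid

print(check_date("2016-04-01"))  # None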
autokey/autokey | lib/autokey/qtapp.py | https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/qtapp.py#L281-L292 | def shutdown(self):
"""
Shut down the entire application.
"""
logging.info("Shutting down")
self.closeAllWindows()
self.notifier.hide()
self.service.shutdown()
self.monitor.stop()
self.quit()
os.remove(common.LOCK_FILE) # TODO: maybe use atexit to remove the lock/pid file?
logging.debug("All shutdown tasks complete... quitting") | [
"def",
"shutdown",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"\"Shutting down\"",
")",
"self",
".",
"closeAllWindows",
"(",
")",
"self",
".",
"notifier",
".",
"hide",
"(",
")",
"self",
".",
"service",
".",
"shutdown",
"(",
")",
"self",
".",
"monitor",
".",
"stop",
"(",
")",
"self",
".",
"quit",
"(",
")",
"os",
".",
"remove",
"(",
"common",
".",
"LOCK_FILE",
")",
"# TODO: maybe use atexit to remove the lock/pid file?",
"logging",
".",
"debug",
"(",
"\"All shutdown tasks complete... quitting\"",
")"
] | Shut down the entire application. | [
"Shut",
"down",
"the",
"entire",
"application",
"."
] | python | train |
fastai/fastai | fastai/vision/data.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/data.py#L264-L266 | def open(self, fn):
"Open image in `fn`, subclass and overwrite for custom behavior."
return open_image(fn, convert_mode=self.convert_mode, after_open=self.after_open) | [
"def",
"open",
"(",
"self",
",",
"fn",
")",
":",
"return",
"open_image",
"(",
"fn",
",",
"convert_mode",
"=",
"self",
".",
"convert_mode",
",",
"after_open",
"=",
"self",
".",
"after_open",
")"
] | Open image in `fn`, subclass and overwrite for custom behavior. | [
"Open",
"image",
"in",
"fn",
"subclass",
"and",
"overwrite",
"for",
"custom",
"behavior",
"."
] | python | train |
ladybug-tools/ladybug | ladybug/wea.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/wea.py#L219-L247 | def from_stat_file(cls, statfile, timestep=1, is_leap_year=False):
"""Create an ASHRAE Revised Clear Sky wea object from the monthly sky
optical depths in a .stat file.
Args:
statfile: Full path to the .stat file.
timestep: An optional integer to set the number of time steps per
hour. Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are representing a leap year.
Default is False.
"""
stat = STAT(statfile)
# check to be sure the stat file does not have missing tau values
def check_missing(opt_data, data_name):
if opt_data == []:
raise ValueError('Stat file contains no optical data.')
for i, x in enumerate(opt_data):
if x is None:
raise ValueError(
'Missing optical depth data for {} at month {}'.format(
data_name, i)
)
check_missing(stat.monthly_tau_beam, 'monthly_tau_beam')
check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse')
return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam,
stat.monthly_tau_diffuse, timestep,
is_leap_year) | [
"def",
"from_stat_file",
"(",
"cls",
",",
"statfile",
",",
"timestep",
"=",
"1",
",",
"is_leap_year",
"=",
"False",
")",
":",
"stat",
"=",
"STAT",
"(",
"statfile",
")",
"# check to be sure the stat file does not have missing tau values",
"def",
"check_missing",
"(",
"opt_data",
",",
"data_name",
")",
":",
"if",
"opt_data",
"==",
"[",
"]",
":",
"raise",
"ValueError",
"(",
"'Stat file contains no optical data.'",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"opt_data",
")",
":",
"if",
"x",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Missing optical depth data for {} at month {}'",
".",
"format",
"(",
"data_name",
",",
"i",
")",
")",
"check_missing",
"(",
"stat",
".",
"monthly_tau_beam",
",",
"'monthly_tau_beam'",
")",
"check_missing",
"(",
"stat",
".",
"monthly_tau_diffuse",
",",
"'monthly_tau_diffuse'",
")",
"return",
"cls",
".",
"from_ashrae_revised_clear_sky",
"(",
"stat",
".",
"location",
",",
"stat",
".",
"monthly_tau_beam",
",",
"stat",
".",
"monthly_tau_diffuse",
",",
"timestep",
",",
"is_leap_year",
")"
] | Create an ASHRAE Revised Clear Sky wea object from the monthly sky
optical depths in a .stat file.
Args:
statfile: Full path to the .stat file.
timestep: An optional integer to set the number of time steps per
hour. Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are representing a leap year.
Default is False. | [
"Create",
"an",
"ASHRAE",
"Revised",
"Clear",
"Sky",
"wea",
"object",
"from",
"the",
"monthly",
"sky",
"optical",
"depths",
"in",
"a",
".",
"stat",
"file",
"."
] | python | train |
pgjones/quart | quart/app.py | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L778-L795 | def template_global(self, name: Optional[str]=None) -> Callable:
"""Add a template global.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.template_global('name')
def five():
return 5
Arguments:
name: The global name (defaults to function name).
"""
def decorator(func: Callable) -> Callable:
self.add_template_global(func, name=name)
return func
return decorator | [
"def",
"template_global",
"(",
"self",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Callable",
":",
"def",
"decorator",
"(",
"func",
":",
"Callable",
")",
"->",
"Callable",
":",
"self",
".",
"add_template_global",
"(",
"func",
",",
"name",
"=",
"name",
")",
"return",
"func",
"return",
"decorator"
] | Add a template global.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.template_global('name')
def five():
return 5
Arguments:
name: The global name (defaults to function name). | [
"Add",
"a",
"template",
"global",
"."
] | python | train |
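The decorator-with-optional-name pattern used by `template_global` above, reduced to a self-contained sketch with a plain dict standing in for the Quart app:

registry = {}  # stand-in for the app's template globals

def template_global(name=None):
    def decorator(func):
        registry[name or func.__name__] = func
        return func
    return decorator

@template_global("five")
def _five():
    return 5

print(registry["five"]())  # 5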
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L1043-L1063 | def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer) | [
"def",
"best_match",
"(",
"self",
",",
"req",
",",
"working_set",
",",
"installer",
"=",
"None",
")",
":",
"dist",
"=",
"working_set",
".",
"find",
"(",
"req",
")",
"if",
"dist",
"is",
"not",
"None",
":",
"return",
"dist",
"for",
"dist",
"in",
"self",
"[",
"req",
".",
"key",
"]",
":",
"if",
"dist",
"in",
"req",
":",
"return",
"dist",
"# try to download/install",
"return",
"self",
".",
"obtain",
"(",
"req",
",",
"installer",
")"
] | Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned. | [
"Find",
"distribution",
"best",
"matching",
"req",
"and",
"usable",
"on",
"working_set"
] | python | test |
josiahcarlson/rom | rom/model.py | https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/model.py#L523-L530 | def copy(self):
'''
Creates a shallow copy of the given entity (any entities that can be
retrieved from a OneToMany relationship will not be copied).
'''
x = self.to_dict()
x.pop(self._pkey)
return self.__class__(**x) | [
"def",
"copy",
"(",
"self",
")",
":",
"x",
"=",
"self",
".",
"to_dict",
"(",
")",
"x",
".",
"pop",
"(",
"self",
".",
"_pkey",
")",
"return",
"self",
".",
"__class__",
"(",
"*",
"*",
"x",
")"
] | Creates a shallow copy of the given entity (any entities that can be
retrieved from a OneToMany relationship will not be copied). | [
"Creates",
"a",
"shallow",
"copy",
"of",
"the",
"given",
"entity",
"(",
"any",
"entities",
"that",
"can",
"be",
"retrieved",
"from",
"a",
"OneToMany",
"relationship",
"will",
"not",
"be",
"copied",
")",
"."
] | python | test |
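A minimal illustration of the copy idiom above (serialize, drop the primary key, rebuild); the `Entity` class here is invented for the example:

class Entity:
    _pkey = "id"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return dict(self.__dict__)

    def copy(self):
        x = self.to_dict()
        x.pop(self._pkey)           # the clone gets no primary key
        return self.__class__(**x)

print(Entity(id=7, name="a").copy().to_dict())  # {'name': 'a'}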
awslabs/sockeye | sockeye/output_handler.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/output_handler.py#L253-L276 | def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
line = "{sent_id} ||| {target} ||| {score:f} ||| {source} ||| {source_len:d} ||| {target_len:d}\n"
self.stream.write(line.format(sent_id=t_input.sentence_id,
target=" ".join(t_output.tokens),
score=t_output.score,
source=" ".join(t_input.tokens),
source_len=len(t_input.tokens),
target_len=len(t_output.tokens)))
attention_matrix = t_output.attention_matrix.T
for i in range(0, attention_matrix.shape[0]):
attention_vector = attention_matrix[i]
self.stream.write(" ".join(["%f" % value for value in attention_vector]))
self.stream.write("\n")
self.stream.write("\n")
self.stream.flush() | [
"def",
"handle",
"(",
"self",
",",
"t_input",
":",
"inference",
".",
"TranslatorInput",
",",
"t_output",
":",
"inference",
".",
"TranslatorOutput",
",",
"t_walltime",
":",
"float",
"=",
"0.",
")",
":",
"line",
"=",
"\"{sent_id} ||| {target} ||| {score:f} ||| {source} ||| {source_len:d} ||| {target_len:d}\\n\"",
"self",
".",
"stream",
".",
"write",
"(",
"line",
".",
"format",
"(",
"sent_id",
"=",
"t_input",
".",
"sentence_id",
",",
"target",
"=",
"\" \"",
".",
"join",
"(",
"t_output",
".",
"tokens",
")",
",",
"score",
"=",
"t_output",
".",
"score",
",",
"source",
"=",
"\" \"",
".",
"join",
"(",
"t_input",
".",
"tokens",
")",
",",
"source_len",
"=",
"len",
"(",
"t_input",
".",
"tokens",
")",
",",
"target_len",
"=",
"len",
"(",
"t_output",
".",
"tokens",
")",
")",
")",
"attention_matrix",
"=",
"t_output",
".",
"attention_matrix",
".",
"T",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"attention_matrix",
".",
"shape",
"[",
"0",
"]",
")",
":",
"attention_vector",
"=",
"attention_matrix",
"[",
"i",
"]",
"self",
".",
"stream",
".",
"write",
"(",
"\" \"",
".",
"join",
"(",
"[",
"\"%f\"",
"%",
"value",
"for",
"value",
"in",
"attention_vector",
"]",
")",
")",
"self",
".",
"stream",
".",
"write",
"(",
"\"\\n\"",
")",
"self",
".",
"stream",
".",
"write",
"(",
"\"\\n\"",
")",
"self",
".",
"stream",
".",
"flush",
"(",
")"
] | :param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation. | [
":",
"param",
"t_input",
":",
"Translator",
"input",
".",
":",
"param",
"t_output",
":",
"Translator",
"output",
".",
":",
"param",
"t_walltime",
":",
"Total",
"wall",
"-",
"clock",
"time",
"for",
"translation",
"."
] | python | train |
vpelletier/python-libusb1 | usb1/__init__.py | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2590-L2650 | def hotplugRegisterCallback(
self, callback,
# pylint: disable=undefined-variable
events=HOTPLUG_EVENT_DEVICE_ARRIVED | HOTPLUG_EVENT_DEVICE_LEFT,
flags=HOTPLUG_ENUMERATE,
vendor_id=HOTPLUG_MATCH_ANY,
product_id=HOTPLUG_MATCH_ANY,
dev_class=HOTPLUG_MATCH_ANY,
# pylint: enable=undefined-variable
):
"""
Registers a hotplug callback.
On success, returns an opaque value which can be passed to
hotplugDeregisterCallback.
Callback must accept the following positional arguments:
- this USBContext instance
- an USBDevice instance
If device has left, configuration descriptors may not be
available. Its device descriptor will be available.
- event type, one of:
HOTPLUG_EVENT_DEVICE_ARRIVED
HOTPLUG_EVENT_DEVICE_LEFT
Callback must return whether it must be unregistered (any true value
to be unregistered, any false value to be kept registered).
Note: given callback will be invoked during event handling, meaning
it cannot call any synchronous libusb function.
"""
def wrapped_callback(context_p, device_p, event, _):
assert addressof(context_p.contents) == addressof(
self.__context_p.contents), (context_p, self.__context_p)
device = USBDevice(
self,
device_p,
# pylint: disable=undefined-variable
event != HOTPLUG_EVENT_DEVICE_LEFT,
# pylint: enable=undefined-variable
)
self.__close_set.add(device)
unregister = bool(callback(
self,
device,
event,
))
if unregister:
del self.__hotplug_callback_dict[handle]
return unregister
handle = c_int()
callback_p = libusb1.libusb_hotplug_callback_fn_p(wrapped_callback)
mayRaiseUSBError(libusb1.libusb_hotplug_register_callback(
self.__context_p, events, flags, vendor_id, product_id, dev_class,
callback_p, None, byref(handle),
))
handle = handle.value
# Keep strong references
assert handle not in self.__hotplug_callback_dict, (
handle,
self.__hotplug_callback_dict,
)
self.__hotplug_callback_dict[handle] = (callback_p, wrapped_callback)
return handle | [
"def",
"hotplugRegisterCallback",
"(",
"self",
",",
"callback",
",",
"# pylint: disable=undefined-variable",
"events",
"=",
"HOTPLUG_EVENT_DEVICE_ARRIVED",
"|",
"HOTPLUG_EVENT_DEVICE_LEFT",
",",
"flags",
"=",
"HOTPLUG_ENUMERATE",
",",
"vendor_id",
"=",
"HOTPLUG_MATCH_ANY",
",",
"product_id",
"=",
"HOTPLUG_MATCH_ANY",
",",
"dev_class",
"=",
"HOTPLUG_MATCH_ANY",
",",
"# pylint: enable=undefined-variable",
")",
":",
"def",
"wrapped_callback",
"(",
"context_p",
",",
"device_p",
",",
"event",
",",
"_",
")",
":",
"assert",
"addressof",
"(",
"context_p",
".",
"contents",
")",
"==",
"addressof",
"(",
"self",
".",
"__context_p",
".",
"contents",
")",
",",
"(",
"context_p",
",",
"self",
".",
"__context_p",
")",
"device",
"=",
"USBDevice",
"(",
"self",
",",
"device_p",
",",
"# pylint: disable=undefined-variable",
"event",
"!=",
"HOTPLUG_EVENT_DEVICE_LEFT",
",",
"# pylint: enable=undefined-variable",
")",
"self",
".",
"__close_set",
".",
"add",
"(",
"device",
")",
"unregister",
"=",
"bool",
"(",
"callback",
"(",
"self",
",",
"device",
",",
"event",
",",
")",
")",
"if",
"unregister",
":",
"del",
"self",
".",
"__hotplug_callback_dict",
"[",
"handle",
"]",
"return",
"unregister",
"handle",
"=",
"c_int",
"(",
")",
"callback_p",
"=",
"libusb1",
".",
"libusb_hotplug_callback_fn_p",
"(",
"wrapped_callback",
")",
"mayRaiseUSBError",
"(",
"libusb1",
".",
"libusb_hotplug_register_callback",
"(",
"self",
".",
"__context_p",
",",
"events",
",",
"flags",
",",
"vendor_id",
",",
"product_id",
",",
"dev_class",
",",
"callback_p",
",",
"None",
",",
"byref",
"(",
"handle",
")",
",",
")",
")",
"handle",
"=",
"handle",
".",
"value",
"# Keep strong references",
"assert",
"handle",
"not",
"in",
"self",
".",
"__hotplug_callback_dict",
",",
"(",
"handle",
",",
"self",
".",
"__hotplug_callback_dict",
",",
")",
"self",
".",
"__hotplug_callback_dict",
"[",
"handle",
"]",
"=",
"(",
"callback_p",
",",
"wrapped_callback",
")",
"return",
"handle"
] | Registers an hotplug callback.
On success, returns an opaque value which can be passed to
hotplugDeregisterCallback.
Callback must accept the following positional arguments:
- this USBContext instance
- an USBDevice instance
If device has left, configuration descriptors may not be
available. Its device descriptor will be available.
- event type, one of:
HOTPLUG_EVENT_DEVICE_ARRIVED
HOTPLUG_EVENT_DEVICE_LEFT
Callback must return whether it must be unregistered (any true value
to be unregistered, any false value to be kept registered).
Note: given callback will be invoked during event handling, meaning
it cannot call any synchronous libusb function. | [
"Registers",
"an",
"hotplug",
"callback",
".",
"On",
"success",
"returns",
"an",
"opaque",
"value",
"which",
"can",
"be",
"passed",
"to",
"hotplugDeregisterCallback",
".",
"Callback",
"must",
"accept",
"the",
"following",
"positional",
"arguments",
":",
"-",
"this",
"USBContext",
"instance",
"-",
"an",
"USBDevice",
"instance",
"If",
"device",
"has",
"left",
"configuration",
"descriptors",
"may",
"not",
"be",
"available",
".",
"Its",
"device",
"descriptor",
"will",
"be",
"available",
".",
"-",
"event",
"type",
"one",
"of",
":",
"HOTPLUG_EVENT_DEVICE_ARRIVED",
"HOTPLUG_EVENT_DEVICE_LEFT",
"Callback",
"must",
"return",
"whether",
"it",
"must",
"be",
"unregistered",
"(",
"any",
"true",
"value",
"to",
"be",
"unregistered",
"any",
"false",
"value",
"to",
"be",
"kept",
"registered",
")",
"."
] | python | train |
Erotemic/timerit | timerit/core.py | https://github.com/Erotemic/timerit/blob/625449f5359f757fb0ab8093b228dc8f37b8ffaf/timerit/core.py#L99-L101 | def chunks(seq, size):
""" simple two-line alternative to `ubelt.chunks` """
return (seq[pos:pos + size] for pos in range(0, len(seq), size)) | [
"def",
"chunks",
"(",
"seq",
",",
"size",
")",
":",
"return",
"(",
"seq",
"[",
"pos",
":",
"pos",
"+",
"size",
"]",
"for",
"pos",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"seq",
")",
",",
"size",
")",
")"
] | simple two-line alternative to `ubelt.chunks` | [
"simple",
"two",
"-",
"line",
"alternative",
"to",
"ubelt",
".",
"chunks"
] | python | train |
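A quick usage check for `chunks` above; it returns a generator, so wrap it in `list` to materialize the pieces:

def chunks(seq, size):
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))

print(list(chunks([1, 2, 3, 4, 5, 6, 7], 3)))  # [[1, 2, 3], [4, 5, 6], [7]]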
Parsely/probably | probably/hll.py | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hll.py#L38-L45 | def _get_rho(self, w, arr):
""" Return the least signifiant bit
O(N) in the worst case
"""
lsb = 0
while not (w & arr[lsb]):
lsb += 1
return lsb + 1 | [
"def",
"_get_rho",
"(",
"self",
",",
"w",
",",
"arr",
")",
":",
"lsb",
"=",
"0",
"while",
"not",
"(",
"w",
"&",
"arr",
"[",
"lsb",
"]",
")",
":",
"lsb",
"+=",
"1",
"return",
"lsb",
"+",
"1"
] | Return the least signifiant bit
O(N) in the worst case | [
"Return",
"the",
"least",
"signifiant",
"bit",
"O",
"(",
"N",
")",
"in",
"the",
"worst",
"case"
] | python | train |
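A self-contained sketch of the rho computation above: with `arr` holding single-bit masks, the loop finds the 1-indexed position of the least significant set bit. The mask table is an assumption about what the HLL precomputes.

arr = [1 << i for i in range(32)]  # assumed precomputed bit masks

def get_rho(w):
    lsb = 0
    while not (w & arr[lsb]):
        lsb += 1
    return lsb + 1

print(get_rho(0b10100))  # 3: the lowest set bit sits at index 2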
globus/globus-cli | globus_cli/commands/update.py | https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/update.py#L18-L25 | def _call_pip(*args):
"""
Invoke pip *safely* and in the *supported* way:
https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program
"""
all_args = [sys.executable, "-m", "pip"] + list(args)
print("> {}".format(" ".join(all_args)))
subprocess.check_call(all_args) | [
"def",
"_call_pip",
"(",
"*",
"args",
")",
":",
"all_args",
"=",
"[",
"sys",
".",
"executable",
",",
"\"-m\"",
",",
"\"pip\"",
"]",
"+",
"list",
"(",
"args",
")",
"print",
"(",
"\"> {}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"all_args",
")",
")",
")",
"subprocess",
".",
"check_call",
"(",
"all_args",
")"
] | Invoke pip *safely* and in the *supported* way:
https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program | [
"Invoke",
"pip",
"*",
"safely",
"*",
"and",
"in",
"the",
"*",
"supported",
"*",
"way",
":",
"https",
":",
"//",
"pip",
".",
"pypa",
".",
"io",
"/",
"en",
"/",
"latest",
"/",
"user_guide",
"/",
"#using",
"-",
"pip",
"-",
"from",
"-",
"your",
"-",
"program"
] | python | train |
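A dry-run variant of the command construction in `_call_pip` above; it only prints what would run, and the arguments are illustrative, not a recommendation:

import sys

def show_pip_command(*args):
    all_args = [sys.executable, "-m", "pip"] + list(args)
    print("> {}".format(" ".join(all_args)))

show_pip_command("install", "--upgrade", "globus-cli")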
hydraplatform/hydra-base | hydra_base/lib/scenario.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/scenario.py#L910-L936 | def get_attribute_data(attr_ids, node_ids, **kwargs):
"""
For a given attribute or set of attributes, return all the resources and
resource scenarios in the network
"""
node_attrs = db.DBSession.query(ResourceAttr).\
options(joinedload_all('attr')).\
filter(ResourceAttr.node_id.in_(node_ids),
ResourceAttr.attr_id.in_(attr_ids)).all()
ra_ids = []
for ra in node_attrs:
ra_ids.append(ra.id)
resource_scenarios = db.DBSession.query(ResourceScenario).filter(ResourceScenario.resource_attr_id.in_(ra_ids)).options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata')).order_by(ResourceScenario.scenario_id).all()
for rs in resource_scenarios:
if rs.dataset.hidden == 'Y':
try:
rs.dataset.check_read_permission(kwargs.get('user_id'))
except:
rs.dataset.value = None
db.DBSession.expunge(rs)
return node_attrs, resource_scenarios | [
"def",
"get_attribute_data",
"(",
"attr_ids",
",",
"node_ids",
",",
"*",
"*",
"kwargs",
")",
":",
"node_attrs",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceAttr",
")",
".",
"options",
"(",
"joinedload_all",
"(",
"'attr'",
")",
")",
".",
"filter",
"(",
"ResourceAttr",
".",
"node_id",
".",
"in_",
"(",
"node_ids",
")",
",",
"ResourceAttr",
".",
"attr_id",
".",
"in_",
"(",
"attr_ids",
")",
")",
".",
"all",
"(",
")",
"ra_ids",
"=",
"[",
"]",
"for",
"ra",
"in",
"node_attrs",
":",
"ra_ids",
".",
"append",
"(",
"ra",
".",
"id",
")",
"resource_scenarios",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceScenario",
")",
".",
"filter",
"(",
"ResourceScenario",
".",
"resource_attr_id",
".",
"in_",
"(",
"ra_ids",
")",
")",
".",
"options",
"(",
"joinedload",
"(",
"'resourceattr'",
")",
")",
".",
"options",
"(",
"joinedload_all",
"(",
"'dataset.metadata'",
")",
")",
".",
"order_by",
"(",
"ResourceScenario",
".",
"scenario_id",
")",
".",
"all",
"(",
")",
"for",
"rs",
"in",
"resource_scenarios",
":",
"if",
"rs",
".",
"dataset",
".",
"hidden",
"==",
"'Y'",
":",
"try",
":",
"rs",
".",
"dataset",
".",
"check_read_permission",
"(",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
")",
"except",
":",
"rs",
".",
"dataset",
".",
"value",
"=",
"None",
"db",
".",
"DBSession",
".",
"expunge",
"(",
"rs",
")",
"return",
"node_attrs",
",",
"resource_scenarios"
] | For a given attribute or set of attributes, return all the resources and
resource scenarios in the network | [
"For",
"a",
"given",
"attribute",
"or",
"set",
"of",
"attributes",
"return",
"all",
"the",
"resources",
"and",
"resource",
"scenarios",
"in",
"the",
"network"
] | python | train |
pandas-dev/pandas | pandas/io/parsers.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2837-L2856 | def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = 'Skipping line {row_num}: '.format(row_num=row_num)
sys.stderr.write(base + msg + '\n') | [
"def",
"_alert_malformed",
"(",
"self",
",",
"msg",
",",
"row_num",
")",
":",
"if",
"self",
".",
"error_bad_lines",
":",
"raise",
"ParserError",
"(",
"msg",
")",
"elif",
"self",
".",
"warn_bad_lines",
":",
"base",
"=",
"'Skipping line {row_num}: '",
".",
"format",
"(",
"row_num",
"=",
"row_num",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"base",
"+",
"msg",
"+",
"'\\n'",
")"
] | Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally. | [
"Alert",
"a",
"user",
"about",
"a",
"malformed",
"row",
"."
] | python | train |
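A standalone sketch of the error-versus-warning switch in `_alert_malformed` above, with plain booleans standing in for parser state:

import sys

def alert_malformed(msg, row_num, error_bad_lines=False, warn_bad_lines=True):
    if error_bad_lines:
        raise ValueError(msg)  # ParserError in pandas itself
    elif warn_bad_lines:
        sys.stderr.write("Skipping line {}: {}\n".format(row_num, msg))

alert_malformed("expected 3 fields, saw 4", row_num=12)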
CivicSpleen/ckcache | ckcache/filesystem.py | https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/filesystem.py#L924-L929 | def find(self, query):
'''Passes the query to the upstream, if it exists'''
if not self.upstream:
raise Exception("CompressionCache must have an upstream")
return self.upstream.find(query) | [
"def",
"find",
"(",
"self",
",",
"query",
")",
":",
"if",
"not",
"self",
".",
"upstream",
":",
"raise",
"Exception",
"(",
"\"CompressionCache must have an upstream\"",
")",
"return",
"self",
".",
"upstream",
".",
"find",
"(",
"query",
")"
] | Passes the query to the upstream, if it exists | [
"Passes",
"the",
"query",
"to",
"the",
"upstream",
"if",
"it",
"exists"
] | python | train |
cackharot/suds-py3 | suds/properties.py | https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/properties.py#L407-L416 | def prime(self):
"""
Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties}
"""
for d in self.definitions.values():
self.defined[d.name] = d.default
return self | [
"def",
"prime",
"(",
"self",
")",
":",
"for",
"d",
"in",
"self",
".",
"definitions",
".",
"values",
"(",
")",
":",
"self",
".",
"defined",
"[",
"d",
".",
"name",
"]",
"=",
"d",
".",
"default",
"return",
"self"
] | Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties} | [
"Prime",
"the",
"stored",
"values",
"based",
"on",
"default",
"values",
"found",
"in",
"property",
"definitions",
"."
] | python | train |
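The priming idea above in miniature: walk a table of definitions and copy each default into the live store. The `Definition` records are invented stand-ins:

from collections import namedtuple

Definition = namedtuple("Definition", "name default")

definitions = {"timeout": Definition("timeout", 90),
               "retries": Definition("retries", 3)}
defined = {}
for d in definitions.values():
    defined[d.name] = d.default

print(defined)  # {'timeout': 90, 'retries': 3}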
ebroecker/canmatrix | src/canmatrix/log.py | https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/log.py#L48-L61 | def set_log_level(logger, level): # type: (logging.Logger, int) -> None
"""Dynamic reconfiguration of the log level"""
if level > 2:
level = 2
if level < -1:
level = -1
levels = {
-1: logging.ERROR,
0: logging.WARN,
1: logging.INFO,
2: logging.DEBUG
}
logger.setLevel(levels[level]) | [
"def",
"set_log_level",
"(",
"logger",
",",
"level",
")",
":",
"# type: (logging.Logger, int) -> None",
"if",
"level",
">",
"2",
":",
"level",
"=",
"2",
"if",
"level",
"<",
"-",
"1",
":",
"level",
"=",
"-",
"1",
"levels",
"=",
"{",
"-",
"1",
":",
"logging",
".",
"ERROR",
",",
"0",
":",
"logging",
".",
"WARN",
",",
"1",
":",
"logging",
".",
"INFO",
",",
"2",
":",
"logging",
".",
"DEBUG",
"}",
"logger",
".",
"setLevel",
"(",
"levels",
"[",
"level",
"]",
")"
] | Dynamic reconfiguration of the log level | [
"Dynamic",
"reconfiguration",
"of",
"the",
"log",
"level"
] | python | train |
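A hedged usage sketch for `set_log_level` above, assuming `canmatrix.log` is importable under the path shown in the row:

import logging

from canmatrix import log  # import path inferred from the row, an assumption

logger = logging.getLogger("canmatrix")
log.set_log_level(logger, 2)   # DEBUG
log.set_log_level(logger, 99)  # clamped down to 2, i.e. DEBUG again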
minorg/pastpy | py/src/pastpy/gen/database/impl/online/online_database_object_detail_image.py | https://github.com/minorg/pastpy/blob/7d5d6d511629481850216565e7451b5dcb8027a9/py/src/pastpy/gen/database/impl/online/online_database_object_detail_image.py#L445-L487 | def write(self, oprot):
'''
Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.online.online_database_object_detail_image.OnlineDatabaseObjectDetailImage
'''
oprot.write_struct_begin('OnlineDatabaseObjectDetailImage')
oprot.write_field_begin(name='full_size_url', type=11, id=None)
oprot.write_string(self.full_size_url)
oprot.write_field_end()
oprot.write_field_begin(name='mediaid', type=11, id=None)
oprot.write_string(self.mediaid)
oprot.write_field_end()
oprot.write_field_begin(name='objectid', type=11, id=None)
oprot.write_string(self.objectid)
oprot.write_field_end()
oprot.write_field_begin(name='src', type=11, id=None)
oprot.write_string(self.src)
oprot.write_field_end()
oprot.write_field_begin(name='thumbnail_url', type=11, id=None)
oprot.write_string(self.thumbnail_url)
oprot.write_field_end()
oprot.write_field_begin(name='title', type=11, id=None)
oprot.write_string(self.title)
oprot.write_field_end()
oprot.write_field_begin(name='type', type=11, id=None)
oprot.write_string(str(self.type))
oprot.write_field_end()
oprot.write_field_stop()
oprot.write_struct_end()
return self | [
"def",
"write",
"(",
"self",
",",
"oprot",
")",
":",
"oprot",
".",
"write_struct_begin",
"(",
"'OnlineDatabaseObjectDetailImage'",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'full_size_url'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"self",
".",
"full_size_url",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'mediaid'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"self",
".",
"mediaid",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'objectid'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"self",
".",
"objectid",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'src'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"self",
".",
"src",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'thumbnail_url'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"self",
".",
"thumbnail_url",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'title'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"self",
".",
"title",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_begin",
"(",
"name",
"=",
"'type'",
",",
"type",
"=",
"11",
",",
"id",
"=",
"None",
")",
"oprot",
".",
"write_string",
"(",
"str",
"(",
"self",
".",
"type",
")",
")",
"oprot",
".",
"write_field_end",
"(",
")",
"oprot",
".",
"write_field_stop",
"(",
")",
"oprot",
".",
"write_struct_end",
"(",
")",
"return",
"self"
] | Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.online.online_database_object_detail_image.OnlineDatabaseObjectDetailImage | [
"Write",
"this",
"object",
"to",
"the",
"given",
"output",
"protocol",
"and",
"return",
"self",
"."
] | python | train |
earwig/mwparserfromhell | mwparserfromhell/wikicode.py | https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/wikicode.py#L163-L205 | def _do_weak_search(self, obj, recursive):
"""Search for an element that looks like *obj* within the node list.
This follows the same rules as :meth:`_do_strong_search` with some
differences. *obj* is treated as a string that might represent any
:class:`.Node`, :class:`.Wikicode`, or combination of the two present
in the node list. Thus, matching is weak (using string comparisons)
rather than strong (using ``is``). Because multiple nodes can match
*obj*, the result is a list of tuples instead of just one (however,
:exc:`ValueError` is still raised if nothing is found). Individual
matches will never overlap.
The tuples contain a new first element, *exact*, which is ``True`` if
we were able to match *obj* exactly to one or more adjacent nodes, or
``False`` if we found *obj* inside a node or incompletely spanning
multiple nodes.
"""
obj = parse_anything(obj)
if not obj or obj not in self:
raise ValueError(obj)
results = []
contexts = [self]
while contexts:
context = contexts.pop()
i = len(context.nodes) - 1
while i >= 0:
node = context.get(i)
if obj.get(-1) == node:
for j in range(-len(obj.nodes), -1):
if obj.get(j) != context.get(i + j + 1):
break
else:
i -= len(obj.nodes) - 1
index = slice(i, i + len(obj.nodes))
results.append((True, context, index))
elif recursive and obj in node:
contexts.extend(node.__children__())
i -= 1
if not results:
if not recursive:
raise ValueError(obj)
results.append((False, self, slice(0, len(self.nodes))))
return results | [
"def",
"_do_weak_search",
"(",
"self",
",",
"obj",
",",
"recursive",
")",
":",
"obj",
"=",
"parse_anything",
"(",
"obj",
")",
"if",
"not",
"obj",
"or",
"obj",
"not",
"in",
"self",
":",
"raise",
"ValueError",
"(",
"obj",
")",
"results",
"=",
"[",
"]",
"contexts",
"=",
"[",
"self",
"]",
"while",
"contexts",
":",
"context",
"=",
"contexts",
".",
"pop",
"(",
")",
"i",
"=",
"len",
"(",
"context",
".",
"nodes",
")",
"-",
"1",
"while",
"i",
">=",
"0",
":",
"node",
"=",
"context",
".",
"get",
"(",
"i",
")",
"if",
"obj",
".",
"get",
"(",
"-",
"1",
")",
"==",
"node",
":",
"for",
"j",
"in",
"range",
"(",
"-",
"len",
"(",
"obj",
".",
"nodes",
")",
",",
"-",
"1",
")",
":",
"if",
"obj",
".",
"get",
"(",
"j",
")",
"!=",
"context",
".",
"get",
"(",
"i",
"+",
"j",
"+",
"1",
")",
":",
"break",
"else",
":",
"i",
"-=",
"len",
"(",
"obj",
".",
"nodes",
")",
"-",
"1",
"index",
"=",
"slice",
"(",
"i",
",",
"i",
"+",
"len",
"(",
"obj",
".",
"nodes",
")",
")",
"results",
".",
"append",
"(",
"(",
"True",
",",
"context",
",",
"index",
")",
")",
"elif",
"recursive",
"and",
"obj",
"in",
"node",
":",
"contexts",
".",
"extend",
"(",
"node",
".",
"__children__",
"(",
")",
")",
"i",
"-=",
"1",
"if",
"not",
"results",
":",
"if",
"not",
"recursive",
":",
"raise",
"ValueError",
"(",
"obj",
")",
"results",
".",
"append",
"(",
"(",
"False",
",",
"self",
",",
"slice",
"(",
"0",
",",
"len",
"(",
"self",
".",
"nodes",
")",
")",
")",
")",
"return",
"results"
] | Search for an element that looks like *obj* within the node list.
This follows the same rules as :meth:`_do_strong_search` with some
differences. *obj* is treated as a string that might represent any
:class:`.Node`, :class:`.Wikicode`, or combination of the two present
in the node list. Thus, matching is weak (using string comparisons)
rather than strong (using ``is``). Because multiple nodes can match
*obj*, the result is a list of tuples instead of just one (however,
:exc:`ValueError` is still raised if nothing is found). Individual
matches will never overlap.
The tuples contain a new first element, *exact*, which is ``True`` if
we were able to match *obj* exactly to one or more adjacent nodes, or
``False`` if we found *obj* inside a node or incompletely spanning
multiple nodes. | [
"Search",
"for",
"an",
"element",
"that",
"looks",
"like",
"*",
"obj",
"*",
"within",
"the",
"node",
"list",
"."
] | python | train |
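`_do_weak_search` is private, but it backs public `Wikicode` operations such as `replace`, which matches strings weakly as described above. A hedged usage sketch of that public entry point (assuming `mwparserfromhell` is installed):

import mwparserfromhell

code = mwparserfromhell.parse("intro {{stub}} outro")
code.replace("{{stub}}", "{{expanded}}")  # weak, string-based matching
print(code)  # intro {{expanded}} outro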
phoebe-project/phoebe2 | phoebe/frontend/bundle.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L936-L953 | def undo(self, i=-1):
"""
Undo an item in the history logs
:parameter int i: integer for indexing (can be positive or
negative). Defaults to -1 if not provided (the latest
recorded history item)
:raises ValueError: if no history items have been recorded
"""
_history_enabled = self.history_enabled
param = self.get_history(i)
self.disable_history()
param.undo()
# TODO: do we really want to remove this? then what's the point of redo?
self.remove_parameter(uniqueid=param.uniqueid)
if _history_enabled:
self.enable_history() | [
"def",
"undo",
"(",
"self",
",",
"i",
"=",
"-",
"1",
")",
":",
"_history_enabled",
"=",
"self",
".",
"history_enabled",
"param",
"=",
"self",
".",
"get_history",
"(",
"i",
")",
"self",
".",
"disable_history",
"(",
")",
"param",
".",
"undo",
"(",
")",
"# TODO: do we really want to remove this? then what's the point of redo?",
"self",
".",
"remove_parameter",
"(",
"uniqueid",
"=",
"param",
".",
"uniqueid",
")",
"if",
"_history_enabled",
":",
"self",
".",
"enable_history",
"(",
")"
] | Undo an item in the history logs
:parameter int i: integer for indexing (can be positive or
negative). Defaults to -1 if not provided (the latest
recorded history item)
:raises ValueError: if no history items have been recorded | [
"Undo",
"an",
"item",
"in",
"the",
"history",
"logs"
] | python | train |
TheRealLink/pylgtv | pylgtv/webos_client.py | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L314-L317 | def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume') | [
"def",
"get_volume",
"(",
"self",
")",
":",
"self",
".",
"request",
"(",
"EP_GET_VOLUME",
")",
"return",
"0",
"if",
"self",
".",
"last_response",
"is",
"None",
"else",
"self",
".",
"last_response",
".",
"get",
"(",
"'payload'",
")",
".",
"get",
"(",
"'volume'",
")"
] | Get the current volume. | [
"Get",
"the",
"current",
"volume",
"."
] | python | train |
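A usage sketch for `get_volume` above; `WebOsClient` is pylgtv's public client, and the address below is a placeholder for a reachable TV:

from pylgtv import WebOsClient

client = WebOsClient("192.168.0.10")  # hypothetical TV address
print(client.get_volume())            # 0 when no response arrived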