repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition | avg_line_len |
---|---|---|---|---|---|---|---|---|---|
jmcgeheeiv/pyfakefs | pyfakefs/fake_pathlib.py | https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_pathlib.py#L229-L233 | def casefold_parts(self, parts):
"""Return the lower-case version of parts for a Windows filesystem."""
if self.filesystem.is_windows_fs:
return [p.lower() for p in parts]
return parts | [
"def",
"casefold_parts",
"(",
"self",
",",
"parts",
")",
":",
"if",
"self",
".",
"filesystem",
".",
"is_windows_fs",
":",
"return",
"[",
"p",
".",
"lower",
"(",
")",
"for",
"p",
"in",
"parts",
"]",
"return",
"parts"
]
| Return the lower-case version of parts for a Windows filesystem. | [
"Return",
"the",
"lower",
"-",
"case",
"version",
"of",
"parts",
"for",
"a",
"Windows",
"filesystem",
"."
]
| python | train | 43.2 |
hazelcast/hazelcast-python-client | hazelcast/proxy/map.py | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/map.py#L331-L350 | def get(self, key):
"""
Returns the value for the specified key, or ``None`` if this map does not contain this key.
**Warning:
This method returns a clone of original value, modifying the returned value does not change the actual value in
the map. One should put modified value back to make changes visible to all nodes.**
>>> value = map.get(key)
>>> value.update_some_property()
>>> map.put(key,value)
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the specified key.
:return: (object), the value for the specified key.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._get_internal(key_data) | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"check_not_none",
"(",
"key",
",",
"\"key can't be None\"",
")",
"key_data",
"=",
"self",
".",
"_to_data",
"(",
"key",
")",
"return",
"self",
".",
"_get_internal",
"(",
"key_data",
")"
]
| Returns the value for the specified key, or ``None`` if this map does not contain this key.
**Warning:
This method returns a clone of original value, modifying the returned value does not change the actual value in
the map. One should put modified value back to make changes visible to all nodes.**
>>> value = map.get(key)
>>> value.update_some_property()
>>> map.put(key,value)
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the specified key.
:return: (object), the value for the specified key. | [
"Returns",
"the",
"value",
"for",
"the",
"specified",
"key",
"or",
"None",
"if",
"this",
"map",
"does",
"not",
"contain",
"this",
"key",
"."
]
| python | train | 44.8 |
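
The `get` record above warns that the client hands back a clone, so a read-modify-write must end with a `put`. Below is a minimal sketch of that round trip, assuming a reachable cluster and the 3.x `hazelcast-python-client` shown in this row, where map-proxy methods return futures resolved with `.result()`; the map name and key are hypothetical.

```python
import hazelcast

client = hazelcast.HazelcastClient()           # cluster address: an assumption
settings = client.get_map("user-settings")     # hypothetical map name

settings.put("alice", {"theme": "light"}).result()

value = settings.get("alice").result()  # a clone of the stored entry
value["theme"] = "dark"                 # local change only; cluster still sees "light"

settings.put("alice", value).result()   # write back to make the change visible
client.shutdown()
```
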
inasafe/inasafe | safe/utilities/profiling.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/profiling.py#L83-L91 | def append(self, node):
"""To append a new child."""
if node.parent == self.key and not self.elapsed_time:
self.children.append(node)
else:
# Recursive call
for child in self.children:
if not child.elapsed_time:
child.append(node) | [
"def",
"append",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"parent",
"==",
"self",
".",
"key",
"and",
"not",
"self",
".",
"elapsed_time",
":",
"self",
".",
"children",
".",
"append",
"(",
"node",
")",
"else",
":",
"# Recursive call",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"not",
"child",
".",
"elapsed_time",
":",
"child",
".",
"append",
"(",
"node",
")"
]
| To append a new child. | [
"To",
"append",
"a",
"new",
"child",
"."
]
| python | train | 35.333333 |
scanny/python-pptx | pptx/chart/xlsx.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xlsx.py#L269-L298 | def _populate_worksheet(self, workbook, worksheet):
"""
Write chart data contents to *worksheet* in the bubble chart layout.
Write the data for each series to a separate three-column table with
X values in column A, Y values in column B, and bubble sizes in
column C. Place the series label in the first (heading) cell of the
values column.
"""
chart_num_format = workbook.add_format(
{'num_format': self._chart_data.number_format}
)
for series in self._chart_data:
series_num_format = (
workbook.add_format({'num_format': series.number_format})
)
offset = self.series_table_row_offset(series)
# write X values
worksheet.write_column(
offset+1, 0, series.x_values, chart_num_format
)
# write Y values
worksheet.write(offset, 1, series.name)
worksheet.write_column(
offset+1, 1, series.y_values, series_num_format
)
# write bubble sizes
worksheet.write(offset, 2, 'Size')
worksheet.write_column(
offset+1, 2, series.bubble_sizes, chart_num_format
) | [
"def",
"_populate_worksheet",
"(",
"self",
",",
"workbook",
",",
"worksheet",
")",
":",
"chart_num_format",
"=",
"workbook",
".",
"add_format",
"(",
"{",
"'num_format'",
":",
"self",
".",
"_chart_data",
".",
"number_format",
"}",
")",
"for",
"series",
"in",
"self",
".",
"_chart_data",
":",
"series_num_format",
"=",
"(",
"workbook",
".",
"add_format",
"(",
"{",
"'num_format'",
":",
"series",
".",
"number_format",
"}",
")",
")",
"offset",
"=",
"self",
".",
"series_table_row_offset",
"(",
"series",
")",
"# write X values",
"worksheet",
".",
"write_column",
"(",
"offset",
"+",
"1",
",",
"0",
",",
"series",
".",
"x_values",
",",
"chart_num_format",
")",
"# write Y values",
"worksheet",
".",
"write",
"(",
"offset",
",",
"1",
",",
"series",
".",
"name",
")",
"worksheet",
".",
"write_column",
"(",
"offset",
"+",
"1",
",",
"1",
",",
"series",
".",
"y_values",
",",
"series_num_format",
")",
"# write bubble sizes",
"worksheet",
".",
"write",
"(",
"offset",
",",
"2",
",",
"'Size'",
")",
"worksheet",
".",
"write_column",
"(",
"offset",
"+",
"1",
",",
"2",
",",
"series",
".",
"bubble_sizes",
",",
"chart_num_format",
")"
]
| Write chart data contents to *worksheet* in the bubble chart layout.
Write the data for each series to a separate three-column table with
X values in column A, Y values in column B, and bubble sizes in
column C. Place the series label in the first (heading) cell of the
values column. | [
"Write",
"chart",
"data",
"contents",
"to",
"*",
"worksheet",
"*",
"in",
"the",
"bubble",
"chart",
"layout",
".",
"Write",
"the",
"data",
"for",
"each",
"series",
"to",
"a",
"separate",
"three",
"-",
"column",
"table",
"with",
"X",
"values",
"in",
"column",
"A",
"Y",
"values",
"in",
"column",
"B",
"and",
"bubble",
"sizes",
"in",
"column",
"C",
".",
"Place",
"the",
"series",
"label",
"in",
"the",
"first",
"(",
"heading",
")",
"cell",
"of",
"the",
"values",
"column",
"."
]
| python | train | 41.4 |
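
The `_populate_worksheet` record lays each series out as three columns (X in column A, Y in column B, bubble sizes in column C) with the series name heading the Y column. Here is a sketch of that exact layout written with `xlsxwriter` directly; the series data and output filename are made up for illustration.

```python
import xlsxwriter

# Hypothetical data standing in for one chart series.
series = {"name": "Widgets", "x": [1, 2, 3], "y": [10, 20, 15], "sizes": [5, 9, 4]}

workbook = xlsxwriter.Workbook("bubble.xlsx")
worksheet = workbook.add_worksheet()

offset = 0  # first (and only) series starts at row 0
worksheet.write_column(offset + 1, 0, series["x"])      # column A: X values
worksheet.write(offset, 1, series["name"])              # heading cell for the Y column
worksheet.write_column(offset + 1, 1, series["y"])      # column B: Y values
worksheet.write(offset, 2, "Size")
worksheet.write_column(offset + 1, 2, series["sizes"])  # column C: bubble sizes

workbook.close()
```
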
singularityhub/singularity-cli | spython/oci/__init__.py | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/oci/__init__.py#L47-L64 | def get_container_id(self, container_id=None):
''' a helper function shared between functions that will return a
container_id. First preference goes to a container_id provided by
the user at runtime. Second preference goes to the container_id
instantiated with the client.
Parameters
==========
container_id: image uri to parse (required)
'''
# The user must provide a container_id, or have one with the client
if container_id == None and self.container_id == None:
bot.exit('You must provide a container_id.')
# Choose whichever is not None, with preference for function provided
container_id = container_id or self.container_id
return container_id | [
"def",
"get_container_id",
"(",
"self",
",",
"container_id",
"=",
"None",
")",
":",
"# The user must provide a container_id, or have one with the client",
"if",
"container_id",
"==",
"None",
"and",
"self",
".",
"container_id",
"==",
"None",
":",
"bot",
".",
"exit",
"(",
"'You must provide a container_id.'",
")",
"# Choose whichever is not None, with preference for function provided",
"container_id",
"=",
"container_id",
"or",
"self",
".",
"container_id",
"return",
"container_id"
]
| a helper function shared between functions that will return a
container_id. First preference goes to a container_id provided by
the user at runtime. Second preference goes to the container_id
instantiated with the client.
Parameters
==========
container_id: image uri to parse (required) | [
"a",
"helper",
"function",
"shared",
"between",
"functions",
"that",
"will",
"return",
"a",
"container_id",
".",
"First",
"preference",
"goes",
"to",
"a",
"container_id",
"provided",
"by",
"the",
"user",
"at",
"runtime",
".",
"Second",
"preference",
"goes",
"to",
"the",
"container_id",
"instantiated",
"with",
"the",
"client",
"."
]
| python | train | 43.111111 |
HPAC/matchpy | matchpy/matching/many_to_one.py | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/many_to_one.py#L775-L804 | def replace(self, expression: Expression, max_count: int=math.inf) -> Union[Expression, Sequence[Expression]]:
"""Replace all occurrences of the patterns according to the replacement rules.
Args:
expression:
The expression to which the replacement rules are applied.
max_count:
If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
are applied until there is no more match. If the set of replacement rules is not confluent,
the replacement might not terminate without a *max_count* set.
Returns:
The resulting expression after the application of the replacement rules. This can also be a sequence of
expressions, if the root expression is replaced with a sequence of expressions by a rule.
"""
replaced = True
replace_count = 0
while replaced and replace_count < max_count:
replaced = False
for subexpr, pos in preorder_iter_with_position(expression):
try:
replacement, subst = next(iter(self.matcher.match(subexpr)))
result = replacement(**subst)
expression = functions.replace(expression, pos, result)
replaced = True
break
except StopIteration:
pass
replace_count += 1
return expression | [
"def",
"replace",
"(",
"self",
",",
"expression",
":",
"Expression",
",",
"max_count",
":",
"int",
"=",
"math",
".",
"inf",
")",
"->",
"Union",
"[",
"Expression",
",",
"Sequence",
"[",
"Expression",
"]",
"]",
":",
"replaced",
"=",
"True",
"replace_count",
"=",
"0",
"while",
"replaced",
"and",
"replace_count",
"<",
"max_count",
":",
"replaced",
"=",
"False",
"for",
"subexpr",
",",
"pos",
"in",
"preorder_iter_with_position",
"(",
"expression",
")",
":",
"try",
":",
"replacement",
",",
"subst",
"=",
"next",
"(",
"iter",
"(",
"self",
".",
"matcher",
".",
"match",
"(",
"subexpr",
")",
")",
")",
"result",
"=",
"replacement",
"(",
"*",
"*",
"subst",
")",
"expression",
"=",
"functions",
".",
"replace",
"(",
"expression",
",",
"pos",
",",
"result",
")",
"replaced",
"=",
"True",
"break",
"except",
"StopIteration",
":",
"pass",
"replace_count",
"+=",
"1",
"return",
"expression"
]
| Replace all occurrences of the patterns according to the replacement rules.
Args:
expression:
The expression to which the replacement rules are applied.
max_count:
If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
are applied until there is no more match. If the set of replacement rules is not confluent,
the replacement might not terminate without a *max_count* set.
Returns:
The resulting expression after the application of the replacement rules. This can also be a sequence of
expressions, if the root expression is replaced with a sequence of expressions by a rule. | [
"Replace",
"all",
"occurrences",
"of",
"the",
"patterns",
"according",
"to",
"the",
"replacement",
"rules",
"."
]
| python | train | 48.8 |
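
The `replace` record applies rewrite rules until a fixpoint, with `max_count` as the brake for non-confluent rule sets. Below is a stdlib-only sketch of the same control flow on plain strings; it deliberately avoids matchpy's own API, so the `(old, new)` rule representation is an assumption of this sketch, not the library's.

```python
import math

def rewrite_until_fixpoint(text, rules, max_count=math.inf):
    """Apply (old, new) rules repeatedly; stop after max_count passes.

    Mirrors the termination guard above: without max_count, a
    non-confluent rule set such as {"ab" -> "ba", "ba" -> "ab"}
    would loop forever.
    """
    count = 0
    changed = True
    while changed and count < max_count:
        changed = False
        for old, new in rules:
            if old in text:
                text = text.replace(old, new, 1)
                changed = True
                break
        count += 1
    return text

print(rewrite_until_fixpoint("ab", [("ab", "ba"), ("ba", "ab")], max_count=5))  # 'ba'
```
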
analytehealth/django-analytics | djanalytics/reports/utils.py | https://github.com/analytehealth/django-analytics/blob/7782d3f81249dcb1b266afb0cb1e90000108c74d/djanalytics/reports/utils.py#L12-L19 | def average_duration(total_duration, visits):
""" Method to calculate and format an average duration safely """
if not visits:
seconds = 0
else:
seconds = int(round(total_duration / Decimal(visits)))
duration = timedelta(seconds=seconds)
return str(duration) | [
"def",
"average_duration",
"(",
"total_duration",
",",
"visits",
")",
":",
"if",
"not",
"visits",
":",
"seconds",
"=",
"0",
"else",
":",
"seconds",
"=",
"int",
"(",
"round",
"(",
"total_duration",
"/",
"Decimal",
"(",
"visits",
")",
")",
")",
"duration",
"=",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"return",
"str",
"(",
"duration",
")"
]
| Method to calculate and format an average duration safely | [
"Method",
"to",
"calculate",
"and",
"format",
"an",
"average",
"duration",
"safely"
]
| python | test | 35.875 |
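
The `average_duration` record guards the division and formats the result via `timedelta`. Restating it verbatim with two worked calls makes the behaviour concrete; the numbers are arbitrary.

```python
from decimal import Decimal
from datetime import timedelta

def average_duration(total_duration, visits):
    if not visits:
        seconds = 0
    else:
        seconds = int(round(total_duration / Decimal(visits)))
    return str(timedelta(seconds=seconds))

print(average_duration(7200, 3))  # '0:40:00'  (2400 s per visit)
print(average_duration(7200, 0))  # '0:00:00'  (no visits: avoids ZeroDivisionError)
```
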
googleapis/google-cloud-python | datastore/google/cloud/datastore/helpers.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/helpers.py#L270-L299 | def key_from_protobuf(pb):
"""Factory method for creating a key based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Key`
:param pb: The Protobuf representing the key.
:rtype: :class:`google.cloud.datastore.key.Key`
:returns: a new `Key` instance
"""
path_args = []
for element in pb.path:
path_args.append(element.kind)
if element.id: # Simple field (int64)
path_args.append(element.id)
# This is safe: we expect proto objects returned will only have
# one of `name` or `id` set.
if element.name: # Simple field (string)
path_args.append(element.name)
project = None
if pb.partition_id.project_id: # Simple field (string)
project = pb.partition_id.project_id
namespace = None
if pb.partition_id.namespace_id: # Simple field (string)
namespace = pb.partition_id.namespace_id
return Key(*path_args, namespace=namespace, project=project) | [
"def",
"key_from_protobuf",
"(",
"pb",
")",
":",
"path_args",
"=",
"[",
"]",
"for",
"element",
"in",
"pb",
".",
"path",
":",
"path_args",
".",
"append",
"(",
"element",
".",
"kind",
")",
"if",
"element",
".",
"id",
":",
"# Simple field (int64)",
"path_args",
".",
"append",
"(",
"element",
".",
"id",
")",
"# This is safe: we expect proto objects returned will only have",
"# one of `name` or `id` set.",
"if",
"element",
".",
"name",
":",
"# Simple field (string)",
"path_args",
".",
"append",
"(",
"element",
".",
"name",
")",
"project",
"=",
"None",
"if",
"pb",
".",
"partition_id",
".",
"project_id",
":",
"# Simple field (string)",
"project",
"=",
"pb",
".",
"partition_id",
".",
"project_id",
"namespace",
"=",
"None",
"if",
"pb",
".",
"partition_id",
".",
"namespace_id",
":",
"# Simple field (string)",
"namespace",
"=",
"pb",
".",
"partition_id",
".",
"namespace_id",
"return",
"Key",
"(",
"*",
"path_args",
",",
"namespace",
"=",
"namespace",
",",
"project",
"=",
"project",
")"
]
| Factory method for creating a key based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Key`
:param pb: The Protobuf representing the key.
:rtype: :class:`google.cloud.datastore.key.Key`
:returns: a new `Key` instance | [
"Factory",
"method",
"for",
"creating",
"a",
"key",
"based",
"on",
"a",
"protobuf",
"."
]
| python | train | 34.266667 |
sentinel-hub/sentinelhub-py | sentinelhub/areas.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/areas.py#L303-L325 | def _recursive_split(self, bbox, zoom_level, column, row):
"""Method that recursively creates bounding boxes of OSM grid that intersect the area.
:param bbox: Bounding box
:type bbox: BBox
:param zoom_level: OSM zoom level
:type zoom_level: int
:param column: Column in the OSM grid
:type column: int
:param row: Row in the OSM grid
:type row: int
"""
if zoom_level == self.zoom_level:
self.bbox_list.append(bbox)
self.info_list.append({'zoom_level': zoom_level,
'index_x': column,
'index_y': row})
return
bbox_partition = bbox.get_partition(2, 2)
for i, j in itertools.product(range(2), range(2)):
if self._intersects_area(bbox_partition[i][j]):
self._recursive_split(bbox_partition[i][j], zoom_level + 1, 2 * column + i, 2 * row + 1 - j) | [
"def",
"_recursive_split",
"(",
"self",
",",
"bbox",
",",
"zoom_level",
",",
"column",
",",
"row",
")",
":",
"if",
"zoom_level",
"==",
"self",
".",
"zoom_level",
":",
"self",
".",
"bbox_list",
".",
"append",
"(",
"bbox",
")",
"self",
".",
"info_list",
".",
"append",
"(",
"{",
"'zoom_level'",
":",
"zoom_level",
",",
"'index_x'",
":",
"column",
",",
"'index_y'",
":",
"row",
"}",
")",
"return",
"bbox_partition",
"=",
"bbox",
".",
"get_partition",
"(",
"2",
",",
"2",
")",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"product",
"(",
"range",
"(",
"2",
")",
",",
"range",
"(",
"2",
")",
")",
":",
"if",
"self",
".",
"_intersects_area",
"(",
"bbox_partition",
"[",
"i",
"]",
"[",
"j",
"]",
")",
":",
"self",
".",
"_recursive_split",
"(",
"bbox_partition",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"zoom_level",
"+",
"1",
",",
"2",
"*",
"column",
"+",
"i",
",",
"2",
"*",
"row",
"+",
"1",
"-",
"j",
")"
]
| Method that recursively creates bounding boxes of OSM grid that intersect the area.
:param bbox: Bounding box
:type bbox: BBox
:param zoom_level: OSM zoom level
:type zoom_level: int
:param column: Column in the OSM grid
:type column: int
:param row: Row in the OSM grid
:type row: int | [
"Method",
"that",
"recursively",
"creates",
"bounding",
"boxes",
"of",
"OSM",
"grid",
"that",
"intersect",
"the",
"area",
"."
]
| python | train | 41.608696 |
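
The `_recursive_split` record descends one OSM zoom level per call, mapping tile `(column, row)` to four children. A small sketch of just that index arithmetic follows; reading the `1 - j` term as a top-down/bottom-up row flip is an inference from the code, not something the source states.

```python
from itertools import product

def osm_children(column, row):
    """Children of OSM tile (column, row) one zoom level deeper.

    Follows the index arithmetic in the record above; the `1 - j`
    flip reflects that the bbox partition counts rows upward while
    OSM tile rows count downward (an inference, not stated in the source).
    """
    return [(2 * column + i, 2 * row + 1 - j) for i, j in product(range(2), range(2))]

print(osm_children(0, 0))  # [(0, 1), (0, 0), (1, 1), (1, 0)]
```
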
LuminosoInsight/luminoso-api-client-python | luminoso_api/v5_download.py | https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_download.py#L70-L92 | def download_docs(client, output_filename=None, expanded=False):
"""
Given a LuminosoClient pointing to a project and a filename to write to,
retrieve all its documents in batches, and write them to a JSON lines
(.jsons) file with one document per line.
"""
if output_filename is None:
# Find a default filename to download to, based on the project name.
projname = _sanitize_filename(client.get()['name'])
output_filename = '{}.jsons'.format(projname)
# If the file already exists, add .1, .2, ..., after the project name
# to unobtrusively get a unique filename.
counter = 0
while os.access(output_filename, os.F_OK):
counter += 1
output_filename = '{}.{}.jsons'.format(projname, counter)
print('Downloading project to {!r}'.format(output_filename))
with open(output_filename, 'w', encoding='utf-8') as out:
for doc in iterate_docs(client, expanded=expanded, progress=True):
print(json.dumps(doc, ensure_ascii=False), file=out) | [
"def",
"download_docs",
"(",
"client",
",",
"output_filename",
"=",
"None",
",",
"expanded",
"=",
"False",
")",
":",
"if",
"output_filename",
"is",
"None",
":",
"# Find a default filename to download to, based on the project name.",
"projname",
"=",
"_sanitize_filename",
"(",
"client",
".",
"get",
"(",
")",
"[",
"'name'",
"]",
")",
"output_filename",
"=",
"'{}.jsons'",
".",
"format",
"(",
"projname",
")",
"# If the file already exists, add .1, .2, ..., after the project name",
"# to unobtrusively get a unique filename.",
"counter",
"=",
"0",
"while",
"os",
".",
"access",
"(",
"output_filename",
",",
"os",
".",
"F_OK",
")",
":",
"counter",
"+=",
"1",
"output_filename",
"=",
"'{}.{}.jsons'",
".",
"format",
"(",
"projname",
",",
"counter",
")",
"print",
"(",
"'Downloading project to {!r}'",
".",
"format",
"(",
"output_filename",
")",
")",
"with",
"open",
"(",
"output_filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"out",
":",
"for",
"doc",
"in",
"iterate_docs",
"(",
"client",
",",
"expanded",
"=",
"expanded",
",",
"progress",
"=",
"True",
")",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"doc",
",",
"ensure_ascii",
"=",
"False",
")",
",",
"file",
"=",
"out",
")"
]
| Given a LuminosoClient pointing to a project and a filename to write to,
retrieve all its documents in batches, and write them to a JSON lines
(.jsons) file with one document per line. | [
"Given",
"a",
"LuminosoClient",
"pointing",
"to",
"a",
"project",
"and",
"a",
"filename",
"to",
"write",
"to",
"retrieve",
"all",
"its",
"documents",
"in",
"batches",
"and",
"write",
"them",
"to",
"a",
"JSON",
"lines",
"(",
".",
"jsons",
")",
"file",
"with",
"one",
"document",
"per",
"line",
"."
]
| python | test | 45.478261 |
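
The `download_docs` record derives a non-clobbering output name by appending `.1`, `.2`, … before the extension. The same scheme in isolation, with a hypothetical base name:

```python
import os

def unique_filename(base, ext="jsons"):
    """Mirror the counter-suffix scheme above: base.ext, base.1.ext, base.2.ext, ..."""
    candidate = "{}.{}".format(base, ext)
    counter = 0
    while os.access(candidate, os.F_OK):
        counter += 1
        candidate = "{}.{}.{}".format(base, counter, ext)
    return candidate

print(unique_filename("project"))  # 'project.jsons' unless that file already exists
```
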
dwavesystems/dwave-cloud-client | dwave/cloud/config.py | https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L336-L367 | def get_default_configfile_path():
"""Return the default configuration-file path.
Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running IPython 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf'
"""
base = homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False)
path = os.path.join(base, CONF_FILENAME)
return path | [
"def",
"get_default_configfile_path",
"(",
")",
":",
"base",
"=",
"homebase",
".",
"user_config_dir",
"(",
"app_author",
"=",
"CONF_AUTHOR",
",",
"app_name",
"=",
"CONF_APP",
",",
"roaming",
"=",
"False",
",",
"use_virtualenv",
"=",
"False",
",",
"create",
"=",
"False",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"CONF_FILENAME",
")",
"return",
"path"
]
| Return the default configuration-file path.
Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running IPython 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf' | [
"Return",
"the",
"default",
"configuration",
"-",
"file",
"path",
"."
]
| python | train | 33.9375 |
mlperf/training | reinforcement/tensorflow/minigo/mask_flags.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/mask_flags.py#L103-L109 | def run(cmd):
"""Prepare and run a subprocess cmd, returning a CompletedProcess."""
print("Preparing the following cmd:")
cmd = prepare_subprocess_cmd(cmd)
print("Running the following cmd:")
print('\n'.join(cmd))
return subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr) | [
"def",
"run",
"(",
"cmd",
")",
":",
"print",
"(",
"\"Preparing the following cmd:\"",
")",
"cmd",
"=",
"prepare_subprocess_cmd",
"(",
"cmd",
")",
"print",
"(",
"\"Running the following cmd:\"",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"cmd",
")",
")",
"return",
"subprocess",
".",
"run",
"(",
"cmd",
",",
"stdout",
"=",
"sys",
".",
"stdout",
",",
"stderr",
"=",
"sys",
".",
"stderr",
")"
]
| Prepare and run a subprocess cmd, returning a CompletedProcess. | [
"Prepare",
"and",
"run",
"a",
"subprocess",
"cmd",
"returning",
"a",
"CompletedProcess",
"."
]
| python | train | 42.285714 |
uber/rides-python-sdk | example/request_ride.py | https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/example/request_ride.py#L122-L142 | def update_ride(api_client, ride_status, ride_id):
"""Use an UberRidesClient to update ride status and print the results.
Parameters
api_client (UberRidesClient)
An authorized UberRidesClient with 'request' scope.
ride_status (str)
New ride status to update to.
ride_id (str)
Unique identifier for ride to update.
"""
try:
update_product = api_client.update_sandbox_ride(ride_id, ride_status)
except (ClientError, ServerError) as error:
fail_print(error)
else:
message = '{} New status: {}'
message = message.format(update_product.status_code, ride_status)
success_print(message) | [
"def",
"update_ride",
"(",
"api_client",
",",
"ride_status",
",",
"ride_id",
")",
":",
"try",
":",
"update_product",
"=",
"api_client",
".",
"update_sandbox_ride",
"(",
"ride_id",
",",
"ride_status",
")",
"except",
"(",
"ClientError",
",",
"ServerError",
")",
"as",
"error",
":",
"fail_print",
"(",
"error",
")",
"else",
":",
"message",
"=",
"'{} New status: {}'",
"message",
"=",
"message",
".",
"format",
"(",
"update_product",
".",
"status_code",
",",
"ride_status",
")",
"success_print",
"(",
"message",
")"
]
| Use an UberRidesClient to update ride status and print the results.
Parameters
api_client (UberRidesClient)
An authorized UberRidesClient with 'request' scope.
ride_status (str)
New ride status to update to.
ride_id (str)
Unique identifier for ride to update. | [
"Use",
"an",
"UberRidesClient",
"to",
"update",
"ride",
"status",
"and",
"print",
"the",
"results",
"."
]
| python | train | 32.666667 |
icgood/pymap | pymap/backend/maildir/subscriptions.py | https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/backend/maildir/subscriptions.py#L41-L46 | def set(self, folder: str, subscribed: bool) -> None:
"""Set the subscribed status of a folder."""
if subscribed:
self.add(folder)
else:
self.remove(folder) | [
"def",
"set",
"(",
"self",
",",
"folder",
":",
"str",
",",
"subscribed",
":",
"bool",
")",
"->",
"None",
":",
"if",
"subscribed",
":",
"self",
".",
"add",
"(",
"folder",
")",
"else",
":",
"self",
".",
"remove",
"(",
"folder",
")"
]
| Set the subscribed status of a folder. | [
"Set",
"the",
"subscribed",
"status",
"of",
"a",
"folder",
"."
]
| python | train | 33.166667 |
quantopian/zipline | zipline/pipeline/graph.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L488-L493 | def _assert_all_loadable_terms_specialized_to(self, domain):
"""Make sure that we've specialized all loadable terms in the graph.
"""
for term in self.graph.node:
if isinstance(term, LoadableTerm):
assert term.domain is domain | [
"def",
"_assert_all_loadable_terms_specialized_to",
"(",
"self",
",",
"domain",
")",
":",
"for",
"term",
"in",
"self",
".",
"graph",
".",
"node",
":",
"if",
"isinstance",
"(",
"term",
",",
"LoadableTerm",
")",
":",
"assert",
"term",
".",
"domain",
"is",
"domain"
]
| Make sure that we've specialized all loadable terms in the graph. | [
"Make",
"sure",
"that",
"we",
"ve",
"specialized",
"all",
"loadable",
"terms",
"in",
"the",
"graph",
"."
]
| python | train | 45.5 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/stats.py | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/stats.py#L34-L39 | def variance(data, data_mean=None):
"""Return variance of a sequence of numbers.
:param data_mean: Precomputed mean of the sequence.
"""
data_mean = data_mean or mean(data)
return sum((x - data_mean) ** 2 for x in data) / len(data) | [
"def",
"variance",
"(",
"data",
",",
"data_mean",
"=",
"None",
")",
":",
"data_mean",
"=",
"data_mean",
"or",
"mean",
"(",
"data",
")",
"return",
"sum",
"(",
"(",
"x",
"-",
"data_mean",
")",
"**",
"2",
"for",
"x",
"in",
"data",
")",
"/",
"len",
"(",
"data",
")"
]
| Return variance of a sequence of numbers.
:param data_mean: Precomputed mean of the sequence. | [
"Return",
"variance",
"of",
"a",
"sequence",
"of",
"numbers",
".",
":",
"param",
"data_mean",
":",
"Precomputed",
"mean",
"of",
"the",
"sequence",
"."
]
| python | train | 41 |
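
The `variance` record computes population variance (division by `len(data)`, not `n - 1`). A worked call is shown below; note in passing that a precomputed mean of exactly `0` is falsy, so the `or` idiom would recompute it — a quirk of the original, reproduced as-is.

```python
def mean(data):
    return sum(data) / len(data)

def variance(data, data_mean=None):
    data_mean = data_mean or mean(data)
    return sum((x - data_mean) ** 2 for x in data) / len(data)

print(variance([1, 2, 3, 4]))  # 1.25: mean is 2.5, squared deviations sum to 5.0
```
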
bslatkin/dpxdt | dpxdt/client/workers.py | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L297-L314 | def get_item(self):
"""Returns the item to send back into the workflow generator."""
if self.was_list:
result = ResultList()
for item in self:
if isinstance(item, WorkflowItem):
if item.done and not item.error:
result.append(item.result)
else:
# When there's an error or the workflow isn't done yet,
# just return the original WorkflowItem so the caller
# can inspect its entire state.
result.append(item)
else:
result.append(item)
return result
else:
return self[0] | [
"def",
"get_item",
"(",
"self",
")",
":",
"if",
"self",
".",
"was_list",
":",
"result",
"=",
"ResultList",
"(",
")",
"for",
"item",
"in",
"self",
":",
"if",
"isinstance",
"(",
"item",
",",
"WorkflowItem",
")",
":",
"if",
"item",
".",
"done",
"and",
"not",
"item",
".",
"error",
":",
"result",
".",
"append",
"(",
"item",
".",
"result",
")",
"else",
":",
"# When there's an error or the workflow isn't done yet,",
"# just return the original WorkflowItem so the caller",
"# can inspect its entire state.",
"result",
".",
"append",
"(",
"item",
")",
"else",
":",
"result",
".",
"append",
"(",
"item",
")",
"return",
"result",
"else",
":",
"return",
"self",
"[",
"0",
"]"
]
| Returns the item to send back into the workflow generator. | [
"Returns",
"the",
"item",
"to",
"send",
"back",
"into",
"the",
"workflow",
"generator",
"."
]
| python | train | 40.722222 |
hazelcast/hazelcast-python-client | hazelcast/proxy/list.py | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/list.py#L73-L88 | def add_all_at(self, index, items):
"""
Adds all of the elements in the specified collection into this list at the specified position. Elements in this
positions and following elements are shifted to the right, if any. The order of new elements is determined by the
specified collection's iterator.
:param index: (int), the specified index at which the first element of specified collection is added.
:param items: (Collection), the specified collection which includes the elements to be added to list.
:return: (bool), ``true`` if this call changed the list, ``false`` otherwise.
"""
check_not_none(items, "Value can't be None")
data_items = []
for item in items:
check_not_none(item, "Value can't be None")
data_items.append(self._to_data(item))
return self._encode_invoke(list_add_all_with_index_codec, index=index, value_list=data_items) | [
"def",
"add_all_at",
"(",
"self",
",",
"index",
",",
"items",
")",
":",
"check_not_none",
"(",
"items",
",",
"\"Value can't be None\"",
")",
"data_items",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"check_not_none",
"(",
"item",
",",
"\"Value can't be None\"",
")",
"data_items",
".",
"append",
"(",
"self",
".",
"_to_data",
"(",
"item",
")",
")",
"return",
"self",
".",
"_encode_invoke",
"(",
"list_add_all_with_index_codec",
",",
"index",
"=",
"index",
",",
"value_list",
"=",
"data_items",
")"
]
| Adds all of the elements in the specified collection into this list at the specified position. Elements in this
positions and following elements are shifted to the right, if any. The order of new elements is determined by the
specified collection's iterator.
:param index: (int), the specified index at which the first element of specified collection is added.
:param items: (Collection), the specified collection which includes the elements to be added to list.
:return: (bool), ``true`` if this call changed the list, ``false`` otherwise. | [
"Adds",
"all",
"of",
"the",
"elements",
"in",
"the",
"specified",
"collection",
"into",
"this",
"list",
"at",
"the",
"specified",
"position",
".",
"Elements",
"in",
"this",
"positions",
"and",
"following",
"elements",
"are",
"shifted",
"to",
"the",
"right",
"if",
"any",
".",
"The",
"order",
"of",
"new",
"elements",
"is",
"determined",
"by",
"the",
"specified",
"collection",
"s",
"iterator",
"."
]
| python | train | 59.1875 |
99designs/colorific | colorific/palette.py | https://github.com/99designs/colorific/blob/f83e59f61295500f5527dee5894207f2f033cf35/colorific/palette.py#L37-L52 | def color_stream_st(istream=sys.stdin, save_palette=False, **kwargs):
"""
Read filenames from the input stream and detect their palette.
"""
for line in istream:
filename = line.strip()
try:
palette = extract_colors(filename, **kwargs)
except Exception as e:
print(filename, e, file=sys.stderr)
continue
print_colors(filename, palette)
if save_palette:
save_palette_as_image(filename, palette) | [
"def",
"color_stream_st",
"(",
"istream",
"=",
"sys",
".",
"stdin",
",",
"save_palette",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"line",
"in",
"istream",
":",
"filename",
"=",
"line",
".",
"strip",
"(",
")",
"try",
":",
"palette",
"=",
"extract_colors",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"filename",
",",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"print_colors",
"(",
"filename",
",",
"palette",
")",
"if",
"save_palette",
":",
"save_palette_as_image",
"(",
"filename",
",",
"palette",
")"
]
| Read filenames from the input stream and detect their palette. | [
"Read",
"filenames",
"from",
"the",
"input",
"stream",
"and",
"detect",
"their",
"palette",
"."
]
| python | train | 30.25 |
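
The `color_stream_st` record streams filenames from stdin and keeps going past per-file failures. The same loop shape, with a stand-in for `extract_colors` (reading the file's size) since the real palette extraction is out of scope here:

```python
import sys

def stream_filenames(istream=sys.stdin):
    # One filename per line; log failures to stderr and continue, as above.
    for line in istream:
        filename = line.strip()
        try:
            with open(filename, "rb") as fh:
                result = len(fh.read())  # stand-in for extract_colors
        except Exception as e:
            print(filename, e, file=sys.stderr)
            continue
        print(filename, result)
```

Fed a list of paths (for example `ls *.png | python sketch.py`), one bad path produces a stderr line instead of aborting the whole stream.
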
belbio/bel | bel/resources/resource.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/resource.py#L23-L61 | def load_resource(resource_url: str, forceupdate: bool = False):
"""Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
"""
log.info(f"Loading resource {resource_url}")
try:
# Download resource
fo = bel.utils.download_file(resource_url)
if not fo:
log.error(f"Could not download and open file {resource_url}")
return "Failed to download resource_url"
# Get metadata
fo.seek(0)
with gzip.open(fo, "rt") as f:
metadata = json.loads(f.__next__())
if "metadata" not in metadata:
log.error(f"Missing metadata entry for {resource_url}")
return "Cannot load resource file - missing metadata object in first line of file"
# Load resource files
if metadata["metadata"]["type"] == "namespace":
bel.resources.namespace.load_terms(fo, metadata, forceupdate)
elif metadata["metadata"]["type"] == "ortholog":
bel.resources.ortholog.load_orthologs(fo, metadata)
finally:
fo.close() | [
"def",
"load_resource",
"(",
"resource_url",
":",
"str",
",",
"forceupdate",
":",
"bool",
"=",
"False",
")",
":",
"log",
".",
"info",
"(",
"f\"Loading resource {resource_url}\"",
")",
"try",
":",
"# Download resource",
"fo",
"=",
"bel",
".",
"utils",
".",
"download_file",
"(",
"resource_url",
")",
"if",
"not",
"fo",
":",
"log",
".",
"error",
"(",
"f\"Could not download and open file {resource_url}\"",
")",
"return",
"\"Failed to download resource_url\"",
"# Get metadata",
"fo",
".",
"seek",
"(",
"0",
")",
"with",
"gzip",
".",
"open",
"(",
"fo",
",",
"\"rt\"",
")",
"as",
"f",
":",
"metadata",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"__next__",
"(",
")",
")",
"if",
"\"metadata\"",
"not",
"in",
"metadata",
":",
"log",
".",
"error",
"(",
"f\"Missing metadata entry for {resource_url}\"",
")",
"return",
"\"Cannot load resource file - missing metadata object in first line of file\"",
"# Load resource files",
"if",
"metadata",
"[",
"\"metadata\"",
"]",
"[",
"\"type\"",
"]",
"==",
"\"namespace\"",
":",
"bel",
".",
"resources",
".",
"namespace",
".",
"load_terms",
"(",
"fo",
",",
"metadata",
",",
"forceupdate",
")",
"elif",
"metadata",
"[",
"\"metadata\"",
"]",
"[",
"\"type\"",
"]",
"==",
"\"ortholog\"",
":",
"bel",
".",
"resources",
".",
"ortholog",
".",
"load_orthologs",
"(",
"fo",
",",
"metadata",
")",
"finally",
":",
"fo",
".",
"close",
"(",
")"
]
| Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches | [
"Load",
"BEL",
"Resource",
"file"
]
| python | train | 34.512821 |
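
The `load_resource` record expects a gzipped JSON-lines file whose first line is a metadata object that selects the loader. A round-trip sketch of that file layout follows; the field values are invented.

```python
import gzip
import json

# Write a resource file in the assumed layout: metadata line first, then records.
with gzip.open("resource.jsonl.gz", "wt") as f:
    f.write(json.dumps({"metadata": {"type": "namespace", "version": "1.0"}}) + "\n")
    f.write(json.dumps({"term": "example"}) + "\n")

# Read it back the way load_resource does: peek at line one, then branch on type.
with gzip.open("resource.jsonl.gz", "rt") as f:
    metadata = json.loads(f.__next__())
    if "metadata" not in metadata:
        raise ValueError("missing metadata object in first line")
    records = [json.loads(line) for line in f]

print(metadata["metadata"]["type"], len(records))  # namespace 1
```
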
mailund/statusbar | statusbar/__init__.py | https://github.com/mailund/statusbar/blob/e42ac88cdaae281d47318dd8dcf156bfff2a7b2a/statusbar/__init__.py#L262-L291 | def format_table(self, width=None,
min_label_width=10, min_progress_width=10):
"""Format the entire table of progress bars.
The function first computes the widths of the fields so they can be
aligned across lines and then returns formatted lines as a list of
strings.
"""
# handle the special case of an empty table.
if len(self._lines) == 0:
return []
if width is None: # pragma: no cover
width = shutil.get_terminal_size()[0]
labelw, progw, summaryw = self.calculate_field_widths(
width=width,
min_label_width=min_label_width,
min_progress_width=min_progress_width
)
output = [
sb.format_status(
label_width=labelw,
progress_width=progw,
summary_width=summaryw
)
for sb in self._lines
]
return output | [
"def",
"format_table",
"(",
"self",
",",
"width",
"=",
"None",
",",
"min_label_width",
"=",
"10",
",",
"min_progress_width",
"=",
"10",
")",
":",
"# handle the special case of an empty table.",
"if",
"len",
"(",
"self",
".",
"_lines",
")",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"width",
"is",
"None",
":",
"# pragma: no cover",
"width",
"=",
"shutil",
".",
"get_terminal_size",
"(",
")",
"[",
"0",
"]",
"labelw",
",",
"progw",
",",
"summaryw",
"=",
"self",
".",
"calculate_field_widths",
"(",
"width",
"=",
"width",
",",
"min_label_width",
"=",
"min_label_width",
",",
"min_progress_width",
"=",
"min_progress_width",
")",
"output",
"=",
"[",
"sb",
".",
"format_status",
"(",
"label_width",
"=",
"labelw",
",",
"progress_width",
"=",
"progw",
",",
"summary_width",
"=",
"summaryw",
")",
"for",
"sb",
"in",
"self",
".",
"_lines",
"]",
"return",
"output"
]
| Format the entire table of progress bars.
The function first computes the widths of the fields so they can be
aligned across lines and then returns formatted lines as a list of
strings. | [
"Format",
"the",
"entire",
"table",
"of",
"progress",
"bars",
"."
]
| python | train | 31.566667 |
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/core_v1_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L7427-L7456 | def delete_collection_namespaced_config_map(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_config_map # noqa: E501
delete collection of ConfigMap # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_config_map(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_config_map_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_collection_namespaced_config_map_with_http_info(namespace, **kwargs) # noqa: E501
return data | [
"def",
"delete_collection_namespaced_config_map",
"(",
"self",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"delete_collection_namespaced_config_map_with_http_info",
"(",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"delete_collection_namespaced_config_map_with_http_info",
"(",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
]
| delete_collection_namespaced_config_map # noqa: E501
delete collection of ConfigMap # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_config_map(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | [
"delete_collection_namespaced_config_map",
"#",
"noqa",
":",
"E501"
]
| python | train | 164.2 |
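
The generated `delete_collection_namespaced_config_map` record toggles between a blocking call and a `thread.get()` handle via `async_req`. Swagger-generated clients typically back this with a thread pool; here is a sketch of the toggle using `multiprocessing.pool.ThreadPool`, with a dummy worker in place of the HTTP call.

```python
from multiprocessing.pool import ThreadPool

_pool = ThreadPool(1)

def fetch(url, async_req=False):
    """Sketch of the async_req toggle in the generated client above."""
    def work():
        return "response from " + url
    if async_req:
        return _pool.apply_async(work)  # caller does thread.get(), as in the docstring
    return work()

thread = fetch("https://example.test", async_req=True)
print(thread.get())  # 'response from https://example.test'
```
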
inveniosoftware/invenio-celery | invenio_celery/ext.py | https://github.com/inveniosoftware/invenio-celery/blob/4d075d5dbdb7ee849abdb0c8d7e7a49cb7973474/invenio_celery/ext.py#L78-L81 | def get_queues(self):
"""Return a list of current active Celery queues."""
res = self.celery.control.inspect().active_queues() or dict()
return [result.get('name') for host in res.values() for result in host] | [
"def",
"get_queues",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"celery",
".",
"control",
".",
"inspect",
"(",
")",
".",
"active_queues",
"(",
")",
"or",
"dict",
"(",
")",
"return",
"[",
"result",
".",
"get",
"(",
"'name'",
")",
"for",
"host",
"in",
"res",
".",
"values",
"(",
")",
"for",
"result",
"in",
"host",
"]"
]
| Return a list of current active Celery queues. | [
"Return",
"a",
"list",
"of",
"current",
"active",
"Celery",
"queues",
"."
]
| python | train | 57.25 |
dssg/argcmdr | src/argcmdr.py | https://github.com/dssg/argcmdr/blob/346b6158987464c3d3a32d315f3800a4807744b4/src/argcmdr.py#L729-L757 | def cmd(*args, **kwargs):
"""Decorate a callable to replace it with a manufactured command
class.
Extends the interface of ``CommandDecorator``, allowing the same
``cmd`` to be used as a decorator or as a decorator factory::
@cmd(root=True)
def build():
...
@build.register
@cmd
def deploy():
...
Further enables composition of configuration, for example via
partials, as helpers.
"""
try:
(first, *remainder) = args
except ValueError:
pass
else:
if callable(first):
return CommandDecorator(*remainder, **kwargs)(first)
return CommandDecorator(*args, **kwargs) | [
"def",
"cmd",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"(",
"first",
",",
"*",
"remainder",
")",
"=",
"args",
"except",
"ValueError",
":",
"pass",
"else",
":",
"if",
"callable",
"(",
"first",
")",
":",
"return",
"CommandDecorator",
"(",
"*",
"remainder",
",",
"*",
"*",
"kwargs",
")",
"(",
"first",
")",
"return",
"CommandDecorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Decorate a callable to replace it with a manufactured command
class.
Extends the interface of ``CommandDecorator``, allowing the same
``cmd`` to be used as a decorator or as a decorator factory::
@cmd(root=True)
def build():
...
@build.register
@cmd
def deploy():
...
Further enables composition of configuration, for example via
partials, as helpers. | [
"Decorate",
"a",
"callable",
"to",
"replace",
"it",
"with",
"a",
"manufactured",
"command",
"class",
"."
]
| python | train | 23.517241 |
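
The `cmd` record works both bare (`@cmd`) and as a factory (`@cmd(root=True)`) by sniffing whether the first positional argument is callable. The same trick on a toy decorator:

```python
def tag(*args, **kwargs):
    """Usable both as @tag and as @tag(label=...), like `cmd` above."""
    def decorate(func, label=None):
        func.label = label or func.__name__
        return func
    try:
        (first, *remainder) = args
    except ValueError:
        pass
    else:
        if callable(first):                 # bare @tag: first arg is the function
            return decorate(first, *remainder, **kwargs)
    return lambda func: decorate(func, *args, **kwargs)  # @tag(...): a decorator

@tag
def build(): ...

@tag(label="ship-it")
def deploy(): ...

print(build.label, deploy.label)  # build ship-it
```
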
pypa/pipenv | pipenv/vendor/jinja2/nativetypes.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/nativetypes.py#L39-L195 | def visit_Output(self, node, frame):
"""Same as :meth:`CodeGenerator.visit_Output`, but do not call
``to_string`` on output nodes in generated code.
"""
if self.has_known_extends and frame.require_output_check:
return
finalize = self.environment.finalize
finalize_context = getattr(finalize, 'contextfunction', False)
finalize_eval = getattr(finalize, 'evalcontextfunction', False)
finalize_env = getattr(finalize, 'environmentfunction', False)
if finalize is not None:
if finalize_context or finalize_eval:
const_finalize = None
elif finalize_env:
def const_finalize(x):
return finalize(self.environment, x)
else:
const_finalize = finalize
else:
def const_finalize(x):
return x
# If we are inside a frame that requires output checking, we do so.
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# Try to evaluate as many chunks as possible into a static string at
# compile time.
body = []
for child in node.nodes:
try:
if const_finalize is None:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
except nodes.Impossible:
body.append(child)
continue
# the frame can't be volatile here, because otherwise the as_const
# function would raise an Impossible exception at that point
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = const_finalize(const)
except Exception:
# if something goes wrong here we evaluate the node at runtime
# for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(native_concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ',')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 0
if finalize is not None:
self.write('environment.finalize(')
if finalize_context:
self.write('context, ')
close += 1
self.visit(item, frame)
if close > 0:
self.write(')' * close)
if frame.buffer is not None:
self.write(',')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(native_concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if finalize is not None:
self.write('environment.finalize(')
if finalize_context:
self.write('context, ')
elif finalize_eval:
self.write('context.eval_ctx, ')
elif finalize_env:
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent() | [
"def",
"visit_Output",
"(",
"self",
",",
"node",
",",
"frame",
")",
":",
"if",
"self",
".",
"has_known_extends",
"and",
"frame",
".",
"require_output_check",
":",
"return",
"finalize",
"=",
"self",
".",
"environment",
".",
"finalize",
"finalize_context",
"=",
"getattr",
"(",
"finalize",
",",
"'contextfunction'",
",",
"False",
")",
"finalize_eval",
"=",
"getattr",
"(",
"finalize",
",",
"'evalcontextfunction'",
",",
"False",
")",
"finalize_env",
"=",
"getattr",
"(",
"finalize",
",",
"'environmentfunction'",
",",
"False",
")",
"if",
"finalize",
"is",
"not",
"None",
":",
"if",
"finalize_context",
"or",
"finalize_eval",
":",
"const_finalize",
"=",
"None",
"elif",
"finalize_env",
":",
"def",
"const_finalize",
"(",
"x",
")",
":",
"return",
"finalize",
"(",
"self",
".",
"environment",
",",
"x",
")",
"else",
":",
"const_finalize",
"=",
"finalize",
"else",
":",
"def",
"const_finalize",
"(",
"x",
")",
":",
"return",
"x",
"# If we are inside a frame that requires output checking, we do so.",
"outdent_later",
"=",
"False",
"if",
"frame",
".",
"require_output_check",
":",
"self",
".",
"writeline",
"(",
"'if parent_template is None:'",
")",
"self",
".",
"indent",
"(",
")",
"outdent_later",
"=",
"True",
"# Try to evaluate as many chunks as possible into a static string at",
"# compile time.",
"body",
"=",
"[",
"]",
"for",
"child",
"in",
"node",
".",
"nodes",
":",
"try",
":",
"if",
"const_finalize",
"is",
"None",
":",
"raise",
"nodes",
".",
"Impossible",
"(",
")",
"const",
"=",
"child",
".",
"as_const",
"(",
"frame",
".",
"eval_ctx",
")",
"if",
"not",
"has_safe_repr",
"(",
"const",
")",
":",
"raise",
"nodes",
".",
"Impossible",
"(",
")",
"except",
"nodes",
".",
"Impossible",
":",
"body",
".",
"append",
"(",
"child",
")",
"continue",
"# the frame can't be volatile here, because otherwise the as_const",
"# function would raise an Impossible exception at that point",
"try",
":",
"if",
"frame",
".",
"eval_ctx",
".",
"autoescape",
":",
"if",
"hasattr",
"(",
"const",
",",
"'__html__'",
")",
":",
"const",
"=",
"const",
".",
"__html__",
"(",
")",
"else",
":",
"const",
"=",
"escape",
"(",
"const",
")",
"const",
"=",
"const_finalize",
"(",
"const",
")",
"except",
"Exception",
":",
"# if something goes wrong here we evaluate the node at runtime",
"# for easier debugging",
"body",
".",
"append",
"(",
"child",
")",
"continue",
"if",
"body",
"and",
"isinstance",
"(",
"body",
"[",
"-",
"1",
"]",
",",
"list",
")",
":",
"body",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"const",
")",
"else",
":",
"body",
".",
"append",
"(",
"[",
"const",
"]",
")",
"# if we have less than 3 nodes or a buffer we yield or extend/append",
"if",
"len",
"(",
"body",
")",
"<",
"3",
"or",
"frame",
".",
"buffer",
"is",
"not",
"None",
":",
"if",
"frame",
".",
"buffer",
"is",
"not",
"None",
":",
"# for one item we append, for more we extend",
"if",
"len",
"(",
"body",
")",
"==",
"1",
":",
"self",
".",
"writeline",
"(",
"'%s.append('",
"%",
"frame",
".",
"buffer",
")",
"else",
":",
"self",
".",
"writeline",
"(",
"'%s.extend(('",
"%",
"frame",
".",
"buffer",
")",
"self",
".",
"indent",
"(",
")",
"for",
"item",
"in",
"body",
":",
"if",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"val",
"=",
"repr",
"(",
"native_concat",
"(",
"item",
")",
")",
"if",
"frame",
".",
"buffer",
"is",
"None",
":",
"self",
".",
"writeline",
"(",
"'yield '",
"+",
"val",
")",
"else",
":",
"self",
".",
"writeline",
"(",
"val",
"+",
"','",
")",
"else",
":",
"if",
"frame",
".",
"buffer",
"is",
"None",
":",
"self",
".",
"writeline",
"(",
"'yield '",
",",
"item",
")",
"else",
":",
"self",
".",
"newline",
"(",
"item",
")",
"close",
"=",
"0",
"if",
"finalize",
"is",
"not",
"None",
":",
"self",
".",
"write",
"(",
"'environment.finalize('",
")",
"if",
"finalize_context",
":",
"self",
".",
"write",
"(",
"'context, '",
")",
"close",
"+=",
"1",
"self",
".",
"visit",
"(",
"item",
",",
"frame",
")",
"if",
"close",
">",
"0",
":",
"self",
".",
"write",
"(",
"')'",
"*",
"close",
")",
"if",
"frame",
".",
"buffer",
"is",
"not",
"None",
":",
"self",
".",
"write",
"(",
"','",
")",
"if",
"frame",
".",
"buffer",
"is",
"not",
"None",
":",
"# close the open parentheses",
"self",
".",
"outdent",
"(",
")",
"self",
".",
"writeline",
"(",
"len",
"(",
"body",
")",
"==",
"1",
"and",
"')'",
"or",
"'))'",
")",
"# otherwise we create a format string as this is faster in that case",
"else",
":",
"format",
"=",
"[",
"]",
"arguments",
"=",
"[",
"]",
"for",
"item",
"in",
"body",
":",
"if",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"format",
".",
"append",
"(",
"native_concat",
"(",
"item",
")",
".",
"replace",
"(",
"'%'",
",",
"'%%'",
")",
")",
"else",
":",
"format",
".",
"append",
"(",
"'%s'",
")",
"arguments",
".",
"append",
"(",
"item",
")",
"self",
".",
"writeline",
"(",
"'yield '",
")",
"self",
".",
"write",
"(",
"repr",
"(",
"concat",
"(",
"format",
")",
")",
"+",
"' % ('",
")",
"self",
".",
"indent",
"(",
")",
"for",
"argument",
"in",
"arguments",
":",
"self",
".",
"newline",
"(",
"argument",
")",
"close",
"=",
"0",
"if",
"finalize",
"is",
"not",
"None",
":",
"self",
".",
"write",
"(",
"'environment.finalize('",
")",
"if",
"finalize_context",
":",
"self",
".",
"write",
"(",
"'context, '",
")",
"elif",
"finalize_eval",
":",
"self",
".",
"write",
"(",
"'context.eval_ctx, '",
")",
"elif",
"finalize_env",
":",
"self",
".",
"write",
"(",
"'environment, '",
")",
"close",
"+=",
"1",
"self",
".",
"visit",
"(",
"argument",
",",
"frame",
")",
"self",
".",
"write",
"(",
"')'",
"*",
"close",
"+",
"', '",
")",
"self",
".",
"outdent",
"(",
")",
"self",
".",
"writeline",
"(",
"')'",
")",
"if",
"outdent_later",
":",
"self",
".",
"outdent",
"(",
")"
]
| Same as :meth:`CodeGenerator.visit_Output`, but do not call
``to_string`` on output nodes in generated code. | [
"Same",
"as",
":",
"meth",
":",
"CodeGenerator",
".",
"visit_Output",
"but",
"do",
"not",
"call",
"to_string",
"on",
"output",
"nodes",
"in",
"generated",
"code",
"."
]
| python | train | 32.815287 |
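A quick standalone illustration of the '%'-escaping step in the format-string branch of the record above (not part of the record itself): literal chunks get '%' doubled before joining, so the final single '%'-format pass leaves them intact while still filling the '%s' slots.

chunks = ['100% done: '.replace('%', '%%'), '%s']
fmt = ''.join(chunks)          # -> '100%% done: %s'
print(fmt % ('ok',))           # -> '100% done: ok'
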
osrg/ryu | ryu/services/protocols/bgp/peer.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/peer.py#L685-L707 | def _send_outgoing_route_refresh_msg(self, rr_msg):
"""Sends given message `rr_msg` to peer.
Parameters:
- rr_msg: (RouteRefresh) route refresh message to send to peer.
Update appropriate counters and set appropriate timers.
"""
assert rr_msg.type == BGP_MSG_ROUTE_REFRESH
self._protocol.send(rr_msg)
LOG.debug('RouteRefresh %s>> %s',
self._neigh_conf.ip_address, rr_msg)
# Collect update statistics for sent refresh request.
if rr_msg.demarcation == 0:
self.state.incr(PeerCounterNames.SENT_REFRESH)
# If SOR is sent, we set Max. EOR timer if needed.
elif (rr_msg.demarcation == 1 and
self._common_conf.refresh_max_eor_time != 0):
eor_timer = self._common_conf.refresh_max_eor_time
# Set timer to send EOR demarcation.
self._spawn_after('end-of-rib-timer', eor_timer,
self._enqueue_eor_msg, rr_msg)
LOG.debug('Enhanced RR max. EOR timer set.') | [
"def",
"_send_outgoing_route_refresh_msg",
"(",
"self",
",",
"rr_msg",
")",
":",
"assert",
"rr_msg",
".",
"type",
"==",
"BGP_MSG_ROUTE_REFRESH",
"self",
".",
"_protocol",
".",
"send",
"(",
"rr_msg",
")",
"LOG",
".",
"debug",
"(",
"'RouteRefresh %s>> %s'",
",",
"self",
".",
"_neigh_conf",
".",
"ip_address",
",",
"rr_msg",
")",
"# Collect update statistics for sent refresh request.",
"if",
"rr_msg",
".",
"demarcation",
"==",
"0",
":",
"self",
".",
"state",
".",
"incr",
"(",
"PeerCounterNames",
".",
"SENT_REFRESH",
")",
"# If SOR is sent, we set Max. EOR timer if needed.",
"elif",
"(",
"rr_msg",
".",
"demarcation",
"==",
"1",
"and",
"self",
".",
"_common_conf",
".",
"refresh_max_eor_time",
"!=",
"0",
")",
":",
"eor_timer",
"=",
"self",
".",
"_common_conf",
".",
"refresh_max_eor_time",
"# Set timer to send EOR demarcation.",
"self",
".",
"_spawn_after",
"(",
"'end-of-rib-timer'",
",",
"eor_timer",
",",
"self",
".",
"_enqueue_eor_msg",
",",
"rr_msg",
")",
"LOG",
".",
"debug",
"(",
"'Enhanced RR max. EOR timer set.'",
")"
]
 | Sends the given message `rr_msg` to peer.
Parameters:
- rr_msg: (RouteRefresh) route refresh message to send to peer.
Update appropriate counters and set appropriate timers. | [
"Sends",
"given",
"message",
"rr_msg",
"to",
"peer",
"."
]
| python | train | 45.478261 |
samjabrahams/anchorhub | anchorhub/lib/armedswitch.py | https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/lib/armedswitch.py#L26-L43 | def switch(self, val=None):
"""
Set the state of the switch. If the armed state is set to False,
the function does nothing.
:param val: Boolean. The value to set the switch state to. When None,
the switch will be set to the opposite of its current state.
:return: Boolean. Returns True if operation was successful (i.e. the
switch was armed before this was called)
"""
if self._armed:
if val is None:
val = not self._switched
self._switched = val
self._armed = False
return True
else:
return False | [
"def",
"switch",
"(",
"self",
",",
"val",
"=",
"None",
")",
":",
"if",
"self",
".",
"_armed",
":",
"if",
"val",
"is",
"None",
":",
"val",
"=",
"not",
"self",
".",
"_switched",
"self",
".",
"_switched",
"=",
"val",
"self",
".",
"_armed",
"=",
"False",
"return",
"True",
"else",
":",
"return",
"False"
]
| Set the state of the switch. If the armed state is set to False,
the function does nothing.
:param val: Boolean. The value to set the switch state to. When None,
the switch will be set to the opposite of its current state.
:return: Boolean. Returns True if operation was successful (i.e. the
switch was armed before this was called) | [
"Set",
"the",
"state",
"of",
"the",
"switch",
".",
"If",
"the",
"armed",
"state",
"is",
"set",
"to",
"False",
"the",
"function",
"does",
"nothing",
"."
]
| python | train | 35.833333 |
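A standalone usage sketch for the one-shot behavior in the record above; the constructor defaults are assumed, since the record only shows switch().

class ArmedSwitch:
    def __init__(self):
        # assumed defaults; only switch() appears in the record
        self._switched = False
        self._armed = True

    def switch(self, val=None):
        if self._armed:
            if val is None:
                val = not self._switched
            self._switched = val
            self._armed = False
            return True
        return False

s = ArmedSwitch()
assert s.switch() is True    # flips and disarms
assert s.switch() is False   # further calls are ignored until re-armed
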
inspirehep/harvesting-kit | harvestingkit/etree_utils.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/etree_utils.py#L60-L81 | def element_tree_collection_to_records(tree):
"""Take an ElementTree and converts the nodes into BibRecord records.
This function is for a tree root of collection as such:
<collection>
<record>
<!-- MARCXML -->
</record>
<record> ... </record>
</collection>
"""
from .bibrecord import create_record
records = []
collection = tree.getroot()
for record_element in collection.getchildren():
marcxml = ET.tostring(record_element, encoding="utf-8")
record, status, errors = create_record(marcxml)
if errors:
print(str(status))
records.append(record)
return records | [
"def",
"element_tree_collection_to_records",
"(",
"tree",
")",
":",
"from",
".",
"bibrecord",
"import",
"create_record",
"records",
"=",
"[",
"]",
"collection",
"=",
"tree",
".",
"getroot",
"(",
")",
"for",
"record_element",
"in",
"collection",
".",
"getchildren",
"(",
")",
":",
"marcxml",
"=",
"ET",
".",
"tostring",
"(",
"record_element",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"record",
",",
"status",
",",
"errors",
"=",
"create_record",
"(",
"marcxml",
")",
"if",
"errors",
":",
"print",
"(",
"str",
"(",
"status",
")",
")",
"records",
".",
"append",
"(",
"record",
")",
"return",
"records"
]
 | Takes an ElementTree and converts the nodes into BibRecord records.
This function is for a tree root of collection as such:
<collection>
<record>
<!-- MARCXML -->
</record>
<record> ... </record>
</collection> | [
"Take",
"an",
"ElementTree",
"and",
"converts",
"the",
"nodes",
"into",
"BibRecord",
"records",
"."
]
| python | valid | 30 |
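A sketch of feeding the helper above. Note that getchildren() was removed in Python 3.9, so on modern Python list(collection) is the equivalent iteration; create_record is the package's own MARCXML parser and is assumed importable.

import xml.etree.ElementTree as ET

xml = ('<collection>'
       '<record><controlfield tag="001">1</controlfield></record>'
       '<record><controlfield tag="001">2</controlfield></record>'
       '</collection>')
tree = ET.ElementTree(ET.fromstring(xml))
# records = element_tree_collection_to_records(tree)  # one record per <record>
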
ic-labs/django-icekit | icekit_events/admin.py | https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/admin.py#L224-L243 | def get_urls(self):
"""
Add a calendar URL.
"""
from django.conf.urls import patterns, url
urls = super(EventAdmin, self).get_urls()
my_urls = patterns(
'',
url(
r'^calendar/$',
self.admin_site.admin_view(self.calendar),
name='icekit_events_eventbase_calendar'
),
url(
r'^calendar_data/$',
self.admin_site.admin_view(self.calendar_data),
name='icekit_events_eventbase_calendar_data'
),
)
return my_urls + urls | [
"def",
"get_urls",
"(",
"self",
")",
":",
"from",
"django",
".",
"conf",
".",
"urls",
"import",
"patterns",
",",
"url",
"urls",
"=",
"super",
"(",
"EventAdmin",
",",
"self",
")",
".",
"get_urls",
"(",
")",
"my_urls",
"=",
"patterns",
"(",
"''",
",",
"url",
"(",
"r'^calendar/$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"self",
".",
"calendar",
")",
",",
"name",
"=",
"'icekit_events_eventbase_calendar'",
")",
",",
"url",
"(",
"r'^calendar_data/$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"self",
".",
"calendar_data",
")",
",",
"name",
"=",
"'icekit_events_eventbase_calendar_data'",
")",
",",
")",
"return",
"my_urls",
"+",
"urls"
]
| Add a calendar URL. | [
"Add",
"a",
"calendar",
"URL",
"."
]
| python | train | 30.5 |
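The patterns() helper used in the record above was removed in Django 1.10; a hedged sketch of the same admin hook in the modern list form (Django >= 2.0 spelling):

from django.urls import re_path

def get_urls(self):
    urls = super(EventAdmin, self).get_urls()
    my_urls = [
        re_path(r'^calendar/$',
                self.admin_site.admin_view(self.calendar),
                name='icekit_events_eventbase_calendar'),
        re_path(r'^calendar_data/$',
                self.admin_site.admin_view(self.calendar_data),
                name='icekit_events_eventbase_calendar_data'),
    ]
    return my_urls + urls
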
frictionlessdata/datapackage-py | datapackage/package.py | https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L489-L495 | def _slugify_foreign_key(schema):
"""Slugify foreign key
"""
for foreign_key in schema.get('foreignKeys', []):
foreign_key['reference']['resource'] = _slugify_resource_name(
foreign_key['reference'].get('resource', ''))
return schema | [
"def",
"_slugify_foreign_key",
"(",
"schema",
")",
":",
"for",
"foreign_key",
"in",
"schema",
".",
"get",
"(",
"'foreignKeys'",
",",
"[",
"]",
")",
":",
"foreign_key",
"[",
"'reference'",
"]",
"[",
"'resource'",
"]",
"=",
"_slugify_resource_name",
"(",
"foreign_key",
"[",
"'reference'",
"]",
".",
"get",
"(",
"'resource'",
",",
"''",
")",
")",
"return",
"schema"
]
| Slugify foreign key | [
"Slugify",
"foreign",
"key"
]
| python | valid | 37.571429 |
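A before/after sketch for the helper above; _slugify_resource_name is assumed to lowercase and dash-join the name (e.g. 'My Authors' -> 'my-authors'):

schema = {
    'foreignKeys': [
        {'fields': 'author_id',
         'reference': {'resource': 'My Authors', 'fields': 'id'}},
    ],
}
# After _slugify_foreign_key(schema), reference['resource'] holds the slug
# ('my-authors'), matching the slugified resource names used elsewhere.
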
lra/mackup | mackup/mackup.py | https://github.com/lra/mackup/blob/ed0b5626b033f232868900bfd5108df448873725/mackup/mackup.py#L53-L61 | def check_for_usable_restore_env(self):
"""Check if the current env can be used to restore files."""
self.check_for_usable_environment()
if not os.path.isdir(self.mackup_folder):
utils.error("Unable to find the Mackup folder: {}\n"
"You might want to back up some files or get your"
" storage directory synced first."
.format(self.mackup_folder)) | [
"def",
"check_for_usable_restore_env",
"(",
"self",
")",
":",
"self",
".",
"check_for_usable_environment",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"mackup_folder",
")",
":",
"utils",
".",
"error",
"(",
"\"Unable to find the Mackup folder: {}\\n\"",
"\"You might want to back up some files or get your\"",
"\" storage directory synced first.\"",
".",
"format",
"(",
"self",
".",
"mackup_folder",
")",
")"
]
| Check if the current env can be used to restore files. | [
"Check",
"if",
"the",
"current",
"env",
"can",
"be",
"used",
"to",
"restore",
"files",
"."
]
| python | train | 49.666667 |
bretth/woven | woven/project.py | https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/project.py#L166-L184 | def deploy_media():
"""
Deploy MEDIA_ROOT unversioned on host
"""
if not env.MEDIA_URL or not env.MEDIA_ROOT or 'http://' in env.MEDIA_URL: return
local_dir = env.MEDIA_ROOT
remote_dir = '/'.join([deployment_root(),'public'])
media_url = env.MEDIA_URL[1:]
if media_url:
remote_dir = '/'.join([remote_dir,media_url])
if env.verbosity:
print env.host,"DEPLOYING media",remote_dir
deployed = deploy_files(local_dir,remote_dir)
#make writable for www-data for file uploads
sudo("chown -R www-data:sudo %s" % remote_dir)
sudo("chmod -R ug+w %s"% remote_dir)
return deployed | [
"def",
"deploy_media",
"(",
")",
":",
"if",
"not",
"env",
".",
"MEDIA_URL",
"or",
"not",
"env",
".",
"MEDIA_ROOT",
"or",
"'http://'",
"in",
"env",
".",
"MEDIA_URL",
":",
"return",
"local_dir",
"=",
"env",
".",
"MEDIA_ROOT",
"remote_dir",
"=",
"'/'",
".",
"join",
"(",
"[",
"deployment_root",
"(",
")",
",",
"'public'",
"]",
")",
"media_url",
"=",
"env",
".",
"MEDIA_URL",
"[",
"1",
":",
"]",
"if",
"media_url",
":",
"remote_dir",
"=",
"'/'",
".",
"join",
"(",
"[",
"remote_dir",
",",
"media_url",
"]",
")",
"if",
"env",
".",
"verbosity",
":",
"print",
"env",
".",
"host",
",",
"\"DEPLOYING media\"",
",",
"remote_dir",
"deployed",
"=",
"deploy_files",
"(",
"local_dir",
",",
"remote_dir",
")",
"#make writable for www-data for file uploads",
"sudo",
"(",
"\"chown -R www-data:sudo %s\"",
"%",
"remote_dir",
")",
"sudo",
"(",
"\"chmod -R ug+w %s\"",
"%",
"remote_dir",
")",
"return",
"deployed"
]
| Deploy MEDIA_ROOT unversioned on host | [
"Deploy",
"MEDIA_ROOT",
"unversioned",
"on",
"host"
]
| python | train | 33.526316 |
IdentityPython/pyop | src/pyop/provider.py | https://github.com/IdentityPython/pyop/blob/7b1385964f079c39752fce5f2dbcf458b8a92e56/src/pyop/provider.py#L331-L388 | def _do_code_exchange(self, request, # type: Dict[str, str]
extra_id_token_claims=None
# type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]
):
# type: (...) -> oic.message.AccessTokenResponse
"""
Handles a token request for exchanging an authorization code for an access token
(grant_type=authorization_code).
:param request: parsed http request parameters
:param extra_id_token_claims: any extra parameters to include in the signed ID Token, either as a dict-like
object or as a callable object accepting the local user identifier and client identifier which returns
any extra claims which might depend on the user id and/or client id.
:return: a token response containing a signed ID Token, an Access Token, and a Refresh Token
:raise InvalidTokenRequest: if the token request is invalid
"""
token_request = AccessTokenRequest().from_dict(request)
try:
token_request.verify()
except MessageException as e:
raise InvalidTokenRequest(str(e), token_request) from e
authentication_request = self.authz_state.get_authorization_request_for_code(token_request['code'])
if token_request['client_id'] != authentication_request['client_id']:
logger.info('Authorization code \'%s\' belonging to \'%s\' was used by \'%s\'',
token_request['code'], authentication_request['client_id'], token_request['client_id'])
raise InvalidAuthorizationCode('{} unknown'.format(token_request['code']))
if token_request['redirect_uri'] != authentication_request['redirect_uri']:
raise InvalidTokenRequest('Invalid redirect_uri: {} != {}'.format(token_request['redirect_uri'],
authentication_request['redirect_uri']),
token_request)
sub = self.authz_state.get_subject_identifier_for_code(token_request['code'])
user_id = self.authz_state.get_user_id_for_subject_identifier(sub)
response = AccessTokenResponse()
access_token = self.authz_state.exchange_code_for_token(token_request['code'])
self._add_access_token_to_response(response, access_token)
refresh_token = self.authz_state.create_refresh_token(access_token.value)
if refresh_token is not None:
response['refresh_token'] = refresh_token
if extra_id_token_claims is None:
extra_id_token_claims = {}
elif callable(extra_id_token_claims):
extra_id_token_claims = extra_id_token_claims(user_id, authentication_request['client_id'])
requested_claims = self._get_requested_claims_in(authentication_request, 'id_token')
user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
response['id_token'] = self._create_signed_id_token(authentication_request['client_id'], sub,
user_claims,
authentication_request.get('nonce'),
None, access_token.value,
extra_id_token_claims)
logger.debug('issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s',
response['id_token'], requested_claims, user_claims, extra_id_token_claims)
return response | [
"def",
"_do_code_exchange",
"(",
"self",
",",
"request",
",",
"# type: Dict[str, str]",
"extra_id_token_claims",
"=",
"None",
"# type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]",
")",
":",
"# type: (...) -> oic.message.AccessTokenResponse",
"token_request",
"=",
"AccessTokenRequest",
"(",
")",
".",
"from_dict",
"(",
"request",
")",
"try",
":",
"token_request",
".",
"verify",
"(",
")",
"except",
"MessageException",
"as",
"e",
":",
"raise",
"InvalidTokenRequest",
"(",
"str",
"(",
"e",
")",
",",
"token_request",
")",
"from",
"e",
"authentication_request",
"=",
"self",
".",
"authz_state",
".",
"get_authorization_request_for_code",
"(",
"token_request",
"[",
"'code'",
"]",
")",
"if",
"token_request",
"[",
"'client_id'",
"]",
"!=",
"authentication_request",
"[",
"'client_id'",
"]",
":",
"logger",
".",
"info",
"(",
"'Authorization code \\'%s\\' belonging to \\'%s\\' was used by \\'%s\\''",
",",
"token_request",
"[",
"'code'",
"]",
",",
"authentication_request",
"[",
"'client_id'",
"]",
",",
"token_request",
"[",
"'client_id'",
"]",
")",
"raise",
"InvalidAuthorizationCode",
"(",
"'{} unknown'",
".",
"format",
"(",
"token_request",
"[",
"'code'",
"]",
")",
")",
"if",
"token_request",
"[",
"'redirect_uri'",
"]",
"!=",
"authentication_request",
"[",
"'redirect_uri'",
"]",
":",
"raise",
"InvalidTokenRequest",
"(",
"'Invalid redirect_uri: {} != {}'",
".",
"format",
"(",
"token_request",
"[",
"'redirect_uri'",
"]",
",",
"authentication_request",
"[",
"'redirect_uri'",
"]",
")",
",",
"token_request",
")",
"sub",
"=",
"self",
".",
"authz_state",
".",
"get_subject_identifier_for_code",
"(",
"token_request",
"[",
"'code'",
"]",
")",
"user_id",
"=",
"self",
".",
"authz_state",
".",
"get_user_id_for_subject_identifier",
"(",
"sub",
")",
"response",
"=",
"AccessTokenResponse",
"(",
")",
"access_token",
"=",
"self",
".",
"authz_state",
".",
"exchange_code_for_token",
"(",
"token_request",
"[",
"'code'",
"]",
")",
"self",
".",
"_add_access_token_to_response",
"(",
"response",
",",
"access_token",
")",
"refresh_token",
"=",
"self",
".",
"authz_state",
".",
"create_refresh_token",
"(",
"access_token",
".",
"value",
")",
"if",
"refresh_token",
"is",
"not",
"None",
":",
"response",
"[",
"'refresh_token'",
"]",
"=",
"refresh_token",
"if",
"extra_id_token_claims",
"is",
"None",
":",
"extra_id_token_claims",
"=",
"{",
"}",
"elif",
"callable",
"(",
"extra_id_token_claims",
")",
":",
"extra_id_token_claims",
"=",
"extra_id_token_claims",
"(",
"user_id",
",",
"authentication_request",
"[",
"'client_id'",
"]",
")",
"requested_claims",
"=",
"self",
".",
"_get_requested_claims_in",
"(",
"authentication_request",
",",
"'id_token'",
")",
"user_claims",
"=",
"self",
".",
"userinfo",
".",
"get_claims_for",
"(",
"user_id",
",",
"requested_claims",
")",
"response",
"[",
"'id_token'",
"]",
"=",
"self",
".",
"_create_signed_id_token",
"(",
"authentication_request",
"[",
"'client_id'",
"]",
",",
"sub",
",",
"user_claims",
",",
"authentication_request",
".",
"get",
"(",
"'nonce'",
")",
",",
"None",
",",
"access_token",
".",
"value",
",",
"extra_id_token_claims",
")",
"logger",
".",
"debug",
"(",
"'issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s'",
",",
"response",
"[",
"'id_token'",
"]",
",",
"requested_claims",
",",
"user_claims",
",",
"extra_id_token_claims",
")",
"return",
"response"
]
| Handles a token request for exchanging an authorization code for an access token
(grant_type=authorization_code).
:param request: parsed http request parameters
:param extra_id_token_claims: any extra parameters to include in the signed ID Token, either as a dict-like
object or as a callable object accepting the local user identifier and client identifier which returns
any extra claims which might depend on the user id and/or client id.
:return: a token response containing a signed ID Token, an Access Token, and a Refresh Token
:raise InvalidTokenRequest: if the token request is invalid | [
"Handles",
"a",
"token",
"request",
"for",
"exchanging",
"an",
"authorization",
"code",
"for",
"an",
"access",
"token",
"(",
"grant_type",
"=",
"authorization_code",
")",
".",
":",
"param",
"request",
":",
"parsed",
"http",
"request",
"parameters",
":",
"param",
"extra_id_token_claims",
":",
"any",
"extra",
"parameters",
"to",
"include",
"in",
"the",
"signed",
"ID",
"Token",
"either",
"as",
"a",
"dict",
"-",
"like",
"object",
"or",
"as",
"a",
"callable",
"object",
"accepting",
"the",
"local",
"user",
"identifier",
"and",
"client",
"identifier",
"which",
"returns",
"any",
"extra",
"claims",
"which",
"might",
"depend",
"on",
"the",
"user",
"id",
"and",
"/",
"or",
"client",
"id",
".",
":",
"return",
":",
"a",
"token",
"response",
"containing",
"a",
"signed",
"ID",
"Token",
"an",
"Access",
"Token",
"and",
"a",
"Refresh",
"Token",
":",
"raise",
"InvalidTokenRequest",
":",
"if",
"the",
"token",
"request",
"is",
"invalid"
]
| python | train | 62.655172 |
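A sketch of the extra_id_token_claims hook the method above accepts — either a plain mapping or a callable of (user_id, client_id); the claim names here are illustrative, and the call is normally reached via the provider's token endpoint handler rather than directly:

def extra_claims(user_id, client_id):
    # returned claims are merged into the signed ID Token
    return {'preferred_username': user_id, 'azp': client_id}

# token_response = provider._do_code_exchange(parsed_request, extra_claims)
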
mitsei/dlkit | dlkit/json_/authorization/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L896-L926 | def get_authorizations_for_resource_and_function(self, resource_id, function_id):
"""Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_peers
# NOTE: This implementation currently ignores plenary and effective views
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'sourceId': str(resource_id),
'destinationId': str(function_id)},
**self._view_filter())).sort('_id', ASCENDING)
return objects.AuthorizationList(result, runtime=self._runtime) | [
"def",
"get_authorizations_for_resource_and_function",
"(",
"self",
",",
"resource_id",
",",
"function_id",
")",
":",
"# Implemented from template for",
"# osid.relationship.RelationshipLookupSession.get_relationships_for_peers",
"# NOTE: This implementation currently ignores plenary and effective views",
"collection",
"=",
"JSONClientValidated",
"(",
"'authorization'",
",",
"collection",
"=",
"'Authorization'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"result",
"=",
"collection",
".",
"find",
"(",
"dict",
"(",
"{",
"'sourceId'",
":",
"str",
"(",
"resource_id",
")",
",",
"'destinationId'",
":",
"str",
"(",
"function_id",
")",
"}",
",",
"*",
"*",
"self",
".",
"_view_filter",
"(",
")",
")",
")",
".",
"sort",
"(",
"'_id'",
",",
"ASCENDING",
")",
"return",
"objects",
".",
"AuthorizationList",
"(",
"result",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")"
]
| Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"a",
"list",
"of",
"Authorizations",
"associated",
"with",
"a",
"given",
"resource",
"."
]
| python | train | 53.741935 |
loli/medpy | medpy/filter/image.py | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/image.py#L287-L337 | def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculates a multi-dimensional sum filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
sum_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel.
"""
footprint = __make_footprint(input, size, footprint)
slicer = [slice(None, None, -1)] * footprint.ndim
return convolve(input, footprint[slicer], output, mode, cval, origin) | [
"def",
"sum_filter",
"(",
"input",
",",
"size",
"=",
"None",
",",
"footprint",
"=",
"None",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"\"reflect\"",
",",
"cval",
"=",
"0.0",
",",
"origin",
"=",
"0",
")",
":",
"footprint",
"=",
"__make_footprint",
"(",
"input",
",",
"size",
",",
"footprint",
")",
"slicer",
"=",
"[",
"slice",
"(",
"None",
",",
"None",
",",
"-",
"1",
")",
"]",
"*",
"footprint",
".",
"ndim",
"return",
"convolve",
"(",
"input",
",",
"footprint",
"[",
"slicer",
"]",
",",
"output",
",",
"mode",
",",
"cval",
",",
"origin",
")"
]
| r"""
Calculates a multi-dimensional sum filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
sum_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel. | [
"r",
"Calculates",
"a",
"multi",
"-",
"dimensional",
"sum",
"filter",
"."
]
| python | train | 39.333333 |
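A sanity-check sketch for the sum filter above: over a 3x3 all-ones footprint it should equal the mean filter scaled by the window size, assuming matching boundary modes.

import numpy as np
from scipy.ndimage import uniform_filter

img = np.arange(25, dtype=float).reshape(5, 5)
expected = uniform_filter(img, size=3, mode='reflect') * 9
# np.allclose(sum_filter(img, size=3), expected)  -> True
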
booktype/python-ooxml | ooxml/serialize.py | https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L505-L523 | def get_css_classes(document, style):
"""Returns CSS classes for this style.
This function will check all the styles specified style is based on and return their CSS classes.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
String representing all the CSS classes for this element.
>>> get_css_classes(doc, st)
'header1 normal'
"""
lst = [st.lower() for st in get_all_styles(document, style)[-1:]] + \
['{}-fontsize'.format(st.lower()) for st in get_all_styles(document, style)[-1:]]
return ' '.join(lst) | [
"def",
"get_css_classes",
"(",
"document",
",",
"style",
")",
":",
"lst",
"=",
"[",
"st",
".",
"lower",
"(",
")",
"for",
"st",
"in",
"get_all_styles",
"(",
"document",
",",
"style",
")",
"[",
"-",
"1",
":",
"]",
"]",
"+",
"[",
"'{}-fontsize'",
".",
"format",
"(",
"st",
".",
"lower",
"(",
")",
")",
"for",
"st",
"in",
"get_all_styles",
"(",
"document",
",",
"style",
")",
"[",
"-",
"1",
":",
"]",
"]",
"return",
"' '",
".",
"join",
"(",
"lst",
")"
]
| Returns CSS classes for this style.
This function will check all the styles specified style is based on and return their CSS classes.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
String representing all the CSS classes for this element.
>>> get_css_classes(doc, st)
'header1 normal' | [
"Returns",
"CSS",
"classes",
"for",
"this",
"style",
"."
]
| python | train | 33.052632 |
dahlia/sqlalchemy-imageattach | sqlalchemy_imageattach/store.py | https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/store.py#L143-L165 | def store(self, image, file):
"""Stores the actual data ``file`` of the given ``image``.
::
with open(imagefile, 'rb') as f:
store.store(image, f)
:param image: the image to store its actual data file
:type image: :class:`sqlalchemy_imageattach.entity.Image`
:param file: the image file to put
:type file: file-like object, :class:`file`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
elif not callable(getattr(file, 'read', None)):
raise TypeError('file must be a readable file-like object that '
'implements read() method, not ' + repr(file))
self.put_file(file, image.object_type, image.object_id,
image.width, image.height, image.mimetype,
not image.original) | [
"def",
"store",
"(",
"self",
",",
"image",
",",
"file",
")",
":",
"from",
".",
"entity",
"import",
"Image",
"if",
"not",
"isinstance",
"(",
"image",
",",
"Image",
")",
":",
"raise",
"TypeError",
"(",
"'image must be a sqlalchemy_imageattach.entity.'",
"'Image instance, not '",
"+",
"repr",
"(",
"image",
")",
")",
"elif",
"not",
"callable",
"(",
"getattr",
"(",
"file",
",",
"'read'",
",",
"None",
")",
")",
":",
"raise",
"TypeError",
"(",
"'file must be a readable file-like object that '",
"'implements read() method, not '",
"+",
"repr",
"(",
"file",
")",
")",
"self",
".",
"put_file",
"(",
"file",
",",
"image",
".",
"object_type",
",",
"image",
".",
"object_id",
",",
"image",
".",
"width",
",",
"image",
".",
"height",
",",
"image",
".",
"mimetype",
",",
"not",
"image",
".",
"original",
")"
]
| Stores the actual data ``file`` of the given ``image``.
::
with open(imagefile, 'rb') as f:
store.store(image, f)
:param image: the image to store its actual data file
:type image: :class:`sqlalchemy_imageattach.entity.Image`
:param file: the image file to put
:type file: file-like object, :class:`file` | [
"Stores",
"the",
"actual",
"data",
"file",
"of",
"the",
"given",
"image",
".",
"::"
]
| python | train | 43.608696 |
IdentityPython/pysaml2 | example/idp2/idp_uwsgi.py | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/example/idp2/idp_uwsgi.py#L462-L476 | def do_authentication(environ, start_response, authn_context, key,
redirect_uri):
"""
Display the login form
"""
logger.debug("Do authentication")
auth_info = AUTHN_BROKER.pick(authn_context)
if len(auth_info):
method, reference = auth_info[0]
logger.debug("Authn chosen: %s (ref=%s)", method, reference)
return method(environ, start_response, reference, key, redirect_uri)
else:
resp = Unauthorized("No usable authentication method")
return resp(environ, start_response) | [
"def",
"do_authentication",
"(",
"environ",
",",
"start_response",
",",
"authn_context",
",",
"key",
",",
"redirect_uri",
")",
":",
"logger",
".",
"debug",
"(",
"\"Do authentication\"",
")",
"auth_info",
"=",
"AUTHN_BROKER",
".",
"pick",
"(",
"authn_context",
")",
"if",
"len",
"(",
"auth_info",
")",
":",
"method",
",",
"reference",
"=",
"auth_info",
"[",
"0",
"]",
"logger",
".",
"debug",
"(",
"\"Authn chosen: %s (ref=%s)\"",
",",
"method",
",",
"reference",
")",
"return",
"method",
"(",
"environ",
",",
"start_response",
",",
"reference",
",",
"key",
",",
"redirect_uri",
")",
"else",
":",
"resp",
"=",
"Unauthorized",
"(",
"\"No usable authentication method\"",
")",
"return",
"resp",
"(",
"environ",
",",
"start_response",
")"
]
| Display the login form | [
"Display",
"the",
"login",
"form"
]
| python | train | 36.533333 |
python-cmd2/cmd2 | tasks.py | https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/tasks.py#L112-L117 | def livehtml(context):
"Launch webserver on http://localhost:8000 with rendered documentation"
builder = 'html'
outputdir = os.path.join(DOCS_BUILDDIR, builder)
cmdline = 'sphinx-autobuild -b {} {} {}'.format(builder, DOCS_SRCDIR, outputdir)
context.run(cmdline, pty=True) | [
"def",
"livehtml",
"(",
"context",
")",
":",
"builder",
"=",
"'html'",
"outputdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DOCS_BUILDDIR",
",",
"builder",
")",
"cmdline",
"=",
"'sphinx-autobuild -b {} {} {}'",
".",
"format",
"(",
"builder",
",",
"DOCS_SRCDIR",
",",
"outputdir",
")",
"context",
".",
"run",
"(",
"cmdline",
",",
"pty",
"=",
"True",
")"
]
| Launch webserver on http://localhost:8000 with rendered documentation | [
"Launch",
"webserver",
"on",
"http",
":",
"//",
"localhost",
":",
"8000",
"with",
"rendered",
"documentation"
]
| python | train | 47.833333 |
rstoneback/pysat | pysat/instruments/supermag_magnetometer.py | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/supermag_magnetometer.py#L197-L259 | def load_csv_data(fname, tag):
"""Load data from a comma separated SuperMAG file
Parameters
------------
fname : (str)
CSV SuperMAG file name
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame
"""
import re
if tag == "stations":
# Because there may be multiple operators, the default pandas reader
# cannot be used.
ddict = dict()
dkeys = list()
date_list = list()
# Open and read the file
with open(fname, "r") as fopen:
dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
"%Y")
for fline in fopen.readlines():
sline = [ll for ll in re.split(r'[,\n]+', fline) if len(ll) > 0]
if len(ddict.items()) == 0:
for kk in sline:
kk = re.sub("-", "_", kk)
ddict[kk] = list()
dkeys.append(kk)
else:
date_list.append(dtime)
for i,ll in enumerate(sline):
if i >= 1 and i <= 4:
ddict[dkeys[i]].append(float(ll))
elif i == 6:
ddict[dkeys[i]].append(int(ll))
elif i < len(dkeys):
ddict[dkeys[i]].append(ll)
else:
ddict[dkeys[-1]][-1] += " {:s}".format(ll)
# Create a data frame for this file
data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
else:
# Define the date parser
def parse_smag_date(dd):
return pysat.datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")
# Load the file into a data frame
data = pds.read_csv(fname, parse_dates={'datetime':[0]},
date_parser=parse_smag_date, index_col='datetime')
return data | [
"def",
"load_csv_data",
"(",
"fname",
",",
"tag",
")",
":",
"import",
"re",
"if",
"tag",
"==",
"\"stations\"",
":",
"# Because there may be multiple operators, the default pandas reader",
"# cannot be used.",
"ddict",
"=",
"dict",
"(",
")",
"dkeys",
"=",
"list",
"(",
")",
"date_list",
"=",
"list",
"(",
")",
"# Open and read the file",
"with",
"open",
"(",
"fname",
",",
"\"r\"",
")",
"as",
"fopen",
":",
"dtime",
"=",
"pds",
".",
"datetime",
".",
"strptime",
"(",
"fname",
".",
"split",
"(",
"\"_\"",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
",",
"\"%Y\"",
")",
"for",
"fline",
"in",
"fopen",
".",
"readlines",
"(",
")",
":",
"sline",
"=",
"[",
"ll",
"for",
"ll",
"in",
"re",
".",
"split",
"(",
"r'[,\\n]+'",
",",
"fline",
")",
"if",
"len",
"(",
"ll",
")",
">",
"0",
"]",
"if",
"len",
"(",
"ddict",
".",
"items",
"(",
")",
")",
"==",
"0",
":",
"for",
"kk",
"in",
"sline",
":",
"kk",
"=",
"re",
".",
"sub",
"(",
"\"-\"",
",",
"\"_\"",
",",
"kk",
")",
"ddict",
"[",
"kk",
"]",
"=",
"list",
"(",
")",
"dkeys",
".",
"append",
"(",
"kk",
")",
"else",
":",
"date_list",
".",
"append",
"(",
"dtime",
")",
"for",
"i",
",",
"ll",
"in",
"enumerate",
"(",
"sline",
")",
":",
"if",
"i",
">=",
"1",
"and",
"i",
"<=",
"4",
":",
"ddict",
"[",
"dkeys",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"float",
"(",
"ll",
")",
")",
"elif",
"i",
"==",
"6",
":",
"ddict",
"[",
"dkeys",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"int",
"(",
"ll",
")",
")",
"elif",
"i",
"<",
"len",
"(",
"dkeys",
")",
":",
"ddict",
"[",
"dkeys",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"ll",
")",
"else",
":",
"ddict",
"[",
"dkeys",
"[",
"-",
"1",
"]",
"]",
"[",
"-",
"1",
"]",
"+=",
"\" {:s}\"",
".",
"format",
"(",
"ll",
")",
"# Create a data frame for this file",
"data",
"=",
"pds",
".",
"DataFrame",
"(",
"ddict",
",",
"index",
"=",
"date_list",
",",
"columns",
"=",
"ddict",
".",
"keys",
"(",
")",
")",
"else",
":",
"# Define the date parser",
"def",
"parse_smag_date",
"(",
"dd",
")",
":",
"return",
"pysat",
".",
"datetime",
".",
"strptime",
"(",
"dd",
",",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"# Load the file into a data frame",
"data",
"=",
"pds",
".",
"read_csv",
"(",
"fname",
",",
"parse_dates",
"=",
"{",
"'datetime'",
":",
"[",
"0",
"]",
"}",
",",
"date_parser",
"=",
"parse_smag_date",
",",
"index_col",
"=",
"'datetime'",
")",
"return",
"data"
]
| Load data from a comma separated SuperMAG file
Parameters
------------
fname : (str)
CSV SuperMAG file name
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame | [
"Load",
"data",
"from",
"a",
"comma",
"separated",
"SuperMAG",
"file"
]
| python | train | 34.650794 |
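The nested parse_smag_date above relies on the old pandas.datetime alias (deprecated and later removed from pandas); the stdlib equivalent, shown standalone:

from datetime import datetime

def parse_smag_date(dd):
    return datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")

print(parse_smag_date("2018-01-01 00:00:00"))  # -> datetime(2018, 1, 1, 0, 0)
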
saltstack/salt | salt/modules/win_task.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_task.py#L1277-L1316 | def status(name, location='\\'):
r'''
Determine the status of a task. Is it Running, Queued, Ready, etc.
:param str name: The name of the task for which to return the status
:param str location: A string value representing the location of the task.
Default is '\\' which is the root for the task scheduler
(C:\Windows\System32\tasks).
:return: The current status of the task. Will be one of the following:
- Unknown
- Disabled
- Queued
- Ready
- Running
:rtype: string
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_status <task_name>
'''
# Check for existing folder
if name not in list_tasks(location):
return '{0} not found in {1}'.format(name, location)
# connect to the task scheduler
with salt.utils.winapi.Com():
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()
# get the folder where the task is defined
task_folder = task_service.GetFolder(location)
task = task_folder.GetTask(name)
return states[task.State] | [
"def",
"status",
"(",
"name",
",",
"location",
"=",
"'\\\\'",
")",
":",
"# Check for existing folder",
"if",
"name",
"not",
"in",
"list_tasks",
"(",
"location",
")",
":",
"return",
"'{0} not found in {1}'",
".",
"format",
"(",
"name",
",",
"location",
")",
"# connect to the task scheduler",
"with",
"salt",
".",
"utils",
".",
"winapi",
".",
"Com",
"(",
")",
":",
"task_service",
"=",
"win32com",
".",
"client",
".",
"Dispatch",
"(",
"\"Schedule.Service\"",
")",
"task_service",
".",
"Connect",
"(",
")",
"# get the folder where the task is defined",
"task_folder",
"=",
"task_service",
".",
"GetFolder",
"(",
"location",
")",
"task",
"=",
"task_folder",
".",
"GetTask",
"(",
"name",
")",
"return",
"states",
"[",
"task",
".",
"State",
"]"
]
| r'''
Determine the status of a task. Is it Running, Queued, Ready, etc.
:param str name: The name of the task for which to return the status
:param str location: A string value representing the location of the task.
Default is '\\' which is the root for the task scheduler
(C:\Windows\System32\tasks).
:return: The current status of the task. Will be one of the following:
- Unknown
- Disabled
- Queued
- Ready
- Running
:rtype: string
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_status <task_name> | [
"r",
"Determine",
"the",
"status",
"of",
"a",
"task",
".",
"Is",
"it",
"Running",
"Queued",
"Ready",
"etc",
"."
]
| python | train | 26.7 |
OzymandiasTheGreat/python-libinput | libinput/device.py | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1892-L1908 | def has_button(self, button):
"""Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_pointer_has_button(
self._handle, button)
assert rc >= 0, 'This device is not a pointer device'
return bool(rc) | [
"def",
"has_button",
"(",
"self",
",",
"button",
")",
":",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_pointer_has_button",
"(",
"self",
".",
"_handle",
",",
"button",
")",
"assert",
"rc",
">=",
"0",
",",
"'This device is not a pointer device'",
"return",
"bool",
"(",
"rc",
")"
]
| Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError | [
"Check",
"if",
"this",
"device",
"has",
"a",
"given",
"button",
"."
]
| python | train | 25.705882 |
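A usage sketch; button codes come from input.h as the docstring notes (0x110 is BTN_LEFT), and the device object wiring below is hypothetical — it would come from an initialized libinput context:

BTN_LEFT = 0x110  # from linux/input-event-codes.h

# if device.has_button(BTN_LEFT):
#     print('pointer device with a left button')
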
cloudant/python-cloudant | src/cloudant/design_document.py | https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/design_document.py#L510-L549 | def save(self):
"""
Saves changes made to the locally cached DesignDocument object's data
structures to the remote database. If the design document does not
exist remotely then it is created in the remote database. If the object
does exist remotely then the design document is updated remotely. In
either case the locally cached DesignDocument object is also updated
accordingly based on the successful response of the operation.
"""
if self.views:
if self.get('language', None) != QUERY_LANGUAGE:
for view_name, view in self.iterviews():
if isinstance(view, QueryIndexView):
raise CloudantDesignDocumentException(104, view_name)
else:
for view_name, view in self.iterviews():
if not isinstance(view, QueryIndexView):
raise CloudantDesignDocumentException(105, view_name)
if self.indexes:
if self.get('language', None) != QUERY_LANGUAGE:
for index_name, search in self.iterindexes():
# Check the instance of the javascript search function
if not isinstance(search['index'], STRTYPE):
raise CloudantDesignDocumentException(106, index_name)
else:
for index_name, index in self.iterindexes():
if not isinstance(index['index'], dict):
raise CloudantDesignDocumentException(107, index_name)
for prop in self._nested_object_names:
if not getattr(self, prop):
# Ensure empty dict for each sub-object is not saved remotely.
self.__delitem__(prop)
super(DesignDocument, self).save()
for prop in self._nested_object_names:
# Ensure views, indexes, and lists dict exist in locally cached DesignDocument.
getattr(self, prop, self.setdefault(prop, dict())) | [
"def",
"save",
"(",
"self",
")",
":",
"if",
"self",
".",
"views",
":",
"if",
"self",
".",
"get",
"(",
"'language'",
",",
"None",
")",
"!=",
"QUERY_LANGUAGE",
":",
"for",
"view_name",
",",
"view",
"in",
"self",
".",
"iterviews",
"(",
")",
":",
"if",
"isinstance",
"(",
"view",
",",
"QueryIndexView",
")",
":",
"raise",
"CloudantDesignDocumentException",
"(",
"104",
",",
"view_name",
")",
"else",
":",
"for",
"view_name",
",",
"view",
"in",
"self",
".",
"iterviews",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"view",
",",
"QueryIndexView",
")",
":",
"raise",
"CloudantDesignDocumentException",
"(",
"105",
",",
"view_name",
")",
"if",
"self",
".",
"indexes",
":",
"if",
"self",
".",
"get",
"(",
"'language'",
",",
"None",
")",
"!=",
"QUERY_LANGUAGE",
":",
"for",
"index_name",
",",
"search",
"in",
"self",
".",
"iterindexes",
"(",
")",
":",
"# Check the instance of the javascript search function",
"if",
"not",
"isinstance",
"(",
"search",
"[",
"'index'",
"]",
",",
"STRTYPE",
")",
":",
"raise",
"CloudantDesignDocumentException",
"(",
"106",
",",
"index_name",
")",
"else",
":",
"for",
"index_name",
",",
"index",
"in",
"self",
".",
"iterindexes",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"index",
"[",
"'index'",
"]",
",",
"dict",
")",
":",
"raise",
"CloudantDesignDocumentException",
"(",
"107",
",",
"index_name",
")",
"for",
"prop",
"in",
"self",
".",
"_nested_object_names",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"prop",
")",
":",
"# Ensure empty dict for each sub-object is not saved remotely.",
"self",
".",
"__delitem__",
"(",
"prop",
")",
"super",
"(",
"DesignDocument",
",",
"self",
")",
".",
"save",
"(",
")",
"for",
"prop",
"in",
"self",
".",
"_nested_object_names",
":",
"# Ensure views, indexes, and lists dict exist in locally cached DesignDocument.",
"getattr",
"(",
"self",
",",
"prop",
",",
"self",
".",
"setdefault",
"(",
"prop",
",",
"dict",
"(",
")",
")",
")"
]
| Saves changes made to the locally cached DesignDocument object's data
structures to the remote database. If the design document does not
exist remotely then it is created in the remote database. If the object
does exist remotely then the design document is updated remotely. In
either case the locally cached DesignDocument object is also updated
accordingly based on the successful response of the operation. | [
"Saves",
"changes",
"made",
"to",
"the",
"locally",
"cached",
"DesignDocument",
"object",
"s",
"data",
"structures",
"to",
"the",
"remote",
"database",
".",
"If",
"the",
"design",
"document",
"does",
"not",
"exist",
"remotely",
"then",
"it",
"is",
"created",
"in",
"the",
"remote",
"database",
".",
"If",
"the",
"object",
"does",
"exist",
"remotely",
"then",
"the",
"design",
"document",
"is",
"updated",
"remotely",
".",
"In",
"either",
"case",
"the",
"locally",
"cached",
"DesignDocument",
"object",
"is",
"also",
"updated",
"accordingly",
"based",
"on",
"the",
"successful",
"response",
"of",
"the",
"operation",
"."
]
| python | train | 49.775 |
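The validation in save() above reduces to one rule per sub-object, assuming QUERY_LANGUAGE == 'query': query-language design documents take only QueryIndexView views (and dict index bodies), JavaScript ones take only plain views. A compact standalone restatement:

def view_ok(language, is_query_index_view):
    # mirrors the branch structure in save()
    if language == 'query':
        return is_query_index_view
    return not is_query_index_view

assert view_ok('query', True) and not view_ok('javascript', True)
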
anchore/anchore | anchore/cli/logs.py | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/logs.py#L39-L84 | def format(self, record):
"""
Modified from logging/__init__.py in python 2.7 lib
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno >= logging.getLevelName('ERROR'):
s = self._err_format % record.__dict__
else:
s = self._fmt % record.__dict__
if record.exc_info:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# Trim the trailing newline
if s[-1:] == "\n":
s = s[:-1]
try:
# Delimit with the colon
s = s + ': ' + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
# Reset to avoid using the cache since this logger changes the exception format
record.exc_text = None
return s | [
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"record",
".",
"message",
"=",
"record",
".",
"getMessage",
"(",
")",
"if",
"self",
".",
"usesTime",
"(",
")",
":",
"record",
".",
"asctime",
"=",
"self",
".",
"formatTime",
"(",
"record",
",",
"self",
".",
"datefmt",
")",
"if",
"record",
".",
"levelno",
">=",
"logging",
".",
"getLevelName",
"(",
"'ERROR'",
")",
":",
"s",
"=",
"self",
".",
"_err_format",
"%",
"record",
".",
"__dict__",
"else",
":",
"s",
"=",
"self",
".",
"_fmt",
"%",
"record",
".",
"__dict__",
"if",
"record",
".",
"exc_info",
":",
"record",
".",
"exc_text",
"=",
"self",
".",
"formatException",
"(",
"record",
".",
"exc_info",
")",
"if",
"record",
".",
"exc_text",
":",
"# Trim the trailing newline",
"if",
"s",
"[",
"-",
"1",
":",
"]",
"==",
"\"\\n\"",
":",
"s",
"=",
"s",
"[",
":",
"-",
"1",
"]",
"try",
":",
"# Delimit with the colon",
"s",
"=",
"s",
"+",
"': '",
"+",
"record",
".",
"exc_text",
"except",
"UnicodeError",
":",
"# Sometimes filenames have non-ASCII chars, which can lead",
"# to errors when s is Unicode and record.exc_text is str",
"# See issue 8924.",
"# We also use replace for when there are multiple",
"# encodings, e.g. UTF-8 for the filesystem and latin-1",
"# for a script. See issue 13232.",
"s",
"=",
"s",
"+",
"record",
".",
"exc_text",
".",
"decode",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
",",
"'replace'",
")",
"# Reset to avoid using the cache since this logger changes the exception format",
"record",
".",
"exc_text",
"=",
"None",
"return",
"s"
]
| Modified from logging/__init__.py in python 2.7 lib
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message. | [
"Modified",
"from",
"logging",
"/",
"__init__",
".",
"py",
"in",
"python",
"2",
".",
"7",
"lib"
]
| python | train | 43.652174 |
joeferraro/mm | mm/sforce/base.py | https://github.com/joeferraro/mm/blob/43dce48a2249faab4d872c228ada9fbdbeec147b/mm/sforce/base.py#L523-L529 | def query(self, queryString):
'''
Executes a query against the specified object and returns data that matches
the specified criteria.
'''
self._setHeaders('query')
return self._sforce.service.query(queryString) | [
"def",
"query",
"(",
"self",
",",
"queryString",
")",
":",
"self",
".",
"_setHeaders",
"(",
"'query'",
")",
"return",
"self",
".",
"_sforce",
".",
"service",
".",
"query",
"(",
"queryString",
")"
]
| Executes a query against the specified object and returns data that matches
the specified criteria. | [
"Executes",
"a",
"query",
"against",
"the",
"specified",
"object",
"and",
"returns",
"data",
"that",
"matches",
"the",
"specified",
"criteria",
"."
]
| python | train | 32.571429 |
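A SOQL usage sketch for the wrapper above; session setup through the surrounding SforceBaseClient is assumed to have happened already, and the result field access follows the suds-style records the library returns:

soql = "SELECT Id, Name FROM Account WHERE Name LIKE 'A%'"
# result = client.query(soql)
# for record in result.records:
#     print(record.Id, record.Name)
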
JarryShaw/PyPCAPKit | src/protocols/internet/hip.py | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L299-L333 | def _read_para_unassigned(self, code, cbit, clen, *, desc, length, version):
"""Read HIP unassigned parameters.
Structure of HIP unassigned parameters [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type |C| Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
/ Contents /
/ +-+-+-+-+-+-+-+-+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 para.type Parameter Type
1 15 para.critical Critical Bit
2 16 para.length Length of Contents
4 32 para.contents Contents
- - - Padding
"""
unassigned = dict(
type=desc,
critical=cbit,
length=clen,
contents=self._read_fileng(clen),
)
plen = length - clen
if plen:
self._read_fileng(plen)
return unassigned | [
"def",
"_read_para_unassigned",
"(",
"self",
",",
"code",
",",
"cbit",
",",
"clen",
",",
"*",
",",
"desc",
",",
"length",
",",
"version",
")",
":",
"unassigned",
"=",
"dict",
"(",
"type",
"=",
"desc",
",",
"critical",
"=",
"cbit",
",",
"length",
"=",
"clen",
",",
"contents",
"=",
"self",
".",
"_read_fileng",
"(",
"clen",
")",
",",
")",
"plen",
"=",
"length",
"-",
"clen",
"if",
"plen",
":",
"self",
".",
"_read_fileng",
"(",
"plen",
")",
"return",
"unassigned"
]
| Read HIP unassigned parameters.
Structure of HIP unassigned parameters [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type |C| Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
/ Contents /
/ +-+-+-+-+-+-+-+-+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 para.type Parameter Type
1 15 para.critical Critical Bit
2 16 para.length Length of Contents
4 32 para.contents Contents
- - - Padding | [
"Read",
"HIP",
"unassigned",
"parameters",
"."
]
| python | train | 46.542857 |
gofed/gofedlib | gofedlib/go/symbolsextractor/extractor.py | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/symbolsextractor/extractor.py#L203-L222 | def _mergeGoSymbols(self, jsons = []):
"""
		Exported symbols for a given package do not have any prefix.
So I can drop all import paths that are file specific and merge
all symbols.
		Assuming all files in the given package have mutually exclusive symbols.
"""
# <siXy> imports are per file, exports are per package
# on the highest level we have: pkgname, types, funcs, vars, imports.
symbols = {}
symbols["types"] = []
symbols["funcs"] = []
symbols["vars"] = []
for file_json in jsons:
symbols["types"] += file_json["types"]
symbols["funcs"] += file_json["funcs"]
symbols["vars"] += file_json["vars"]
return symbols | [
"def",
"_mergeGoSymbols",
"(",
"self",
",",
"jsons",
"=",
"[",
"]",
")",
":",
"# <siXy> imports are per file, exports are per package",
"# on the highest level we have: pkgname, types, funcs, vars, imports.",
"symbols",
"=",
"{",
"}",
"symbols",
"[",
"\"types\"",
"]",
"=",
"[",
"]",
"symbols",
"[",
"\"funcs\"",
"]",
"=",
"[",
"]",
"symbols",
"[",
"\"vars\"",
"]",
"=",
"[",
"]",
"for",
"file_json",
"in",
"jsons",
":",
"symbols",
"[",
"\"types\"",
"]",
"+=",
"file_json",
"[",
"\"types\"",
"]",
"symbols",
"[",
"\"funcs\"",
"]",
"+=",
"file_json",
"[",
"\"funcs\"",
"]",
"symbols",
"[",
"\"vars\"",
"]",
"+=",
"file_json",
"[",
"\"vars\"",
"]",
"return",
"symbols"
]
| Exported symbols for a given package do not have any prefix.
		So I can drop all import paths that are file specific and merge
		all symbols.
		Assuming all files in the given package have mutually exclusive symbols. | [
"Exported",
"symbols",
"for",
"a",
"given",
"package",
"does",
"not",
"have",
"any",
"prefix",
".",
"So",
"I",
"can",
"drop",
"all",
"import",
"paths",
"that",
"are",
"file",
"specific",
"and",
"merge",
"all",
"symbols",
".",
"Assuming",
"all",
"files",
"in",
"the",
"given",
"package",
"has",
"mutual",
"exclusive",
"symbols",
"."
]
| python | train | 31.75 |
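
A standalone sketch of the merge described above, with two hand-written per-file symbol dicts (the symbol names are made up):

file_a = {"types": ["Foo"], "funcs": ["NewFoo"], "vars": []}
file_b = {"types": [], "funcs": ["helper"], "vars": ["DefaultTimeout"]}
merged = {"types": [], "funcs": [], "vars": []}
for file_json in (file_a, file_b):
    for key in merged:  # concatenate the per-file lists; duplicates are not checked
        merged[key] += file_json[key]
print(merged)  # {'types': ['Foo'], 'funcs': ['NewFoo', 'helper'], 'vars': ['DefaultTimeout']}
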
marshmallow-code/marshmallow | src/marshmallow/fields.py | https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/fields.py#L767-L772 | def _format_num(self, value):
"""Return the number value for value, given this field's `num_type`."""
# (value is True or value is False) is ~5x faster than isinstance(value, bool)
if value is True or value is False:
raise TypeError('value must be a Number, not a boolean.')
return self.num_type(value) | [
"def",
"_format_num",
"(",
"self",
",",
"value",
")",
":",
"# (value is True or value is False) is ~5x faster than isinstance(value, bool)",
"if",
"value",
"is",
"True",
"or",
"value",
"is",
"False",
":",
"raise",
"TypeError",
"(",
"'value must be a Number, not a boolean.'",
")",
"return",
"self",
".",
"num_type",
"(",
"value",
")"
]
| Return the number value for value, given this field's `num_type`. | [
"Return",
"the",
"number",
"value",
"for",
"value",
"given",
"this",
"field",
"s",
"num_type",
"."
]
| python | train | 56.833333 |
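
The identity checks above exist because `bool` subclasses `int`, so an `isinstance`-based number check would silently accept booleans; a runnable reproduction of the guard:

def format_num(value, num_type=int):
    # `value is True or value is False` is the fast path noted in the source comment
    if value is True or value is False:
        raise TypeError('value must be a Number, not a boolean.')
    return num_type(value)

print(format_num("42"))  # 42
print(format_num(3.9))   # 3
# format_num(True) raises TypeError, even though int(True) == 1
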
mcs07/ChemDataExtractor | chemdataextractor/doc/text.py | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/text.py#L505-L524 | def records(self):
"""Return a list of records for this sentence."""
compounds = ModelList()
seen_labels = set()
# Ensure no control characters are sent to a parser (need to be XML compatible)
tagged_tokens = [(CONTROL_RE.sub('', token), tag) for token, tag in self.tagged_tokens]
for parser in self.parsers:
for record in parser.parse(tagged_tokens):
p = record.serialize()
if not p: # TODO: Potential performance issues?
continue
# Skip duplicate records
if record in compounds:
continue
# Skip just labels that have already been seen (bit of a hack)
if all(k in {'labels', 'roles'} for k in p.keys()) and set(record.labels).issubset(seen_labels):
continue
seen_labels.update(record.labels)
compounds.append(record)
return compounds | [
"def",
"records",
"(",
"self",
")",
":",
"compounds",
"=",
"ModelList",
"(",
")",
"seen_labels",
"=",
"set",
"(",
")",
"# Ensure no control characters are sent to a parser (need to be XML compatible)",
"tagged_tokens",
"=",
"[",
"(",
"CONTROL_RE",
".",
"sub",
"(",
"''",
",",
"token",
")",
",",
"tag",
")",
"for",
"token",
",",
"tag",
"in",
"self",
".",
"tagged_tokens",
"]",
"for",
"parser",
"in",
"self",
".",
"parsers",
":",
"for",
"record",
"in",
"parser",
".",
"parse",
"(",
"tagged_tokens",
")",
":",
"p",
"=",
"record",
".",
"serialize",
"(",
")",
"if",
"not",
"p",
":",
"# TODO: Potential performance issues?",
"continue",
"# Skip duplicate records",
"if",
"record",
"in",
"compounds",
":",
"continue",
"# Skip just labels that have already been seen (bit of a hack)",
"if",
"all",
"(",
"k",
"in",
"{",
"'labels'",
",",
"'roles'",
"}",
"for",
"k",
"in",
"p",
".",
"keys",
"(",
")",
")",
"and",
"set",
"(",
"record",
".",
"labels",
")",
".",
"issubset",
"(",
"seen_labels",
")",
":",
"continue",
"seen_labels",
".",
"update",
"(",
"record",
".",
"labels",
")",
"compounds",
".",
"append",
"(",
"record",
")",
"return",
"compounds"
]
| Return a list of records for this sentence. | [
"Return",
"a",
"list",
"of",
"records",
"for",
"this",
"sentence",
"."
]
| python | train | 48.6 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/ancillaries/ancillary_feature.py | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/ancillaries/ancillary_feature.py#L143-L149 | def get_instances(feature_name):
"""Return all all instances that compute `feature_name`"""
feats = []
for ft in AncillaryFeature.features:
if ft.feature_name == feature_name:
feats.append(ft)
return feats | [
"def",
"get_instances",
"(",
"feature_name",
")",
":",
"feats",
"=",
"[",
"]",
"for",
"ft",
"in",
"AncillaryFeature",
".",
"features",
":",
"if",
"ft",
".",
"feature_name",
"==",
"feature_name",
":",
"feats",
".",
"append",
"(",
"ft",
")",
"return",
"feats"
]
| Return all instances that compute `feature_name` | [
"Return",
"all",
"all",
"instances",
"that",
"compute",
"feature_name"
]
| python | train | 37 |
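
The lookup is a plain linear scan of the class-level registry; a self-contained equivalent (the namedtuple registry is illustrative):

from collections import namedtuple
Feature = namedtuple("Feature", ["feature_name"])
registry = [Feature("area_um"), Feature("deform"), Feature("area_um")]
def get_instances(feature_name):
    return [ft for ft in registry if ft.feature_name == feature_name]
print(len(get_instances("area_um")))  # 2 -- every matching instance is returned
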
Dullage/starlingbank | starlingbank/__init__.py | https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L111-L129 | def get_image(self, filename: str=None) -> None:
"""Download the photo associated with a Savings Goal."""
if filename is None:
filename = "{0}.png".format(self.name)
endpoint = "/account/{0}/savings-goals/{1}/photo".format(
self._account_uid,
self.uid
)
response = get(
_url(endpoint, self._sandbox),
headers=self._auth_headers
)
response.raise_for_status()
base64_image = response.json()['base64EncodedPhoto']
with open(filename, 'wb') as file:
file.write(b64decode(base64_image)) | [
"def",
"get_image",
"(",
"self",
",",
"filename",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"\"{0}.png\"",
".",
"format",
"(",
"self",
".",
"name",
")",
"endpoint",
"=",
"\"/account/{0}/savings-goals/{1}/photo\"",
".",
"format",
"(",
"self",
".",
"_account_uid",
",",
"self",
".",
"uid",
")",
"response",
"=",
"get",
"(",
"_url",
"(",
"endpoint",
",",
"self",
".",
"_sandbox",
")",
",",
"headers",
"=",
"self",
".",
"_auth_headers",
")",
"response",
".",
"raise_for_status",
"(",
")",
"base64_image",
"=",
"response",
".",
"json",
"(",
")",
"[",
"'base64EncodedPhoto'",
"]",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"b64decode",
"(",
"base64_image",
")",
")"
]
| Download the photo associated with a Savings Goal. | [
"Download",
"the",
"photo",
"associated",
"with",
"a",
"Savings",
"Goal",
"."
]
| python | train | 32.105263 |
wandb/client | wandb/vendor/prompt_toolkit/layout/prompt.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/prompt.py#L101-L111 | def _get_arg_tokens(cli):
"""
Tokens for the arg-prompt.
"""
arg = cli.input_processor.arg
return [
(Token.Prompt.Arg, '(arg: '),
(Token.Prompt.Arg.Text, str(arg)),
(Token.Prompt.Arg, ') '),
] | [
"def",
"_get_arg_tokens",
"(",
"cli",
")",
":",
"arg",
"=",
"cli",
".",
"input_processor",
".",
"arg",
"return",
"[",
"(",
"Token",
".",
"Prompt",
".",
"Arg",
",",
"'(arg: '",
")",
",",
"(",
"Token",
".",
"Prompt",
".",
"Arg",
".",
"Text",
",",
"str",
"(",
"arg",
")",
")",
",",
"(",
"Token",
".",
"Prompt",
".",
"Arg",
",",
"') '",
")",
",",
"]"
]
| Tokens for the arg-prompt. | [
"Tokens",
"for",
"the",
"arg",
"-",
"prompt",
"."
]
| python | train | 21 |
pudo-attic/scrapekit | scrapekit/tasks.py | https://github.com/pudo-attic/scrapekit/blob/cfd258120922fcd571430cdf00ba50f3cf18dc15/scrapekit/tasks.py#L38-L44 | def _spawn(self):
""" Initialize the queue and the threads. """
self.queue = Queue(maxsize=self.num_threads * 10)
for i in range(self.num_threads):
t = Thread(target=self._consume)
t.daemon = True
t.start() | [
"def",
"_spawn",
"(",
"self",
")",
":",
"self",
".",
"queue",
"=",
"Queue",
"(",
"maxsize",
"=",
"self",
".",
"num_threads",
"*",
"10",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_threads",
")",
":",
"t",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"_consume",
")",
"t",
".",
"daemon",
"=",
"True",
"t",
".",
"start",
"(",
")"
]
| Initialize the queue and the threads. | [
"Initialize",
"the",
"queue",
"and",
"the",
"threads",
"."
]
| python | train | 37.142857 |
foremast/foremast | src/foremast/configs/prepare_configs.py | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/configs/prepare_configs.py#L65-L93 | def process_configs(file_lookup, app_config_format, pipeline_config):
"""Processes the configs from lookup sources.
Args:
file_lookup (FileLookup): Source to look for file/config
app_config_format (str): The format for application config files.
pipeline_config (str): Name/path of the pipeline config
Returns:
        dict: Retrieved application config
"""
app_configs = collections.defaultdict(dict)
for env in ENVS:
file_json = app_config_format.format(env=env)
try:
env_config = file_lookup.json(filename=file_json)
app_configs[env] = apply_region_configs(env_config)
except FileNotFoundError:
LOG.critical('Application configuration not available for %s.', env)
continue
try:
app_configs['pipeline'] = file_lookup.json(filename=pipeline_config)
except FileNotFoundError:
LOG.warning('Unable to process pipeline.json. Using defaults.')
app_configs['pipeline'] = {'env': ['stage', 'prod']}
LOG.debug('Application configs:\n%s', app_configs)
return app_configs | [
"def",
"process_configs",
"(",
"file_lookup",
",",
"app_config_format",
",",
"pipeline_config",
")",
":",
"app_configs",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"for",
"env",
"in",
"ENVS",
":",
"file_json",
"=",
"app_config_format",
".",
"format",
"(",
"env",
"=",
"env",
")",
"try",
":",
"env_config",
"=",
"file_lookup",
".",
"json",
"(",
"filename",
"=",
"file_json",
")",
"app_configs",
"[",
"env",
"]",
"=",
"apply_region_configs",
"(",
"env_config",
")",
"except",
"FileNotFoundError",
":",
"LOG",
".",
"critical",
"(",
"'Application configuration not available for %s.'",
",",
"env",
")",
"continue",
"try",
":",
"app_configs",
"[",
"'pipeline'",
"]",
"=",
"file_lookup",
".",
"json",
"(",
"filename",
"=",
"pipeline_config",
")",
"except",
"FileNotFoundError",
":",
"LOG",
".",
"warning",
"(",
"'Unable to process pipeline.json. Using defaults.'",
")",
"app_configs",
"[",
"'pipeline'",
"]",
"=",
"{",
"'env'",
":",
"[",
"'stage'",
",",
"'prod'",
"]",
"}",
"LOG",
".",
"debug",
"(",
"'Application configs:\\n%s'",
",",
"app_configs",
")",
"return",
"app_configs"
]
| Processes the configs from lookup sources.
Args:
file_lookup (FileLookup): Source to look for file/config
app_config_format (str): The format for application config files.
pipeline_config (str): Name/path of the pipeline config
Returns:
dict: Retreived application config | [
"Processes",
"the",
"configs",
"from",
"lookup",
"sources",
"."
]
| python | train | 37.827586 |
inasafe/inasafe | safe/gui/tools/wizard/step_kw30_field.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw30_field.py#L203-L306 | def set_widgets(self):
"""Set widgets on the Field tab."""
self.clear_further_steps()
purpose = self.parent.step_kw_purpose.selected_purpose()
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
unit = self.parent.step_kw_unit.selected_unit()
layer_mode = self.parent.step_kw_layermode.selected_layermode()
# Set mode
# Notes(IS) I hard coded this one, need to fix it after it's working.
field_key = self.parent.field_keyword_for_the_layer()
if field_key == population_count_field['key']:
self.mode = MULTI_MODE
else:
self.mode = SINGLE_MODE
# Filtering based on field type
layer_field = definition(field_key)
layer_field_types = deepcopy(layer_field['type'])
if not isinstance(layer_field_types, list):
layer_field_types = [layer_field_types]
# Remove string for continuous layer
if layer_mode == layer_mode_continuous and unit:
if QVariant.String in layer_field_types:
layer_field_types.remove(QVariant.String)
if purpose == layer_purpose_aggregation:
question_text = field_question_aggregation
elif layer_mode == layer_mode_continuous and unit:
subcategory_unit_relation = get_question_text(
'%s_%s_question' % (subcategory['key'], unit['key']))
if 'MISSING' in subcategory_unit_relation:
subcategory_unit_relation = self.tr(
'{subcategory} in {unit} unit').format(
subcategory=subcategory['name'].lower(),
unit=unit['plural_name'])
question_text = field_question_subcategory_unit % (
purpose['name'],
subcategory['name'],
unit['name'],
subcategory_unit_relation)
else:
question_text = field_question_subcategory_classified % (
subcategory['name'].lower(), subcategory['name'].lower())
if self.mode == SINGLE_MODE:
question_text += tr('\nYou can select 1 field only.')
self.lstFields.setSelectionMode(QAbstractItemView.SingleSelection)
elif self.mode == MULTI_MODE:
question_text += tr(
'\nYou can select more than 1 field. InaSAFE will sum up the '
'value of the fields that you choose.')
self.lstFields.setSelectionMode(
QAbstractItemView.ExtendedSelection)
self.lblSelectField.setText(question_text)
self.lstFields.clear()
default_item = None
for field in self.parent.layer.fields():
# Skip if it's not in the field types requirement
if field.type() not in layer_field_types:
continue
field_name = field.name()
item = QListWidgetItem(field_name, self.lstFields)
item.setData(Qt.UserRole, field_name)
# Select the item if it match the unit's default_attribute
if unit and 'default_attribute' in unit \
and field_name == unit['default_attribute']:
default_item = item
# For continuous data, gray out id, gid, fid and text fields
if self.parent.step_kw_layermode.\
selected_layermode() == layer_mode_continuous and unit:
field_type = field.type()
if field_type > 9 or re.match('.{0,2}id$', field_name, re.I):
continue # Don't show unmatched field type
if default_item:
self.lstFields.setCurrentItem(default_item)
self.lblDescribeField.clear()
# Set values based on existing keywords (if already assigned)
field_keyword = self.parent.field_keyword_for_the_layer()
inasafe_field_keywords = self.parent.get_existing_keyword(
'inasafe_fields')
if inasafe_field_keywords:
fields = inasafe_field_keywords.get(field_keyword)
if isinstance(fields, str):
fields = [fields]
if fields:
option_fields = []
for index in range(self.lstFields.count()):
option_fields.append(
str(self.lstFields.item(index).text()))
for field in fields:
if field in option_fields:
self.lstFields.item(option_fields.index(
field)).setSelected(True)
self.auto_select_one_item(self.lstFields)
if self.selected_fields():
self.parent.pbnNext.setEnabled(True)
else:
self.parent.pbnNext.setEnabled(False) | [
"def",
"set_widgets",
"(",
"self",
")",
":",
"self",
".",
"clear_further_steps",
"(",
")",
"purpose",
"=",
"self",
".",
"parent",
".",
"step_kw_purpose",
".",
"selected_purpose",
"(",
")",
"subcategory",
"=",
"self",
".",
"parent",
".",
"step_kw_subcategory",
".",
"selected_subcategory",
"(",
")",
"unit",
"=",
"self",
".",
"parent",
".",
"step_kw_unit",
".",
"selected_unit",
"(",
")",
"layer_mode",
"=",
"self",
".",
"parent",
".",
"step_kw_layermode",
".",
"selected_layermode",
"(",
")",
"# Set mode",
"# Notes(IS) I hard coded this one, need to fix it after it's working.",
"field_key",
"=",
"self",
".",
"parent",
".",
"field_keyword_for_the_layer",
"(",
")",
"if",
"field_key",
"==",
"population_count_field",
"[",
"'key'",
"]",
":",
"self",
".",
"mode",
"=",
"MULTI_MODE",
"else",
":",
"self",
".",
"mode",
"=",
"SINGLE_MODE",
"# Filtering based on field type",
"layer_field",
"=",
"definition",
"(",
"field_key",
")",
"layer_field_types",
"=",
"deepcopy",
"(",
"layer_field",
"[",
"'type'",
"]",
")",
"if",
"not",
"isinstance",
"(",
"layer_field_types",
",",
"list",
")",
":",
"layer_field_types",
"=",
"[",
"layer_field_types",
"]",
"# Remove string for continuous layer",
"if",
"layer_mode",
"==",
"layer_mode_continuous",
"and",
"unit",
":",
"if",
"QVariant",
".",
"String",
"in",
"layer_field_types",
":",
"layer_field_types",
".",
"remove",
"(",
"QVariant",
".",
"String",
")",
"if",
"purpose",
"==",
"layer_purpose_aggregation",
":",
"question_text",
"=",
"field_question_aggregation",
"elif",
"layer_mode",
"==",
"layer_mode_continuous",
"and",
"unit",
":",
"subcategory_unit_relation",
"=",
"get_question_text",
"(",
"'%s_%s_question'",
"%",
"(",
"subcategory",
"[",
"'key'",
"]",
",",
"unit",
"[",
"'key'",
"]",
")",
")",
"if",
"'MISSING'",
"in",
"subcategory_unit_relation",
":",
"subcategory_unit_relation",
"=",
"self",
".",
"tr",
"(",
"'{subcategory} in {unit} unit'",
")",
".",
"format",
"(",
"subcategory",
"=",
"subcategory",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
",",
"unit",
"=",
"unit",
"[",
"'plural_name'",
"]",
")",
"question_text",
"=",
"field_question_subcategory_unit",
"%",
"(",
"purpose",
"[",
"'name'",
"]",
",",
"subcategory",
"[",
"'name'",
"]",
",",
"unit",
"[",
"'name'",
"]",
",",
"subcategory_unit_relation",
")",
"else",
":",
"question_text",
"=",
"field_question_subcategory_classified",
"%",
"(",
"subcategory",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
",",
"subcategory",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
")",
"if",
"self",
".",
"mode",
"==",
"SINGLE_MODE",
":",
"question_text",
"+=",
"tr",
"(",
"'\\nYou can select 1 field only.'",
")",
"self",
".",
"lstFields",
".",
"setSelectionMode",
"(",
"QAbstractItemView",
".",
"SingleSelection",
")",
"elif",
"self",
".",
"mode",
"==",
"MULTI_MODE",
":",
"question_text",
"+=",
"tr",
"(",
"'\\nYou can select more than 1 field. InaSAFE will sum up the '",
"'value of the fields that you choose.'",
")",
"self",
".",
"lstFields",
".",
"setSelectionMode",
"(",
"QAbstractItemView",
".",
"ExtendedSelection",
")",
"self",
".",
"lblSelectField",
".",
"setText",
"(",
"question_text",
")",
"self",
".",
"lstFields",
".",
"clear",
"(",
")",
"default_item",
"=",
"None",
"for",
"field",
"in",
"self",
".",
"parent",
".",
"layer",
".",
"fields",
"(",
")",
":",
"# Skip if it's not in the field types requirement",
"if",
"field",
".",
"type",
"(",
")",
"not",
"in",
"layer_field_types",
":",
"continue",
"field_name",
"=",
"field",
".",
"name",
"(",
")",
"item",
"=",
"QListWidgetItem",
"(",
"field_name",
",",
"self",
".",
"lstFields",
")",
"item",
".",
"setData",
"(",
"Qt",
".",
"UserRole",
",",
"field_name",
")",
"# Select the item if it match the unit's default_attribute",
"if",
"unit",
"and",
"'default_attribute'",
"in",
"unit",
"and",
"field_name",
"==",
"unit",
"[",
"'default_attribute'",
"]",
":",
"default_item",
"=",
"item",
"# For continuous data, gray out id, gid, fid and text fields",
"if",
"self",
".",
"parent",
".",
"step_kw_layermode",
".",
"selected_layermode",
"(",
")",
"==",
"layer_mode_continuous",
"and",
"unit",
":",
"field_type",
"=",
"field",
".",
"type",
"(",
")",
"if",
"field_type",
">",
"9",
"or",
"re",
".",
"match",
"(",
"'.{0,2}id$'",
",",
"field_name",
",",
"re",
".",
"I",
")",
":",
"continue",
"# Don't show unmatched field type",
"if",
"default_item",
":",
"self",
".",
"lstFields",
".",
"setCurrentItem",
"(",
"default_item",
")",
"self",
".",
"lblDescribeField",
".",
"clear",
"(",
")",
"# Set values based on existing keywords (if already assigned)",
"field_keyword",
"=",
"self",
".",
"parent",
".",
"field_keyword_for_the_layer",
"(",
")",
"inasafe_field_keywords",
"=",
"self",
".",
"parent",
".",
"get_existing_keyword",
"(",
"'inasafe_fields'",
")",
"if",
"inasafe_field_keywords",
":",
"fields",
"=",
"inasafe_field_keywords",
".",
"get",
"(",
"field_keyword",
")",
"if",
"isinstance",
"(",
"fields",
",",
"str",
")",
":",
"fields",
"=",
"[",
"fields",
"]",
"if",
"fields",
":",
"option_fields",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"self",
".",
"lstFields",
".",
"count",
"(",
")",
")",
":",
"option_fields",
".",
"append",
"(",
"str",
"(",
"self",
".",
"lstFields",
".",
"item",
"(",
"index",
")",
".",
"text",
"(",
")",
")",
")",
"for",
"field",
"in",
"fields",
":",
"if",
"field",
"in",
"option_fields",
":",
"self",
".",
"lstFields",
".",
"item",
"(",
"option_fields",
".",
"index",
"(",
"field",
")",
")",
".",
"setSelected",
"(",
"True",
")",
"self",
".",
"auto_select_one_item",
"(",
"self",
".",
"lstFields",
")",
"if",
"self",
".",
"selected_fields",
"(",
")",
":",
"self",
".",
"parent",
".",
"pbnNext",
".",
"setEnabled",
"(",
"True",
")",
"else",
":",
"self",
".",
"parent",
".",
"pbnNext",
".",
"setEnabled",
"(",
"False",
")"
]
| Set widgets on the Field tab. | [
"Set",
"widgets",
"on",
"the",
"Field",
"tab",
"."
]
| python | train | 44.798077 |
jbeluch/xbmcswift2 | xbmcswift2/mockxbmc/utils.py | https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/mockxbmc/utils.py#L24-L28 | def get_addon_name(addonxml):
'''Parses an addon name from the given addon.xml filename.'''
xml = parse(addonxml)
addon_node = xml.getElementsByTagName('addon')[0]
return addon_node.getAttribute('name') | [
"def",
"get_addon_name",
"(",
"addonxml",
")",
":",
"xml",
"=",
"parse",
"(",
"addonxml",
")",
"addon_node",
"=",
"xml",
".",
"getElementsByTagName",
"(",
"'addon'",
")",
"[",
"0",
"]",
"return",
"addon_node",
".",
"getAttribute",
"(",
"'name'",
")"
]
| Parses an addon name from the given addon.xml filename. | [
"Parses",
"an",
"addon",
"name",
"from",
"the",
"given",
"addon",
".",
"xml",
"filename",
"."
]
| python | train | 42.8 |
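
A runnable sketch of the same minidom lookup, fed an inline addon.xml string instead of a file path:

from xml.dom.minidom import parseString

xml_text = '<addon id="plugin.demo" name="Demo Add-on" version="1.0"/>'
addon_node = parseString(xml_text).getElementsByTagName('addon')[0]
print(addon_node.getAttribute('name'))  # Demo Add-on
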
saltstack/salt | salt/utils/roster_matcher.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/roster_matcher.py#L119-L130 | def get_data(self, minion):
'''
Return the configured ip
'''
ret = copy.deepcopy(__opts__.get('roster_defaults', {}))
if isinstance(self.raw[minion], six.string_types):
ret.update({'host': self.raw[minion]})
return ret
elif isinstance(self.raw[minion], dict):
ret.update(self.raw[minion])
return ret
return False | [
"def",
"get_data",
"(",
"self",
",",
"minion",
")",
":",
"ret",
"=",
"copy",
".",
"deepcopy",
"(",
"__opts__",
".",
"get",
"(",
"'roster_defaults'",
",",
"{",
"}",
")",
")",
"if",
"isinstance",
"(",
"self",
".",
"raw",
"[",
"minion",
"]",
",",
"six",
".",
"string_types",
")",
":",
"ret",
".",
"update",
"(",
"{",
"'host'",
":",
"self",
".",
"raw",
"[",
"minion",
"]",
"}",
")",
"return",
"ret",
"elif",
"isinstance",
"(",
"self",
".",
"raw",
"[",
"minion",
"]",
",",
"dict",
")",
":",
"ret",
".",
"update",
"(",
"self",
".",
"raw",
"[",
"minion",
"]",
")",
"return",
"ret",
"return",
"False"
]
| Return the configured ip | [
"Return",
"the",
"configured",
"ip"
]
| python | train | 33.75 |
vaexio/vaex | packages/vaex-core/vaex/dataframe.py | https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4447-L4451 | def _hide_column(self, column):
'''Hides a column by prefixing the name with \'__\''''
column = _ensure_string_from_expression(column)
new_name = self._find_valid_name('__' + column)
self._rename(column, new_name) | [
"def",
"_hide_column",
"(",
"self",
",",
"column",
")",
":",
"column",
"=",
"_ensure_string_from_expression",
"(",
"column",
")",
"new_name",
"=",
"self",
".",
"_find_valid_name",
"(",
"'__'",
"+",
"column",
")",
"self",
".",
"_rename",
"(",
"column",
",",
"new_name",
")"
]
| Hides a column by prefixing the name with '__' | [
"Hides",
"a",
"column",
"by",
"prefixing",
"the",
"name",
"with",
"\\",
"__",
"\\"
]
| python | test | 48.2 |
DataDog/integrations-core | datadog_checks_dev/datadog_checks/dev/tooling/utils.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/utils.py#L129-L135 | def get_version_string(check_name):
"""
Get the version string for the given check.
"""
version = VERSION.search(read_version_file(check_name))
if version:
return version.group(1) | [
"def",
"get_version_string",
"(",
"check_name",
")",
":",
"version",
"=",
"VERSION",
".",
"search",
"(",
"read_version_file",
"(",
"check_name",
")",
")",
"if",
"version",
":",
"return",
"version",
".",
"group",
"(",
"1",
")"
]
| Get the version string for the given check. | [
"Get",
"the",
"version",
"string",
"for",
"the",
"given",
"check",
"."
]
| python | train | 28.714286 |
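
The VERSION regex itself is defined elsewhere in the module; a plausible stand-in that pulls `__version__` out of a version-file body (the pattern is an assumption, not the real one):

import re
VERSION = re.compile(r"__version__\s*=\s*['\"]([^'\"]+)['\"]")  # assumed pattern
file_body = "__version__ = '1.2.3'\n"
match = VERSION.search(file_body)
if match:
    print(match.group(1))  # 1.2.3
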
tritemio/PyBroMo | pybromo/diffusion.py | https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/diffusion.py#L131-L135 | def add(self, num_particles, D):
"""Add particles with diffusion coefficient `D` at random positions.
"""
self._plist += self._generate(num_particles, D, box=self.box,
rs=self.rs) | [
"def",
"add",
"(",
"self",
",",
"num_particles",
",",
"D",
")",
":",
"self",
".",
"_plist",
"+=",
"self",
".",
"_generate",
"(",
"num_particles",
",",
"D",
",",
"box",
"=",
"self",
".",
"box",
",",
"rs",
"=",
"self",
".",
"rs",
")"
]
| Add particles with diffusion coefficient `D` at random positions. | [
"Add",
"particles",
"with",
"diffusion",
"coefficient",
"D",
"at",
"random",
"positions",
"."
]
| python | valid | 47.4 |
cloudnull/turbolift | turbolift/clouderator/utils.py | https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/clouderator/utils.py#L76-L84 | def unique_list_dicts(dlist, key):
"""Return a list of dictionaries which are sorted for only unique entries.
:param dlist:
:param key:
:return list:
"""
return list(dict((val[key], val) for val in dlist).values()) | [
"def",
"unique_list_dicts",
"(",
"dlist",
",",
"key",
")",
":",
"return",
"list",
"(",
"dict",
"(",
"(",
"val",
"[",
"key",
"]",
",",
"val",
")",
"for",
"val",
"in",
"dlist",
")",
".",
"values",
"(",
")",
")"
]
| Return a list of dictionaries which are filtered to only unique entries.
:param dlist:
:param key:
:return list: | [
"Return",
"a",
"list",
"of",
"dictionaries",
"which",
"are",
"sorted",
"for",
"only",
"unique",
"entries",
"."
]
| python | train | 25.777778 |
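
Usage sketch -- when two entries share a key, the later one wins because the dict comprehension overwrites earlier values:

dlist = [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}, {'id': 1, 'v': 'c'}]
print(unique_list_dicts(dlist, 'id'))
# on CPython 3.7+: [{'id': 1, 'v': 'c'}, {'id': 2, 'v': 'b'}] (first-insertion key order)
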
teepark/junction | junction/futures.py | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L375-L401 | def wait_any(futures, timeout=None):
'''Wait for the completion of any (the first) one of multiple futures
:param list futures: A list of :class:`Future`\s
:param timeout:
The maximum time to wait. With ``None``, will block indefinitely.
:type timeout: float or None
:returns:
One of the futures from the provided list -- the first one to become
complete (or any of the ones that were already complete).
:raises WaitTimeout: if a timeout is provided and hit
'''
for fut in futures:
if fut.complete:
return fut
wait = _Wait(futures)
for fut in futures:
fut._waits.add(wait)
if wait.done.wait(timeout):
raise errors.WaitTimeout()
return wait.completed_future | [
"def",
"wait_any",
"(",
"futures",
",",
"timeout",
"=",
"None",
")",
":",
"for",
"fut",
"in",
"futures",
":",
"if",
"fut",
".",
"complete",
":",
"return",
"fut",
"wait",
"=",
"_Wait",
"(",
"futures",
")",
"for",
"fut",
"in",
"futures",
":",
"fut",
".",
"_waits",
".",
"add",
"(",
"wait",
")",
"if",
"wait",
".",
"done",
".",
"wait",
"(",
"timeout",
")",
":",
"raise",
"errors",
".",
"WaitTimeout",
"(",
")",
"return",
"wait",
".",
"completed_future"
]
| Wait for the completion of any (the first) one of multiple futures
:param list futures: A list of :class:`Future`\s
:param timeout:
The maximum time to wait. With ``None``, will block indefinitely.
:type timeout: float or None
:returns:
One of the futures from the provided list -- the first one to become
complete (or any of the ones that were already complete).
:raises WaitTimeout: if a timeout is provided and hit | [
"Wait",
"for",
"the",
"completion",
"of",
"any",
"(",
"the",
"first",
")",
"one",
"of",
"multiple",
"futures"
]
| python | train | 27.592593 |
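
Hypothetical usage (assumes `futures` came from earlier junction RPC calls and `errors` is the library's errors module):

try:
    first = wait_any(futures, timeout=5.0)  # returns the first complete Future
except errors.WaitTimeout:
    print("nothing completed within 5 seconds")
else:
    print("first completed future:", first)
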
mikeboers/MultiMap | multimap.py | https://github.com/mikeboers/MultiMap/blob/0251e5d5df693cc247b4ac5b95adfdd10e3bec04/multimap.py#L399-L416 | def _insert_pairs(self, ids_and_pairs):
"""Insert some new pairs, and keep the _key_ids updated.
Params:
ids_and_pairs -- A list of (index, (key, value)) tuples.
"""
ids_to_insert = [x[0] for x in ids_and_pairs]
# We use the bisect to tell us how many spots the given index is
# shifting up in the list.
for ids in self._key_ids.itervalues():
for i, id in enumerate(ids):
ids[i] += bisect(ids_to_insert, id)
# Do the actual insertion
for i, pair in ids_and_pairs:
self._pairs.insert(i, pair) | [
"def",
"_insert_pairs",
"(",
"self",
",",
"ids_and_pairs",
")",
":",
"ids_to_insert",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"ids_and_pairs",
"]",
"# We use the bisect to tell us how many spots the given index is",
"# shifting up in the list.",
"for",
"ids",
"in",
"self",
".",
"_key_ids",
".",
"itervalues",
"(",
")",
":",
"for",
"i",
",",
"id",
"in",
"enumerate",
"(",
"ids",
")",
":",
"ids",
"[",
"i",
"]",
"+=",
"bisect",
"(",
"ids_to_insert",
",",
"id",
")",
"# Do the actual insertion",
"for",
"i",
",",
"pair",
"in",
"ids_and_pairs",
":",
"self",
".",
"_pairs",
".",
"insert",
"(",
"i",
",",
"pair",
")"
]
| Insert some new pairs, and keep the _key_ids updated.
Params:
ids_and_pairs -- A list of (index, (key, value)) tuples. | [
"Insert",
"some",
"new",
"pairs",
"and",
"keep",
"the",
"_key_ids",
"updated",
".",
"Params",
":",
"ids_and_pairs",
"--",
"A",
"list",
"of",
"(",
"index",
"(",
"key",
"value",
"))",
"tuples",
"."
]
| python | train | 35.222222 |
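
The bisect call computes how many of the new insert positions fall at or before each stored index -- i.e. how far that index shifts right; a standalone illustration:

from bisect import bisect
ids_to_insert = [1, 4]        # final positions where new pairs are being inserted
existing_ids = [0, 2, 5]
print([i + bisect(ids_to_insert, i) for i in existing_ids])  # [0, 3, 7]
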
angr/angr | angr/analyses/decompiler/clinic.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/decompiler/clinic.py#L151-L167 | def _simplify_blocks(self, stack_pointer_tracker=None):
"""
Simplify all blocks in self._blocks.
:param stack_pointer_tracker: The RegisterDeltaTracker analysis instance.
:return: None
"""
# First of all, let's simplify blocks one by one
for key in self._blocks:
ail_block = self._blocks[key]
simplified = self._simplify_block(ail_block, stack_pointer_tracker=stack_pointer_tracker)
self._blocks[key] = simplified
# Update the function graph so that we can use reaching definitions
self._update_graph() | [
"def",
"_simplify_blocks",
"(",
"self",
",",
"stack_pointer_tracker",
"=",
"None",
")",
":",
"# First of all, let's simplify blocks one by one",
"for",
"key",
"in",
"self",
".",
"_blocks",
":",
"ail_block",
"=",
"self",
".",
"_blocks",
"[",
"key",
"]",
"simplified",
"=",
"self",
".",
"_simplify_block",
"(",
"ail_block",
",",
"stack_pointer_tracker",
"=",
"stack_pointer_tracker",
")",
"self",
".",
"_blocks",
"[",
"key",
"]",
"=",
"simplified",
"# Update the function graph so that we can use reaching definitions",
"self",
".",
"_update_graph",
"(",
")"
]
| Simplify all blocks in self._blocks.
:param stack_pointer_tracker: The RegisterDeltaTracker analysis instance.
:return: None | [
"Simplify",
"all",
"blocks",
"in",
"self",
".",
"_blocks",
"."
]
| python | train | 36.647059 |
lltk/lltk | lltk/nl/scrapers/vandale.py | https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/nl/scrapers/vandale.py#L56-L70 | def articles(self):
''' Tries to scrape the correct articles for singular and plural from vandale.nl. '''
result = [None, None]
element = self._first('NN')
if element:
if re.search('(de|het/?de|het);', element, re.U):
result[0] = re.findall('(de|het/?de|het);', element, re.U)[0].split('/')
if re.search('meervoud: (\w+)', element, re.U):
# It's a noun with a plural form
result[1] = ['de']
else:
# It's a noun without a plural form
result[1] = ['']
return result | [
"def",
"articles",
"(",
"self",
")",
":",
"result",
"=",
"[",
"None",
",",
"None",
"]",
"element",
"=",
"self",
".",
"_first",
"(",
"'NN'",
")",
"if",
"element",
":",
"if",
"re",
".",
"search",
"(",
"'(de|het/?de|het);'",
",",
"element",
",",
"re",
".",
"U",
")",
":",
"result",
"[",
"0",
"]",
"=",
"re",
".",
"findall",
"(",
"'(de|het/?de|het);'",
",",
"element",
",",
"re",
".",
"U",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'/'",
")",
"if",
"re",
".",
"search",
"(",
"'meervoud: (\\w+)'",
",",
"element",
",",
"re",
".",
"U",
")",
":",
"# It's a noun with a plural form",
"result",
"[",
"1",
"]",
"=",
"[",
"'de'",
"]",
"else",
":",
"# It's a noun without a plural form",
"result",
"[",
"1",
"]",
"=",
"[",
"''",
"]",
"return",
"result"
]
| Tries to scrape the correct articles for singular and plural from vandale.nl. | [
"Tries",
"to",
"scrape",
"the",
"correct",
"articles",
"for",
"singular",
"and",
"plural",
"from",
"vandale",
".",
"nl",
"."
]
| python | train | 32.6 |
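
A standalone run of the same regexes against a plausible vandale-style entry (the sample string is made up):

import re
element = u'het/de; meervoud: huizen'
print(re.findall('(de|het/?de|het);', element, re.U)[0].split('/'))  # ['het', 'de']
print(bool(re.search(r'meervoud: (\w+)', element, re.U)))            # True -> a plural form exists
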
FNNDSC/pfurl | pfurl/pfurl.py | https://github.com/FNNDSC/pfurl/blob/572f634ab582b7b7b7a3fbfd5bf12aadc1ba7958/pfurl/pfurl.py#L776-L799 | def pullPath_copy(self, d_msg, **kwargs):
"""
Handle the "copy" pull operation
"""
# Parse "header" information
d_meta = d_msg['meta']
d_local = d_meta['local']
str_localPath = d_local['path']
d_remote = d_meta['remote']
d_transport = d_meta['transport']
d_copy = d_transport['copy']
# Pull the actual data into a dictionary holder
d_curl = {}
d_curl['remoteServer'] = self.pullPath_core()
d_curl['copy'] = {}
d_curl['copy']['status'] = d_curl['remoteServer']['status']
if not d_curl['copy']['status']:
d_curl['copy']['msg'] = "Copy on remote server failed!"
else:
d_curl['copy']['msg'] = "Copy on remote server success!"
return d_curl | [
"def",
"pullPath_copy",
"(",
"self",
",",
"d_msg",
",",
"*",
"*",
"kwargs",
")",
":",
"# Parse \"header\" information",
"d_meta",
"=",
"d_msg",
"[",
"'meta'",
"]",
"d_local",
"=",
"d_meta",
"[",
"'local'",
"]",
"str_localPath",
"=",
"d_local",
"[",
"'path'",
"]",
"d_remote",
"=",
"d_meta",
"[",
"'remote'",
"]",
"d_transport",
"=",
"d_meta",
"[",
"'transport'",
"]",
"d_copy",
"=",
"d_transport",
"[",
"'copy'",
"]",
"# Pull the actual data into a dictionary holder",
"d_curl",
"=",
"{",
"}",
"d_curl",
"[",
"'remoteServer'",
"]",
"=",
"self",
".",
"pullPath_core",
"(",
")",
"d_curl",
"[",
"'copy'",
"]",
"=",
"{",
"}",
"d_curl",
"[",
"'copy'",
"]",
"[",
"'status'",
"]",
"=",
"d_curl",
"[",
"'remoteServer'",
"]",
"[",
"'status'",
"]",
"if",
"not",
"d_curl",
"[",
"'copy'",
"]",
"[",
"'status'",
"]",
":",
"d_curl",
"[",
"'copy'",
"]",
"[",
"'msg'",
"]",
"=",
"\"Copy on remote server failed!\"",
"else",
":",
"d_curl",
"[",
"'copy'",
"]",
"[",
"'msg'",
"]",
"=",
"\"Copy on remote server success!\"",
"return",
"d_curl"
]
| Handle the "copy" pull operation | [
"Handle",
"the",
"copy",
"pull",
"operation"
]
| python | train | 37.166667 |
rigetti/pyquil | pyquil/device.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/device.py#L265-L324 | def to_dict(self):
"""
Create a JSON-serializable representation of the device Specs.
The dictionary representation is of the form::
{
'1Q': {
"0": {
"f1QRB": 0.99,
"T1": 20e-6,
...
},
"1": {
"f1QRB": 0.989,
"T1": 19e-6,
...
},
...
},
'2Q': {
"1-4": {
"fBellState": 0.93,
"fCZ": 0.92,
"fCZ_std_err": 0.03,
"fCPHASE": 0.91
},
"1-5": {
"fBellState": 0.9,
"fCZ": 0.89,
"fCZ_std_err": 0.05,
"fCPHASE": 0.88
},
...
},
...
}
        :return: A dictionary representation of self.
:rtype: Dict[str, Any]
"""
return {
'1Q': {
"{}".format(qs.id): {
'f1QRB': qs.f1QRB,
'fRO': qs.fRO,
'T1': qs.T1,
'T2': qs.T2,
'fActiveReset': qs.fActiveReset
} for qs in self.qubits_specs
},
'2Q': {
"{}-{}".format(*es.targets): {
'fBellState': es.fBellState,
'fCZ': es.fCZ,
'fCZ_std_err': es.fCZ_std_err,
'fCPHASE': es.fCPHASE
} for es in self.edges_specs
}
} | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"'1Q'",
":",
"{",
"\"{}\"",
".",
"format",
"(",
"qs",
".",
"id",
")",
":",
"{",
"'f1QRB'",
":",
"qs",
".",
"f1QRB",
",",
"'fRO'",
":",
"qs",
".",
"fRO",
",",
"'T1'",
":",
"qs",
".",
"T1",
",",
"'T2'",
":",
"qs",
".",
"T2",
",",
"'fActiveReset'",
":",
"qs",
".",
"fActiveReset",
"}",
"for",
"qs",
"in",
"self",
".",
"qubits_specs",
"}",
",",
"'2Q'",
":",
"{",
"\"{}-{}\"",
".",
"format",
"(",
"*",
"es",
".",
"targets",
")",
":",
"{",
"'fBellState'",
":",
"es",
".",
"fBellState",
",",
"'fCZ'",
":",
"es",
".",
"fCZ",
",",
"'fCZ_std_err'",
":",
"es",
".",
"fCZ_std_err",
",",
"'fCPHASE'",
":",
"es",
".",
"fCPHASE",
"}",
"for",
"es",
"in",
"self",
".",
"edges_specs",
"}",
"}"
]
| Create a JSON-serializable representation of the device Specs.
The dictionary representation is of the form::
{
'1Q': {
"0": {
"f1QRB": 0.99,
"T1": 20e-6,
...
},
"1": {
"f1QRB": 0.989,
"T1": 19e-6,
...
},
...
},
'2Q': {
"1-4": {
"fBellState": 0.93,
"fCZ": 0.92,
"fCZ_std_err": 0.03,
"fCPHASE": 0.91
},
"1-5": {
"fBellState": 0.9,
"fCZ": 0.89,
"fCZ_std_err": 0.05,
"fCPHASE": 0.88
},
...
},
...
}
        :return: A dictionary representation of self. | [
:rtype: Dict[str, Any] | [
"Create",
"a",
"JSON",
"-",
"serializable",
"representation",
"of",
"the",
"device",
"Specs",
"."
]
| python | train | 29.383333 |
serge-sans-paille/pythran | pythran/utils.py | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/utils.py#L10-L23 | def attr_to_path(node):
""" Compute path and final object for an attribute node """
def get_intrinsic_path(modules, attr):
""" Get function path and intrinsic from an ast.Attribute. """
if isinstance(attr, ast.Name):
return modules[demangle(attr.id)], (demangle(attr.id),)
elif isinstance(attr, ast.Attribute):
module, path = get_intrinsic_path(modules, attr.value)
return module[attr.attr], path + (attr.attr,)
obj, path = get_intrinsic_path(MODULES, node)
if not obj.isliteral():
path = path[:-1] + ('functor', path[-1])
return obj, ('pythonic', ) + path | [
"def",
"attr_to_path",
"(",
"node",
")",
":",
"def",
"get_intrinsic_path",
"(",
"modules",
",",
"attr",
")",
":",
"\"\"\" Get function path and intrinsic from an ast.Attribute. \"\"\"",
"if",
"isinstance",
"(",
"attr",
",",
"ast",
".",
"Name",
")",
":",
"return",
"modules",
"[",
"demangle",
"(",
"attr",
".",
"id",
")",
"]",
",",
"(",
"demangle",
"(",
"attr",
".",
"id",
")",
",",
")",
"elif",
"isinstance",
"(",
"attr",
",",
"ast",
".",
"Attribute",
")",
":",
"module",
",",
"path",
"=",
"get_intrinsic_path",
"(",
"modules",
",",
"attr",
".",
"value",
")",
"return",
"module",
"[",
"attr",
".",
"attr",
"]",
",",
"path",
"+",
"(",
"attr",
".",
"attr",
",",
")",
"obj",
",",
"path",
"=",
"get_intrinsic_path",
"(",
"MODULES",
",",
"node",
")",
"if",
"not",
"obj",
".",
"isliteral",
"(",
")",
":",
"path",
"=",
"path",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"'functor'",
",",
"path",
"[",
"-",
"1",
"]",
")",
"return",
"obj",
",",
"(",
"'pythonic'",
",",
")",
"+",
"path"
]
| Compute path and final object for an attribute node | [
"Compute",
"path",
"and",
"final",
"object",
"for",
"an",
"attribute",
"node"
]
| python | train | 45.214286 |
ubc/ubcpi | ubcpi/persistence.py | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/persistence.py#L134-L145 | def add_answer(self, vote, rationale):
"""
Add an answer
Args:
            vote (int): the option that the student voted for
            rationale (str): the reason why the student voted for the option
"""
self.raw_answers.append({
VOTE_KEY: vote,
RATIONALE_KEY: rationale,
}) | [
"def",
"add_answer",
"(",
"self",
",",
"vote",
",",
"rationale",
")",
":",
"self",
".",
"raw_answers",
".",
"append",
"(",
"{",
"VOTE_KEY",
":",
"vote",
",",
"RATIONALE_KEY",
":",
"rationale",
",",
"}",
")"
]
| Add an answer
Args:
            vote (int): the option that the student voted for
            rationale (str): the reason why the student voted for the option | [
"Add",
"an",
"answer"
]
| python | train | 27.75 |
lesscpy/lesscpy | lesscpy/plib/mixin.py | https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/plib/mixin.py#L81-L116 | def _parse_arg(self, var, arg, scope):
""" Parse a single argument to mixin.
args:
var (Variable object): variable
arg (mixed): argument
scope (Scope object): current scope
returns:
Variable object or None
"""
if isinstance(var, Variable):
# kwarg
if arg:
if utility.is_variable(arg[0]):
tmp = scope.variables(arg[0])
if not tmp:
return None
val = tmp.value
else:
val = arg
var = Variable(var.tokens[:-1] + [val])
else:
# arg
if utility.is_variable(var):
if arg is None:
raise SyntaxError('Missing argument to mixin')
elif utility.is_variable(arg[0]):
tmp = scope.variables(arg[0])
if not tmp:
return None
val = tmp.value
else:
val = arg
var = Variable([var, None, val])
else:
return None
return var | [
"def",
"_parse_arg",
"(",
"self",
",",
"var",
",",
"arg",
",",
"scope",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"Variable",
")",
":",
"# kwarg",
"if",
"arg",
":",
"if",
"utility",
".",
"is_variable",
"(",
"arg",
"[",
"0",
"]",
")",
":",
"tmp",
"=",
"scope",
".",
"variables",
"(",
"arg",
"[",
"0",
"]",
")",
"if",
"not",
"tmp",
":",
"return",
"None",
"val",
"=",
"tmp",
".",
"value",
"else",
":",
"val",
"=",
"arg",
"var",
"=",
"Variable",
"(",
"var",
".",
"tokens",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"val",
"]",
")",
"else",
":",
"# arg",
"if",
"utility",
".",
"is_variable",
"(",
"var",
")",
":",
"if",
"arg",
"is",
"None",
":",
"raise",
"SyntaxError",
"(",
"'Missing argument to mixin'",
")",
"elif",
"utility",
".",
"is_variable",
"(",
"arg",
"[",
"0",
"]",
")",
":",
"tmp",
"=",
"scope",
".",
"variables",
"(",
"arg",
"[",
"0",
"]",
")",
"if",
"not",
"tmp",
":",
"return",
"None",
"val",
"=",
"tmp",
".",
"value",
"else",
":",
"val",
"=",
"arg",
"var",
"=",
"Variable",
"(",
"[",
"var",
",",
"None",
",",
"val",
"]",
")",
"else",
":",
"return",
"None",
"return",
"var"
]
| Parse a single argument to mixin.
args:
var (Variable object): variable
arg (mixed): argument
scope (Scope object): current scope
returns:
Variable object or None | [
"Parse",
"a",
"single",
"argument",
"to",
"mixin",
".",
"args",
":",
"var",
"(",
"Variable",
"object",
")",
":",
"variable",
"arg",
"(",
"mixed",
")",
":",
"argument",
"scope",
"(",
"Scope",
"object",
")",
":",
"current",
"scope",
"returns",
":",
"Variable",
"object",
"or",
"None"
]
| python | valid | 32.888889 |
mardix/Mocha | mocha/contrib/auth/models.py | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/models.py#L173-L177 | def get_by_email(cls, email):
"""
Return a User by email address
"""
return cls.query().filter(cls.email == email).first() | [
"def",
"get_by_email",
"(",
"cls",
",",
"email",
")",
":",
"return",
"cls",
".",
"query",
"(",
")",
".",
"filter",
"(",
"cls",
".",
"email",
"==",
"email",
")",
".",
"first",
"(",
")"
]
| Return a User by email address | [
"Return",
"a",
"User",
"by",
"email",
"address"
]
| python | train | 30 |
ArtoLabs/SimpleSteem | simplesteem/simplesteem.py | https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/simplesteem.py#L117-L135 | def steem_instance(self):
        ''' Returns the steem instance if it already exists,
otherwise uses the goodnode method to fetch a node
and instantiate the Steem class.
'''
if self.s:
return self.s
for num_of_retries in range(default.max_retry):
node = self.util.goodnode(self.nodes)
try:
self.s = Steem(keys=self.keys,
nodes=[node])
except Exception as e:
self.util.retry("COULD NOT GET STEEM INSTANCE",
e, num_of_retries, default.wait_time)
self.s = None
else:
return self.s
return False | [
"def",
"steem_instance",
"(",
"self",
")",
":",
"if",
"self",
".",
"s",
":",
"return",
"self",
".",
"s",
"for",
"num_of_retries",
"in",
"range",
"(",
"default",
".",
"max_retry",
")",
":",
"node",
"=",
"self",
".",
"util",
".",
"goodnode",
"(",
"self",
".",
"nodes",
")",
"try",
":",
"self",
".",
"s",
"=",
"Steem",
"(",
"keys",
"=",
"self",
".",
"keys",
",",
"nodes",
"=",
"[",
"node",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"util",
".",
"retry",
"(",
"\"COULD NOT GET STEEM INSTANCE\"",
",",
"e",
",",
"num_of_retries",
",",
"default",
".",
"wait_time",
")",
"self",
".",
"s",
"=",
"None",
"else",
":",
"return",
"self",
".",
"s",
"return",
"False"
]
| Returns the steem instance if it already exists,
otherwise uses the goodnode method to fetch a node
and instantiate the Steem class. | [
"Returns",
"the",
"steem",
"instance",
"if",
"it",
"already",
"exists",
"otherwise",
"uses",
"the",
"goodnode",
"method",
"to",
"fetch",
"a",
"node",
"and",
"instantiate",
"the",
"Steem",
"class",
"."
]
| python | train | 36.105263 |
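
The retry loop reduces to this generic, runnable shape (the always-failing connector and fixed sleep stand in for goodnode/util.retry):

import time

MAX_RETRY, WAIT_TIME = 3, 0.1

def connect_once():
    raise ConnectionError("node unreachable")  # illustrative: always fails

instance = None
for attempt in range(MAX_RETRY):
    try:
        instance = connect_once()
    except Exception as exc:
        print("retry %d after error: %s" % (attempt, exc))
        time.sleep(WAIT_TIME)
    else:
        break
print(instance)  # None after exhausting retries, mirroring the `return False` path
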
bokeh/bokeh | bokeh/resources.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/resources.py#L155-L171 | def _collect_external_resources(self, resource_attr):
""" Collect external resources set on resource_attr attribute of all models."""
external_resources = []
for _, cls in sorted(Model.model_class_reverse_map.items(), key=lambda arg: arg[0]):
external = getattr(cls, resource_attr, None)
if isinstance(external, string_types):
if external not in external_resources:
external_resources.append(external)
elif isinstance(external, list):
for e in external:
if e not in external_resources:
external_resources.append(e)
return external_resources | [
"def",
"_collect_external_resources",
"(",
"self",
",",
"resource_attr",
")",
":",
"external_resources",
"=",
"[",
"]",
"for",
"_",
",",
"cls",
"in",
"sorted",
"(",
"Model",
".",
"model_class_reverse_map",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"arg",
":",
"arg",
"[",
"0",
"]",
")",
":",
"external",
"=",
"getattr",
"(",
"cls",
",",
"resource_attr",
",",
"None",
")",
"if",
"isinstance",
"(",
"external",
",",
"string_types",
")",
":",
"if",
"external",
"not",
"in",
"external_resources",
":",
"external_resources",
".",
"append",
"(",
"external",
")",
"elif",
"isinstance",
"(",
"external",
",",
"list",
")",
":",
"for",
"e",
"in",
"external",
":",
"if",
"e",
"not",
"in",
"external_resources",
":",
"external_resources",
".",
"append",
"(",
"e",
")",
"return",
"external_resources"
]
| Collect external resources set on resource_attr attribute of all models. | [
"Collect",
"external",
"resources",
"set",
"on",
"resource_attr",
"attribute",
"of",
"all",
"models",
"."
]
| python | train | 40.705882 |
bitesofcode/projexui | projexui/widgets/xsnapshotwidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xsnapshotwidget.py#L104-L117 | def mouseMoveEvent(self, event):
"""
Drags the selection view for this widget.
:param event | <QMouseMoveEvent>
"""
w = event.pos().x() - self._region.x()
h = event.pos().y() - self._region.y()
self._region.setWidth(w)
self._region.setHeight(h)
self.repaint()
super(XSnapshotWidget, self).mouseMoveEvent(event) | [
"def",
"mouseMoveEvent",
"(",
"self",
",",
"event",
")",
":",
"w",
"=",
"event",
".",
"pos",
"(",
")",
".",
"x",
"(",
")",
"-",
"self",
".",
"_region",
".",
"x",
"(",
")",
"h",
"=",
"event",
".",
"pos",
"(",
")",
".",
"y",
"(",
")",
"-",
"self",
".",
"_region",
".",
"y",
"(",
")",
"self",
".",
"_region",
".",
"setWidth",
"(",
"w",
")",
"self",
".",
"_region",
".",
"setHeight",
"(",
"h",
")",
"self",
".",
"repaint",
"(",
")",
"super",
"(",
"XSnapshotWidget",
",",
"self",
")",
".",
"mouseMoveEvent",
"(",
"event",
")"
]
| Drags the selection view for this widget.
:param event | <QMouseMoveEvent> | [
"Drags",
"the",
"selection",
"view",
"for",
"this",
"widget",
".",
":",
"param",
"event",
"|",
"<QMouseMoveEvent",
">"
]
| python | train | 30.142857 |
Numigi/gitoo | src/core.py | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L113-L122 | def _get_module_folders(self, temp_repo):
"""Get a list of module paths contained in a temp directory.
:param string temp_repo: the folder containing the modules.
"""
paths = (
os.path.join(temp_repo, path) for path in os.listdir(temp_repo)
if self._is_module_included(path)
)
return (path for path in paths if os.path.isdir(path)) | [
"def",
"_get_module_folders",
"(",
"self",
",",
"temp_repo",
")",
":",
"paths",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_repo",
",",
"path",
")",
"for",
"path",
"in",
"os",
".",
"listdir",
"(",
"temp_repo",
")",
"if",
"self",
".",
"_is_module_included",
"(",
"path",
")",
")",
"return",
"(",
"path",
"for",
"path",
"in",
"paths",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
")"
]
| Get a list of module paths contained in a temp directory.
:param string temp_repo: the folder containing the modules. | [
"Get",
"a",
"list",
"of",
"module",
"paths",
"contained",
"in",
"a",
"temp",
"directory",
"."
]
| python | train | 39.5 |
paetzke/consolor | consolor/consolor.py | https://github.com/paetzke/consolor/blob/2d6b6063c181095ae9ec805cc4571ad1f960e5fd/consolor/consolor.py#L61-L87 | def get_line(s, bold=False, underline=False, blinking=False, color=None,
bgcolor=None, update_line=False):
"""
Returns a string with the given formatting.
"""
parts = []
if update_line:
parts.append(_UPDATE_LINE)
for val in [color, bgcolor]:
if val:
parts.append(val)
if bold:
parts.append(_TURN_BOLD_MODE_ON)
if underline:
parts.append(_TURN_UNDERLINE_MODE_ON)
if blinking:
parts.append(_TURN_BLINKING_MODE_ON)
parts.append(s)
parts.append(_TURN_OFF_CHARACTER_ATTS)
result = ''.join(parts)
return result | [
"def",
"get_line",
"(",
"s",
",",
"bold",
"=",
"False",
",",
"underline",
"=",
"False",
",",
"blinking",
"=",
"False",
",",
"color",
"=",
"None",
",",
"bgcolor",
"=",
"None",
",",
"update_line",
"=",
"False",
")",
":",
"parts",
"=",
"[",
"]",
"if",
"update_line",
":",
"parts",
".",
"append",
"(",
"_UPDATE_LINE",
")",
"for",
"val",
"in",
"[",
"color",
",",
"bgcolor",
"]",
":",
"if",
"val",
":",
"parts",
".",
"append",
"(",
"val",
")",
"if",
"bold",
":",
"parts",
".",
"append",
"(",
"_TURN_BOLD_MODE_ON",
")",
"if",
"underline",
":",
"parts",
".",
"append",
"(",
"_TURN_UNDERLINE_MODE_ON",
")",
"if",
"blinking",
":",
"parts",
".",
"append",
"(",
"_TURN_BLINKING_MODE_ON",
")",
"parts",
".",
"append",
"(",
"s",
")",
"parts",
".",
"append",
"(",
"_TURN_OFF_CHARACTER_ATTS",
")",
"result",
"=",
"''",
".",
"join",
"(",
"parts",
")",
"return",
"result"
]
| Returns a string with the given formatting. | [
"Returns",
"a",
"string",
"with",
"the",
"given",
"formatting",
"."
]
| python | train | 22.259259 |
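
A self-contained demo with standard SGR escape values (the module's private constants are assumed to match these):

BOLD = '\x1b[1m'        # _TURN_BOLD_MODE_ON
UNDERLINE = '\x1b[4m'   # _TURN_UNDERLINE_MODE_ON
RESET = '\x1b[0m'       # _TURN_OFF_CHARACTER_ATTS
RED = '\x1b[31m'

def line(s, *attrs):
    return ''.join(attrs) + s + RESET

print(line('error:', BOLD, RED), 'something went wrong')
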
tanghaibao/jcvi | jcvi/variation/cnv.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L578-L613 | def vcf_to_df_worker(arg):
""" Convert CANVAS vcf to a dict, single thread
"""
canvasvcf, exonbed, i = arg
logging.debug("Working on job {}: {}".format(i, canvasvcf))
samplekey = op.basename(canvasvcf).split(".")[0].rsplit('_', 1)[0]
d = {'SampleKey': samplekey}
exons = BedTool(exonbed)
cn = parse_segments(canvasvcf)
overlaps = exons.intersect(cn, wao=True)
gcn_store = {}
for ov in overlaps:
# Example of ov.fields:
# [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
# u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
# u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
if gene_name not in gcn_store:
gcn_store[gene_name] = defaultdict(int)
cn = ov.fields[-2]
if cn == ".":
continue
cn = int(cn)
if cn > 10:
cn = 10
amt = int(ov.fields[-1])
gcn_store[gene_name][cn] += amt
for k, v in sorted(gcn_store.items()):
v_mean, v_median = counter_mean_and_median(v)
d[k + ".avgcn"] = v_mean
d[k + ".medcn"] = v_median
cleanup()
return d | [
"def",
"vcf_to_df_worker",
"(",
"arg",
")",
":",
"canvasvcf",
",",
"exonbed",
",",
"i",
"=",
"arg",
"logging",
".",
"debug",
"(",
"\"Working on job {}: {}\"",
".",
"format",
"(",
"i",
",",
"canvasvcf",
")",
")",
"samplekey",
"=",
"op",
".",
"basename",
"(",
"canvasvcf",
")",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'_'",
",",
"1",
")",
"[",
"0",
"]",
"d",
"=",
"{",
"'SampleKey'",
":",
"samplekey",
"}",
"exons",
"=",
"BedTool",
"(",
"exonbed",
")",
"cn",
"=",
"parse_segments",
"(",
"canvasvcf",
")",
"overlaps",
"=",
"exons",
".",
"intersect",
"(",
"cn",
",",
"wao",
"=",
"True",
")",
"gcn_store",
"=",
"{",
"}",
"for",
"ov",
"in",
"overlaps",
":",
"# Example of ov.fields:",
"# [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',",
"# u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',",
"# u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']",
"gene_name",
"=",
"\"|\"",
".",
"join",
"(",
"(",
"ov",
".",
"fields",
"[",
"6",
"]",
",",
"ov",
".",
"fields",
"[",
"3",
"]",
",",
"ov",
".",
"fields",
"[",
"5",
"]",
")",
")",
"if",
"gene_name",
"not",
"in",
"gcn_store",
":",
"gcn_store",
"[",
"gene_name",
"]",
"=",
"defaultdict",
"(",
"int",
")",
"cn",
"=",
"ov",
".",
"fields",
"[",
"-",
"2",
"]",
"if",
"cn",
"==",
"\".\"",
":",
"continue",
"cn",
"=",
"int",
"(",
"cn",
")",
"if",
"cn",
">",
"10",
":",
"cn",
"=",
"10",
"amt",
"=",
"int",
"(",
"ov",
".",
"fields",
"[",
"-",
"1",
"]",
")",
"gcn_store",
"[",
"gene_name",
"]",
"[",
"cn",
"]",
"+=",
"amt",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"gcn_store",
".",
"items",
"(",
")",
")",
":",
"v_mean",
",",
"v_median",
"=",
"counter_mean_and_median",
"(",
"v",
")",
"d",
"[",
"k",
"+",
"\".avgcn\"",
"]",
"=",
"v_mean",
"d",
"[",
"k",
"+",
"\".medcn\"",
"]",
"=",
"v_median",
"cleanup",
"(",
")",
"return",
"d"
]
| Convert CANVAS vcf to a dict, single thread | [
"Convert",
"CANVAS",
"vcf",
"to",
"a",
"dict",
"single",
"thread"
]
| python | train | 32.916667 |
askedrelic/libgreader | libgreader/googlereader.py | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L136-L162 | def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
        :param items: array of dicts with:
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False) | [
"def",
"_getFeedContent",
"(",
"self",
",",
"url",
",",
"excludeRead",
"=",
"False",
",",
"continuation",
"=",
"None",
",",
"loadLimit",
"=",
"20",
",",
"since",
"=",
"None",
",",
"until",
"=",
"None",
")",
":",
"parameters",
"=",
"{",
"}",
"if",
"excludeRead",
":",
"parameters",
"[",
"'xt'",
"]",
"=",
"'user/-/state/com.google/read'",
"if",
"continuation",
":",
"parameters",
"[",
"'c'",
"]",
"=",
"continuation",
"parameters",
"[",
"'n'",
"]",
"=",
"loadLimit",
"if",
"since",
":",
"parameters",
"[",
"'ot'",
"]",
"=",
"since",
"if",
"until",
":",
"parameters",
"[",
"'nt'",
"]",
"=",
"until",
"contentJson",
"=",
"self",
".",
"httpGet",
"(",
"url",
",",
"parameters",
")",
"return",
"json",
".",
"loads",
"(",
"contentJson",
",",
"strict",
"=",
"False",
")"
]
| A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
        :param items: array of dicts with:
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner) | [
"A",
"list",
"of",
"items",
"(",
"from",
"a",
"feed",
"a",
"category",
"or",
"from",
"URLs",
"made",
"with",
"SPECIAL_ITEMS_URL",
")"
]
| python | train | 40.592593 |
danilobellini/dose | dose/misc.py | https://github.com/danilobellini/dose/blob/141f48322f7812b7d32e3d5f065d4473a11102a4/dose/misc.py#L114-L124 | def read_plain_text(fname, encoding="utf-8"):
"""Reads a file as a list of strings."""
with io.open(fname, encoding=encoding) as f:
result = list(f)
if result:
if result[-1][-1:] == "\n":
result.append("\n")
else:
result[-1] += "\n"
return [line[:-1] for line in result]
return [] | [
"def",
"read_plain_text",
"(",
"fname",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"with",
"io",
".",
"open",
"(",
"fname",
",",
"encoding",
"=",
"encoding",
")",
"as",
"f",
":",
"result",
"=",
"list",
"(",
"f",
")",
"if",
"result",
":",
"if",
"result",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
":",
"]",
"==",
"\"\\n\"",
":",
"result",
".",
"append",
"(",
"\"\\n\"",
")",
"else",
":",
"result",
"[",
"-",
"1",
"]",
"+=",
"\"\\n\"",
"return",
"[",
"line",
"[",
":",
"-",
"1",
"]",
"for",
"line",
"in",
"result",
"]",
"return",
"[",
"]"
]
| Reads a file as a list of strings. | [
"Reads",
"a",
"file",
"as",
"a",
"list",
"of",
"strings",
"."
]
| python | train | 31.090909 |
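
Usage sketch -- the helper strips every trailing newline and still yields the final line of a file that lacks one:

import io, os, tempfile

path = os.path.join(tempfile.mkdtemp(), "notes.txt")
with io.open(path, "w", encoding="utf-8") as f:
    f.write(u"first\nsecond")       # no trailing newline on purpose

print(read_plain_text(path))         # ['first', 'second']
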
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L581-L588 | def set_mode_loiter(self):
'''enter LOITER mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_NAV_LOITER_UNLIM, 0, 0, 0, 0, 0, 0, 0, 0)
else:
MAV_ACTION_LOITER = 27
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_LOITER) | [
"def",
"set_mode_loiter",
"(",
"self",
")",
":",
"if",
"self",
".",
"mavlink10",
"(",
")",
":",
"self",
".",
"mav",
".",
"command_long_send",
"(",
"self",
".",
"target_system",
",",
"self",
".",
"target_component",
",",
"mavlink",
".",
"MAV_CMD_NAV_LOITER_UNLIM",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"else",
":",
"MAV_ACTION_LOITER",
"=",
"27",
"self",
".",
"mav",
".",
"action_send",
"(",
"self",
".",
"target_system",
",",
"self",
".",
"target_component",
",",
"MAV_ACTION_LOITER",
")"
]
| enter LOITER mode | [
"enter",
"LOITER",
"mode"
]
| python | train | 50.375 |
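
A short sketch of reaching `set_mode_loiter` above through pymavlink; the connection string is an assumption (any MAVLink endpoint works):

    from pymavlink import mavutil

    master = mavutil.mavlink_connection('udp:127.0.0.1:14550')  # assumed SITL endpoint
    master.wait_heartbeat()   # fills in target_system / target_component
    master.set_mode_loiter()  # sends MAV_CMD_NAV_LOITER_UNLIM on MAVLink 1.0+
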
Vital-Fernandez/dazer | bin/lib/Plotting_Libraries/dazer_plotter.py | https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Plotting_Libraries/dazer_plotter.py#L42-L163 | def define_format(self, plotStyle, plotSize):
#Default sizes for computer
sizing_dict = {}
sizing_dict['figure.figsize'] = (14, 8)
sizing_dict['legend.fontsize'] = 15
sizing_dict['axes.labelsize'] = 20
sizing_dict['axes.titlesize'] = 24
sizing_dict['xtick.labelsize'] = 14
sizing_dict['ytick.labelsize'] = 14
self.colorVector = {
'iron':'#4c4c4c',
'silver':'#cccccc',
'dark blue':'#0072B2',
'green':'#009E73',
'orangish':'#D55E00',
'pink':'#CC79A7',
'yellow':'#F0E442',
'cyan':'#56B4E9',
'olive':'#bcbd22',
'grey':'#7f7f7f',
'skin':'#FFB5B8'}
#sizing_dict['text.usetex'] = True
#--Update the colors/format
if plotStyle == None:
self.ColorVector = [None, None, None]
elif plotStyle == 'dark':
plt.style.use('dark_background')
elif plotStyle == 'night':
plt.style.use('seaborn-colorblind')
iron_color = '#4c4c4c' #Iron: (76 76 76)
silver_color = '#cccccc' #Silver: (204 204 204)
sizing_dict['axes.facecolor'] = iron_color
sizing_dict['figure.facecolor'] = iron_color
sizing_dict['axes.edgecolor'] = silver_color
sizing_dict['text.color'] = silver_color
sizing_dict['axes.labelcolor'] = silver_color
sizing_dict['xtick.color'] = silver_color
sizing_dict['ytick.color'] = silver_color
sizing_dict['axes.edgecolor'] = silver_color
#'plt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y']) + cycler('linestyle', ['-', '--', ':', '-.'])))'
#This should be the set up for the cycler we just need to add the colors
#axes.prop_cycle : cycler('color', 'bgrcmyk')
elif plotStyle == 'colorblind':
plt.style.use('seaborn-colorblind')
else:
plt.style.use(plotStyle)
#--Load particular configuration for this plot
if plotSize == 'medium':
rcParams.update(sizing_dict)
elif type(plotSize) is dict:
sizing_dict.update(plotSize)
rcParams.update(sizing_dict)
'''
Seaborn color blind
#0072B2 dark blue
#009E73 green
#D55E00 orangish
#CC79A7 pink
#F0E442 yellow
#56B4E9 cyan
#bcbd22 olive #adicional
#7f7f7f grey
#FFB5B8 skin
'''
'''
Matplotlib default palete
#17becf dark blue
#bcbd22 orange
#2ca02c green
#e377c2 red
#8c564b purple
#9467bd brown
#d62728 pink
#7f7f7f grey
#ff7f0e olive
#1f77b4 cyan
'''
'''
--These are matplotlib styles
seaborn-darkgrid
seaborn-notebook
classic
seaborn-ticks
grayscale
bmh
seaborn-talk
dark_background
ggplot
fivethirtyeight
seaborn-colorblind
seaborn-deep
seaborn-whitegrid
seaborn-bright
seaborn-poster
seaborn-muted
seaborn-paper
seaborn-white
seaborn-pastel
seaborn-dark
seaborn
seaborn-dark-palette
''' | [
"def",
"define_format",
"(",
"self",
",",
"plotStyle",
",",
"plotSize",
")",
":",
"#Default sizes for computer",
"sizing_dict",
"=",
"{",
"}",
"sizing_dict",
"[",
"'figure.figsize'",
"]",
"=",
"(",
"14",
",",
"8",
")",
"sizing_dict",
"[",
"'legend.fontsize'",
"]",
"=",
"15",
"sizing_dict",
"[",
"'axes.labelsize'",
"]",
"=",
"20",
"sizing_dict",
"[",
"'axes.titlesize'",
"]",
"=",
"24",
"sizing_dict",
"[",
"'xtick.labelsize'",
"]",
"=",
"14",
"sizing_dict",
"[",
"'ytick.labelsize'",
"]",
"=",
"14",
"self",
".",
"colorVector",
"=",
"{",
"'iron'",
":",
"'#4c4c4c'",
",",
"'silver'",
":",
"'#cccccc'",
",",
"'dark blue'",
":",
"'#0072B2'",
",",
"'green'",
":",
"'#009E73'",
",",
"'orangish'",
":",
"'#D55E00'",
",",
"'pink'",
":",
"'#CC79A7'",
",",
"'yellow'",
":",
"'#F0E442'",
",",
"'cyan'",
":",
"'#56B4E9'",
",",
"'olive'",
":",
"'#bcbd22'",
",",
"'grey'",
":",
"'#7f7f7f'",
",",
"'skin'",
":",
"'#FFB5B8'",
"}",
"#sizing_dict['text.usetex'] = True",
"#--Update the colors/format",
"if",
"plotStyle",
"==",
"None",
":",
"self",
".",
"ColorVector",
"=",
"[",
"None",
",",
"None",
",",
"None",
"]",
"elif",
"plotStyle",
"==",
"'dark'",
":",
"plt",
".",
"style",
".",
"use",
"(",
"'dark_background'",
")",
"elif",
"plotStyle",
"==",
"'night'",
":",
"plt",
".",
"style",
".",
"use",
"(",
"'seaborn-colorblind'",
")",
"iron_color",
"=",
"'#4c4c4c'",
"#Iron: (76 76 76)",
"silver_color",
"=",
"'#cccccc'",
"#Silver: (204 204 204) ",
"sizing_dict",
"[",
"'axes.facecolor'",
"]",
"=",
"iron_color",
"sizing_dict",
"[",
"'figure.facecolor'",
"]",
"=",
"iron_color",
"sizing_dict",
"[",
"'axes.edgecolor'",
"]",
"=",
"silver_color",
"sizing_dict",
"[",
"'text.color'",
"]",
"=",
"silver_color",
"sizing_dict",
"[",
"'axes.labelcolor'",
"]",
"=",
"silver_color",
"sizing_dict",
"[",
"'xtick.color'",
"]",
"=",
"silver_color",
"sizing_dict",
"[",
"'ytick.color'",
"]",
"=",
"silver_color",
"sizing_dict",
"[",
"'axes.edgecolor'",
"]",
"=",
"silver_color",
"#'plt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y']) + cycler('linestyle', ['-', '--', ':', '-.'])))'",
"#This should be the set up for the cycler we just need to add the colors",
"#axes.prop_cycle : cycler('color', 'bgrcmyk')",
"elif",
"plotStyle",
"==",
"'colorblind'",
":",
"plt",
".",
"style",
".",
"use",
"(",
"'seaborn-colorblind'",
")",
"else",
":",
"plt",
".",
"style",
".",
"use",
"(",
"plotStyle",
")",
"#--Load particular configuration for this plot",
"if",
"plotSize",
"==",
"'medium'",
":",
"rcParams",
".",
"update",
"(",
"sizing_dict",
")",
"elif",
"type",
"(",
"plotSize",
")",
"is",
"dict",
":",
"sizing_dict",
".",
"update",
"(",
"plotSize",
")",
"rcParams",
".",
"update",
"(",
"sizing_dict",
")",
"'''\n Matplotlib default palete\n #17becf dark blue\n #bcbd22 orange\n #2ca02c green\n #e377c2 red\n #8c564b purple\n #9467bd brown\n #d62728 pink\n #7f7f7f grey\n #ff7f0e olive\n #1f77b4 cyan\n '''",
"'''\n --These are matplotlib styles\n seaborn-darkgrid\n seaborn-notebook\n classic\n seaborn-ticks\n grayscale\n bmh\n seaborn-talk\n dark_background\n ggplot\n fivethirtyeight\n seaborn-colorblind\n seaborn-deep\n seaborn-whitegrid\n seaborn-bright\n seaborn-poster\n seaborn-muted\n seaborn-paper\n seaborn-white\n seaborn-pastel\n seaborn-dark\n seaborn\n seaborn-dark-palette\n '''"
]
| Seaborn color blind
#0072B2 dark blue
#009E73 green
#D55E00 orangish
#CC79A7 pink
#F0E442 yellow
#56B4E9 cyan
#bcbd22 olive #adicional
#7f7f7f grey
#FFB5B8 skin | [
"Seaborn",
"color",
"blind",
"#0072B2",
"dark",
"blue",
"#009E73",
"green",
"#D55E00",
"orangish",
"#CC79A7",
"pink",
"#F0E442",
"yellow",
"#56B4E9",
"cyan",
"#bcbd22",
"olive",
"#adicional",
"#7f7f7f",
"grey",
"#FFB5B8",
"skin"
]
| python | train | 27.540984 |
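
A hedged sketch of driving `define_format` above; `plotter` stands in for an instance of the plotting class in `dazer_plotter.py`, which is an assumption:

    # 'night' selects the seaborn-colorblind palette on an iron/silver background;
    # passing a dict as plotSize overrides individual rcParams entries instead of
    # using the 'medium' preset.
    plotter.define_format('night', 'medium')
    plotter.define_format('dark', {'figure.figsize': (10, 6), 'axes.labelsize': 16})
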
ejhigson/nestcheck | nestcheck/estimators.py | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L78-L98 | def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw)) | [
"def",
"evidence",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"simulate",
")",
"return",
"np",
".",
"exp",
"(",
"scipy",
".",
"special",
".",
"logsumexp",
"(",
"logw",
")",
")"
]
| r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | [
"r",
"Bayesian",
"evidence",
":",
"math",
":",
"\\",
"log",
"\\",
"mathcal",
"{",
"Z",
"}",
"."
]
| python | train | 28.619048 |
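
A usage sketch for the `evidence` record above; `ns_run` is assumed to be a run dict produced by `nestcheck.data_processing`. Note that the body returns `np.exp(logsumexp(logw))`, i.e. the evidence itself rather than its log, despite the `\log \mathcal{Z}` in the docstring:

    import numpy as np
    import nestcheck.estimators

    z = nestcheck.estimators.evidence(ns_run)   # ns_run: assumed processed run dict
    log_z = np.log(z)                           # recover log-evidence if that is what you report
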
Chilipp/psyplot | psyplot/config/rcsetup.py | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/config/rcsetup.py#L761-L854 | def load_plugins(self, raise_error=False):
"""
Load the plotters and defaultParams from the plugins
This method loads the `plotters` attribute and `defaultParams`
attribute from the plugins that use the entry point specified by
`group`. Entry points must be objects (or modules) that have a
`defaultParams` and a `plotters` attribute.
Parameters
----------
raise_error: bool
If True, an error is raised when multiple plugins define the same
plotter or rcParams key. Otherwise only a warning is raised"""
pm_env = os.getenv('PSYPLOT_PLOTMETHODS', '').split('::')
include_pms = [s[4:] for s in pm_env if s.startswith('yes:')]
exclude_pms = [s[3:] for s in pm_env if s.startswith('no:')]
logger = logging.getLogger(__name__)
plotters = self['project.plotters']
def_plots = {'default': list(plotters)}
defaultParams = self.defaultParams
def_keys = {'default': defaultParams}
def register_pm(ep, name):
full_name = '%s:%s' % (ep.module_name, name)
ret = True
if pm_env == ['no']:
ret = False
elif name in exclude_pms or full_name in exclude_pms:
ret = False
elif include_pms and (name not in include_pms and
full_name not in include_pms):
ret = False
if not ret:
logger.debug('Skipping plot method %s', full_name)
return ret
for ep in self._load_plugin_entrypoints():
plugin_mod = ep.load()
rc = plugin_mod.rcParams
# load the plotters
plugin_plotters = {
key: val for key, val in rc.get('project.plotters', {}).items()
if register_pm(ep, key)}
already_defined = set(plotters).intersection(plugin_plotters)
if already_defined:
msg = ("Error while loading psyplot plugin %s! The "
"following plotters have already been "
"defined") % ep
msg += 'and will be overwritten:' if not raise_error else ':'
msg += '\n' + '\n'.join(chain.from_iterable(
(('%s by %s' % (key, plugin)
for plugin, keys in def_plots.items() if key in keys)
for key in already_defined)))
if raise_error:
raise ImportError(msg)
else:
warn(msg)
for d in plugin_plotters.values():
d['plugin'] = ep.module_name
plotters.update(plugin_plotters)
def_plots[ep] = list(plugin_plotters)
# load the defaultParams keys
plugin_defaultParams = rc.defaultParams
already_defined = set(defaultParams).intersection(
plugin_defaultParams) - {'project.plotters'}
if already_defined:
msg = ("Error while loading psyplot plugin %s! The "
"following default keys have already been "
"defined:") % ep
msg += '\n' + '\n'.join(chain.from_iterable(
(('%s by %s' % (key, plugin)
for plugin, keys in def_keys.items() if key in keys)
for key in already_defined)))
if raise_error:
raise ImportError(msg)
else:
warn(msg)
update_keys = set(plugin_defaultParams) - {'project.plotters'}
def_keys[ep] = update_keys
self.defaultParams.update(
{key: plugin_defaultParams[key] for key in update_keys})
# load the rcParams (without validation)
super(RcParams, self).update({key: rc[key] for key in update_keys})
# add the deprecated keys
self._deprecated_ignore_map.update(rc._deprecated_ignore_map)
self._deprecated_map.update(rc._deprecated_map) | [
"def",
"load_plugins",
"(",
"self",
",",
"raise_error",
"=",
"False",
")",
":",
"pm_env",
"=",
"os",
".",
"getenv",
"(",
"'PSYPLOT_PLOTMETHODS'",
",",
"''",
")",
".",
"split",
"(",
"'::'",
")",
"include_pms",
"=",
"[",
"s",
"[",
"4",
":",
"]",
"for",
"s",
"in",
"pm_env",
"if",
"s",
".",
"startswith",
"(",
"'yes:'",
")",
"]",
"exclude_pms",
"=",
"[",
"s",
"[",
"3",
":",
"]",
"for",
"s",
"in",
"pm_env",
"if",
"s",
".",
"startswith",
"(",
"'no:'",
")",
"]",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"plotters",
"=",
"self",
"[",
"'project.plotters'",
"]",
"def_plots",
"=",
"{",
"'default'",
":",
"list",
"(",
"plotters",
")",
"}",
"defaultParams",
"=",
"self",
".",
"defaultParams",
"def_keys",
"=",
"{",
"'default'",
":",
"defaultParams",
"}",
"def",
"register_pm",
"(",
"ep",
",",
"name",
")",
":",
"full_name",
"=",
"'%s:%s'",
"%",
"(",
"ep",
".",
"module_name",
",",
"name",
")",
"ret",
"=",
"True",
"if",
"pm_env",
"==",
"[",
"'no'",
"]",
":",
"ret",
"=",
"False",
"elif",
"name",
"in",
"exclude_pms",
"or",
"full_name",
"in",
"exclude_pms",
":",
"ret",
"=",
"False",
"elif",
"include_pms",
"and",
"(",
"name",
"not",
"in",
"include_pms",
"and",
"full_name",
"not",
"in",
"include_pms",
")",
":",
"ret",
"=",
"False",
"if",
"not",
"ret",
":",
"logger",
".",
"debug",
"(",
"'Skipping plot method %s'",
",",
"full_name",
")",
"return",
"ret",
"for",
"ep",
"in",
"self",
".",
"_load_plugin_entrypoints",
"(",
")",
":",
"plugin_mod",
"=",
"ep",
".",
"load",
"(",
")",
"rc",
"=",
"plugin_mod",
".",
"rcParams",
"# load the plotters",
"plugin_plotters",
"=",
"{",
"key",
":",
"val",
"for",
"key",
",",
"val",
"in",
"rc",
".",
"get",
"(",
"'project.plotters'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
"if",
"register_pm",
"(",
"ep",
",",
"key",
")",
"}",
"already_defined",
"=",
"set",
"(",
"plotters",
")",
".",
"intersection",
"(",
"plugin_plotters",
")",
"if",
"already_defined",
":",
"msg",
"=",
"(",
"\"Error while loading psyplot plugin %s! The \"",
"\"following plotters have already been \"",
"\"defined\"",
")",
"%",
"ep",
"msg",
"+=",
"'and will be overwritten:'",
"if",
"not",
"raise_error",
"else",
"':'",
"msg",
"+=",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"chain",
".",
"from_iterable",
"(",
"(",
"(",
"'%s by %s'",
"%",
"(",
"key",
",",
"plugin",
")",
"for",
"plugin",
",",
"keys",
"in",
"def_plots",
".",
"items",
"(",
")",
"if",
"key",
"in",
"keys",
")",
"for",
"key",
"in",
"already_defined",
")",
")",
")",
"if",
"raise_error",
":",
"raise",
"ImportError",
"(",
"msg",
")",
"else",
":",
"warn",
"(",
"msg",
")",
"for",
"d",
"in",
"plugin_plotters",
".",
"values",
"(",
")",
":",
"d",
"[",
"'plugin'",
"]",
"=",
"ep",
".",
"module_name",
"plotters",
".",
"update",
"(",
"plugin_plotters",
")",
"def_plots",
"[",
"ep",
"]",
"=",
"list",
"(",
"plugin_plotters",
")",
"# load the defaultParams keys",
"plugin_defaultParams",
"=",
"rc",
".",
"defaultParams",
"already_defined",
"=",
"set",
"(",
"defaultParams",
")",
".",
"intersection",
"(",
"plugin_defaultParams",
")",
"-",
"{",
"'project.plotters'",
"}",
"if",
"already_defined",
":",
"msg",
"=",
"(",
"\"Error while loading psyplot plugin %s! The \"",
"\"following default keys have already been \"",
"\"defined:\"",
")",
"%",
"ep",
"msg",
"+=",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"chain",
".",
"from_iterable",
"(",
"(",
"(",
"'%s by %s'",
"%",
"(",
"key",
",",
"plugin",
")",
"for",
"plugin",
",",
"keys",
"in",
"def_keys",
".",
"items",
"(",
")",
"if",
"key",
"in",
"keys",
")",
"for",
"key",
"in",
"already_defined",
")",
")",
")",
"if",
"raise_error",
":",
"raise",
"ImportError",
"(",
"msg",
")",
"else",
":",
"warn",
"(",
"msg",
")",
"update_keys",
"=",
"set",
"(",
"plugin_defaultParams",
")",
"-",
"{",
"'project.plotters'",
"}",
"def_keys",
"[",
"ep",
"]",
"=",
"update_keys",
"self",
".",
"defaultParams",
".",
"update",
"(",
"{",
"key",
":",
"plugin_defaultParams",
"[",
"key",
"]",
"for",
"key",
"in",
"update_keys",
"}",
")",
"# load the rcParams (without validation)",
"super",
"(",
"RcParams",
",",
"self",
")",
".",
"update",
"(",
"{",
"key",
":",
"rc",
"[",
"key",
"]",
"for",
"key",
"in",
"update_keys",
"}",
")",
"# add the deprecated keys",
"self",
".",
"_deprecated_ignore_map",
".",
"update",
"(",
"rc",
".",
"_deprecated_ignore_map",
")",
"self",
".",
"_deprecated_map",
".",
"update",
"(",
"rc",
".",
"_deprecated_map",
")"
]
| Load the plotters and defaultParams from the plugins
This method loads the `plotters` attribute and `defaultParams`
attribute from the plugins that use the entry point specified by
`group`. Entry points must be objects (or modules) that have a
`defaultParams` and a `plotters` attribute.
Parameters
----------
raise_error: bool
If True, an error is raised when multiple plugins define the same
plotter or rcParams key. Otherwise only a warning is raised | [
"Load",
"the",
"plotters",
"and",
"defaultParams",
"from",
"the",
"plugins"
]
| python | train | 42.776596 |
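
The `PSYPLOT_PLOTMETHODS` filtering in `load_plugins` above can be summarised with a hedged sketch; the plot-method names are invented placeholders, and the top-level `rcParams` import is an assumption:

    import os
    from psyplot import rcParams   # assumption: the module-level RcParams instance

    # '::'-separated entries; 'yes:' whitelists, 'no:' blacklists, a bare 'no' skips all.
    os.environ['PSYPLOT_PLOTMETHODS'] = 'yes:plot2d::no:psy_simple.plugin:vector'
    rcParams.load_plugins()
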
Parsl/parsl | parsl/app/app.py | https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/app/app.py#L78-L116 | def App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
"""The App decorator function.
Args:
- apptype (string) : Apptype can be bash|python
Kwargs:
- data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for
managing this app. This can be omitted only
after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.
- walltime (int) : Walltime for app in seconds,
default=60
- executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.
- cache (Bool) : Enable caching of the app call
default=False
Returns:
A PythonApp or BashApp object, which when called runs the apps through the executor.
"""
from parsl.app.python import PythonApp
from parsl.app.bash import BashApp
logger.warning("The 'App' decorator will be deprecated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.")
if apptype == 'python':
app_class = PythonApp
elif apptype == 'bash':
app_class = BashApp
else:
raise InvalidAppTypeError("Invalid apptype requested {}; must be 'python' or 'bash'".format(apptype))
def wrapper(f):
return app_class(f,
data_flow_kernel=data_flow_kernel,
walltime=walltime,
cache=cache,
executors=executors)
return wrapper | [
"def",
"App",
"(",
"apptype",
",",
"data_flow_kernel",
"=",
"None",
",",
"walltime",
"=",
"60",
",",
"cache",
"=",
"False",
",",
"executors",
"=",
"'all'",
")",
":",
"from",
"parsl",
".",
"app",
".",
"python",
"import",
"PythonApp",
"from",
"parsl",
".",
"app",
".",
"bash",
"import",
"BashApp",
"logger",
".",
"warning",
"(",
"\"The 'App' decorator will be deprecated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.\"",
")",
"if",
"apptype",
"==",
"'python'",
":",
"app_class",
"=",
"PythonApp",
"elif",
"apptype",
"==",
"'bash'",
":",
"app_class",
"=",
"BashApp",
"else",
":",
"raise",
"InvalidAppTypeError",
"(",
"\"Invalid apptype requested {}; must be 'python' or 'bash'\"",
".",
"format",
"(",
"apptype",
")",
")",
"def",
"wrapper",
"(",
"f",
")",
":",
"return",
"app_class",
"(",
"f",
",",
"data_flow_kernel",
"=",
"data_flow_kernel",
",",
"walltime",
"=",
"walltime",
",",
"cache",
"=",
"cache",
",",
"executors",
"=",
"executors",
")",
"return",
"wrapper"
]
| The App decorator function.
Args:
- apptype (string) : Apptype can be bash|python
Kwargs:
- data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for
managing this app. This can be omitted only
after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.
- walltime (int) : Walltime for app in seconds,
default=60
- executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.
- cache (Bool) : Enable caching of the app call
default=False
Returns:
A PythonApp or BashApp object, which when called runs the apps through the executor. | [
"The",
"App",
"decorator",
"function",
"."
]
| python | valid | 38.153846 |
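
A hedged sketch of the deprecated decorator above; a DataFlowKernel must already be loaded, and the local-threads config is an assumption:

    import parsl
    from parsl.app.app import App
    from parsl.configs.local_threads import config   # assumption: bundled example config

    parsl.load(config)

    @App('python', walltime=120, cache=True)
    def double(x):
        return 2 * x

    print(double(21).result())   # apps return futures; .result() blocks -> 42
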
brocade/pynos | pynos/versions/base/yang/brocade_ras.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_ras.py#L465-L474 | def support_autoupload_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
autoupload = ET.SubElement(support, "autoupload")
enable = ET.SubElement(autoupload, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"support_autoupload_enable",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"support",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"support\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-ras\"",
")",
"autoupload",
"=",
"ET",
".",
"SubElement",
"(",
"support",
",",
"\"autoupload\"",
")",
"enable",
"=",
"ET",
".",
"SubElement",
"(",
"autoupload",
",",
"\"enable\"",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train | 41.3 |
dnanexus/dx-toolkit | src/python/dxpy/api.py | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1219-L1225 | def system_find_apps(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findApps API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindApps
"""
return DXHTTPRequest('/system/findApps', input_params, always_retry=always_retry, **kwargs) | [
"def",
"system_find_apps",
"(",
"input_params",
"=",
"{",
"}",
",",
"always_retry",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DXHTTPRequest",
"(",
"'/system/findApps'",
",",
"input_params",
",",
"always_retry",
"=",
"always_retry",
",",
"*",
"*",
"kwargs",
")"
]
| Invokes the /system/findApps API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindApps | [
"Invokes",
"the",
"/",
"system",
"/",
"findApps",
"API",
"method",
"."
]
| python | train | 48 |
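
A brief usage sketch for `system_find_apps` above; the query fields come from the linked API spec and are assumptions as far as this record is concerned:

    import dxpy.api

    result = dxpy.api.system_find_apps({"name": "bwa", "limit": 10})  # placeholder query
    for app in result.get("results", []):
        print(app["id"])
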
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L107-L114 | def items(self):
"""
        return list of pairs of name and value of all declared columns.
"""
return [
(c.name, getattr(self, c.name, None))
for c in self.__table__._columns
] | [
"def",
"items",
"(",
"self",
")",
":",
"return",
"[",
"(",
"c",
".",
"name",
",",
"getattr",
"(",
"self",
",",
"c",
".",
"name",
",",
"None",
")",
")",
"for",
"c",
"in",
"self",
".",
"__table__",
".",
"_columns",
"]"
]
| return list of pairs of name and value of all declared columns. | [
    "return",
    "list",
    "of",
    "pairs",
"of",
"name",
"and",
"value",
"of",
"all",
"declared",
"columns",
"."
]
| python | train | 28.25 |
damnit/pymite | pymite/adapters.py | https://github.com/damnit/pymite/blob/1e9b9bf6aef790af2d8781f9f77c098c54ca0342/pymite/adapters.py#L261-L265 | def stop(self, id):
""" stop the tracker. """
path = partial(_path, self.adapter)
path = path(id)
return self._delete(path) | [
"def",
"stop",
"(",
"self",
",",
"id",
")",
":",
"path",
"=",
"partial",
"(",
"_path",
",",
"self",
".",
"adapter",
")",
"path",
"=",
"path",
"(",
"id",
")",
"return",
"self",
".",
"_delete",
"(",
"path",
")"
]
| stop the tracker. | [
"stop",
"the",
"tracker",
"."
]
| python | train | 30.2 |
YosaiProject/yosai | yosai/core/mgt/mgt.py | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L514-L582 | def create_subject(self,
authc_token=None,
account_id=None,
existing_subject=None,
subject_context=None):
"""
Creates a ``Subject`` instance for the user represented by the given method
arguments.
It is an overloaded method, due to porting java to python, and is
consequently highly likely to be refactored.
It gets called in one of two ways:
1) when creating an anonymous subject, passing create_subject
a subject_context argument
        2) following a successful login, passing all but the context argument
This implementation functions as follows:
- Ensures that the ``SubjectContext`` exists and is as populated as it can be,
using heuristics to acquire data that may not have already been available
to it (such as a referenced session or remembered identifiers).
- Calls subject_context.do_create_subject to perform the Subject
instance creation
- Calls subject.save to ensure the constructed Subject's state is
accessible for future requests/invocations if necessary
- Returns the constructed Subject instance
:type authc_token: subject_abcs.AuthenticationToken
:param account_id: the identifiers of a newly authenticated user
:type account: SimpleIdentifierCollection
:param existing_subject: the existing Subject instance that initiated the
authentication attempt
:type subject: subject_abcs.Subject
:type subject_context: subject_abcs.SubjectContext
:returns: the Subject instance that represents the context and session
data for the newly authenticated subject
"""
if subject_context is None: # this that means a successful login just happened
# passing existing_subject is new to yosai:
context = self.create_subject_context(existing_subject)
context.authenticated = True
context.authentication_token = authc_token
context.account_id = account_id
if (existing_subject):
context.subject = existing_subject
else:
context = copy.copy(subject_context) # if this necessary? TBD.
context = self.ensure_security_manager(context)
context = self.resolve_session(context)
context = self.resolve_identifiers(context)
subject = self.do_create_subject(context) # DelegatingSubject
# save this subject for future reference if necessary:
# (this is needed here in case remember_me identifiers were resolved
# and they need to be stored in the session, so we don't constantly
# re-hydrate the remember_me identifier_collection on every operation).
self.save(subject)
return subject | [
"def",
"create_subject",
"(",
"self",
",",
"authc_token",
"=",
"None",
",",
"account_id",
"=",
"None",
",",
"existing_subject",
"=",
"None",
",",
"subject_context",
"=",
"None",
")",
":",
"if",
"subject_context",
"is",
"None",
":",
"# this that means a successful login just happened",
"# passing existing_subject is new to yosai:",
"context",
"=",
"self",
".",
"create_subject_context",
"(",
"existing_subject",
")",
"context",
".",
"authenticated",
"=",
"True",
"context",
".",
"authentication_token",
"=",
"authc_token",
"context",
".",
"account_id",
"=",
"account_id",
"if",
"(",
"existing_subject",
")",
":",
"context",
".",
"subject",
"=",
"existing_subject",
"else",
":",
"context",
"=",
"copy",
".",
"copy",
"(",
"subject_context",
")",
"# if this necessary? TBD.",
"context",
"=",
"self",
".",
"ensure_security_manager",
"(",
"context",
")",
"context",
"=",
"self",
".",
"resolve_session",
"(",
"context",
")",
"context",
"=",
"self",
".",
"resolve_identifiers",
"(",
"context",
")",
"subject",
"=",
"self",
".",
"do_create_subject",
"(",
"context",
")",
"# DelegatingSubject",
"# save this subject for future reference if necessary:",
"# (this is needed here in case remember_me identifiers were resolved",
"# and they need to be stored in the session, so we don't constantly",
"# re-hydrate the remember_me identifier_collection on every operation).",
"self",
".",
"save",
"(",
"subject",
")",
"return",
"subject"
]
| Creates a ``Subject`` instance for the user represented by the given method
arguments.
It is an overloaded method, due to porting java to python, and is
consequently highly likely to be refactored.
It gets called in one of two ways:
1) when creating an anonymous subject, passing create_subject
a subject_context argument
        2) following a successful login, passing all but the context argument
This implementation functions as follows:
- Ensures that the ``SubjectContext`` exists and is as populated as it can be,
using heuristics to acquire data that may not have already been available
to it (such as a referenced session or remembered identifiers).
- Calls subject_context.do_create_subject to perform the Subject
instance creation
- Calls subject.save to ensure the constructed Subject's state is
accessible for future requests/invocations if necessary
- Returns the constructed Subject instance
:type authc_token: subject_abcs.AuthenticationToken
:param account_id: the identifiers of a newly authenticated user
:type account: SimpleIdentifierCollection
:param existing_subject: the existing Subject instance that initiated the
authentication attempt
:type subject: subject_abcs.Subject
:type subject_context: subject_abcs.SubjectContext
:returns: the Subject instance that represents the context and session
data for the newly authenticated subject | [
"Creates",
"a",
"Subject",
"instance",
"for",
"the",
"user",
"represented",
"by",
"the",
"given",
"method",
"arguments",
"."
]
| python | train | 41.898551 |
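
The two call paths described in `create_subject` above, as a hedged sketch; `security_manager`, `token`, and `account_id` are assumed to exist:

    # Path 1: anonymous subject -- only a subject_context is supplied.
    context = security_manager.create_subject_context(None)
    anonymous = security_manager.create_subject(subject_context=context)

    # Path 2: immediately after authentication -- token + account, no context.
    subject = security_manager.create_subject(authc_token=token,
                                              account_id=account_id,
                                              existing_subject=anonymous)
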
ella/ella | ella/photos/models.py | https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L427-L432 | def file(self):
        """ Method returns formatted photo path - derived from format.id and source Photo filename """
if photos_settings.FORMATED_PHOTO_FILENAME is not None:
return photos_settings.FORMATED_PHOTO_FILENAME(self)
source_file = path.split(self.photo.image.name)
return path.join(source_file[0], str(self.format.id) + '-' + source_file[1]) | [
"def",
"file",
"(",
"self",
")",
":",
"if",
"photos_settings",
".",
"FORMATED_PHOTO_FILENAME",
"is",
"not",
"None",
":",
"return",
"photos_settings",
".",
"FORMATED_PHOTO_FILENAME",
"(",
"self",
")",
"source_file",
"=",
"path",
".",
"split",
"(",
"self",
".",
"photo",
".",
"image",
".",
"name",
")",
"return",
"path",
".",
"join",
"(",
"source_file",
"[",
"0",
"]",
",",
"str",
"(",
"self",
".",
"format",
".",
"id",
")",
"+",
"'-'",
"+",
"source_file",
"[",
"1",
"]",
")"
]
| Method returns formatted photo path - derived from format.id and source Photo filename | [
    "Method",
    "returns",
    "formatted",
"photo",
"path",
"-",
"derived",
"from",
"format",
".",
"id",
"and",
"source",
"Photo",
"filename"
]
| python | train | 63.666667 |
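
A worked illustration of the filename rule in `file` above (paths and ids are made up); with `FORMATED_PHOTO_FILENAME` unset, the format id is prefixed to the source basename:

    from os import path

    source = 'photos/2024/05/portrait.jpg'   # photo.image.name (assumed)
    format_id = 3                            # format.id (assumed)
    head, tail = path.split(source)
    print(path.join(head, str(format_id) + '-' + tail))
    # -> photos/2024/05/3-portrait.jpg
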
ellmetha/django-machina | machina/apps/forum_conversation/views.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L493-L510 | def form_valid(self, post_form, attachment_formset, poll_option_formset, **kwargs):
""" Processes valid forms. """
save_poll_option_formset = poll_option_formset is not None \
and not self.preview
valid = super().form_valid(
post_form, attachment_formset, poll_option_formset=poll_option_formset, **kwargs)
if save_poll_option_formset:
poll_option_formset.topic = self.forum_post.topic
poll_option_formset.save(
poll_question=post_form.cleaned_data.pop('poll_question', None),
poll_max_options=post_form.cleaned_data.pop('poll_max_options', None),
poll_duration=post_form.cleaned_data.pop('poll_duration', None),
poll_user_changes=post_form.cleaned_data.pop('poll_user_changes', None),
)
return valid | [
"def",
"form_valid",
"(",
"self",
",",
"post_form",
",",
"attachment_formset",
",",
"poll_option_formset",
",",
"*",
"*",
"kwargs",
")",
":",
"save_poll_option_formset",
"=",
"poll_option_formset",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"preview",
"valid",
"=",
"super",
"(",
")",
".",
"form_valid",
"(",
"post_form",
",",
"attachment_formset",
",",
"poll_option_formset",
"=",
"poll_option_formset",
",",
"*",
"*",
"kwargs",
")",
"if",
"save_poll_option_formset",
":",
"poll_option_formset",
".",
"topic",
"=",
"self",
".",
"forum_post",
".",
"topic",
"poll_option_formset",
".",
"save",
"(",
"poll_question",
"=",
"post_form",
".",
"cleaned_data",
".",
"pop",
"(",
"'poll_question'",
",",
"None",
")",
",",
"poll_max_options",
"=",
"post_form",
".",
"cleaned_data",
".",
"pop",
"(",
"'poll_max_options'",
",",
"None",
")",
",",
"poll_duration",
"=",
"post_form",
".",
"cleaned_data",
".",
"pop",
"(",
"'poll_duration'",
",",
"None",
")",
",",
"poll_user_changes",
"=",
"post_form",
".",
"cleaned_data",
".",
"pop",
"(",
"'poll_user_changes'",
",",
"None",
")",
",",
")",
"return",
"valid"
]
| Processes valid forms. | [
"Processes",
"valid",
"forms",
"."
]
| python | train | 47.222222 |
calocan/rescape-python-helpers | rescape_python_helpers/functional/ramda.py | https://github.com/calocan/rescape-python-helpers/blob/91a1724f062ee40a25aa60fc96b2d7acadd99618/rescape_python_helpers/functional/ramda.py#L306-L313 | def map_with_obj_deep(f, dct):
"""
Implementation of map that recurses. This tests the same keys at every level of dict and in lists
:param f: 2-ary function expecting a key and value and returns a modified value
:param dct: Dict for deep processing
:return: Modified dct with matching props mapped
"""
return _map_deep(lambda k, v: [k, f(k, v)], dct) | [
"def",
"map_with_obj_deep",
"(",
"f",
",",
"dct",
")",
":",
"return",
"_map_deep",
"(",
"lambda",
"k",
",",
"v",
":",
"[",
"k",
",",
"f",
"(",
"k",
",",
"v",
")",
"]",
",",
"dct",
")"
]
| Implementation of map that recurses. This tests the same keys at every level of dict and in lists
:param f: 2-ary function expecting a key and value and returns a modified value
:param dct: Dict for deep processing
:return: Modified dct with matching props mapped | [
"Implementation",
"of",
"map",
"that",
"recurses",
".",
"This",
"tests",
"the",
"same",
"keys",
"at",
"every",
"level",
"of",
"dict",
"and",
"in",
"lists",
":",
"param",
"f",
":",
"2",
"-",
"ary",
"function",
"expecting",
"a",
"key",
"and",
"value",
"and",
"returns",
"a",
"modified",
"value",
":",
"param",
"dct",
":",
"Dict",
"for",
"deep",
"processing",
":",
"return",
":",
"Modified",
"dct",
"with",
"matching",
"props",
"mapped"
]
| python | train | 46.5 |
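
A hedged sketch of `map_with_obj_deep` above; the traversal is delegated to the internal `_map_deep`, so the output shown is an expectation rather than a guarantee, and the import path mirrors the file location:

    from rescape_python_helpers.functional.ramda import map_with_obj_deep

    doubled = map_with_obj_deep(
        lambda k, v: v * 2 if isinstance(v, int) else v,
        {'a': 1, 'b': {'c': 2, 'd': [3]}}
    )
    # Expected: {'a': 2, 'b': {'c': 4, 'd': [6]}} -- ints doubled at every depth.
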
apache/incubator-heron | heron/statemgrs/src/python/zkstatemanager.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/statemgrs/src/python/zkstatemanager.py#L426-L450 | def create_execution_state(self, topologyName, executionState):
""" create execution state """
if not executionState or not executionState.IsInitialized():
raise_(StateException("Execution State protobuf not init properly",
StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])
path = self.get_execution_state_path(topologyName)
LOG.info("Adding topology: {0} to path: {1}".format(
topologyName, path))
executionStateString = executionState.SerializeToString()
try:
self.client.create(path, value=executionStateString, makepath=True)
return True
except NoNodeError:
raise_(StateException("NoNodeError while creating execution state",
StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
except NodeExistsError:
raise_(StateException("NodeExistsError while creating execution state",
StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
except ZookeeperError:
raise_(StateException("Zookeeper while creating execution state",
StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
except Exception:
# Just re raise the exception.
raise | [
"def",
"create_execution_state",
"(",
"self",
",",
"topologyName",
",",
"executionState",
")",
":",
"if",
"not",
"executionState",
"or",
"not",
"executionState",
".",
"IsInitialized",
"(",
")",
":",
"raise_",
"(",
"StateException",
"(",
"\"Execution State protobuf not init properly\"",
",",
"StateException",
".",
"EX_TYPE_PROTOBUF_ERROR",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"path",
"=",
"self",
".",
"get_execution_state_path",
"(",
"topologyName",
")",
"LOG",
".",
"info",
"(",
"\"Adding topology: {0} to path: {1}\"",
".",
"format",
"(",
"topologyName",
",",
"path",
")",
")",
"executionStateString",
"=",
"executionState",
".",
"SerializeToString",
"(",
")",
"try",
":",
"self",
".",
"client",
".",
"create",
"(",
"path",
",",
"value",
"=",
"executionStateString",
",",
"makepath",
"=",
"True",
")",
"return",
"True",
"except",
"NoNodeError",
":",
"raise_",
"(",
"StateException",
"(",
"\"NoNodeError while creating execution state\"",
",",
"StateException",
".",
"EX_TYPE_NO_NODE_ERROR",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"except",
"NodeExistsError",
":",
"raise_",
"(",
"StateException",
"(",
"\"NodeExistsError while creating execution state\"",
",",
"StateException",
".",
"EX_TYPE_NODE_EXISTS_ERROR",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"except",
"ZookeeperError",
":",
"raise_",
"(",
"StateException",
"(",
"\"Zookeeper while creating execution state\"",
",",
"StateException",
".",
"EX_TYPE_ZOOKEEPER_ERROR",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"except",
"Exception",
":",
"# Just re raise the exception.",
"raise"
]
| create execution state | [
"create",
"execution",
"state"
]
| python | valid | 49.72 |
AtomHash/evernode | evernode/models/session_model.py | https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/models/session_model.py#L40-L43 | def count(cls, user_id):
""" Count sessions with user_id """
return cls.query.with_entities(
cls.user_id).filter_by(user_id=user_id).count() | [
"def",
"count",
"(",
"cls",
",",
"user_id",
")",
":",
"return",
"cls",
".",
"query",
".",
"with_entities",
"(",
"cls",
".",
"user_id",
")",
".",
"filter_by",
"(",
"user_id",
"=",
"user_id",
")",
".",
"count",
"(",
")"
]
| Count sessions with user_id | [
"Count",
"sessions",
"with",
"user_id"
]
| python | train | 42 |
chrisjrn/registrasion | registrasion/views.py | https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/views.py#L1095-L1128 | def badges(request):
''' Either displays a form containing a list of users with badges to
render, or returns a .zip file containing their badges. '''
category = request.GET.getlist("category", [])
product = request.GET.getlist("product", [])
status = request.GET.get("status")
form = forms.InvoicesWithProductAndStatusForm(
request.POST or None,
category=category,
product=product,
status=status,
)
if form.is_valid():
response = HttpResponse()
response["Content-Type"] = "application.zip"
response["Content-Disposition"] = 'attachment; filename="badges.zip"'
z = zipfile.ZipFile(response, "w")
for invoice in form.cleaned_data["invoice"]:
user = invoice.user
badge = render_badge(user)
z.writestr("badge_%d.svg" % user.id, badge.encode("utf-8"))
return response
data = {
"form": form,
}
return render(request, "registrasion/badges.html", data) | [
"def",
"badges",
"(",
"request",
")",
":",
"category",
"=",
"request",
".",
"GET",
".",
"getlist",
"(",
"\"category\"",
",",
"[",
"]",
")",
"product",
"=",
"request",
".",
"GET",
".",
"getlist",
"(",
"\"product\"",
",",
"[",
"]",
")",
"status",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"\"status\"",
")",
"form",
"=",
"forms",
".",
"InvoicesWithProductAndStatusForm",
"(",
"request",
".",
"POST",
"or",
"None",
",",
"category",
"=",
"category",
",",
"product",
"=",
"product",
",",
"status",
"=",
"status",
",",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"response",
"=",
"HttpResponse",
"(",
")",
"response",
"[",
"\"Content-Type\"",
"]",
"=",
"\"application.zip\"",
"response",
"[",
"\"Content-Disposition\"",
"]",
"=",
"'attachment; filename=\"badges.zip\"'",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"response",
",",
"\"w\"",
")",
"for",
"invoice",
"in",
"form",
".",
"cleaned_data",
"[",
"\"invoice\"",
"]",
":",
"user",
"=",
"invoice",
".",
"user",
"badge",
"=",
"render_badge",
"(",
"user",
")",
"z",
".",
"writestr",
"(",
"\"badge_%d.svg\"",
"%",
"user",
".",
"id",
",",
"badge",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"return",
"response",
"data",
"=",
"{",
"\"form\"",
":",
"form",
",",
"}",
"return",
"render",
"(",
"request",
",",
"\"registrasion/badges.html\"",
",",
"data",
")"
]
| Either displays a form containing a list of users with badges to
render, or returns a .zip file containing their badges. | [
"Either",
"displays",
"a",
"form",
"containing",
"a",
"list",
"of",
"users",
"with",
"badges",
"to",
"render",
"or",
"returns",
"a",
".",
"zip",
"file",
"containing",
"their",
"badges",
"."
]
| python | test | 29.029412 |
AltSchool/dynamic-rest | dynamic_rest/serializers.py | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/serializers.py#L710-L727 | def save(self, *args, **kwargs):
        """Serializer save that addresses prefetch issues."""
update = getattr(self, 'instance', None) is not None
instance = super(
WithDynamicSerializerMixin,
self
).save(
*args,
**kwargs
)
view = self._context.get('view')
if view and update:
if int(DRF_VERSION[0]) <= 3 and int(DRF_VERSION[1]) < 5:
# Reload the object on update
# to get around prefetch cache issues
# Fixed in DRF in 3.5.0
instance = self.instance = view.get_object()
return instance | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"update",
"=",
"getattr",
"(",
"self",
",",
"'instance'",
",",
"None",
")",
"is",
"not",
"None",
"instance",
"=",
"super",
"(",
"WithDynamicSerializerMixin",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"view",
"=",
"self",
".",
"_context",
".",
"get",
"(",
"'view'",
")",
"if",
"view",
"and",
"update",
":",
"if",
"int",
"(",
"DRF_VERSION",
"[",
"0",
"]",
")",
"<=",
"3",
"and",
"int",
"(",
"DRF_VERSION",
"[",
"1",
"]",
")",
"<",
"5",
":",
"# Reload the object on update",
"# to get around prefetch cache issues",
"# Fixed in DRF in 3.5.0",
"instance",
"=",
"self",
".",
"instance",
"=",
"view",
".",
"get_object",
"(",
")",
"return",
"instance"
]
| Serializer save that addresses prefetch issues. | [
    "Serializer",
    "save",
    "that",
    "addresses",
"prefetch",
"issues",
"."
]
| python | train | 36 |
bmweiner/skillful | skillful/interface.py | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L45-L80 | def to_dict(self, drop_null=True, camel=False):
"""Serialize self as dict.
Args:
drop_null: bool, default True. Remove 'empty' attributes.
            camel: bool, default False. Convert keys to camelCase.
Return:
dict: object params.
"""
#return _to_dict(self, drop_null, camel)
def to_dict(obj, drop_null, camel):
"""Recursively constructs the dict."""
if isinstance(obj, (Body, BodyChild)):
obj = obj.__dict__
if isinstance(obj, dict):
data = {}
for attr, val in six.iteritems(obj):
if camel:
attr = _snake_to_camel(attr)
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data[attr] = to_dict(val, drop_null, camel)
return data
elif isinstance(obj, list):
data = []
for val in obj:
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data.append(to_dict(val, drop_null, camel))
return data
else:
return obj
return to_dict(self, drop_null, camel) | [
"def",
"to_dict",
"(",
"self",
",",
"drop_null",
"=",
"True",
",",
"camel",
"=",
"False",
")",
":",
"#return _to_dict(self, drop_null, camel)",
"def",
"to_dict",
"(",
"obj",
",",
"drop_null",
",",
"camel",
")",
":",
"\"\"\"Recursively constructs the dict.\"\"\"",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"Body",
",",
"BodyChild",
")",
")",
":",
"obj",
"=",
"obj",
".",
"__dict__",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"data",
"=",
"{",
"}",
"for",
"attr",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"obj",
")",
":",
"if",
"camel",
":",
"attr",
"=",
"_snake_to_camel",
"(",
"attr",
")",
"valid_null",
"=",
"(",
"isinstance",
"(",
"val",
",",
"bool",
")",
"or",
"val",
"==",
"0",
"or",
"(",
"val",
"and",
"to_dict",
"(",
"val",
",",
"drop_null",
",",
"camel",
")",
")",
")",
"if",
"not",
"drop_null",
"or",
"(",
"drop_null",
"and",
"valid_null",
")",
":",
"data",
"[",
"attr",
"]",
"=",
"to_dict",
"(",
"val",
",",
"drop_null",
",",
"camel",
")",
"return",
"data",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"data",
"=",
"[",
"]",
"for",
"val",
"in",
"obj",
":",
"valid_null",
"=",
"(",
"isinstance",
"(",
"val",
",",
"bool",
")",
"or",
"val",
"==",
"0",
"or",
"(",
"val",
"and",
"to_dict",
"(",
"val",
",",
"drop_null",
",",
"camel",
")",
")",
")",
"if",
"not",
"drop_null",
"or",
"(",
"drop_null",
"and",
"valid_null",
")",
":",
"data",
".",
"append",
"(",
"to_dict",
"(",
"val",
",",
"drop_null",
",",
"camel",
")",
")",
"return",
"data",
"else",
":",
"return",
"obj",
"return",
"to_dict",
"(",
"self",
",",
"drop_null",
",",
"camel",
")"
]
| Serialize self as dict.
Args:
drop_null: bool, default True. Remove 'empty' attributes.
            camel: bool, default False. Convert keys to camelCase.
Return:
dict: object params. | [
"Serialize",
"self",
"as",
"dict",
"."
]
| python | train | 41.722222 |
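
A hedged sketch of the serialization rules in `to_dict` above; the `Card` subclass and its fields are invented for illustration:

    from skillful.interface import Body    # assumption: Body importable as shown

    class Card(Body):
        def __init__(self):
            self.card_type = 'Simple'      # snake_case attribute
            self.text = ''                 # empty -> dropped when drop_null=True

    card = Card()
    print(card.to_dict(camel=True))        # expected: {'cardType': 'Simple'}
    print(card.to_dict(drop_null=False))   # keeps the empty 'text' field
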
manns/pyspread | pyspread/src/model/model.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/model/model.py#L720-L752 | def cell_array_generator(self, key):
"""Generator traversing cells specified in key
Parameters
----------
key: Iterable of Integer or slice
\tThe key specifies the cell keys of the generator
"""
for i, key_ele in enumerate(key):
# Get first element of key that is a slice
if type(key_ele) is SliceType:
slc_keys = xrange(*key_ele.indices(self.dict_grid.shape[i]))
key_list = list(key)
key_list[i] = None
has_subslice = any(type(ele) is SliceType for ele in key_list)
for slc_key in slc_keys:
key_list[i] = slc_key
if has_subslice:
# If there is a slice left yield generator
yield self.cell_array_generator(key_list)
else:
# No slices? Yield value
yield self[tuple(key_list)]
break | [
"def",
"cell_array_generator",
"(",
"self",
",",
"key",
")",
":",
"for",
"i",
",",
"key_ele",
"in",
"enumerate",
"(",
"key",
")",
":",
"# Get first element of key that is a slice",
"if",
"type",
"(",
"key_ele",
")",
"is",
"SliceType",
":",
"slc_keys",
"=",
"xrange",
"(",
"*",
"key_ele",
".",
"indices",
"(",
"self",
".",
"dict_grid",
".",
"shape",
"[",
"i",
"]",
")",
")",
"key_list",
"=",
"list",
"(",
"key",
")",
"key_list",
"[",
"i",
"]",
"=",
"None",
"has_subslice",
"=",
"any",
"(",
"type",
"(",
"ele",
")",
"is",
"SliceType",
"for",
"ele",
"in",
"key_list",
")",
"for",
"slc_key",
"in",
"slc_keys",
":",
"key_list",
"[",
"i",
"]",
"=",
"slc_key",
"if",
"has_subslice",
":",
"# If there is a slice left yield generator",
"yield",
"self",
".",
"cell_array_generator",
"(",
"key_list",
")",
"else",
":",
"# No slices? Yield value",
"yield",
"self",
"[",
"tuple",
"(",
"key_list",
")",
"]",
"break"
]
| Generator traversing cells specified in key
Parameters
----------
key: Iterable of Integer or slice
\tThe key specifies the cell keys of the generator | [
"Generator",
"traversing",
"cells",
"specified",
"in",
"key"
]
| python | train | 30 |
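
A hedged sketch of consuming `cell_array_generator` above; `grid` stands in for the model object, and the (row, col, tab) key layout is an assumption:

    # Values of rows 0..2 in column 1, table 0.
    for value in grid.cell_array_generator((slice(0, 3), 1, 0)):
        print(value)

    # Two slices: the outer loop yields sub-generators, not flat values.
    for row_gen in grid.cell_array_generator((slice(0, 3), slice(0, 2), 0)):
        print(list(row_gen))
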
rameshg87/pyremotevbox | pyremotevbox/ZSI/schema.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/schema.py#L387-L397 | def RegisterAnyElement(cls):
'''If find registered TypeCode instance, add Wrapper class
to TypeCode class serialmap and Re-RegisterType. Provides
    Any serialization of any instances of the Wrapper.
'''
for k,v in cls.types_dict.items():
what = Any.serialmap.get(k)
if what is None: continue
if v in what.__class__.seriallist: continue
what.__class__.seriallist.append(v)
RegisterType(what.__class__, clobber=1, **what.__dict__) | [
"def",
"RegisterAnyElement",
"(",
"cls",
")",
":",
"for",
"k",
",",
"v",
"in",
"cls",
".",
"types_dict",
".",
"items",
"(",
")",
":",
"what",
"=",
"Any",
".",
"serialmap",
".",
"get",
"(",
"k",
")",
"if",
"what",
"is",
"None",
":",
"continue",
"if",
"v",
"in",
"what",
".",
"__class__",
".",
"seriallist",
":",
"continue",
"what",
".",
"__class__",
".",
"seriallist",
".",
"append",
"(",
"v",
")",
"RegisterType",
"(",
"what",
".",
"__class__",
",",
"clobber",
"=",
"1",
",",
"*",
"*",
"what",
".",
"__dict__",
")"
]
| If find registered TypeCode instance, add Wrapper class
to TypeCode class serialmap and Re-RegisterType. Provides
    Any serialization of any instances of the Wrapper. | [
    "If",
    "find",
    "registered",
    "TypeCode",
    "instance",
    "add",
    "Wrapper",
    "class",
    "to",
    "TypeCode",
    "class",
    "serialmap",
    "and",
    "Re",
    "-",
    "RegisterType",
    ".",
    "Provides",
    "Any",
    "serialization",
"of",
"any",
"instances",
"of",
"the",
"Wrapper",
"."
]
| python | train | 47 |
MrYsLab/pymata-aio | pymata_aio/pymata_iot.py | https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_iot.py#L557-L566 | async def stepper_step(self, command):
"""
This method activates a stepper motor motion.
This is a FirmataPlus feature.
:param command: {"method": "stepper_step", "params": [SPEED, NUMBER_OF_STEPS]}
        :returns: No message returned.
"""
speed = int(command[0])
num_steps = int(command[1])
await self.core.stepper_step(speed, num_steps) | [
"async",
"def",
"stepper_step",
"(",
"self",
",",
"command",
")",
":",
"speed",
"=",
"int",
"(",
"command",
"[",
"0",
"]",
")",
"num_steps",
"=",
"int",
"(",
"command",
"[",
"1",
"]",
")",
"await",
"self",
".",
"core",
".",
"stepper_step",
"(",
"speed",
",",
"num_steps",
")"
]
| This method activates a stepper motor motion.
This is a FirmataPlus feature.
:param command: {"method": "stepper_step", "params": [SPEED, NUMBER_OF_STEPS]}
        :returns: No message returned. | [
"This",
"method",
"activates",
"a",
"stepper",
"motor",
"motion",
".",
"This",
"is",
"a",
"FirmataPlus",
"feature",
".",
":",
"param",
"command",
":",
"{",
"method",
":",
"stepper_step",
"params",
":",
"[",
"SPEED",
"NUMBER_OF_STEPS",
"]",
"}",
":",
"returns",
":",
"No",
"message",
"returned",
"."
]
| python | train | 39.4 |
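
A hedged sketch of the JSON command that `stepper_step` above consumes; the transport and the numeric values are assumptions (`params[0]` is the speed, `params[1]` is the step count):

    import json

    message = json.dumps({"method": "stepper_step", "params": [21, 512]})  # placeholder values
    # await websocket.send(message)   # assumed websocket link to the pymata_iot server
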