repo (stringlengths 7-55) | path (stringlengths 4-223) | url (stringlengths 87-315) | code (stringlengths 75-104k) | code_tokens (list) | docstring (stringlengths 1-46.9k) | docstring_tokens (list) | language (stringclasses 1 value) | partition (stringclasses 3 values) | avg_line_len (float64 7.91-980) |
---|---|---|---|---|---|---|---|---|---|
CxAalto/gtfspy | gtfspy/osm_transfers.py | https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/osm_transfers.py#L15-L74 |
def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=1000):
"""
Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object or a string representation.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java
"""
if isinstance(gtfs, str):
gtfs = GTFS(gtfs)
assert (isinstance(gtfs, GTFS))
print("Reading in walk network")
walk_network = create_walk_network_from_osm(osm_path)
print("Matching stops to the OSM network")
stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)
transfers = gtfs.get_straight_line_transfer_distances()
from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}
for transfer_tuple in transfers.itertuples():
from_I = transfer_tuple.from_stop_I
to_I = transfer_tuple.to_stop_I
from_I_to_to_stop_Is[from_I].add(to_I)
print("Computing walking distances")
for from_I, to_stop_Is in from_I_to_to_stop_Is.items():
from_node = stop_I_to_nearest_osm_node[from_I]
from_dist = stop_I_to_nearest_osm_node_distance[from_I]
shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,
from_node,
cutoff=cutoff_distance_m - from_dist,
weight="distance")
for to_I in to_stop_Is:
to_distance = stop_I_to_nearest_osm_node_distance[to_I]
to_node = stop_I_to_nearest_osm_node[to_I]
osm_distance = shortest_paths.get(to_node, float('inf'))
total_distance = from_dist + osm_distance + to_distance
from_stop_I_transfers = transfers[transfers['from_stop_I'] == from_I]
straigth_distance = from_stop_I_transfers[from_stop_I_transfers["to_stop_I"] == to_I]["d"].values[0]
assert (straigth_distance < total_distance + 2) # allow for a maximum of 2 meters in calculations
if total_distance <= cutoff_distance_m:
gtfs.conn.execute("UPDATE stop_distances "
"SET d_walk = " + str(int(total_distance)) +
" WHERE from_stop_I=" + str(from_I) + " AND to_stop_I=" + str(to_I))
gtfs.conn.commit()
|
[
"def",
"add_walk_distances_to_db_python",
"(",
"gtfs",
",",
"osm_path",
",",
"cutoff_distance_m",
"=",
"1000",
")",
":",
"if",
"isinstance",
"(",
"gtfs",
",",
"str",
")",
":",
"gtfs",
"=",
"GTFS",
"(",
"gtfs",
")",
"assert",
"(",
"isinstance",
"(",
"gtfs",
",",
"GTFS",
")",
")",
"print",
"(",
"\"Reading in walk network\"",
")",
"walk_network",
"=",
"create_walk_network_from_osm",
"(",
"osm_path",
")",
"print",
"(",
"\"Matching stops to the OSM network\"",
")",
"stop_I_to_nearest_osm_node",
",",
"stop_I_to_nearest_osm_node_distance",
"=",
"match_stops_to_nodes",
"(",
"gtfs",
",",
"walk_network",
")",
"transfers",
"=",
"gtfs",
".",
"get_straight_line_transfer_distances",
"(",
")",
"from_I_to_to_stop_Is",
"=",
"{",
"stop_I",
":",
"set",
"(",
")",
"for",
"stop_I",
"in",
"stop_I_to_nearest_osm_node",
"}",
"for",
"transfer_tuple",
"in",
"transfers",
".",
"itertuples",
"(",
")",
":",
"from_I",
"=",
"transfer_tuple",
".",
"from_stop_I",
"to_I",
"=",
"transfer_tuple",
".",
"to_stop_I",
"from_I_to_to_stop_Is",
"[",
"from_I",
"]",
".",
"add",
"(",
"to_I",
")",
"print",
"(",
"\"Computing walking distances\"",
")",
"for",
"from_I",
",",
"to_stop_Is",
"in",
"from_I_to_to_stop_Is",
".",
"items",
"(",
")",
":",
"from_node",
"=",
"stop_I_to_nearest_osm_node",
"[",
"from_I",
"]",
"from_dist",
"=",
"stop_I_to_nearest_osm_node_distance",
"[",
"from_I",
"]",
"shortest_paths",
"=",
"networkx",
".",
"single_source_dijkstra_path_length",
"(",
"walk_network",
",",
"from_node",
",",
"cutoff",
"=",
"cutoff_distance_m",
"-",
"from_dist",
",",
"weight",
"=",
"\"distance\"",
")",
"for",
"to_I",
"in",
"to_stop_Is",
":",
"to_distance",
"=",
"stop_I_to_nearest_osm_node_distance",
"[",
"to_I",
"]",
"to_node",
"=",
"stop_I_to_nearest_osm_node",
"[",
"to_I",
"]",
"osm_distance",
"=",
"shortest_paths",
".",
"get",
"(",
"to_node",
",",
"float",
"(",
"'inf'",
")",
")",
"total_distance",
"=",
"from_dist",
"+",
"osm_distance",
"+",
"to_distance",
"from_stop_I_transfers",
"=",
"transfers",
"[",
"transfers",
"[",
"'from_stop_I'",
"]",
"==",
"from_I",
"]",
"straigth_distance",
"=",
"from_stop_I_transfers",
"[",
"from_stop_I_transfers",
"[",
"\"to_stop_I\"",
"]",
"==",
"to_I",
"]",
"[",
"\"d\"",
"]",
".",
"values",
"[",
"0",
"]",
"assert",
"(",
"straigth_distance",
"<",
"total_distance",
"+",
"2",
")",
"# allow for a maximum of 2 meters in calculations",
"if",
"total_distance",
"<=",
"cutoff_distance_m",
":",
"gtfs",
".",
"conn",
".",
"execute",
"(",
"\"UPDATE stop_distances \"",
"\"SET d_walk = \"",
"+",
"str",
"(",
"int",
"(",
"total_distance",
")",
")",
"+",
"\" WHERE from_stop_I=\"",
"+",
"str",
"(",
"from_I",
")",
"+",
"\" AND to_stop_I=\"",
"+",
"str",
"(",
"to_I",
")",
")",
"gtfs",
".",
"conn",
".",
"commit",
"(",
")"
] |
Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object or a string representation.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java
|
[
"Computes",
"the",
"walk",
"paths",
"between",
"stops",
"and",
"updates",
"these",
"to",
"the",
"gtfs",
"database",
"."
] |
python | valid | 43.966667 |
hatemile/hatemile-for-python | hatemile/implementation/css.py | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L423-L443 |
def _get_regular_expression_of_symbols(self):
"""
Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str
"""
regular_expression = None
for symbol in self.symbols:
formated_symbol = self._get_formated_symbol(symbol['symbol'])
if regular_expression is None:
regular_expression = '(' + formated_symbol + ')'
else:
regular_expression = (
regular_expression +
'|(' +
formated_symbol +
')'
)
return regular_expression
|
[
"def",
"_get_regular_expression_of_symbols",
"(",
"self",
")",
":",
"regular_expression",
"=",
"None",
"for",
"symbol",
"in",
"self",
".",
"symbols",
":",
"formated_symbol",
"=",
"self",
".",
"_get_formated_symbol",
"(",
"symbol",
"[",
"'symbol'",
"]",
")",
"if",
"regular_expression",
"is",
"None",
":",
"regular_expression",
"=",
"'('",
"+",
"formated_symbol",
"+",
"')'",
"else",
":",
"regular_expression",
"=",
"(",
"regular_expression",
"+",
"'|('",
"+",
"formated_symbol",
"+",
"')'",
")",
"return",
"regular_expression"
] |
Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str
|
[
"Returns",
"the",
"regular",
"expression",
"to",
"search",
"all",
"symbols",
"."
] |
python | train | 32.714286 |
mitsei/dlkit | dlkit/records/osid/base_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L2260-L2295 |
def clear_file(self):
"""stub"""
if (self.get_file_metadata().is_read_only() or
self.get_file_metadata().is_required()):
raise NoAccess()
if 'assetId' in self.my_osid_object_form._my_map['fileId']:
rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
catalog_id_str = ''
if 'assignedBankIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedBankIds'][0]
elif 'assignedRepositoryIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedRepositoryIds'][0]
try:
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
except AttributeError:
# for update forms
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
aas.delete_asset(Id(self.my_osid_object_form._my_map['fileId']['assetId']))
self.my_osid_object_form._my_map['fileId'] = \
dict(self.get_file_metadata().get_default_object_values()[0])
|
[
"def",
"clear_file",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"get_file_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
"or",
"self",
".",
"get_file_metadata",
"(",
")",
".",
"is_required",
"(",
")",
")",
":",
"raise",
"NoAccess",
"(",
")",
"if",
"'assetId'",
"in",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'fileId'",
"]",
":",
"rm",
"=",
"self",
".",
"my_osid_object_form",
".",
"_get_provider_manager",
"(",
"'REPOSITORY'",
")",
"catalog_id_str",
"=",
"''",
"if",
"'assignedBankIds'",
"in",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
":",
"catalog_id_str",
"=",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'assignedBankIds'",
"]",
"[",
"0",
"]",
"elif",
"'assignedRepositoryIds'",
"in",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
":",
"catalog_id_str",
"=",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'assignedRepositoryIds'",
"]",
"[",
"0",
"]",
"try",
":",
"try",
":",
"aas",
"=",
"rm",
".",
"get_asset_admin_session_for_repository",
"(",
"Id",
"(",
"catalog_id_str",
")",
",",
"self",
".",
"my_osid_object_form",
".",
"_proxy",
")",
"except",
"NullArgument",
":",
"aas",
"=",
"rm",
".",
"get_asset_admin_session_for_repository",
"(",
"Id",
"(",
"catalog_id_str",
")",
")",
"except",
"AttributeError",
":",
"# for update forms",
"try",
":",
"aas",
"=",
"rm",
".",
"get_asset_admin_session_for_repository",
"(",
"Id",
"(",
"catalog_id_str",
")",
",",
"self",
".",
"my_osid_object_form",
".",
"_proxy",
")",
"except",
"NullArgument",
":",
"aas",
"=",
"rm",
".",
"get_asset_admin_session_for_repository",
"(",
"Id",
"(",
"catalog_id_str",
")",
")",
"aas",
".",
"delete_asset",
"(",
"Id",
"(",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'fileId'",
"]",
"[",
"'assetId'",
"]",
")",
")",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'fileId'",
"]",
"=",
"dict",
"(",
"self",
".",
"get_file_metadata",
"(",
")",
".",
"get_default_object_values",
"(",
")",
"[",
"0",
"]",
")"
] |
stub
|
[
"stub"
] |
python | train | 45.666667 |
quantumlib/Cirq | cirq/linalg/predicates.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/linalg/predicates.py#L114-L134 |
def is_unitary(
matrix: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8) -> bool:
"""Determines if a matrix is approximately unitary.
A matrix is unitary if it's square and its adjoint is its inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is unitary within the given tolerance.
"""
return (matrix.shape[0] == matrix.shape[1] and
np.allclose(matrix.dot(np.conj(matrix.T)), np.eye(matrix.shape[0]),
rtol=rtol,
atol=atol))
|
[
"def",
"is_unitary",
"(",
"matrix",
":",
"np",
".",
"ndarray",
",",
"*",
",",
"rtol",
":",
"float",
"=",
"1e-5",
",",
"atol",
":",
"float",
"=",
"1e-8",
")",
"->",
"bool",
":",
"return",
"(",
"matrix",
".",
"shape",
"[",
"0",
"]",
"==",
"matrix",
".",
"shape",
"[",
"1",
"]",
"and",
"np",
".",
"allclose",
"(",
"matrix",
".",
"dot",
"(",
"np",
".",
"conj",
"(",
"matrix",
".",
"T",
")",
")",
",",
"np",
".",
"eye",
"(",
"matrix",
".",
"shape",
"[",
"0",
"]",
")",
",",
"rtol",
"=",
"rtol",
",",
"atol",
"=",
"atol",
")",
")"
] |
Determines if a matrix is approximately unitary.
A matrix is unitary if it's square and its adjoint is its inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is unitary within the given tolerance.
|
[
"Determines",
"if",
"a",
"matrix",
"is",
"approximately",
"unitary",
"."
] |
python | train | 33.285714 |
fboender/ansible-cmdb | src/ansiblecmdb/parser.py | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/parser.py#L194-L214 |
def _parse_vars(self, tokens):
"""
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
"""
key_values = {}
for token in tokens:
if token.startswith('#'):
# End parsing if we encounter a comment, which lasts
# until the end of the line.
break
else:
k, v = token.split('=', 1)
key = k.strip()
key_values[key] = v.strip()
return key_values
|
[
"def",
"_parse_vars",
"(",
"self",
",",
"tokens",
")",
":",
"key_values",
"=",
"{",
"}",
"for",
"token",
"in",
"tokens",
":",
"if",
"token",
".",
"startswith",
"(",
"'#'",
")",
":",
"# End parsing if we encounter a comment, which lasts",
"# until the end of the line.",
"break",
"else",
":",
"k",
",",
"v",
"=",
"token",
".",
"split",
"(",
"'='",
",",
"1",
")",
"key",
"=",
"k",
".",
"strip",
"(",
")",
"key_values",
"[",
"key",
"]",
"=",
"v",
".",
"strip",
"(",
")",
"return",
"key_values"
] |
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
|
[
"Given",
"an",
"iterable",
"of",
"tokens",
"returns",
"variables",
"and",
"their",
"values",
"as",
"a",
"dictionary",
"."
] |
python | train | 31.714286 |
rackerlabs/simpl | simpl/db/mongodb.py | https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/db/mongodb.py#L567-L594 |
def _transform_incoming(self, son, collection, skip=0):
"""Recursively replace all keys that need transforming."""
skip = 0 if skip < 0 else skip
if isinstance(son, dict):
for (key, value) in son.items():
if key.startswith('$'):
if isinstance(value, dict):
skip = 2
else:
pass # allow mongo to complain
if self.replace in key:
k = key if skip else self.transform_key(key)
son[k] = self._transform_incoming(
son.pop(key), collection, skip=skip - 1)
elif isinstance(value, dict): # recurse into sub-docs
son[key] = self._transform_incoming(value, collection,
skip=skip - 1)
elif isinstance(value, list):
son[key] = [
self._transform_incoming(k, collection, skip=skip - 1)
for k in value
]
return son
elif isinstance(son, list):
return [self._transform_incoming(item, collection, skip=skip - 1)
for item in son]
else:
return son
|
[
"def",
"_transform_incoming",
"(",
"self",
",",
"son",
",",
"collection",
",",
"skip",
"=",
"0",
")",
":",
"skip",
"=",
"0",
"if",
"skip",
"<",
"0",
"else",
"skip",
"if",
"isinstance",
"(",
"son",
",",
"dict",
")",
":",
"for",
"(",
"key",
",",
"value",
")",
"in",
"son",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'$'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"skip",
"=",
"2",
"else",
":",
"pass",
"# allow mongo to complain",
"if",
"self",
".",
"replace",
"in",
"key",
":",
"k",
"=",
"key",
"if",
"skip",
"else",
"self",
".",
"transform_key",
"(",
"key",
")",
"son",
"[",
"k",
"]",
"=",
"self",
".",
"_transform_incoming",
"(",
"son",
".",
"pop",
"(",
"key",
")",
",",
"collection",
",",
"skip",
"=",
"skip",
"-",
"1",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"# recurse into sub-docs",
"son",
"[",
"key",
"]",
"=",
"self",
".",
"_transform_incoming",
"(",
"value",
",",
"collection",
",",
"skip",
"=",
"skip",
"-",
"1",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"son",
"[",
"key",
"]",
"=",
"[",
"self",
".",
"_transform_incoming",
"(",
"k",
",",
"collection",
",",
"skip",
"=",
"skip",
"-",
"1",
")",
"for",
"k",
"in",
"value",
"]",
"return",
"son",
"elif",
"isinstance",
"(",
"son",
",",
"list",
")",
":",
"return",
"[",
"self",
".",
"_transform_incoming",
"(",
"item",
",",
"collection",
",",
"skip",
"=",
"skip",
"-",
"1",
")",
"for",
"item",
"in",
"son",
"]",
"else",
":",
"return",
"son"
] |
Recursively replace all keys that need transforming.
|
[
"Recursively",
"replace",
"all",
"keys",
"that",
"need",
"transforming",
"."
] |
python | train | 46 |
jantman/awslimitchecker | awslimitchecker/services/base.py | https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/base.py#L172-L205 |
def set_limit_override(self, limit_name, value, override_ta=True):
"""
Set a new limit ``value`` for the specified limit, overriding
the default. If ``override_ta`` is True, also use this value
instead of any found by Trusted Advisor. This method simply
passes the data through to the
:py:meth:`~awslimitchecker.limit.AwsLimit.set_limit_override`
method of the underlying :py:class:`~.AwsLimit` instance.
:param limit_name: the name of the limit to override the value for
:type limit_name: str
:param value: the new value to set for the limit
:type value: int
:param override_ta: whether or not to also override Trusted
Advisor information
:type override_ta: bool
:raises: ValueError if limit_name is not known to this service
"""
try:
self.limits[limit_name].set_limit_override(
value,
override_ta=override_ta
)
logger.debug(
"Overriding %s limit %s; default=%d override=%d",
self.service_name,
limit_name,
value,
self.limits[limit_name].default_limit,
)
except KeyError:
raise ValueError("{s} service has no '{l}' limit".format(
s=self.service_name,
l=limit_name))
|
[
"def",
"set_limit_override",
"(",
"self",
",",
"limit_name",
",",
"value",
",",
"override_ta",
"=",
"True",
")",
":",
"try",
":",
"self",
".",
"limits",
"[",
"limit_name",
"]",
".",
"set_limit_override",
"(",
"value",
",",
"override_ta",
"=",
"override_ta",
")",
"logger",
".",
"debug",
"(",
"\"Overriding %s limit %s; default=%d override=%d\"",
",",
"self",
".",
"service_name",
",",
"limit_name",
",",
"value",
",",
"self",
".",
"limits",
"[",
"limit_name",
"]",
".",
"default_limit",
",",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"{s} service has no '{l}' limit\"",
".",
"format",
"(",
"s",
"=",
"self",
".",
"service_name",
",",
"l",
"=",
"limit_name",
")",
")"
] |
Set a new limit ``value`` for the specified limit, overriding
the default. If ``override_ta`` is True, also use this value
instead of any found by Trusted Advisor. This method simply
passes the data through to the
:py:meth:`~awslimitchecker.limit.AwsLimit.set_limit_override`
method of the underlying :py:class:`~.AwsLimit` instance.
:param limit_name: the name of the limit to override the value for
:type limit_name: str
:param value: the new value to set for the limit
:type value: int
:param override_ta: whether or not to also override Trusted
Advisor information
:type override_ta: bool
:raises: ValueError if limit_name is not known to this service
|
[
"Set",
"a",
"new",
"limit",
"value",
"for",
"the",
"specified",
"limit",
"overriding",
"the",
"default",
".",
"If",
"override_ta",
"is",
"True",
"also",
"use",
"this",
"value",
"instead",
"of",
"any",
"found",
"by",
"Trusted",
"Advisor",
".",
"This",
"method",
"simply",
"passes",
"the",
"data",
"through",
"to",
"the",
":",
"py",
":",
"meth",
":",
"~awslimitchecker",
".",
"limit",
".",
"AwsLimit",
".",
"set_limit_override",
"method",
"of",
"the",
"underlying",
":",
"py",
":",
"class",
":",
"~",
".",
"AwsLimit",
"instance",
"."
] |
python | train | 40.705882 |
rocky/python3-trepan | celery/ctrepan.py | https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/celery/ctrepan.py#L113-L122 |
def debug(frame=None):
"""Set breakpoint at current location, or a specified frame"""
# ???
if frame is None:
frame = _frame().f_back
dbg = RemoteCeleryTrepan()
dbg.say(BANNER.format(self=dbg))
# dbg.say(SESSION_STARTED.format(self=dbg))
trepan.api.debug(dbg_opts=dbg.dbg_opts)
|
[
"def",
"debug",
"(",
"frame",
"=",
"None",
")",
":",
"# ???",
"if",
"frame",
"is",
"None",
":",
"frame",
"=",
"_frame",
"(",
")",
".",
"f_back",
"dbg",
"=",
"RemoteCeleryTrepan",
"(",
")",
"dbg",
".",
"say",
"(",
"BANNER",
".",
"format",
"(",
"self",
"=",
"dbg",
")",
")",
"# dbg.say(SESSION_STARTED.format(self=dbg))",
"trepan",
".",
"api",
".",
"debug",
"(",
"dbg_opts",
"=",
"dbg",
".",
"dbg_opts",
")"
] |
Set breakpoint at current location, or a specified frame
|
[
"Set",
"breakpoint",
"at",
"current",
"location",
"or",
"a",
"specified",
"frame"
] |
python | test | 30.5 |
gwastro/pycbc-glue | pycbc_glue/ligolw/table.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/table.py#L934-L942 |
def removeChild(self, child):
"""
Remove a child from this element. The child element is
returned, and it's parentNode element is reset.
"""
super(Table, self).removeChild(child)
if child.tagName == ligolw.Column.tagName:
self._update_column_info()
return child
|
[
"def",
"removeChild",
"(",
"self",
",",
"child",
")",
":",
"super",
"(",
"Table",
",",
"self",
")",
".",
"removeChild",
"(",
"child",
")",
"if",
"child",
".",
"tagName",
"==",
"ligolw",
".",
"Column",
".",
"tagName",
":",
"self",
".",
"_update_column_info",
"(",
")",
"return",
"child"
] |
Remove a child from this element. The child element is
returned, and it's parentNode element is reset.
|
[
"Remove",
"a",
"child",
"from",
"this",
"element",
".",
"The",
"child",
"element",
"is",
"returned",
"and",
"it",
"s",
"parentNode",
"element",
"is",
"reset",
"."
] |
python | train | 30.111111 |
spyder-ide/spyder | spyder/plugins/editor/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L2197-L2202 |
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_edit_conditional_breakpoint()
|
[
"def",
"set_or_edit_conditional_breakpoint",
"(",
"self",
")",
":",
"editorstack",
"=",
"self",
".",
"get_current_editorstack",
"(",
")",
"if",
"editorstack",
"is",
"not",
"None",
":",
"self",
".",
"switch_to_plugin",
"(",
")",
"editorstack",
".",
"set_or_edit_conditional_breakpoint",
"(",
")"
] |
Set/Edit conditional breakpoint
|
[
"Set",
"/",
"Edit",
"conditional",
"breakpoint"
] |
python | train | 46.166667 |
manns/pyspread | pyspread/src/gui/grid_panels.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/grid_panels.py#L149-L160 |
def OnShiftVideo(self, event):
"""Shifts through the video"""
length = self.player.get_length()
time = self.player.get_time()
if event.GetWheelRotation() < 0:
target_time = max(0, time-length/100.0)
elif event.GetWheelRotation() > 0:
target_time = min(length, time+length/100.0)
self.player.set_time(int(target_time))
|
[
"def",
"OnShiftVideo",
"(",
"self",
",",
"event",
")",
":",
"length",
"=",
"self",
".",
"player",
".",
"get_length",
"(",
")",
"time",
"=",
"self",
".",
"player",
".",
"get_time",
"(",
")",
"if",
"event",
".",
"GetWheelRotation",
"(",
")",
"<",
"0",
":",
"target_time",
"=",
"max",
"(",
"0",
",",
"time",
"-",
"length",
"/",
"100.0",
")",
"elif",
"event",
".",
"GetWheelRotation",
"(",
")",
">",
"0",
":",
"target_time",
"=",
"min",
"(",
"length",
",",
"time",
"+",
"length",
"/",
"100.0",
")",
"self",
".",
"player",
".",
"set_time",
"(",
"int",
"(",
"target_time",
")",
")"
] |
Shifts through the video
|
[
"Shifts",
"through",
"the",
"video"
] |
python | train | 31.75 |
idlesign/uwsgiconf | uwsgiconf/options/caching.py | https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/caching.py#L19-L37 |
def set_basic_params(self, no_expire=None, expire_scan_interval=None, report_freed=None):
"""
:param bool no_expire: Disable auto sweep of expired items.
Since uWSGI 1.2, cache item expiration is managed by a thread in the master process,
to reduce the risk of deadlock. This thread can be disabled
(making item expiry a no-op) with the this option.
:param int expire_scan_interval: Set the frequency (in seconds) of cache sweeper scans. Default: 3.
:param bool report_freed: Constantly report the cache item freed by the sweeper.
.. warning:: Use only for debug.
"""
self._set('cache-no-expire', no_expire, cast=bool)
self._set('cache-report-freed-items', report_freed, cast=bool)
self._set('cache-expire-freq', expire_scan_interval)
return self._section
|
[
"def",
"set_basic_params",
"(",
"self",
",",
"no_expire",
"=",
"None",
",",
"expire_scan_interval",
"=",
"None",
",",
"report_freed",
"=",
"None",
")",
":",
"self",
".",
"_set",
"(",
"'cache-no-expire'",
",",
"no_expire",
",",
"cast",
"=",
"bool",
")",
"self",
".",
"_set",
"(",
"'cache-report-freed-items'",
",",
"report_freed",
",",
"cast",
"=",
"bool",
")",
"self",
".",
"_set",
"(",
"'cache-expire-freq'",
",",
"expire_scan_interval",
")",
"return",
"self",
".",
"_section"
] |
:param bool no_expire: Disable auto sweep of expired items.
Since uWSGI 1.2, cache item expiration is managed by a thread in the master process,
to reduce the risk of deadlock. This thread can be disabled
(making item expiry a no-op) with the this option.
:param int expire_scan_interval: Set the frequency (in seconds) of cache sweeper scans. Default: 3.
:param bool report_freed: Constantly report the cache item freed by the sweeper.
.. warning:: Use only for debug.
|
[
":",
"param",
"bool",
"no_expire",
":",
"Disable",
"auto",
"sweep",
"of",
"expired",
"items",
".",
"Since",
"uWSGI",
"1",
".",
"2",
"cache",
"item",
"expiration",
"is",
"managed",
"by",
"a",
"thread",
"in",
"the",
"master",
"process",
"to",
"reduce",
"the",
"risk",
"of",
"deadlock",
".",
"This",
"thread",
"can",
"be",
"disabled",
"(",
"making",
"item",
"expiry",
"a",
"no",
"-",
"op",
")",
"with",
"the",
"this",
"option",
"."
] |
python | train | 45.368421 |
scheibler/khard | khard/address_book.py | https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/address_book.py#L202-L215 |
def get_short_uid(self, uid):
"""Get the shortend UID for the given UID.
:param uid: the full UID to shorten
:type uid: str
:returns: the shortend uid or the empty string
:rtype: str
"""
if uid:
short_uids = self.get_short_uid_dict()
for length_of_uid in range(len(uid), 0, -1):
if short_uids.get(uid[:length_of_uid]) is not None:
return uid[:length_of_uid]
return ""
|
[
"def",
"get_short_uid",
"(",
"self",
",",
"uid",
")",
":",
"if",
"uid",
":",
"short_uids",
"=",
"self",
".",
"get_short_uid_dict",
"(",
")",
"for",
"length_of_uid",
"in",
"range",
"(",
"len",
"(",
"uid",
")",
",",
"0",
",",
"-",
"1",
")",
":",
"if",
"short_uids",
".",
"get",
"(",
"uid",
"[",
":",
"length_of_uid",
"]",
")",
"is",
"not",
"None",
":",
"return",
"uid",
"[",
":",
"length_of_uid",
"]",
"return",
"\"\""
] |
Get the shortend UID for the given UID.
:param uid: the full UID to shorten
:type uid: str
:returns: the shortend uid or the empty string
:rtype: str
|
[
"Get",
"the",
"shortend",
"UID",
"for",
"the",
"given",
"UID",
"."
] |
python | test | 34.214286 |
jtauber/sebastian | sebastian/core/elements.py | https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L226-L231 |
def append(self, point):
"""
appends a copy of the given point to this sequence
"""
point = Point(point)
self._elements.append(point)
|
[
"def",
"append",
"(",
"self",
",",
"point",
")",
":",
"point",
"=",
"Point",
"(",
"point",
")",
"self",
".",
"_elements",
".",
"append",
"(",
"point",
")"
] |
appends a copy of the given point to this sequence
|
[
"appends",
"a",
"copy",
"of",
"the",
"given",
"point",
"to",
"this",
"sequence"
] |
python | train | 28 |
pybel/pybel-tools | src/pybel_tools/selection/induce_subgraph.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/selection/induce_subgraph.py#L35-L44 |
def get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:
"""Get a sub-graph induced over all nodes matching the query string.
:param graph: A BEL Graph
:param query: A query string or iterable of query strings for node names
Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
"""
nodes = search_node_names(graph, query)
return get_subgraph_by_induction(graph, nodes)
|
[
"def",
"get_subgraph_by_node_search",
"(",
"graph",
":",
"BELGraph",
",",
"query",
":",
"Strings",
")",
"->",
"BELGraph",
":",
"nodes",
"=",
"search_node_names",
"(",
"graph",
",",
"query",
")",
"return",
"get_subgraph_by_induction",
"(",
"graph",
",",
"nodes",
")"
] |
Get a sub-graph induced over all nodes matching the query string.
:param graph: A BEL Graph
:param query: A query string or iterable of query strings for node names
Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
|
[
"Get",
"a",
"sub",
"-",
"graph",
"induced",
"over",
"all",
"nodes",
"matching",
"the",
"query",
"string",
"."
] |
python | valid | 43.5 |
CalebBell/fluids | fluids/core.py | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/core.py#L2202-L2238 |
def gravity(latitude, H):
r'''Calculates local acceleration due to gravity `g` according to [1]_.
Uses latitude and height to calculate `g`.
.. math::
g = 9.780356(1 + 0.0052885\sin^2\phi - 0.0000059^22\phi)
- 3.086\times 10^{-6} H
Parameters
----------
latitude : float
Degrees, [degrees]
H : float
Height above earth's surface [m]
Returns
-------
g : float
Acceleration due to gravity, [m/s^2]
Notes
-----
Better models, such as EGM2008 exist.
Examples
--------
>>> gravity(55, 1E4)
9.784151976863571
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
'''
lat = latitude*pi/180
g = 9.780356*(1+0.0052885*sin(lat)**2 -0.0000059*sin(2*lat)**2)-3.086E-6*H
return g
|
[
"def",
"gravity",
"(",
"latitude",
",",
"H",
")",
":",
"lat",
"=",
"latitude",
"*",
"pi",
"/",
"180",
"g",
"=",
"9.780356",
"*",
"(",
"1",
"+",
"0.0052885",
"*",
"sin",
"(",
"lat",
")",
"**",
"2",
"-",
"0.0000059",
"*",
"sin",
"(",
"2",
"*",
"lat",
")",
"**",
"2",
")",
"-",
"3.086E-6",
"*",
"H",
"return",
"g"
] |
r'''Calculates local acceleration due to gravity `g` according to [1]_.
Uses latitude and height to calculate `g`.
.. math::
g = 9.780356(1 + 0.0052885\sin^2\phi - 0.0000059^22\phi)
- 3.086\times 10^{-6} H
Parameters
----------
latitude : float
Degrees, [degrees]
H : float
Height above earth's surface [m]
Returns
-------
g : float
Acceleration due to gravity, [m/s^2]
Notes
-----
Better models, such as EGM2008 exist.
Examples
--------
>>> gravity(55, 1E4)
9.784151976863571
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
|
[
"r",
"Calculates",
"local",
"acceleration",
"due",
"to",
"gravity",
"g",
"according",
"to",
"[",
"1",
"]",
"_",
".",
"Uses",
"latitude",
"and",
"height",
"to",
"calculate",
"g",
"."
] |
python | train | 23.72973 |
adamrehn/ue4cli | ue4cli/ThirdPartyLibraryDetails.py | https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/ThirdPartyLibraryDetails.py#L118-L122 |
def getPreprocessorDefinitions(self, engineRoot, delimiter=' '):
"""
Returns the list of preprocessor definitions for this library, joined using the specified delimiter
"""
return delimiter.join(self.resolveRoot(self.definitions, engineRoot))
|
[
"def",
"getPreprocessorDefinitions",
"(",
"self",
",",
"engineRoot",
",",
"delimiter",
"=",
"' '",
")",
":",
"return",
"delimiter",
".",
"join",
"(",
"self",
".",
"resolveRoot",
"(",
"self",
".",
"definitions",
",",
"engineRoot",
")",
")"
] |
Returns the list of preprocessor definitions for this library, joined using the specified delimiter
|
[
"Returns",
"the",
"list",
"of",
"preprocessor",
"definitions",
"for",
"this",
"library",
"joined",
"using",
"the",
"specified",
"delimiter"
] |
python | train | 49.2 |
sorgerlab/indra | indra/sources/sparser/api.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L190-L212 |
def process_xml(xml_str):
"""Return processor with Statements extracted from a Sparser XML.
Parameters
----------
xml_str : str
The XML string obtained by reading content with Sparser, using the
'xml' output mode.
Returns
-------
sp : SparserXMLProcessor
A SparserXMLProcessor which has extracted Statements as its
statements attribute.
"""
try:
tree = ET.XML(xml_str, parser=UTB())
except ET.ParseError as e:
logger.error('Could not parse XML string')
logger.error(e)
return None
sp = _process_elementtree(tree)
return sp
|
[
"def",
"process_xml",
"(",
"xml_str",
")",
":",
"try",
":",
"tree",
"=",
"ET",
".",
"XML",
"(",
"xml_str",
",",
"parser",
"=",
"UTB",
"(",
")",
")",
"except",
"ET",
".",
"ParseError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not parse XML string'",
")",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"None",
"sp",
"=",
"_process_elementtree",
"(",
"tree",
")",
"return",
"sp"
] |
Return processor with Statements extracted from a Sparser XML.
Parameters
----------
xml_str : str
The XML string obtained by reading content with Sparser, using the
'xml' output mode.
Returns
-------
sp : SparserXMLProcessor
A SparserXMLProcessor which has extracted Statements as its
statements attribute.
|
[
"Return",
"processor",
"with",
"Statements",
"extracted",
"from",
"a",
"Sparser",
"XML",
"."
] |
python | train | 26.695652 |
genialis/resolwe | resolwe/flow/elastic_indexes/base.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/base.py#L61-L66 |
def get_owner_names_value(self, obj):
"""Extract owners' names."""
return [
self._get_user(user)
for user in get_users_with_permission(obj, get_full_perm('owner', obj))
]
|
[
"def",
"get_owner_names_value",
"(",
"self",
",",
"obj",
")",
":",
"return",
"[",
"self",
".",
"_get_user",
"(",
"user",
")",
"for",
"user",
"in",
"get_users_with_permission",
"(",
"obj",
",",
"get_full_perm",
"(",
"'owner'",
",",
"obj",
")",
")",
"]"
] |
Extract owners' names.
|
[
"Extract",
"owners",
"names",
"."
] |
python | train | 35.5 |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py#L1611-L1638 |
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
"""GetWorkItemTemplate.
[Preview API] Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if fields is not None:
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='5.1-preview.3',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
|
[
"def",
"get_work_item_template",
"(",
"self",
",",
"project",
",",
"type",
",",
"fields",
"=",
"None",
",",
"as_of",
"=",
"None",
",",
"expand",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"type",
"is",
"not",
"None",
":",
"route_values",
"[",
"'type'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'type'",
",",
"type",
",",
"'str'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"fields",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'fields'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'fields'",
",",
"fields",
",",
"'str'",
")",
"if",
"as_of",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'asOf'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'as_of'",
",",
"as_of",
",",
"'iso-8601'",
")",
"if",
"expand",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'$expand'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'expand'",
",",
"expand",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'62d3d110-0047-428c-ad3c-4fe872c91c74'",
",",
"version",
"=",
"'5.1-preview.3'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'WorkItem'",
",",
"response",
")"
] |
GetWorkItemTemplate.
[Preview API] Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
|
[
"GetWorkItemTemplate",
".",
"[",
"Preview",
"API",
"]",
"Returns",
"a",
"single",
"work",
"item",
"from",
"a",
"template",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"param",
"str",
"type",
":",
"The",
"work",
"item",
"type",
"name",
":",
"param",
"str",
"fields",
":",
"Comma",
"-",
"separated",
"list",
"of",
"requested",
"fields",
":",
"param",
"datetime",
"as_of",
":",
"AsOf",
"UTC",
"date",
"time",
"string",
":",
"param",
"str",
"expand",
":",
"The",
"expand",
"parameters",
"for",
"work",
"item",
"attributes",
".",
"Possible",
"options",
"are",
"{",
"None",
"Relations",
"Fields",
"Links",
"All",
"}",
".",
":",
"rtype",
":",
":",
"class",
":",
"<WorkItem",
">",
"<azure",
".",
"devops",
".",
"v5_1",
".",
"work",
"-",
"item",
"-",
"tracking",
".",
"models",
".",
"WorkItem",
">"
] |
python | train | 58.035714 |
gccxml/pygccxml | pygccxml/declarations/scopedef.py | https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/scopedef.py#L457-L470 |
def _find_single(self, match_class, **keywds):
"""implementation details"""
self._logger.debug('find single query execution - started')
start_time = timeit.default_timer()
norm_keywds = self.__normalize_args(**keywds)
decl_matcher = self.__create_matcher(match_class, **norm_keywds)
dtype = self.__findout_decl_type(match_class, **norm_keywds)
recursive_ = self.__findout_recursive(**norm_keywds)
decls = self.__findout_range(norm_keywds['name'], dtype, recursive_)
found = matcher.get_single(decl_matcher, decls, False)
self._logger.debug(
'find single query execution - done( %f seconds )',
(timeit.default_timer() - start_time))
return found
|
[
"def",
"_find_single",
"(",
"self",
",",
"match_class",
",",
"*",
"*",
"keywds",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"'find single query execution - started'",
")",
"start_time",
"=",
"timeit",
".",
"default_timer",
"(",
")",
"norm_keywds",
"=",
"self",
".",
"__normalize_args",
"(",
"*",
"*",
"keywds",
")",
"decl_matcher",
"=",
"self",
".",
"__create_matcher",
"(",
"match_class",
",",
"*",
"*",
"norm_keywds",
")",
"dtype",
"=",
"self",
".",
"__findout_decl_type",
"(",
"match_class",
",",
"*",
"*",
"norm_keywds",
")",
"recursive_",
"=",
"self",
".",
"__findout_recursive",
"(",
"*",
"*",
"norm_keywds",
")",
"decls",
"=",
"self",
".",
"__findout_range",
"(",
"norm_keywds",
"[",
"'name'",
"]",
",",
"dtype",
",",
"recursive_",
")",
"found",
"=",
"matcher",
".",
"get_single",
"(",
"decl_matcher",
",",
"decls",
",",
"False",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"'find single query execution - done( %f seconds )'",
",",
"(",
"timeit",
".",
"default_timer",
"(",
")",
"-",
"start_time",
")",
")",
"return",
"found"
] |
implementation details
|
[
"implementation",
"details"
] |
python | train | 53.071429 |
estnltk/estnltk | estnltk/text.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L238-L249 |
def spans(self, layer):
"""Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
"""
spans = []
for data in self[layer]:
spans.append((data[START], data[END]))
return spans
|
[
"def",
"spans",
"(",
"self",
",",
"layer",
")",
":",
"spans",
"=",
"[",
"]",
"for",
"data",
"in",
"self",
"[",
"layer",
"]",
":",
"spans",
".",
"append",
"(",
"(",
"data",
"[",
"START",
"]",
",",
"data",
"[",
"END",
"]",
")",
")",
"return",
"spans"
] |
Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
|
[
"Retrieve",
"(",
"start",
"end",
")",
"tuples",
"denoting",
"the",
"spans",
"of",
"given",
"layer",
"elements",
"."
] |
python | train | 27.75 |
mongodb/mongo-python-driver | bson/__init__.py | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/bson/__init__.py#L399-L416 |
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position, opts)
try:
value, position = _ELEMENT_GETTER[element_type](data, position,
obj_end, opts,
element_name)
except KeyError:
_raise_unknown_type(element_type, element_name)
if opts.type_registry._decoder_map:
custom_decoder = opts.type_registry._decoder_map.get(type(value))
if custom_decoder is not None:
value = custom_decoder(value)
return element_name, value, position
|
[
"def",
"_element_to_dict",
"(",
"data",
",",
"position",
",",
"obj_end",
",",
"opts",
")",
":",
"element_type",
"=",
"data",
"[",
"position",
":",
"position",
"+",
"1",
"]",
"position",
"+=",
"1",
"element_name",
",",
"position",
"=",
"_get_c_string",
"(",
"data",
",",
"position",
",",
"opts",
")",
"try",
":",
"value",
",",
"position",
"=",
"_ELEMENT_GETTER",
"[",
"element_type",
"]",
"(",
"data",
",",
"position",
",",
"obj_end",
",",
"opts",
",",
"element_name",
")",
"except",
"KeyError",
":",
"_raise_unknown_type",
"(",
"element_type",
",",
"element_name",
")",
"if",
"opts",
".",
"type_registry",
".",
"_decoder_map",
":",
"custom_decoder",
"=",
"opts",
".",
"type_registry",
".",
"_decoder_map",
".",
"get",
"(",
"type",
"(",
"value",
")",
")",
"if",
"custom_decoder",
"is",
"not",
"None",
":",
"value",
"=",
"custom_decoder",
"(",
"value",
")",
"return",
"element_name",
",",
"value",
",",
"position"
] |
Decode a single key, value pair.
|
[
"Decode",
"a",
"single",
"key",
"value",
"pair",
"."
] |
python | train | 41.388889 |
tylertreat/BigQuery-Python | bigquery/client.py | https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1583-L1603 |
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)]
|
[
"def",
"_filter_tables_by_time",
"(",
"self",
",",
"tables",
",",
"start_time",
",",
"end_time",
")",
":",
"return",
"[",
"table_name",
"for",
"(",
"table_name",
",",
"unix_seconds",
")",
"in",
"tables",
".",
"items",
"(",
")",
"if",
"self",
".",
"_in_range",
"(",
"start_time",
",",
"end_time",
",",
"unix_seconds",
")",
"]"
] |
Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
|
[
"Filter",
"a",
"table",
"dictionary",
"and",
"return",
"table",
"names",
"based",
"on",
"the",
"range",
"of",
"start",
"and",
"end",
"times",
"in",
"unix",
"seconds",
"."
] |
python | train | 33.714286 |
wbond/asn1crypto | asn1crypto/core.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L4329-L4344 |
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if force:
self._set_contents(force=force)
return Asn1Value.dump(self)
|
[
"def",
"dump",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"self",
".",
"_set_contents",
"(",
"force",
"=",
"force",
")",
"return",
"Asn1Value",
".",
"dump",
"(",
"self",
")"
] |
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
|
[
"Encodes",
"the",
"value",
"using",
"DER"
] |
python | train | 25.5625 |
Tethik/flask-session-captcha | flask_session_captcha/__init__.py | https://github.com/Tethik/flask-session-captcha/blob/02bb68b9563ecc4967650b49b4c8447fecbc92d6/flask_session_captcha/__init__.py#L43-L58 |
def generate(self):
"""
Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}">
"""
answer = self.rand.randrange(self.max)
answer = str(answer).zfill(self.digits)
image_data = self.image_generator.generate(answer)
base64_captcha = base64.b64encode(image_data.getvalue()).decode("ascii")
logging.debug('Generated captcha with answer: ' + answer)
session['captcha_answer'] = answer
return base64_captcha
|
[
"def",
"generate",
"(",
"self",
")",
":",
"answer",
"=",
"self",
".",
"rand",
".",
"randrange",
"(",
"self",
".",
"max",
")",
"answer",
"=",
"str",
"(",
"answer",
")",
".",
"zfill",
"(",
"self",
".",
"digits",
")",
"image_data",
"=",
"self",
".",
"image_generator",
".",
"generate",
"(",
"answer",
")",
"base64_captcha",
"=",
"base64",
".",
"b64encode",
"(",
"image_data",
".",
"getvalue",
"(",
")",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
"logging",
".",
"debug",
"(",
"'Generated captcha with answer: '",
"+",
"answer",
")",
"session",
"[",
"'captcha_answer'",
"]",
"=",
"answer",
"return",
"base64_captcha"
] |
Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}">
|
[
"Generates",
"and",
"returns",
"a",
"numeric",
"captcha",
"image",
"in",
"base64",
"format",
".",
"Saves",
"the",
"correct",
"answer",
"in",
"session",
"[",
"captcha_answer",
"]",
"Use",
"later",
"as",
":"
] |
python | train | 40.4375 |
un33k/django-toolware | toolware/utils/convert.py | https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/convert.py#L20-L26 |
def md_to_text(content):
""" Converts markdown content to text """
text = None
html = markdown.markdown(content)
if html:
text = html_to_text(content)
return text
|
[
"def",
"md_to_text",
"(",
"content",
")",
":",
"text",
"=",
"None",
"html",
"=",
"markdown",
".",
"markdown",
"(",
"content",
")",
"if",
"html",
":",
"text",
"=",
"html_to_text",
"(",
"content",
")",
"return",
"text"
] |
Converts markdown content to text
|
[
"Converts",
"markdown",
"content",
"to",
"text"
] |
python | test | 26.285714 |
SCIP-Interfaces/PySCIPOpt | examples/finished/atsp.py | https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/atsp.py#L172-L182 |
def sequence(arcs):
"""sequence: make a list of cities to visit, from set of arcs"""
succ = {}
for (i,j) in arcs:
succ[i] = j
curr = 1 # first node being visited
sol = [curr]
for i in range(len(arcs)-2):
curr = succ[curr]
sol.append(curr)
return sol
|
[
"def",
"sequence",
"(",
"arcs",
")",
":",
"succ",
"=",
"{",
"}",
"for",
"(",
"i",
",",
"j",
")",
"in",
"arcs",
":",
"succ",
"[",
"i",
"]",
"=",
"j",
"curr",
"=",
"1",
"# first node being visited",
"sol",
"=",
"[",
"curr",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"arcs",
")",
"-",
"2",
")",
":",
"curr",
"=",
"succ",
"[",
"curr",
"]",
"sol",
".",
"append",
"(",
"curr",
")",
"return",
"sol"
] |
sequence: make a list of cities to visit, from set of arcs
|
[
"sequence",
":",
"make",
"a",
"list",
"of",
"cities",
"to",
"visit",
"from",
"set",
"of",
"arcs"
] |
python | train | 26.727273 |
Tanganelli/CoAPthon3 | coapthon/client/helperclient.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/client/helperclient.py#L264-L277 |
def mk_request_non(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
request.type = defines.Types["NON"]
return request
|
[
"def",
"mk_request_non",
"(",
"self",
",",
"method",
",",
"path",
")",
":",
"request",
"=",
"Request",
"(",
")",
"request",
".",
"destination",
"=",
"self",
".",
"server",
"request",
".",
"code",
"=",
"method",
".",
"number",
"request",
".",
"uri_path",
"=",
"path",
"request",
".",
"type",
"=",
"defines",
".",
"Types",
"[",
"\"NON\"",
"]",
"return",
"request"
] |
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
|
[
"Create",
"a",
"request",
"."
] |
python | train | 28.357143 |
Music-Moo/music2storage | music2storage/connection.py | https://github.com/Music-Moo/music2storage/blob/de12b9046dd227fc8c1512b5060e7f5fcd8b0ee2/music2storage/connection.py#L36-L58 |
def use_storage_service(self, service_name, custom_path):
"""
Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only)
"""
try:
self.current_storage = self.storage_services[service_name]
except KeyError:
if service_name == 'google drive':
self.storage_services['google drive'] = GoogleDrive()
self.current_storage = self.storage_services['google drive']
self.current_storage.connect()
elif service_name == 'dropbox':
log.error('Dropbox is not supported yet.')
elif service_name == 'local':
self.storage_services['local'] = LocalStorage(custom_path=custom_path)
self.current_storage = self.storage_services['local']
self.current_storage.connect()
else:
log.error('Storage service name is not recognized.')
|
[
"def",
"use_storage_service",
"(",
"self",
",",
"service_name",
",",
"custom_path",
")",
":",
"try",
":",
"self",
".",
"current_storage",
"=",
"self",
".",
"storage_services",
"[",
"service_name",
"]",
"except",
"KeyError",
":",
"if",
"service_name",
"==",
"'google drive'",
":",
"self",
".",
"storage_services",
"[",
"'google drive'",
"]",
"=",
"GoogleDrive",
"(",
")",
"self",
".",
"current_storage",
"=",
"self",
".",
"storage_services",
"[",
"'google drive'",
"]",
"self",
".",
"current_storage",
".",
"connect",
"(",
")",
"elif",
"service_name",
"==",
"'dropbox'",
":",
"log",
".",
"error",
"(",
"'Dropbox is not supported yet.'",
")",
"elif",
"service_name",
"==",
"'local'",
":",
"self",
".",
"storage_services",
"[",
"'local'",
"]",
"=",
"LocalStorage",
"(",
"custom_path",
"=",
"custom_path",
")",
"self",
".",
"current_storage",
"=",
"self",
".",
"storage_services",
"[",
"'local'",
"]",
"self",
".",
"current_storage",
".",
"connect",
"(",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Storage service name is not recognized.'",
")"
] |
Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only)
|
[
"Sets",
"the",
"current",
"storage",
"service",
"to",
"service_name",
"and",
"runs",
"the",
"connect",
"method",
"on",
"the",
"service",
"."
] |
python | test | 50.26087 |
sosy-lab/benchexec | benchexec/check_cgroups.py | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/check_cgroups.py#L36-L80 |
def check_cgroup_availability(wait=1):
"""
Basic utility to check the availability and permissions of cgroups.
This will log some warnings for the user if necessary.
On some systems, daemons such as cgrulesengd might interfere with the cgroups
of a process soon after it was started. Thus this function starts a process,
waits a configurable amount of time, and check whether the cgroups have been changed.
@param wait: a non-negative int that is interpreted as seconds to wait during the check
@raise SystemExit: if cgroups are not usable
"""
logging.basicConfig(format="%(levelname)s: %(message)s")
runexecutor = RunExecutor()
my_cgroups = runexecutor.cgroups
if not (CPUACCT in my_cgroups and
CPUSET in my_cgroups and
# FREEZER in my_cgroups and # For now, we do not require freezer
MEMORY in my_cgroups):
sys.exit(1)
with tempfile.NamedTemporaryFile(mode='rt') as tmp:
runexecutor.execute_run(['sh', '-c', 'sleep {0}; cat /proc/self/cgroup'.format(wait)], tmp.name,
memlimit=1024*1024, # set memlimit to force check for swapaccount
# set cores and memory_nodes to force usage of CPUSET
cores=util.parse_int_list(my_cgroups.get_value(CPUSET, 'cpus')),
memory_nodes=my_cgroups.read_allowed_memory_banks())
lines = []
for line in tmp:
line = line.strip()
if line and not line == "sh -c 'sleep {0}; cat /proc/self/cgroup'".format(wait) \
and not all(c == '-' for c in line):
lines.append(line)
task_cgroups = find_my_cgroups(lines)
fail = False
for subsystem in CPUACCT, CPUSET, MEMORY, FREEZER:
if subsystem in my_cgroups:
if not task_cgroups[subsystem].startswith(os.path.join(my_cgroups[subsystem], 'benchmark_')):
logging.warning('Task was in cgroup %s for subsystem %s, '
'which is not the expected sub-cgroup of %s. '
'Maybe some other program is interfering with cgroup management?',
task_cgroups[subsystem], subsystem, my_cgroups[subsystem])
fail = True
if fail:
sys.exit(1)
|
[
"def",
"check_cgroup_availability",
"(",
"wait",
"=",
"1",
")",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"\"%(levelname)s: %(message)s\"",
")",
"runexecutor",
"=",
"RunExecutor",
"(",
")",
"my_cgroups",
"=",
"runexecutor",
".",
"cgroups",
"if",
"not",
"(",
"CPUACCT",
"in",
"my_cgroups",
"and",
"CPUSET",
"in",
"my_cgroups",
"and",
"# FREEZER in my_cgroups and # For now, we do not require freezer",
"MEMORY",
"in",
"my_cgroups",
")",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'rt'",
")",
"as",
"tmp",
":",
"runexecutor",
".",
"execute_run",
"(",
"[",
"'sh'",
",",
"'-c'",
",",
"'sleep {0}; cat /proc/self/cgroup'",
".",
"format",
"(",
"wait",
")",
"]",
",",
"tmp",
".",
"name",
",",
"memlimit",
"=",
"1024",
"*",
"1024",
",",
"# set memlimit to force check for swapaccount",
"# set cores and memory_nodes to force usage of CPUSET",
"cores",
"=",
"util",
".",
"parse_int_list",
"(",
"my_cgroups",
".",
"get_value",
"(",
"CPUSET",
",",
"'cpus'",
")",
")",
",",
"memory_nodes",
"=",
"my_cgroups",
".",
"read_allowed_memory_banks",
"(",
")",
")",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"tmp",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"not",
"line",
"==",
"\"sh -c 'sleep {0}; cat /proc/self/cgroup'\"",
".",
"format",
"(",
"wait",
")",
"and",
"not",
"all",
"(",
"c",
"==",
"'-'",
"for",
"c",
"in",
"line",
")",
":",
"lines",
".",
"append",
"(",
"line",
")",
"task_cgroups",
"=",
"find_my_cgroups",
"(",
"lines",
")",
"fail",
"=",
"False",
"for",
"subsystem",
"in",
"CPUACCT",
",",
"CPUSET",
",",
"MEMORY",
",",
"FREEZER",
":",
"if",
"subsystem",
"in",
"my_cgroups",
":",
"if",
"not",
"task_cgroups",
"[",
"subsystem",
"]",
".",
"startswith",
"(",
"os",
".",
"path",
".",
"join",
"(",
"my_cgroups",
"[",
"subsystem",
"]",
",",
"'benchmark_'",
")",
")",
":",
"logging",
".",
"warning",
"(",
"'Task was in cgroup %s for subsystem %s, '",
"'which is not the expected sub-cgroup of %s. '",
"'Maybe some other program is interfering with cgroup management?'",
",",
"task_cgroups",
"[",
"subsystem",
"]",
",",
"subsystem",
",",
"my_cgroups",
"[",
"subsystem",
"]",
")",
"fail",
"=",
"True",
"if",
"fail",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Basic utility to check the availability and permissions of cgroups.
This will log some warnings for the user if necessary.
On some systems, daemons such as cgrulesengd might interfere with the cgroups
of a process soon after it was started. Thus this function starts a process,
waits a configurable amount of time, and checks whether the cgroups have been changed.
@param wait: a non-negative int that is interpreted as seconds to wait during the check
@raise SystemExit: if cgroups are not usable
|
[
"Basic",
"utility",
"to",
"check",
"the",
"availability",
"and",
"permissions",
"of",
"cgroups",
".",
"This",
"will",
"log",
"some",
"warnings",
"for",
"the",
"user",
"if",
"necessary",
".",
"On",
"some",
"systems",
"daemons",
"such",
"as",
"cgrulesengd",
"might",
"interfere",
"with",
"the",
"cgroups",
"of",
"a",
"process",
"soon",
"after",
"it",
"was",
"started",
".",
"Thus",
"this",
"function",
"starts",
"a",
"process",
"waits",
"a",
"configurable",
"amount",
"of",
"time",
"and",
"check",
"whether",
"the",
"cgroups",
"have",
"been",
"changed",
"."
] |
python
|
train
| 51.666667 |
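A note on the verification step in the check_cgroup_availability entry above: the run dumps /proc/self/cgroup into a temporary file and then compares each subsystem's path against the expected benchmark_* sub-cgroup of the checker's own cgroup. The sketch below replays only that comparison with made-up paths (no BenchExec dependency), so the intent of the startswith check is easier to see in isolation.

import os

# Hypothetical cgroup paths: the parent cgroups owned by the checking process,
# and the cgroups observed for the benchmarked child task.
my_cgroups = {'cpuacct': '/sys/fs/cgroup/cpuacct/user.slice',
              'memory': '/sys/fs/cgroup/memory/user.slice'}
task_cgroups = {'cpuacct': '/sys/fs/cgroup/cpuacct/user.slice/benchmark_1234',
                'memory': '/sys/fs/cgroup/memory/other.slice'}   # moved by some other daemon

for subsystem, parent in my_cgroups.items():
    expected_prefix = os.path.join(parent, 'benchmark_')
    if not task_cgroups[subsystem].startswith(expected_prefix):
        print('unexpected cgroup for %s: %s' % (subsystem, task_cgroups[subsystem]))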
aestrivex/bctpy
|
bct/algorithms/centrality.py
|
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L703-L742
|
def participation_coef(W, ci, degree='undirected'):
'''
Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient
'''
if degree == 'in':
W = W.T
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = len(W) # number of vertices
Ko = np.sum(W, axis=1) # (out) degree
Gc = np.dot((W != 0), np.diag(ci)) # neighbor community affiliation
Kc2 = np.zeros((n,)) # community-specific neighbors
for i in range(1, int(np.max(ci)) + 1):
Kc2 += np.square(np.sum(W * (Gc == i), axis=1))
P = np.ones((n,)) - Kc2 / np.square(Ko)
# P=0 if for nodes with no (out) neighbors
P[np.where(np.logical_not(Ko))] = 0
return P
|
[
"def",
"participation_coef",
"(",
"W",
",",
"ci",
",",
"degree",
"=",
"'undirected'",
")",
":",
"if",
"degree",
"==",
"'in'",
":",
"W",
"=",
"W",
".",
"T",
"_",
",",
"ci",
"=",
"np",
".",
"unique",
"(",
"ci",
",",
"return_inverse",
"=",
"True",
")",
"ci",
"+=",
"1",
"n",
"=",
"len",
"(",
"W",
")",
"# number of vertices",
"Ko",
"=",
"np",
".",
"sum",
"(",
"W",
",",
"axis",
"=",
"1",
")",
"# (out) degree",
"Gc",
"=",
"np",
".",
"dot",
"(",
"(",
"W",
"!=",
"0",
")",
",",
"np",
".",
"diag",
"(",
"ci",
")",
")",
"# neighbor community affiliation",
"Kc2",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
")",
")",
"# community-specific neighbors",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"int",
"(",
"np",
".",
"max",
"(",
"ci",
")",
")",
"+",
"1",
")",
":",
"Kc2",
"+=",
"np",
".",
"square",
"(",
"np",
".",
"sum",
"(",
"W",
"*",
"(",
"Gc",
"==",
"i",
")",
",",
"axis",
"=",
"1",
")",
")",
"P",
"=",
"np",
".",
"ones",
"(",
"(",
"n",
",",
")",
")",
"-",
"Kc2",
"/",
"np",
".",
"square",
"(",
"Ko",
")",
"# P=0 if for nodes with no (out) neighbors",
"P",
"[",
"np",
".",
"where",
"(",
"np",
".",
"logical_not",
"(",
"Ko",
")",
")",
"]",
"=",
"0",
"return",
"P"
] |
Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient
|
[
"Participation",
"coefficient",
"is",
"a",
"measure",
"of",
"diversity",
"of",
"intermodular",
"connections",
"of",
"individual",
"nodes",
"."
] |
python
|
train
| 29.525 |
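For the participation_coef entry above, the quantity computed is P_i = 1 - sum_s (k_is / k_i)^2, where k_is is node i's degree restricted to community s. The sketch below evaluates that formula directly on a four-node toy graph using only NumPy; the adjacency matrix and community labels are invented for illustration, and bctpy itself is not imported.

import numpy as np

# 4-node undirected toy graph; nodes 0-1 form community 1, nodes 2-3 community 2.
W = np.array([[0, 1, 1, 0],
              [1, 0, 0, 0],
              [1, 0, 0, 1],
              [0, 0, 1, 0]], dtype=float)
ci = np.array([1, 1, 2, 2])

Ko = W.sum(axis=1)                      # (out) degree of each node
P = np.ones(len(W))
for s in np.unique(ci):
    Ks = (W * (ci == s)).sum(axis=1)    # degree restricted to community s
    P -= np.square(Ks / Ko)             # subtract (k_is / k_i)^2
P[Ko == 0] = 0                          # nodes with no neighbors get P = 0

print(P)   # nodes 0 and 2 touch both communities -> 0.5; leaf nodes -> 0.0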
saltstack/salt
|
salt/proxy/dummy.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/dummy.py#L189-L198
|
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages']
|
[
"def",
"uptodate",
"(",
")",
":",
"DETAILS",
"=",
"_load_state",
"(",
")",
"for",
"p",
"in",
"DETAILS",
"[",
"'packages'",
"]",
":",
"version_float",
"=",
"float",
"(",
"DETAILS",
"[",
"'packages'",
"]",
"[",
"p",
"]",
")",
"version_float",
"=",
"version_float",
"+",
"1.0",
"DETAILS",
"[",
"'packages'",
"]",
"[",
"p",
"]",
"=",
"six",
".",
"text_type",
"(",
"version_float",
")",
"return",
"DETAILS",
"[",
"'packages'",
"]"
] |
Call the REST endpoint to see if the packages on the "server" are up to date.
|
[
"Call",
"the",
"REST",
"endpoint",
"to",
"see",
"if",
"the",
"packages",
"on",
"the",
"server",
"are",
"up",
"to",
"date",
"."
] |
python
|
train
| 35.7 |
GoogleCloudPlatform/google-cloud-datastore
|
python/googledatastore/helper.py
|
https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L71-L111
|
def get_credentials_from_env():
"""Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
  environment variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
"""
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
return None
if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_EMULATOR_HOST_ENV)
return None
if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
key = f.read()
credentials = client.SignedJwtAssertionCredentials(
os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
logging.info('connecting using private key file.')
return credentials
try:
credentials = client.GoogleCredentials.get_application_default()
credentials = credentials.create_scoped(SCOPE)
logging.info('connecting using Google Application Default Credentials.')
return credentials
except client.ApplicationDefaultCredentialsError, e:
logging.error('Unable to find any credentials to use. '
'If you are running locally, make sure to set the '
'%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
raise e
|
[
"def",
"get_credentials_from_env",
"(",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV",
")",
":",
"logging",
".",
"info",
"(",
"'connecting without credentials because %s is set.'",
",",
"_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV",
")",
"return",
"None",
"if",
"os",
".",
"getenv",
"(",
"_DATASTORE_EMULATOR_HOST_ENV",
")",
":",
"logging",
".",
"info",
"(",
"'connecting without credentials because %s is set.'",
",",
"_DATASTORE_EMULATOR_HOST_ENV",
")",
"return",
"None",
"if",
"(",
"os",
".",
"getenv",
"(",
"_DATASTORE_SERVICE_ACCOUNT_ENV",
")",
"and",
"os",
".",
"getenv",
"(",
"_DATASTORE_PRIVATE_KEY_FILE_ENV",
")",
")",
":",
"with",
"open",
"(",
"os",
".",
"getenv",
"(",
"_DATASTORE_PRIVATE_KEY_FILE_ENV",
")",
",",
"'rb'",
")",
"as",
"f",
":",
"key",
"=",
"f",
".",
"read",
"(",
")",
"credentials",
"=",
"client",
".",
"SignedJwtAssertionCredentials",
"(",
"os",
".",
"getenv",
"(",
"_DATASTORE_SERVICE_ACCOUNT_ENV",
")",
",",
"key",
",",
"SCOPE",
")",
"logging",
".",
"info",
"(",
"'connecting using private key file.'",
")",
"return",
"credentials",
"try",
":",
"credentials",
"=",
"client",
".",
"GoogleCredentials",
".",
"get_application_default",
"(",
")",
"credentials",
"=",
"credentials",
".",
"create_scoped",
"(",
"SCOPE",
")",
"logging",
".",
"info",
"(",
"'connecting using Google Application Default Credentials.'",
")",
"return",
"credentials",
"except",
"client",
".",
"ApplicationDefaultCredentialsError",
",",
"e",
":",
"logging",
".",
"error",
"(",
"'Unable to find any credentials to use. '",
"'If you are running locally, make sure to set the '",
"'%s environment variable.'",
",",
"_DATASTORE_EMULATOR_HOST_ENV",
")",
"raise",
"e"
] |
Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
environment variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
|
[
"Get",
"credentials",
"from",
"environment",
"variables",
"."
] |
python
|
train
| 41.365854 |
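The precedence described in the get_credentials_from_env entry above reduces to a chain of environment checks. The sketch below mirrors that order using plain os.getenv and placeholder return values; the environment variable names are taken from the docstring, while the function name and labels are illustrative only (no oauth2client dependency, and the internal test-stub variable is omitted).

import os

def pick_credential_source():
    # Mirrors the precedence in get_credentials_from_env(), returning a label
    # instead of a real credentials object.
    if os.getenv('DATASTORE_EMULATOR_HOST'):
        return None                             # emulator: no credentials needed
    if os.getenv('DATASTORE_SERVICE_ACCOUNT') and os.getenv('DATASTORE_PRIVATE_KEY_FILE'):
        return 'signed JWT from private key file'
    return 'application default credentials'

os.environ['DATASTORE_EMULATOR_HOST'] = 'localhost:8081'
print(pick_credential_source())                 # -> None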
saltstack/salt
|
salt/modules/zonecfg.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zonecfg.py#L494-L570
|
def _resource(methode, zone, resource_type, resource_selector, **kwargs):
'''
    internal resource handler
methode : string
add or update
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
**kwargs : string|int|...
resource properties
'''
ret = {'status': True}
# parse kwargs
kwargs = salt.utils.args.clean_kwargs(**kwargs)
for k in kwargs:
if isinstance(kwargs[k], dict) or isinstance(kwargs[k], list):
kwargs[k] = _sanitize_value(kwargs[k])
if methode not in ['add', 'update']:
ret['status'] = False
ret['message'] = 'unknown methode {0}'.format(methode)
return ret
if methode in ['update'] and resource_selector and resource_selector not in kwargs:
ret['status'] = False
ret['message'] = 'resource selector {0} not found in parameters'.format(resource_selector)
return ret
# generate update script
cfg_file = salt.utils.files.mkstemp()
with salt.utils.files.fpopen(cfg_file, 'w+', mode=0o600) as fp_:
if methode in ['add']:
fp_.write("add {0}\n".format(resource_type))
elif methode in ['update']:
if resource_selector:
value = kwargs[resource_selector]
if isinstance(value, dict) or isinstance(value, list):
value = _sanitize_value(value)
value = six.text_type(value).lower() if isinstance(value, bool) else six.text_type(value)
fp_.write("select {0} {1}={2}\n".format(resource_type, resource_selector, _sanitize_value(value)))
else:
fp_.write("select {0}\n".format(resource_type))
for k, v in six.iteritems(kwargs):
if methode in ['update'] and k == resource_selector:
continue
if isinstance(v, dict) or isinstance(v, list):
value = _sanitize_value(value)
value = six.text_type(v).lower() if isinstance(v, bool) else six.text_type(v)
if k in _zonecfg_resource_setters[resource_type]:
fp_.write("set {0}={1}\n".format(k, _sanitize_value(value)))
else:
fp_.write("add {0} {1}\n".format(k, _sanitize_value(value)))
fp_.write("end\n")
# update property
if cfg_file:
_dump_cfg(cfg_file)
res = __salt__['cmd.run_all']('zonecfg -z {zone} -f {path}'.format(
zone=zone,
path=cfg_file,
))
ret['status'] = res['retcode'] == 0
ret['message'] = res['stdout'] if ret['status'] else res['stderr']
if ret['message'] == '':
del ret['message']
else:
ret['message'] = _clean_message(ret['message'])
# cleanup config file
if __salt__['file.file_exists'](cfg_file):
__salt__['file.remove'](cfg_file)
return ret
|
[
"def",
"_resource",
"(",
"methode",
",",
"zone",
",",
"resource_type",
",",
"resource_selector",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'status'",
":",
"True",
"}",
"# parse kwargs",
"kwargs",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"for",
"k",
"in",
"kwargs",
":",
"if",
"isinstance",
"(",
"kwargs",
"[",
"k",
"]",
",",
"dict",
")",
"or",
"isinstance",
"(",
"kwargs",
"[",
"k",
"]",
",",
"list",
")",
":",
"kwargs",
"[",
"k",
"]",
"=",
"_sanitize_value",
"(",
"kwargs",
"[",
"k",
"]",
")",
"if",
"methode",
"not",
"in",
"[",
"'add'",
",",
"'update'",
"]",
":",
"ret",
"[",
"'status'",
"]",
"=",
"False",
"ret",
"[",
"'message'",
"]",
"=",
"'unknown methode {0}'",
".",
"format",
"(",
"methode",
")",
"return",
"ret",
"if",
"methode",
"in",
"[",
"'update'",
"]",
"and",
"resource_selector",
"and",
"resource_selector",
"not",
"in",
"kwargs",
":",
"ret",
"[",
"'status'",
"]",
"=",
"False",
"ret",
"[",
"'message'",
"]",
"=",
"'resource selector {0} not found in parameters'",
".",
"format",
"(",
"resource_selector",
")",
"return",
"ret",
"# generate update script",
"cfg_file",
"=",
"salt",
".",
"utils",
".",
"files",
".",
"mkstemp",
"(",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fpopen",
"(",
"cfg_file",
",",
"'w+'",
",",
"mode",
"=",
"0o600",
")",
"as",
"fp_",
":",
"if",
"methode",
"in",
"[",
"'add'",
"]",
":",
"fp_",
".",
"write",
"(",
"\"add {0}\\n\"",
".",
"format",
"(",
"resource_type",
")",
")",
"elif",
"methode",
"in",
"[",
"'update'",
"]",
":",
"if",
"resource_selector",
":",
"value",
"=",
"kwargs",
"[",
"resource_selector",
"]",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
"or",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"_sanitize_value",
"(",
"value",
")",
"value",
"=",
"six",
".",
"text_type",
"(",
"value",
")",
".",
"lower",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
"else",
"six",
".",
"text_type",
"(",
"value",
")",
"fp_",
".",
"write",
"(",
"\"select {0} {1}={2}\\n\"",
".",
"format",
"(",
"resource_type",
",",
"resource_selector",
",",
"_sanitize_value",
"(",
"value",
")",
")",
")",
"else",
":",
"fp_",
".",
"write",
"(",
"\"select {0}\\n\"",
".",
"format",
"(",
"resource_type",
")",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"if",
"methode",
"in",
"[",
"'update'",
"]",
"and",
"k",
"==",
"resource_selector",
":",
"continue",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
"or",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"value",
"=",
"_sanitize_value",
"(",
"value",
")",
"value",
"=",
"six",
".",
"text_type",
"(",
"v",
")",
".",
"lower",
"(",
")",
"if",
"isinstance",
"(",
"v",
",",
"bool",
")",
"else",
"six",
".",
"text_type",
"(",
"v",
")",
"if",
"k",
"in",
"_zonecfg_resource_setters",
"[",
"resource_type",
"]",
":",
"fp_",
".",
"write",
"(",
"\"set {0}={1}\\n\"",
".",
"format",
"(",
"k",
",",
"_sanitize_value",
"(",
"value",
")",
")",
")",
"else",
":",
"fp_",
".",
"write",
"(",
"\"add {0} {1}\\n\"",
".",
"format",
"(",
"k",
",",
"_sanitize_value",
"(",
"value",
")",
")",
")",
"fp_",
".",
"write",
"(",
"\"end\\n\"",
")",
"# update property",
"if",
"cfg_file",
":",
"_dump_cfg",
"(",
"cfg_file",
")",
"res",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"'zonecfg -z {zone} -f {path}'",
".",
"format",
"(",
"zone",
"=",
"zone",
",",
"path",
"=",
"cfg_file",
",",
")",
")",
"ret",
"[",
"'status'",
"]",
"=",
"res",
"[",
"'retcode'",
"]",
"==",
"0",
"ret",
"[",
"'message'",
"]",
"=",
"res",
"[",
"'stdout'",
"]",
"if",
"ret",
"[",
"'status'",
"]",
"else",
"res",
"[",
"'stderr'",
"]",
"if",
"ret",
"[",
"'message'",
"]",
"==",
"''",
":",
"del",
"ret",
"[",
"'message'",
"]",
"else",
":",
"ret",
"[",
"'message'",
"]",
"=",
"_clean_message",
"(",
"ret",
"[",
"'message'",
"]",
")",
"# cleanup config file",
"if",
"__salt__",
"[",
"'file.file_exists'",
"]",
"(",
"cfg_file",
")",
":",
"__salt__",
"[",
"'file.remove'",
"]",
"(",
"cfg_file",
")",
"return",
"ret"
] |
internal resource handler
methode : string
add or update
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
**kwargs : string|int|...
resource properties
|
[
"internal",
"resource",
"hanlder"
] |
python
|
train
| 37.714286 |
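Since the observable effect of the zonecfg _resource entry above is the script it writes to a temp file and feeds to `zonecfg -z <zone> -f <path>`, a rendering of that text for an 'add' and an 'update' call may help. The resource names and properties below are made up, every value is emitted with 'set', and the real function additionally routes values through _sanitize_value and may emit 'add' lines depending on _zonecfg_resource_setters.

def render_script(methode, resource_type, resource_selector=None, **props):
    # Simplified rendering of the script body written by _resource().
    lines = []
    if methode == 'add':
        lines.append('add {0}'.format(resource_type))
    else:
        lines.append('select {0} {1}={2}'.format(
            resource_type, resource_selector, props[resource_selector]))
    for key, value in props.items():
        if methode == 'update' and key == resource_selector:
            continue
        lines.append('set {0}={1}'.format(key, value))
    lines.append('end')
    return '\n'.join(lines)

print(render_script('add', 'net', physical='net0'))
print(render_script('update', 'net', 'physical', physical='net0', address='10.0.0.5/24'))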
pypa/pipenv
|
pipenv/vendor/distlib/index.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/index.py#L75-L88
|
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
|
[
"def",
"read_configuration",
"(",
"self",
")",
":",
"# get distutils to do the work",
"c",
"=",
"self",
".",
"_get_pypirc_command",
"(",
")",
"c",
".",
"repository",
"=",
"self",
".",
"url",
"cfg",
"=",
"c",
".",
"_read_pypirc",
"(",
")",
"self",
".",
"username",
"=",
"cfg",
".",
"get",
"(",
"'username'",
")",
"self",
".",
"password",
"=",
"cfg",
".",
"get",
"(",
"'password'",
")",
"self",
".",
"realm",
"=",
"cfg",
".",
"get",
"(",
"'realm'",
",",
"'pypi'",
")",
"self",
".",
"url",
"=",
"cfg",
".",
"get",
"(",
"'repository'",
",",
"self",
".",
"url",
")"
] |
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
|
[
"Read",
"the",
"PyPI",
"access",
"configuration",
"as",
"supported",
"by",
"distutils",
"getting",
"PyPI",
"to",
"do",
"the",
"actual",
"work",
".",
"This",
"populates",
"username",
"password",
"realm",
"and",
"url",
"attributes",
"from",
"the",
"configuration",
"."
] |
python
|
train
| 42 |
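For the read_configuration entry above, the populated attributes typically originate in a ~/.pypirc file like the inline sample below. The sketch parses a copy of it with configparser rather than going through distutils, so it only approximates what _read_pypirc() returns; the credentials and repository URL are placeholders.

import configparser, io

PYPIRC = """
[pypi]
repository = https://upload.pypi.org/legacy/
username = alice
password = s3cret
"""

cfg = configparser.ConfigParser()
cfg.read_file(io.StringIO(PYPIRC))
section = cfg['pypi']
print(section.get('username'), section.get('repository'))
# -> alice https://upload.pypi.org/legacy/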
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/core/prompts.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/prompts.py#L209-L217
|
def cwd_filt(depth):
"""Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned."""
cwd = os.getcwdu().replace(HOME,"~")
out = os.sep.join(cwd.split(os.sep)[-depth:])
return out or os.sep
|
[
"def",
"cwd_filt",
"(",
"depth",
")",
":",
"cwd",
"=",
"os",
".",
"getcwdu",
"(",
")",
".",
"replace",
"(",
"HOME",
",",
"\"~\"",
")",
"out",
"=",
"os",
".",
"sep",
".",
"join",
"(",
"cwd",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"-",
"depth",
":",
"]",
")",
"return",
"out",
"or",
"os",
".",
"sep"
] |
Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned.
|
[
"Return",
"the",
"last",
"depth",
"elements",
"of",
"the",
"current",
"working",
"directory",
"."
] |
python
|
test
| 32 |
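A quick illustration of the truncation performed by the cwd_filt entry above, on a fixed POSIX-style path instead of os.getcwdu() (which is Python 2 only); the helper name and path are illustrative.

import os

def cwd_filt_demo(cwd, depth):
    # Same slicing as cwd_filt(), but on an explicit path string.
    out = os.sep.join(cwd.split(os.sep)[-depth:])
    return out or os.sep

print(cwd_filt_demo('/home/alice/projects/demo', 2))   # projects/demo
print(cwd_filt_demo('/home/alice/projects/demo', 0))   # full path: the slice [-0:] keeps everything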
user-cont/conu
|
conu/backend/docker/utils.py
|
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/utils.py#L13-L62
|
def inspect_to_metadata(metadata_object, inspect_data):
"""
process data from `docker inspect` and update provided metadata object
:param metadata_object: instance of Metadata
    :param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()`
:return: instance of Metadata
"""
identifier = graceful_get(inspect_data, 'Id')
if identifier:
if ":" in identifier:
# format of image name from docker inspect:
# sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129
metadata_object.identifier = identifier.split(':')[1]
else:
# container
metadata_object.identifier = identifier
# format of Environment Variables from docker inspect:
# ['DISTTAG=f26container', 'FGC=f26']
raw_env_vars = graceful_get(inspect_data, "Config", "Env") or []
if raw_env_vars:
metadata_object.env_variables = {}
for env_variable in raw_env_vars:
splits = env_variable.split("=", 1)
name = splits[0]
value = splits[1] if len(splits) > 1 else None
if value is not None:
metadata_object.env_variables.update({name: value})
raw_exposed_ports = graceful_get(inspect_data, "Config", "ExposedPorts")
if raw_exposed_ports:
metadata_object.exposed_ports = list(raw_exposed_ports.keys())
# specific to images
raw_repo_tags = graceful_get(inspect_data, 'RepoTags')
if raw_repo_tags:
metadata_object.name = raw_repo_tags[0]
metadata_object.labels = graceful_get(inspect_data, 'Config', 'Labels')
metadata_object.command = graceful_get(inspect_data, 'Config', 'Cmd')
metadata_object.creation_timestamp = inspect_data.get('Created', None)
# specific to images
metadata_object.image_names = inspect_data.get('RepoTags', None)
# specific to images
digests = inspect_data.get("RepoDigests", None)
if digests:
metadata_object.repo_digests = digests
metadata_object.digest = digests[0]
return metadata_object
|
[
"def",
"inspect_to_metadata",
"(",
"metadata_object",
",",
"inspect_data",
")",
":",
"identifier",
"=",
"graceful_get",
"(",
"inspect_data",
",",
"'Id'",
")",
"if",
"identifier",
":",
"if",
"\":\"",
"in",
"identifier",
":",
"# format of image name from docker inspect:",
"# sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129",
"metadata_object",
".",
"identifier",
"=",
"identifier",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
"else",
":",
"# container",
"metadata_object",
".",
"identifier",
"=",
"identifier",
"# format of Environment Variables from docker inspect:",
"# ['DISTTAG=f26container', 'FGC=f26']",
"raw_env_vars",
"=",
"graceful_get",
"(",
"inspect_data",
",",
"\"Config\"",
",",
"\"Env\"",
")",
"or",
"[",
"]",
"if",
"raw_env_vars",
":",
"metadata_object",
".",
"env_variables",
"=",
"{",
"}",
"for",
"env_variable",
"in",
"raw_env_vars",
":",
"splits",
"=",
"env_variable",
".",
"split",
"(",
"\"=\"",
",",
"1",
")",
"name",
"=",
"splits",
"[",
"0",
"]",
"value",
"=",
"splits",
"[",
"1",
"]",
"if",
"len",
"(",
"splits",
")",
">",
"1",
"else",
"None",
"if",
"value",
"is",
"not",
"None",
":",
"metadata_object",
".",
"env_variables",
".",
"update",
"(",
"{",
"name",
":",
"value",
"}",
")",
"raw_exposed_ports",
"=",
"graceful_get",
"(",
"inspect_data",
",",
"\"Config\"",
",",
"\"ExposedPorts\"",
")",
"if",
"raw_exposed_ports",
":",
"metadata_object",
".",
"exposed_ports",
"=",
"list",
"(",
"raw_exposed_ports",
".",
"keys",
"(",
")",
")",
"# specific to images",
"raw_repo_tags",
"=",
"graceful_get",
"(",
"inspect_data",
",",
"'RepoTags'",
")",
"if",
"raw_repo_tags",
":",
"metadata_object",
".",
"name",
"=",
"raw_repo_tags",
"[",
"0",
"]",
"metadata_object",
".",
"labels",
"=",
"graceful_get",
"(",
"inspect_data",
",",
"'Config'",
",",
"'Labels'",
")",
"metadata_object",
".",
"command",
"=",
"graceful_get",
"(",
"inspect_data",
",",
"'Config'",
",",
"'Cmd'",
")",
"metadata_object",
".",
"creation_timestamp",
"=",
"inspect_data",
".",
"get",
"(",
"'Created'",
",",
"None",
")",
"# specific to images",
"metadata_object",
".",
"image_names",
"=",
"inspect_data",
".",
"get",
"(",
"'RepoTags'",
",",
"None",
")",
"# specific to images",
"digests",
"=",
"inspect_data",
".",
"get",
"(",
"\"RepoDigests\"",
",",
"None",
")",
"if",
"digests",
":",
"metadata_object",
".",
"repo_digests",
"=",
"digests",
"metadata_object",
".",
"digest",
"=",
"digests",
"[",
"0",
"]",
"return",
"metadata_object"
] |
process data from `docker inspect` and update provided metadata object
:param metadata_object: instance of Metadata
:param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()`
:return: instance of Metadata
|
[
"process",
"data",
"from",
"docker",
"inspect",
"and",
"update",
"provided",
"metadata",
"object"
] |
python
|
train
| 40.9 |
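The shape of the `docker inspect` payload drives most of the branches in the inspect_to_metadata entry above. Below is a trimmed, hypothetical payload (the Id and Env values echo the comments in the source; the exposed port is invented) with the two fiddlier extractions, environment variables and exposed ports, done standalone.

inspect_data = {
    'Id': 'sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129',
    'Config': {
        'Env': ['DISTTAG=f26container', 'FGC=f26', 'NOVALUE'],
        'ExposedPorts': {'8080/tcp': {}},
    },
}

identifier = inspect_data['Id'].split(':')[1]      # strip the 'sha256:' prefix

env_variables = {}
for env_variable in inspect_data['Config']['Env']:
    splits = env_variable.split('=', 1)
    if len(splits) > 1:                            # 'NOVALUE' has no '=', so it is skipped
        env_variables[splits[0]] = splits[1]

exposed_ports = list(inspect_data['Config']['ExposedPorts'])
print(identifier[:12], env_variables, exposed_ports)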
wummel/linkchecker
|
linkcheck/colorama.py
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/colorama.py#L82-L90
|
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
"""Get console screen buffer info object."""
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
if not success:
raise WinError()
return csbi
|
[
"def",
"GetConsoleScreenBufferInfo",
"(",
"stream_id",
"=",
"STDOUT",
")",
":",
"handle",
"=",
"handles",
"[",
"stream_id",
"]",
"csbi",
"=",
"CONSOLE_SCREEN_BUFFER_INFO",
"(",
")",
"success",
"=",
"windll",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
"(",
"handle",
",",
"byref",
"(",
"csbi",
")",
")",
"if",
"not",
"success",
":",
"raise",
"WinError",
"(",
")",
"return",
"csbi"
] |
Get console screen buffer info object.
|
[
"Get",
"console",
"screen",
"buffer",
"info",
"object",
"."
] |
python
|
train
| 34.444444 |
eallik/spinoff
|
spinoff/util/lockfile.py
|
https://github.com/eallik/spinoff/blob/06b00d6b86c7422c9cb8f9a4b2915906e92b7d52/spinoff/util/lockfile.py#L11-L31
|
def lock_file(path, maxdelay=.1, lock_cls=LockFile, timeout=10.0):
"""Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
"""
lock = lock_cls(path)
max_t = time.time() + timeout
while True:
if time.time() >= max_t:
raise LockTimeout("Timeout waiting to acquire lock for %s" % (path,)) # same exception messages as in lockfile
try:
lock.acquire(timeout=0)
except AlreadyLocked:
sleep(maxdelay)
else:
try:
yield lock
break
finally:
lock.release()
|
[
"def",
"lock_file",
"(",
"path",
",",
"maxdelay",
"=",
".1",
",",
"lock_cls",
"=",
"LockFile",
",",
"timeout",
"=",
"10.0",
")",
":",
"lock",
"=",
"lock_cls",
"(",
"path",
")",
"max_t",
"=",
"time",
".",
"time",
"(",
")",
"+",
"timeout",
"while",
"True",
":",
"if",
"time",
".",
"time",
"(",
")",
">=",
"max_t",
":",
"raise",
"LockTimeout",
"(",
"\"Timeout waiting to acquire lock for %s\"",
"%",
"(",
"path",
",",
")",
")",
"# same exception messages as in lockfile",
"try",
":",
"lock",
".",
"acquire",
"(",
"timeout",
"=",
"0",
")",
"except",
"AlreadyLocked",
":",
"sleep",
"(",
"maxdelay",
")",
"else",
":",
"try",
":",
"yield",
"lock",
"break",
"finally",
":",
"lock",
".",
"release",
"(",
")"
] |
Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
|
[
"Cooperative",
"file",
"lock",
".",
"Uses",
"lockfile",
".",
"LockFile",
"polling",
"under",
"the",
"hood",
"."
] |
python
|
train
| 31.857143 |
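The lock_file generator above yields exactly once, which is the shape contextlib.contextmanager consumes. Because running it verbatim requires the lockfile package, the sketch below is a self-contained analogue of the same acquire/poll/yield/release pattern built on threading.Lock; names, delays, and the TimeoutError are illustrative, not part of the original module.

import threading
import time
from contextlib import contextmanager

@contextmanager
def polling_lock(lock, maxdelay=0.1, timeout=10.0):
    # Same cooperative polling shape as lock_file(), but against a plain
    # threading.Lock so the example runs without the lockfile package.
    deadline = time.time() + timeout
    while not lock.acquire(blocking=False):
        if time.time() >= deadline:
            raise TimeoutError('timed out waiting for lock')
        time.sleep(maxdelay)
    try:
        yield lock
    finally:
        lock.release()

with polling_lock(threading.Lock(), maxdelay=0.01, timeout=1.0):
    print('inside the critical section')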
KE-works/pykechain
|
pykechain/models/activity.py
|
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L168-L187
|
def configure(self, inputs, outputs):
"""Configure activity input and output.
    You need to provide a list of input and output :class:`Property`. Does not work with lists of property IDs.
:param inputs: iterable of input property models
:type inputs: list(:class:`Property`)
:param outputs: iterable of output property models
:type outputs: list(:class:`Property`)
:raises APIError: when unable to configure the activity
"""
url = self._client._build_url('activity', activity_id=self.id)
r = self._client._request('PUT', url, params={'select_action': 'update_associations'}, json={
'inputs': [p.id for p in inputs],
'outputs': [p.id for p in outputs]
})
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not configure activity")
|
[
"def",
"configure",
"(",
"self",
",",
"inputs",
",",
"outputs",
")",
":",
"url",
"=",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'activity'",
",",
"activity_id",
"=",
"self",
".",
"id",
")",
"r",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'PUT'",
",",
"url",
",",
"params",
"=",
"{",
"'select_action'",
":",
"'update_associations'",
"}",
",",
"json",
"=",
"{",
"'inputs'",
":",
"[",
"p",
".",
"id",
"for",
"p",
"in",
"inputs",
"]",
",",
"'outputs'",
":",
"[",
"p",
".",
"id",
"for",
"p",
"in",
"outputs",
"]",
"}",
")",
"if",
"r",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"ok",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not configure activity\"",
")"
] |
Configure activity input and output.
You need to provide a list of input and output :class:`Property`. Does not work with lists of property IDs.
:param inputs: iterable of input property models
:type inputs: list(:class:`Property`)
:param outputs: iterable of output property models
:type outputs: list(:class:`Property`)
:raises APIError: when unable to configure the activity
|
[
"Configure",
"activity",
"input",
"and",
"output",
"."
] |
python
|
train
| 43.7 |
CityOfZion/neo-python
|
neo/Core/State/StateDescriptor.py
|
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/StateDescriptor.py#L30-L38
|
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
return s.uint8 + GetVarSize(self.Key) + GetVarSize(self.Field) + GetVarSize(self.Value)
|
[
"def",
"Size",
"(",
"self",
")",
":",
"return",
"s",
".",
"uint8",
"+",
"GetVarSize",
"(",
"self",
".",
"Key",
")",
"+",
"GetVarSize",
"(",
"self",
".",
"Field",
")",
"+",
"GetVarSize",
"(",
"self",
".",
"Value",
")"
] |
Get the total size in bytes of the object.
Returns:
int: size.
|
[
"Get",
"the",
"total",
"size",
"in",
"bytes",
"of",
"the",
"object",
"."
] |
python
|
train
| 24.444444 |
cjdrake/pyeda
|
pyeda/boolalg/bdd.py
|
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bdd.py#L549-L557
|
def _dfs_postorder(node, visited):
"""Iterate through nodes in DFS post-order."""
if node.lo is not None:
yield from _dfs_postorder(node.lo, visited)
if node.hi is not None:
yield from _dfs_postorder(node.hi, visited)
if node not in visited:
visited.add(node)
yield node
|
[
"def",
"_dfs_postorder",
"(",
"node",
",",
"visited",
")",
":",
"if",
"node",
".",
"lo",
"is",
"not",
"None",
":",
"yield",
"from",
"_dfs_postorder",
"(",
"node",
".",
"lo",
",",
"visited",
")",
"if",
"node",
".",
"hi",
"is",
"not",
"None",
":",
"yield",
"from",
"_dfs_postorder",
"(",
"node",
".",
"hi",
",",
"visited",
")",
"if",
"node",
"not",
"in",
"visited",
":",
"visited",
".",
"add",
"(",
"node",
")",
"yield",
"node"
] |
Iterate through nodes in DFS post-order.
|
[
"Iterate",
"through",
"nodes",
"in",
"DFS",
"post",
"-",
"order",
"."
] |
python
|
train
| 34.444444 |
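To see the traversal order produced by the _dfs_postorder entry above, the sketch below builds a tiny three-node diamond out of a throwaway Node class (real BDD nodes carry more state) and walks it with the same children-first logic.

class Node:
    def __init__(self, name, lo=None, hi=None):
        self.name, self.lo, self.hi = name, lo, hi

def dfs_postorder(node, visited):
    # low child first, then high child, then the node itself; each node emitted once
    if node.lo is not None:
        yield from dfs_postorder(node.lo, visited)
    if node.hi is not None:
        yield from dfs_postorder(node.hi, visited)
    if node not in visited:
        visited.add(node)
        yield node

zero, one = Node('0'), Node('1')
root = Node('x', lo=zero, hi=one)
print([n.name for n in dfs_postorder(root, set())])   # ['0', '1', 'x']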
google/grumpy
|
third_party/stdlib/re.py
|
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/re.py#L160-L169
|
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, flags).subn(repl, string, count)
|
[
"def",
"subn",
"(",
"pattern",
",",
"repl",
",",
"string",
",",
"count",
"=",
"0",
",",
"flags",
"=",
"0",
")",
":",
"return",
"_compile",
"(",
"pattern",
",",
"flags",
")",
".",
"subn",
"(",
"repl",
",",
"string",
",",
"count",
")"
] |
Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used.
|
[
"Return",
"a",
"2",
"-",
"tuple",
"containing",
"(",
"new_string",
"number",
")",
".",
"new_string",
"is",
"the",
"string",
"obtained",
"by",
"replacing",
"the",
"leftmost",
"non",
"-",
"overlapping",
"occurrences",
"of",
"the",
"pattern",
"in",
"the",
"source",
"string",
"by",
"the",
"replacement",
"repl",
".",
"number",
"is",
"the",
"number",
"of",
"substitutions",
"that",
"were",
"made",
".",
"repl",
"can",
"be",
"either",
"a",
"string",
"or",
"a",
"callable",
";",
"if",
"a",
"string",
"backslash",
"escapes",
"in",
"it",
"are",
"processed",
".",
"If",
"it",
"is",
"a",
"callable",
"it",
"s",
"passed",
"the",
"match",
"object",
"and",
"must",
"return",
"a",
"replacement",
"string",
"to",
"be",
"used",
"."
] |
python
|
valid
| 58.9 |
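A two-line illustration of the subn entry above, showing the documented 2-tuple of (new_string, number) with both a string and a callable replacement.

import re

print(re.subn(r'\d+', '#', 'a1b22c333'))                              # ('a#b#c#', 3)
print(re.subn(r'\d+', lambda m: str(len(m.group())), 'a1b22c333'))    # ('a1b2c3', 3)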
apache/airflow
|
airflow/www/security.py
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L500-L560
|
def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
        {'can_dag_read'})
:type access_control: dict
"""
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, dag_id)
if not dag_perm:
self.log.info(
"Creating new permission '%s' on view '%s'",
perm_name, dag_id
)
dag_perm = self.add_permission_view_menu(perm_name, dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role
if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission, dag_id, role.name
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(
dag_id,
rolename))
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_id,
(perms - self.DAG_PERMS),
self.DAG_PERMS))
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm)
|
[
"def",
"_sync_dag_view_permissions",
"(",
"self",
",",
"dag_id",
",",
"access_control",
")",
":",
"def",
"_get_or_create_dag_permission",
"(",
"perm_name",
")",
":",
"dag_perm",
"=",
"self",
".",
"find_permission_view_menu",
"(",
"perm_name",
",",
"dag_id",
")",
"if",
"not",
"dag_perm",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Creating new permission '%s' on view '%s'\"",
",",
"perm_name",
",",
"dag_id",
")",
"dag_perm",
"=",
"self",
".",
"add_permission_view_menu",
"(",
"perm_name",
",",
"dag_id",
")",
"return",
"dag_perm",
"def",
"_revoke_stale_permissions",
"(",
"dag_view",
")",
":",
"existing_dag_perms",
"=",
"self",
".",
"find_permissions_view_menu",
"(",
"dag_view",
")",
"for",
"perm",
"in",
"existing_dag_perms",
":",
"non_admin_roles",
"=",
"[",
"role",
"for",
"role",
"in",
"perm",
".",
"role",
"if",
"role",
".",
"name",
"!=",
"'Admin'",
"]",
"for",
"role",
"in",
"non_admin_roles",
":",
"target_perms_for_role",
"=",
"access_control",
".",
"get",
"(",
"role",
".",
"name",
",",
"{",
"}",
")",
"if",
"perm",
".",
"permission",
".",
"name",
"not",
"in",
"target_perms_for_role",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Revoking '%s' on DAG '%s' for role '%s'\"",
",",
"perm",
".",
"permission",
",",
"dag_id",
",",
"role",
".",
"name",
")",
"self",
".",
"del_permission_role",
"(",
"role",
",",
"perm",
")",
"dag_view",
"=",
"self",
".",
"find_view_menu",
"(",
"dag_id",
")",
"if",
"dag_view",
":",
"_revoke_stale_permissions",
"(",
"dag_view",
")",
"for",
"rolename",
",",
"perms",
"in",
"access_control",
".",
"items",
"(",
")",
":",
"role",
"=",
"self",
".",
"find_role",
"(",
"rolename",
")",
"if",
"not",
"role",
":",
"raise",
"AirflowException",
"(",
"\"The access_control mapping for DAG '{}' includes a role \"",
"\"named '{}', but that role does not exist\"",
".",
"format",
"(",
"dag_id",
",",
"rolename",
")",
")",
"perms",
"=",
"set",
"(",
"perms",
")",
"invalid_perms",
"=",
"perms",
"-",
"self",
".",
"DAG_PERMS",
"if",
"invalid_perms",
":",
"raise",
"AirflowException",
"(",
"\"The access_control map for DAG '{}' includes the following \"",
"\"invalid permissions: {}; The set of valid permissions \"",
"\"is: {}\"",
".",
"format",
"(",
"dag_id",
",",
"(",
"perms",
"-",
"self",
".",
"DAG_PERMS",
")",
",",
"self",
".",
"DAG_PERMS",
")",
")",
"for",
"perm_name",
"in",
"perms",
":",
"dag_perm",
"=",
"_get_or_create_dag_permission",
"(",
"perm_name",
")",
"self",
".",
"add_permission_role",
"(",
"role",
",",
"dag_perm",
")"
] |
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
    {'can_dag_read'})
:type access_control: dict
|
[
"Set",
"the",
"access",
"policy",
"on",
"the",
"given",
"DAG",
"s",
"ViewModel",
"."
] |
python
|
test
| 43.803279 |
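The access_control argument in the _sync_dag_view_permissions entry above is just a role-to-permission-set mapping. The sketch below shows its expected shape and replays the validation step against a stand-in DAG_PERMS set; the role names and the permission set are hypothetical, the real values live on the Airflow security manager.

DAG_PERMS = {'can_dag_read', 'can_dag_edit'}    # hypothetical stand-in

access_control = {
    'analysts': {'can_dag_read'},
    'operators': {'can_dag_read', 'can_dag_edit'},
}

for rolename, perms in access_control.items():
    invalid = set(perms) - DAG_PERMS
    if invalid:
        raise ValueError('invalid permissions for role %s: %s' % (rolename, invalid))
print('access_control validated for roles:', sorted(access_control))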
noahbenson/neuropythy
|
neuropythy/geometry/mesh.py
|
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/mesh.py#L4160-L4220
|
def to_mesh(obj):
'''
to_mesh(obj) yields a Mesh object that is equivalent to obj or identical to obj if obj is itself
a mesh object.
The following objects can be converted into meshes:
* a mesh object
* a tuple (coords, faces) where coords is a coordinate matrix and faces is a matrix of
coordinate indices that make-up the triangles
* a tuple (faces, coords) where faces is a triangle matrix and coords is a coordinate matrix;
note that if neither matrix is of integer type, then the latter ordering (which is the same
as that accepted by the mesh() function) is assumed.
* a tuple (topo, regname) specifying the registration name to use (note that regname may
optionally start with 'reg:' which is ignored).
* a tuple (cortex, surfname) specifying the surface name to use. Note that surfname may
optionally start with 'surf:' or 'reg:', both of which are used only to determine whether
to lookup a registration or a surface. If no 'surf:' or 'reg:' is given as a prefix, then
a surface is tried first followed by a registration. The surface name 'sphere' is
automatically translated to 'reg:native' and any surface name of the form '<name>_sphere' is
automatically translated to 'reg:<name>'.
* a tuple (topo/cortex, mesh) results in the mesh being returned.
* a tuple (mesh, string) or (mesh, None) results in mesh with the second argument ignored.
* a tuple (mesh1, mesh2) results in mesh2 with mesh1 ignored.
Note that some of the behavior described above is desirable because of a common use case of the
to_mesh function. When another function f accepts as arguments both a hemi/topology object as
well as an optional surface argument, the purpose is often to obtain a specific mesh from the
topology but to allow the user to specify which or to pass their own mesh.
'''
if is_mesh(obj): return obj
elif pimms.is_vector(obj) and len(obj) == 2:
(a,b) = obj
if pimms.is_matrix(a, 'int') and pimms.is_matrix(b, 'real'): return mesh(a, b)
elif pimms.is_matrix(b, 'int') and pimms.is_matrix(a, 'real'): return mesh(b, a)
elif is_mesh(a) and (b is None or pimms.is_str(b)): return a
elif is_mesh(a) and is_mesh(b): return b
elif is_topo(a):
from neuropythy import is_cortex
if is_mesh(b): return b
elif not pimms.is_str(b): raise ValueError('to_mesh: non-str surf/reg name: %s' % (b,))
(b0, lb) = (b, b.lower())
# check for translations of the name first:
s = b[4:] if lb.startswith('reg:') else b[5:] if lb.startswith('surf:') else b
ls = s.lower()
if ls.endswith('_sphere'): b = ('reg:' + s[:-7])
elif ls == 'sphere': b = 'reg:native'
lb = b.lower()
# we try surfaces first (if a is a cortex and has surfaces)
if is_cortex(a) and not lb.startswith('reg:'):
(s,ls) = (b[5:],lb[5:]) if lb.startswith('surf:') else (b,lb)
if s in a.surfaces: return a.surfaces[s]
elif ls in a.surfaces: return a.surfaces[ls]
# then check registrations
if not lb.startswith('surf:'):
(s,ls) = (b[4:],lb[4:]) if lb.startswith('reg:') else (b,lb)
if s in a.registrations: return a.registrations[s]
elif ls in a.registrations: return a.registrations[ls]
# nothing found
raise ValueError('to_mesh: mesh named "%s" not found in topology %s' % (b0, a))
else: raise ValueError('to_mesh: could not deduce meaning of row: %s' % (obj,))
    else: raise ValueError('Could not deduce how object can be converted into a mesh')
|
[
"def",
"to_mesh",
"(",
"obj",
")",
":",
"if",
"is_mesh",
"(",
"obj",
")",
":",
"return",
"obj",
"elif",
"pimms",
".",
"is_vector",
"(",
"obj",
")",
"and",
"len",
"(",
"obj",
")",
"==",
"2",
":",
"(",
"a",
",",
"b",
")",
"=",
"obj",
"if",
"pimms",
".",
"is_matrix",
"(",
"a",
",",
"'int'",
")",
"and",
"pimms",
".",
"is_matrix",
"(",
"b",
",",
"'real'",
")",
":",
"return",
"mesh",
"(",
"a",
",",
"b",
")",
"elif",
"pimms",
".",
"is_matrix",
"(",
"b",
",",
"'int'",
")",
"and",
"pimms",
".",
"is_matrix",
"(",
"a",
",",
"'real'",
")",
":",
"return",
"mesh",
"(",
"b",
",",
"a",
")",
"elif",
"is_mesh",
"(",
"a",
")",
"and",
"(",
"b",
"is",
"None",
"or",
"pimms",
".",
"is_str",
"(",
"b",
")",
")",
":",
"return",
"a",
"elif",
"is_mesh",
"(",
"a",
")",
"and",
"is_mesh",
"(",
"b",
")",
":",
"return",
"b",
"elif",
"is_topo",
"(",
"a",
")",
":",
"from",
"neuropythy",
"import",
"is_cortex",
"if",
"is_mesh",
"(",
"b",
")",
":",
"return",
"b",
"elif",
"not",
"pimms",
".",
"is_str",
"(",
"b",
")",
":",
"raise",
"ValueError",
"(",
"'to_mesh: non-str surf/reg name: %s'",
"%",
"(",
"b",
",",
")",
")",
"(",
"b0",
",",
"lb",
")",
"=",
"(",
"b",
",",
"b",
".",
"lower",
"(",
")",
")",
"# check for translations of the name first:",
"s",
"=",
"b",
"[",
"4",
":",
"]",
"if",
"lb",
".",
"startswith",
"(",
"'reg:'",
")",
"else",
"b",
"[",
"5",
":",
"]",
"if",
"lb",
".",
"startswith",
"(",
"'surf:'",
")",
"else",
"b",
"ls",
"=",
"s",
".",
"lower",
"(",
")",
"if",
"ls",
".",
"endswith",
"(",
"'_sphere'",
")",
":",
"b",
"=",
"(",
"'reg:'",
"+",
"s",
"[",
":",
"-",
"7",
"]",
")",
"elif",
"ls",
"==",
"'sphere'",
":",
"b",
"=",
"'reg:native'",
"lb",
"=",
"b",
".",
"lower",
"(",
")",
"# we try surfaces first (if a is a cortex and has surfaces)",
"if",
"is_cortex",
"(",
"a",
")",
"and",
"not",
"lb",
".",
"startswith",
"(",
"'reg:'",
")",
":",
"(",
"s",
",",
"ls",
")",
"=",
"(",
"b",
"[",
"5",
":",
"]",
",",
"lb",
"[",
"5",
":",
"]",
")",
"if",
"lb",
".",
"startswith",
"(",
"'surf:'",
")",
"else",
"(",
"b",
",",
"lb",
")",
"if",
"s",
"in",
"a",
".",
"surfaces",
":",
"return",
"a",
".",
"surfaces",
"[",
"s",
"]",
"elif",
"ls",
"in",
"a",
".",
"surfaces",
":",
"return",
"a",
".",
"surfaces",
"[",
"ls",
"]",
"# then check registrations",
"if",
"not",
"lb",
".",
"startswith",
"(",
"'surf:'",
")",
":",
"(",
"s",
",",
"ls",
")",
"=",
"(",
"b",
"[",
"4",
":",
"]",
",",
"lb",
"[",
"4",
":",
"]",
")",
"if",
"lb",
".",
"startswith",
"(",
"'reg:'",
")",
"else",
"(",
"b",
",",
"lb",
")",
"if",
"s",
"in",
"a",
".",
"registrations",
":",
"return",
"a",
".",
"registrations",
"[",
"s",
"]",
"elif",
"ls",
"in",
"a",
".",
"registrations",
":",
"return",
"a",
".",
"registrations",
"[",
"ls",
"]",
"# nothing found",
"raise",
"ValueError",
"(",
"'to_mesh: mesh named \"%s\" not found in topology %s'",
"%",
"(",
"b0",
",",
"a",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'to_mesh: could not deduce meaning of row: %s'",
"%",
"(",
"obj",
",",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Could not deduce how object can be convertex into a mesh'",
")"
] |
to_mesh(obj) yields a Mesh object that is equivalent to obj or identical to obj if obj is itself
a mesh object.
The following objects can be converted into meshes:
* a mesh object
* a tuple (coords, faces) where coords is a coordinate matrix and faces is a matrix of
coordinate indices that make-up the triangles
* a tuple (faces, coords) where faces is a triangle matrix and coords is a coordinate matrix;
note that if neither matrix is of integer type, then the latter ordering (which is the same
as that accepted by the mesh() function) is assumed.
* a tuple (topo, regname) specifying the registration name to use (note that regname may
optionally start with 'reg:' which is ignored).
* a tuple (cortex, surfname) specifying the surface name to use. Note that surfname may
optionally start with 'surf:' or 'reg:', both of which are used only to determine whether
to lookup a registration or a surface. If no 'surf:' or 'reg:' is given as a prefix, then
a surface is tried first followed by a registration. The surface name 'sphere' is
automatically translated to 'reg:native' and any surface name of the form '<name>_sphere' is
automatically translated to 'reg:<name>'.
* a tuple (topo/cortex, mesh) results in the mesh being returned.
* a tuple (mesh, string) or (mesh, None) results in mesh with the second argument ignored.
* a tuple (mesh1, mesh2) results in mesh2 with mesh1 ignored.
Note that some of the behavior described above is desirable because of a common use case of the
to_mesh function. When another function f accepts as arguments both a hemi/topology object as
well as an optional surface argument, the purpose is often to obtain a specific mesh from the
topology but to allow the user to specify which or to pass their own mesh.
|
[
"to_mesh",
"(",
"obj",
")",
"yields",
"a",
"Mesh",
"object",
"that",
"is",
"equivalent",
"to",
"obj",
"or",
"identical",
"to",
"obj",
"if",
"obj",
"is",
"itself",
"a",
"mesh",
"object",
"."
] |
python
|
train
| 61.770492 |
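Of the tuple forms accepted by the to_mesh entry above, the (coords, faces) versus (faces, coords) ordering is decided by dtype: whichever matrix is integer-typed is taken as the triangle-index matrix. The tiny NumPy check below mirrors that rule with made-up arrays; pimms and neuropythy themselves are not imported.

import numpy as np

coords = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # real-valued -> treated as coordinates
faces = np.array([[0, 1, 2]])                              # integer-typed -> treated as triangle indices

def first_is_faces(a, b):
    # mirrors the dtype-based ordering test in to_mesh(): the integer matrix is the face matrix
    return np.issubdtype(a.dtype, np.integer) and not np.issubdtype(b.dtype, np.integer)

print(first_is_faces(faces, coords))   # True  -> interpreted as (faces, coords)
print(first_is_faces(coords, faces))   # False -> interpreted as (coords, faces)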
boriel/zxbasic
|
arch/zx48k/backend/__pload.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__pload.py#L148-L158
|
def _pload32(ins):
""" Loads from stack pointer (SP) + X, being
    X 2nd parameter.
1st operand must be a SIGNED integer.
2nd operand cannot be an immediate nor an address.
"""
output = _pload(ins.quad[2], 4)
output.append('push de')
output.append('push hl')
return output
|
[
"def",
"_pload32",
"(",
"ins",
")",
":",
"output",
"=",
"_pload",
"(",
"ins",
".",
"quad",
"[",
"2",
"]",
",",
"4",
")",
"output",
".",
"append",
"(",
"'push de'",
")",
"output",
".",
"append",
"(",
"'push hl'",
")",
"return",
"output"
] |
Loads from stack pointer (SP) + X, being
X 2nd parameter.
1st operand must be a SIGNED integer.
2nd operand cannot be an immediate nor an address.
|
[
"Loads",
"from",
"stack",
"pointer",
"(",
"SP",
")",
"+",
"X",
"being",
"X",
"2st",
"parameter",
"."
] |
python
|
train
| 26.909091 |
rene-aguirre/pywinusb
|
pywinusb/hid/core.py
|
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L395-L507
|
def open(self, output_only = False, shared = True):
"""Open HID device and obtain 'Collection Information'.
It effectively prepares the HidDevice object for reading and writing
"""
if self.is_opened():
raise HIDError("Device already opened")
sharing_flags = 0
if shared:
sharing_flags = winapi.FILE_SHARE_READ | winapi.FILE_SHARE_WRITE
hid_handle = winapi.CreateFile(
self.device_path,
winapi.GENERIC_READ | winapi.GENERIC_WRITE,
sharing_flags,
None, # no security
winapi.OPEN_EXISTING,
winapi.FILE_ATTRIBUTE_NORMAL | winapi.FILE_FLAG_OVERLAPPED,
0 )
if not hid_handle or hid_handle == INVALID_HANDLE_VALUE:
raise HIDError("Error opening HID device: %s\n"%self.product_name)
#get pre parsed data
ptr_preparsed_data = ctypes.c_void_p()
if not hid_dll.HidD_GetPreparsedData(int(hid_handle),
byref(ptr_preparsed_data)):
winapi.CloseHandle(int(hid_handle))
raise HIDError("Failure to get HID pre parsed data")
self.ptr_preparsed_data = ptr_preparsed_data
self.hid_handle = hid_handle
#get top level capabilities
self.hid_caps = winapi.HIDP_CAPS()
HidStatus( hid_dll.HidP_GetCaps(ptr_preparsed_data,
byref(self.hid_caps)) )
#proceed with button capabilities
caps_length = c_ulong()
all_items = [\
(HidP_Input, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_input_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Input, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_input_value_caps,
hid_dll.HidP_GetValueCaps
),
(HidP_Output, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_output_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Output, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_output_value_caps,
hid_dll.HidP_GetValueCaps
),
(HidP_Feature, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_feature_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Feature, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_feature_value_caps,
hid_dll.HidP_GetValueCaps
),
]
for report_kind, struct_kind, max_items, get_control_caps in all_items:
if not int(max_items):
continue #nothing here
#create storage for control/data
ctrl_array_type = struct_kind * max_items
ctrl_array_struct = ctrl_array_type()
#target max size for API function
caps_length.value = max_items
HidStatus( get_control_caps(\
report_kind,
byref(ctrl_array_struct),
byref(caps_length),
ptr_preparsed_data) )
#keep reference of usages
for idx in range(caps_length.value):
usage_item = HidPUsageCaps( ctrl_array_struct[idx] )
#by report type
if report_kind not in self.usages_storage:
self.usages_storage[report_kind] = list()
self.usages_storage[report_kind].append( usage_item )
#also add report_id to known reports set
if report_kind not in self.report_set:
self.report_set[report_kind] = set()
self.report_set[report_kind].add( usage_item.report_id )
del ctrl_array_struct
del ctrl_array_type
# now is the time to consider the device opened, as report
# handling threads enforce it
self.__open_status = True
#now prepare the input report handler
self.__input_report_templates = dict()
if not output_only and self.hid_caps.input_report_byte_length and \
HidP_Input in self.report_set:
#first make templates for easy parsing input reports
for report_id in self.report_set[HidP_Input]:
self.__input_report_templates[report_id] = \
HidReport( self, HidP_Input, report_id )
#prepare input reports handlers
self._input_report_queue = HidDevice.InputReportQueue( \
self.max_input_queue_size,
self.hid_caps.input_report_byte_length)
self.__input_processing_thread = \
HidDevice.InputReportProcessingThread(self)
self.__reading_thread = HidDevice.InputReportReaderThread( \
self, self.hid_caps.input_report_byte_length)
|
[
"def",
"open",
"(",
"self",
",",
"output_only",
"=",
"False",
",",
"shared",
"=",
"True",
")",
":",
"if",
"self",
".",
"is_opened",
"(",
")",
":",
"raise",
"HIDError",
"(",
"\"Device already opened\"",
")",
"sharing_flags",
"=",
"0",
"if",
"shared",
":",
"sharing_flags",
"=",
"winapi",
".",
"FILE_SHARE_READ",
"|",
"winapi",
".",
"FILE_SHARE_WRITE",
"hid_handle",
"=",
"winapi",
".",
"CreateFile",
"(",
"self",
".",
"device_path",
",",
"winapi",
".",
"GENERIC_READ",
"|",
"winapi",
".",
"GENERIC_WRITE",
",",
"sharing_flags",
",",
"None",
",",
"# no security\r",
"winapi",
".",
"OPEN_EXISTING",
",",
"winapi",
".",
"FILE_ATTRIBUTE_NORMAL",
"|",
"winapi",
".",
"FILE_FLAG_OVERLAPPED",
",",
"0",
")",
"if",
"not",
"hid_handle",
"or",
"hid_handle",
"==",
"INVALID_HANDLE_VALUE",
":",
"raise",
"HIDError",
"(",
"\"Error opening HID device: %s\\n\"",
"%",
"self",
".",
"product_name",
")",
"#get pre parsed data\r",
"ptr_preparsed_data",
"=",
"ctypes",
".",
"c_void_p",
"(",
")",
"if",
"not",
"hid_dll",
".",
"HidD_GetPreparsedData",
"(",
"int",
"(",
"hid_handle",
")",
",",
"byref",
"(",
"ptr_preparsed_data",
")",
")",
":",
"winapi",
".",
"CloseHandle",
"(",
"int",
"(",
"hid_handle",
")",
")",
"raise",
"HIDError",
"(",
"\"Failure to get HID pre parsed data\"",
")",
"self",
".",
"ptr_preparsed_data",
"=",
"ptr_preparsed_data",
"self",
".",
"hid_handle",
"=",
"hid_handle",
"#get top level capabilities\r",
"self",
".",
"hid_caps",
"=",
"winapi",
".",
"HIDP_CAPS",
"(",
")",
"HidStatus",
"(",
"hid_dll",
".",
"HidP_GetCaps",
"(",
"ptr_preparsed_data",
",",
"byref",
"(",
"self",
".",
"hid_caps",
")",
")",
")",
"#proceed with button capabilities\r",
"caps_length",
"=",
"c_ulong",
"(",
")",
"all_items",
"=",
"[",
"(",
"HidP_Input",
",",
"winapi",
".",
"HIDP_BUTTON_CAPS",
",",
"self",
".",
"hid_caps",
".",
"number_input_button_caps",
",",
"hid_dll",
".",
"HidP_GetButtonCaps",
")",
",",
"(",
"HidP_Input",
",",
"winapi",
".",
"HIDP_VALUE_CAPS",
",",
"self",
".",
"hid_caps",
".",
"number_input_value_caps",
",",
"hid_dll",
".",
"HidP_GetValueCaps",
")",
",",
"(",
"HidP_Output",
",",
"winapi",
".",
"HIDP_BUTTON_CAPS",
",",
"self",
".",
"hid_caps",
".",
"number_output_button_caps",
",",
"hid_dll",
".",
"HidP_GetButtonCaps",
")",
",",
"(",
"HidP_Output",
",",
"winapi",
".",
"HIDP_VALUE_CAPS",
",",
"self",
".",
"hid_caps",
".",
"number_output_value_caps",
",",
"hid_dll",
".",
"HidP_GetValueCaps",
")",
",",
"(",
"HidP_Feature",
",",
"winapi",
".",
"HIDP_BUTTON_CAPS",
",",
"self",
".",
"hid_caps",
".",
"number_feature_button_caps",
",",
"hid_dll",
".",
"HidP_GetButtonCaps",
")",
",",
"(",
"HidP_Feature",
",",
"winapi",
".",
"HIDP_VALUE_CAPS",
",",
"self",
".",
"hid_caps",
".",
"number_feature_value_caps",
",",
"hid_dll",
".",
"HidP_GetValueCaps",
")",
",",
"]",
"for",
"report_kind",
",",
"struct_kind",
",",
"max_items",
",",
"get_control_caps",
"in",
"all_items",
":",
"if",
"not",
"int",
"(",
"max_items",
")",
":",
"continue",
"#nothing here\r",
"#create storage for control/data\r",
"ctrl_array_type",
"=",
"struct_kind",
"*",
"max_items",
"ctrl_array_struct",
"=",
"ctrl_array_type",
"(",
")",
"#target max size for API function\r",
"caps_length",
".",
"value",
"=",
"max_items",
"HidStatus",
"(",
"get_control_caps",
"(",
"report_kind",
",",
"byref",
"(",
"ctrl_array_struct",
")",
",",
"byref",
"(",
"caps_length",
")",
",",
"ptr_preparsed_data",
")",
")",
"#keep reference of usages\r",
"for",
"idx",
"in",
"range",
"(",
"caps_length",
".",
"value",
")",
":",
"usage_item",
"=",
"HidPUsageCaps",
"(",
"ctrl_array_struct",
"[",
"idx",
"]",
")",
"#by report type\r",
"if",
"report_kind",
"not",
"in",
"self",
".",
"usages_storage",
":",
"self",
".",
"usages_storage",
"[",
"report_kind",
"]",
"=",
"list",
"(",
")",
"self",
".",
"usages_storage",
"[",
"report_kind",
"]",
".",
"append",
"(",
"usage_item",
")",
"#also add report_id to known reports set\r",
"if",
"report_kind",
"not",
"in",
"self",
".",
"report_set",
":",
"self",
".",
"report_set",
"[",
"report_kind",
"]",
"=",
"set",
"(",
")",
"self",
".",
"report_set",
"[",
"report_kind",
"]",
".",
"add",
"(",
"usage_item",
".",
"report_id",
")",
"del",
"ctrl_array_struct",
"del",
"ctrl_array_type",
"# now is the time to consider the device opened, as report\r",
"# handling threads enforce it\r",
"self",
".",
"__open_status",
"=",
"True",
"#now prepare the input report handler\r",
"self",
".",
"__input_report_templates",
"=",
"dict",
"(",
")",
"if",
"not",
"output_only",
"and",
"self",
".",
"hid_caps",
".",
"input_report_byte_length",
"and",
"HidP_Input",
"in",
"self",
".",
"report_set",
":",
"#first make templates for easy parsing input reports\r",
"for",
"report_id",
"in",
"self",
".",
"report_set",
"[",
"HidP_Input",
"]",
":",
"self",
".",
"__input_report_templates",
"[",
"report_id",
"]",
"=",
"HidReport",
"(",
"self",
",",
"HidP_Input",
",",
"report_id",
")",
"#prepare input reports handlers\r",
"self",
".",
"_input_report_queue",
"=",
"HidDevice",
".",
"InputReportQueue",
"(",
"self",
".",
"max_input_queue_size",
",",
"self",
".",
"hid_caps",
".",
"input_report_byte_length",
")",
"self",
".",
"__input_processing_thread",
"=",
"HidDevice",
".",
"InputReportProcessingThread",
"(",
"self",
")",
"self",
".",
"__reading_thread",
"=",
"HidDevice",
".",
"InputReportReaderThread",
"(",
"self",
",",
"self",
".",
"hid_caps",
".",
"input_report_byte_length",
")"
] |
Open HID device and obtain 'Collection Information'.
It effectively prepares the HidDevice object for reading and writing
|
[
"Open",
"HID",
"device",
"and",
"obtain",
"Collection",
"Information",
".",
"It",
"effectively",
"prepares",
"the",
"HidDevice",
"object",
"for",
"reading",
"and",
"writing"
] |
python
|
train
| 43.026549 |
saltstack/salt
|
salt/modules/bluez_bluetooth.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bluez_bluetooth.py#L192-L208
|
def block(bdaddr):
'''
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
'''
if not salt.utils.validate.net.mac(bdaddr):
raise CommandExecutionError(
'Invalid BD address passed to bluetooth.block'
)
cmd = 'hciconfig {0} block'.format(bdaddr)
__salt__['cmd.run'](cmd).splitlines()
|
[
"def",
"block",
"(",
"bdaddr",
")",
":",
"if",
"not",
"salt",
".",
"utils",
".",
"validate",
".",
"net",
".",
"mac",
"(",
"bdaddr",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Invalid BD address passed to bluetooth.block'",
")",
"cmd",
"=",
"'hciconfig {0} block'",
".",
"format",
"(",
"bdaddr",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
".",
"splitlines",
"(",
")"
] |
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
|
[
"Block",
"a",
"specific",
"bluetooth",
"device",
"by",
"BD",
"Address"
] |
python
|
train
| 24.117647 |
The-Politico/politico-civic-election-night
|
electionnight/serializers/state.py
|
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/state.py#L69-L85
|
def get_content(self, obj):
"""All content for a state's page on an election day."""
election_day = ElectionDay.objects.get(
date=self.context['election_date'])
division = obj
# In case of house special election,
# use parent division.
if obj.level.name == DivisionLevel.DISTRICT:
division = obj.parent
special = True if self.context.get('special') else False
return PageContent.objects.division_content(
election_day,
division,
special
)
|
[
"def",
"get_content",
"(",
"self",
",",
"obj",
")",
":",
"election_day",
"=",
"ElectionDay",
".",
"objects",
".",
"get",
"(",
"date",
"=",
"self",
".",
"context",
"[",
"'election_date'",
"]",
")",
"division",
"=",
"obj",
"# In case of house special election,",
"# use parent division.",
"if",
"obj",
".",
"level",
".",
"name",
"==",
"DivisionLevel",
".",
"DISTRICT",
":",
"division",
"=",
"obj",
".",
"parent",
"special",
"=",
"True",
"if",
"self",
".",
"context",
".",
"get",
"(",
"'special'",
")",
"else",
"False",
"return",
"PageContent",
".",
"objects",
".",
"division_content",
"(",
"election_day",
",",
"division",
",",
"special",
")"
] |
All content for a state's page on an election day.
|
[
"All",
"content",
"for",
"a",
"state",
"s",
"page",
"on",
"an",
"election",
"day",
"."
] |
python
|
train
| 32.705882 |
elifesciences/elife-tools
|
elifetools/parseJATS.py
|
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L368-L387
|
def pub_dates(soup):
"""
return a list of all the pub dates
"""
pub_dates = []
tags = raw_parser.pub_date(soup)
for tag in tags:
pub_date = OrderedDict()
copy_attribute(tag.attrs, 'publication-format', pub_date)
copy_attribute(tag.attrs, 'date-type', pub_date)
copy_attribute(tag.attrs, 'pub-type', pub_date)
for tag_attr in ["date-type", "pub-type"]:
if tag_attr in tag.attrs:
(day, month, year) = ymd(tag)
pub_date['day'] = day
pub_date['month'] = month
pub_date['year'] = year
pub_date['date'] = date_struct_nn(year, month, day)
pub_dates.append(pub_date)
return pub_dates
|
[
"def",
"pub_dates",
"(",
"soup",
")",
":",
"pub_dates",
"=",
"[",
"]",
"tags",
"=",
"raw_parser",
".",
"pub_date",
"(",
"soup",
")",
"for",
"tag",
"in",
"tags",
":",
"pub_date",
"=",
"OrderedDict",
"(",
")",
"copy_attribute",
"(",
"tag",
".",
"attrs",
",",
"'publication-format'",
",",
"pub_date",
")",
"copy_attribute",
"(",
"tag",
".",
"attrs",
",",
"'date-type'",
",",
"pub_date",
")",
"copy_attribute",
"(",
"tag",
".",
"attrs",
",",
"'pub-type'",
",",
"pub_date",
")",
"for",
"tag_attr",
"in",
"[",
"\"date-type\"",
",",
"\"pub-type\"",
"]",
":",
"if",
"tag_attr",
"in",
"tag",
".",
"attrs",
":",
"(",
"day",
",",
"month",
",",
"year",
")",
"=",
"ymd",
"(",
"tag",
")",
"pub_date",
"[",
"'day'",
"]",
"=",
"day",
"pub_date",
"[",
"'month'",
"]",
"=",
"month",
"pub_date",
"[",
"'year'",
"]",
"=",
"year",
"pub_date",
"[",
"'date'",
"]",
"=",
"date_struct_nn",
"(",
"year",
",",
"month",
",",
"day",
")",
"pub_dates",
".",
"append",
"(",
"pub_date",
")",
"return",
"pub_dates"
] |
return a list of all the pub dates
|
[
"return",
"a",
"list",
"of",
"all",
"the",
"pub",
"dates"
] |
python
|
train
| 36.2 |
Gandi/gandi.cli
|
gandi/cli/modules/status.py
|
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/status.py#L39-L42
|
def status(cls):
"""Retrieve global status from status.gandi.net."""
return cls.json_get('%s/status' % cls.api_url, empty_key=True,
send_key=False)
|
[
"def",
"status",
"(",
"cls",
")",
":",
"return",
"cls",
".",
"json_get",
"(",
"'%s/status'",
"%",
"cls",
".",
"api_url",
",",
"empty_key",
"=",
"True",
",",
"send_key",
"=",
"False",
")"
] |
Retrieve global status from status.gandi.net.
|
[
"Retrieve",
"global",
"status",
"from",
"status",
".",
"gandi",
".",
"net",
"."
] |
python
|
train
| 47 |
bharadwaj-raju/libdesktop
|
libdesktop/applications.py
|
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/applications.py#L256-L322
|
def text_editor(file='', background=False, return_cmd=False):
'''Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
'''
desktop_env = system.get_name()
if desktop_env == 'windows':
editor_cmd_str = system.get_cmd_out(
['ftype', 'textfile']).split('=', 1)[1]
elif desktop_env == 'mac':
editor_cmd_str = 'open -a' + system.get_cmd_out(
['def',
'read',
'com.apple.LaunchServices',
'LSHandlers'
'-array'
'{LSHandlerContentType=public.plain-text;}']
)
else:
# Use def handler for MIME-type text/plain
editor_cmd_str = system.get_cmd_out(
['xdg-mime', 'query', 'default', 'text/plain'])
if '\n' in editor_cmd_str:
# Sometimes locate returns multiple results
# use first one
editor_cmd_str = editor_cmd_str.split('\n')[0]
if editor_cmd_str.endswith('.desktop'):
# We don't use desktopfile.execute() in order to have working
# return_cmd and background
editor_cmd_str = desktopfile.parse(
desktopfile.locate(editor_cmd_str)[0])['Exec']
for i in editor_cmd_str.split():
if i.startswith('%'):
# %-style formatters
editor_cmd_str = editor_cmd_str.replace(i, '')
if i == '--new-document':
# Gedit
editor_cmd_str = editor_cmd_str.replace(i, '')
if file:
editor_cmd_str += ' {}'.format(shlex.quote(file))
if return_cmd:
return editor_cmd_str
text_editor_proc = sp.Popen([editor_cmd_str], shell=True)
if not background:
text_editor_proc.wait()
|
[
"def",
"text_editor",
"(",
"file",
"=",
"''",
",",
"background",
"=",
"False",
",",
"return_cmd",
"=",
"False",
")",
":",
"desktop_env",
"=",
"system",
".",
"get_name",
"(",
")",
"if",
"desktop_env",
"==",
"'windows'",
":",
"editor_cmd_str",
"=",
"system",
".",
"get_cmd_out",
"(",
"[",
"'ftype'",
",",
"'textfile'",
"]",
")",
".",
"split",
"(",
"'='",
",",
"1",
")",
"[",
"1",
"]",
"elif",
"desktop_env",
"==",
"'mac'",
":",
"editor_cmd_str",
"=",
"'open -a'",
"+",
"system",
".",
"get_cmd_out",
"(",
"[",
"'def'",
",",
"'read'",
",",
"'com.apple.LaunchServices'",
",",
"'LSHandlers'",
"'-array'",
"'{LSHandlerContentType=public.plain-text;}'",
"]",
")",
"else",
":",
"# Use def handler for MIME-type text/plain",
"editor_cmd_str",
"=",
"system",
".",
"get_cmd_out",
"(",
"[",
"'xdg-mime'",
",",
"'query'",
",",
"'default'",
",",
"'text/plain'",
"]",
")",
"if",
"'\\n'",
"in",
"editor_cmd_str",
":",
"# Sometimes locate returns multiple results",
"# use first one",
"editor_cmd_str",
"=",
"editor_cmd_str",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"if",
"editor_cmd_str",
".",
"endswith",
"(",
"'.desktop'",
")",
":",
"# We don't use desktopfile.execute() in order to have working",
"# return_cmd and background",
"editor_cmd_str",
"=",
"desktopfile",
".",
"parse",
"(",
"desktopfile",
".",
"locate",
"(",
"editor_cmd_str",
")",
"[",
"0",
"]",
")",
"[",
"'Exec'",
"]",
"for",
"i",
"in",
"editor_cmd_str",
".",
"split",
"(",
")",
":",
"if",
"i",
".",
"startswith",
"(",
"'%'",
")",
":",
"# %-style formatters",
"editor_cmd_str",
"=",
"editor_cmd_str",
".",
"replace",
"(",
"i",
",",
"''",
")",
"if",
"i",
"==",
"'--new-document'",
":",
"# Gedit",
"editor_cmd_str",
"=",
"editor_cmd_str",
".",
"replace",
"(",
"i",
",",
"''",
")",
"if",
"file",
":",
"editor_cmd_str",
"+=",
"' {}'",
".",
"format",
"(",
"shlex",
".",
"quote",
"(",
"file",
")",
")",
"if",
"return_cmd",
":",
"return",
"editor_cmd_str",
"text_editor_proc",
"=",
"sp",
".",
"Popen",
"(",
"[",
"editor_cmd_str",
"]",
",",
"shell",
"=",
"True",
")",
"if",
"not",
"background",
":",
"text_editor_proc",
".",
"wait",
"(",
")"
] |
Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
|
[
"Starts",
"the",
"default",
"graphical",
"text",
"editor",
"."
] |
python
|
train
| 28.298507 |
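
A hedged usage sketch for the text_editor record above: with return_cmd=True the function only builds the launch command and executes nothing. The import path and the file name 'notes.txt' are illustrative assumptions, and the library itself must be installed.

from libdesktop import applications  # assumed import path for libdesktop/applications.py

# Build (but do not run) the command for the user's preferred graphical editor.
cmd = applications.text_editor(file='notes.txt', return_cmd=True)
print(cmd)  # e.g. 'gedit notes.txt', depending on the desktop's default handler
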
reiinakano/xcessiv
|
xcessiv/stacker.py
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/stacker.py#L77-L103
|
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out
|
[
"def",
"_process_using_meta_feature_generator",
"(",
"self",
",",
"X",
",",
"meta_feature_generator",
")",
":",
"all_learner_meta_features",
"=",
"[",
"]",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"single_learner_meta_features",
"=",
"getattr",
"(",
"base_learner",
",",
"self",
".",
"meta_feature_generators",
"[",
"idx",
"]",
")",
"(",
"X",
")",
"if",
"len",
"(",
"single_learner_meta_features",
".",
"shape",
")",
"==",
"1",
":",
"single_learner_meta_features",
"=",
"single_learner_meta_features",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"all_learner_meta_features",
".",
"append",
"(",
"single_learner_meta_features",
")",
"all_learner_meta_features",
"=",
"np",
".",
"concatenate",
"(",
"all_learner_meta_features",
",",
"axis",
"=",
"1",
")",
"out",
"=",
"getattr",
"(",
"self",
".",
"secondary_learner",
",",
"meta_feature_generator",
")",
"(",
"all_learner_meta_features",
")",
"return",
"out"
] |
Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
|
[
"Process",
"using",
"secondary",
"learner",
"meta",
"-",
"feature",
"generator"
] |
python
|
train
| 44 |
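
A standalone sketch of the stacking step performed by _process_using_meta_feature_generator above: each base learner contributes a column block of meta-features, the blocks are concatenated along axis 1, and the stacked matrix is what the secondary learner would consume. The toy MeanLearner class and random data are stand-ins, not part of xcessiv.

import numpy as np

class MeanLearner:
    def predict(self, X):
        return X.mean(axis=1)  # 1-D output, reshaped to a column below

X = np.random.rand(5, 3)
blocks = []
for learner in [MeanLearner(), MeanLearner()]:
    feats = learner.predict(X)
    if feats.ndim == 1:
        feats = feats.reshape(-1, 1)
    blocks.append(feats)
stacked = np.concatenate(blocks, axis=1)  # shape (5, 2): one column per base learner
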
rigetti/grove
|
grove/tomography/utils.py
|
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/utils.py#L380-L429
|
def run_in_parallel(programs, nsamples, cxn, shuffle=True):
"""
Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of
programs that executes the input programs in parallel. Optionally randomize within each
qubit-specific sequence.
The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates
over disjoint sets of qubits that the programs involve and the inner axis iterates over a
sequence of related programs, e.g., tomography sequences, on the same set of qubits.
:param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d
array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the
inner list over programs to run on those qubits, e.g., tomographic sequences.
:param int nsamples: Number of repetitions for executing each Program.
:param QPUConnection|QVMConnection cxn: The quantum machine connection.
:param bool shuffle: If True, the order of each qubit specific sequence (2nd axis) is randomized
Default is True.
:return: An array of 2d arrays that provide bitstring histograms for each input program.
The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the
inner 2d array iterates over the programs for that group and the inner most axis iterates
over all possible bitstrings for the qubit group under consideration.
:rtype np.array
"""
if shuffle:
n_groups = len(programs)
n_progs_per_group = len(programs[0])
permutations = np.outer(np.ones(n_groups, dtype=int),
np.arange(n_progs_per_group, dtype=int))
inverse_permutations = np.zeros_like(permutations)
for jj in range(n_groups):
# in-place operation
np.random.shuffle(permutations[jj])
# store inverse permutation
inverse_permutations[jj] = np.argsort(permutations[jj])
# apply to programs
shuffled_programs = np.empty((n_groups, n_progs_per_group), dtype=object)
for jdx, (progsj, pj) in enumerate(zip(programs, permutations)):
shuffled_programs[jdx] = [progsj[pjk] for pjk in pj]
shuffled_results = _run_in_parallel(shuffled_programs, nsamples, cxn)
# reverse shuffling of results
results = np.array([resultsj[pj]
for resultsj, pj in zip(shuffled_results, inverse_permutations)])
return results
else:
return _run_in_parallel(programs, nsamples, cxn)
|
[
"def",
"run_in_parallel",
"(",
"programs",
",",
"nsamples",
",",
"cxn",
",",
"shuffle",
"=",
"True",
")",
":",
"if",
"shuffle",
":",
"n_groups",
"=",
"len",
"(",
"programs",
")",
"n_progs_per_group",
"=",
"len",
"(",
"programs",
"[",
"0",
"]",
")",
"permutations",
"=",
"np",
".",
"outer",
"(",
"np",
".",
"ones",
"(",
"n_groups",
",",
"dtype",
"=",
"int",
")",
",",
"np",
".",
"arange",
"(",
"n_progs_per_group",
",",
"dtype",
"=",
"int",
")",
")",
"inverse_permutations",
"=",
"np",
".",
"zeros_like",
"(",
"permutations",
")",
"for",
"jj",
"in",
"range",
"(",
"n_groups",
")",
":",
"# in-place operation",
"np",
".",
"random",
".",
"shuffle",
"(",
"permutations",
"[",
"jj",
"]",
")",
"# store inverse permutation",
"inverse_permutations",
"[",
"jj",
"]",
"=",
"np",
".",
"argsort",
"(",
"permutations",
"[",
"jj",
"]",
")",
"# apply to programs",
"shuffled_programs",
"=",
"np",
".",
"empty",
"(",
"(",
"n_groups",
",",
"n_progs_per_group",
")",
",",
"dtype",
"=",
"object",
")",
"for",
"jdx",
",",
"(",
"progsj",
",",
"pj",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"programs",
",",
"permutations",
")",
")",
":",
"shuffled_programs",
"[",
"jdx",
"]",
"=",
"[",
"progsj",
"[",
"pjk",
"]",
"for",
"pjk",
"in",
"pj",
"]",
"shuffled_results",
"=",
"_run_in_parallel",
"(",
"shuffled_programs",
",",
"nsamples",
",",
"cxn",
")",
"# reverse shuffling of results",
"results",
"=",
"np",
".",
"array",
"(",
"[",
"resultsj",
"[",
"pj",
"]",
"for",
"resultsj",
",",
"pj",
"in",
"zip",
"(",
"shuffled_results",
",",
"inverse_permutations",
")",
"]",
")",
"return",
"results",
"else",
":",
"return",
"_run_in_parallel",
"(",
"programs",
",",
"nsamples",
",",
"cxn",
")"
] |
Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of
programs that executes the input programs in parallel. Optionally randomize within each
qubit-specific sequence.
The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates
over disjoint sets of qubits that the programs involve and the inner axis iterates over a
sequence of related programs, e.g., tomography sequences, on the same set of qubits.
:param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d
array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the
inner list over programs to run on those qubits, e.g., tomographic sequences.
:param int nsamples: Number of repetitions for executing each Program.
:param QPUConnection|QVMConnection cxn: The quantum machine connection.
:param bool shuffle: If True, the order of each qubit specific sequence (2nd axis) is randomized
Default is True.
:return: An array of 2d arrays that provide bitstring histograms for each input program.
The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the
inner 2d array iterates over the programs for that group and the inner most axis iterates
over all possible bitstrings for the qubit group under consideration.
:rtype np.array
|
[
"Take",
"sequences",
"of",
"Protoquil",
"programs",
"on",
"disjoint",
"qubits",
"and",
"execute",
"a",
"single",
"sequence",
"of",
"programs",
"that",
"executes",
"the",
"input",
"programs",
"in",
"parallel",
".",
"Optionally",
"randomize",
"within",
"each",
"qubit",
"-",
"specific",
"sequence",
"."
] |
python
|
train
| 51.52 |
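
A minimal NumPy sketch of the shuffle/unshuffle bookkeeping that run_in_parallel above relies on: results gathered in shuffled order are restored by indexing with the inverse permutation obtained from np.argsort. Plain integers stand in for the Quil programs and their results.

import numpy as np

progs = np.arange(6)                      # one qubit group's program sequence
perm = np.random.permutation(len(progs))  # shuffled execution order
inv = np.argsort(perm)                    # inverse permutation
results_shuffled = progs[perm] * 10       # pretend per-program results, in shuffled order
results = results_shuffled[inv]           # back in the original program order
assert (results == progs * 10).all()
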
tensorflow/tensor2tensor
|
tensor2tensor/models/research/transformer_nat.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L110-L118
|
def vq_discrete_unbottleneck(x, hparams):
"""Simple undiscretization from vector quantized representation."""
x_shape = common_layers.shape_list(x)
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_flat = tf.reshape(x, [-1, bottleneck_size])
result = tf.matmul(x_flat, means)
result = tf.reshape(result, x_shape[:-1] + [hparams.hidden_size])
return result
|
[
"def",
"vq_discrete_unbottleneck",
"(",
"x",
",",
"hparams",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"bottleneck_size",
"=",
"2",
"**",
"hparams",
".",
"bottleneck_bits",
"means",
"=",
"hparams",
".",
"means",
"x_flat",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
",",
"bottleneck_size",
"]",
")",
"result",
"=",
"tf",
".",
"matmul",
"(",
"x_flat",
",",
"means",
")",
"result",
"=",
"tf",
".",
"reshape",
"(",
"result",
",",
"x_shape",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"hparams",
".",
"hidden_size",
"]",
")",
"return",
"result"
] |
Simple undiscretization from vector quantized representation.
|
[
"Simple",
"undiscretization",
"from",
"vector",
"quantized",
"representation",
"."
] |
python
|
train
| 42.444444 |
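
A NumPy sketch of the un-bottleneck arithmetic above: multiplying one-hot code vectors by the means table is an embedding lookup, after which the result is reshaped back to the original leading dimensions. The sizes used here are illustrative.

import numpy as np

bottleneck_size, hidden_size = 4, 3
means = np.random.rand(bottleneck_size, hidden_size)
codes = [2, 0, 3]
x_flat = np.eye(bottleneck_size)[codes]      # one-hot rows, shape (3, 4)
result = x_flat @ means                      # shape (3, hidden_size)
assert np.allclose(result, means[codes])     # identical to a direct table lookup
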
sentinel-hub/sentinelhub-py
|
sentinelhub/aws.py
|
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L177-L192
|
def sort_download_list(self):
"""
Method for sorting the list of download requests. Band images have priority before metadata files. If bands
images or metadata files are specified with a list they will be sorted in the same order as in the list.
Otherwise they will be sorted alphabetically (band B8A will be between B08 and B09).
"""
def aws_sort_function(download_request):
data_name = download_request.properties['data_name']
if 'product_name' in download_request.properties:
product_name = download_request.properties['product_name']
else:
product_name = self._url_to_props(download_request.url)[0]
if data_name in self.bands:
return 0, product_name, self.bands.index(data_name)
return 1, product_name, self.metafiles.index(data_name)
self.download_list.sort(key=aws_sort_function)
|
[
"def",
"sort_download_list",
"(",
"self",
")",
":",
"def",
"aws_sort_function",
"(",
"download_request",
")",
":",
"data_name",
"=",
"download_request",
".",
"properties",
"[",
"'data_name'",
"]",
"if",
"'product_name'",
"in",
"download_request",
".",
"properties",
":",
"product_name",
"=",
"download_request",
".",
"properties",
"[",
"'product_name'",
"]",
"else",
":",
"product_name",
"=",
"self",
".",
"_url_to_props",
"(",
"download_request",
".",
"url",
")",
"[",
"0",
"]",
"if",
"data_name",
"in",
"self",
".",
"bands",
":",
"return",
"0",
",",
"product_name",
",",
"self",
".",
"bands",
".",
"index",
"(",
"data_name",
")",
"return",
"1",
",",
"product_name",
",",
"self",
".",
"metafiles",
".",
"index",
"(",
"data_name",
")",
"self",
".",
"download_list",
".",
"sort",
"(",
"key",
"=",
"aws_sort_function",
")"
] |
Method for sorting the list of download requests. Band images have priority before metadata files. If bands
images or metadata files are specified with a list they will be sorted in the same order as in the list.
Otherwise they will be sorted alphabetically (band B8A will be between B08 and B09).
|
[
"Method",
"for",
"sorting",
"the",
"list",
"of",
"download",
"requests",
".",
"Band",
"images",
"have",
"priority",
"before",
"metadata",
"files",
".",
"If",
"bands",
"images",
"or",
"metadata",
"files",
"are",
"specified",
"with",
"a",
"list",
"they",
"will",
"be",
"sorted",
"in",
"the",
"same",
"order",
"as",
"in",
"the",
"list",
".",
"Otherwise",
"they",
"will",
"be",
"sorted",
"alphabetically",
"(",
"band",
"B8A",
"will",
"be",
"between",
"B08",
"and",
"B09",
")",
"."
] |
python
|
train
| 58.4375 |
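
A toy illustration of the three-part sort key used by sort_download_list above: band images (priority 0) come before metadata files (priority 1), then the product name, then the position in the supplied band list. All names below are illustrative.

bands = ['B08', 'B8A', 'B09']
metafiles = ['metadata.xml']
requests = [('metadata.xml', 'p1'), ('B09', 'p1'), ('B8A', 'p1'), ('B08', 'p1')]

def sort_key(item):
    name, product = item
    if name in bands:
        return 0, product, bands.index(name)
    return 1, product, metafiles.index(name)

print(sorted(requests, key=sort_key))
# [('B08', 'p1'), ('B8A', 'p1'), ('B09', 'p1'), ('metadata.xml', 'p1')]
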
greenbender/pynntp
|
nntp/nntp.py
|
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L191-L226
|
def status(self):
"""Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message.
"""
line = next(self.__line_gen()).rstrip()
parts = line.split(None, 1)
try:
code, message = int(parts[0]), ""
except ValueError:
raise NNTPProtocolError(line)
if code < 100 or code >= 600:
raise NNTPProtocolError(line)
if len(parts) > 1:
message = parts[1]
if 400 <= code <= 499:
raise NNTPTemporaryError(code, message)
if 500 <= code <= 599:
raise NNTPPermanentError(code, message)
return code, message
|
[
"def",
"status",
"(",
"self",
")",
":",
"line",
"=",
"next",
"(",
"self",
".",
"__line_gen",
"(",
")",
")",
".",
"rstrip",
"(",
")",
"parts",
"=",
"line",
".",
"split",
"(",
"None",
",",
"1",
")",
"try",
":",
"code",
",",
"message",
"=",
"int",
"(",
"parts",
"[",
"0",
"]",
")",
",",
"\"\"",
"except",
"ValueError",
":",
"raise",
"NNTPProtocolError",
"(",
"line",
")",
"if",
"code",
"<",
"100",
"or",
"code",
">=",
"600",
":",
"raise",
"NNTPProtocolError",
"(",
"line",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"message",
"=",
"parts",
"[",
"1",
"]",
"if",
"400",
"<=",
"code",
"<=",
"499",
":",
"raise",
"NNTPTemporaryError",
"(",
"code",
",",
"message",
")",
"if",
"500",
"<=",
"code",
"<=",
"599",
":",
"raise",
"NNTPPermanentError",
"(",
"code",
",",
"message",
")",
"return",
"code",
",",
"message"
] |
Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message.
|
[
"Reads",
"a",
"command",
"response",
"status",
"."
] |
python
|
test
| 29.5 |
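
A self-contained sketch of the status-line handling described above: the first whitespace-separated field must parse as an integer in [100, 600), codes 400-499 signal a temporary error and 500-599 a permanent one. The plain ValueError/RuntimeError exceptions are simplified stand-ins for the NNTP* exception classes in the record.

def parse_status(line):
    parts = line.rstrip().split(None, 1)
    try:
        code = int(parts[0])
    except ValueError:
        raise ValueError('malformed status line: %r' % line)
    if code < 100 or code >= 600:
        raise ValueError('status code out of range: %r' % line)
    message = parts[1] if len(parts) > 1 else ''
    if 400 <= code <= 499:
        raise RuntimeError('temporary error %d: %s' % (code, message))
    if 500 <= code <= 599:
        raise RuntimeError('permanent error %d: %s' % (code, message))
    return code, message

print(parse_status('200 news.example.com server ready'))  # (200, 'news.example.com server ready')
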
useblocks/sphinxcontrib-needs
|
sphinxcontrib/needs/directives/needflow.py
|
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/needflow.py#L63-L68
|
def make_entity_name(name):
"""Creates a valid PlantUML entity name from the given value."""
invalid_chars = "-=!#$%^&*[](){}/~'`<>:;"
for char in invalid_chars:
name = name.replace(char, "_")
return name
|
[
"def",
"make_entity_name",
"(",
"name",
")",
":",
"invalid_chars",
"=",
"\"-=!#$%^&*[](){}/~'`<>:;\"",
"for",
"char",
"in",
"invalid_chars",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"char",
",",
"\"_\"",
")",
"return",
"name"
] |
Creates a valid PlantUML entity name from the given value.
|
[
"Creates",
"a",
"valid",
"PlantUML",
"entity",
"name",
"from",
"the",
"given",
"value",
"."
] |
python
|
train
| 37.166667 |
Rapptz/discord.py
|
discord/role.py
|
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/role.py#L262-L283
|
async def delete(self, *, reason=None):
"""|coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
"""
await self._state.http.delete_role(self.guild.id, self.id, reason=reason)
|
[
"async",
"def",
"delete",
"(",
"self",
",",
"*",
",",
"reason",
"=",
"None",
")",
":",
"await",
"self",
".",
"_state",
".",
"http",
".",
"delete_role",
"(",
"self",
".",
"guild",
".",
"id",
",",
"self",
".",
"id",
",",
"reason",
"=",
"reason",
")"
] |
|coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
|
[
"|coro|"
] |
python
|
train
| 26.136364 |
spyder-ide/spyder
|
spyder/utils/vcs.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/vcs.py#L100-L116
|
def get_hg_revision(repopath):
"""Return Mercurial revision for the repository located at repopath
Result is a tuple (global, local, branch), with None values on error
For example:
>>> get_hg_revision(".")
('eba7273c69df+', '2015+', 'default')
"""
try:
assert osp.isdir(osp.join(repopath, '.hg'))
proc = programs.run_program('hg', ['id', '-nib', repopath])
output, _err = proc.communicate()
# output is now: ('eba7273c69df+ 2015+ default\n', None)
# Split 2 times max to allow spaces in branch names.
return tuple(output.decode().strip().split(None, 2))
except (subprocess.CalledProcessError, AssertionError, AttributeError,
OSError):
return (None, None, None)
|
[
"def",
"get_hg_revision",
"(",
"repopath",
")",
":",
"try",
":",
"assert",
"osp",
".",
"isdir",
"(",
"osp",
".",
"join",
"(",
"repopath",
",",
"'.hg'",
")",
")",
"proc",
"=",
"programs",
".",
"run_program",
"(",
"'hg'",
",",
"[",
"'id'",
",",
"'-nib'",
",",
"repopath",
"]",
")",
"output",
",",
"_err",
"=",
"proc",
".",
"communicate",
"(",
")",
"# output is now: ('eba7273c69df+ 2015+ default\\n', None)\r",
"# Split 2 times max to allow spaces in branch names.\r",
"return",
"tuple",
"(",
"output",
".",
"decode",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"None",
",",
"2",
")",
")",
"except",
"(",
"subprocess",
".",
"CalledProcessError",
",",
"AssertionError",
",",
"AttributeError",
",",
"OSError",
")",
":",
"return",
"(",
"None",
",",
"None",
",",
"None",
")"
] |
Return Mercurial revision for the repository located at repopath
Result is a tuple (global, local, branch), with None values on error
For example:
>>> get_hg_revision(".")
('eba7273c69df+', '2015+', 'default')
|
[
"Return",
"Mercurial",
"revision",
"for",
"the",
"repository",
"located",
"at",
"repopath",
"Result",
"is",
"a",
"tuple",
"(",
"global",
"local",
"branch",
")",
"with",
"None",
"values",
"on",
"error",
"For",
"example",
":",
">>>",
"get_hg_revision",
"(",
".",
")",
"(",
"eba7273c69df",
"+",
"2015",
"+",
"default",
")"
] |
python
|
train
| 45.882353 |
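
A tiny illustration of the output parsing in get_hg_revision above: `hg id -nib` prints "<global> <local> <branch>", and splitting at most twice keeps spaces inside branch names intact. The sample output string is hypothetical.

output = b'eba7273c69df+ 2015+ feature branch\n'
print(tuple(output.decode().strip().split(None, 2)))
# ('eba7273c69df+', '2015+', 'feature branch')
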
splunk/splunk-sdk-python
|
splunklib/binding.py
|
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/binding.py#L1297-L1301
|
def close(self):
"""Closes this response."""
if self._connection:
self._connection.close()
self._response.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_connection",
":",
"self",
".",
"_connection",
".",
"close",
"(",
")",
"self",
".",
"_response",
".",
"close",
"(",
")"
] |
Closes this response.
|
[
"Closes",
"this",
"response",
"."
] |
python
|
train
| 29 |
fastai/fastai
|
fastai/basic_train.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/basic_train.py#L69-L76
|
def train_epoch(model:nn.Module, dl:DataLoader, opt:optim.Optimizer, loss_func:LossFunction)->None:
"Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`."
model.train()
for xb,yb in dl:
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero_grad()
|
[
"def",
"train_epoch",
"(",
"model",
":",
"nn",
".",
"Module",
",",
"dl",
":",
"DataLoader",
",",
"opt",
":",
"optim",
".",
"Optimizer",
",",
"loss_func",
":",
"LossFunction",
")",
"->",
"None",
":",
"model",
".",
"train",
"(",
")",
"for",
"xb",
",",
"yb",
"in",
"dl",
":",
"loss",
"=",
"loss_func",
"(",
"model",
"(",
"xb",
")",
",",
"yb",
")",
"loss",
".",
"backward",
"(",
")",
"opt",
".",
"step",
"(",
")",
"opt",
".",
"zero_grad",
"(",
")"
] |
Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`.
|
[
"Simple",
"training",
"of",
"model",
"for",
"1",
"epoch",
"of",
"dl",
"using",
"optim",
"opt",
"and",
"loss",
"function",
"loss_func",
"."
] |
python
|
train
| 42.5 |
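
A hedged usage sketch for the train_epoch record above, assuming PyTorch is installed; the toy tensors, model, loss and learning rate are illustrative only.

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

xs, ys = torch.randn(64, 4), torch.randn(64, 1)
dl = DataLoader(TensorDataset(xs, ys), batch_size=16)
model = nn.Linear(4, 1)
opt = optim.SGD(model.parameters(), lr=0.1)
loss_func = nn.MSELoss()

model.train()
for xb, yb in dl:                     # same loop body as train_epoch
    loss = loss_func(model(xb), yb)
    loss.backward()
    opt.step()
    opt.zero_grad()
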
oisinmulvihill/stomper
|
lib/stomper/stompbuffer.py
|
https://github.com/oisinmulvihill/stomper/blob/842ed2353a4ddd638d35929ae5b7b70eb298305c/lib/stomper/stompbuffer.py#L69-L109
|
def getOneMessage ( self ):
"""
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
Note that the buffer can contain more than once message. You
should therefore call me in a loop until I return None.
"""
( mbytes, hbytes ) = self._findMessageBytes ( self.buffer )
if not mbytes:
return None
msgdata = self.buffer[:mbytes]
self.buffer = self.buffer[mbytes:]
hdata = msgdata[:hbytes]
elems = hdata.split ( '\n' )
cmd = elems.pop ( 0 )
headers = {}
# We can't use a simple split because the value can legally contain
# colon characters (for example, the session returned by ActiveMQ).
for e in elems:
try:
i = e.find ( ':' )
except ValueError:
continue
k = e[:i].strip()
v = e[i+1:].strip()
headers [ k ] = v
# hbytes points to the start of the '\n\n' at the end of the header,
# so 2 bytes beyond this is the start of the body. The body EXCLUDES
# the final two bytes, which are '\x00\n'. Note that these 2 bytes
# are UNRELATED to the 2-byte '\n\n' that Frame.pack() used to insert
# into the data stream.
body = msgdata[hbytes+2:-2]
msg = { 'cmd' : cmd,
'headers' : headers,
'body' : body,
}
return msg
|
[
"def",
"getOneMessage",
"(",
"self",
")",
":",
"(",
"mbytes",
",",
"hbytes",
")",
"=",
"self",
".",
"_findMessageBytes",
"(",
"self",
".",
"buffer",
")",
"if",
"not",
"mbytes",
":",
"return",
"None",
"msgdata",
"=",
"self",
".",
"buffer",
"[",
":",
"mbytes",
"]",
"self",
".",
"buffer",
"=",
"self",
".",
"buffer",
"[",
"mbytes",
":",
"]",
"hdata",
"=",
"msgdata",
"[",
":",
"hbytes",
"]",
"elems",
"=",
"hdata",
".",
"split",
"(",
"'\\n'",
")",
"cmd",
"=",
"elems",
".",
"pop",
"(",
"0",
")",
"headers",
"=",
"{",
"}",
"# We can't use a simple split because the value can legally contain",
"# colon characters (for example, the session returned by ActiveMQ).",
"for",
"e",
"in",
"elems",
":",
"try",
":",
"i",
"=",
"e",
".",
"find",
"(",
"':'",
")",
"except",
"ValueError",
":",
"continue",
"k",
"=",
"e",
"[",
":",
"i",
"]",
".",
"strip",
"(",
")",
"v",
"=",
"e",
"[",
"i",
"+",
"1",
":",
"]",
".",
"strip",
"(",
")",
"headers",
"[",
"k",
"]",
"=",
"v",
"# hbytes points to the start of the '\\n\\n' at the end of the header,",
"# so 2 bytes beyond this is the start of the body. The body EXCLUDES",
"# the final two bytes, which are '\\x00\\n'. Note that these 2 bytes",
"# are UNRELATED to the 2-byte '\\n\\n' that Frame.pack() used to insert",
"# into the data stream.",
"body",
"=",
"msgdata",
"[",
"hbytes",
"+",
"2",
":",
"-",
"2",
"]",
"msg",
"=",
"{",
"'cmd'",
":",
"cmd",
",",
"'headers'",
":",
"headers",
",",
"'body'",
":",
"body",
",",
"}",
"return",
"msg"
] |
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
Note that the buffer can contain more than once message. You
should therefore call me in a loop until I return None.
|
[
"I",
"pull",
"one",
"complete",
"message",
"off",
"the",
"buffer",
"and",
"return",
"it",
"decoded",
"as",
"a",
"dict",
".",
"If",
"there",
"is",
"no",
"complete",
"message",
"in",
"the",
"buffer",
"I",
"return",
"None",
"."
] |
python
|
train
| 37.341463 |
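
A standalone sketch of the header-parsing convention noted in getOneMessage above: each header line is split on the first colon only, so values that themselves contain colons (for example ActiveMQ session ids) stay intact. The frame text is illustrative.

hdata = 'MESSAGE\nsession:ID:host-1234\ndestination:/queue/test'
elems = hdata.split('\n')
cmd = elems.pop(0)
headers = {}
for e in elems:
    i = e.find(':')                           # first colon only
    headers[e[:i].strip()] = e[i + 1:].strip()
print(cmd, headers)
# MESSAGE {'session': 'ID:host-1234', 'destination': '/queue/test'}
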
nicferrier/md
|
src/mdlib/cli.py
|
https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/cli.py#L211-L219
|
def do_struct(self, subcmd, opts, message):
"""${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
"""
client = MdClient(self.maildir, filesystem=self.filesystem)
as_json = getattr(opts, "json", False)
client.getstruct(message, as_json=as_json, stream=self.stdout)
|
[
"def",
"do_struct",
"(",
"self",
",",
"subcmd",
",",
"opts",
",",
"message",
")",
":",
"client",
"=",
"MdClient",
"(",
"self",
".",
"maildir",
",",
"filesystem",
"=",
"self",
".",
"filesystem",
")",
"as_json",
"=",
"getattr",
"(",
"opts",
",",
"\"json\"",
",",
"False",
")",
"client",
".",
"getstruct",
"(",
"message",
",",
"as_json",
"=",
"as_json",
",",
"stream",
"=",
"self",
".",
"stdout",
")"
] |
${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
|
[
"$",
"{",
"cmd_name",
"}",
":",
"get",
"the",
"structure",
"of",
"the",
"specified",
"message"
] |
python
|
train
| 38.777778 |
pyblish/pyblish-qml
|
pyblish_qml/ipc/formatting.py
|
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/ipc/formatting.py#L53-L58
|
def format_records(records):
"""Serialise multiple records"""
formatted = list()
for record_ in records:
formatted.append(format_record(record_))
return formatted
|
[
"def",
"format_records",
"(",
"records",
")",
":",
"formatted",
"=",
"list",
"(",
")",
"for",
"record_",
"in",
"records",
":",
"formatted",
".",
"append",
"(",
"format_record",
"(",
"record_",
")",
")",
"return",
"formatted"
] |
Serialise multiple records
|
[
"Serialise",
"multiple",
"records"
] |
python
|
train
| 30.166667 |
PmagPy/PmagPy
|
pmagpy/validate_upload3.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload3.py#L588-L599
|
def get_degree_cols(df):
"""
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
"""
vals = ['lon_w', 'lon_e', 'lat_lon_precision', 'pole_lon',
'paleolon', 'paleolon_sigma',
'lon', 'lon_sigma', 'vgp_lon', 'paleo_lon', 'paleo_lon_sigma',
'azimuth', 'azimuth_dec_correction', 'dir_dec',
'geographic_precision', 'bed_dip_direction']
relevant_cols = list(set(vals).intersection(df.columns))
return relevant_cols
|
[
"def",
"get_degree_cols",
"(",
"df",
")",
":",
"vals",
"=",
"[",
"'lon_w'",
",",
"'lon_e'",
",",
"'lat_lon_precision'",
",",
"'pole_lon'",
",",
"'paleolon'",
",",
"'paleolon_sigma'",
",",
"'lon'",
",",
"'lon_sigma'",
",",
"'vgp_lon'",
",",
"'paleo_lon'",
",",
"'paleo_lon_sigma'",
",",
"'azimuth'",
",",
"'azimuth_dec_correction'",
",",
"'dir_dec'",
",",
"'geographic_precision'",
",",
"'bed_dip_direction'",
"]",
"relevant_cols",
"=",
"list",
"(",
"set",
"(",
"vals",
")",
".",
"intersection",
"(",
"df",
".",
"columns",
")",
")",
"return",
"relevant_cols"
] |
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
|
[
"Take",
"in",
"a",
"pandas",
"DataFrame",
"and",
"return",
"a",
"list",
"of",
"columns",
"that",
"are",
"in",
"that",
"DataFrame",
"AND",
"should",
"be",
"between",
"0",
"-",
"360",
"degrees",
"."
] |
python
|
train
| 45.25 |
mlperf/training
|
reinforcement/tensorflow/minigo/oneoffs/eval_sgf_to_cbt.py
|
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/oneoffs/eval_sgf_to_cbt.py#L179-L233
|
def write_eval_records(bt_table, game_data, last_game):
"""Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
"""
eval_num = last_game
# Each column counts as a mutation so max rows is ~10000
GAMES_PER_COMMIT = 2000
for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):
assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exists"
assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists"
rows = []
for i, metadata in enumerate(games):
eval_num += 1
row_name = EVAL_PREFIX.format(eval_num)
row = bt_table.row(row_name)
for column, value in metadata:
row.set_cell(METADATA, column, value)
rows.append(row)
# For each batch of games print a couple of the rows being added.
if i < 5 or i + 5 > len(games):
print("\t", i, row_name, metadata[6][1])
if eval_num == last_game + len(games):
test = input("Commit ('y'/'yes' required): ")
if test.lower() not in ('y', 'yes'):
break
# TODO(derek): Figure out how to condition on atomic counter update.
# Condition all updates on the current value of last_game
game_num_update = bt_table.row(TABLE_STATE)
game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)
print(TABLE_STATE, eval_num)
response = bt_table.mutate_rows(rows)
# validate that all rows written successfully
any_bad = False
for i, status in enumerate(response):
if status.code is not 0:
print("Row number {} failed to write {}".format(i, status))
any_bad = True
if any_bad:
break
game_num_update.commit()
|
[
"def",
"write_eval_records",
"(",
"bt_table",
",",
"game_data",
",",
"last_game",
")",
":",
"eval_num",
"=",
"last_game",
"# Each column counts as a mutation so max rows is ~10000",
"GAMES_PER_COMMIT",
"=",
"2000",
"for",
"games",
"in",
"grouper",
"(",
"tqdm",
"(",
"game_data",
")",
",",
"GAMES_PER_COMMIT",
")",
":",
"assert",
"bt_table",
".",
"read_row",
"(",
"EVAL_PREFIX",
".",
"format",
"(",
"eval_num",
")",
")",
",",
"\"Prev row doesn't exists\"",
"assert",
"bt_table",
".",
"read_row",
"(",
"EVAL_PREFIX",
".",
"format",
"(",
"eval_num",
"+",
"1",
")",
")",
"is",
"None",
",",
"\"Row already exists\"",
"rows",
"=",
"[",
"]",
"for",
"i",
",",
"metadata",
"in",
"enumerate",
"(",
"games",
")",
":",
"eval_num",
"+=",
"1",
"row_name",
"=",
"EVAL_PREFIX",
".",
"format",
"(",
"eval_num",
")",
"row",
"=",
"bt_table",
".",
"row",
"(",
"row_name",
")",
"for",
"column",
",",
"value",
"in",
"metadata",
":",
"row",
".",
"set_cell",
"(",
"METADATA",
",",
"column",
",",
"value",
")",
"rows",
".",
"append",
"(",
"row",
")",
"# For each batch of games print a couple of the rows being added.",
"if",
"i",
"<",
"5",
"or",
"i",
"+",
"5",
">",
"len",
"(",
"games",
")",
":",
"print",
"(",
"\"\\t\"",
",",
"i",
",",
"row_name",
",",
"metadata",
"[",
"6",
"]",
"[",
"1",
"]",
")",
"if",
"eval_num",
"==",
"last_game",
"+",
"len",
"(",
"games",
")",
":",
"test",
"=",
"input",
"(",
"\"Commit ('y'/'yes' required): \"",
")",
"if",
"test",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'y'",
",",
"'yes'",
")",
":",
"break",
"# TODO(derek): Figure out how to condition on atomic counter update.",
"# Condition all updates on the current value of last_game",
"game_num_update",
"=",
"bt_table",
".",
"row",
"(",
"TABLE_STATE",
")",
"game_num_update",
".",
"set_cell",
"(",
"METADATA",
",",
"EVAL_GAME_COUNTER",
",",
"eval_num",
")",
"print",
"(",
"TABLE_STATE",
",",
"eval_num",
")",
"response",
"=",
"bt_table",
".",
"mutate_rows",
"(",
"rows",
")",
"# validate that all rows written successfully",
"any_bad",
"=",
"False",
"for",
"i",
",",
"status",
"in",
"enumerate",
"(",
"response",
")",
":",
"if",
"status",
".",
"code",
"is",
"not",
"0",
":",
"print",
"(",
"\"Row number {} failed to write {}\"",
".",
"format",
"(",
"i",
",",
"status",
")",
")",
"any_bad",
"=",
"True",
"if",
"any_bad",
":",
"break",
"game_num_update",
".",
"commit",
"(",
")"
] |
Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
|
[
"Write",
"all",
"eval_records",
"to",
"eval_table"
] |
python
|
train
| 37.927273 |
AtomHash/evernode
|
evernode/classes/form_data.py
|
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/form_data.py#L38-L46
|
def add_file(self, name, required=False, error=None, extensions=None):
""" Add a file field to parse on request (uploads) """
if name is None:
return
self.file_arguments.append(dict(
name=name,
required=required,
error=error,
extensions=extensions))
|
[
"def",
"add_file",
"(",
"self",
",",
"name",
",",
"required",
"=",
"False",
",",
"error",
"=",
"None",
",",
"extensions",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"file_arguments",
".",
"append",
"(",
"dict",
"(",
"name",
"=",
"name",
",",
"required",
"=",
"required",
",",
"error",
"=",
"error",
",",
"extensions",
"=",
"extensions",
")",
")"
] |
Add a file field to parse on request (uploads)
|
[
"Add",
"a",
"file",
"field",
"to",
"parse",
"on",
"request",
"(",
"uploads",
")"
] |
python
|
train
| 37 |
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L1279-L1297
|
def uniform_binning_correction(x, n_bits=8):
"""Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)).
"""
n_bins = 2**n_bits
batch_size, height, width, n_channels = common_layers.shape_list(x)
hwc = float(height * width * n_channels)
x = x + tf.random_uniform(
shape=(batch_size, height, width, n_channels),
minval=0.0, maxval=1.0/n_bins)
objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
return x, objective
|
[
"def",
"uniform_binning_correction",
"(",
"x",
",",
"n_bits",
"=",
"8",
")",
":",
"n_bins",
"=",
"2",
"**",
"n_bits",
"batch_size",
",",
"height",
",",
"width",
",",
"n_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"hwc",
"=",
"float",
"(",
"height",
"*",
"width",
"*",
"n_channels",
")",
"x",
"=",
"x",
"+",
"tf",
".",
"random_uniform",
"(",
"shape",
"=",
"(",
"batch_size",
",",
"height",
",",
"width",
",",
"n_channels",
")",
",",
"minval",
"=",
"0.0",
",",
"maxval",
"=",
"1.0",
"/",
"n_bins",
")",
"objective",
"=",
"-",
"np",
".",
"log",
"(",
"n_bins",
")",
"*",
"hwc",
"*",
"tf",
".",
"ones",
"(",
"batch_size",
")",
"return",
"x",
",",
"objective"
] |
Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)).
|
[
"Replaces",
"x^i",
"with",
"q^i",
"(",
"x",
")",
"=",
"U",
"(",
"x",
"x",
"+",
"1",
".",
"0",
"/",
"256",
".",
"0",
")",
"."
] |
python
|
train
| 30.210526 |
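
A NumPy sketch of the uniform dequantization above: every element gets uniform noise drawn from [0, 1/2**n_bits), and the per-example objective is the constant -log(2**n_bits) * height * width * channels. Shapes are illustrative.

import numpy as np

n_bits = 8
n_bins = 2 ** n_bits
x = np.zeros((2, 4, 4, 3))                                   # toy NHWC batch
x = x + np.random.uniform(0.0, 1.0 / n_bins, size=x.shape)
objective = -np.log(n_bins) * float(4 * 4 * 3) * np.ones(2)  # one value per example
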
thilux/tvdb_client
|
tvdb_client/clients/ApiV2Client.py
|
https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L252-L271
|
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
[
"def",
"get_updated",
"(",
"self",
",",
"from_time",
",",
"to_time",
"=",
"None",
")",
":",
"arguments",
"=",
"locals",
"(",
")",
"optional_parameters",
"=",
"{",
"'to_time'",
":",
"'toTime'",
"}",
"query_string",
"=",
"'fromTime=%s&%s'",
"%",
"(",
"from_time",
",",
"utils",
".",
"query_param_string_from_option_args",
"(",
"optional_parameters",
",",
"arguments",
")",
")",
"raw_response",
"=",
"requests_util",
".",
"run_request",
"(",
"'get'",
",",
"self",
".",
"API_BASE_URL",
"+",
"'/uodated/query?%s'",
"%",
"query_string",
",",
"headers",
"=",
"self",
".",
"__get_header_with_auth",
"(",
")",
")",
"return",
"self",
".",
"parse_raw_response",
"(",
"raw_response",
")"
] |
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
|
[
"Retrives",
"a",
"list",
"of",
"series",
"that",
"have",
"changed",
"on",
"TheTVDB",
"since",
"a",
"provided",
"from",
"time",
"parameter",
"and",
"optionally",
"to",
"an",
"specified",
"to",
"time",
"."
] |
python
|
train
| 50.7 |
softlayer/softlayer-python
|
SoftLayer/CLI/virt/list.py
|
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/virt/list.py#L70-L93
|
def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network,
hourly, monthly, tag, columns, limit):
"""List virtual servers."""
vsi = SoftLayer.VSManager(env.client)
guests = vsi.list_instances(hourly=hourly,
monthly=monthly,
hostname=hostname,
domain=domain,
cpus=cpu,
memory=memory,
datacenter=datacenter,
nic_speed=network,
tags=tag,
mask=columns.mask(),
limit=limit)
table = formatting.Table(columns.columns)
table.sortby = sortby
for guest in guests:
table.add_row([value or formatting.blank()
for value in columns.row(guest)])
env.fout(table)
|
[
"def",
"cli",
"(",
"env",
",",
"sortby",
",",
"cpu",
",",
"domain",
",",
"datacenter",
",",
"hostname",
",",
"memory",
",",
"network",
",",
"hourly",
",",
"monthly",
",",
"tag",
",",
"columns",
",",
"limit",
")",
":",
"vsi",
"=",
"SoftLayer",
".",
"VSManager",
"(",
"env",
".",
"client",
")",
"guests",
"=",
"vsi",
".",
"list_instances",
"(",
"hourly",
"=",
"hourly",
",",
"monthly",
"=",
"monthly",
",",
"hostname",
"=",
"hostname",
",",
"domain",
"=",
"domain",
",",
"cpus",
"=",
"cpu",
",",
"memory",
"=",
"memory",
",",
"datacenter",
"=",
"datacenter",
",",
"nic_speed",
"=",
"network",
",",
"tags",
"=",
"tag",
",",
"mask",
"=",
"columns",
".",
"mask",
"(",
")",
",",
"limit",
"=",
"limit",
")",
"table",
"=",
"formatting",
".",
"Table",
"(",
"columns",
".",
"columns",
")",
"table",
".",
"sortby",
"=",
"sortby",
"for",
"guest",
"in",
"guests",
":",
"table",
".",
"add_row",
"(",
"[",
"value",
"or",
"formatting",
".",
"blank",
"(",
")",
"for",
"value",
"in",
"columns",
".",
"row",
"(",
"guest",
")",
"]",
")",
"env",
".",
"fout",
"(",
"table",
")"
] |
List virtual servers.
|
[
"List",
"virtual",
"servers",
"."
] |
python
|
train
| 38.625 |
google/grr
|
grr/server/grr_response_server/gui/api_value_renderers.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_value_renderers.py#L91-L126
|
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in itervalues(ApiValueRenderer.classes):
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if inspect.isclass(value):
if issubclass(value_cls, candidate_class):
candidates.append((candidate, candidate_class))
else:
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError(
"No renderer found for value %s." % value.__class__.__name__)
candidates = sorted(
candidates, key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(limit_lists=limit_lists)
|
[
"def",
"GetRendererForValueOrClass",
"(",
"cls",
",",
"value",
",",
"limit_lists",
"=",
"-",
"1",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"value",
")",
":",
"value_cls",
"=",
"value",
"else",
":",
"value_cls",
"=",
"value",
".",
"__class__",
"cache_key",
"=",
"\"%s_%d\"",
"%",
"(",
"value_cls",
".",
"__name__",
",",
"limit_lists",
")",
"try",
":",
"renderer_cls",
"=",
"cls",
".",
"_renderers_cache",
"[",
"cache_key",
"]",
"except",
"KeyError",
":",
"candidates",
"=",
"[",
"]",
"for",
"candidate",
"in",
"itervalues",
"(",
"ApiValueRenderer",
".",
"classes",
")",
":",
"if",
"candidate",
".",
"value_class",
":",
"candidate_class",
"=",
"candidate",
".",
"value_class",
"else",
":",
"continue",
"if",
"inspect",
".",
"isclass",
"(",
"value",
")",
":",
"if",
"issubclass",
"(",
"value_cls",
",",
"candidate_class",
")",
":",
"candidates",
".",
"append",
"(",
"(",
"candidate",
",",
"candidate_class",
")",
")",
"else",
":",
"if",
"isinstance",
"(",
"value",
",",
"candidate_class",
")",
":",
"candidates",
".",
"append",
"(",
"(",
"candidate",
",",
"candidate_class",
")",
")",
"if",
"not",
"candidates",
":",
"raise",
"RuntimeError",
"(",
"\"No renderer found for value %s.\"",
"%",
"value",
".",
"__class__",
".",
"__name__",
")",
"candidates",
"=",
"sorted",
"(",
"candidates",
",",
"key",
"=",
"lambda",
"candidate",
":",
"len",
"(",
"candidate",
"[",
"1",
"]",
".",
"mro",
"(",
")",
")",
")",
"renderer_cls",
"=",
"candidates",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"cls",
".",
"_renderers_cache",
"[",
"cache_key",
"]",
"=",
"renderer_cls",
"return",
"renderer_cls",
"(",
"limit_lists",
"=",
"limit_lists",
")"
] |
Returns renderer corresponding to a given value and rendering args.
|
[
"Returns",
"renderer",
"corresponding",
"to",
"a",
"given",
"value",
"and",
"rendering",
"args",
"."
] |
python
|
train
| 32.916667 |
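
A minimal sketch of the dispatch rule in GetRendererForValueOrClass above: among all handlers whose registered class matches the value, the one bound to the class with the longest MRO (the most derived match) wins. The toy registry and classes are illustrative.

class Base:
    pass

class Child(Base):
    pass

registry = {Base: 'base renderer', Child: 'child renderer'}
value = Child()
candidates = [(handler, klass) for klass, handler in registry.items()
              if isinstance(value, klass)]
candidates.sort(key=lambda c: len(c[1].mro()))  # most specific class sorts last
print(candidates[-1][0])                        # 'child renderer'
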
payu-org/payu
|
payu/namcouple.py
|
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/namcouple.py#L35-L59
|
def substitute_timestep(self, regex, timestep):
"""
Substitute a new timestep value using regex.
"""
# Make one change at a time, each change affects subsequent matches.
timestep_changed = False
while True:
matches = re.finditer(regex, self.str, re.MULTILINE | re.DOTALL)
none_updated = True
for m in matches:
if m.group(1) == timestep:
continue
else:
self.str = (self.str[:m.start(1)] + timestep +
self.str[m.end(1):])
none_updated = False
timestep_changed = True
break
if none_updated:
break
if not timestep_changed:
sys.stderr.write('WARNING: no update with {0}.\n'.format(regex))
|
[
"def",
"substitute_timestep",
"(",
"self",
",",
"regex",
",",
"timestep",
")",
":",
"# Make one change at a time, each change affects subsequent matches.",
"timestep_changed",
"=",
"False",
"while",
"True",
":",
"matches",
"=",
"re",
".",
"finditer",
"(",
"regex",
",",
"self",
".",
"str",
",",
"re",
".",
"MULTILINE",
"|",
"re",
".",
"DOTALL",
")",
"none_updated",
"=",
"True",
"for",
"m",
"in",
"matches",
":",
"if",
"m",
".",
"group",
"(",
"1",
")",
"==",
"timestep",
":",
"continue",
"else",
":",
"self",
".",
"str",
"=",
"(",
"self",
".",
"str",
"[",
":",
"m",
".",
"start",
"(",
"1",
")",
"]",
"+",
"timestep",
"+",
"self",
".",
"str",
"[",
"m",
".",
"end",
"(",
"1",
")",
":",
"]",
")",
"none_updated",
"=",
"False",
"timestep_changed",
"=",
"True",
"break",
"if",
"none_updated",
":",
"break",
"if",
"not",
"timestep_changed",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'WARNING: no update with {0}.\\n'",
".",
"format",
"(",
"regex",
")",
")"
] |
Substitute a new timestep value using regex.
|
[
"Substitute",
"a",
"new",
"timestep",
"value",
"using",
"regex",
"."
] |
python
|
train
| 34.32 |
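
A small re sketch of the group(1) splice used by substitute_timestep above: the match object's start/end offsets for group 1 let you replace only the captured value while leaving the surrounding text untouched. The pattern and text below are illustrative, not the actual namcouple syntax.

import re

text = '$RUNTIME 3600 3600 1800'
m = re.search(r'\$RUNTIME \d+ \d+ (\d+)', text)
new_text = text[:m.start(1)] + '900' + text[m.end(1):]
print(new_text)  # $RUNTIME 3600 3600 900
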
fracpete/python-weka-wrapper3
|
python/weka/classifiers.py
|
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/classifiers.py#L1133-L1155
|
def crossvalidate_model(self, classifier, data, num_folds, rnd, output=None):
"""
Crossvalidates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
"""
if output is None:
generator = []
else:
generator = [output.jobject]
javabridge.call(
self.jobject, "crossValidateModel",
"(Lweka/classifiers/Classifier;Lweka/core/Instances;ILjava/util/Random;[Ljava/lang/Object;)V",
classifier.jobject, data.jobject, num_folds, rnd.jobject, generator)
|
[
"def",
"crossvalidate_model",
"(",
"self",
",",
"classifier",
",",
"data",
",",
"num_folds",
",",
"rnd",
",",
"output",
"=",
"None",
")",
":",
"if",
"output",
"is",
"None",
":",
"generator",
"=",
"[",
"]",
"else",
":",
"generator",
"=",
"[",
"output",
".",
"jobject",
"]",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"crossValidateModel\"",
",",
"\"(Lweka/classifiers/Classifier;Lweka/core/Instances;ILjava/util/Random;[Ljava/lang/Object;)V\"",
",",
"classifier",
".",
"jobject",
",",
"data",
".",
"jobject",
",",
"num_folds",
",",
"rnd",
".",
"jobject",
",",
"generator",
")"
] |
Crossvalidates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
|
[
"Crossvalidates",
"the",
"model",
"using",
"the",
"specified",
"data",
"number",
"of",
"folds",
"and",
"random",
"number",
"generator",
"wrapper",
"."
] |
python
|
train
| 42.608696 |
fastai/fastai
|
fastai/torch_core.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L198-L214
|
def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
"Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest."
split_params = []
for l in layer_groups:
l1,l2 = [],[]
for c in l.children():
if isinstance(c, no_wd_types): l2 += list(trainable_params(c))
elif isinstance(c, bias_types):
bias = c.bias if hasattr(c, 'bias') else None
l1 += [p for p in trainable_params(c) if not (p is bias)]
if bias is not None: l2.append(bias)
else: l1 += list(trainable_params(c))
#Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order
#for the optimizer load of state_dict
l1,l2 = uniqueify(l1),uniqueify(l2)
split_params += [l1, l2]
return split_params
|
[
"def",
"split_no_wd_params",
"(",
"layer_groups",
":",
"Collection",
"[",
"nn",
".",
"Module",
"]",
")",
"->",
"List",
"[",
"List",
"[",
"nn",
".",
"Parameter",
"]",
"]",
":",
"split_params",
"=",
"[",
"]",
"for",
"l",
"in",
"layer_groups",
":",
"l1",
",",
"l2",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"c",
"in",
"l",
".",
"children",
"(",
")",
":",
"if",
"isinstance",
"(",
"c",
",",
"no_wd_types",
")",
":",
"l2",
"+=",
"list",
"(",
"trainable_params",
"(",
"c",
")",
")",
"elif",
"isinstance",
"(",
"c",
",",
"bias_types",
")",
":",
"bias",
"=",
"c",
".",
"bias",
"if",
"hasattr",
"(",
"c",
",",
"'bias'",
")",
"else",
"None",
"l1",
"+=",
"[",
"p",
"for",
"p",
"in",
"trainable_params",
"(",
"c",
")",
"if",
"not",
"(",
"p",
"is",
"bias",
")",
"]",
"if",
"bias",
"is",
"not",
"None",
":",
"l2",
".",
"append",
"(",
"bias",
")",
"else",
":",
"l1",
"+=",
"list",
"(",
"trainable_params",
"(",
"c",
")",
")",
"#Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order",
"#for the optimizer load of state_dict",
"l1",
",",
"l2",
"=",
"uniqueify",
"(",
"l1",
")",
",",
"uniqueify",
"(",
"l2",
")",
"split_params",
"+=",
"[",
"l1",
",",
"l2",
"]",
"return",
"split_params"
] |
Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest.
|
[
"Separate",
"the",
"parameters",
"in",
"layer_groups",
"between",
"no_wd_types",
"and",
"bias",
"(",
"bias_types",
")",
"from",
"the",
"rest",
"."
] |
python
|
train
| 53.176471 |
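A small sketch of how the split_no_wd_params record above behaves, assuming fastai v1 (where this function lives in fastai.torch_core); the layer groups are illustrative.

import torch.nn as nn
from fastai.torch_core import split_no_wd_params

# Two layer groups: BatchNorm parameters and Linear biases should land in the
# "no weight decay" sublists, everything else in the regular sublists.
layer_groups = [
    nn.Sequential(nn.Linear(10, 20), nn.BatchNorm1d(20)),
    nn.Sequential(nn.Linear(20, 2)),
]
split = split_no_wd_params(layer_groups)
# The result alternates [wd_params, no_wd_params] for each layer group.
for wd_params, no_wd_params in zip(split[::2], split[1::2]):
    print(len(wd_params), len(no_wd_params))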
scivision/gridaurora
|
gridaurora/calcemissions.py
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L62-L76
|
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
|
[
"def",
"getMetastable",
"(",
"rates",
",",
"ver",
":",
"np",
".",
"ndarray",
",",
"lamb",
",",
"br",
",",
"reactfn",
":",
"Path",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"reactfn",
",",
"'r'",
")",
"as",
"f",
":",
"A",
"=",
"f",
"[",
"'/metastable/A'",
"]",
"[",
":",
"]",
"lambnew",
"=",
"f",
"[",
"'/metastable/lambda'",
"]",
".",
"value",
".",
"ravel",
"(",
"order",
"=",
"'F'",
")",
"# some are not 1-D!",
"vnew",
"=",
"np",
".",
"concatenate",
"(",
"(",
"A",
"[",
":",
"2",
"]",
"*",
"rates",
".",
"loc",
"[",
"...",
",",
"'no1s'",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
",",
"A",
"[",
"2",
":",
"4",
"]",
"*",
"rates",
".",
"loc",
"[",
"...",
",",
"'no1d'",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
",",
"A",
"[",
"4",
":",
"]",
"*",
"rates",
".",
"loc",
"[",
"...",
",",
"'noii2p'",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
")",
",",
"axis",
"=",
"-",
"1",
")",
"assert",
"vnew",
".",
"shape",
"==",
"(",
"rates",
".",
"shape",
"[",
"0",
"]",
",",
"A",
".",
"size",
")",
"return",
"catvl",
"(",
"rates",
".",
"alt_km",
",",
"ver",
",",
"vnew",
",",
"lamb",
",",
"lambnew",
",",
"br",
")"
] |
concatenate along the reaction dimension, axis=-1
|
[
"concatenate",
"along",
"the",
"reaction",
"dimension",
"axis",
"=",
"-",
"1"
] |
python
|
train
| 42.266667 |
cwoebker/pen
|
pen/edit.py
|
https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L73-L82
|
def split_focus(self):
"""Divide the focus edit widget at the cursor location."""
focus = self.lines[self.focus]
pos = focus.edit_pos
edit = urwid.Edit("", focus.edit_text[pos:], allow_tab=True)
edit.original_text = ""
focus.set_edit_text(focus.edit_text[:pos])
edit.set_edit_pos(0)
self.lines.insert(self.focus + 1, edit)
|
[
"def",
"split_focus",
"(",
"self",
")",
":",
"focus",
"=",
"self",
".",
"lines",
"[",
"self",
".",
"focus",
"]",
"pos",
"=",
"focus",
".",
"edit_pos",
"edit",
"=",
"urwid",
".",
"Edit",
"(",
"\"\"",
",",
"focus",
".",
"edit_text",
"[",
"pos",
":",
"]",
",",
"allow_tab",
"=",
"True",
")",
"edit",
".",
"original_text",
"=",
"\"\"",
"focus",
".",
"set_edit_text",
"(",
"focus",
".",
"edit_text",
"[",
":",
"pos",
"]",
")",
"edit",
".",
"set_edit_pos",
"(",
"0",
")",
"self",
".",
"lines",
".",
"insert",
"(",
"self",
".",
"focus",
"+",
"1",
",",
"edit",
")"
] |
Divide the focus edit widget at the cursor location.
|
[
"Divide",
"the",
"focus",
"edit",
"widget",
"at",
"the",
"cursor",
"location",
"."
] |
python
|
train
| 37.8 |
secdev/scapy
|
scapy/main.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/main.py#L336-L349
|
def update_session(fname=None):
"""Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from"""
if fname is None:
fname = conf.session
try:
s = six.moves.cPickle.load(gzip.open(fname, "rb"))
except IOError:
s = six.moves.cPickle.load(open(fname, "rb"))
scapy_session = six.moves.builtins.__dict__["scapy_session"]
scapy_session.update(s)
update_ipython_session(scapy_session)
|
[
"def",
"update_session",
"(",
"fname",
"=",
"None",
")",
":",
"if",
"fname",
"is",
"None",
":",
"fname",
"=",
"conf",
".",
"session",
"try",
":",
"s",
"=",
"six",
".",
"moves",
".",
"cPickle",
".",
"load",
"(",
"gzip",
".",
"open",
"(",
"fname",
",",
"\"rb\"",
")",
")",
"except",
"IOError",
":",
"s",
"=",
"six",
".",
"moves",
".",
"cPickle",
".",
"load",
"(",
"open",
"(",
"fname",
",",
"\"rb\"",
")",
")",
"scapy_session",
"=",
"six",
".",
"moves",
".",
"builtins",
".",
"__dict__",
"[",
"\"scapy_session\"",
"]",
"scapy_session",
".",
"update",
"(",
"s",
")",
"update_ipython_session",
"(",
"scapy_session",
")"
] |
Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from
|
[
"Update",
"current",
"Scapy",
"session",
"from",
"the",
"file",
"specified",
"in",
"the",
"fname",
"arg",
"."
] |
python
|
train
| 35 |
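A short usage sketch for the update_session record above. It is meant for an interactive Scapy session, since the function merges the saved variables into the session object that interactive mode sets up; the file name is illustrative.

from scapy.main import save_session, update_session

# Inside an interactive Scapy session:
save_session("scapy_session.gz")     # persist the current session variables
# ... later, possibly after defining new variables ...
update_session("scapy_session.gz")   # merge the saved variables back into the session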
UCSBarchlab/PyRTL
|
pyrtl/helperfuncs.py
|
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/helperfuncs.py#L226-L274
|
def formatted_str_to_val(data, format, enum_set=None):
""" Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
:param enum_set: an iterable of enums which are used as part of the converstion process
Given a string (not a wirevector!) covert that to an unsigned integer ready for input
to the simulation enviornment. This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
"""
type = format[0]
bitwidth = int(format[1:].split('/')[0])
bitmask = (1 << bitwidth)-1
if type == 's':
rval = int(data) & bitmask
elif type == 'x':
rval = int(data, 16)
elif type == 'b':
rval = int(data, 2)
elif type == 'u':
rval = int(data)
if rval < 0:
raise PyrtlError('unsigned format requested, but negative value provided')
elif type == 'e':
enumname = format.split('/')[1]
enum_inst_list = [e for e in enum_set if e.__name__ == enumname]
if len(enum_inst_list) == 0:
raise PyrtlError('enum "{}" not found in passed enum_set "{}"'
.format(enumname, enum_set))
rval = getattr(enum_inst_list[0], data).value
else:
raise PyrtlError('unknown format type {}'.format(format))
return rval
|
[
"def",
"formatted_str_to_val",
"(",
"data",
",",
"format",
",",
"enum_set",
"=",
"None",
")",
":",
"type",
"=",
"format",
"[",
"0",
"]",
"bitwidth",
"=",
"int",
"(",
"format",
"[",
"1",
":",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
")",
"bitmask",
"=",
"(",
"1",
"<<",
"bitwidth",
")",
"-",
"1",
"if",
"type",
"==",
"'s'",
":",
"rval",
"=",
"int",
"(",
"data",
")",
"&",
"bitmask",
"elif",
"type",
"==",
"'x'",
":",
"rval",
"=",
"int",
"(",
"data",
",",
"16",
")",
"elif",
"type",
"==",
"'b'",
":",
"rval",
"=",
"int",
"(",
"data",
",",
"2",
")",
"elif",
"type",
"==",
"'u'",
":",
"rval",
"=",
"int",
"(",
"data",
")",
"if",
"rval",
"<",
"0",
":",
"raise",
"PyrtlError",
"(",
"'unsigned format requested, but negative value provided'",
")",
"elif",
"type",
"==",
"'e'",
":",
"enumname",
"=",
"format",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
"enum_inst_list",
"=",
"[",
"e",
"for",
"e",
"in",
"enum_set",
"if",
"e",
".",
"__name__",
"==",
"enumname",
"]",
"if",
"len",
"(",
"enum_inst_list",
")",
"==",
"0",
":",
"raise",
"PyrtlError",
"(",
"'enum \"{}\" not found in passed enum_set \"{}\"'",
".",
"format",
"(",
"enumname",
",",
"enum_set",
")",
")",
"rval",
"=",
"getattr",
"(",
"enum_inst_list",
"[",
"0",
"]",
",",
"data",
")",
".",
"value",
"else",
":",
"raise",
"PyrtlError",
"(",
"'unknown format type {}'",
".",
"format",
"(",
"format",
")",
")",
"return",
"rval"
] |
Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
:param enum_set: an iterable of enums which are used as part of the converstion process
Given a string (not a wirevector!) covert that to an unsigned integer ready for input
to the simulation enviornment. This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
|
[
"Return",
"an",
"unsigned",
"integer",
"representation",
"of",
"the",
"data",
"given",
"format",
"specified",
"."
] |
python
|
train
| 42 |
odlgroup/odl
|
odl/contrib/solvers/spdhg/misc.py
|
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/solvers/spdhg/misc.py#L311-L398
|
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
"""Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
"""
# Callback object
callback = kwargs.pop('callback', None)
if callback is not None and not callable(callback):
raise TypeError('`callback` {} is not callable'.format(callback))
factr = 1 / (grad.norm**2 * alpha)
q = p.copy()
x = data.space.zero()
t = 1.
if tol is None:
def convergence_eval(p1, p2):
return False
else:
def convergence_eval(p1, p2):
return (p1 - p2).norm() / p1.norm() < tol
pnew = p.copy()
if callback is not None:
callback(p)
for k in range(niter):
t0 = t
grad.adjoint(q, out=x)
proj_C(data - alpha * x, out=x)
grad(x, out=pnew)
pnew *= factr
pnew += q
proj_P(pnew, out=pnew)
converged = convergence_eval(p, pnew)
if not converged:
# update step size
t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.
# calculate next iterate
q[:] = pnew + (t0 - 1) / t * (pnew - p)
p[:] = pnew
if converged:
t = None
break
if callback is not None:
callback(p)
# get current image estimate
x = proj_C(data - alpha * grad.adjoint(p))
return x
|
[
"def",
"fgp_dual",
"(",
"p",
",",
"data",
",",
"alpha",
",",
"niter",
",",
"grad",
",",
"proj_C",
",",
"proj_P",
",",
"tol",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Callback object",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"None",
")",
"if",
"callback",
"is",
"not",
"None",
"and",
"not",
"callable",
"(",
"callback",
")",
":",
"raise",
"TypeError",
"(",
"'`callback` {} is not callable'",
".",
"format",
"(",
"callback",
")",
")",
"factr",
"=",
"1",
"/",
"(",
"grad",
".",
"norm",
"**",
"2",
"*",
"alpha",
")",
"q",
"=",
"p",
".",
"copy",
"(",
")",
"x",
"=",
"data",
".",
"space",
".",
"zero",
"(",
")",
"t",
"=",
"1.",
"if",
"tol",
"is",
"None",
":",
"def",
"convergence_eval",
"(",
"p1",
",",
"p2",
")",
":",
"return",
"False",
"else",
":",
"def",
"convergence_eval",
"(",
"p1",
",",
"p2",
")",
":",
"return",
"(",
"p1",
"-",
"p2",
")",
".",
"norm",
"(",
")",
"/",
"p1",
".",
"norm",
"(",
")",
"<",
"tol",
"pnew",
"=",
"p",
".",
"copy",
"(",
")",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"p",
")",
"for",
"k",
"in",
"range",
"(",
"niter",
")",
":",
"t0",
"=",
"t",
"grad",
".",
"adjoint",
"(",
"q",
",",
"out",
"=",
"x",
")",
"proj_C",
"(",
"data",
"-",
"alpha",
"*",
"x",
",",
"out",
"=",
"x",
")",
"grad",
"(",
"x",
",",
"out",
"=",
"pnew",
")",
"pnew",
"*=",
"factr",
"pnew",
"+=",
"q",
"proj_P",
"(",
"pnew",
",",
"out",
"=",
"pnew",
")",
"converged",
"=",
"convergence_eval",
"(",
"p",
",",
"pnew",
")",
"if",
"not",
"converged",
":",
"# update step size",
"t",
"=",
"(",
"1",
"+",
"np",
".",
"sqrt",
"(",
"1",
"+",
"4",
"*",
"t0",
"**",
"2",
")",
")",
"/",
"2.",
"# calculate next iterate",
"q",
"[",
":",
"]",
"=",
"pnew",
"+",
"(",
"t0",
"-",
"1",
")",
"/",
"t",
"*",
"(",
"pnew",
"-",
"p",
")",
"p",
"[",
":",
"]",
"=",
"pnew",
"if",
"converged",
":",
"t",
"=",
"None",
"break",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"p",
")",
"# get current image estimate",
"x",
"=",
"proj_C",
"(",
"data",
"-",
"alpha",
"*",
"grad",
".",
"adjoint",
"(",
"p",
")",
")",
"return",
"x"
] |
Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
|
[
"Computes",
"a",
"solution",
"to",
"the",
"ROF",
"problem",
"with",
"the",
"fast",
"gradient",
"projection",
"algorithm",
"."
] |
python
|
train
| 25.306818 |
CitrineInformatics/pypif-sdk
|
pypif_sdk/func/replace_funcs.py
|
https://github.com/CitrineInformatics/pypif-sdk/blob/8b01d10d9a1426d5eef12e4b2f31c4657aa0fe59/pypif_sdk/func/replace_funcs.py#L21-L34
|
def replace_by_key(pif, key, subs, new_key=None, remove=False):
"""Replace values that match a key
Deeply traverses the pif object, looking for `key` and
replacing values in accordance with `subs`. If `new_key`
is set, the replaced values are assigned to that key. If
`remove` is `True`, the old `key` pairs are removed.
"""
if not new_key:
new_key = key
remove = False
orig = pif.as_dictionary()
new = _recurse_replace(orig, to_camel_case(key), to_camel_case(new_key), subs, remove)
return pypif.pif.loads(json.dumps(new))
|
[
"def",
"replace_by_key",
"(",
"pif",
",",
"key",
",",
"subs",
",",
"new_key",
"=",
"None",
",",
"remove",
"=",
"False",
")",
":",
"if",
"not",
"new_key",
":",
"new_key",
"=",
"key",
"remove",
"=",
"False",
"orig",
"=",
"pif",
".",
"as_dictionary",
"(",
")",
"new",
"=",
"_recurse_replace",
"(",
"orig",
",",
"to_camel_case",
"(",
"key",
")",
",",
"to_camel_case",
"(",
"new_key",
")",
",",
"subs",
",",
"remove",
")",
"return",
"pypif",
".",
"pif",
".",
"loads",
"(",
"json",
".",
"dumps",
"(",
"new",
")",
")"
] |
Replace values that match a key
Deeply traverses the pif object, looking for `key` and
replacing values in accordance with `subs`. If `new_key`
is set, the replaced values are assigned to that key. If
`remove` is `True`, the old `key` pairs are removed.
|
[
"Replace",
"values",
"that",
"match",
"a",
"key"
] |
python
|
train
| 40.714286 |
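A hedged usage sketch for the replace_by_key record above. It assumes subs maps old values to their replacements; the chemical system and property values are illustrative.

from pypif.obj import ChemicalSystem, Property
from pypif_sdk.func.replace_funcs import replace_by_key

pif = ChemicalSystem(
    chemical_formula="NaCl",
    properties=[Property(name="Band gap", units="electronvolt")],
)
# Replace the value of every "units" key according to the substitution map.
updated = replace_by_key(pif, "units", {"electronvolt": "eV"})
print(updated.properties[0].units)   # expected: "eV"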
PyGithub/PyGithub
|
github/Repository.py
|
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L1833-L1843
|
def get_downloads(self):
"""
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
"""
return github.PaginatedList.PaginatedList(
github.Download.Download,
self._requester,
self.url + "/downloads",
None
)
|
[
"def",
"get_downloads",
"(",
"self",
")",
":",
"return",
"github",
".",
"PaginatedList",
".",
"PaginatedList",
"(",
"github",
".",
"Download",
".",
"Download",
",",
"self",
".",
"_requester",
",",
"self",
".",
"url",
"+",
"\"/downloads\"",
",",
"None",
")"
] |
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
|
[
":",
"calls",
":",
"GET",
"/",
"repos",
"/",
":",
"owner",
"/",
":",
"repo",
"/",
"downloads",
"<http",
":",
"//",
"developer",
".",
"github",
".",
"com",
"/",
"v3",
"/",
"repos",
"/",
"downloads",
">",
"_",
":",
"rtype",
":",
":",
"class",
":",
"github",
".",
"PaginatedList",
".",
"PaginatedList",
"of",
":",
"class",
":",
"github",
".",
"Download",
".",
"Download"
] |
python
|
train
| 38.090909 |
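A minimal usage sketch for the get_downloads record above; the access token and repository name are placeholders.

from github import Github

gh = Github("<personal-access-token>")
repo = gh.get_repo("PyGithub/PyGithub")
# Iterating the PaginatedList fetches pages lazily.
for download in repo.get_downloads():
    print(download.id, download.name)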
CxAalto/gtfspy
|
gtfspy/gtfs.py
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/gtfs.py#L1113-L1131
|
def increment_day_start_ut(self, day_start_ut, n_days=1):
"""Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
unixtime of the previous start of day. If this time is between
12:00 or greater, there *will* be bugs. To solve this, run the
input through day_start_ut first.
n_days: int
number of days to increment
"""
old_tz = self.set_current_process_time_zone()
day0 = time.localtime(day_start_ut + 43200) # time of noon
dayN = time.mktime(day0[:2] + # YYYY, MM
(day0[2] + n_days,) + # DD
(12, 00, 0, 0, 0, -1)) - 43200 # HHMM, etc. Minus 12 hours.
set_process_timezone(old_tz)
return dayN
|
[
"def",
"increment_day_start_ut",
"(",
"self",
",",
"day_start_ut",
",",
"n_days",
"=",
"1",
")",
":",
"old_tz",
"=",
"self",
".",
"set_current_process_time_zone",
"(",
")",
"day0",
"=",
"time",
".",
"localtime",
"(",
"day_start_ut",
"+",
"43200",
")",
"# time of noon",
"dayN",
"=",
"time",
".",
"mktime",
"(",
"day0",
"[",
":",
"2",
"]",
"+",
"# YYYY, MM",
"(",
"day0",
"[",
"2",
"]",
"+",
"n_days",
",",
")",
"+",
"# DD",
"(",
"12",
",",
"00",
",",
"0",
",",
"0",
",",
"0",
",",
"-",
"1",
")",
")",
"-",
"43200",
"# HHMM, etc. Minus 12 hours.",
"set_process_timezone",
"(",
"old_tz",
")",
"return",
"dayN"
] |
Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
unixtime of the previous start of day. If this time is between
12:00 or greater, there *will* be bugs. To solve this, run the
input through day_start_ut first.
n_days: int
number of days to increment
|
[
"Increment",
"the",
"GTFS",
"-",
"definition",
"of",
"day",
"start",
"."
] |
python
|
valid
| 42.368421 |
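A small usage sketch for the increment_day_start_ut record above. The database path and the unixtime are illustrative, and the input must be a genuine "day start" value (before 12:00) as the docstring warns.

from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")            # path to an imported GTFS sqlite database
day_start_ut = 1451857200              # an existing day-start unixtime for the feed
next_day_start = g.increment_day_start_ut(day_start_ut, n_days=1)
print(next_day_start - day_start_ut)   # about 86400 s, except across DST changes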
klen/adrest
|
adrest/utils/emitter.py
|
https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/utils/emitter.py#L114-L126
|
def serialize(self, content):
""" Serialize to JSON.
:return string: serializaed JSON
"""
worker = JSONSerializer(
scheme=self.resource,
options=self.resource._meta.emit_options,
format=self.resource._meta.emit_format,
**self.resource._meta.emit_models
)
return worker.serialize(content)
|
[
"def",
"serialize",
"(",
"self",
",",
"content",
")",
":",
"worker",
"=",
"JSONSerializer",
"(",
"scheme",
"=",
"self",
".",
"resource",
",",
"options",
"=",
"self",
".",
"resource",
".",
"_meta",
".",
"emit_options",
",",
"format",
"=",
"self",
".",
"resource",
".",
"_meta",
".",
"emit_format",
",",
"*",
"*",
"self",
".",
"resource",
".",
"_meta",
".",
"emit_models",
")",
"return",
"worker",
".",
"serialize",
"(",
"content",
")"
] |
Serialize to JSON.
:return string: serializaed JSON
|
[
"Serialize",
"to",
"JSON",
"."
] |
python
|
train
| 28.692308 |
mass-project/mass_api_client
|
mass_api_client/utils.py
|
https://github.com/mass-project/mass_api_client/blob/b200c32c93608bf3b2707fbf0e83a2228702e2c8/mass_api_client/utils.py#L75-L91
|
def process_analyses(analysis_system_instance, analysis_method, sleep_time):
"""Process all analyses which are scheduled for the analysis system instance.
This function does not terminate on its own, give it a SIGINT or Ctrl+C to stop.
:param analysis_system_instance: The analysis system instance for which the analyses are scheduled.
:param analysis_method: A function or method which analyses a scheduled analysis. The function must not take further arguments.
:param sleep_time: Time to wait between polls to the MASS server
"""
try:
while True:
for analysis_request in analysis_system_instance.get_scheduled_analyses():
analysis_method(analysis_request)
time.sleep(sleep_time)
except KeyboardInterrupt:
logging.debug('Shutting down.')
return
|
[
"def",
"process_analyses",
"(",
"analysis_system_instance",
",",
"analysis_method",
",",
"sleep_time",
")",
":",
"try",
":",
"while",
"True",
":",
"for",
"analysis_request",
"in",
"analysis_system_instance",
".",
"get_scheduled_analyses",
"(",
")",
":",
"analysis_method",
"(",
"analysis_request",
")",
"time",
".",
"sleep",
"(",
"sleep_time",
")",
"except",
"KeyboardInterrupt",
":",
"logging",
".",
"debug",
"(",
"'Shutting down.'",
")",
"return"
] |
Process all analyses which are scheduled for the analysis system instance.
This function does not terminate on its own, give it a SIGINT or Ctrl+C to stop.
:param analysis_system_instance: The analysis system instance for which the analyses are scheduled.
:param analysis_method: A function or method which analyses a scheduled analysis. The function must not take further arguments.
:param sleep_time: Time to wait between polls to the MASS server
|
[
"Process",
"all",
"analyses",
"which",
"are",
"scheduled",
"for",
"the",
"analysis",
"system",
"instance",
"."
] |
python
|
train
| 48.705882 |
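A sketch of driving the process_analyses record above. The connection registration and instance lookup are assumptions about the surrounding client API; only the worker callback and the final call follow directly from the code shown.

from mass_api_client import ConnectionManager
from mass_api_client.resources import AnalysisSystemInstance
from mass_api_client.utils import process_analyses

def analyze(scheduled_analysis):
    # Placeholder worker: a real system would fetch the sample and submit a report.
    print("processing", scheduled_analysis)

# Assumed setup: register an API connection, then look up the instance by UUID.
ConnectionManager().register_connection("default", "<api key>", "http://localhost:8000/api/")
instance = AnalysisSystemInstance.get("<instance uuid>")
process_analyses(instance, analyze, sleep_time=60)   # polls until Ctrl+C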
CityOfZion/neo-python
|
neo/Wallets/Wallet.py
|
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L1217-L1232
|
def SignMessage(self, message, script_hash):
"""
Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message
"""
keypair = self.GetKeyByScriptHash(script_hash)
prikey = bytes(keypair.PrivateKey)
res = Crypto.Default().Sign(message, prikey)
return res, keypair.PublicKey
|
[
"def",
"SignMessage",
"(",
"self",
",",
"message",
",",
"script_hash",
")",
":",
"keypair",
"=",
"self",
".",
"GetKeyByScriptHash",
"(",
"script_hash",
")",
"prikey",
"=",
"bytes",
"(",
"keypair",
".",
"PrivateKey",
")",
"res",
"=",
"Crypto",
".",
"Default",
"(",
")",
".",
"Sign",
"(",
"message",
",",
"prikey",
")",
"return",
"res",
",",
"keypair",
".",
"PublicKey"
] |
Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message
|
[
"Sign",
"a",
"message",
"with",
"a",
"specified",
"script_hash",
"."
] |
python
|
train
| 29.9375 |
jazzband/django-pipeline
|
pipeline/finders.py
|
https://github.com/jazzband/django-pipeline/blob/3cd2f93bb47bf8d34447e13ff691f7027e7b07a2/pipeline/finders.py#L26-L37
|
def find(self, path, all=False):
"""
Looks for files in PIPELINE.STYLESHEETS and PIPELINE.JAVASCRIPT
"""
matches = []
for elem in chain(settings.STYLESHEETS.values(), settings.JAVASCRIPT.values()):
if normpath(elem['output_filename']) == normpath(path):
match = safe_join(settings.PIPELINE_ROOT, path)
if not all:
return match
matches.append(match)
return matches
|
[
"def",
"find",
"(",
"self",
",",
"path",
",",
"all",
"=",
"False",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"elem",
"in",
"chain",
"(",
"settings",
".",
"STYLESHEETS",
".",
"values",
"(",
")",
",",
"settings",
".",
"JAVASCRIPT",
".",
"values",
"(",
")",
")",
":",
"if",
"normpath",
"(",
"elem",
"[",
"'output_filename'",
"]",
")",
"==",
"normpath",
"(",
"path",
")",
":",
"match",
"=",
"safe_join",
"(",
"settings",
".",
"PIPELINE_ROOT",
",",
"path",
")",
"if",
"not",
"all",
":",
"return",
"match",
"matches",
".",
"append",
"(",
"match",
")",
"return",
"matches"
] |
Looks for files in PIPELINE.STYLESHEETS and PIPELINE.JAVASCRIPT
|
[
"Looks",
"for",
"files",
"in",
"PIPELINE",
".",
"STYLESHEETS",
"and",
"PIPELINE",
".",
"JAVASCRIPT"
] |
python
|
train
| 40 |
gem/oq-engine
|
openquake/baselib/datastore.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/datastore.py#L184-L195
|
def open(self, mode):
"""
Open the underlying .hdf5 file and the parent, if any
"""
if self.hdf5 == (): # not already open
kw = dict(mode=mode, libver='latest')
if mode == 'r':
kw['swmr'] = True
try:
self.hdf5 = hdf5.File(self.filename, **kw)
except OSError as exc:
raise OSError('%s in %s' % (exc, self.filename))
|
[
"def",
"open",
"(",
"self",
",",
"mode",
")",
":",
"if",
"self",
".",
"hdf5",
"==",
"(",
")",
":",
"# not already open",
"kw",
"=",
"dict",
"(",
"mode",
"=",
"mode",
",",
"libver",
"=",
"'latest'",
")",
"if",
"mode",
"==",
"'r'",
":",
"kw",
"[",
"'swmr'",
"]",
"=",
"True",
"try",
":",
"self",
".",
"hdf5",
"=",
"hdf5",
".",
"File",
"(",
"self",
".",
"filename",
",",
"*",
"*",
"kw",
")",
"except",
"OSError",
"as",
"exc",
":",
"raise",
"OSError",
"(",
"'%s in %s'",
"%",
"(",
"exc",
",",
"self",
".",
"filename",
")",
")"
] |
Open the underlying .hdf5 file and the parent, if any
|
[
"Open",
"the",
"underlying",
".",
"hdf5",
"file",
"and",
"the",
"parent",
"if",
"any"
] |
python
|
train
| 36 |
bcbio/bcbio-nextgen
|
bcbio/cwl/tool.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L313-L320
|
def _run_sbgenomics(args):
"""Run CWL on SevenBridges platform and Cancer Genomics Cloud.
"""
assert not args.no_container, "Seven Bridges runs require containers"
main_file, json_file, project_name = _get_main_and_json(args.directory)
flags = []
cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file]
_run_tool(cmd)
|
[
"def",
"_run_sbgenomics",
"(",
"args",
")",
":",
"assert",
"not",
"args",
".",
"no_container",
",",
"\"Seven Bridges runs require containers\"",
"main_file",
",",
"json_file",
",",
"project_name",
"=",
"_get_main_and_json",
"(",
"args",
".",
"directory",
")",
"flags",
"=",
"[",
"]",
"cmd",
"=",
"[",
"\"sbg-cwl-runner\"",
"]",
"+",
"flags",
"+",
"args",
".",
"toolargs",
"+",
"[",
"main_file",
",",
"json_file",
"]",
"_run_tool",
"(",
"cmd",
")"
] |
Run CWL on SevenBridges platform and Cancer Genomics Cloud.
|
[
"Run",
"CWL",
"on",
"SevenBridges",
"platform",
"and",
"Cancer",
"Genomics",
"Cloud",
"."
] |
python
|
train
| 44.5 |
coin-or/GiMPy
|
src/gimpy/graph.py
|
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L2046-L2067
|
def floyd_warshall_get_path(self, distance, nextn, i, j):
'''
API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
(2) The graph does not have a negative cycle, , ie.
distance[(i,i)] >=0 for all node i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j]
'''
if distance[(i,j)]=='infinity':
return None
k = nextn[(i,j)]
path = self.floyd_warshall_get_path
if i==k:
return [i, j]
else:
return path(distance, nextn, i,k) + [k] + path(distance, nextn, k,j)
|
[
"def",
"floyd_warshall_get_path",
"(",
"self",
",",
"distance",
",",
"nextn",
",",
"i",
",",
"j",
")",
":",
"if",
"distance",
"[",
"(",
"i",
",",
"j",
")",
"]",
"==",
"'infinity'",
":",
"return",
"None",
"k",
"=",
"nextn",
"[",
"(",
"i",
",",
"j",
")",
"]",
"path",
"=",
"self",
".",
"floyd_warshall_get_path",
"if",
"i",
"==",
"k",
":",
"return",
"[",
"i",
",",
"j",
"]",
"else",
":",
"return",
"path",
"(",
"distance",
",",
"nextn",
",",
"i",
",",
"k",
")",
"+",
"[",
"k",
"]",
"+",
"path",
"(",
"distance",
",",
"nextn",
",",
"k",
",",
"j",
")"
] |
API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
(2) The graph does not have a negative cycle, , ie.
distance[(i,i)] >=0 for all node i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j]
|
[
"API",
":",
"floyd_warshall_get_path",
"(",
"self",
"distance",
"nextn",
"i",
"j",
")",
":",
"Description",
":",
"Finds",
"shortest",
"path",
"between",
"i",
"and",
"j",
"using",
"distance",
"and",
"nextn",
"dictionaries",
".",
"Pre",
":",
"(",
"1",
")",
"distance",
"and",
"nextn",
"are",
"outputs",
"of",
"floyd_warshall",
"method",
".",
"(",
"2",
")",
"The",
"graph",
"does",
"not",
"have",
"a",
"negative",
"cycle",
"ie",
".",
"distance",
"[",
"(",
"i",
"i",
")",
"]",
">",
"=",
"0",
"for",
"all",
"node",
"i",
".",
"Return",
":",
"Returns",
"the",
"list",
"of",
"nodes",
"on",
"the",
"path",
"from",
"i",
"to",
"j",
"ie",
".",
"[",
"i",
"...",
"j",
"]"
] |
python
|
train
| 37.318182 |
gabrielfalcao/dominic
|
dominic/xpath/expr.py
|
https://github.com/gabrielfalcao/dominic/blob/a42f418fc288f3b70cb95847b405eaf7b83bb3a0/dominic/xpath/expr.py#L580-L592
|
def axisfn(reverse=False, principal_node_type=xml.dom.Node.ELEMENT_NODE):
"""Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type.
"""
def decorate(f):
f.__name__ = f.__name__.replace('_', '-')
f.reverse = reverse
f.principal_node_type = principal_node_type
return f
return decorate
|
[
"def",
"axisfn",
"(",
"reverse",
"=",
"False",
",",
"principal_node_type",
"=",
"xml",
".",
"dom",
".",
"Node",
".",
"ELEMENT_NODE",
")",
":",
"def",
"decorate",
"(",
"f",
")",
":",
"f",
".",
"__name__",
"=",
"f",
".",
"__name__",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"f",
".",
"reverse",
"=",
"reverse",
"f",
".",
"principal_node_type",
"=",
"principal_node_type",
"return",
"f",
"return",
"decorate"
] |
Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type.
|
[
"Axis",
"function",
"decorator",
"."
] |
python
|
train
| 38.923077 |
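An illustrative use of the axisfn decorator from the record above. The axis function body is only a sketch; what is being demonstrated is the renamed __name__ and the attributes the decorator attaches.

import xml.dom
from dominic.xpath.expr import axisfn

@axisfn(reverse=True, principal_node_type=xml.dom.Node.ELEMENT_NODE)
def ancestor_or_self(node):
    # Yield the node and then each parent up to the document root.
    while node is not None:
        yield node
        node = node.parentNode

print(ancestor_or_self.__name__)   # 'ancestor-or-self'
print(ancestor_or_self.reverse)    # True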
PX4/pyulog
|
pyulog/info.py
|
https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/info.py#L15-L65
|
def show_info(ulog, verbose):
"""Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points))
|
[
"def",
"show_info",
"(",
"ulog",
",",
"verbose",
")",
":",
"m1",
",",
"s1",
"=",
"divmod",
"(",
"int",
"(",
"ulog",
".",
"start_timestamp",
"/",
"1e6",
")",
",",
"60",
")",
"h1",
",",
"m1",
"=",
"divmod",
"(",
"m1",
",",
"60",
")",
"m2",
",",
"s2",
"=",
"divmod",
"(",
"int",
"(",
"(",
"ulog",
".",
"last_timestamp",
"-",
"ulog",
".",
"start_timestamp",
")",
"/",
"1e6",
")",
",",
"60",
")",
"h2",
",",
"m2",
"=",
"divmod",
"(",
"m2",
",",
"60",
")",
"print",
"(",
"\"Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}\"",
".",
"format",
"(",
"h1",
",",
"m1",
",",
"s1",
",",
"h2",
",",
"m2",
",",
"s2",
")",
")",
"dropout_durations",
"=",
"[",
"dropout",
".",
"duration",
"for",
"dropout",
"in",
"ulog",
".",
"dropouts",
"]",
"if",
"len",
"(",
"dropout_durations",
")",
"==",
"0",
":",
"print",
"(",
"\"No Dropouts\"",
")",
"else",
":",
"print",
"(",
"\"Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms\"",
".",
"format",
"(",
"len",
"(",
"dropout_durations",
")",
",",
"sum",
"(",
"dropout_durations",
")",
"/",
"1000.",
",",
"max",
"(",
"dropout_durations",
")",
",",
"int",
"(",
"sum",
"(",
"dropout_durations",
")",
"/",
"len",
"(",
"dropout_durations",
")",
")",
")",
")",
"version",
"=",
"ulog",
".",
"get_version_info_str",
"(",
")",
"if",
"not",
"version",
"is",
"None",
":",
"print",
"(",
"'SW Version: {}'",
".",
"format",
"(",
"version",
")",
")",
"print",
"(",
"\"Info Messages:\"",
")",
"for",
"k",
"in",
"sorted",
"(",
"ulog",
".",
"msg_info_dict",
")",
":",
"if",
"not",
"k",
".",
"startswith",
"(",
"'perf_'",
")",
"or",
"verbose",
":",
"print",
"(",
"\" {0}: {1}\"",
".",
"format",
"(",
"k",
",",
"ulog",
".",
"msg_info_dict",
"[",
"k",
"]",
")",
")",
"if",
"len",
"(",
"ulog",
".",
"msg_info_multiple_dict",
")",
">",
"0",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Info Multiple Messages:\"",
")",
"for",
"k",
"in",
"sorted",
"(",
"ulog",
".",
"msg_info_multiple_dict",
")",
":",
"print",
"(",
"\" {0}: {1}\"",
".",
"format",
"(",
"k",
",",
"ulog",
".",
"msg_info_multiple_dict",
"[",
"k",
"]",
")",
")",
"else",
":",
"print",
"(",
"\"Info Multiple Messages: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"[",
"\"[{}: {}]\"",
".",
"format",
"(",
"k",
",",
"len",
"(",
"ulog",
".",
"msg_info_multiple_dict",
"[",
"k",
"]",
")",
")",
"for",
"k",
"in",
"sorted",
"(",
"ulog",
".",
"msg_info_multiple_dict",
")",
"]",
")",
")",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"{:<41} {:7}, {:10}\"",
".",
"format",
"(",
"\"Name (multi id, message size in bytes)\"",
",",
"\"number of data points\"",
",",
"\"total bytes\"",
")",
")",
"data_list_sorted",
"=",
"sorted",
"(",
"ulog",
".",
"data_list",
",",
"key",
"=",
"lambda",
"d",
":",
"d",
".",
"name",
"+",
"str",
"(",
"d",
".",
"multi_id",
")",
")",
"for",
"d",
"in",
"data_list_sorted",
":",
"message_size",
"=",
"sum",
"(",
"[",
"ULog",
".",
"get_field_size",
"(",
"f",
".",
"type_str",
")",
"for",
"f",
"in",
"d",
".",
"field_data",
"]",
")",
"num_data_points",
"=",
"len",
"(",
"d",
".",
"data",
"[",
"'timestamp'",
"]",
")",
"name_id",
"=",
"\"{:} ({:}, {:})\"",
".",
"format",
"(",
"d",
".",
"name",
",",
"d",
".",
"multi_id",
",",
"message_size",
")",
"print",
"(",
"\" {:<40} {:7d} {:10d}\"",
".",
"format",
"(",
"name_id",
",",
"num_data_points",
",",
"message_size",
"*",
"num_data_points",
")",
")"
] |
Show general information from an ULog
|
[
"Show",
"general",
"information",
"from",
"an",
"ULog"
] |
python
|
train
| 43.235294 |
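A minimal usage sketch for the show_info record above; the log file name is illustrative.

from pyulog import ULog
from pyulog.info import show_info

ulog = ULog("flight_log.ulg")      # parse the ULog file
show_info(ulog, verbose=True)      # print start time, dropouts, info messages and topics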
apache/incubator-mxnet
|
python/mxnet/log.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/log.py#L90-L145
|
def get_logger(name=None, filename=None, filemode=None, level=WARNING):
"""Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
"""
logger = logging.getLogger(name)
if name is not None and not getattr(logger, '_init_done', None):
logger._init_done = True
if filename:
mode = filemode if filemode else 'a'
hdlr = logging.FileHandler(filename, mode)
else:
hdlr = logging.StreamHandler() # pylint: disable=redefined-variable-type
# the `_Formatter` contain some escape character to
# represent color, which is not suitable for FileHandler,
# (TODO) maybe we can add another Formatter for FileHandler.
hdlr.setFormatter(_Formatter())
logger.addHandler(hdlr)
logger.setLevel(level)
return logger
|
[
"def",
"get_logger",
"(",
"name",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"filemode",
"=",
"None",
",",
"level",
"=",
"WARNING",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"if",
"name",
"is",
"not",
"None",
"and",
"not",
"getattr",
"(",
"logger",
",",
"'_init_done'",
",",
"None",
")",
":",
"logger",
".",
"_init_done",
"=",
"True",
"if",
"filename",
":",
"mode",
"=",
"filemode",
"if",
"filemode",
"else",
"'a'",
"hdlr",
"=",
"logging",
".",
"FileHandler",
"(",
"filename",
",",
"mode",
")",
"else",
":",
"hdlr",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"# pylint: disable=redefined-variable-type",
"# the `_Formatter` contain some escape character to",
"# represent color, which is not suitable for FileHandler,",
"# (TODO) maybe we can add another Formatter for FileHandler.",
"hdlr",
".",
"setFormatter",
"(",
"_Formatter",
"(",
")",
")",
"logger",
".",
"addHandler",
"(",
"hdlr",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"return",
"logger"
] |
Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
|
[
"Gets",
"a",
"customized",
"logger",
"."
] |
python
|
train
| 37.553571 |
consbio/ncdjango
|
ncdjango/geoprocessing/evaluation.py
|
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L321-L332
|
def fn_abs(self, value):
"""
Return the absolute value of a number.
:param value: The number.
:return: The absolute value of the number.
"""
if is_ndarray(value):
return numpy.absolute(value)
else:
return abs(value)
|
[
"def",
"fn_abs",
"(",
"self",
",",
"value",
")",
":",
"if",
"is_ndarray",
"(",
"value",
")",
":",
"return",
"numpy",
".",
"absolute",
"(",
"value",
")",
"else",
":",
"return",
"abs",
"(",
"value",
")"
] |
Return the absolute value of a number.
:param value: The number.
:return: The absolute value of the number.
|
[
"Return",
"the",
"absolute",
"value",
"of",
"a",
"number",
"."
] |
python
|
train
| 23.833333 |
astropy/regions
|
regions/io/core.py
|
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/core.py#L549-L572
|
def _convert_sky_coords(self):
"""
Convert to sky coordinates
"""
parsed_angles = [(x, y)
for x, y in zip(self.coord[:-1:2], self.coord[1::2])
if (isinstance(x, coordinates.Angle) and isinstance(y, coordinates.Angle))
]
frame = coordinates.frame_transform_graph.lookup_name(self.coordsys)
lon, lat = zip(*parsed_angles)
if hasattr(lon, '__len__') and hasattr(lat, '__len__') and len(lon) == 1 and len(lat) == 1:
# force entries to be scalar if they are length-1
lon, lat = u.Quantity(lon[0]), u.Quantity(lat[0])
else:
# otherwise, they are vector quantities
lon, lat = u.Quantity(lon), u.Quantity(lat)
sphcoords = coordinates.UnitSphericalRepresentation(lon, lat)
coords = [SkyCoord(frame(sphcoords))]
if self.region_type != 'polygon':
coords += self.coord[len(coords * 2):]
return coords
|
[
"def",
"_convert_sky_coords",
"(",
"self",
")",
":",
"parsed_angles",
"=",
"[",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"coord",
"[",
":",
"-",
"1",
":",
"2",
"]",
",",
"self",
".",
"coord",
"[",
"1",
":",
":",
"2",
"]",
")",
"if",
"(",
"isinstance",
"(",
"x",
",",
"coordinates",
".",
"Angle",
")",
"and",
"isinstance",
"(",
"y",
",",
"coordinates",
".",
"Angle",
")",
")",
"]",
"frame",
"=",
"coordinates",
".",
"frame_transform_graph",
".",
"lookup_name",
"(",
"self",
".",
"coordsys",
")",
"lon",
",",
"lat",
"=",
"zip",
"(",
"*",
"parsed_angles",
")",
"if",
"hasattr",
"(",
"lon",
",",
"'__len__'",
")",
"and",
"hasattr",
"(",
"lat",
",",
"'__len__'",
")",
"and",
"len",
"(",
"lon",
")",
"==",
"1",
"and",
"len",
"(",
"lat",
")",
"==",
"1",
":",
"# force entries to be scalar if they are length-1",
"lon",
",",
"lat",
"=",
"u",
".",
"Quantity",
"(",
"lon",
"[",
"0",
"]",
")",
",",
"u",
".",
"Quantity",
"(",
"lat",
"[",
"0",
"]",
")",
"else",
":",
"# otherwise, they are vector quantities",
"lon",
",",
"lat",
"=",
"u",
".",
"Quantity",
"(",
"lon",
")",
",",
"u",
".",
"Quantity",
"(",
"lat",
")",
"sphcoords",
"=",
"coordinates",
".",
"UnitSphericalRepresentation",
"(",
"lon",
",",
"lat",
")",
"coords",
"=",
"[",
"SkyCoord",
"(",
"frame",
"(",
"sphcoords",
")",
")",
"]",
"if",
"self",
".",
"region_type",
"!=",
"'polygon'",
":",
"coords",
"+=",
"self",
".",
"coord",
"[",
"len",
"(",
"coords",
"*",
"2",
")",
":",
"]",
"return",
"coords"
] |
Convert to sky coordinates
|
[
"Convert",
"to",
"sky",
"coordinates"
] |
python
|
train
| 41.625 |
mitsei/dlkit
|
dlkit/json_/osid/objects.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/objects.py#L1299-L1314
|
def set_start_date(self, date):
"""Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_start_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(date, self.get_start_date_metadata()):
raise errors.InvalidArgument()
# self._my_map['startDate'] = self._get_date_map(date)
self._my_map['startDate'] = date
|
[
"def",
"set_start_date",
"(",
"self",
",",
"date",
")",
":",
"if",
"self",
".",
"get_start_date_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"if",
"not",
"self",
".",
"_is_valid_date_time",
"(",
"date",
",",
"self",
".",
"get_start_date_metadata",
"(",
")",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
")",
"# self._my_map['startDate'] = self._get_date_map(date)",
"self",
".",
"_my_map",
"[",
"'startDate'",
"]",
"=",
"date"
] |
Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
[
"Sets",
"the",
"start",
"date",
"."
] |
python
|
train
| 42.75 |
psphere-project/psphere
|
psphere/client.py
|
https://github.com/psphere-project/psphere/blob/83a252e037c3d6e4f18bcd37380998bc9535e591/psphere/client.py#L590-L677
|
def find_entity_view(self, view_type, begin_entity=None, filter={},
properties=None):
"""Find a ManagedEntity of the requested type.
Traverses the MOB looking for an entity matching the filter.
:param view_type: The type of ManagedEntity to find.
:type view_type: str
:param begin_entity: The MOR to start searching for the entity. \
The default is to start the search at the root folder.
:type begin_entity: ManagedObjectReference or None
:param filter: Key/value pairs to filter the results. The key is \
a valid parameter of the ManagedEntity type. The value is what \
that parameter should match.
:type filter: dict
:returns: If an entity is found, a ManagedEntity matching the search.
:rtype: ManagedEntity
"""
if properties is None:
properties = []
kls = classmapper(view_type)
# Start the search at the root folder if no begin_entity was given
if not begin_entity:
begin_entity = self.sc.rootFolder._mo_ref
logger.debug("Using %s", self.sc.rootFolder._mo_ref)
property_spec = self.create('PropertySpec')
property_spec.type = view_type
property_spec.all = False
property_spec.pathSet = list(filter.keys())
pfs = self.get_search_filter_spec(begin_entity, property_spec)
# Retrieve properties from server and update entity
#obj_contents = self.propertyCollector.RetrieveProperties(specSet=pfs)
obj_contents = self.sc.propertyCollector.RetrieveProperties(specSet=pfs)
# TODO: Implement filtering
if not filter:
logger.warning('No filter specified, returning first match.')
# If no filter is specified we just return the first item
# in the list of returned objects
logger.debug("Creating class in find_entity_view (filter)")
view = kls(obj_contents[0].obj._mo_ref, self)
logger.debug("Completed creating class in find_entity_view (filter)")
#view.update_view_data(properties)
return view
matched = False
# Iterate through obj_contents retrieved
for obj_content in obj_contents:
# If there are is no propSet, skip this one
if not obj_content.propSet:
continue
matches = 0
# Iterate through each property in the set
for prop in obj_content.propSet:
for key in filter.keys():
# If the property name is in the defined filter
if prop.name == key:
# ...and it matches the value specified
# TODO: Regex this?
if prop.val == filter[prop.name]:
# We've found a match
matches += 1
else:
break
else:
continue
if matches == len(filter):
filtered_obj_content = obj_content
matched = True
break
else:
continue
if matched is not True:
# There were no matches
raise ObjectNotFoundError("No matching objects for filter")
logger.debug("Creating class in find_entity_view")
view = kls(filtered_obj_content.obj._mo_ref, self)
logger.debug("Completed creating class in find_entity_view")
#view.update_view_data(properties=properties)
return view
|
[
"def",
"find_entity_view",
"(",
"self",
",",
"view_type",
",",
"begin_entity",
"=",
"None",
",",
"filter",
"=",
"{",
"}",
",",
"properties",
"=",
"None",
")",
":",
"if",
"properties",
"is",
"None",
":",
"properties",
"=",
"[",
"]",
"kls",
"=",
"classmapper",
"(",
"view_type",
")",
"# Start the search at the root folder if no begin_entity was given",
"if",
"not",
"begin_entity",
":",
"begin_entity",
"=",
"self",
".",
"sc",
".",
"rootFolder",
".",
"_mo_ref",
"logger",
".",
"debug",
"(",
"\"Using %s\"",
",",
"self",
".",
"sc",
".",
"rootFolder",
".",
"_mo_ref",
")",
"property_spec",
"=",
"self",
".",
"create",
"(",
"'PropertySpec'",
")",
"property_spec",
".",
"type",
"=",
"view_type",
"property_spec",
".",
"all",
"=",
"False",
"property_spec",
".",
"pathSet",
"=",
"list",
"(",
"filter",
".",
"keys",
"(",
")",
")",
"pfs",
"=",
"self",
".",
"get_search_filter_spec",
"(",
"begin_entity",
",",
"property_spec",
")",
"# Retrieve properties from server and update entity",
"#obj_contents = self.propertyCollector.RetrieveProperties(specSet=pfs)",
"obj_contents",
"=",
"self",
".",
"sc",
".",
"propertyCollector",
".",
"RetrieveProperties",
"(",
"specSet",
"=",
"pfs",
")",
"# TODO: Implement filtering",
"if",
"not",
"filter",
":",
"logger",
".",
"warning",
"(",
"'No filter specified, returning first match.'",
")",
"# If no filter is specified we just return the first item",
"# in the list of returned objects",
"logger",
".",
"debug",
"(",
"\"Creating class in find_entity_view (filter)\"",
")",
"view",
"=",
"kls",
"(",
"obj_contents",
"[",
"0",
"]",
".",
"obj",
".",
"_mo_ref",
",",
"self",
")",
"logger",
".",
"debug",
"(",
"\"Completed creating class in find_entity_view (filter)\"",
")",
"#view.update_view_data(properties)",
"return",
"view",
"matched",
"=",
"False",
"# Iterate through obj_contents retrieved",
"for",
"obj_content",
"in",
"obj_contents",
":",
"# If there are is no propSet, skip this one",
"if",
"not",
"obj_content",
".",
"propSet",
":",
"continue",
"matches",
"=",
"0",
"# Iterate through each property in the set",
"for",
"prop",
"in",
"obj_content",
".",
"propSet",
":",
"for",
"key",
"in",
"filter",
".",
"keys",
"(",
")",
":",
"# If the property name is in the defined filter",
"if",
"prop",
".",
"name",
"==",
"key",
":",
"# ...and it matches the value specified",
"# TODO: Regex this?",
"if",
"prop",
".",
"val",
"==",
"filter",
"[",
"prop",
".",
"name",
"]",
":",
"# We've found a match",
"matches",
"+=",
"1",
"else",
":",
"break",
"else",
":",
"continue",
"if",
"matches",
"==",
"len",
"(",
"filter",
")",
":",
"filtered_obj_content",
"=",
"obj_content",
"matched",
"=",
"True",
"break",
"else",
":",
"continue",
"if",
"matched",
"is",
"not",
"True",
":",
"# There were no matches",
"raise",
"ObjectNotFoundError",
"(",
"\"No matching objects for filter\"",
")",
"logger",
".",
"debug",
"(",
"\"Creating class in find_entity_view\"",
")",
"view",
"=",
"kls",
"(",
"filtered_obj_content",
".",
"obj",
".",
"_mo_ref",
",",
"self",
")",
"logger",
".",
"debug",
"(",
"\"Completed creating class in find_entity_view\"",
")",
"#view.update_view_data(properties=properties)",
"return",
"view"
] |
Find a ManagedEntity of the requested type.
Traverses the MOB looking for an entity matching the filter.
:param view_type: The type of ManagedEntity to find.
:type view_type: str
:param begin_entity: The MOR to start searching for the entity. \
The default is to start the search at the root folder.
:type begin_entity: ManagedObjectReference or None
:param filter: Key/value pairs to filter the results. The key is \
a valid parameter of the ManagedEntity type. The value is what \
that parameter should match.
:type filter: dict
:returns: If an entity is found, a ManagedEntity matching the search.
:rtype: ManagedEntity
|
[
"Find",
"a",
"ManagedEntity",
"of",
"the",
"requested",
"type",
"."
] |
python
|
train
| 40.602273 |
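A usage sketch for the find_entity_view record above; the vCenter host, credentials, and VM name are placeholders.

from psphere.client import Client

client = Client("vcenter.example.com", "administrator", "<password>")
# Find a VirtualMachine whose "name" property matches exactly.
vm = client.find_entity_view("VirtualMachine", filter={"name": "test-vm-01"})
print(vm.name)
client.logout()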
acutesoftware/AIKIF
|
aikif/cls_log.py
|
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L142-L146
|
def record_process(self, process, prg=''):
"""
log a process or program - log a physical program (.py, .bat, .exe)
"""
self._log(self.logFileProcess, force_to_string(process), prg)
|
[
"def",
"record_process",
"(",
"self",
",",
"process",
",",
"prg",
"=",
"''",
")",
":",
"self",
".",
"_log",
"(",
"self",
".",
"logFileProcess",
",",
"force_to_string",
"(",
"process",
")",
",",
"prg",
")"
] |
log a process or program - log a physical program (.py, .bat, .exe)
|
[
"log",
"a",
"process",
"or",
"program",
"-",
"log",
"a",
"physical",
"program",
"(",
".",
"py",
".",
"bat",
".",
"exe",
")"
] |
python
|
train
| 41.6 |
gabstopper/smc-python
|
smc/core/engine.py
|
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/engine.py#L552-L567
|
def blacklist_bulk(self, blacklist):
"""
Add blacklist entries to the engine node in bulk. For blacklist to work,
you must also create a rule with action "Apply Blacklist".
First create your blacklist entries using :class:`smc.elements.other.Blacklist`
then provide the blacklist to this method.
:param blacklist Blacklist: pre-configured blacklist entries
.. note:: This method requires SMC version >= 6.4
"""
self.make_request(
EngineCommandFailed,
method='create',
resource='blacklist',
json=blacklist.entries)
|
[
"def",
"blacklist_bulk",
"(",
"self",
",",
"blacklist",
")",
":",
"self",
".",
"make_request",
"(",
"EngineCommandFailed",
",",
"method",
"=",
"'create'",
",",
"resource",
"=",
"'blacklist'",
",",
"json",
"=",
"blacklist",
".",
"entries",
")"
] |
Add blacklist entries to the engine node in bulk. For blacklist to work,
you must also create a rule with action "Apply Blacklist".
First create your blacklist entries using :class:`smc.elements.other.Blacklist`
then provide the blacklist to this method.
:param blacklist Blacklist: pre-configured blacklist entries
.. note:: This method requires SMC version >= 6.4
|
[
"Add",
"blacklist",
"entries",
"to",
"the",
"engine",
"node",
"in",
"bulk",
".",
"For",
"blacklist",
"to",
"work",
"you",
"must",
"also",
"create",
"a",
"rule",
"with",
"action",
"Apply",
"Blacklist",
".",
"First",
"create",
"your",
"blacklist",
"entries",
"using",
":",
"class",
":",
"smc",
".",
"elements",
".",
"other",
".",
"Blacklist",
"then",
"provide",
"the",
"blacklist",
"to",
"this",
"method",
".",
":",
"param",
"blacklist",
"Blacklist",
":",
"pre",
"-",
"configured",
"blacklist",
"entries",
"..",
"note",
"::",
"This",
"method",
"requires",
"SMC",
"version",
">",
"=",
"6",
".",
"4"
] |
python
|
train
| 39.75 |