repo (stringlengths 7–54) | path (stringlengths 4–192) | url (stringlengths 87–284) | code (stringlengths 78–104k) | code_tokens (list) | docstring (stringlengths 1–46.9k) | docstring_tokens (list) | language (stringclasses, 1 value) | partition (stringclasses, 3 values)
---|---|---|---|---|---|---|---|---|
flask-restful/flask-restful
|
flask_restful/__init__.py
|
https://github.com/flask-restful/flask-restful/blob/25544d697c1f82bafbd1320960df459f58a58e03/flask_restful/__init__.py#L386-L404
|
def resource(self, *urls, **kwargs):
    """Wraps a :class:`~flask_restful.Resource` class, adding it to the
    api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
    Example::
        app = Flask(__name__)
        api = restful.Api(app)
        @api.resource('/foo')
        class Foo(Resource):
            def get(self):
                return 'Hello, World!'
    """
    def decorator(cls):
        self.add_resource(cls, *urls, **kwargs)
        return cls
    return decorator
|
[
"def",
"resource",
"(",
"self",
",",
"*",
"urls",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"cls",
")",
":",
"self",
".",
"add_resource",
"(",
"cls",
",",
"*",
"urls",
",",
"*",
"*",
"kwargs",
")",
"return",
"cls",
"return",
"decorator"
] |
Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
|
[
"Wraps",
"a",
":",
"class",
":",
"~flask_restful",
".",
"Resource",
"class",
"adding",
"it",
"to",
"the",
"api",
".",
"Parameters",
"are",
"the",
"same",
"as",
":",
"meth",
":",
"~flask_restful",
".",
"Api",
".",
"add_resource",
"."
] |
python
|
train
|
RusticiSoftware/TinCanPython
|
tincan/remote_lrs.py
|
https://github.com/RusticiSoftware/TinCanPython/blob/424eedaa6d19221efb1108edb915fc332abbb317/tincan/remote_lrs.py#L509-L551
|
def _delete_state(self, activity, agent, state_id=None, registration=None, etag=None):
    """Private method to delete a specified state from the LRS
    :param activity: Activity object of state to be deleted
    :type activity: :class:`tincan.activity.Activity`
    :param agent: Agent object of state to be deleted
    :type agent: :class:`tincan.agent.Agent`
    :param state_id: UUID of state to be deleted
    :type state_id: str | unicode
    :param registration: registration UUID of state to be deleted
    :type registration: str | unicode
    :param etag: etag of state to be deleted
    :type etag: str | unicode
    :return: LRS Response object with deleted state as content
    :rtype: :class:`tincan.lrs_response.LRSResponse`
    """
    if not isinstance(activity, Activity):
        activity = Activity(activity)
    if not isinstance(agent, Agent):
        agent = Agent(agent)
    request = HTTPRequest(
        method="DELETE",
        resource="activities/state"
    )
    if etag is not None:
        request.headers["If-Match"] = etag
    request.query_params = {
        "activityId": activity.id,
        "agent": agent.to_json(self.version)
    }
    if state_id is not None:
        request.query_params["stateId"] = state_id
    if registration is not None:
        request.query_params["registration"] = registration
    lrs_response = self._send_request(request)
    return lrs_response
|
[
"def",
"_delete_state",
"(",
"self",
",",
"activity",
",",
"agent",
",",
"state_id",
"=",
"None",
",",
"registration",
"=",
"None",
",",
"etag",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"activity",
",",
"Activity",
")",
":",
"activity",
"=",
"Activity",
"(",
"activity",
")",
"if",
"not",
"isinstance",
"(",
"agent",
",",
"Agent",
")",
":",
"agent",
"=",
"Agent",
"(",
"agent",
")",
"request",
"=",
"HTTPRequest",
"(",
"method",
"=",
"\"DELETE\"",
",",
"resource",
"=",
"\"activities/state\"",
")",
"if",
"etag",
"is",
"not",
"None",
":",
"request",
".",
"headers",
"[",
"\"If-Match\"",
"]",
"=",
"etag",
"request",
".",
"query_params",
"=",
"{",
"\"activityId\"",
":",
"activity",
".",
"id",
",",
"\"agent\"",
":",
"agent",
".",
"to_json",
"(",
"self",
".",
"version",
")",
"}",
"if",
"state_id",
"is",
"not",
"None",
":",
"request",
".",
"query_params",
"[",
"\"stateId\"",
"]",
"=",
"state_id",
"if",
"registration",
"is",
"not",
"None",
":",
"request",
".",
"query_params",
"[",
"\"registration\"",
"]",
"=",
"registration",
"lrs_response",
"=",
"self",
".",
"_send_request",
"(",
"request",
")",
"return",
"lrs_response"
] |
Private method to delete a specified state from the LRS
:param activity: Activity object of state to be deleted
:type activity: :class:`tincan.activity.Activity`
:param agent: Agent object of state to be deleted
:type agent: :class:`tincan.agent.Agent`
:param state_id: UUID of state to be deleted
:type state_id: str | unicode
:param registration: registration UUID of state to be deleted
:type registration: str | unicode
:param etag: etag of state to be deleted
:type etag: str | unicode
:return: LRS Response object with deleted state as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
|
[
"Private",
"method",
"to",
"delete",
"a",
"specified",
"state",
"from",
"the",
"LRS"
] |
python
|
train
|
Robpol86/Flask-Celery-Helper
|
flask_celery.py
|
https://github.com/Robpol86/Flask-Celery-Helper/blob/92bd3b02954422665260116adda8eb899546c365/flask_celery.py#L228-L269
|
def single_instance(func=None, lock_timeout=None, include_args=False):
    """Celery task decorator. Forces the task to have only one running instance at a time.
    Use with bound tasks (@celery.task(bind=True)).
    Modeled after:
    http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
    http://blogs.it.ox.ac.uk/inapickle/2012/01/05/python-decorators-with-optional-arguments/
    Written by @Robpol86.
    :raise OtherInstanceError: If another instance is already running.
    :param function func: The function to decorate, must be also decorated by @celery.task.
    :param int lock_timeout: Lock timeout in seconds plus five more seconds, in case the task crashes and fails to
        release the lock. If not specified, the values of the task's soft/hard limits are used. If all else fails,
        timeout will be 5 minutes.
    :param bool include_args: Include the md5 checksum of the arguments passed to the task in the Redis key. This allows
        the same task to run with different arguments, only stopping a task from running if another instance of it is
        running with the same arguments.
    """
    if func is None:
        return partial(single_instance, lock_timeout=lock_timeout, include_args=include_args)
    @wraps(func)
    def wrapped(celery_self, *args, **kwargs):
        """Wrapped Celery task, for single_instance()."""
        # Select the manager and get timeout.
        timeout = (
            lock_timeout or celery_self.soft_time_limit or celery_self.time_limit
            or celery_self.app.conf.get('CELERYD_TASK_SOFT_TIME_LIMIT')
            or celery_self.app.conf.get('CELERYD_TASK_TIME_LIMIT')
            or (60 * 5)
        )
        manager_class = _select_manager(celery_self.backend.__class__.__name__)
        lock_manager = manager_class(celery_self, timeout, include_args, args, kwargs)
        # Lock and execute.
        with lock_manager:
            ret_value = func(*args, **kwargs)
        return ret_value
    return wrapped
|
[
"def",
"single_instance",
"(",
"func",
"=",
"None",
",",
"lock_timeout",
"=",
"None",
",",
"include_args",
"=",
"False",
")",
":",
"if",
"func",
"is",
"None",
":",
"return",
"partial",
"(",
"single_instance",
",",
"lock_timeout",
"=",
"lock_timeout",
",",
"include_args",
"=",
"include_args",
")",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"celery_self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapped Celery task, for single_instance().\"\"\"",
"# Select the manager and get timeout.",
"timeout",
"=",
"(",
"lock_timeout",
"or",
"celery_self",
".",
"soft_time_limit",
"or",
"celery_self",
".",
"time_limit",
"or",
"celery_self",
".",
"app",
".",
"conf",
".",
"get",
"(",
"'CELERYD_TASK_SOFT_TIME_LIMIT'",
")",
"or",
"celery_self",
".",
"app",
".",
"conf",
".",
"get",
"(",
"'CELERYD_TASK_TIME_LIMIT'",
")",
"or",
"(",
"60",
"*",
"5",
")",
")",
"manager_class",
"=",
"_select_manager",
"(",
"celery_self",
".",
"backend",
".",
"__class__",
".",
"__name__",
")",
"lock_manager",
"=",
"manager_class",
"(",
"celery_self",
",",
"timeout",
",",
"include_args",
",",
"args",
",",
"kwargs",
")",
"# Lock and execute.",
"with",
"lock_manager",
":",
"ret_value",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"ret_value",
"return",
"wrapped"
] |
Celery task decorator. Forces the task to have only one running instance at a time.
Use with bound tasks (@celery.task(bind=True)).
Modeled after:
http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
http://blogs.it.ox.ac.uk/inapickle/2012/01/05/python-decorators-with-optional-arguments/
Written by @Robpol86.
:raise OtherInstanceError: If another instance is already running.
:param function func: The function to decorate, must be also decorated by @celery.task.
:param int lock_timeout: Lock timeout in seconds plus five more seconds, in case the task crashes and fails to
release the lock. If not specified, the values of the task's soft/hard limits are used. If all else fails,
timeout will be 5 minutes.
:param bool include_args: Include the md5 checksum of the arguments passed to the task in the Redis key. This allows
the same task to run with different arguments, only stopping a task from running if another instance of it is
running with the same arguments.
|
[
"Celery",
"task",
"decorator",
".",
"Forces",
"the",
"task",
"to",
"have",
"only",
"one",
"running",
"instance",
"at",
"a",
"time",
"."
] |
python
|
valid
|
genericclient/genericclient-base
|
genericclient_base/utils.py
|
https://github.com/genericclient/genericclient-base/blob/193f7c879c40decaf03504af633f593b88e4abc5/genericclient_base/utils.py#L60-L74
|
def parse_headers_link(headers):
    """Returns the parsed header links of the response, if any."""
    header = CaseInsensitiveDict(headers).get('link')
    l = {}
    if header:
        links = parse_link(header)
        for link in links:
            key = link.get('rel') or link.get('url')
            l[key] = link
    return l
|
[
"def",
"parse_headers_link",
"(",
"headers",
")",
":",
"header",
"=",
"CaseInsensitiveDict",
"(",
"headers",
")",
".",
"get",
"(",
"'link'",
")",
"l",
"=",
"{",
"}",
"if",
"header",
":",
"links",
"=",
"parse_link",
"(",
"header",
")",
"for",
"link",
"in",
"links",
":",
"key",
"=",
"link",
".",
"get",
"(",
"'rel'",
")",
"or",
"link",
".",
"get",
"(",
"'url'",
")",
"l",
"[",
"key",
"]",
"=",
"link",
"return",
"l"
] |
Returns the parsed header links of the response, if any.
|
[
"Returns",
"the",
"parsed",
"header",
"links",
"of",
"the",
"response",
"if",
"any",
"."
] |
python
|
train
|
iopipe/iopipe-python
|
iopipe/agent.py
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L246-L258
|
def submit_future(self, func, *args, **kwargs):
    """
    Submit a function call to be run as a future in a thread pool. This
    should be an I/O bound operation.
    """
    # This mode will run futures synchronously. This should only be used
    # for benchmarking purposes.
    if self.config["sync_http"] is True:
        return MockFuture(func, *args, **kwargs)
    future = self.pool.submit(func, *args, **kwargs)
    self.futures.append(future)
    return future
|
[
"def",
"submit_future",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# This mode will run futures synchronously. This should only be used",
"# for benchmarking purposes.",
"if",
"self",
".",
"config",
"[",
"\"sync_http\"",
"]",
"is",
"True",
":",
"return",
"MockFuture",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"future",
"=",
"self",
".",
"pool",
".",
"submit",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"futures",
".",
"append",
"(",
"future",
")",
"return",
"future"
] |
Submit a function call to be run as a future in a thread pool. This
should be an I/O bound operation.
|
[
"Submit",
"a",
"function",
"call",
"to",
"be",
"run",
"as",
"a",
"future",
"in",
"a",
"thread",
"pool",
".",
"This",
"should",
"be",
"an",
"I",
"/",
"O",
"bound",
"operation",
"."
] |
python
|
train
|
Robpol86/terminaltables
|
terminaltables/width_and_alignment.py
|
https://github.com/Robpol86/terminaltables/blob/ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc/terminaltables/width_and_alignment.py#L84-L113
|
def max_dimensions(table_data, padding_left=0, padding_right=0, padding_top=0, padding_bottom=0):
    """Get maximum widths of each column and maximum height of each row.
    :param iter table_data: List of list of strings (unmodified table data).
    :param int padding_left: Number of space chars on left side of cell.
    :param int padding_right: Number of space chars on right side of cell.
    :param int padding_top: Number of empty lines on top side of cell.
    :param int padding_bottom: Number of empty lines on bottom side of cell.
    :return: 4-item tuple of n-item lists. Inner column widths and row heights, outer column widths and row heights.
    :rtype: tuple
    """
    inner_widths = [0] * (max(len(r) for r in table_data) if table_data else 0)
    inner_heights = [0] * len(table_data)
    # Find max width and heights.
    for j, row in enumerate(table_data):
        for i, cell in enumerate(row):
            if not hasattr(cell, 'count') or not hasattr(cell, 'splitlines'):
                cell = str(cell)
            if not cell:
                continue
            inner_heights[j] = max(inner_heights[j], cell.count('\n') + 1)
            inner_widths[i] = max(inner_widths[i], *[visible_width(l) for l in cell.splitlines()])
    # Calculate with padding.
    outer_widths = [padding_left + i + padding_right for i in inner_widths]
    outer_heights = [padding_top + i + padding_bottom for i in inner_heights]
    return inner_widths, inner_heights, outer_widths, outer_heights
|
[
"def",
"max_dimensions",
"(",
"table_data",
",",
"padding_left",
"=",
"0",
",",
"padding_right",
"=",
"0",
",",
"padding_top",
"=",
"0",
",",
"padding_bottom",
"=",
"0",
")",
":",
"inner_widths",
"=",
"[",
"0",
"]",
"*",
"(",
"max",
"(",
"len",
"(",
"r",
")",
"for",
"r",
"in",
"table_data",
")",
"if",
"table_data",
"else",
"0",
")",
"inner_heights",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"table_data",
")",
"# Find max width and heights.",
"for",
"j",
",",
"row",
"in",
"enumerate",
"(",
"table_data",
")",
":",
"for",
"i",
",",
"cell",
"in",
"enumerate",
"(",
"row",
")",
":",
"if",
"not",
"hasattr",
"(",
"cell",
",",
"'count'",
")",
"or",
"not",
"hasattr",
"(",
"cell",
",",
"'splitlines'",
")",
":",
"cell",
"=",
"str",
"(",
"cell",
")",
"if",
"not",
"cell",
":",
"continue",
"inner_heights",
"[",
"j",
"]",
"=",
"max",
"(",
"inner_heights",
"[",
"j",
"]",
",",
"cell",
".",
"count",
"(",
"'\\n'",
")",
"+",
"1",
")",
"inner_widths",
"[",
"i",
"]",
"=",
"max",
"(",
"inner_widths",
"[",
"i",
"]",
",",
"*",
"[",
"visible_width",
"(",
"l",
")",
"for",
"l",
"in",
"cell",
".",
"splitlines",
"(",
")",
"]",
")",
"# Calculate with padding.",
"outer_widths",
"=",
"[",
"padding_left",
"+",
"i",
"+",
"padding_right",
"for",
"i",
"in",
"inner_widths",
"]",
"outer_heights",
"=",
"[",
"padding_top",
"+",
"i",
"+",
"padding_bottom",
"for",
"i",
"in",
"inner_heights",
"]",
"return",
"inner_widths",
",",
"inner_heights",
",",
"outer_widths",
",",
"outer_heights"
] |
Get maximum widths of each column and maximum height of each row.
:param iter table_data: List of list of strings (unmodified table data).
:param int padding_left: Number of space chars on left side of cell.
:param int padding_right: Number of space chars on right side of cell.
:param int padding_top: Number of empty lines on top side of cell.
:param int padding_bottom: Number of empty lines on bottom side of cell.
:return: 4-item tuple of n-item lists. Inner column widths and row heights, outer column widths and row heights.
:rtype: tuple
|
[
"Get",
"maximum",
"widths",
"of",
"each",
"column",
"and",
"maximum",
"height",
"of",
"each",
"row",
"."
] |
python
|
train
|
DEIB-GECO/PyGMQL
|
gmql/dataset/GMQLDataset.py
|
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L1363-L1388
|
def materialize(self, output_path=None, output_name=None, all_load=True):
    """
    *Wrapper of* ``MATERIALIZE``
    Starts the execution of the operations for the GMQLDataset. PyGMQL implements lazy execution
    and no operation is performed until the materialization of the results is requested.
    This operation can happen both locally or remotely.
    * Local mode: if the GMQLDataset is local (based on local data) the user can specify the
    :param output_path: (Optional) If specified, the user can say where to locally save the results
        of the computations.
    :param output_name: (Optional) Can be used only if the dataset is remote. It represents the name that
        the user wants to give to the resulting dataset on the server
    :param all_load: (Optional) It specifies if the result dataset should be directly converted to a GDataframe (True) or to a
        GMQLDataset (False) for future local queries.
    :return: A GDataframe or a GMQLDataset
    """
    current_mode = get_mode()
    new_index = self.__modify_dag(current_mode)
    if current_mode == 'local':
        return Materializations.materialize_local(new_index, output_path, all_load)
    elif current_mode == 'remote':
        return Materializations.materialize_remote(new_index, output_name, output_path, all_load)
    else:
        raise ValueError("Current mode is not defined. {} given".format(current_mode))
|
[
"def",
"materialize",
"(",
"self",
",",
"output_path",
"=",
"None",
",",
"output_name",
"=",
"None",
",",
"all_load",
"=",
"True",
")",
":",
"current_mode",
"=",
"get_mode",
"(",
")",
"new_index",
"=",
"self",
".",
"__modify_dag",
"(",
"current_mode",
")",
"if",
"current_mode",
"==",
"'local'",
":",
"return",
"Materializations",
".",
"materialize_local",
"(",
"new_index",
",",
"output_path",
",",
"all_load",
")",
"elif",
"current_mode",
"==",
"'remote'",
":",
"return",
"Materializations",
".",
"materialize_remote",
"(",
"new_index",
",",
"output_name",
",",
"output_path",
",",
"all_load",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Current mode is not defined. {} given\"",
".",
"format",
"(",
"current_mode",
")",
")"
] |
*Wrapper of* ``MATERIALIZE``
Starts the execution of the operations for the GMQLDataset. PyGMQL implements lazy execution
and no operation is performed until the materialization of the results is requested.
This operation can happen both locally or remotely.
* Local mode: if the GMQLDataset is local (based on local data) the user can specify the
:param output_path: (Optional) If specified, the user can say where to locally save the results
of the computations.
:param output_name: (Optional) Can be used only if the dataset is remote. It represents the name that
the user wants to give to the resulting dataset on the server
:param all_load: (Optional) It specifies if the result dataset should be directly converted to a GDataframe (True) or to a
GMQLDataset (False) for future local queries.
:return: A GDataframe or a GMQLDataset
|
[
"*",
"Wrapper",
"of",
"*",
"MATERIALIZE"
] |
python
|
train
|
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py#L613-L637
|
def Override(self, overrides):
    """
    Produce a modified environment whose variables are overridden by
    the overrides dictionaries. "overrides" is a dictionary that
    will override the variables of this environment.
    This function is much more efficient than Clone() or creating
    a new Environment because it doesn't copy the construction
    environment dictionary, it just wraps the underlying construction
    environment, and doesn't even create a wrapper object if there
    are no overrides.
    """
    if not overrides: return self
    o = copy_non_reserved_keywords(overrides)
    if not o: return self
    overrides = {}
    merges = None
    for key, value in o.items():
        if key == 'parse_flags':
            merges = value
        else:
            overrides[key] = SCons.Subst.scons_subst_once(value, self, key)
    env = OverrideEnvironment(self, overrides)
    if merges: env.MergeFlags(merges)
    return env
|
[
"def",
"Override",
"(",
"self",
",",
"overrides",
")",
":",
"if",
"not",
"overrides",
":",
"return",
"self",
"o",
"=",
"copy_non_reserved_keywords",
"(",
"overrides",
")",
"if",
"not",
"o",
":",
"return",
"self",
"overrides",
"=",
"{",
"}",
"merges",
"=",
"None",
"for",
"key",
",",
"value",
"in",
"o",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'parse_flags'",
":",
"merges",
"=",
"value",
"else",
":",
"overrides",
"[",
"key",
"]",
"=",
"SCons",
".",
"Subst",
".",
"scons_subst_once",
"(",
"value",
",",
"self",
",",
"key",
")",
"env",
"=",
"OverrideEnvironment",
"(",
"self",
",",
"overrides",
")",
"if",
"merges",
":",
"env",
".",
"MergeFlags",
"(",
"merges",
")",
"return",
"env"
] |
Produce a modified environment whose variables are overridden by
the overrides dictionaries. "overrides" is a dictionary that
will override the variables of this environment.
This function is much more efficient than Clone() or creating
a new Environment because it doesn't copy the construction
environment dictionary, it just wraps the underlying construction
environment, and doesn't even create a wrapper object if there
are no overrides.
|
[
"Produce",
"a",
"modified",
"environment",
"whose",
"variables",
"are",
"overridden",
"by",
"the",
"overrides",
"dictionaries",
".",
"overrides",
"is",
"a",
"dictionary",
"that",
"will",
"override",
"the",
"variables",
"of",
"this",
"environment",
"."
] |
python
|
train
|
20c/xbahn
|
xbahn/engineer.py
|
https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/engineer.py#L130-L149
|
def engineer_info(self, action):
    """
    Returns:
        dict: engineer command information
            - arguments (list<dict>): command arguments
                - args (list): args to pass through to click.argument
                - kwargs (dict): keyword arguments to pass through to click.argument
            - options (list<dict>): command options
                - args (list): args to pass through to click.option
                - kwargs (dict): keyword options to pass through to click.option
    """
    fn = getattr(self, action, None)
    if not fn:
        raise AttributeError("Engineer action not found: %s" % action)
    if not hasattr(fn, "engineer"):
        raise AttributeError("Engineer action not exposed: %s" % action)
    return fn.engineer
|
[
"def",
"engineer_info",
"(",
"self",
",",
"action",
")",
":",
"fn",
"=",
"getattr",
"(",
"self",
",",
"action",
",",
"None",
")",
"if",
"not",
"fn",
":",
"raise",
"AttributeError",
"(",
"\"Engineer action not found: %s\"",
"%",
"action",
")",
"if",
"not",
"hasattr",
"(",
"fn",
",",
"\"engineer\"",
")",
":",
"raise",
"AttributeError",
"(",
"\"Engineer action not exposed: %s\"",
"%",
"action",
")",
"return",
"fn",
".",
"engineer"
] |
Returns:
dict: engineer command information
- arguments (list<dict>): command arguments
- args (list): args to pass through to click.argument
- kwargs (dict): keyword arguments to pass through to click.argument
- options (list<dict>): command options
- args (list): args to pass through to click.option
- kwargs (dict): keyword options to pass through to click.option
|
[
"Returns",
":",
"dict",
":",
"engineer",
"command",
"information",
"-",
"arguments",
"(",
"list<dict",
">",
")",
":",
"command",
"arguments",
"-",
"args",
"(",
"list",
")",
":",
"args",
"to",
"pass",
"through",
"to",
"click",
".",
"argument",
"-",
"kwargs",
"(",
"dict",
")",
":",
"keyword",
"arguments",
"to",
"pass",
"through",
"to",
"click",
".",
"argument",
"-",
"options",
"(",
"list<dict",
">",
")",
":",
"command",
"options",
"-",
"args",
"(",
"list",
")",
":",
"args",
"to",
"pass",
"through",
"to",
"click",
".",
"option",
"-",
"kwargs",
"(",
"dict",
")",
":",
"keyword",
"options",
"to",
"pass",
"through",
"to",
"click",
".",
"option"
] |
python
|
train
|
bjodah/pycompilation
|
pycompilation/compilation.py
|
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L311-L381
|
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
                     full_module_name=None, only_update=False,
                     **cy_kwargs):
    """
    Generates a C file from a Cython source file.
    Parameters
    ----------
    src: path string
        path to Cython source
    destdir: path string (optional)
        Path to output directory (default: '.')
    cwd: path string (optional)
        Root of relative paths (default: '.')
    logger: logging.Logger
        info level used.
    full_module_name: string
        passed to cy_compile (default: None)
    only_update: bool
        Only cythonize if source is newer. default: False
    **cy_kwargs:
        second argument passed to cy_compile.
        Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
    """
    from Cython.Compiler.Main import (
        default_options, CompilationOptions
    )
    from Cython.Compiler.Main import compile as cy_compile
    assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
    cwd = cwd or '.'
    destdir = destdir or '.'
    ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
    c_name = os.path.splitext(os.path.basename(src))[0] + ext
    dstfile = os.path.join(destdir, c_name)
    if only_update:
        if not missing_or_other_newer(dstfile, src, cwd=cwd):
            msg = '{0} newer than {1}, did not re-cythonize.'.format(
                dstfile, src)
            if logger:
                logger.info(msg)
            else:
                print(msg)
            return dstfile
    if cwd:
        ori_dir = os.getcwd()
    else:
        ori_dir = '.'
    os.chdir(cwd)
    try:
        cy_options = CompilationOptions(default_options)
        cy_options.__dict__.update(cy_kwargs)
        if logger:
            logger.info("Cythonizing {0} to {1}".format(
                src, dstfile))
        cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
        if cy_result.num_errors > 0:
            raise ValueError("Cython compilation failed.")
        if os.path.abspath(os.path.dirname(
                src)) != os.path.abspath(destdir):
            if os.path.exists(dstfile):
                os.unlink(dstfile)
            shutil.move(os.path.join(os.path.dirname(src), c_name),
                        destdir)
    finally:
        os.chdir(ori_dir)
    return dstfile
|
[
"def",
"simple_cythonize",
"(",
"src",
",",
"destdir",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"full_module_name",
"=",
"None",
",",
"only_update",
"=",
"False",
",",
"*",
"*",
"cy_kwargs",
")",
":",
"from",
"Cython",
".",
"Compiler",
".",
"Main",
"import",
"(",
"default_options",
",",
"CompilationOptions",
")",
"from",
"Cython",
".",
"Compiler",
".",
"Main",
"import",
"compile",
"as",
"cy_compile",
"assert",
"src",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.pyx'",
")",
"or",
"src",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.py'",
")",
"cwd",
"=",
"cwd",
"or",
"'.'",
"destdir",
"=",
"destdir",
"or",
"'.'",
"ext",
"=",
"'.cpp'",
"if",
"cy_kwargs",
".",
"get",
"(",
"'cplus'",
",",
"False",
")",
"else",
"'.c'",
"c_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
"[",
"0",
"]",
"+",
"ext",
"dstfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destdir",
",",
"c_name",
")",
"if",
"only_update",
":",
"if",
"not",
"missing_or_other_newer",
"(",
"dstfile",
",",
"src",
",",
"cwd",
"=",
"cwd",
")",
":",
"msg",
"=",
"'{0} newer than {1}, did not re-cythonize.'",
".",
"format",
"(",
"dstfile",
",",
"src",
")",
"if",
"logger",
":",
"logger",
".",
"info",
"(",
"msg",
")",
"else",
":",
"print",
"(",
"msg",
")",
"return",
"dstfile",
"if",
"cwd",
":",
"ori_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"else",
":",
"ori_dir",
"=",
"'.'",
"os",
".",
"chdir",
"(",
"cwd",
")",
"try",
":",
"cy_options",
"=",
"CompilationOptions",
"(",
"default_options",
")",
"cy_options",
".",
"__dict__",
".",
"update",
"(",
"cy_kwargs",
")",
"if",
"logger",
":",
"logger",
".",
"info",
"(",
"\"Cythonizing {0} to {1}\"",
".",
"format",
"(",
"src",
",",
"dstfile",
")",
")",
"cy_result",
"=",
"cy_compile",
"(",
"[",
"src",
"]",
",",
"cy_options",
",",
"full_module_name",
"=",
"full_module_name",
")",
"if",
"cy_result",
".",
"num_errors",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cython compilation failed.\"",
")",
"if",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"src",
")",
")",
"!=",
"os",
".",
"path",
".",
"abspath",
"(",
"destdir",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dstfile",
")",
":",
"os",
".",
"unlink",
"(",
"dstfile",
")",
"shutil",
".",
"move",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"src",
")",
",",
"c_name",
")",
",",
"destdir",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"ori_dir",
")",
"return",
"dstfile"
] |
Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
info level used.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
|
[
"Generates",
"a",
"C",
"file",
"from",
"a",
"Cython",
"source",
"file",
"."
] |
python
|
train
|
IBMStreams/pypi.streamsx
|
streamsx/topology/context.py
|
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/context.py#L55-L89
|
def submit(ctxtype, graph, config=None, username=None, password=None):
    """
    Submits a `Topology` (application) using the specified context type.
    Used to submit an application for compilation into a Streams application and
    execution within a Streaming Analytics service or IBM Streams instance.
    `ctxtype` defines how the application will be submitted, see :py:class:`ContextTypes`.
    The parameters `username` and `password` are only required when submitting to an
    IBM Streams instance and it is required to access the Streams REST API from the
    code performing the submit. Accessing data from views created by
    :py:meth:`~streamsx.topology.topology.Stream.view` requires access to the Streams REST API.
    Args:
        ctxtype(str): Type of context the application will be submitted to. A value from :py:class:`ContextTypes`.
        graph(Topology): The application topology to be submitted.
        config(dict): Configuration for the submission.
        username(str): Username for the Streams REST api.
        password(str): Password for `username`.
    Returns:
        SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`
        constant passed as `ctxtype`.
    """
    streamsx._streams._version._mismatch_check(__name__)
    graph = graph.graph
    if not graph.operators:
        raise ValueError("Topology {0} does not contain any streams.".format(graph.topology.name))
    context_submitter = _SubmitContextFactory(graph, config, username, password).get_submit_context(ctxtype)
    sr = SubmissionResult(context_submitter.submit())
    sr._submitter = context_submitter
    return sr
|
[
"def",
"submit",
"(",
"ctxtype",
",",
"graph",
",",
"config",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"streamsx",
".",
"_streams",
".",
"_version",
".",
"_mismatch_check",
"(",
"__name__",
")",
"graph",
"=",
"graph",
".",
"graph",
"if",
"not",
"graph",
".",
"operators",
":",
"raise",
"ValueError",
"(",
"\"Topology {0} does not contain any streams.\"",
".",
"format",
"(",
"graph",
".",
"topology",
".",
"name",
")",
")",
"context_submitter",
"=",
"_SubmitContextFactory",
"(",
"graph",
",",
"config",
",",
"username",
",",
"password",
")",
".",
"get_submit_context",
"(",
"ctxtype",
")",
"sr",
"=",
"SubmissionResult",
"(",
"context_submitter",
".",
"submit",
"(",
")",
")",
"sr",
".",
"_submitter",
"=",
"context_submitter",
"return",
"sr"
] |
Submits a `Topology` (application) using the specified context type.
Used to submit an application for compilation into a Streams application and
execution within a Streaming Analytics service or IBM Streams instance.
`ctxtype` defines how the application will be submitted, see :py:class:`ContextTypes`.
The parameters `username` and `password` are only required when submitting to an
IBM Streams instance and it is required to access the Streams REST API from the
code performing the submit. Accessing data from views created by
:py:meth:`~streamsx.topology.topology.Stream.view` requires access to the Streams REST API.
Args:
ctxtype(str): Type of context the application will be submitted to. A value from :py:class:`ContextTypes`.
graph(Topology): The application topology to be submitted.
config(dict): Configuration for the submission.
username(str): Username for the Streams REST api.
password(str): Password for `username`.
Returns:
SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`
constant passed as `ctxtype`.
|
[
"Submits",
"a",
"Topology",
"(",
"application",
")",
"using",
"the",
"specified",
"context",
"type",
"."
] |
python
|
train
|
glue-viz/glue-vispy-viewers
|
glue_vispy_viewers/extern/vispy/app/backends/ipython/_widget.py
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/app/backends/ipython/_widget.py#L21-L31
|
def _stop_timers(canvas):
    """Stop all timers in a canvas."""
    for attr in dir(canvas):
        try:
            attr_obj = getattr(canvas, attr)
        except NotImplementedError:
            # This try/except is needed because canvas.position raises
            # an error (it is not implemented in this backend).
            attr_obj = None
        if isinstance(attr_obj, Timer):
            attr_obj.stop()
|
[
"def",
"_stop_timers",
"(",
"canvas",
")",
":",
"for",
"attr",
"in",
"dir",
"(",
"canvas",
")",
":",
"try",
":",
"attr_obj",
"=",
"getattr",
"(",
"canvas",
",",
"attr",
")",
"except",
"NotImplementedError",
":",
"# This try/except is needed because canvas.position raises",
"# an error (it is not implemented in this backend).",
"attr_obj",
"=",
"None",
"if",
"isinstance",
"(",
"attr_obj",
",",
"Timer",
")",
":",
"attr_obj",
".",
"stop",
"(",
")"
] |
Stop all timers in a canvas.
|
[
"Stop",
"all",
"timers",
"in",
"a",
"canvas",
"."
] |
python
|
train
|
common-workflow-language/workflow-service
|
wes_service/util.py
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/util.py#L38-L44
|
def getoptlist(self, p):
    """Returns all option values stored that match p as a list."""
    optlist = []
    for k, v in self.pairs:
        if k == p:
            optlist.append(v)
    return optlist
|
[
"def",
"getoptlist",
"(",
"self",
",",
"p",
")",
":",
"optlist",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"self",
".",
"pairs",
":",
"if",
"k",
"==",
"p",
":",
"optlist",
".",
"append",
"(",
"v",
")",
"return",
"optlist"
] |
Returns all option values stored that match p as a list.
|
[
"Returns",
"all",
"option",
"values",
"stored",
"that",
"match",
"p",
"as",
"a",
"list",
"."
] |
python
|
train
|
shoebot/shoebot
|
shoebot/grammar/livecode.py
|
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/livecode.py#L84-L103
|
def run_tenuous(self):
    """
    Run edited source, if no exceptions occur then it
    graduates to known good.
    """
    with LiveExecution.lock:
        ns_snapshot = copy.copy(self.ns)
        try:
            source = self.edited_source
            self.edited_source = None
            self.do_exec(source, ns_snapshot)
            self.known_good = source
            self.call_good_cb()
            return True, None
        except Exception as ex:
            tb = traceback.format_exc()
            self.call_bad_cb(tb)
            self.ns.clear()
            self.ns.update(ns_snapshot)
            return False, ex
|
[
"def",
"run_tenuous",
"(",
"self",
")",
":",
"with",
"LiveExecution",
".",
"lock",
":",
"ns_snapshot",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"ns",
")",
"try",
":",
"source",
"=",
"self",
".",
"edited_source",
"self",
".",
"edited_source",
"=",
"None",
"self",
".",
"do_exec",
"(",
"source",
",",
"ns_snapshot",
")",
"self",
".",
"known_good",
"=",
"source",
"self",
".",
"call_good_cb",
"(",
")",
"return",
"True",
",",
"None",
"except",
"Exception",
"as",
"ex",
":",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"call_bad_cb",
"(",
"tb",
")",
"self",
".",
"ns",
".",
"clear",
"(",
")",
"self",
".",
"ns",
".",
"update",
"(",
"ns_snapshot",
")",
"return",
"False",
",",
"ex"
] |
Run edited source, if no exceptions occur then it
graduates to known good.
|
[
"Run",
"edited",
"source",
"if",
"no",
"exceptions",
"occur",
"then",
"it",
"graduates",
"to",
"known",
"good",
"."
] |
python
|
valid
|
RedHatInsights/insights-core
|
insights/client/config.py
|
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L393-L417
|
def _update_dict(self, dict_):
    '''
    Update without allowing undefined options or overwrite of class methods
    '''
    dict_ = dict((k, v) for k, v in dict_.items() if (
        k not in self._init_attrs))
    # zzz
    if 'no_gpg' in dict_ and dict_['no_gpg']:
        dict_['gpg'] = False
    unknown_opts = set(dict_.keys()).difference(set(DEFAULT_OPTS.keys()))
    if unknown_opts and self._print_errors:
        # only print error once
        sys.stdout.write(
            'WARNING: Unknown options: ' +
            ', '.join(list(unknown_opts)) + '\n')
        if 'no_schedule' in unknown_opts:
            sys.stdout.write('WARNING: Config option `no_schedule` has '
                             'been deprecated. To disable automatic '
                             'scheduling for Red Hat Insights, run '
                             '`insights-client --disable-schedule`\n')
    for u in unknown_opts:
        dict_.pop(u, None)
    self.__dict__.update(dict_)
|
[
"def",
"_update_dict",
"(",
"self",
",",
"dict_",
")",
":",
"dict_",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"dict_",
".",
"items",
"(",
")",
"if",
"(",
"k",
"not",
"in",
"self",
".",
"_init_attrs",
")",
")",
"# zzz",
"if",
"'no_gpg'",
"in",
"dict_",
"and",
"dict_",
"[",
"'no_gpg'",
"]",
":",
"dict_",
"[",
"'gpg'",
"]",
"=",
"False",
"unknown_opts",
"=",
"set",
"(",
"dict_",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"set",
"(",
"DEFAULT_OPTS",
".",
"keys",
"(",
")",
")",
")",
"if",
"unknown_opts",
"and",
"self",
".",
"_print_errors",
":",
"# only print error once",
"sys",
".",
"stdout",
".",
"write",
"(",
"'WARNING: Unknown options: '",
"+",
"', '",
".",
"join",
"(",
"list",
"(",
"unknown_opts",
")",
")",
"+",
"'\\n'",
")",
"if",
"'no_schedule'",
"in",
"unknown_opts",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'WARNING: Config option `no_schedule` has '",
"'been deprecated. To disable automatic '",
"'scheduling for Red Hat Insights, run '",
"'`insights-client --disable-schedule`\\n'",
")",
"for",
"u",
"in",
"unknown_opts",
":",
"dict_",
".",
"pop",
"(",
"u",
",",
"None",
")",
"self",
".",
"__dict__",
".",
"update",
"(",
"dict_",
")"
] |
Update without allowing undefined options or overwrite of class methods
|
[
"Update",
"without",
"allowing",
"undefined",
"options",
"or",
"overwrite",
"of",
"class",
"methods"
] |
python
|
train
|
oscarlazoarjona/fast
|
fast/symbolic.py
|
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L1431-L1462
|
def cross(a, b):
    r"""Cross product of two 3d vectors."""
    if isinstance(a, Mul):
        a = a.expand()
        avect = 1
        aivect = -1
        for ai, fact in enumerate(a.args):
            if isinstance(fact, Vector3D):
                avect = fact
                aivect = ai
                break
        acoef = a.args[:aivect] + a.args[aivect+1:]
        acoef = Mul(*acoef)
        return acoef*cross(avect, b)
    if isinstance(b, Mul):
        b = b.expand()
        bvect = 1
        bivect = -1
        for bi, fact in enumerate(b.args):
            if isinstance(fact, Vector3D):
                bvect = fact
                bivect = bi
                break
        bcoef = b.args[:bivect] + b.args[bivect+1:]
        bcoef = Mul(*bcoef)
        return bcoef*cross(a, bvect)
    if isinstance(a, Vector3D) and isinstance(b, Vector3D):
        return CrossProduct(a, b)
|
[
"def",
"cross",
"(",
"a",
",",
"b",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"Mul",
")",
":",
"a",
"=",
"a",
".",
"expand",
"(",
")",
"avect",
"=",
"1",
"aivect",
"=",
"-",
"1",
"for",
"ai",
",",
"fact",
"in",
"enumerate",
"(",
"a",
".",
"args",
")",
":",
"if",
"isinstance",
"(",
"fact",
",",
"Vector3D",
")",
":",
"avect",
"=",
"fact",
"aivect",
"=",
"ai",
"break",
"acoef",
"=",
"a",
".",
"args",
"[",
":",
"aivect",
"]",
"+",
"a",
".",
"args",
"[",
"aivect",
"+",
"1",
":",
"]",
"acoef",
"=",
"Mul",
"(",
"*",
"acoef",
")",
"return",
"acoef",
"*",
"cross",
"(",
"avect",
",",
"b",
")",
"if",
"isinstance",
"(",
"b",
",",
"Mul",
")",
":",
"b",
"=",
"b",
".",
"expand",
"(",
")",
"bvect",
"=",
"1",
"bivect",
"=",
"-",
"1",
"for",
"bi",
",",
"fact",
"in",
"enumerate",
"(",
"b",
".",
"args",
")",
":",
"if",
"isinstance",
"(",
"fact",
",",
"Vector3D",
")",
":",
"bvect",
"=",
"fact",
"bivect",
"=",
"bi",
"break",
"bcoef",
"=",
"b",
".",
"args",
"[",
":",
"bivect",
"]",
"+",
"b",
".",
"args",
"[",
"bivect",
"+",
"1",
":",
"]",
"bcoef",
"=",
"Mul",
"(",
"*",
"bcoef",
")",
"return",
"bcoef",
"*",
"cross",
"(",
"a",
",",
"bvect",
")",
"if",
"isinstance",
"(",
"a",
",",
"Vector3D",
")",
"and",
"isinstance",
"(",
"b",
",",
"Vector3D",
")",
":",
"return",
"CrossProduct",
"(",
"a",
",",
"b",
")"
] |
r"""Cross product of two 3d vectors.
|
[
"r",
"Cross",
"product",
"of",
"two",
"3d",
"vectors",
"."
] |
python
|
train
|
saltstack/salt
|
salt/master.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2063-L2174
|
def publish(self, clear_load):
    '''
    This method sends out publications to the minions, it can only be used
    by the LocalClient.
    '''
    extra = clear_load.get('kwargs', {})
    publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
    if publisher_acl.user_is_blacklisted(clear_load['user']) or \
            publisher_acl.cmd_is_blacklisted(clear_load['fun']):
        log.error(
            '%s does not have permissions to run %s. Please contact '
            'your local administrator if you believe this is in '
            'error.\n', clear_load['user'], clear_load['fun']
        )
        return {'error': {'name': 'AuthorizationError',
                          'message': 'Authorization error occurred.'}}
    # Retrieve the minions list
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
    _res = self.ckminions.check_minions(
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        delimiter
    )
    minions = _res.get('minions', list())
    missing = _res.get('missing', list())
    ssh_minions = _res.get('ssh_minions', False)
    # Check for external auth calls and authenticate
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
    if auth_type == 'user':
        auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    else:
        auth_check = self.loadauth.check_authentication(extra, auth_type)
    # Setup authorization list variable and error information
    auth_list = auth_check.get('auth_list', [])
    err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
    if auth_check.get('error'):
        # Authentication error occurred: do not continue.
        log.warning(err_msg)
        return {'error': {'name': 'AuthenticationError',
                          'message': 'Authentication error occurred.'}}
    # All Token, Eauth, and non-root users must pass the authorization check
    if auth_type != 'user' or (auth_type == 'user' and auth_list):
        # Authorize the request
        authorized = self.ckminions.auth_check(
            auth_list,
            clear_load['fun'],
            clear_load['arg'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'),
            minions=minions,
            # always accept find_job
            whitelist=['saltutil.find_job'],
        )
        if not authorized:
            # Authorization error occurred. Do not continue.
            if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
                log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
            log.warning(err_msg)
            return {'error': {'name': 'AuthorizationError',
                              'message': 'Authorization error occurred.'}}
        # Perform some specific auth_type tasks after the authorization check
        if auth_type == 'token':
            username = auth_check.get('username')
            clear_load['user'] = username
            log.debug('Minion tokenized user = "%s"', username)
        elif auth_type == 'eauth':
            # The username we are attempting to auth with
            clear_load['user'] = self.loadauth.load_name(extra)
    # If we order masters (via a syndic), don't short circuit if no minions
    # are found
    if not self.opts.get('order_masters'):
        # Check for no minions
        if not minions:
            return {
                'enc': 'clear',
                'load': {
                    'jid': None,
                    'minions': minions,
                    'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
                }
            }
    if extra.get('batch', None):
        return self.publish_batch(clear_load, extra, minions, missing)
    jid = self._prep_jid(clear_load, extra)
    if jid is None:
        return {'enc': 'clear',
                'load': {'error': 'Master failed to assign jid'}}
    payload = self._prep_pub(minions, jid, clear_load, extra, missing)
    # Send it!
    self._send_ssh_pub(payload, ssh_minions=ssh_minions)
    self._send_pub(payload)
    return {
        'enc': 'clear',
        'load': {
            'jid': clear_load['jid'],
            'minions': minions,
            'missing': missing
        }
    }
|
[
"def",
"publish",
"(",
"self",
",",
"clear_load",
")",
":",
"extra",
"=",
"clear_load",
".",
"get",
"(",
"'kwargs'",
",",
"{",
"}",
")",
"publisher_acl",
"=",
"salt",
".",
"acl",
".",
"PublisherACL",
"(",
"self",
".",
"opts",
"[",
"'publisher_acl_blacklist'",
"]",
")",
"if",
"publisher_acl",
".",
"user_is_blacklisted",
"(",
"clear_load",
"[",
"'user'",
"]",
")",
"or",
"publisher_acl",
".",
"cmd_is_blacklisted",
"(",
"clear_load",
"[",
"'fun'",
"]",
")",
":",
"log",
".",
"error",
"(",
"'%s does not have permissions to run %s. Please contact '",
"'your local administrator if you believe this is in '",
"'error.\\n'",
",",
"clear_load",
"[",
"'user'",
"]",
",",
"clear_load",
"[",
"'fun'",
"]",
")",
"return",
"{",
"'error'",
":",
"{",
"'name'",
":",
"'AuthorizationError'",
",",
"'message'",
":",
"'Authorization error occurred.'",
"}",
"}",
"# Retrieve the minions list",
"delimiter",
"=",
"clear_load",
".",
"get",
"(",
"'kwargs'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'delimiter'",
",",
"DEFAULT_TARGET_DELIM",
")",
"_res",
"=",
"self",
".",
"ckminions",
".",
"check_minions",
"(",
"clear_load",
"[",
"'tgt'",
"]",
",",
"clear_load",
".",
"get",
"(",
"'tgt_type'",
",",
"'glob'",
")",
",",
"delimiter",
")",
"minions",
"=",
"_res",
".",
"get",
"(",
"'minions'",
",",
"list",
"(",
")",
")",
"missing",
"=",
"_res",
".",
"get",
"(",
"'missing'",
",",
"list",
"(",
")",
")",
"ssh_minions",
"=",
"_res",
".",
"get",
"(",
"'ssh_minions'",
",",
"False",
")",
"# Check for external auth calls and authenticate",
"auth_type",
",",
"err_name",
",",
"key",
",",
"sensitive_load_keys",
"=",
"self",
".",
"_prep_auth_info",
"(",
"extra",
")",
"if",
"auth_type",
"==",
"'user'",
":",
"auth_check",
"=",
"self",
".",
"loadauth",
".",
"check_authentication",
"(",
"clear_load",
",",
"auth_type",
",",
"key",
"=",
"key",
")",
"else",
":",
"auth_check",
"=",
"self",
".",
"loadauth",
".",
"check_authentication",
"(",
"extra",
",",
"auth_type",
")",
"# Setup authorization list variable and error information",
"auth_list",
"=",
"auth_check",
".",
"get",
"(",
"'auth_list'",
",",
"[",
"]",
")",
"err_msg",
"=",
"'Authentication failure of type \"{0}\" occurred.'",
".",
"format",
"(",
"auth_type",
")",
"if",
"auth_check",
".",
"get",
"(",
"'error'",
")",
":",
"# Authentication error occurred: do not continue.",
"log",
".",
"warning",
"(",
"err_msg",
")",
"return",
"{",
"'error'",
":",
"{",
"'name'",
":",
"'AuthenticationError'",
",",
"'message'",
":",
"'Authentication error occurred.'",
"}",
"}",
"# All Token, Eauth, and non-root users must pass the authorization check",
"if",
"auth_type",
"!=",
"'user'",
"or",
"(",
"auth_type",
"==",
"'user'",
"and",
"auth_list",
")",
":",
"# Authorize the request",
"authorized",
"=",
"self",
".",
"ckminions",
".",
"auth_check",
"(",
"auth_list",
",",
"clear_load",
"[",
"'fun'",
"]",
",",
"clear_load",
"[",
"'arg'",
"]",
",",
"clear_load",
"[",
"'tgt'",
"]",
",",
"clear_load",
".",
"get",
"(",
"'tgt_type'",
",",
"'glob'",
")",
",",
"minions",
"=",
"minions",
",",
"# always accept find_job",
"whitelist",
"=",
"[",
"'saltutil.find_job'",
"]",
",",
")",
"if",
"not",
"authorized",
":",
"# Authorization error occurred. Do not continue.",
"if",
"auth_type",
"==",
"'eauth'",
"and",
"not",
"auth_list",
"and",
"'username'",
"in",
"extra",
"and",
"'eauth'",
"in",
"extra",
":",
"log",
".",
"debug",
"(",
"'Auth configuration for eauth \"%s\" and user \"%s\" is empty'",
",",
"extra",
"[",
"'eauth'",
"]",
",",
"extra",
"[",
"'username'",
"]",
")",
"log",
".",
"warning",
"(",
"err_msg",
")",
"return",
"{",
"'error'",
":",
"{",
"'name'",
":",
"'AuthorizationError'",
",",
"'message'",
":",
"'Authorization error occurred.'",
"}",
"}",
"# Perform some specific auth_type tasks after the authorization check",
"if",
"auth_type",
"==",
"'token'",
":",
"username",
"=",
"auth_check",
".",
"get",
"(",
"'username'",
")",
"clear_load",
"[",
"'user'",
"]",
"=",
"username",
"log",
".",
"debug",
"(",
"'Minion tokenized user = \"%s\"'",
",",
"username",
")",
"elif",
"auth_type",
"==",
"'eauth'",
":",
"# The username we are attempting to auth with",
"clear_load",
"[",
"'user'",
"]",
"=",
"self",
".",
"loadauth",
".",
"load_name",
"(",
"extra",
")",
"# If we order masters (via a syndic), don't short circuit if no minions",
"# are found",
"if",
"not",
"self",
".",
"opts",
".",
"get",
"(",
"'order_masters'",
")",
":",
"# Check for no minions",
"if",
"not",
"minions",
":",
"return",
"{",
"'enc'",
":",
"'clear'",
",",
"'load'",
":",
"{",
"'jid'",
":",
"None",
",",
"'minions'",
":",
"minions",
",",
"'error'",
":",
"'Master could not resolve minions for target {0}'",
".",
"format",
"(",
"clear_load",
"[",
"'tgt'",
"]",
")",
"}",
"}",
"if",
"extra",
".",
"get",
"(",
"'batch'",
",",
"None",
")",
":",
"return",
"self",
".",
"publish_batch",
"(",
"clear_load",
",",
"extra",
",",
"minions",
",",
"missing",
")",
"jid",
"=",
"self",
".",
"_prep_jid",
"(",
"clear_load",
",",
"extra",
")",
"if",
"jid",
"is",
"None",
":",
"return",
"{",
"'enc'",
":",
"'clear'",
",",
"'load'",
":",
"{",
"'error'",
":",
"'Master failed to assign jid'",
"}",
"}",
"payload",
"=",
"self",
".",
"_prep_pub",
"(",
"minions",
",",
"jid",
",",
"clear_load",
",",
"extra",
",",
"missing",
")",
"# Send it!",
"self",
".",
"_send_ssh_pub",
"(",
"payload",
",",
"ssh_minions",
"=",
"ssh_minions",
")",
"self",
".",
"_send_pub",
"(",
"payload",
")",
"return",
"{",
"'enc'",
":",
"'clear'",
",",
"'load'",
":",
"{",
"'jid'",
":",
"clear_load",
"[",
"'jid'",
"]",
",",
"'minions'",
":",
"minions",
",",
"'missing'",
":",
"missing",
"}",
"}"
] |
This method sends out publications to the minions, it can only be used
by the LocalClient.
|
[
"This",
"method",
"sends",
"out",
"publications",
"to",
"the",
"minions",
"it",
"can",
"only",
"be",
"used",
"by",
"the",
"LocalClient",
"."
] |
python
|
train
|
trendels/rhino
|
rhino/response.py
|
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/response.py#L362-L404
|
def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
    """Helper to build an HTTP response.
    Parameters:
    code
    : An integer status code.
    body
    : The response body. See `Response.__init__` for details.
    etag
    : A value for the ETag header. Double quotes will be added unless the
      string starts and ends with a double quote.
    last_modified
    : A value for the Last-Modified header as a datetime.datetime object
      or Unix timestamp.
    expires
    : A value for the Expires header as number of seconds, datetime.timedelta
      or datetime.datetime object.
      Note: a value of type int or float is interpreted as a number of
      seconds in the future, *not* as Unix timestamp.
    **kw
    : All other keyword arguments are interpreted as response headers.
      The names will be converted to header names by replacing
      underscores with hyphens and converting to title case
      (e.g. `x_powered_by` => `X-Powered-By`).
    """
    if etag is not None:
        if not (etag[0] == '"' and etag[-1] == '"'):
            etag = '"%s"' % etag
        kw['etag'] = etag
    if last_modified is not None:
        kw['last_modified'] = datetime_to_httpdate(last_modified)
    if expires is not None:
        if isinstance(expires, datetime):
            kw['expires'] = datetime_to_httpdate(expires)
        else:
            kw['expires'] = timedelta_to_httpdate(expires)
    headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())]
    return Response(code, headers, body)
|
[
"def",
"response",
"(",
"code",
",",
"body",
"=",
"''",
",",
"etag",
"=",
"None",
",",
"last_modified",
"=",
"None",
",",
"expires",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"etag",
"is",
"not",
"None",
":",
"if",
"not",
"(",
"etag",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"etag",
"[",
"-",
"1",
"]",
"==",
"'\"'",
")",
":",
"etag",
"=",
"'\"%s\"'",
"%",
"etag",
"kw",
"[",
"'etag'",
"]",
"=",
"etag",
"if",
"last_modified",
"is",
"not",
"None",
":",
"kw",
"[",
"'last_modified'",
"]",
"=",
"datetime_to_httpdate",
"(",
"last_modified",
")",
"if",
"expires",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"expires",
",",
"datetime",
")",
":",
"kw",
"[",
"'expires'",
"]",
"=",
"datetime_to_httpdate",
"(",
"expires",
")",
"else",
":",
"kw",
"[",
"'expires'",
"]",
"=",
"timedelta_to_httpdate",
"(",
"expires",
")",
"headers",
"=",
"[",
"(",
"k",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
".",
"title",
"(",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"kw",
".",
"items",
"(",
")",
")",
"]",
"return",
"Response",
"(",
"code",
",",
"headers",
",",
"body",
")"
] |
Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
|
[
"Helper",
"to",
"build",
"an",
"HTTP",
"response",
"."
] |
python
|
train
|
DataONEorg/d1_python
|
gmn/src/d1_gmn/app/management/commands/process_replication_queue.py
|
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/management/commands/process_replication_queue.py#L228-L246
|
def _create_replica(self, sysmeta_pyxb, sciobj_bytestream):
    """GMN handles replicas differently from native objects, with the main
    differences being related to handling of restrictions related to revision chains
    and SIDs.
    So this create sequence differs significantly from the regular one that is
    accessed through MNStorage.create().
    """
    pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
    self._assert_is_pid_of_local_unprocessed_replica(pid)
    self._check_and_create_replica_revision(sysmeta_pyxb, "obsoletes")
    self._check_and_create_replica_revision(sysmeta_pyxb, "obsoletedBy")
    sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
    sciobj_model = d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, sciobj_url)
    self._store_science_object_bytes(pid, sciobj_bytestream)
    d1_gmn.app.event_log.create_log_entry(
        sciobj_model, "create", "0.0.0.0", "[replica]", "[replica]"
    )
|
[
"def",
"_create_replica",
"(",
"self",
",",
"sysmeta_pyxb",
",",
"sciobj_bytestream",
")",
":",
"pid",
"=",
"d1_common",
".",
"xml",
".",
"get_req_val",
"(",
"sysmeta_pyxb",
".",
"identifier",
")",
"self",
".",
"_assert_is_pid_of_local_unprocessed_replica",
"(",
"pid",
")",
"self",
".",
"_check_and_create_replica_revision",
"(",
"sysmeta_pyxb",
",",
"\"obsoletes\"",
")",
"self",
".",
"_check_and_create_replica_revision",
"(",
"sysmeta_pyxb",
",",
"\"obsoletedBy\"",
")",
"sciobj_url",
"=",
"d1_gmn",
".",
"app",
".",
"sciobj_store",
".",
"get_rel_sciobj_file_url_by_pid",
"(",
"pid",
")",
"sciobj_model",
"=",
"d1_gmn",
".",
"app",
".",
"sysmeta",
".",
"create_or_update",
"(",
"sysmeta_pyxb",
",",
"sciobj_url",
")",
"self",
".",
"_store_science_object_bytes",
"(",
"pid",
",",
"sciobj_bytestream",
")",
"d1_gmn",
".",
"app",
".",
"event_log",
".",
"create_log_entry",
"(",
"sciobj_model",
",",
"\"create\"",
",",
"\"0.0.0.0\"",
",",
"\"[replica]\"",
",",
"\"[replica]\"",
")"
] |
GMN handles replicas differently from native objects, with the main
differences being related to handling of restrictions related to revision chains
and SIDs.
So this create sequence differs significantly from the regular one that is
accessed through MNStorage.create().
|
[
"GMN",
"handles",
"replicas",
"differently",
"from",
"native",
"objects",
"with",
"the",
"main",
"differences",
"being",
"related",
"to",
"handling",
"of",
"restrictions",
"related",
"to",
"revision",
"chains",
"and",
"SIDs",
"."
] |
python
|
train
|
arraylabs/pymyq
|
pymyq/api.py
|
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L255-L283
|
async def get_devices(self, covers_only: bool = True) -> list:
    """Get a list of all devices associated with the account."""
    from .device import MyQDevice
    _LOGGER.debug('Retrieving list of devices')
    devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)
    # print(json.dumps(devices_resp, indent=4))
    device_list = []
    if devices_resp is None:
        return device_list
    for device in devices_resp['Devices']:
        if not covers_only or \
                device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES:
            self._devices.append({
                'device_id': device['MyQDeviceId'],
                'device_info': device
            })
            myq_device = MyQDevice(
                self._devices[-1], self._brand, self)
            device_list.append(myq_device)
    # Store current device states.
    self._store_device_states(devices_resp.get('Devices', []))
    _LOGGER.debug('List of devices retrieved')
    return device_list
|
[
"async",
"def",
"get_devices",
"(",
"self",
",",
"covers_only",
":",
"bool",
"=",
"True",
")",
"->",
"list",
":",
"from",
".",
"device",
"import",
"MyQDevice",
"_LOGGER",
".",
"debug",
"(",
"'Retrieving list of devices'",
")",
"devices_resp",
"=",
"await",
"self",
".",
"_request",
"(",
"'get'",
",",
"DEVICE_LIST_ENDPOINT",
")",
"# print(json.dumps(devices_resp, indent=4))",
"device_list",
"=",
"[",
"]",
"if",
"devices_resp",
"is",
"None",
":",
"return",
"device_list",
"for",
"device",
"in",
"devices_resp",
"[",
"'Devices'",
"]",
":",
"if",
"not",
"covers_only",
"or",
"device",
"[",
"'MyQDeviceTypeName'",
"]",
"in",
"SUPPORTED_DEVICE_TYPE_NAMES",
":",
"self",
".",
"_devices",
".",
"append",
"(",
"{",
"'device_id'",
":",
"device",
"[",
"'MyQDeviceId'",
"]",
",",
"'device_info'",
":",
"device",
"}",
")",
"myq_device",
"=",
"MyQDevice",
"(",
"self",
".",
"_devices",
"[",
"-",
"1",
"]",
",",
"self",
".",
"_brand",
",",
"self",
")",
"device_list",
".",
"append",
"(",
"myq_device",
")",
"# Store current device states.",
"self",
".",
"_store_device_states",
"(",
"devices_resp",
".",
"get",
"(",
"'Devices'",
",",
"[",
"]",
")",
")",
"_LOGGER",
".",
"debug",
"(",
"'List of devices retrieved'",
")",
"return",
"device_list"
] |
Get a list of all devices associated with the account.
|
[
"Get",
"a",
"list",
"of",
"all",
"devices",
"associated",
"with",
"the",
"account",
"."
] |
python
|
train
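As a quick illustration of how the coroutine above might be driven, here is a minimal, hedged sketch. It assumes `myq` is an already-authenticated API object exposing the `get_devices()` coroutine shown in this record; how that object is constructed (login, web session) is not part of this snippet.

import asyncio

async def list_covers(myq):
    # `myq` is assumed to be an authenticated API instance providing the
    # get_devices() coroutine from the record above.
    devices = await myq.get_devices(covers_only=True)
    for device in devices:
        print(device)

# Example driver (commented out because constructing `myq` is not shown here):
# asyncio.get_event_loop().run_until_complete(list_covers(myq))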
|
basho/riak-python-client
|
riak/transports/pool.py
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/pool.py#L209-L221
|
def delete_resource(self, resource):
"""
Deletes the resource from the pool and destroys the associated
resource. Not usually needed by users of the pool, but called
internally when BadResource is raised.
:param resource: the resource to remove
:type resource: Resource
"""
with self.lock:
self.resources.remove(resource)
self.destroy_resource(resource.object)
del resource
|
[
"def",
"delete_resource",
"(",
"self",
",",
"resource",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"resources",
".",
"remove",
"(",
"resource",
")",
"self",
".",
"destroy_resource",
"(",
"resource",
".",
"object",
")",
"del",
"resource"
] |
Deletes the resource from the pool and destroys the associated
resource. Not usually needed by users of the pool, but called
internally when BadResource is raised.
:param resource: the resource to remove
:type resource: Resource
|
[
"Deletes",
"the",
"resource",
"from",
"the",
"pool",
"and",
"destroys",
"the",
"associated",
"resource",
".",
"Not",
"usually",
"needed",
"by",
"users",
"of",
"the",
"pool",
"but",
"called",
"internally",
"when",
"BadResource",
"is",
"raised",
"."
] |
python
|
train
|
peepall/FancyLogger
|
FancyLogger/__init__.py
|
https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/__init__.py#L305-L336
|
def set_task(self,
task_id,
total,
prefix,
suffix='',
decimals=0,
bar_length=60,
keep_alive=False,
display_time=False):
"""
Defines a new progress bar with the given information.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
    :param total: The total number of iterations for this progress bar.
:param prefix: The text that should be displayed at the left side of the progress bar. Note that
progress bars will always stay left-aligned at the shortest possible.
:param suffix: [Optional] The text that should be displayed at the very right side of the progress bar.
:param decimals: [Optional] The number of decimals to display for the percentage.
:param bar_length: [Optional] The graphical bar size displayed on screen. Unit is character.
:param keep_alive: [Optional] Specify whether the progress bar should stay displayed forever once completed
or if it should vanish.
:param display_time: [Optional] Specify whether the duration since the progress has begun should be
    displayed. Running time will be displayed between parentheses, whereas it will be
displayed between brackets when the progress has completed.
"""
self.queue.put(dill.dumps(NewTaskCommand(task_id=task_id,
task=TaskProgress(total,
prefix,
suffix,
decimals,
bar_length,
keep_alive,
display_time))))
|
[
"def",
"set_task",
"(",
"self",
",",
"task_id",
",",
"total",
",",
"prefix",
",",
"suffix",
"=",
"''",
",",
"decimals",
"=",
"0",
",",
"bar_length",
"=",
"60",
",",
"keep_alive",
"=",
"False",
",",
"display_time",
"=",
"False",
")",
":",
"self",
".",
"queue",
".",
"put",
"(",
"dill",
".",
"dumps",
"(",
"NewTaskCommand",
"(",
"task_id",
"=",
"task_id",
",",
"task",
"=",
"TaskProgress",
"(",
"total",
",",
"prefix",
",",
"suffix",
",",
"decimals",
",",
"bar_length",
",",
"keep_alive",
",",
"display_time",
")",
")",
")",
")"
] |
Defines a new progress bar with the given information.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param total: The total number of iterations for this progress bar.
:param prefix: The text that should be displayed at the left side of the progress bar. Note that
progress bars will always stay left-aligned at the shortest possible.
:param suffix: [Optional] The text that should be displayed at the very right side of the progress bar.
:param decimals: [Optional] The number of decimals to display for the percentage.
:param bar_length: [Optional] The graphical bar size displayed on screen. Unit is character.
:param keep_alive: [Optional] Specify whether the progress bar should stay displayed forever once completed
or if it should vanish.
:param display_time: [Optional] Specify whether the duration since the progress has begun should be
displayed. Running time will be displayed between parentheses, whereas it will be
displayed between brackets when the progress has completed.
|
[
"Defines",
"a",
"new",
"progress",
"bar",
"with",
"the",
"given",
"information",
".",
":",
"param",
"task_id",
":",
"Unique",
"identifier",
"for",
"this",
"progress",
"bar",
".",
"Will",
"erase",
"if",
"already",
"existing",
".",
":",
"param",
"total",
":",
"The",
"total",
"number",
"of",
"iteration",
"for",
"this",
"progress",
"bar",
".",
":",
"param",
"prefix",
":",
"The",
"text",
"that",
"should",
"be",
"displayed",
"at",
"the",
"left",
"side",
"of",
"the",
"progress",
"bar",
".",
"Note",
"that",
"progress",
"bars",
"will",
"always",
"stay",
"left",
"-",
"aligned",
"at",
"the",
"shortest",
"possible",
".",
":",
"param",
"suffix",
":",
"[",
"Optional",
"]",
"The",
"text",
"that",
"should",
"be",
"displayed",
"at",
"the",
"very",
"right",
"side",
"of",
"the",
"progress",
"bar",
".",
":",
"param",
"decimals",
":",
"[",
"Optional",
"]",
"The",
"number",
"of",
"decimals",
"to",
"display",
"for",
"the",
"percentage",
".",
":",
"param",
"bar_length",
":",
"[",
"Optional",
"]",
"The",
"graphical",
"bar",
"size",
"displayed",
"on",
"screen",
".",
"Unit",
"is",
"character",
".",
":",
"param",
"keep_alive",
":",
"[",
"Optional",
"]",
"Specify",
"whether",
"the",
"progress",
"bar",
"should",
"stay",
"displayed",
"forever",
"once",
"completed",
"or",
"if",
"it",
"should",
"vanish",
".",
":",
"param",
"display_time",
":",
"[",
"Optional",
"]",
"Specify",
"whether",
"the",
"duration",
"since",
"the",
"progress",
"has",
"begun",
"should",
"be",
"displayed",
".",
"Running",
"time",
"will",
"be",
"displayed",
"between",
"parenthesis",
"whereas",
"it",
"will",
"be",
"displayed",
"between",
"brackets",
"when",
"the",
"progress",
"has",
"completed",
"."
] |
python
|
train
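A minimal usage sketch for the method above. Only `set_task()` and its parameters come from this record; the import path and the zero-argument `FancyLogger()` constructor are assumptions made for illustration.

from FancyLogger import FancyLogger  # import path and class name assumed, not shown in this record

logger = FancyLogger()  # assumed constructor; configuration options are not part of this record
logger.set_task(task_id='download',
                total=250,
                prefix='Downloading files',
                suffix='remaining',
                decimals=1,
                bar_length=40,
                keep_alive=True,
                display_time=True)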
|
greenbone/ospd
|
ospd/ospd.py
|
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L759-L800
|
def handle_client_stream(self, stream, is_unix=False):
""" Handles stream of data received from client. """
assert stream
data = []
stream.settimeout(2)
while True:
try:
if is_unix:
buf = stream.recv(1024)
else:
buf = stream.read(1024)
if not buf:
break
data.append(buf)
except (AttributeError, ValueError) as message:
logger.error(message)
return
except (ssl.SSLError) as exception:
logger.debug('Error: %s', exception[0])
break
except (socket.timeout) as exception:
logger.debug('Error: %s', exception)
break
data = b''.join(data)
if len(data) <= 0:
logger.debug("Empty client stream")
return
try:
response = self.handle_command(data)
except OSPDError as exception:
response = exception.as_xml()
logger.debug('Command error: %s', exception.message)
except Exception:
logger.exception('While handling client command:')
exception = OSPDError('Fatal error', 'error')
response = exception.as_xml()
if is_unix:
send_method = stream.send
else:
send_method = stream.write
self.write_to_stream(send_method, response)
|
[
"def",
"handle_client_stream",
"(",
"self",
",",
"stream",
",",
"is_unix",
"=",
"False",
")",
":",
"assert",
"stream",
"data",
"=",
"[",
"]",
"stream",
".",
"settimeout",
"(",
"2",
")",
"while",
"True",
":",
"try",
":",
"if",
"is_unix",
":",
"buf",
"=",
"stream",
".",
"recv",
"(",
"1024",
")",
"else",
":",
"buf",
"=",
"stream",
".",
"read",
"(",
"1024",
")",
"if",
"not",
"buf",
":",
"break",
"data",
".",
"append",
"(",
"buf",
")",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
"as",
"message",
":",
"logger",
".",
"error",
"(",
"message",
")",
"return",
"except",
"(",
"ssl",
".",
"SSLError",
")",
"as",
"exception",
":",
"logger",
".",
"debug",
"(",
"'Error: %s'",
",",
"exception",
"[",
"0",
"]",
")",
"break",
"except",
"(",
"socket",
".",
"timeout",
")",
"as",
"exception",
":",
"logger",
".",
"debug",
"(",
"'Error: %s'",
",",
"exception",
")",
"break",
"data",
"=",
"b''",
".",
"join",
"(",
"data",
")",
"if",
"len",
"(",
"data",
")",
"<=",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Empty client stream\"",
")",
"return",
"try",
":",
"response",
"=",
"self",
".",
"handle_command",
"(",
"data",
")",
"except",
"OSPDError",
"as",
"exception",
":",
"response",
"=",
"exception",
".",
"as_xml",
"(",
")",
"logger",
".",
"debug",
"(",
"'Command error: %s'",
",",
"exception",
".",
"message",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'While handling client command:'",
")",
"exception",
"=",
"OSPDError",
"(",
"'Fatal error'",
",",
"'error'",
")",
"response",
"=",
"exception",
".",
"as_xml",
"(",
")",
"if",
"is_unix",
":",
"send_method",
"=",
"stream",
".",
"send",
"else",
":",
"send_method",
"=",
"stream",
".",
"write",
"self",
".",
"write_to_stream",
"(",
"send_method",
",",
"response",
")"
] |
Handles stream of data received from client.
|
[
"Handles",
"stream",
"of",
"data",
"received",
"from",
"client",
"."
] |
python
|
train
|
jbarlow83/OCRmyPDF
|
src/ocrmypdf/_pipeline.py
|
https://github.com/jbarlow83/OCRmyPDF/blob/79c84eefa353632a3d7ccddbd398c6678c1c1777/src/ocrmypdf/_pipeline.py#L241-L253
|
def get_page_square_dpi(pageinfo, options):
"Get the DPI when we require xres == yres, scaled to physical units"
xres = pageinfo.xres or 0
yres = pageinfo.yres or 0
userunit = pageinfo.userunit or 1
return float(
max(
(xres * userunit) or VECTOR_PAGE_DPI,
(yres * userunit) or VECTOR_PAGE_DPI,
VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
options.oversample or 0,
)
)
|
[
"def",
"get_page_square_dpi",
"(",
"pageinfo",
",",
"options",
")",
":",
"xres",
"=",
"pageinfo",
".",
"xres",
"or",
"0",
"yres",
"=",
"pageinfo",
".",
"yres",
"or",
"0",
"userunit",
"=",
"pageinfo",
".",
"userunit",
"or",
"1",
"return",
"float",
"(",
"max",
"(",
"(",
"xres",
"*",
"userunit",
")",
"or",
"VECTOR_PAGE_DPI",
",",
"(",
"yres",
"*",
"userunit",
")",
"or",
"VECTOR_PAGE_DPI",
",",
"VECTOR_PAGE_DPI",
"if",
"pageinfo",
".",
"has_vector",
"else",
"0",
",",
"options",
".",
"oversample",
"or",
"0",
",",
")",
")"
] |
Get the DPI when we require xres == yres, scaled to physical units
|
[
"Get",
"the",
"DPI",
"when",
"we",
"require",
"xres",
"==",
"yres",
"scaled",
"to",
"physical",
"units"
] |
python
|
train
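To make the `max()` logic above concrete, here is a small worked sketch with stand-in objects. The attribute names mirror the function body; the `VECTOR_PAGE_DPI` value and the sample resolutions are made up for illustration.

from types import SimpleNamespace

VECTOR_PAGE_DPI = 400  # stand-in value; the real constant is defined elsewhere in ocrmypdf

pageinfo = SimpleNamespace(xres=300, yres=150, userunit=1, has_vector=False)
options = SimpleNamespace(oversample=0)

dpi = float(max(
    (pageinfo.xres * pageinfo.userunit) or VECTOR_PAGE_DPI,
    (pageinfo.yres * pageinfo.userunit) or VECTOR_PAGE_DPI,
    VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
    options.oversample or 0,
))
print(dpi)  # 300.0 -- the larger axis resolution wins, so xres == yres can be enforced downstream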
|
deepmind/sonnet
|
sonnet/python/modules/relational_memory.py
|
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/relational_memory.py#L184-L210
|
def _create_gates(self, inputs, memory):
"""Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
"""
# We'll create the input and forget gates at once. Hence, calculate double
# the gate size.
num_gates = 2 * self._calculate_gate_size()
memory = tf.tanh(memory)
inputs = basic.BatchFlatten()(inputs)
gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)
gate_inputs = tf.expand_dims(gate_inputs, axis=1)
gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)
gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2)
input_gate, forget_gate = gates
input_gate = tf.sigmoid(input_gate + self._input_bias)
forget_gate = tf.sigmoid(forget_gate + self._forget_bias)
return input_gate, forget_gate
|
[
"def",
"_create_gates",
"(",
"self",
",",
"inputs",
",",
"memory",
")",
":",
"# We'll create the input and forget gates at once. Hence, calculate double",
"# the gate size.",
"num_gates",
"=",
"2",
"*",
"self",
".",
"_calculate_gate_size",
"(",
")",
"memory",
"=",
"tf",
".",
"tanh",
"(",
"memory",
")",
"inputs",
"=",
"basic",
".",
"BatchFlatten",
"(",
")",
"(",
"inputs",
")",
"gate_inputs",
"=",
"basic",
".",
"BatchApply",
"(",
"basic",
".",
"Linear",
"(",
"num_gates",
")",
",",
"n_dims",
"=",
"1",
")",
"(",
"inputs",
")",
"gate_inputs",
"=",
"tf",
".",
"expand_dims",
"(",
"gate_inputs",
",",
"axis",
"=",
"1",
")",
"gate_memory",
"=",
"basic",
".",
"BatchApply",
"(",
"basic",
".",
"Linear",
"(",
"num_gates",
")",
")",
"(",
"memory",
")",
"gates",
"=",
"tf",
".",
"split",
"(",
"gate_memory",
"+",
"gate_inputs",
",",
"num_or_size_splits",
"=",
"2",
",",
"axis",
"=",
"2",
")",
"input_gate",
",",
"forget_gate",
"=",
"gates",
"input_gate",
"=",
"tf",
".",
"sigmoid",
"(",
"input_gate",
"+",
"self",
".",
"_input_bias",
")",
"forget_gate",
"=",
"tf",
".",
"sigmoid",
"(",
"forget_gate",
"+",
"self",
".",
"_forget_bias",
")",
"return",
"input_gate",
",",
"forget_gate"
] |
Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
|
[
"Create",
"input",
"and",
"forget",
"gates",
"for",
"this",
"step",
"using",
"inputs",
"and",
"memory",
"."
] |
python
|
train
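The gating arithmetic in the method above reduces to two linear maps whose sum is split into an input gate and a forget gate. The following NumPy sketch mirrors that structure only; the weight matrices, sizes, and bias values are stand-ins, and it is not a substitute for the Sonnet module.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, slots, mem_size, gate_size = 2, 4, 8, 8   # illustrative sizes
num_gates = 2 * gate_size                        # input and forget gates computed at once, as above

inputs = np.random.randn(batch, 3)               # already-flattened input
memory = np.tanh(np.random.randn(batch, slots, mem_size))

w_in = np.random.randn(3, num_gates)             # stand-in for basic.Linear applied to inputs
w_mem = np.random.randn(mem_size, num_gates)     # stand-in for basic.Linear applied to memory

gate_inputs = (inputs @ w_in)[:, np.newaxis, :]  # expand_dims(axis=1) so it broadcasts over slots
gate_memory = memory @ w_mem
input_gate, forget_gate = np.split(gate_memory + gate_inputs, 2, axis=2)

input_bias, forget_bias = 0.0, 1.0               # illustrative bias values
input_gate = sigmoid(input_gate + input_bias)
forget_gate = sigmoid(forget_gate + forget_bias)
print(input_gate.shape, forget_gate.shape)       # (2, 4, 8) (2, 4, 8)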
|
data-8/datascience
|
datascience/tables.py
|
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2190-L2295
|
def scatter(self, column_for_x, select=None, overlay=True, fit_line=False,
colors=None, labels=None, sizes=None, width=5, height=5, s=20, **vargs):
"""Creates scatterplots, optionally adding a line of best fit.
Args:
``column_for_x`` (``str``): The column to use for the x-axis values
and label of the scatter plots.
Kwargs:
``overlay`` (``bool``): If true, creates a chart with one color
per data column; if False, each plot will be displayed separately.
``fit_line`` (``bool``): draw a line of best fit for each set of points.
``vargs``: Additional arguments that get passed into `plt.scatter`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
for additional arguments that can be passed into vargs. These
include: `marker` and `norm`, to name a couple.
``colors``: A column of categories to be used for coloring dots.
``labels``: A column of text labels to annotate dots.
``sizes``: A column of values to set the relative areas of dots.
``s``: Size of dots. If sizes is also provided, then dots will be
in the range 0 to 2 * s.
Raises:
ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical
Returns:
Scatter plot of values of ``column_for_x`` plotted against
values for all other columns in self. Each plot uses the values in
`column_for_x` for horizontal positions. One plot is produced for
all other columns in self as y (or for the columns designated by
`select`).
>>> table = Table().with_columns(
... 'x', make_array(9, 3, 3, 1),
... 'y', make_array(1, 2, 2, 10),
... 'z', make_array(3, 4, 5, 6))
>>> table
x | y | z
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table.scatter('x') # doctest: +SKIP
<scatterplot of values in y and z on x>
>>> table.scatter('x', overlay=False) # doctest: +SKIP
<scatterplot of values in y on x>
<scatterplot of values in z on x>
>>> table.scatter('x', fit_line=True) # doctest: +SKIP
<scatterplot of values in y and z on x with lines of best fit>
"""
options = self.default_options.copy()
options.update(vargs)
x_data, y_labels = self._split_column_and_labels(column_for_x)
if colors is not None:
y_labels.remove(self._as_label(colors))
if sizes is not None:
y_labels.remove(self._as_label(sizes))
if select is not None:
y_labels = self._as_labels(select)
if len(y_labels) > 1 and colors is not None and overlay:
warnings.warn("Colors and overlay are incompatible in a scatter")
overlay = False
def draw(axis, label, color):
if colors is not None:
colored = sorted(np.unique(self.column(colors)))
color_list = list(itertools.islice(itertools.cycle(self.chart_colors), len(colored)))
color_map = collections.OrderedDict(zip(colored, color_list))
color = [color_map[x] for x in self.column(colors)]
elif 'color' in options:
color = options.pop('color')
y_data = self[label]
if sizes is not None:
max_size = max(self[sizes]) ** 0.5
size = 2 * s * self[sizes] ** 0.5 / max_size
else:
size = s
axis.scatter(x_data, y_data, color=color, s=size, **options)
if fit_line:
m, b = np.polyfit(x_data, self[label], 1)
minx, maxx = np.min(x_data),np.max(x_data)
axis.plot([minx,maxx],[m*minx+b,m*maxx+b], color=color)
if labels is not None:
for x, y, label in zip(x_data, y_data, self[labels]):
axis.annotate(label, (x, y),
xytext=(-20, 20),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.7),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0', color='black'))
if colors is not None:
import matplotlib.patches as mpatches
patches = [mpatches.Patch(color=c, label=v) for (v, c) in color_map.items()]
axis.legend(loc=2, bbox_to_anchor=(1.05, 1), handles=patches)
x_label = self._as_label(column_for_x)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height)
|
[
"def",
"scatter",
"(",
"self",
",",
"column_for_x",
",",
"select",
"=",
"None",
",",
"overlay",
"=",
"True",
",",
"fit_line",
"=",
"False",
",",
"colors",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"sizes",
"=",
"None",
",",
"width",
"=",
"5",
",",
"height",
"=",
"5",
",",
"s",
"=",
"20",
",",
"*",
"*",
"vargs",
")",
":",
"options",
"=",
"self",
".",
"default_options",
".",
"copy",
"(",
")",
"options",
".",
"update",
"(",
"vargs",
")",
"x_data",
",",
"y_labels",
"=",
"self",
".",
"_split_column_and_labels",
"(",
"column_for_x",
")",
"if",
"colors",
"is",
"not",
"None",
":",
"y_labels",
".",
"remove",
"(",
"self",
".",
"_as_label",
"(",
"colors",
")",
")",
"if",
"sizes",
"is",
"not",
"None",
":",
"y_labels",
".",
"remove",
"(",
"self",
".",
"_as_label",
"(",
"sizes",
")",
")",
"if",
"select",
"is",
"not",
"None",
":",
"y_labels",
"=",
"self",
".",
"_as_labels",
"(",
"select",
")",
"if",
"len",
"(",
"y_labels",
")",
">",
"1",
"and",
"colors",
"is",
"not",
"None",
"and",
"overlay",
":",
"warnings",
".",
"warn",
"(",
"\"Colors and overlay are incompatible in a scatter\"",
")",
"overlay",
"=",
"False",
"def",
"draw",
"(",
"axis",
",",
"label",
",",
"color",
")",
":",
"if",
"colors",
"is",
"not",
"None",
":",
"colored",
"=",
"sorted",
"(",
"np",
".",
"unique",
"(",
"self",
".",
"column",
"(",
"colors",
")",
")",
")",
"color_list",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"itertools",
".",
"cycle",
"(",
"self",
".",
"chart_colors",
")",
",",
"len",
"(",
"colored",
")",
")",
")",
"color_map",
"=",
"collections",
".",
"OrderedDict",
"(",
"zip",
"(",
"colored",
",",
"color_list",
")",
")",
"color",
"=",
"[",
"color_map",
"[",
"x",
"]",
"for",
"x",
"in",
"self",
".",
"column",
"(",
"colors",
")",
"]",
"elif",
"'color'",
"in",
"options",
":",
"color",
"=",
"options",
".",
"pop",
"(",
"'color'",
")",
"y_data",
"=",
"self",
"[",
"label",
"]",
"if",
"sizes",
"is",
"not",
"None",
":",
"max_size",
"=",
"max",
"(",
"self",
"[",
"sizes",
"]",
")",
"**",
"0.5",
"size",
"=",
"2",
"*",
"s",
"*",
"self",
"[",
"sizes",
"]",
"**",
"0.5",
"/",
"max_size",
"else",
":",
"size",
"=",
"s",
"axis",
".",
"scatter",
"(",
"x_data",
",",
"y_data",
",",
"color",
"=",
"color",
",",
"s",
"=",
"size",
",",
"*",
"*",
"options",
")",
"if",
"fit_line",
":",
"m",
",",
"b",
"=",
"np",
".",
"polyfit",
"(",
"x_data",
",",
"self",
"[",
"label",
"]",
",",
"1",
")",
"minx",
",",
"maxx",
"=",
"np",
".",
"min",
"(",
"x_data",
")",
",",
"np",
".",
"max",
"(",
"x_data",
")",
"axis",
".",
"plot",
"(",
"[",
"minx",
",",
"maxx",
"]",
",",
"[",
"m",
"*",
"minx",
"+",
"b",
",",
"m",
"*",
"maxx",
"+",
"b",
"]",
",",
"color",
"=",
"color",
")",
"if",
"labels",
"is",
"not",
"None",
":",
"for",
"x",
",",
"y",
",",
"label",
"in",
"zip",
"(",
"x_data",
",",
"y_data",
",",
"self",
"[",
"labels",
"]",
")",
":",
"axis",
".",
"annotate",
"(",
"label",
",",
"(",
"x",
",",
"y",
")",
",",
"xytext",
"=",
"(",
"-",
"20",
",",
"20",
")",
",",
"textcoords",
"=",
"'offset points'",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'bottom'",
",",
"bbox",
"=",
"dict",
"(",
"boxstyle",
"=",
"'round,pad=0.5'",
",",
"fc",
"=",
"'white'",
",",
"alpha",
"=",
"0.7",
")",
",",
"arrowprops",
"=",
"dict",
"(",
"arrowstyle",
"=",
"'->'",
",",
"connectionstyle",
"=",
"'arc3,rad=0'",
",",
"color",
"=",
"'black'",
")",
")",
"if",
"colors",
"is",
"not",
"None",
":",
"import",
"matplotlib",
".",
"patches",
"as",
"mpatches",
"patches",
"=",
"[",
"mpatches",
".",
"Patch",
"(",
"color",
"=",
"c",
",",
"label",
"=",
"v",
")",
"for",
"(",
"v",
",",
"c",
")",
"in",
"color_map",
".",
"items",
"(",
")",
"]",
"axis",
".",
"legend",
"(",
"loc",
"=",
"2",
",",
"bbox_to_anchor",
"=",
"(",
"1.05",
",",
"1",
")",
",",
"handles",
"=",
"patches",
")",
"x_label",
"=",
"self",
".",
"_as_label",
"(",
"column_for_x",
")",
"self",
".",
"_visualize",
"(",
"x_label",
",",
"y_labels",
",",
"None",
",",
"overlay",
",",
"draw",
",",
"_vertical_x",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
")"
] |
Creates scatterplots, optionally adding a line of best fit.
Args:
``column_for_x`` (``str``): The column to use for the x-axis values
and label of the scatter plots.
Kwargs:
``overlay`` (``bool``): If true, creates a chart with one color
per data column; if False, each plot will be displayed separately.
``fit_line`` (``bool``): draw a line of best fit for each set of points.
``vargs``: Additional arguments that get passed into `plt.scatter`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
for additional arguments that can be passed into vargs. These
include: `marker` and `norm`, to name a couple.
``colors``: A column of categories to be used for coloring dots.
``labels``: A column of text labels to annotate dots.
``sizes``: A column of values to set the relative areas of dots.
``s``: Size of dots. If sizes is also provided, then dots will be
in the range 0 to 2 * s.
Raises:
ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical
Returns:
Scatter plot of values of ``column_for_x`` plotted against
values for all other columns in self. Each plot uses the values in
`column_for_x` for horizontal positions. One plot is produced for
all other columns in self as y (or for the columns designated by
`select`).
>>> table = Table().with_columns(
... 'x', make_array(9, 3, 3, 1),
... 'y', make_array(1, 2, 2, 10),
... 'z', make_array(3, 4, 5, 6))
>>> table
x | y | z
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table.scatter('x') # doctest: +SKIP
<scatterplot of values in y and z on x>
>>> table.scatter('x', overlay=False) # doctest: +SKIP
<scatterplot of values in y on x>
<scatterplot of values in z on x>
>>> table.scatter('x', fit_line=True) # doctest: +SKIP
<scatterplot of values in y and z on x with lines of best fit>
|
[
"Creates",
"scatterplots",
"optionally",
"adding",
"a",
"line",
"of",
"best",
"fit",
"."
] |
python
|
train
|
lappis-unb/salic-ml
|
src/salicml_api/analysis/api.py
|
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml_api/analysis/api.py#L60-L79
|
def indicator_details(indicator):
"""
    Return a dictionary with all metrics in FinancialIndicator;
if there aren't values for that Indicator, it is filled with default values
"""
metrics = format_metrics_json(indicator)
metrics_list = set(indicator.metrics
.filter(name__in=metrics_name_map.keys())
.values_list('name', flat=True))
null_metrics = default_metrics
for keys in metrics_list:
null_metrics.pop(metrics_name_map[keys], None)
metrics.update(null_metrics)
return {type(indicator).__name__: {
'valor': indicator.value,
'metricas': metrics, },
}
|
[
"def",
"indicator_details",
"(",
"indicator",
")",
":",
"metrics",
"=",
"format_metrics_json",
"(",
"indicator",
")",
"metrics_list",
"=",
"set",
"(",
"indicator",
".",
"metrics",
".",
"filter",
"(",
"name__in",
"=",
"metrics_name_map",
".",
"keys",
"(",
")",
")",
".",
"values_list",
"(",
"'name'",
",",
"flat",
"=",
"True",
")",
")",
"null_metrics",
"=",
"default_metrics",
"for",
"keys",
"in",
"metrics_list",
":",
"null_metrics",
".",
"pop",
"(",
"metrics_name_map",
"[",
"keys",
"]",
",",
"None",
")",
"metrics",
".",
"update",
"(",
"null_metrics",
")",
"return",
"{",
"type",
"(",
"indicator",
")",
".",
"__name__",
":",
"{",
"'valor'",
":",
"indicator",
".",
"value",
",",
"'metricas'",
":",
"metrics",
",",
"}",
",",
"}"
] |
Return a dictionary with all metrics in FinancialIndicator;
if there aren't values for that Indicator, it is filled with default values
|
[
"Return",
"a",
"dictionary",
"with",
"all",
"metrics",
"in",
"FinancialIndicator",
"if",
"there",
"aren",
"t",
"values",
"for",
"that",
"Indicator",
"it",
"is",
"filled",
"with",
"default",
"values"
] |
python
|
train
|
f3at/feat
|
src/feat/database/couchdb/view.py
|
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/database/couchdb/view.py#L262-L305
|
def main():
"""Command-line entry point for running the view server."""
import getopt
from . import __version__ as VERSION
try:
option_list, argument_list = getopt.gnu_getopt(
sys.argv[1:], 'h',
['version', 'help', 'json-module=', 'debug', 'log-file='])
message = None
for option, value in option_list:
if option in ('--version'):
message = _VERSION % dict(name=os.path.basename(sys.argv[0]),
version=VERSION)
elif option in ('-h', '--help'):
message = _HELP % dict(name=os.path.basename(sys.argv[0]))
elif option in ('--json-module'):
json.use(module=value)
elif option in ('--debug'):
log.setLevel(logging.DEBUG)
elif option in ('--log-file'):
if value == '-':
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(
' -> [%(levelname)s] %(message)s'))
else:
handler = logging.FileHandler(value)
handler.setFormatter(logging.Formatter(
'[%(asctime)s] [%(levelname)s] %(message)s'))
log.addHandler(handler)
if message:
sys.stdout.write(message)
sys.stdout.flush()
sys.exit(0)
except getopt.GetoptError, error:
message = '%s\n\nTry `%s --help` for more information.\n' % (
str(error), os.path.basename(sys.argv[0]))
sys.stderr.write(message)
sys.stderr.flush()
sys.exit(1)
sys.exit(run())
|
[
"def",
"main",
"(",
")",
":",
"import",
"getopt",
"from",
".",
"import",
"__version__",
"as",
"VERSION",
"try",
":",
"option_list",
",",
"argument_list",
"=",
"getopt",
".",
"gnu_getopt",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
",",
"'h'",
",",
"[",
"'version'",
",",
"'help'",
",",
"'json-module='",
",",
"'debug'",
",",
"'log-file='",
"]",
")",
"message",
"=",
"None",
"for",
"option",
",",
"value",
"in",
"option_list",
":",
"if",
"option",
"in",
"(",
"'--version'",
")",
":",
"message",
"=",
"_VERSION",
"%",
"dict",
"(",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
",",
"version",
"=",
"VERSION",
")",
"elif",
"option",
"in",
"(",
"'-h'",
",",
"'--help'",
")",
":",
"message",
"=",
"_HELP",
"%",
"dict",
"(",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"elif",
"option",
"in",
"(",
"'--json-module'",
")",
":",
"json",
".",
"use",
"(",
"module",
"=",
"value",
")",
"elif",
"option",
"in",
"(",
"'--debug'",
")",
":",
"log",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"elif",
"option",
"in",
"(",
"'--log-file'",
")",
":",
"if",
"value",
"==",
"'-'",
":",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stderr",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"' -> [%(levelname)s] %(message)s'",
")",
")",
"else",
":",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"value",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'[%(asctime)s] [%(levelname)s] %(message)s'",
")",
")",
"log",
".",
"addHandler",
"(",
"handler",
")",
"if",
"message",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"message",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"except",
"getopt",
".",
"GetoptError",
",",
"error",
":",
"message",
"=",
"'%s\\n\\nTry `%s --help` for more information.\\n'",
"%",
"(",
"str",
"(",
"error",
")",
",",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"message",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"sys",
".",
"exit",
"(",
"run",
"(",
")",
")"
] |
Command-line entry point for running the view server.
|
[
"Command",
"-",
"line",
"entry",
"point",
"for",
"running",
"the",
"view",
"server",
"."
] |
python
|
train
|
zhmcclient/python-zhmcclient
|
zhmcclient/_storage_volume.py
|
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_storage_volume.py#L411-L493
|
def update_properties(self, properties, email_to_addresses=None,
email_cc_addresses=None, email_insert=None):
"""
Update writeable properties of this storage volume on the HMC, and
optionally send emails to storage administrators requesting
modification of the storage volume on the storage subsystem and of any
resources related to the storage volume.
This method performs the "Modify Storage Group Properties" operation,
requesting modification of the volume.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - System Programmer" task.
Parameters:
properties (dict): New property values for the volume.
Allowable properties are the fields defined in the
"storage-volume-request-info" nested object for the "modify"
operation. That nested object is described in section "Request body
contents" for operation "Modify Storage Group Properties" in the
:term:`HMC API` book.
The properties provided in this parameter will be copied and then
amended with the `operation="modify"` and `element-uri` properties,
and then used as a single array item for the `storage-volumes`
field in the request body of the "Modify Storage Group Properties"
operation.
email_to_addresses (:term:`iterable` of :term:`string`): Email
      addresses of one or more storage administrators to be notified.
If `None` or empty, no email will be sent.
email_cc_addresses (:term:`iterable` of :term:`string`): Email
      addresses of one or more storage administrators to be copied
on the notification email.
If `None` or empty, nobody will be copied on the email.
Must be `None` or empty if `email_to_addresses` is `None` or empty.
email_insert (:term:`string`): Additional text to be inserted in the
notification email.
The text can include HTML formatting tags.
If `None`, no additional text will be inserted.
Must be `None` or empty if `email_to_addresses` is `None` or empty.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
volreq_obj = copy.deepcopy(properties)
volreq_obj['operation'] = 'modify'
volreq_obj['element-uri'] = self.uri
body = {
'storage-volumes': [volreq_obj],
}
if email_to_addresses:
body['email-to-addresses'] = email_to_addresses
if email_cc_addresses:
body['email-cc-addresses'] = email_cc_addresses
if email_insert:
body['email-insert'] = email_insert
else:
if email_cc_addresses:
raise ValueError("email_cc_addresses must not be specified if "
"there is no email_to_addresses: %r" %
email_cc_addresses)
if email_insert:
raise ValueError("email_insert must not be specified if "
"there is no email_to_addresses: %r" %
email_insert)
self.manager.session.post(
self.manager.storage_group.uri + '/operations/modify',
body=body)
self.properties.update(copy.deepcopy(properties))
|
[
"def",
"update_properties",
"(",
"self",
",",
"properties",
",",
"email_to_addresses",
"=",
"None",
",",
"email_cc_addresses",
"=",
"None",
",",
"email_insert",
"=",
"None",
")",
":",
"volreq_obj",
"=",
"copy",
".",
"deepcopy",
"(",
"properties",
")",
"volreq_obj",
"[",
"'operation'",
"]",
"=",
"'modify'",
"volreq_obj",
"[",
"'element-uri'",
"]",
"=",
"self",
".",
"uri",
"body",
"=",
"{",
"'storage-volumes'",
":",
"[",
"volreq_obj",
"]",
",",
"}",
"if",
"email_to_addresses",
":",
"body",
"[",
"'email-to-addresses'",
"]",
"=",
"email_to_addresses",
"if",
"email_cc_addresses",
":",
"body",
"[",
"'email-cc-addresses'",
"]",
"=",
"email_cc_addresses",
"if",
"email_insert",
":",
"body",
"[",
"'email-insert'",
"]",
"=",
"email_insert",
"else",
":",
"if",
"email_cc_addresses",
":",
"raise",
"ValueError",
"(",
"\"email_cc_addresses must not be specified if \"",
"\"there is no email_to_addresses: %r\"",
"%",
"email_cc_addresses",
")",
"if",
"email_insert",
":",
"raise",
"ValueError",
"(",
"\"email_insert must not be specified if \"",
"\"there is no email_to_addresses: %r\"",
"%",
"email_insert",
")",
"self",
".",
"manager",
".",
"session",
".",
"post",
"(",
"self",
".",
"manager",
".",
"storage_group",
".",
"uri",
"+",
"'/operations/modify'",
",",
"body",
"=",
"body",
")",
"self",
".",
"properties",
".",
"update",
"(",
"copy",
".",
"deepcopy",
"(",
"properties",
")",
")"
] |
Update writeable properties of this storage volume on the HMC, and
optionally send emails to storage administrators requesting
modification of the storage volume on the storage subsystem and of any
resources related to the storage volume.
This method performs the "Modify Storage Group Properties" operation,
requesting modification of the volume.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - System Programmer" task.
Parameters:
properties (dict): New property values for the volume.
Allowable properties are the fields defined in the
"storage-volume-request-info" nested object for the "modify"
operation. That nested object is described in section "Request body
contents" for operation "Modify Storage Group Properties" in the
:term:`HMC API` book.
The properties provided in this parameter will be copied and then
amended with the `operation="modify"` and `element-uri` properties,
and then used as a single array item for the `storage-volumes`
field in the request body of the "Modify Storage Group Properties"
operation.
email_to_addresses (:term:`iterable` of :term:`string`): Email
addresses of one or more storage administrators to be notified.
If `None` or empty, no email will be sent.
email_cc_addresses (:term:`iterable` of :term:`string`): Email
addresses of one or more storage administrators to be copied
on the notification email.
If `None` or empty, nobody will be copied on the email.
Must be `None` or empty if `email_to_addresses` is `None` or empty.
email_insert (:term:`string`): Additional text to be inserted in the
notification email.
The text can include HTML formatting tags.
If `None`, no additional text will be inserted.
Must be `None` or empty if `email_to_addresses` is `None` or empty.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
|
[
"Update",
"writeable",
"properties",
"of",
"this",
"storage",
"volume",
"on",
"the",
"HMC",
"and",
"optionally",
"send",
"emails",
"to",
"storage",
"administrators",
"requesting",
"modification",
"of",
"the",
"storage",
"volume",
"on",
"the",
"storage",
"subsystem",
"and",
"of",
"any",
"resources",
"related",
"to",
"the",
"storage",
"volume",
"."
] |
python
|
train
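A hedged usage sketch for the method above. `volume` is assumed to be an existing `StorageVolume` object, and the property name in the request body is purely illustrative; as the docstring explains, the allowable fields come from the "storage-volume-request-info" object in the HMC API book.

# `volume` is assumed to be an existing zhmcclient StorageVolume object.
volume.update_properties(
    {'description': 'resized for the new workload'},   # illustrative property; see the HMC API book
    email_to_addresses=['storage-admins@example.com'],
    email_insert='Please apply this change in the next maintenance window.')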
|
cloudera/cm_api
|
python/src/cm_api/endpoints/hosts.py
|
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/hosts.py#L25-L36
|
def create_host(resource_root, host_id, name, ipaddr, rack_id=None):
"""
Create a host
@param resource_root: The root Resource object.
@param host_id: Host id
@param name: Host name
@param ipaddr: IP address
@param rack_id: Rack id. Default None
@return: An ApiHost object
"""
apihost = ApiHost(resource_root, host_id, name, ipaddr, rack_id)
return call(resource_root.post, HOSTS_PATH, ApiHost, True, data=[apihost])[0]
|
[
"def",
"create_host",
"(",
"resource_root",
",",
"host_id",
",",
"name",
",",
"ipaddr",
",",
"rack_id",
"=",
"None",
")",
":",
"apihost",
"=",
"ApiHost",
"(",
"resource_root",
",",
"host_id",
",",
"name",
",",
"ipaddr",
",",
"rack_id",
")",
"return",
"call",
"(",
"resource_root",
".",
"post",
",",
"HOSTS_PATH",
",",
"ApiHost",
",",
"True",
",",
"data",
"=",
"[",
"apihost",
"]",
")",
"[",
"0",
"]"
] |
Create a host
@param resource_root: The root Resource object.
@param host_id: Host id
@param name: Host name
@param ipaddr: IP address
@param rack_id: Rack id. Default None
@return: An ApiHost object
|
[
"Create",
"a",
"host"
] |
python
|
train
|
SoCo/SoCo
|
soco/core.py
|
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L354-L364
|
def is_bridge(self):
"""bool: Is this zone a bridge?"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._is_bridge is not None:
return self._is_bridge
# if not, we have to get it from the zone topology. This will set
# self._is_bridge for us for next time, so we won't have to do this
# again
self._parse_zone_group_state()
return self._is_bridge
|
[
"def",
"is_bridge",
"(",
"self",
")",
":",
"# Since this does not change over time (?) check whether we already",
"# know the answer. If so, there is no need to go further",
"if",
"self",
".",
"_is_bridge",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_is_bridge",
"# if not, we have to get it from the zone topology. This will set",
"# self._is_bridge for us for next time, so we won't have to do this",
"# again",
"self",
".",
"_parse_zone_group_state",
"(",
")",
"return",
"self",
".",
"_is_bridge"
] |
bool: Is this zone a bridge?
|
[
"bool",
":",
"Is",
"this",
"zone",
"a",
"bridge?"
] |
python
|
train
|
ioos/cc-plugin-ncei
|
cc_plugin_ncei/ncei_timeseries_profile.py
|
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_timeseries_profile.py#L21-L43
|
def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-orthogonal feature types')
message = '{} must be a valid profile-orthogonal feature type. It must have dimensions of (station, time, z).'
message += ' If it\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'
message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'
    message += ' coordinate variable with dimension (z).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_single_station(dataset, variable)
is_valid = is_valid or util.is_timeseries_profile_multi_station(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results
|
[
"def",
"check_dimensions",
"(",
"self",
",",
"dataset",
")",
":",
"results",
"=",
"[",
"]",
"required_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"'All geophysical variables are timeseries-profile-orthogonal feature types'",
")",
"message",
"=",
"'{} must be a valid profile-orthogonal feature type. It must have dimensions of (station, time, z).'",
"message",
"+=",
"' If it\\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'",
"message",
"+=",
"' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'",
"message",
"+=",
"' coordinate variabel with dimension (z).'",
"for",
"variable",
"in",
"util",
".",
"get_geophysical_variables",
"(",
"dataset",
")",
":",
"is_valid",
"=",
"util",
".",
"is_timeseries_profile_single_station",
"(",
"dataset",
",",
"variable",
")",
"is_valid",
"=",
"is_valid",
"or",
"util",
".",
"is_timeseries_profile_multi_station",
"(",
"dataset",
",",
"variable",
")",
"required_ctx",
".",
"assert_true",
"(",
"is_valid",
",",
"message",
".",
"format",
"(",
"variable",
")",
")",
"results",
".",
"append",
"(",
"required_ctx",
".",
"to_result",
"(",
")",
")",
"return",
"results"
] |
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
|
[
"Checks",
"that",
"the",
"feature",
"types",
"of",
"this",
"dataset",
"are",
"consistent",
"with",
"a",
"timeseries",
"-",
"profile",
"-",
"orthogonal",
"dataset",
"."
] |
python
|
train
|
openid/python-openid
|
openid/yadis/etxrd.py
|
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/etxrd.py#L119-L133
|
def getYadisXRD(xrd_tree):
"""Return the XRD element that should contain the Yadis services"""
xrd = None
# for the side-effect of assigning the last one in the list to the
# xrd variable
for xrd in xrd_tree.findall(xrd_tag):
pass
# There were no elements found, or else xrd would be set to the
# last one
if xrd is None:
raise XRDSError('No XRD present in tree')
return xrd
|
[
"def",
"getYadisXRD",
"(",
"xrd_tree",
")",
":",
"xrd",
"=",
"None",
"# for the side-effect of assigning the last one in the list to the",
"# xrd variable",
"for",
"xrd",
"in",
"xrd_tree",
".",
"findall",
"(",
"xrd_tag",
")",
":",
"pass",
"# There were no elements found, or else xrd would be set to the",
"# last one",
"if",
"xrd",
"is",
"None",
":",
"raise",
"XRDSError",
"(",
"'No XRD present in tree'",
")",
"return",
"xrd"
] |
Return the XRD element that should contain the Yadis services
|
[
"Return",
"the",
"XRD",
"element",
"that",
"should",
"contain",
"the",
"Yadis",
"services"
] |
python
|
train
|
numenta/nupic
|
src/nupic/frameworks/opf/htm_prediction_model.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L712-L957
|
def _handleSDRClassifierMultiStep(self, patternNZ,
inputTSRecordIdx,
rawInput):
""" Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
used by each type of network (encoder only, SP only, SP +TM) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input to the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
"""
inferenceArgs = self.getInferenceArgs()
predictedFieldName = inferenceArgs.get('predictedField', None)
if predictedFieldName is None:
raise ValueError(
"No predicted field was enabled! Did you call enableInference()?"
)
self._predictedFieldName = predictedFieldName
classifier = self._getClassifierRegion()
if not self._hasCL or classifier is None:
# No classifier so return an empty dict for inferences.
return {}
sensor = self._getSensorRegion()
minLikelihoodThreshold = self._minLikelihoodThreshold
maxPredictionsPerStep = self._maxPredictionsPerStep
needLearning = self.isLearningEnabled()
inferences = {}
# Get the classifier input encoder, if we don't have it already
if self._classifierInputEncoder is None:
if predictedFieldName is None:
raise RuntimeError("This experiment description is missing "
"the 'predictedField' in its config, which is required "
"for multi-step prediction inference.")
encoderList = sensor.getSelf().encoder.getEncoderList()
self._numFields = len(encoderList)
# This is getting index of predicted field if being fed to CLA.
fieldNames = sensor.getSelf().encoder.getScalarNames()
if predictedFieldName in fieldNames:
self._predictedFieldIdx = fieldNames.index(predictedFieldName)
else:
# Predicted field was not fed into the network, only to the classifier
self._predictedFieldIdx = None
# In a multi-step model, the classifier input encoder is separate from
# the other encoders and always disabled from going into the bottom of
# the network.
if sensor.getSelf().disabledEncoder is not None:
encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
else:
encoderList = []
if len(encoderList) >= 1:
fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
self._classifierInputEncoder = encoderList[fieldNames.index(
predictedFieldName)]
else:
# Legacy multi-step networks don't have a separate encoder for the
# classifier, so use the one that goes into the bottom of the network
encoderList = sensor.getSelf().encoder.getEncoderList()
self._classifierInputEncoder = encoderList[self._predictedFieldIdx]
# Get the actual value and the bucket index for this sample. The
# predicted field may not be enabled for input to the network, so we
# explicitly encode it outside of the sensor
# TODO: All this logic could be simpler if in the encoder itself
if not predictedFieldName in rawInput:
raise ValueError("Input row does not contain a value for the predicted "
"field configured for this model. Missing value for '%s'"
% predictedFieldName)
absoluteValue = rawInput[predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
# Convert the absolute values to deltas if necessary
# The bucket index should be handled correctly by the underlying delta encoder
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Make the delta before any values have been seen 0 so that we do not mess up the
# range for the adaptive scalar encoder.
if not hasattr(self,"_ms_prevVal"):
self._ms_prevVal = absoluteValue
prevValue = self._ms_prevVal
self._ms_prevVal = absoluteValue
actualValue = absoluteValue - prevValue
else:
actualValue = absoluteValue
if isinstance(actualValue, float) and math.isnan(actualValue):
actualValue = SENTINEL_VALUE_FOR_MISSING_DATA
# Pass this information to the classifier's custom compute method
# so that it can assign the current classification to possibly
# multiple patterns from the past and current, and also provide
# the expected classification for some time step(s) in the future.
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', needLearning)
classificationIn = {'bucketIdx': bucketIdx,
'actValue': actualValue}
# Handle missing records
if inputTSRecordIdx is not None:
recordNum = inputTSRecordIdx
else:
recordNum = self.__numRunCalls
clResults = classifier.getSelf().customCompute(recordNum=recordNum,
patternNZ=patternNZ,
classification=classificationIn)
# ---------------------------------------------------------------
# Get the prediction for every step ahead learned by the classifier
predictionSteps = classifier.getParameter('steps')
predictionSteps = [int(x) for x in predictionSteps.split(',')]
# We will return the results in this dict. The top level keys
# are the step number, the values are the relative likelihoods for
# each classification value in that time step, represented as
# another dict where the keys are the classification values and
# the values are the relative likelihoods.
inferences[InferenceElement.multiStepPredictions] = dict()
inferences[InferenceElement.multiStepBestPredictions] = dict()
inferences[InferenceElement.multiStepBucketLikelihoods] = dict()
# ======================================================================
# Plug in the predictions for each requested time step.
for steps in predictionSteps:
# From the clResults, compute the predicted actual value. The
# SDRClassifier classifies the bucket index and returns a list of
# relative likelihoods for each bucket. Let's find the max one
# and then look up the actual value from that bucket index
likelihoodsVec = clResults[steps]
bucketValues = clResults['actualValues']
# Create a dict of value:likelihood pairs. We can't simply use
# dict(zip(bucketValues, likelihoodsVec)) because there might be
# duplicate bucketValues (this happens early on in the model when
# it doesn't have actual values for each bucket so it returns
# multiple buckets with the same default actual value).
likelihoodsDict = dict()
bestActValue = None
bestProb = None
for (actValue, prob) in zip(bucketValues, likelihoodsVec):
if actValue in likelihoodsDict:
likelihoodsDict[actValue] += prob
else:
likelihoodsDict[actValue] = prob
# Keep track of best
if bestProb is None or likelihoodsDict[actValue] > bestProb:
bestProb = likelihoodsDict[actValue]
bestActValue = actValue
# Remove entries with 0 likelihood or likelihood less than
# minLikelihoodThreshold, but don't leave an empty dict.
likelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions(
likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)
# calculate likelihood for each bucket
bucketLikelihood = {}
for k in likelihoodsDict.keys():
bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
likelihoodsDict[k])
# ---------------------------------------------------------------------
# If we have a delta encoder, we have to shift our predicted output value
# by the sum of the deltas
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Get the prediction history for this number of timesteps.
# The prediction history is a store of the previous best predicted values.
# This is used to get the final shift from the current absolute value.
if not hasattr(self, '_ms_predHistories'):
self._ms_predHistories = dict()
predHistories = self._ms_predHistories
if not steps in predHistories:
predHistories[steps] = deque()
predHistory = predHistories[steps]
# Find the sum of the deltas for the steps and use this to generate
# an offset from the current absolute value
sumDelta = sum(predHistory)
offsetDict = dict()
for (k, v) in likelihoodsDict.iteritems():
if k is not None:
# Reconstruct the absolute value based on the current actual value,
# the best predicted values from the previous iterations,
# and the current predicted delta
offsetDict[absoluteValue+float(k)+sumDelta] = v
# calculate likelihood for each bucket
bucketLikelihoodOffset = {}
for k in offsetDict.keys():
bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
offsetDict[k])
# Push the current best delta to the history buffer for reconstructing the final delta
if bestActValue is not None:
predHistory.append(bestActValue)
# If we don't need any more values in the predictionHistory, pop off
# the earliest one.
if len(predHistory) >= steps:
predHistory.popleft()
# Provide the offsetDict as the return value
if len(offsetDict)>0:
inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
else:
inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood
if bestActValue is None:
inferences[InferenceElement.multiStepBestPredictions][steps] = None
else:
inferences[InferenceElement.multiStepBestPredictions][steps] = (
absoluteValue + sumDelta + bestActValue)
# ---------------------------------------------------------------------
# Normal case, no delta encoder. Just plug in all our multi-step predictions
# with likelihoods as well as our best prediction
else:
# The multiStepPredictions element holds the probabilities for each
# bucket
inferences[InferenceElement.multiStepPredictions][steps] = (
likelihoodsDict)
inferences[InferenceElement.multiStepBestPredictions][steps] = (
bestActValue)
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (
bucketLikelihood)
return inferences
|
[
"def",
"_handleSDRClassifierMultiStep",
"(",
"self",
",",
"patternNZ",
",",
"inputTSRecordIdx",
",",
"rawInput",
")",
":",
"inferenceArgs",
"=",
"self",
".",
"getInferenceArgs",
"(",
")",
"predictedFieldName",
"=",
"inferenceArgs",
".",
"get",
"(",
"'predictedField'",
",",
"None",
")",
"if",
"predictedFieldName",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"No predicted field was enabled! Did you call enableInference()?\"",
")",
"self",
".",
"_predictedFieldName",
"=",
"predictedFieldName",
"classifier",
"=",
"self",
".",
"_getClassifierRegion",
"(",
")",
"if",
"not",
"self",
".",
"_hasCL",
"or",
"classifier",
"is",
"None",
":",
"# No classifier so return an empty dict for inferences.",
"return",
"{",
"}",
"sensor",
"=",
"self",
".",
"_getSensorRegion",
"(",
")",
"minLikelihoodThreshold",
"=",
"self",
".",
"_minLikelihoodThreshold",
"maxPredictionsPerStep",
"=",
"self",
".",
"_maxPredictionsPerStep",
"needLearning",
"=",
"self",
".",
"isLearningEnabled",
"(",
")",
"inferences",
"=",
"{",
"}",
"# Get the classifier input encoder, if we don't have it already",
"if",
"self",
".",
"_classifierInputEncoder",
"is",
"None",
":",
"if",
"predictedFieldName",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"This experiment description is missing \"",
"\"the 'predictedField' in its config, which is required \"",
"\"for multi-step prediction inference.\"",
")",
"encoderList",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"getEncoderList",
"(",
")",
"self",
".",
"_numFields",
"=",
"len",
"(",
"encoderList",
")",
"# This is getting index of predicted field if being fed to CLA.",
"fieldNames",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"getScalarNames",
"(",
")",
"if",
"predictedFieldName",
"in",
"fieldNames",
":",
"self",
".",
"_predictedFieldIdx",
"=",
"fieldNames",
".",
"index",
"(",
"predictedFieldName",
")",
"else",
":",
"# Predicted field was not fed into the network, only to the classifier",
"self",
".",
"_predictedFieldIdx",
"=",
"None",
"# In a multi-step model, the classifier input encoder is separate from",
"# the other encoders and always disabled from going into the bottom of",
"# the network.",
"if",
"sensor",
".",
"getSelf",
"(",
")",
".",
"disabledEncoder",
"is",
"not",
"None",
":",
"encoderList",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"disabledEncoder",
".",
"getEncoderList",
"(",
")",
"else",
":",
"encoderList",
"=",
"[",
"]",
"if",
"len",
"(",
"encoderList",
")",
">=",
"1",
":",
"fieldNames",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"disabledEncoder",
".",
"getScalarNames",
"(",
")",
"self",
".",
"_classifierInputEncoder",
"=",
"encoderList",
"[",
"fieldNames",
".",
"index",
"(",
"predictedFieldName",
")",
"]",
"else",
":",
"# Legacy multi-step networks don't have a separate encoder for the",
"# classifier, so use the one that goes into the bottom of the network",
"encoderList",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"getEncoderList",
"(",
")",
"self",
".",
"_classifierInputEncoder",
"=",
"encoderList",
"[",
"self",
".",
"_predictedFieldIdx",
"]",
"# Get the actual value and the bucket index for this sample. The",
"# predicted field may not be enabled for input to the network, so we",
"# explicitly encode it outside of the sensor",
"# TODO: All this logic could be simpler if in the encoder itself",
"if",
"not",
"predictedFieldName",
"in",
"rawInput",
":",
"raise",
"ValueError",
"(",
"\"Input row does not contain a value for the predicted \"",
"\"field configured for this model. Missing value for '%s'\"",
"%",
"predictedFieldName",
")",
"absoluteValue",
"=",
"rawInput",
"[",
"predictedFieldName",
"]",
"bucketIdx",
"=",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"absoluteValue",
")",
"[",
"0",
"]",
"# Convert the absolute values to deltas if necessary",
"# The bucket index should be handled correctly by the underlying delta encoder",
"if",
"isinstance",
"(",
"self",
".",
"_classifierInputEncoder",
",",
"DeltaEncoder",
")",
":",
"# Make the delta before any values have been seen 0 so that we do not mess up the",
"# range for the adaptive scalar encoder.",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_ms_prevVal\"",
")",
":",
"self",
".",
"_ms_prevVal",
"=",
"absoluteValue",
"prevValue",
"=",
"self",
".",
"_ms_prevVal",
"self",
".",
"_ms_prevVal",
"=",
"absoluteValue",
"actualValue",
"=",
"absoluteValue",
"-",
"prevValue",
"else",
":",
"actualValue",
"=",
"absoluteValue",
"if",
"isinstance",
"(",
"actualValue",
",",
"float",
")",
"and",
"math",
".",
"isnan",
"(",
"actualValue",
")",
":",
"actualValue",
"=",
"SENTINEL_VALUE_FOR_MISSING_DATA",
"# Pass this information to the classifier's custom compute method",
"# so that it can assign the current classification to possibly",
"# multiple patterns from the past and current, and also provide",
"# the expected classification for some time step(s) in the future.",
"classifier",
".",
"setParameter",
"(",
"'inferenceMode'",
",",
"True",
")",
"classifier",
".",
"setParameter",
"(",
"'learningMode'",
",",
"needLearning",
")",
"classificationIn",
"=",
"{",
"'bucketIdx'",
":",
"bucketIdx",
",",
"'actValue'",
":",
"actualValue",
"}",
"# Handle missing records",
"if",
"inputTSRecordIdx",
"is",
"not",
"None",
":",
"recordNum",
"=",
"inputTSRecordIdx",
"else",
":",
"recordNum",
"=",
"self",
".",
"__numRunCalls",
"clResults",
"=",
"classifier",
".",
"getSelf",
"(",
")",
".",
"customCompute",
"(",
"recordNum",
"=",
"recordNum",
",",
"patternNZ",
"=",
"patternNZ",
",",
"classification",
"=",
"classificationIn",
")",
"# ---------------------------------------------------------------",
"# Get the prediction for every step ahead learned by the classifier",
"predictionSteps",
"=",
"classifier",
".",
"getParameter",
"(",
"'steps'",
")",
"predictionSteps",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"predictionSteps",
".",
"split",
"(",
"','",
")",
"]",
"# We will return the results in this dict. The top level keys",
"# are the step number, the values are the relative likelihoods for",
"# each classification value in that time step, represented as",
"# another dict where the keys are the classification values and",
"# the values are the relative likelihoods.",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"=",
"dict",
"(",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"=",
"dict",
"(",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"=",
"dict",
"(",
")",
"# ======================================================================",
"# Plug in the predictions for each requested time step.",
"for",
"steps",
"in",
"predictionSteps",
":",
"# From the clResults, compute the predicted actual value. The",
"# SDRClassifier classifies the bucket index and returns a list of",
"# relative likelihoods for each bucket. Let's find the max one",
"# and then look up the actual value from that bucket index",
"likelihoodsVec",
"=",
"clResults",
"[",
"steps",
"]",
"bucketValues",
"=",
"clResults",
"[",
"'actualValues'",
"]",
"# Create a dict of value:likelihood pairs. We can't simply use",
"# dict(zip(bucketValues, likelihoodsVec)) because there might be",
"# duplicate bucketValues (this happens early on in the model when",
"# it doesn't have actual values for each bucket so it returns",
"# multiple buckets with the same default actual value).",
"likelihoodsDict",
"=",
"dict",
"(",
")",
"bestActValue",
"=",
"None",
"bestProb",
"=",
"None",
"for",
"(",
"actValue",
",",
"prob",
")",
"in",
"zip",
"(",
"bucketValues",
",",
"likelihoodsVec",
")",
":",
"if",
"actValue",
"in",
"likelihoodsDict",
":",
"likelihoodsDict",
"[",
"actValue",
"]",
"+=",
"prob",
"else",
":",
"likelihoodsDict",
"[",
"actValue",
"]",
"=",
"prob",
"# Keep track of best",
"if",
"bestProb",
"is",
"None",
"or",
"likelihoodsDict",
"[",
"actValue",
"]",
">",
"bestProb",
":",
"bestProb",
"=",
"likelihoodsDict",
"[",
"actValue",
"]",
"bestActValue",
"=",
"actValue",
"# Remove entries with 0 likelihood or likelihood less than",
"# minLikelihoodThreshold, but don't leave an empty dict.",
"likelihoodsDict",
"=",
"HTMPredictionModel",
".",
"_removeUnlikelyPredictions",
"(",
"likelihoodsDict",
",",
"minLikelihoodThreshold",
",",
"maxPredictionsPerStep",
")",
"# calculate likelihood for each bucket",
"bucketLikelihood",
"=",
"{",
"}",
"for",
"k",
"in",
"likelihoodsDict",
".",
"keys",
"(",
")",
":",
"bucketLikelihood",
"[",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"k",
")",
"[",
"0",
"]",
"]",
"=",
"(",
"likelihoodsDict",
"[",
"k",
"]",
")",
"# ---------------------------------------------------------------------",
"# If we have a delta encoder, we have to shift our predicted output value",
"# by the sum of the deltas",
"if",
"isinstance",
"(",
"self",
".",
"_classifierInputEncoder",
",",
"DeltaEncoder",
")",
":",
"# Get the prediction history for this number of timesteps.",
"# The prediction history is a store of the previous best predicted values.",
"# This is used to get the final shift from the current absolute value.",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_ms_predHistories'",
")",
":",
"self",
".",
"_ms_predHistories",
"=",
"dict",
"(",
")",
"predHistories",
"=",
"self",
".",
"_ms_predHistories",
"if",
"not",
"steps",
"in",
"predHistories",
":",
"predHistories",
"[",
"steps",
"]",
"=",
"deque",
"(",
")",
"predHistory",
"=",
"predHistories",
"[",
"steps",
"]",
"# Find the sum of the deltas for the steps and use this to generate",
"# an offset from the current absolute value",
"sumDelta",
"=",
"sum",
"(",
"predHistory",
")",
"offsetDict",
"=",
"dict",
"(",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"likelihoodsDict",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"is",
"not",
"None",
":",
"# Reconstruct the absolute value based on the current actual value,",
"# the best predicted values from the previous iterations,",
"# and the current predicted delta",
"offsetDict",
"[",
"absoluteValue",
"+",
"float",
"(",
"k",
")",
"+",
"sumDelta",
"]",
"=",
"v",
"# calculate likelihood for each bucket",
"bucketLikelihoodOffset",
"=",
"{",
"}",
"for",
"k",
"in",
"offsetDict",
".",
"keys",
"(",
")",
":",
"bucketLikelihoodOffset",
"[",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"k",
")",
"[",
"0",
"]",
"]",
"=",
"(",
"offsetDict",
"[",
"k",
"]",
")",
"# Push the current best delta to the history buffer for reconstructing the final delta",
"if",
"bestActValue",
"is",
"not",
"None",
":",
"predHistory",
".",
"append",
"(",
"bestActValue",
")",
"# If we don't need any more values in the predictionHistory, pop off",
"# the earliest one.",
"if",
"len",
"(",
"predHistory",
")",
">=",
"steps",
":",
"predHistory",
".",
"popleft",
"(",
")",
"# Provide the offsetDict as the return value",
"if",
"len",
"(",
"offsetDict",
")",
">",
"0",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"[",
"steps",
"]",
"=",
"offsetDict",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"[",
"steps",
"]",
"=",
"bucketLikelihoodOffset",
"else",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"[",
"steps",
"]",
"=",
"likelihoodsDict",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"[",
"steps",
"]",
"=",
"bucketLikelihood",
"if",
"bestActValue",
"is",
"None",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"[",
"steps",
"]",
"=",
"None",
"else",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"[",
"steps",
"]",
"=",
"(",
"absoluteValue",
"+",
"sumDelta",
"+",
"bestActValue",
")",
"# ---------------------------------------------------------------------",
"# Normal case, no delta encoder. Just plug in all our multi-step predictions",
"# with likelihoods as well as our best prediction",
"else",
":",
"# The multiStepPredictions element holds the probabilities for each",
"# bucket",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"[",
"steps",
"]",
"=",
"(",
"likelihoodsDict",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"[",
"steps",
"]",
"=",
"(",
"bestActValue",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"[",
"steps",
"]",
"=",
"(",
"bucketLikelihood",
")",
"return",
"inferences"
] |
Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
used by each type of network (encoder only, SP only, SP +TM) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input to the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
|
[
"Handle",
"the",
"CLA",
"Classifier",
"compute",
"logic",
"when",
"implementing",
"multi",
"-",
"step",
"prediction",
".",
"This",
"is",
"where",
"the",
"patternNZ",
"is",
"associated",
"with",
"one",
"of",
"the",
"other",
"fields",
"from",
"the",
"dataset",
"0",
"to",
"N",
"steps",
"in",
"the",
"future",
".",
"This",
"method",
"is",
"used",
"by",
"each",
"type",
"of",
"network",
"(",
"encoder",
"only",
"SP",
"only",
"SP",
"+",
"TM",
")",
"to",
"handle",
"the",
"compute",
"logic",
"through",
"the",
"CLA",
"Classifier",
".",
"It",
"fills",
"in",
"the",
"inference",
"dict",
"with",
"the",
"results",
"of",
"the",
"compute",
"."
] |
python
|
valid
|
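The delta-encoder branch of the record above reconstructs each absolute prediction from the current value, the sum of previously predicted deltas, and the newly predicted delta for the requested horizon, trimming the history once it is long enough. A minimal self-contained sketch of that bookkeeping (illustrative names only, not the nupic API):

from collections import deque

def reconstruct_prediction(absolute_value, best_delta, pred_history, steps):
    # Shift the predicted delta back into absolute terms: current value
    # plus the deltas already predicted on earlier iterations plus the
    # newly predicted delta for this horizon.
    sum_delta = sum(pred_history)
    prediction = absolute_value + sum_delta + best_delta
    # Remember the new best delta; keep only as many entries as the horizon needs.
    pred_history.append(best_delta)
    if len(pred_history) >= steps:
        pred_history.popleft()
    return prediction

history = deque()
print(reconstruct_prediction(10.0, 1.5, history, steps=3))  # 11.5
print(reconstruct_prediction(11.5, 0.5, history, steps=3))  # 13.5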
pandas-dev/pandas
|
pandas/core/indexes/range.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L355-L369
|
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
ls = len(self)
lo = len(other)
return (ls == lo == 0 or
ls == lo == 1 and
self._start == other._start or
ls == lo and
self._start == other._start and
self._step == other._step)
return super().equals(other)
|
[
"def",
"equals",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"RangeIndex",
")",
":",
"ls",
"=",
"len",
"(",
"self",
")",
"lo",
"=",
"len",
"(",
"other",
")",
"return",
"(",
"ls",
"==",
"lo",
"==",
"0",
"or",
"ls",
"==",
"lo",
"==",
"1",
"and",
"self",
".",
"_start",
"==",
"other",
".",
"_start",
"or",
"ls",
"==",
"lo",
"and",
"self",
".",
"_start",
"==",
"other",
".",
"_start",
"and",
"self",
".",
"_step",
"==",
"other",
".",
"_step",
")",
"return",
"super",
"(",
")",
".",
"equals",
"(",
"other",
")"
] |
Determines if two Index objects contain the same elements.
|
[
"Determines",
"if",
"two",
"Index",
"objects",
"contain",
"the",
"same",
"elements",
"."
] |
python
|
train
|
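A short usage sketch of the fast path above (assuming a pandas installation): two RangeIndex objects with matching start/step compare without materializing any values, while comparison against a plain Index falls back to the element-wise check in the superclass.

import pandas as pd

a = pd.RangeIndex(start=0, stop=10, step=2)
b = pd.RangeIndex(start=0, stop=10, step=2)
c = pd.Index([0, 2, 4, 6, 8])

print(a.equals(b))  # True, decided from start/step/length alone
print(a.equals(c))  # True, via the element-wise comparison in super().equals()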
CitrineInformatics/python-citrination-client
|
citrination_client/models/client.py
|
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/models/client.py#L144-L168
|
def submit_predict_request(self, data_view_id, candidates, prediction_source='scalar', use_prior=True):
"""
Submits an async prediction request.
:param data_view_id: The id returned from create
:param candidates: Array of candidates
:param prediction_source: 'scalar' or 'scalar_from_distribution'
:param use_prior: True to use prior prediction, otherwise False
:return: Predict request Id (used to check status)
"""
data = {
"prediction_source":
prediction_source,
"use_prior":
use_prior,
"candidates":
candidates
}
failure_message = "Configuration creation failed"
post_url = 'v1/data_views/' + str(data_view_id) + '/predict/submit'
return self._get_success_json(
self._post_json(post_url, data, failure_message=failure_message)
)['data']['uid']
|
[
"def",
"submit_predict_request",
"(",
"self",
",",
"data_view_id",
",",
"candidates",
",",
"prediction_source",
"=",
"'scalar'",
",",
"use_prior",
"=",
"True",
")",
":",
"data",
"=",
"{",
"\"prediction_source\"",
":",
"prediction_source",
",",
"\"use_prior\"",
":",
"use_prior",
",",
"\"candidates\"",
":",
"candidates",
"}",
"failure_message",
"=",
"\"Configuration creation failed\"",
"post_url",
"=",
"'v1/data_views/'",
"+",
"str",
"(",
"data_view_id",
")",
"+",
"'/predict/submit'",
"return",
"self",
".",
"_get_success_json",
"(",
"self",
".",
"_post_json",
"(",
"post_url",
",",
"data",
",",
"failure_message",
"=",
"failure_message",
")",
")",
"[",
"'data'",
"]",
"[",
"'uid'",
"]"
] |
Submits an async prediction request.
:param data_view_id: The id returned from create
:param candidates: Array of candidates
:param prediction_source: 'scalar' or 'scalar_from_distribution'
:param use_prior: True to use prior prediction, otherwise False
:return: Predict request Id (used to check status)
|
[
"Submits",
"an",
"async",
"prediction",
"request",
"."
] |
python
|
valid
|
linkedin/Zopkio
|
zopkio/deployer.py
|
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L224-L230
|
def hangup(self, unique_id, configs=None):
"""
Issue a signal to hangup the specified process
:Parameter unique_id: the name of the process
"""
self._send_signal(unique_id, signal.SIGHUP, configs)
|
[
"def",
"hangup",
"(",
"self",
",",
"unique_id",
",",
"configs",
"=",
"None",
")",
":",
"self",
".",
"_send_signal",
"(",
"unique_id",
",",
"signal",
".",
"SIGHUP",
",",
"configs",
")"
] |
Issue a signal to hangup the specified process
:Parameter unique_id: the name of the process
|
[
"Issue",
"a",
"signal",
"to",
"hangup",
"the",
"specified",
"process"
] |
python
|
train
|
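For context, hangup() ultimately delivers SIGHUP to the managed process. A standalone, POSIX-only sketch of that effect using just the standard library (no Zopkio deployer objects involved):

import os
import signal
import subprocess

proc = subprocess.Popen(["sleep", "30"])
os.kill(proc.pid, signal.SIGHUP)        # what _send_signal ends up doing
print(proc.wait())                      # -1: terminated by SIGHUP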
aliyun/aliyun-odps-python-sdk
|
odps/df/expr/merge.py
|
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/merge.py#L990-L1045
|
def setdiff(left, *rights, **kwargs):
"""
Exclude data from a collection, like `except` clause in SQL. All collections involved should
have same schema.
:param left: collection to drop data from
:param rights: collection or list of collections
:param distinct: whether to preserve duplicate entries
:return: collection
:Examples:
>>> import pandas as pd
>>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
>>> df2 = DataFrame(pd.DataFrame({'a': [1, 3], 'b': [1, 3]}))
>>> df1.setdiff(df2)
a b
0 2 2
1 3 3
2 3 3
>>> df1.setdiff(df2, distinct=True)
a b
0 2 2
"""
import time
from ..utils import output
distinct = kwargs.get('distinct', False)
if isinstance(rights[0], list):
rights = rights[0]
cols = [n for n in left.schema.names]
types = [n for n in left.schema.types]
counter_col_name = 'exc_counter_%d' % int(time.time())
left = left[left, Scalar(1).rename(counter_col_name)]
rights = [r[r, Scalar(-1).rename(counter_col_name)] for r in rights]
unioned = left
for r in rights:
unioned = unioned.union(r)
if distinct:
aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].min()})
return aggregated.filter(aggregated[counter_col_name] == 1).select(*cols)
else:
aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].sum()})
@output(cols, types)
def exploder(row):
import sys
irange = xrange if sys.version_info[0] < 3 else range
for _ in irange(getattr(row, counter_col_name)):
yield row[:-1]
return aggregated.map_reduce(mapper=exploder).select(*cols)
|
[
"def",
"setdiff",
"(",
"left",
",",
"*",
"rights",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"time",
"from",
".",
".",
"utils",
"import",
"output",
"distinct",
"=",
"kwargs",
".",
"get",
"(",
"'distinct'",
",",
"False",
")",
"if",
"isinstance",
"(",
"rights",
"[",
"0",
"]",
",",
"list",
")",
":",
"rights",
"=",
"rights",
"[",
"0",
"]",
"cols",
"=",
"[",
"n",
"for",
"n",
"in",
"left",
".",
"schema",
".",
"names",
"]",
"types",
"=",
"[",
"n",
"for",
"n",
"in",
"left",
".",
"schema",
".",
"types",
"]",
"counter_col_name",
"=",
"'exc_counter_%d'",
"%",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"left",
"=",
"left",
"[",
"left",
",",
"Scalar",
"(",
"1",
")",
".",
"rename",
"(",
"counter_col_name",
")",
"]",
"rights",
"=",
"[",
"r",
"[",
"r",
",",
"Scalar",
"(",
"-",
"1",
")",
".",
"rename",
"(",
"counter_col_name",
")",
"]",
"for",
"r",
"in",
"rights",
"]",
"unioned",
"=",
"left",
"for",
"r",
"in",
"rights",
":",
"unioned",
"=",
"unioned",
".",
"union",
"(",
"r",
")",
"if",
"distinct",
":",
"aggregated",
"=",
"unioned",
".",
"groupby",
"(",
"*",
"cols",
")",
".",
"agg",
"(",
"*",
"*",
"{",
"counter_col_name",
":",
"unioned",
"[",
"counter_col_name",
"]",
".",
"min",
"(",
")",
"}",
")",
"return",
"aggregated",
".",
"filter",
"(",
"aggregated",
"[",
"counter_col_name",
"]",
"==",
"1",
")",
".",
"select",
"(",
"*",
"cols",
")",
"else",
":",
"aggregated",
"=",
"unioned",
".",
"groupby",
"(",
"*",
"cols",
")",
".",
"agg",
"(",
"*",
"*",
"{",
"counter_col_name",
":",
"unioned",
"[",
"counter_col_name",
"]",
".",
"sum",
"(",
")",
"}",
")",
"@",
"output",
"(",
"cols",
",",
"types",
")",
"def",
"exploder",
"(",
"row",
")",
":",
"import",
"sys",
"irange",
"=",
"xrange",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
"else",
"range",
"for",
"_",
"in",
"irange",
"(",
"getattr",
"(",
"row",
",",
"counter_col_name",
")",
")",
":",
"yield",
"row",
"[",
":",
"-",
"1",
"]",
"return",
"aggregated",
".",
"map_reduce",
"(",
"mapper",
"=",
"exploder",
")",
".",
"select",
"(",
"*",
"cols",
")"
] |
Exclude data from a collection, like `except` clause in SQL. All collections involved should
have same schema.
:param left: collection to drop data from
:param rights: collection or list of collections
:param distinct: whether to preserve duplicate entries
:return: collection
:Examples:
>>> import pandas as pd
>>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
>>> df2 = DataFrame(pd.DataFrame({'a': [1, 3], 'b': [1, 3]}))
>>> df1.setdiff(df2)
a b
0 2 2
1 3 3
2 3 3
>>> df1.setdiff(df2, distinct=True)
a b
0 2 2
|
[
"Exclude",
"data",
"from",
"a",
"collection",
"like",
"except",
"clause",
"in",
"SQL",
".",
"All",
"collections",
"involved",
"should",
"have",
"same",
"schema",
"."
] |
python
|
train
|
viniciuschiele/flask-io
|
flask_io/mimetypes.py
|
https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/mimetypes.py#L81-L98
|
def replace(self, main_type=None, sub_type=None, params=None):
"""
Return a new MimeType with new values for the specified fields.
:param str main_type: The new main type.
:param str sub_type: The new sub type.
:param dict params: The new parameters.
:return: A new instance of MimeType
"""
if main_type is None:
main_type = self.main_type
if sub_type is None:
sub_type = self.sub_type
if params is None:
params = self.params
return MimeType(main_type, sub_type, params)
|
[
"def",
"replace",
"(",
"self",
",",
"main_type",
"=",
"None",
",",
"sub_type",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"main_type",
"is",
"None",
":",
"main_type",
"=",
"self",
".",
"main_type",
"if",
"sub_type",
"is",
"None",
":",
"sub_type",
"=",
"self",
".",
"sub_type",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"self",
".",
"params",
"return",
"MimeType",
"(",
"main_type",
",",
"sub_type",
",",
"params",
")"
] |
Return a new MimeType with new values for the specified fields.
:param str main_type: The new main type.
:param str sub_type: The new sub type.
:param dict params: The new parameters.
:return: A new instance of MimeType
|
[
"Return",
"a",
"new",
"MimeType",
"with",
"new",
"values",
"for",
"the",
"specified",
"fields",
".",
":",
"param",
"str",
"main_type",
":",
"The",
"new",
"main",
"type",
".",
":",
"param",
"str",
"sub_type",
":",
"The",
"new",
"sub",
"type",
".",
":",
"param",
"dict",
"params",
":",
"The",
"new",
"parameters",
".",
":",
"return",
":",
"A",
"new",
"instance",
"of",
"MimeType"
] |
python
|
train
|
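The replace() method above is the classic "copy with selective overrides" pattern for immutable value objects. The standard library offers the same idea via dataclasses.replace; MimeTypeLike below is a stand-in, not the flask_io class:

from dataclasses import dataclass, field, replace

@dataclass(frozen=True)
class MimeTypeLike:
    main_type: str
    sub_type: str
    params: dict = field(default_factory=dict)

original = MimeTypeLike("application", "json", {"charset": "utf-8"})
altered = replace(original, sub_type="xml")   # unspecified fields carry over
print(altered.main_type, altered.sub_type, altered.params)
# application xml {'charset': 'utf-8'}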
jldantas/libmft
|
libmft/attribute.py
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L695-L715
|
def _astimezone_ts(self, timezone):
"""Changes the time zones of all timestamps.
Receives a new timezone and applies to all timestamps, if necessary.
Args:
timezone (:obj:`tzinfo`): Time zone to be applied
Returns:
A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.
"""
if self.created.tzinfo is timezone:
return self
else:
nw_obj = Timestamps((None,)*4)
nw_obj.created = self.created.astimezone(timezone)
nw_obj.changed = self.changed.astimezone(timezone)
nw_obj.mft_changed = self.mft_changed.astimezone(timezone)
nw_obj.accessed = self.accessed.astimezone(timezone)
return nw_obj
|
[
"def",
"_astimezone_ts",
"(",
"self",
",",
"timezone",
")",
":",
"if",
"self",
".",
"created",
".",
"tzinfo",
"is",
"timezone",
":",
"return",
"self",
"else",
":",
"nw_obj",
"=",
"Timestamps",
"(",
"(",
"None",
",",
")",
"*",
"4",
")",
"nw_obj",
".",
"created",
"=",
"self",
".",
"created",
".",
"astimezone",
"(",
"timezone",
")",
"nw_obj",
".",
"changed",
"=",
"self",
".",
"changed",
".",
"astimezone",
"(",
"timezone",
")",
"nw_obj",
".",
"mft_changed",
"=",
"self",
".",
"mft_changed",
".",
"astimezone",
"(",
"timezone",
")",
"nw_obj",
".",
"accessed",
"=",
"self",
".",
"accessed",
".",
"astimezone",
"(",
"timezone",
")",
"return",
"nw_obj"
] |
Changes the time zones of all timestamps.
Receives a new timezone and applies to all timestamps, if necessary.
Args:
timezone (:obj:`tzinfo`): Time zone to be applied
Returns:
A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.
|
[
"Changes",
"the",
"time",
"zones",
"of",
"all",
"timestamps",
"."
] |
python
|
train
|
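A minimal standard-library sketch of the conversion step used by _astimezone_ts, including the identity check that skips work when the timestamp already carries the target tzinfo object:

from datetime import datetime, timedelta, timezone

created = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
eastern = timezone(timedelta(hours=-5))

converted = created if created.tzinfo is eastern else created.astimezone(eastern)
print(converted.isoformat())  # 2024-01-01T07:00:00-05:00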
OpenKMIP/PyKMIP
|
kmip/pie/sqltypes.py
|
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/pie/sqltypes.py#L61-L76
|
def process_result_value(self, value, dialect):
"""
Returns a new list of enums.CryptographicUsageMask Enums. This converts
the integer value into the list of enums.
Args:
value(int): The integer value stored in the database that is used
to create the list of enums.CryptographicUsageMask Enums.
dialect(string): SQL dialect
"""
masks = list()
if value:
for e in enums.CryptographicUsageMask:
if e.value & value:
masks.append(e)
return masks
|
[
"def",
"process_result_value",
"(",
"self",
",",
"value",
",",
"dialect",
")",
":",
"masks",
"=",
"list",
"(",
")",
"if",
"value",
":",
"for",
"e",
"in",
"enums",
".",
"CryptographicUsageMask",
":",
"if",
"e",
".",
"value",
"&",
"value",
":",
"masks",
".",
"append",
"(",
"e",
")",
"return",
"masks"
] |
Returns a new list of enums.CryptographicUsageMask Enums. This converts
the integer value into the list of enums.
Args:
value(int): The integer value stored in the database that is used
to create the list of enums.CryptographicUsageMask Enums.
dialect(string): SQL dialect
|
[
"Returns",
"a",
"new",
"list",
"of",
"enums",
".",
"CryptographicUsageMask",
"Enums",
".",
"This",
"converts",
"the",
"integer",
"value",
"into",
"the",
"list",
"of",
"enums",
"."
] |
python
|
test
|
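The decoder above expands a stored bitmask integer into the enum members whose bits are set. A self-contained sketch of the same decoding with a stand-in enum (UsageMask is illustrative, not the PyKMIP type):

import enum

class UsageMask(enum.IntEnum):
    SIGN = 1
    VERIFY = 2
    ENCRYPT = 4
    DECRYPT = 8

def masks_from_int(value):
    # Keep every member whose bit is present in the stored integer.
    return [e for e in UsageMask if e.value & value]

print(masks_from_int(5))   # [<UsageMask.SIGN: 1>, <UsageMask.ENCRYPT: 4>]
print(masks_from_int(0))   # []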
Blueqat/Blueqat
|
blueqat/pauli.py
|
https://github.com/Blueqat/Blueqat/blob/2ac8592c79e7acf4f385d982af82fbd68dafa5cc/blueqat/pauli.py#L555-L557
|
def from_terms_dict(terms_dict):
"""For internal use."""
return Expr(tuple(Term(k, v) for k, v in terms_dict.items() if v))
|
[
"def",
"from_terms_dict",
"(",
"terms_dict",
")",
":",
"return",
"Expr",
"(",
"tuple",
"(",
"Term",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"terms_dict",
".",
"items",
"(",
")",
"if",
"v",
")",
")"
] |
For internal use.
|
[
"For",
"internal",
"use",
"."
] |
python
|
train
|
ArchiveTeam/wpull
|
wpull/network/connection.py
|
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/connection.py#L476-L500
|
def _verify_cert(self, sock: ssl.SSLSocket):
'''Check if certificate matches hostname.'''
# Based on tornado.iostream.SSLIOStream
# Needed for older OpenSSL (<0.9.8f) versions
verify_mode = self._ssl_context.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED,
ssl.CERT_OPTIONAL), \
'Unknown verify mode {}'.format(verify_mode)
if verify_mode == ssl.CERT_NONE:
return
cert = sock.getpeercert()
if not cert and verify_mode == ssl.CERT_OPTIONAL:
return
if not cert:
raise SSLVerificationError('No SSL certificate given')
try:
ssl.match_hostname(cert, self._hostname)
except ssl.CertificateError as error:
raise SSLVerificationError('Invalid SSL certificate') from error
|
[
"def",
"_verify_cert",
"(",
"self",
",",
"sock",
":",
"ssl",
".",
"SSLSocket",
")",
":",
"# Based on tornado.iostream.SSLIOStream",
"# Needed for older OpenSSL (<0.9.8f) versions",
"verify_mode",
"=",
"self",
".",
"_ssl_context",
".",
"verify_mode",
"assert",
"verify_mode",
"in",
"(",
"ssl",
".",
"CERT_NONE",
",",
"ssl",
".",
"CERT_REQUIRED",
",",
"ssl",
".",
"CERT_OPTIONAL",
")",
",",
"'Unknown verify mode {}'",
".",
"format",
"(",
"verify_mode",
")",
"if",
"verify_mode",
"==",
"ssl",
".",
"CERT_NONE",
":",
"return",
"cert",
"=",
"sock",
".",
"getpeercert",
"(",
")",
"if",
"not",
"cert",
"and",
"verify_mode",
"==",
"ssl",
".",
"CERT_OPTIONAL",
":",
"return",
"if",
"not",
"cert",
":",
"raise",
"SSLVerificationError",
"(",
"'No SSL certificate given'",
")",
"try",
":",
"ssl",
".",
"match_hostname",
"(",
"cert",
",",
"self",
".",
"_hostname",
")",
"except",
"ssl",
".",
"CertificateError",
"as",
"error",
":",
"raise",
"SSLVerificationError",
"(",
"'Invalid SSL certificate'",
")",
"from",
"error"
] |
Check if certificate matches hostname.
|
[
"Check",
"if",
"certificate",
"matches",
"hostname",
"."
] |
python
|
train
|
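The manual match_hostname call above was a workaround for old OpenSSL builds; on current Python versions the ssl module performs the hostname check itself when check_hostname is enabled. A hedged sketch of that setup (requires network access; example.com is only a placeholder host):

import socket
import ssl

context = ssl.create_default_context()            # CERT_REQUIRED + check_hostname=True
with socket.create_connection(("example.com", 443), timeout=10) as sock:
    with context.wrap_socket(sock, server_hostname="example.com") as tls:
        print(tls.getpeercert()["subject"])       # certificate verified against the hostname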
tasdikrahman/vocabulary
|
vocabulary/responselib.py
|
https://github.com/tasdikrahman/vocabulary/blob/54403c5981af25dc3457796b57048ae27f09e9be/vocabulary/responselib.py#L88-L105
|
def respond(self, data, format='json'):
"""
Converts a json object to a python datastructure based on
specified format
:param data: the json object
:param format: python datastructure type. Defaults to: "json"
:returns: a python specified object
"""
dispatchers = {
"dict": self.__respond_with_dict,
"list": self.__respond_with_list
}
if not dispatchers.get(format, False):
return json.dumps(data)
return dispatchers[format](data)
|
[
"def",
"respond",
"(",
"self",
",",
"data",
",",
"format",
"=",
"'json'",
")",
":",
"dispatchers",
"=",
"{",
"\"dict\"",
":",
"self",
".",
"__respond_with_dict",
",",
"\"list\"",
":",
"self",
".",
"__respond_with_list",
"}",
"if",
"not",
"dispatchers",
".",
"get",
"(",
"format",
",",
"False",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"data",
")",
"return",
"dispatchers",
"[",
"format",
"]",
"(",
"data",
")"
] |
Converts a json object to a python datastructure based on
specified format
:param data: the json object
:param format: python datastructure type. Defaults to: "json"
:returns: a python specified object
|
[
"Converts",
"a",
"json",
"object",
"to",
"a",
"python",
"datastructure",
"based",
"on",
"specified",
"format"
] |
python
|
train
|
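The dispatcher-dict pattern above keeps format handling table-driven, with unknown formats falling through to a JSON string. A stripped-down standalone version of the same dispatch (the handlers here are illustrative lambdas, not the private Vocabulary helpers):

import json

def respond(data, fmt="json"):
    dispatchers = {
        "dict": lambda d: dict(enumerate(d)),
        "list": lambda d: list(d),
    }
    handler = dispatchers.get(fmt)
    return handler(data) if handler else json.dumps(data)

print(respond(["a", "b"]))          # '["a", "b"]' -- unknown format, JSON fallback
print(respond(["a", "b"], "dict"))  # {0: 'a', 1: 'b'}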
secdev/scapy
|
scapy/packet.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L496-L521
|
def self_build(self, field_pos_list=None):
"""
Create the default layer regarding fields_desc dict
:param field_pos_list:
"""
if self.raw_packet_cache is not None:
for fname, fval in six.iteritems(self.raw_packet_cache_fields):
if self.getfieldval(fname) != fval:
self.raw_packet_cache = None
self.raw_packet_cache_fields = None
self.wirelen = None
break
if self.raw_packet_cache is not None:
return self.raw_packet_cache
p = b""
for f in self.fields_desc:
val = self.getfieldval(f.name)
if isinstance(val, RawVal):
sval = raw(val)
p += sval
if field_pos_list is not None:
field_pos_list.append((f.name, sval.encode("string_escape"), len(p), len(sval))) # noqa: E501
else:
p = f.addfield(self, p, val)
return p
|
[
"def",
"self_build",
"(",
"self",
",",
"field_pos_list",
"=",
"None",
")",
":",
"if",
"self",
".",
"raw_packet_cache",
"is",
"not",
"None",
":",
"for",
"fname",
",",
"fval",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"raw_packet_cache_fields",
")",
":",
"if",
"self",
".",
"getfieldval",
"(",
"fname",
")",
"!=",
"fval",
":",
"self",
".",
"raw_packet_cache",
"=",
"None",
"self",
".",
"raw_packet_cache_fields",
"=",
"None",
"self",
".",
"wirelen",
"=",
"None",
"break",
"if",
"self",
".",
"raw_packet_cache",
"is",
"not",
"None",
":",
"return",
"self",
".",
"raw_packet_cache",
"p",
"=",
"b\"\"",
"for",
"f",
"in",
"self",
".",
"fields_desc",
":",
"val",
"=",
"self",
".",
"getfieldval",
"(",
"f",
".",
"name",
")",
"if",
"isinstance",
"(",
"val",
",",
"RawVal",
")",
":",
"sval",
"=",
"raw",
"(",
"val",
")",
"p",
"+=",
"sval",
"if",
"field_pos_list",
"is",
"not",
"None",
":",
"field_pos_list",
".",
"append",
"(",
"(",
"f",
".",
"name",
",",
"sval",
".",
"encode",
"(",
"\"string_escape\"",
")",
",",
"len",
"(",
"p",
")",
",",
"len",
"(",
"sval",
")",
")",
")",
"# noqa: E501",
"else",
":",
"p",
"=",
"f",
".",
"addfield",
"(",
"self",
",",
"p",
",",
"val",
")",
"return",
"p"
] |
Create the default layer regarding fields_desc dict
:param field_pos_list:
|
[
"Create",
"the",
"default",
"layer",
"regarding",
"fields_desc",
"dict"
] |
python
|
train
|
nkgilley/python-ecobee-api
|
pyecobee/__init__.py
|
https://github.com/nkgilley/python-ecobee-api/blob/cc8d90d20abcb9ef5b66ec9cb035bae2f06ba174/pyecobee/__init__.py#L12-L32
|
def config_from_file(filename, config=None):
''' Small configuration file management function'''
if config:
# We're writing configuration
try:
with open(filename, 'w') as fdesc:
fdesc.write(json.dumps(config))
except IOError as error:
logger.exception(error)
return False
return True
else:
# We're reading config
if os.path.isfile(filename):
try:
with open(filename, 'r') as fdesc:
return json.loads(fdesc.read())
except IOError as error:
return False
else:
return {}
|
[
"def",
"config_from_file",
"(",
"filename",
",",
"config",
"=",
"None",
")",
":",
"if",
"config",
":",
"# We're writing configuration",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fdesc",
":",
"fdesc",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"config",
")",
")",
"except",
"IOError",
"as",
"error",
":",
"logger",
".",
"exception",
"(",
"error",
")",
"return",
"False",
"return",
"True",
"else",
":",
"# We're reading config",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fdesc",
":",
"return",
"json",
".",
"loads",
"(",
"fdesc",
".",
"read",
"(",
")",
")",
"except",
"IOError",
"as",
"error",
":",
"return",
"False",
"else",
":",
"return",
"{",
"}"
] |
Small configuration file management function
|
[
"Small",
"configuration",
"file",
"management",
"function"
] |
python
|
test
|
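The dual-purpose helper above either serializes a dict to disk or reads it back, returning an empty dict when no file exists yet. A compact standard-library sketch of the same round trip (the path and key are placeholders):

import json
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "ecobee_demo.conf")

with open(path, "w") as fdesc:              # "write" branch
    fdesc.write(json.dumps({"API_KEY": "example"}))

if os.path.isfile(path):                    # "read" branch
    with open(path, "r") as fdesc:
        config = json.loads(fdesc.read())
else:
    config = {}
print(config)   # {'API_KEY': 'example'}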
saltstack/salt
|
salt/modules/zpool.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zpool.py#L1438-L1467
|
def labelclear(device, force=False):
'''
.. versionadded:: 2018.3.0
Removes ZFS label information from the specified device
device : string
Device name; must not be part of an active pool configuration.
force : boolean
Treat exported or foreign devices as inactive
CLI Example:
.. code-block:: bash
salt '*' zpool.labelclear /path/to/dev
'''
## clear label for all specified device
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='labelclear',
flags=['-f'] if force else None,
target=device,
),
python_shell=False,
)
return __utils__['zfs.parse_command_result'](res, 'labelcleared')
|
[
"def",
"labelclear",
"(",
"device",
",",
"force",
"=",
"False",
")",
":",
"## clear label for all specified device",
"res",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"__utils__",
"[",
"'zfs.zpool_command'",
"]",
"(",
"command",
"=",
"'labelclear'",
",",
"flags",
"=",
"[",
"'-f'",
"]",
"if",
"force",
"else",
"None",
",",
"target",
"=",
"device",
",",
")",
",",
"python_shell",
"=",
"False",
",",
")",
"return",
"__utils__",
"[",
"'zfs.parse_command_result'",
"]",
"(",
"res",
",",
"'labelcleared'",
")"
] |
.. versionadded:: 2018.3.0
Removes ZFS label information from the specified device
device : string
Device name; must not be part of an active pool configuration.
force : boolean
Treat exported or foreign devices as inactive
CLI Example:
.. code-block:: bash
salt '*' zpool.labelclear /path/to/dev
|
[
"..",
"versionadded",
"::",
"2018",
".",
"3",
".",
"0"
] |
python
|
train
|
scanny/python-pptx
|
pptx/chart/data.py
|
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/data.py#L187-L197
|
def number_format(self):
"""
The formatting template string that determines how a number in this
series is formatted, both in the chart and in the Excel spreadsheet;
for example '#,##0.0'. If not specified for this series, it is
inherited from the parent chart data object.
"""
number_format = self._number_format
if number_format is None:
return self._chart_data.number_format
return number_format
|
[
"def",
"number_format",
"(",
"self",
")",
":",
"number_format",
"=",
"self",
".",
"_number_format",
"if",
"number_format",
"is",
"None",
":",
"return",
"self",
".",
"_chart_data",
".",
"number_format",
"return",
"number_format"
] |
The formatting template string that determines how a number in this
series is formatted, both in the chart and in the Excel spreadsheet;
for example '#,##0.0'. If not specified for this series, it is
inherited from the parent chart data object.
|
[
"The",
"formatting",
"template",
"string",
"that",
"determines",
"how",
"a",
"number",
"in",
"this",
"series",
"is",
"formatted",
"both",
"in",
"the",
"chart",
"and",
"in",
"the",
"Excel",
"spreadsheet",
";",
"for",
"example",
"#",
"##0",
".",
"0",
".",
"If",
"not",
"specified",
"for",
"this",
"series",
"it",
"is",
"inherited",
"from",
"the",
"parent",
"chart",
"data",
"object",
"."
] |
python
|
train
|
ewels/MultiQC
|
multiqc/modules/mirtrace/mirtrace.py
|
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/mirtrace/mirtrace.py#L218-L237
|
def mirtrace_qc_plot(self):
""" Generate the miRTrace QC Plot"""
# Specify the order of the different possible categories
keys = OrderedDict()
keys['adapter_removed_length_ok'] = { 'color': '#006837', 'name': 'Reads ≥ 18 nt after adapter removal' }
keys['adapter_not_detected'] = { 'color': '#66bd63', 'name': 'Reads without adapter' }
keys['length_shorter_than_18'] = { 'color': '#fdae61', 'name': 'Reads < 18 nt after adapter removal' }
keys['low_complexity'] = { 'color': '#d73027', 'name': 'Reads with low complexity' }
keys['low_phred'] = { 'color': '#a50026', 'name': 'Reads with low PHRED score' }
# Config for the plot
config = {
'id': 'mirtrace_qc_plot',
'title': 'miRTrace: QC Plot',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return bargraph.plot(self.summary_data, keys, config)
|
[
"def",
"mirtrace_qc_plot",
"(",
"self",
")",
":",
"# Specify the order of the different possible categories",
"keys",
"=",
"OrderedDict",
"(",
")",
"keys",
"[",
"'adapter_removed_length_ok'",
"]",
"=",
"{",
"'color'",
":",
"'#006837'",
",",
"'name'",
":",
"'Reads ≥ 18 nt after adapter removal' }",
"",
"keys",
"[",
"'adapter_not_detected'",
"]",
"=",
"{",
"'color'",
":",
"'#66bd63'",
",",
"'name'",
":",
"'Reads without adapter'",
"}",
"keys",
"[",
"'length_shorter_than_18'",
"]",
"=",
"{",
"'color'",
":",
"'#fdae61'",
",",
"'name'",
":",
"'Reads < 18 nt after adapter removal'",
"}",
"keys",
"[",
"'low_complexity'",
"]",
"=",
"{",
"'color'",
":",
"'#d73027'",
",",
"'name'",
":",
"'Reads with low complexity'",
"}",
"keys",
"[",
"'low_phred'",
"]",
"=",
"{",
"'color'",
":",
"'#a50026'",
",",
"'name'",
":",
"'Reads with low PHRED score'",
"}",
"# Config for the plot",
"config",
"=",
"{",
"'id'",
":",
"'mirtrace_qc_plot'",
",",
"'title'",
":",
"'miRTrace: QC Plot'",
",",
"'ylab'",
":",
"'# Reads'",
",",
"'cpswitch_counts_label'",
":",
"'Number of Reads'",
"}",
"return",
"bargraph",
".",
"plot",
"(",
"self",
".",
"summary_data",
",",
"keys",
",",
"config",
")"
] |
Generate the miRTrace QC Plot
|
[
"Generate",
"the",
"miRTrace",
"QC",
"Plot"
] |
python
|
train
|
manolomartinez/greg
|
greg/classes.py
|
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/classes.py#L276-L345
|
def download_entry(self, entry):
"""
Find entry link and download entry
"""
downloadlinks = {}
downloaded = False
ignoreenclosures = self.retrieve_config('ignoreenclosures', 'no')
notype = self.retrieve_config('notype', 'no')
if ignoreenclosures == 'no':
for enclosure in entry.enclosures:
if notype == 'yes':
downloadlinks[urlparse(enclosure["href"]).path.split(
"/")[-1]] = enclosure["href"]
# preserve original name
else:
try:
# We will download all enclosures of the desired
# mime-type
if any([mimetype in enclosure["type"] for mimetype in
self.mime]):
downloadlinks[urlparse(
enclosure["href"]).path.split(
"/")[-1]] = enclosure["href"]
# preserve original name
except KeyError:
print("This podcast carries no information about "
"enclosure types. Try using the notype "
"option in your greg.conf", file=sys.stderr,
flush=True)
else:
downloadlinks[urlparse(entry.link).query.split(
"/")[-1]] = entry.link
for podname in downloadlinks:
if (podname, entry.linkdate) not in zip(self.entrylinks,
self.linkdates):
try:
title = entry.title
except:
title = podname
try:
sanitizedsummary = aux.html_to_text(entry.summary)
if sanitizedsummary == "":
sanitizedsummary = "No summary available"
except:
sanitizedsummary = "No summary available"
try:
placeholders = Placeholders(
self, entry, downloadlinks[podname], podname, title,
sanitizedsummary)
placeholders = aux.check_directory(placeholders)
condition = aux.filtercond(placeholders)
if condition:
print("Downloading {} -- {}".format(title, podname))
aux.download_handler(self, placeholders)
if self.willtag:
aux.tag(placeholders)
downloaded = True
else:
print("Skipping {} -- {}".format(title, podname))
downloaded = False
if self.info:
with open(self.info, 'a') as current:
# We write to file this often to ensure that
# downloaded entries count as downloaded.
current.write(''.join([podname, ' ',
str(entry.linkdate), '\n']))
except URLError:
sys.exit(("... something went wrong. "
"Are you connected to the internet?"))
return downloaded
|
[
"def",
"download_entry",
"(",
"self",
",",
"entry",
")",
":",
"downloadlinks",
"=",
"{",
"}",
"downloaded",
"=",
"False",
"ignoreenclosures",
"=",
"self",
".",
"retrieve_config",
"(",
"'ignoreenclosures'",
",",
"'no'",
")",
"notype",
"=",
"self",
".",
"retrieve_config",
"(",
"'notype'",
",",
"'no'",
")",
"if",
"ignoreenclosures",
"==",
"'no'",
":",
"for",
"enclosure",
"in",
"entry",
".",
"enclosures",
":",
"if",
"notype",
"==",
"'yes'",
":",
"downloadlinks",
"[",
"urlparse",
"(",
"enclosure",
"[",
"\"href\"",
"]",
")",
".",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"]",
"=",
"enclosure",
"[",
"\"href\"",
"]",
"# preserve original name",
"else",
":",
"try",
":",
"# We will download all enclosures of the desired",
"# mime-type",
"if",
"any",
"(",
"[",
"mimetype",
"in",
"enclosure",
"[",
"\"type\"",
"]",
"for",
"mimetype",
"in",
"self",
".",
"mime",
"]",
")",
":",
"downloadlinks",
"[",
"urlparse",
"(",
"enclosure",
"[",
"\"href\"",
"]",
")",
".",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"]",
"=",
"enclosure",
"[",
"\"href\"",
"]",
"# preserve original name",
"except",
"KeyError",
":",
"print",
"(",
"\"This podcast carries no information about \"",
"\"enclosure types. Try using the notype \"",
"\"option in your greg.conf\"",
",",
"file",
"=",
"sys",
".",
"stderr",
",",
"flush",
"=",
"True",
")",
"else",
":",
"downloadlinks",
"[",
"urlparse",
"(",
"entry",
".",
"link",
")",
".",
"query",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"]",
"=",
"entry",
".",
"link",
"for",
"podname",
"in",
"downloadlinks",
":",
"if",
"(",
"podname",
",",
"entry",
".",
"linkdate",
")",
"not",
"in",
"zip",
"(",
"self",
".",
"entrylinks",
",",
"self",
".",
"linkdates",
")",
":",
"try",
":",
"title",
"=",
"entry",
".",
"title",
"except",
":",
"title",
"=",
"podname",
"try",
":",
"sanitizedsummary",
"=",
"aux",
".",
"html_to_text",
"(",
"entry",
".",
"summary",
")",
"if",
"sanitizedsummary",
"==",
"\"\"",
":",
"sanitizedsummary",
"=",
"\"No summary available\"",
"except",
":",
"sanitizedsummary",
"=",
"\"No summary available\"",
"try",
":",
"placeholders",
"=",
"Placeholders",
"(",
"self",
",",
"entry",
",",
"downloadlinks",
"[",
"podname",
"]",
",",
"podname",
",",
"title",
",",
"sanitizedsummary",
")",
"placeholders",
"=",
"aux",
".",
"check_directory",
"(",
"placeholders",
")",
"condition",
"=",
"aux",
".",
"filtercond",
"(",
"placeholders",
")",
"if",
"condition",
":",
"print",
"(",
"\"Downloading {} -- {}\"",
".",
"format",
"(",
"title",
",",
"podname",
")",
")",
"aux",
".",
"download_handler",
"(",
"self",
",",
"placeholders",
")",
"if",
"self",
".",
"willtag",
":",
"aux",
".",
"tag",
"(",
"placeholders",
")",
"downloaded",
"=",
"True",
"else",
":",
"print",
"(",
"\"Skipping {} -- {}\"",
".",
"format",
"(",
"title",
",",
"podname",
")",
")",
"downloaded",
"=",
"False",
"if",
"self",
".",
"info",
":",
"with",
"open",
"(",
"self",
".",
"info",
",",
"'a'",
")",
"as",
"current",
":",
"# We write to file this often to ensure that",
"# downloaded entries count as downloaded.",
"current",
".",
"write",
"(",
"''",
".",
"join",
"(",
"[",
"podname",
",",
"' '",
",",
"str",
"(",
"entry",
".",
"linkdate",
")",
",",
"'\\n'",
"]",
")",
")",
"except",
"URLError",
":",
"sys",
".",
"exit",
"(",
"(",
"\"... something went wrong. \"",
"\"Are you connected to the internet?\"",
")",
")",
"return",
"downloaded"
] |
Find entry link and download entry
|
[
"Find",
"entry",
"link",
"and",
"download",
"entry"
] |
python
|
train
|
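The enclosure handling above derives the on-disk file name from the URL path so the original name is preserved. The core of that extraction, standalone (the URL is made up):

from urllib.parse import urlparse

href = "https://example.com/feeds/episode-42.mp3?session=abc"
podname = urlparse(href).path.split("/")[-1]   # keep the original file name
print(podname)   # episode-42.mp3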
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L392-L405
|
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
|
[
"def",
"list_files_in_directory",
"(",
"full_directory_path",
")",
":",
"files",
"=",
"list",
"(",
")",
"for",
"file_name",
"in",
"__os",
".",
"listdir",
"(",
"full_directory_path",
")",
":",
"if",
"__os",
".",
"path",
".",
"isfile",
"(",
"__os",
".",
"path",
".",
"join",
"(",
"full_directory_path",
",",
"file_name",
")",
")",
":",
"files",
".",
"append",
"(",
"file_name",
")",
"return",
"files"
] |
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
|
[
"List",
"the",
"files",
"in",
"a",
"specified",
"directory",
"Args",
":",
"full_directory_path",
":",
"The",
"full",
"directory",
"path",
"to",
"check",
"derive",
"from",
"the",
"os",
"module"
] |
python
|
train
|
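An equivalent of the file-only listing above written with pathlib, shown against a throwaway temp directory so it runs anywhere:

import tempfile
from pathlib import Path

directory = Path(tempfile.mkdtemp())
(directory / "a.txt").write_text("demo")
(directory / "subdir").mkdir()

files = [p.name for p in directory.iterdir() if p.is_file()]   # skip directories
print(files)   # ['a.txt']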
SeattleTestbed/seash
|
pyreadline/lineeditor/lineobj.py
|
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/lineeditor/lineobj.py#L726-L736
|
def copy_selection_to_clipboard(self): # ()
u'''Copy the text in the region to the windows clipboard.'''
if self.enable_win32_clipboard and self.enable_selection and self.selection_mark >= 0:
selection_mark = min(self.selection_mark,len(self.line_buffer))
cursor = min(self.point,len(self.line_buffer))
if self.selection_mark == -1:
return
begin = min(cursor, selection_mark)
end = max(cursor, selection_mark)
toclipboard = u"".join(self.line_buffer[begin:end])
clipboard.SetClipboardText(toclipboard)
|
[
"def",
"copy_selection_to_clipboard",
"(",
"self",
")",
":",
"# ()\r",
"if",
"self",
".",
"enable_win32_clipboard",
"and",
"self",
".",
"enable_selection",
"and",
"self",
".",
"selection_mark",
">=",
"0",
":",
"selection_mark",
"=",
"min",
"(",
"self",
".",
"selection_mark",
",",
"len",
"(",
"self",
".",
"line_buffer",
")",
")",
"cursor",
"=",
"min",
"(",
"self",
".",
"point",
",",
"len",
"(",
"self",
".",
"line_buffer",
")",
")",
"if",
"self",
".",
"selection_mark",
"==",
"-",
"1",
":",
"return",
"begin",
"=",
"min",
"(",
"cursor",
",",
"selection_mark",
")",
"end",
"=",
"max",
"(",
"cursor",
",",
"selection_mark",
")",
"toclipboard",
"=",
"u\"\"",
".",
"join",
"(",
"self",
".",
"line_buffer",
"[",
"begin",
":",
"end",
"]",
")",
"clipboard",
".",
"SetClipboardText",
"(",
"toclipboard",
")"
] |
Copy the text in the region to the windows clipboard.
|
[
"u",
"Copy",
"the",
"text",
"in",
"the",
"region",
"to",
"the",
"windows",
"clipboard",
"."
] |
python
|
train
|
Duke-GCB/DukeDSClient
|
ddsc/core/parallel.py
|
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/parallel.py#L247-L263
|
def get_finished_results(self):
"""
Go through pending results and retrieve the results if they are done.
Then start child tasks for the task that finished.
"""
task_and_results = []
for pending_result in self.pending_results:
if pending_result.ready():
ret = pending_result.get()
task_id, result = ret
task = self.task_id_to_task[task_id]
# process any pending messages for this task (will also process other tasks messages)
self.process_all_messages_in_queue()
task.after_run(result)
task_and_results.append((task, result))
self.pending_results.remove(pending_result)
return task_and_results
|
[
"def",
"get_finished_results",
"(",
"self",
")",
":",
"task_and_results",
"=",
"[",
"]",
"for",
"pending_result",
"in",
"self",
".",
"pending_results",
":",
"if",
"pending_result",
".",
"ready",
"(",
")",
":",
"ret",
"=",
"pending_result",
".",
"get",
"(",
")",
"task_id",
",",
"result",
"=",
"ret",
"task",
"=",
"self",
".",
"task_id_to_task",
"[",
"task_id",
"]",
"# process any pending messages for this task (will also process other tasks messages)",
"self",
".",
"process_all_messages_in_queue",
"(",
")",
"task",
".",
"after_run",
"(",
"result",
")",
"task_and_results",
".",
"append",
"(",
"(",
"task",
",",
"result",
")",
")",
"self",
".",
"pending_results",
".",
"remove",
"(",
"pending_result",
")",
"return",
"task_and_results"
] |
Go through pending results and retrieve the results if they are done.
Then start child tasks for the task that finished.
|
[
"Go",
"through",
"pending",
"results",
"and",
"retrieve",
"the",
"results",
"if",
"they",
"are",
"done",
".",
"Then",
"start",
"child",
"tasks",
"for",
"the",
"task",
"that",
"finished",
"."
] |
python
|
train
|
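The polling loop above checks ready() on each pending result, collects the ones that finished, and leaves the rest queued. A self-contained sketch of that pattern with multiprocessing's AsyncResult objects (no DukeDSClient types involved):

import time
from multiprocessing import Pool

def square(task):
    task_id, value = task
    return task_id, value * value

if __name__ == "__main__":
    with Pool(2) as pool:
        pending = [pool.apply_async(square, ((i, i),)) for i in range(4)]
        finished = []
        while pending:
            for result in list(pending):
                if result.ready():          # same poll as get_finished_results()
                    finished.append(result.get())
                    pending.remove(result)
            time.sleep(0.01)
    print(sorted(finished))   # [(0, 0), (1, 1), (2, 4), (3, 9)]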
benoitkugler/abstractDataLibrary
|
pyDLib/Core/data_model.py
|
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L57-L62
|
def modifie(self, key: str, value: Any) -> None:
"""Store the modification. `value` should be dumped in DB compatible format."""
if key in self.FIELDS_OPTIONS:
self.modifie_options(key, value)
else:
self.modifications[key] = value
|
[
"def",
"modifie",
"(",
"self",
",",
"key",
":",
"str",
",",
"value",
":",
"Any",
")",
"->",
"None",
":",
"if",
"key",
"in",
"self",
".",
"FIELDS_OPTIONS",
":",
"self",
".",
"modifie_options",
"(",
"key",
",",
"value",
")",
"else",
":",
"self",
".",
"modifications",
"[",
"key",
"]",
"=",
"value"
] |
Store the modification. `value` should be dumped in DB compatible format.
|
[
"Store",
"the",
"modification",
".",
"value",
"should",
"be",
"dumped",
"in",
"DB",
"compatible",
"format",
"."
] |
python
|
train
|
mabuchilab/QNET
|
src/qnet/utils/indices.py
|
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/utils/indices.py#L152-L156
|
def incr_primed(self, incr=1):
"""Return a copy of the index with an incremented :attr:`primed`"""
return self.__class__(
self.name, primed=self._primed + incr,
**self._assumptions.generator)
|
[
"def",
"incr_primed",
"(",
"self",
",",
"incr",
"=",
"1",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"name",
",",
"primed",
"=",
"self",
".",
"_primed",
"+",
"incr",
",",
"*",
"*",
"self",
".",
"_assumptions",
".",
"generator",
")"
] |
Return a copy of the index with an incremented :attr:`primed`
|
[
"Return",
"a",
"copy",
"of",
"the",
"index",
"with",
"an",
"incremented",
":",
"attr",
":",
"primed"
] |
python
|
train
|
aio-libs/aioredis
|
aioredis/commands/list.py
|
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/list.py#L38-L50
|
def brpoplpush(self, sourcekey, destkey, timeout=0, encoding=_NOTSET):
"""Remove and get the last element in a list, or block until one
is available.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0
"""
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
return self.execute(b'BRPOPLPUSH', sourcekey, destkey, timeout,
encoding=encoding)
|
[
"def",
"brpoplpush",
"(",
"self",
",",
"sourcekey",
",",
"destkey",
",",
"timeout",
"=",
"0",
",",
"encoding",
"=",
"_NOTSET",
")",
":",
"if",
"not",
"isinstance",
"(",
"timeout",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"timeout argument must be int\"",
")",
"if",
"timeout",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"timeout must be greater equal 0\"",
")",
"return",
"self",
".",
"execute",
"(",
"b'BRPOPLPUSH'",
",",
"sourcekey",
",",
"destkey",
",",
"timeout",
",",
"encoding",
"=",
"encoding",
")"
] |
Remove and get the last element in a list, or block until one
is available.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0
|
[
"Remove",
"and",
"get",
"the",
"last",
"element",
"in",
"a",
"list",
"or",
"block",
"until",
"one",
"is",
"available",
"."
] |
python
|
train
|
quantmind/pulsar
|
pulsar/utils/pylib/redisparser.py
|
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/redisparser.py#L86-L91
|
def get(self):
'''Called by the protocol consumer'''
if self._current:
return self._resume(self._current, False)
else:
return self._get(None)
|
[
"def",
"get",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current",
":",
"return",
"self",
".",
"_resume",
"(",
"self",
".",
"_current",
",",
"False",
")",
"else",
":",
"return",
"self",
".",
"_get",
"(",
"None",
")"
] |
Called by the protocol consumer
|
[
"Called",
"by",
"the",
"protocol",
"consumer"
] |
python
|
train
|
spyder-ide/spyder
|
spyder/app/mainwindow.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2301-L2309
|
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
|
[
"def",
"change_last_focused_widget",
"(",
"self",
",",
"old",
",",
"now",
")",
":",
"if",
"(",
"now",
"is",
"None",
"and",
"QApplication",
".",
"activeWindow",
"(",
")",
"is",
"not",
"None",
")",
":",
"QApplication",
".",
"activeWindow",
"(",
")",
".",
"setFocus",
"(",
")",
"self",
".",
"last_focused_widget",
"=",
"QApplication",
".",
"focusWidget",
"(",
")",
"elif",
"now",
"is",
"not",
"None",
":",
"self",
".",
"last_focused_widget",
"=",
"now",
"self",
".",
"previous_focused_widget",
"=",
"old"
] |
To keep track of to the last focused widget
|
[
"To",
"keep",
"track",
"of",
"to",
"the",
"last",
"focused",
"widget"
] |
python
|
train
|
troeger/opensubmit
|
executor/opensubmitexec/job.py
|
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/executor/opensubmitexec/job.py#L176-L196
|
def run_program(self, name, arguments=[], timeout=30, exclusive=False):
"""Runs a program in the working directory to completion.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
tuple: A tuple of the exit code, as reported by the operating system,
and the output produced during the execution.
"""
logger.debug("Running program ...")
if exclusive:
kill_longrunning(self.config)
prog = RunningProgram(self, name, arguments, timeout)
return prog.expect_end()
|
[
"def",
"run_program",
"(",
"self",
",",
"name",
",",
"arguments",
"=",
"[",
"]",
",",
"timeout",
"=",
"30",
",",
"exclusive",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Running program ...\"",
")",
"if",
"exclusive",
":",
"kill_longrunning",
"(",
"self",
".",
"config",
")",
"prog",
"=",
"RunningProgram",
"(",
"self",
",",
"name",
",",
"arguments",
",",
"timeout",
")",
"return",
"prog",
".",
"expect_end",
"(",
")"
] |
Runs a program in the working directory to completion.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
tuple: A tuple of the exit code, as reported by the operating system,
and the output produced during the execution.
|
[
"Runs",
"a",
"program",
"in",
"the",
"working",
"directory",
"to",
"completion",
"."
] |
python
|
train
|
tijme/not-your-average-web-crawler
|
nyawc/Crawler.py
|
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/Crawler.py#L122-L143
|
def __spawn_new_request(self):
"""Spawn the first queued request if there is one available.
Returns:
bool: True if a new request was spawned, false otherwise.
"""
first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)
if first_in_line is None:
return False
while self.routing.is_treshold_reached(first_in_line.request):
self.queue.move(first_in_line, QueueItem.STATUS_CANCELLED)
first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)
if first_in_line is None:
return False
self.__request_start(first_in_line)
return True
|
[
"def",
"__spawn_new_request",
"(",
"self",
")",
":",
"first_in_line",
"=",
"self",
".",
"queue",
".",
"get_first",
"(",
"QueueItem",
".",
"STATUS_QUEUED",
")",
"if",
"first_in_line",
"is",
"None",
":",
"return",
"False",
"while",
"self",
".",
"routing",
".",
"is_treshold_reached",
"(",
"first_in_line",
".",
"request",
")",
":",
"self",
".",
"queue",
".",
"move",
"(",
"first_in_line",
",",
"QueueItem",
".",
"STATUS_CANCELLED",
")",
"first_in_line",
"=",
"self",
".",
"queue",
".",
"get_first",
"(",
"QueueItem",
".",
"STATUS_QUEUED",
")",
"if",
"first_in_line",
"is",
"None",
":",
"return",
"False",
"self",
".",
"__request_start",
"(",
"first_in_line",
")",
"return",
"True"
] |
Spawn the first queued request if there is one available.
Returns:
bool: True if a new request was spawned, false otherwise.
|
[
"Spawn",
"the",
"first",
"queued",
"request",
"if",
"there",
"is",
"one",
"available",
"."
] |
python
|
train
|
Jaymon/captain
|
captain/__init__.py
|
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/__init__.py#L79-L116
|
def exit(mod_name=""):
"""A stand-in for the normal sys.exit()
all the magic happens here, when this is called at the end of a script it will
figure out all the available commands and arguments that can be passed in,
then handle exiting the script and returning the status code.
:Example:
from captain import exit
exit(__name__)
This also acts as a guard against the script being traditionally imported, so
even if you have this at the end of the script, it won't actually exit if the
script is traditionally imported
"""
if mod_name and mod_name == "__main__":
calling_mod = sys.modules.get("__main__", None)
else:
calling_mod = discover_if_calling_mod()
if calling_mod:
s = Script(inspect.getfile(calling_mod), module=calling_mod)
raw_args = sys.argv[1:]
try:
ret_code = s.run(raw_args)
except Stop as e:
ret_code = e.code
msg = str(e)
if msg:
if ret_code != 0:
echo.err(msg)
else:
echo.out(msg)
sys.exit(ret_code)
|
[
"def",
"exit",
"(",
"mod_name",
"=",
"\"\"",
")",
":",
"if",
"mod_name",
"and",
"mod_name",
"==",
"\"__main__\"",
":",
"calling_mod",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"\"__main__\"",
",",
"None",
")",
"else",
":",
"calling_mod",
"=",
"discover_if_calling_mod",
"(",
")",
"if",
"calling_mod",
":",
"s",
"=",
"Script",
"(",
"inspect",
".",
"getfile",
"(",
"calling_mod",
")",
",",
"module",
"=",
"calling_mod",
")",
"raw_args",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"try",
":",
"ret_code",
"=",
"s",
".",
"run",
"(",
"raw_args",
")",
"except",
"Stop",
"as",
"e",
":",
"ret_code",
"=",
"e",
".",
"code",
"msg",
"=",
"str",
"(",
"e",
")",
"if",
"msg",
":",
"if",
"ret_code",
"!=",
"0",
":",
"echo",
".",
"err",
"(",
"msg",
")",
"else",
":",
"echo",
".",
"out",
"(",
"msg",
")",
"sys",
".",
"exit",
"(",
"ret_code",
")"
] |
A stand-in for the normal sys.exit()
all the magic happens here, when this is called at the end of a script it will
figure out all the available commands and arguments that can be passed in,
then handle exiting the script and returning the status code.
:Example:
from captain import exit
exit(__name__)
This also acts as a guard against the script being traditionally imported, so
even if you have this at the end of the script, it won't actually exit if the
script is traditionally imported
|
[
"A",
"stand",
"-",
"in",
"for",
"the",
"normal",
"sys",
".",
"exit",
"()"
] |
python
|
valid
|
DataDog/integrations-core
|
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L352-L360
|
def _elements_to_dict(data, position, obj_end, opts):
"""Decode a BSON document."""
result = opts.document_class()
pos = position
for key, value, pos in _iterate_elements(data, position, obj_end, opts):
result[key] = value
if pos != obj_end:
raise InvalidBSON('bad object or element length')
return result
|
[
"def",
"_elements_to_dict",
"(",
"data",
",",
"position",
",",
"obj_end",
",",
"opts",
")",
":",
"result",
"=",
"opts",
".",
"document_class",
"(",
")",
"pos",
"=",
"position",
"for",
"key",
",",
"value",
",",
"pos",
"in",
"_iterate_elements",
"(",
"data",
",",
"position",
",",
"obj_end",
",",
"opts",
")",
":",
"result",
"[",
"key",
"]",
"=",
"value",
"if",
"pos",
"!=",
"obj_end",
":",
"raise",
"InvalidBSON",
"(",
"'bad object or element length'",
")",
"return",
"result"
] |
Decode a BSON document.
|
[
"Decode",
"a",
"BSON",
"document",
"."
] |
python
|
train
|
aleontiev/dj
|
dj/generator.py
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/generator.py#L44-L66
|
def render(self):
"""Render the blueprint into a temp directory using the context."""
context = self.context
if 'app' not in context:
context['app'] = self.application.name
temp_dir = self.temp_dir
templates_root = self.blueprint.templates_directory
for root, dirs, files in os.walk(templates_root):
for directory in dirs:
directory = os.path.join(root, directory)
directory = render_from_string(directory, context)
directory = directory.replace(templates_root, temp_dir, 1)
os.mkdir(directory)
for file in files:
full_file = os.path.join(root, file)
stat = os.stat(full_file)
content = render_from_file(full_file, context)
full_file = strip_extension(
render_from_string(full_file, context))
full_file = full_file.replace(templates_root, temp_dir, 1)
with open(full_file, 'w') as f:
f.write(content)
os.chmod(full_file, stat.st_mode)
|
[
"def",
"render",
"(",
"self",
")",
":",
"context",
"=",
"self",
".",
"context",
"if",
"'app'",
"not",
"in",
"context",
":",
"context",
"[",
"'app'",
"]",
"=",
"self",
".",
"application",
".",
"name",
"temp_dir",
"=",
"self",
".",
"temp_dir",
"templates_root",
"=",
"self",
".",
"blueprint",
".",
"templates_directory",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"templates_root",
")",
":",
"for",
"directory",
"in",
"dirs",
":",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"directory",
")",
"directory",
"=",
"render_from_string",
"(",
"directory",
",",
"context",
")",
"directory",
"=",
"directory",
".",
"replace",
"(",
"templates_root",
",",
"temp_dir",
",",
"1",
")",
"os",
".",
"mkdir",
"(",
"directory",
")",
"for",
"file",
"in",
"files",
":",
"full_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file",
")",
"stat",
"=",
"os",
".",
"stat",
"(",
"full_file",
")",
"content",
"=",
"render_from_file",
"(",
"full_file",
",",
"context",
")",
"full_file",
"=",
"strip_extension",
"(",
"render_from_string",
"(",
"full_file",
",",
"context",
")",
")",
"full_file",
"=",
"full_file",
".",
"replace",
"(",
"templates_root",
",",
"temp_dir",
",",
"1",
")",
"with",
"open",
"(",
"full_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"content",
")",
"os",
".",
"chmod",
"(",
"full_file",
",",
"stat",
".",
"st_mode",
")"
] |
Render the blueprint into a temp directory using the context.
|
[
"Render",
"the",
"blueprint",
"into",
"a",
"temp",
"directory",
"using",
"the",
"context",
"."
] |
python
|
train
|
openai/universe
|
universe/remotes/allocator_remote.py
|
https://github.com/openai/universe/blob/cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c/universe/remotes/allocator_remote.py#L166-L169
|
def allocate(self, handles, initial=False, params={}):
"""Call from main thread. Initiate a request for more environments"""
assert all(re.search('^\d+$', h) for h in handles), "All handles must be numbers: {}".format(handles)
self.requests.put(('allocate', (handles, initial, params)))
|
[
"def",
"allocate",
"(",
"self",
",",
"handles",
",",
"initial",
"=",
"False",
",",
"params",
"=",
"{",
"}",
")",
":",
"assert",
"all",
"(",
"re",
".",
"search",
"(",
"'^\\d+$'",
",",
"h",
")",
"for",
"h",
"in",
"handles",
")",
",",
"\"All handles must be numbers: {}\"",
".",
"format",
"(",
"handles",
")",
"self",
".",
"requests",
".",
"put",
"(",
"(",
"'allocate'",
",",
"(",
"handles",
",",
"initial",
",",
"params",
")",
")",
")"
] |
Call from main thread. Initiate a request for more environments
|
[
"Call",
"from",
"main",
"thread",
".",
"Initiate",
"a",
"request",
"for",
"more",
"environments"
] |
python
|
train
|
nvbn/thefuck
|
thefuck/types.py
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/types.py#L69-L83
|
def from_raw_script(cls, raw_script):
"""Creates instance of `Command` from a list of script parts.
:type raw_script: [basestring]
:rtype: Command
:raises: EmptyCommand
"""
script = format_raw_script(raw_script)
if not script:
raise EmptyCommand
expanded = shell.from_shell(script)
output = get_output(script, expanded)
return cls(expanded, output)
|
[
"def",
"from_raw_script",
"(",
"cls",
",",
"raw_script",
")",
":",
"script",
"=",
"format_raw_script",
"(",
"raw_script",
")",
"if",
"not",
"script",
":",
"raise",
"EmptyCommand",
"expanded",
"=",
"shell",
".",
"from_shell",
"(",
"script",
")",
"output",
"=",
"get_output",
"(",
"script",
",",
"expanded",
")",
"return",
"cls",
"(",
"expanded",
",",
"output",
")"
] |
Creates instance of `Command` from a list of script parts.
:type raw_script: [basestring]
:rtype: Command
:raises: EmptyCommand
|
[
"Creates",
"instance",
"of",
"Command",
"from",
"a",
"list",
"of",
"script",
"parts",
"."
] |
python
|
train
|
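A hedged usage sketch of the classmethod above; the import paths are assumptions based on this repository's layout, and the misspelled git command is purely illustrative.

from thefuck.types import Command
from thefuck.exceptions import EmptyCommand   # assumed location of the exception

try:
    command = Command.from_raw_script(['git', 'brnch'])   # raw sys.argv-style parts
    print(command.script, command.output)
except EmptyCommand:
    print('nothing was typed')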
PyThaiNLP/pythainlp
|
pythainlp/util/date.py
|
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/util/date.py#L281-L293
|
def reign_year_to_ad(reign_year: int, reign: int) -> int:
"""
Reign year of Chakri dynasty, Thailand
"""
if int(reign) == 10:
ad = int(reign_year) + 2015
elif int(reign) == 9:
ad = int(reign_year) + 1945
elif int(reign) == 8:
ad = int(reign_year) + 1928
elif int(reign) == 7:
ad = int(reign_year) + 1924
return ad
|
[
"def",
"reign_year_to_ad",
"(",
"reign_year",
":",
"int",
",",
"reign",
":",
"int",
")",
"->",
"int",
":",
"if",
"int",
"(",
"reign",
")",
"==",
"10",
":",
"ad",
"=",
"int",
"(",
"reign_year",
")",
"+",
"2015",
"elif",
"int",
"(",
"reign",
")",
"==",
"9",
":",
"ad",
"=",
"int",
"(",
"reign_year",
")",
"+",
"1945",
"elif",
"int",
"(",
"reign",
")",
"==",
"8",
":",
"ad",
"=",
"int",
"(",
"reign_year",
")",
"+",
"1928",
"elif",
"int",
"(",
"reign",
")",
"==",
"7",
":",
"ad",
"=",
"int",
"(",
"reign_year",
")",
"+",
"1924",
"return",
"ad"
] |
Reign year of Chakri dynasty, Thailand
|
[
"Reign",
"year",
"of",
"Chakri",
"dynasty",
"Thailand"
] |
python
|
train
|
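A worked example of the conversion above: the function adds a fixed offset per reign, so year 4 of reign 10 maps to 4 + 2015 = 2019 and year 60 of reign 9 maps to 60 + 1945 = 2005. Note that reigns outside 7-10 leave `ad` unassigned, so callers should stick to the supported values; the import path below is an assumption.

from pythainlp.util.date import reign_year_to_ad   # assumed import path

print(reign_year_to_ad(4, 10))   # 2019
print(reign_year_to_ad(60, 9))   # 2005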
PolicyStat/jobtastic
|
jobtastic/task.py
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L405-L411
|
def _get_cache(self):
"""
Return the cache to use for thundering herd protection, etc.
"""
if not self._cache:
self._cache = get_cache(self.app)
return self._cache
|
[
"def",
"_get_cache",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_cache",
":",
"self",
".",
"_cache",
"=",
"get_cache",
"(",
"self",
".",
"app",
")",
"return",
"self",
".",
"_cache"
] |
Return the cache to use for thundering herd protection, etc.
|
[
"Return",
"the",
"cache",
"to",
"use",
"for",
"thundering",
"herd",
"protection",
"etc",
"."
] |
python
|
train
|
python-bugzilla/python-bugzilla
|
bugzilla/base.py
|
https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/base.py#L565-L573
|
def _login(self, user, password, restrict_login=None):
"""
Backend login method for Bugzilla3
"""
payload = {'login': user, 'password': password}
if restrict_login:
payload['restrict_login'] = True
return self._proxy.User.login(payload)
|
[
"def",
"_login",
"(",
"self",
",",
"user",
",",
"password",
",",
"restrict_login",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'login'",
":",
"user",
",",
"'password'",
":",
"password",
"}",
"if",
"restrict_login",
":",
"payload",
"[",
"'restrict_login'",
"]",
"=",
"True",
"return",
"self",
".",
"_proxy",
".",
"User",
".",
"login",
"(",
"payload",
")"
] |
Backend login method for Bugzilla3
|
[
"Backend",
"login",
"method",
"for",
"Bugzilla3"
] |
python
|
train
|
nuSTORM/gnomon
|
gnomon/processors/Fitter.py
|
https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/Fitter.py#L175-L188
|
def sort_points(self, points):
"""Take points (z,x,q) and sort by increasing z"""
new_points = []
z_lookup = {}
for z, x, Q in points:
z_lookup[z] = (z, x, Q)
z_keys = z_lookup.keys()
z_keys.sort()
for key in z_keys:
new_points.append(z_lookup[key])
return new_points
|
[
"def",
"sort_points",
"(",
"self",
",",
"points",
")",
":",
"new_points",
"=",
"[",
"]",
"z_lookup",
"=",
"{",
"}",
"for",
"z",
",",
"x",
",",
"Q",
"in",
"points",
":",
"z_lookup",
"[",
"z",
"]",
"=",
"(",
"z",
",",
"x",
",",
"Q",
")",
"z_keys",
"=",
"z_lookup",
".",
"keys",
"(",
")",
"z_keys",
".",
"sort",
"(",
")",
"for",
"key",
"in",
"z_keys",
":",
"new_points",
".",
"append",
"(",
"z_lookup",
"[",
"key",
"]",
")",
"return",
"new_points"
] |
Take points (z,x,q) and sort by increasing z
|
[
"Take",
"points",
"(",
"z",
"x",
"q",
")",
"and",
"sort",
"by",
"increasing",
"z"
] |
python
|
train
|
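The helper above calls .sort() on dict.keys(), which only works on Python 2 where keys() returns a list; a minimal Python 3 sketch of the same idea, with illustrative (z, x, Q) tuples, sorts the points directly by their z value.

points = [(3.0, 1.2, 7), (1.0, 0.5, 2), (2.0, 0.9, 4)]   # (z, x, Q), illustrative values
new_points = sorted(points, key=lambda p: p[0])           # increasing z
print(new_points)   # [(1.0, 0.5, 2), (2.0, 0.9, 4), (3.0, 1.2, 7)]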
linkedin/asciietch
|
asciietch/graph.py
|
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L133-L192
|
def asciigraph(self, values=None, max_height=None, max_width=None, label=False):
'''
Accepts a list of y values and returns an ascii graph
Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example.
'''
result = ''
border_fill_char = '*'
start_ctime = None
end_ctime = None
if not max_width:
max_width = 180
# If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values
if isinstance(values, dict):
time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps
start_timestamp = time_series_sorted[0][0]
end_timestamp = time_series_sorted[-1][0]
start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime()
end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime()
values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width)
values = [value for value in values if value is not None]
if not max_height:
max_height = min(20, max(values))
stdev = statistics.stdev(values)
mean = statistics.mean(values)
# Do value adjustments
adjusted_values = list(values)
adjusted_values = self._scale_x_values(values=values, max_width=max_width)
upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see
lower_value = min(adjusted_values)
adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False)
adjusted_values = self._round_floats_to_ints(values=adjusted_values)
# Obtain Ascii Graph String
field = self._get_ascii_field(adjusted_values)
graph_string = self._draw_ascii_graph(field=field)
# Label the graph
if label:
top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char)
result += top_label + '\n'
result += '{graph_string}\n'.format(graph_string=graph_string)
if label:
lower = f'Lower value: {lower_value:.2f} '
stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'
fill_length = max_width - len(lower) - len(stats)
stat_label = f'{lower}{"*" * fill_length}{stats}\n'
result += stat_label
if start_ctime and end_ctime:
fill_length = max_width - len(start_ctime) - len(end_ctime)
result += f'{start_ctime}{" " * fill_length}{end_ctime}\n'
return result
|
[
"def",
"asciigraph",
"(",
"self",
",",
"values",
"=",
"None",
",",
"max_height",
"=",
"None",
",",
"max_width",
"=",
"None",
",",
"label",
"=",
"False",
")",
":",
"result",
"=",
"''",
"border_fill_char",
"=",
"'*'",
"start_ctime",
"=",
"None",
"end_ctime",
"=",
"None",
"if",
"not",
"max_width",
":",
"max_width",
"=",
"180",
"# If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values",
"if",
"isinstance",
"(",
"values",
",",
"dict",
")",
":",
"time_series_sorted",
"=",
"sorted",
"(",
"list",
"(",
"values",
".",
"items",
"(",
")",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"# Sort timestamp/value dict by the timestamps",
"start_timestamp",
"=",
"time_series_sorted",
"[",
"0",
"]",
"[",
"0",
"]",
"end_timestamp",
"=",
"time_series_sorted",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"start_ctime",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"start_timestamp",
")",
")",
".",
"ctime",
"(",
")",
"end_ctime",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"end_timestamp",
")",
")",
".",
"ctime",
"(",
")",
"values",
"=",
"self",
".",
"_scale_x_values_timestamps",
"(",
"values",
"=",
"time_series_sorted",
",",
"max_width",
"=",
"max_width",
")",
"values",
"=",
"[",
"value",
"for",
"value",
"in",
"values",
"if",
"value",
"is",
"not",
"None",
"]",
"if",
"not",
"max_height",
":",
"max_height",
"=",
"min",
"(",
"20",
",",
"max",
"(",
"values",
")",
")",
"stdev",
"=",
"statistics",
".",
"stdev",
"(",
"values",
")",
"mean",
"=",
"statistics",
".",
"mean",
"(",
"values",
")",
"# Do value adjustments",
"adjusted_values",
"=",
"list",
"(",
"values",
")",
"adjusted_values",
"=",
"self",
".",
"_scale_x_values",
"(",
"values",
"=",
"values",
",",
"max_width",
"=",
"max_width",
")",
"upper_value",
"=",
"max",
"(",
"adjusted_values",
")",
"# Getting upper/lower after scaling x values so we don't label a spike we can't see",
"lower_value",
"=",
"min",
"(",
"adjusted_values",
")",
"adjusted_values",
"=",
"self",
".",
"_scale_y_values",
"(",
"values",
"=",
"adjusted_values",
",",
"new_min",
"=",
"0",
",",
"new_max",
"=",
"max_height",
",",
"scale_old_from_zero",
"=",
"False",
")",
"adjusted_values",
"=",
"self",
".",
"_round_floats_to_ints",
"(",
"values",
"=",
"adjusted_values",
")",
"# Obtain Ascii Graph String",
"field",
"=",
"self",
".",
"_get_ascii_field",
"(",
"adjusted_values",
")",
"graph_string",
"=",
"self",
".",
"_draw_ascii_graph",
"(",
"field",
"=",
"field",
")",
"# Label the graph",
"if",
"label",
":",
"top_label",
"=",
"'Upper value: {upper_value:.2f} '",
".",
"format",
"(",
"upper_value",
"=",
"upper_value",
")",
".",
"ljust",
"(",
"max_width",
",",
"border_fill_char",
")",
"result",
"+=",
"top_label",
"+",
"'\\n'",
"result",
"+=",
"'{graph_string}\\n'",
".",
"format",
"(",
"graph_string",
"=",
"graph_string",
")",
"if",
"label",
":",
"lower",
"=",
"f'Lower value: {lower_value:.2f} '",
"stats",
"=",
"f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'",
"fill_length",
"=",
"max_width",
"-",
"len",
"(",
"lower",
")",
"-",
"len",
"(",
"stats",
")",
"stat_label",
"=",
"f'{lower}{\"*\" * fill_length}{stats}\\n'",
"result",
"+=",
"stat_label",
"if",
"start_ctime",
"and",
"end_ctime",
":",
"fill_length",
"=",
"max_width",
"-",
"len",
"(",
"start_ctime",
")",
"-",
"len",
"(",
"end_ctime",
")",
"result",
"+=",
"f'{start_ctime}{\" \" * fill_length}{end_ctime}\\n'",
"return",
"result"
] |
Accepts a list of y values and returns an ascii graph
Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example.
|
[
"Accepts",
"a",
"list",
"of",
"y",
"values",
"and",
"returns",
"an",
"ascii",
"graph",
"Optionally",
"values",
"can",
"also",
"be",
"a",
"dictionary",
"with",
"a",
"key",
"of",
"timestamp",
"and",
"a",
"value",
"of",
"value",
".",
"InGraphs",
"returns",
"data",
"in",
"this",
"format",
"for",
"example",
"."
] |
python
|
train
|
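A hedged usage sketch of the method above; the Grapher class name is assumed from this repository's public API, and the values are arbitrary.

from asciietch.graph import Grapher   # assumed class exposing asciigraph

g = Grapher()
print(g.asciigraph(values=[1, 2, 4, 8, 4, 2, 1], max_height=5, max_width=40, label=True))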
metapensiero/metapensiero.signal
|
src/metapensiero/signal/user.py
|
https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L145-L157
|
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
|
[
"def",
"_check_local_handlers",
"(",
"cls",
",",
"signals",
",",
"handlers",
",",
"namespace",
",",
"configs",
")",
":",
"for",
"aname",
",",
"sig_name",
"in",
"handlers",
".",
"items",
"(",
")",
":",
"# WARN: this code doesn't take in account the case where a new",
"# method with the same name of an handler in a base class is",
"# present in this class but it isn't an handler (so the handler",
"# with the same name should be removed from the handlers)",
"if",
"sig_name",
"not",
"in",
"signals",
":",
"disable_check",
"=",
"configs",
"[",
"aname",
"]",
".",
"get",
"(",
"'disable_check'",
",",
"False",
")",
"if",
"not",
"disable_check",
":",
"raise",
"SignalError",
"(",
"\"Cannot find a signal named '%s'\"",
"%",
"sig_name",
")"
] |
For every marked handler, see if there is a suitable signal. If
not, raise an error.
|
[
"For",
"every",
"marked",
"handler",
"see",
"if",
"there",
"is",
"a",
"suitable",
"signal",
".",
"If",
"not",
"raise",
"an",
"error",
"."
] |
python
|
train
|
facelessuser/backrefs
|
backrefs/_bre_parse.py
|
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bre_parse.py#L1078-L1092
|
def get_group(self, t, i):
"""Get group number."""
try:
value = []
if t in _DIGIT and t != '0':
value.append(t)
t = next(i)
if t in _DIGIT:
value.append(t)
else:
i.rewind(1)
except StopIteration:
pass
return ''.join(value) if value else None
|
[
"def",
"get_group",
"(",
"self",
",",
"t",
",",
"i",
")",
":",
"try",
":",
"value",
"=",
"[",
"]",
"if",
"t",
"in",
"_DIGIT",
"and",
"t",
"!=",
"'0'",
":",
"value",
".",
"append",
"(",
"t",
")",
"t",
"=",
"next",
"(",
"i",
")",
"if",
"t",
"in",
"_DIGIT",
":",
"value",
".",
"append",
"(",
"t",
")",
"else",
":",
"i",
".",
"rewind",
"(",
"1",
")",
"except",
"StopIteration",
":",
"pass",
"return",
"''",
".",
"join",
"(",
"value",
")",
"if",
"value",
"else",
"None"
] |
Get group number.
|
[
"Get",
"group",
"number",
"."
] |
python
|
train
|
spulec/moto
|
scripts/scaffold.py
|
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/scripts/scaffold.py#L167-L216
|
def initialize_service(service, operation, api_protocol):
"""create lib and test dirs if not exist
"""
lib_dir = get_lib_dir(service)
test_dir = get_test_dir(service)
print_progress('Initializing service', service, 'green')
client = boto3.client(service)
service_class = client.__class__.__name__
endpoint_prefix = client._service_model.endpoint_prefix
tmpl_context = {
'service': service,
'service_class': service_class,
'endpoint_prefix': endpoint_prefix,
'api_protocol': api_protocol,
'escaped_service': get_escaped_service(service)
}
# initialize service directory
if os.path.exists(lib_dir):
print_progress('skip creating', lib_dir, 'yellow')
else:
print_progress('creating', lib_dir, 'green')
os.makedirs(lib_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib')
for tmpl_filename in os.listdir(tmpl_dir):
render_template(
tmpl_dir, tmpl_filename, tmpl_context, service
)
# initialize test directory
if os.path.exists(test_dir):
print_progress('skip creating', test_dir, 'yellow')
else:
print_progress('creating', test_dir, 'green')
os.makedirs(test_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
for tmpl_filename in os.listdir(tmpl_dir):
alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
render_template(
tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
)
# append mock to init files
append_mock_to_init_py(service)
append_mock_import_to_backends_py(service)
append_mock_dict_to_backends_py(service)
|
[
"def",
"initialize_service",
"(",
"service",
",",
"operation",
",",
"api_protocol",
")",
":",
"lib_dir",
"=",
"get_lib_dir",
"(",
"service",
")",
"test_dir",
"=",
"get_test_dir",
"(",
"service",
")",
"print_progress",
"(",
"'Initializing service'",
",",
"service",
",",
"'green'",
")",
"client",
"=",
"boto3",
".",
"client",
"(",
"service",
")",
"service_class",
"=",
"client",
".",
"__class__",
".",
"__name__",
"endpoint_prefix",
"=",
"client",
".",
"_service_model",
".",
"endpoint_prefix",
"tmpl_context",
"=",
"{",
"'service'",
":",
"service",
",",
"'service_class'",
":",
"service_class",
",",
"'endpoint_prefix'",
":",
"endpoint_prefix",
",",
"'api_protocol'",
":",
"api_protocol",
",",
"'escaped_service'",
":",
"get_escaped_service",
"(",
"service",
")",
"}",
"# initialize service directory",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"lib_dir",
")",
":",
"print_progress",
"(",
"'skip creating'",
",",
"lib_dir",
",",
"'yellow'",
")",
"else",
":",
"print_progress",
"(",
"'creating'",
",",
"lib_dir",
",",
"'green'",
")",
"os",
".",
"makedirs",
"(",
"lib_dir",
")",
"tmpl_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"TEMPLATE_DIR",
",",
"'lib'",
")",
"for",
"tmpl_filename",
"in",
"os",
".",
"listdir",
"(",
"tmpl_dir",
")",
":",
"render_template",
"(",
"tmpl_dir",
",",
"tmpl_filename",
",",
"tmpl_context",
",",
"service",
")",
"# initialize test directory",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"test_dir",
")",
":",
"print_progress",
"(",
"'skip creating'",
",",
"test_dir",
",",
"'yellow'",
")",
"else",
":",
"print_progress",
"(",
"'creating'",
",",
"test_dir",
",",
"'green'",
")",
"os",
".",
"makedirs",
"(",
"test_dir",
")",
"tmpl_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"TEMPLATE_DIR",
",",
"'test'",
")",
"for",
"tmpl_filename",
"in",
"os",
".",
"listdir",
"(",
"tmpl_dir",
")",
":",
"alt_filename",
"=",
"'test_{}.py'",
".",
"format",
"(",
"get_escaped_service",
"(",
"service",
")",
")",
"if",
"tmpl_filename",
"==",
"'test_service.py.j2'",
"else",
"None",
"render_template",
"(",
"tmpl_dir",
",",
"tmpl_filename",
",",
"tmpl_context",
",",
"service",
",",
"alt_filename",
")",
"# append mock to init files",
"append_mock_to_init_py",
"(",
"service",
")",
"append_mock_import_to_backends_py",
"(",
"service",
")",
"append_mock_dict_to_backends_py",
"(",
"service",
")"
] |
create lib and test dirs if not exist
|
[
"create",
"lib",
"and",
"test",
"dirs",
"if",
"not",
"exist"
] |
python
|
train
|
nabla-c0d3/sslyze
|
sslyze/utils/http_response_parser.py
|
https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/sslyze/utils/http_response_parser.py#L26-L37
|
def _parse(read_method: Callable) -> HTTPResponse:
"""Trick to standardize the API between sockets and SSLConnection objects.
"""
response = read_method(4096)
while b'HTTP/' not in response or b'\r\n\r\n' not in response:
# Parse until the end of the headers
response += read_method(4096)
fake_sock = _FakeSocket(response)
response = HTTPResponse(fake_sock) # type: ignore
response.begin()
return response
|
[
"def",
"_parse",
"(",
"read_method",
":",
"Callable",
")",
"->",
"HTTPResponse",
":",
"response",
"=",
"read_method",
"(",
"4096",
")",
"while",
"b'HTTP/'",
"not",
"in",
"response",
"or",
"b'\\r\\n\\r\\n'",
"not",
"in",
"response",
":",
"# Parse until the end of the headers",
"response",
"+=",
"read_method",
"(",
"4096",
")",
"fake_sock",
"=",
"_FakeSocket",
"(",
"response",
")",
"response",
"=",
"HTTPResponse",
"(",
"fake_sock",
")",
"# type: ignore",
"response",
".",
"begin",
"(",
")",
"return",
"response"
] |
Trick to standardize the API between sockets and SSLConnection objects.
|
[
"Trick",
"to",
"standardize",
"the",
"API",
"between",
"sockets",
"and",
"SSLConnection",
"objects",
"."
] |
python
|
train
|
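A minimal sketch of how the parser above can be fed; only a bytes-returning read method is required, so a plain socket's recv works (the host and request below are illustrative).

import socket

sock = socket.create_connection(('example.org', 80))
sock.sendall(b'GET / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n')
response = _parse(sock.recv)          # reuse the helper above with the socket's recv
print(response.status, response.getheader('Content-Type'))
sock.close()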
iotile/coretools
|
iotilesensorgraph/iotile/sg/sensor_log.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/sensor_log.py#L382-L419
|
def inspect_last(self, stream, only_allocated=False):
"""Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream.
"""
if only_allocated:
found = False
for walker in self._virtual_walkers:
if walker.matches(stream):
found = True
break
if not found:
raise UnresolvedIdentifierError("inspect_last could not find an allocated virtual streamer for the desired stream", stream=stream)
if stream in self._last_values:
return self._last_values[stream]
raise StreamEmptyError(u"inspect_last called on stream that has never been written to", stream=stream)
|
[
"def",
"inspect_last",
"(",
"self",
",",
"stream",
",",
"only_allocated",
"=",
"False",
")",
":",
"if",
"only_allocated",
":",
"found",
"=",
"False",
"for",
"walker",
"in",
"self",
".",
"_virtual_walkers",
":",
"if",
"walker",
".",
"matches",
"(",
"stream",
")",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"raise",
"UnresolvedIdentifierError",
"(",
"\"inspect_last could not find an allocated virtual streamer for the desired stream\"",
",",
"stream",
"=",
"stream",
")",
"if",
"stream",
"in",
"self",
".",
"_last_values",
":",
"return",
"self",
".",
"_last_values",
"[",
"stream",
"]",
"raise",
"StreamEmptyError",
"(",
"u\"inspect_last called on stream that has never been written to\"",
",",
"stream",
"=",
"stream",
")"
] |
Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream.
|
[
"Return",
"the",
"last",
"value",
"pushed",
"into",
"a",
"stream",
"."
] |
python
|
train
|
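A hedged sketch of the inspection call described above; sensor_log and stream stand for an existing SensorLog and DataStream from this package, and the exception import path is an assumption.

from iotile.sg.exceptions import StreamEmptyError, UnresolvedIdentifierError   # assumed path

try:
    reading = sensor_log.inspect_last(stream, only_allocated=True)
    print(reading.value)
except UnresolvedIdentifierError:
    print('no virtual stream walker allocated for this stream')
except StreamEmptyError:
    print('stream has never been written to')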
dossier/dossier.models
|
dossier/models/features/sip.py
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/features/sip.py#L27-L87
|
def noun_phrases_as_tokens(text):
'''Generate a bag of lists of unnormalized tokens representing noun
phrases from ``text``.
This is built around python's nltk library for getting Noun
Phrases (NPs). This is all documented in the NLTK Book
http://www.nltk.org/book/ch03.html and blog posts that cite the
book.
:rtype: list of lists of strings
'''
## from NLTK Book:
sentence_re = r'''(?x) # set flag to allow verbose regexps
([A-Z])(\.[A-Z])+\.? # abbreviations, e.g. U.S.A.
| \w+(-\w+)* # words with optional internal hyphens
| \$?\d+(\.\d+)?%? # currency and percentages, e.g. $12.40, 82%
| \.\.\. # ellipsis
| [][.,;"'?():-_`] # these are separate tokens
'''
## From Su Nam Kim paper:
## http://www.comp.nus.edu.sg/~kanmy/papers/10.1007_s10579-012-9210-3.pdf
grammar = r'''
NBAR:
{<NN.*|JJ>*<NN.*>} # Nouns and Adjectives, terminated with Nouns
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
'''
if len(text.strip()) == 0:
return []
chunker = nltk.RegexpParser(grammar)
toks = nltk.regexp_tokenize(text, sentence_re)
postoks = nltk.tag.pos_tag(toks)
#print postoks
tree = chunker.parse(postoks)
stops = stopwords.words('english')
stops += dossier_stopwords()
## These next four functions are standard uses of NLTK illustrated by
## http://alexbowe.com/au-naturale/
## https://gist.github.com/alexbowe/879414
def leaves(tree):
'''Finds NP (nounphrase) leaf nodes of a chunk tree.'''
for subtree in tree.subtrees(filter = lambda t: t.label()=='NP'):
yield subtree.leaves()
def acceptable_word(word):
'''Checks conditions for acceptable word: length, stopword.'''
return 2 <= len(word) <= 40 and word.lower() not in stops
def get_terms(tree):
for leaf in leaves(tree):
yield [w for w,t in leaf if acceptable_word(w)]
return list(get_terms(tree))
|
[
"def",
"noun_phrases_as_tokens",
"(",
"text",
")",
":",
"## from NLTK Book:",
"sentence_re",
"=",
"r'''(?x) # set flag to allow verbose regexps\n ([A-Z])(\\.[A-Z])+\\.? # abbreviations, e.g. U.S.A.\n | \\w+(-\\w+)* # words with optional internal hyphens\n | \\$?\\d+(\\.\\d+)?%? # currency and percentages, e.g. $12.40, 82%\n | \\.\\.\\. # ellipsis\n | [][.,;\"'?():-_`] # these are separate tokens\n '''",
"## From Su Nam Kim paper:",
"## http://www.comp.nus.edu.sg/~kanmy/papers/10.1007_s10579-012-9210-3.pdf",
"grammar",
"=",
"r'''\n NBAR:\n {<NN.*|JJ>*<NN.*>} # Nouns and Adjectives, terminated with Nouns\n\n NP:\n {<NBAR>}\n {<NBAR><IN><NBAR>} # Above, connected with in/of/etc...\n '''",
"if",
"len",
"(",
"text",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"return",
"[",
"]",
"chunker",
"=",
"nltk",
".",
"RegexpParser",
"(",
"grammar",
")",
"toks",
"=",
"nltk",
".",
"regexp_tokenize",
"(",
"text",
",",
"sentence_re",
")",
"postoks",
"=",
"nltk",
".",
"tag",
".",
"pos_tag",
"(",
"toks",
")",
"#print postoks",
"tree",
"=",
"chunker",
".",
"parse",
"(",
"postoks",
")",
"stops",
"=",
"stopwords",
".",
"words",
"(",
"'english'",
")",
"stops",
"+=",
"dossier_stopwords",
"(",
")",
"## These next four functions are standard uses of NLTK illustrated by",
"## http://alexbowe.com/au-naturale/",
"## https://gist.github.com/alexbowe/879414",
"def",
"leaves",
"(",
"tree",
")",
":",
"'''Finds NP (nounphrase) leaf nodes of a chunk tree.'''",
"for",
"subtree",
"in",
"tree",
".",
"subtrees",
"(",
"filter",
"=",
"lambda",
"t",
":",
"t",
".",
"label",
"(",
")",
"==",
"'NP'",
")",
":",
"yield",
"subtree",
".",
"leaves",
"(",
")",
"def",
"acceptable_word",
"(",
"word",
")",
":",
"'''Checks conditions for acceptable word: length, stopword.'''",
"return",
"2",
"<=",
"len",
"(",
"word",
")",
"<=",
"40",
"and",
"word",
".",
"lower",
"(",
")",
"not",
"in",
"stops",
"def",
"get_terms",
"(",
"tree",
")",
":",
"for",
"leaf",
"in",
"leaves",
"(",
"tree",
")",
":",
"yield",
"[",
"w",
"for",
"w",
",",
"t",
"in",
"leaf",
"if",
"acceptable_word",
"(",
"w",
")",
"]",
"return",
"list",
"(",
"get_terms",
"(",
"tree",
")",
")"
] |
Generate a bag of lists of unnormalized tokens representing noun
phrases from ``text``.
This is built around python's nltk library for getting Noun
Phrases (NPs). This is all documented in the NLTK Book
http://www.nltk.org/book/ch03.html and blog posts that cite the
book.
:rtype: list of lists of strings
|
[
"Generate",
"a",
"bag",
"of",
"lists",
"of",
"unnormalized",
"tokens",
"representing",
"noun",
"phrases",
"from",
"text",
"."
] |
python
|
train
|
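A hedged usage sketch for the extractor above; the import path follows this repository's layout, and the NLTK data it relies on (the POS tagger and the stopword corpus) must already be downloaded.

from dossier.models.features.sip import noun_phrases_as_tokens   # assumed import path

text = 'The quick brown fox jumped over the lazy dog near the old stone bridge.'
for phrase in noun_phrases_as_tokens(text):
    print(phrase)   # e.g. ['quick', 'brown', 'fox'], ['lazy', 'dog'], ['old', 'stone', 'bridge']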
andreikop/qutepart
|
qutepart/vim.py
|
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/vim.py#L116-L130
|
def keyPressEvent(self, ev):
"""Check the event. Return True if processed and False otherwise
"""
if ev.key() in (Qt.Key_Shift, Qt.Key_Control,
Qt.Key_Meta, Qt.Key_Alt,
Qt.Key_AltGr, Qt.Key_CapsLock,
Qt.Key_NumLock, Qt.Key_ScrollLock):
return False # ignore modifier pressing. Will process key pressing later
self._processingKeyPress = True
try:
ret = self._mode.keyPressEvent(ev)
finally:
self._processingKeyPress = False
return ret
|
[
"def",
"keyPressEvent",
"(",
"self",
",",
"ev",
")",
":",
"if",
"ev",
".",
"key",
"(",
")",
"in",
"(",
"Qt",
".",
"Key_Shift",
",",
"Qt",
".",
"Key_Control",
",",
"Qt",
".",
"Key_Meta",
",",
"Qt",
".",
"Key_Alt",
",",
"Qt",
".",
"Key_AltGr",
",",
"Qt",
".",
"Key_CapsLock",
",",
"Qt",
".",
"Key_NumLock",
",",
"Qt",
".",
"Key_ScrollLock",
")",
":",
"return",
"False",
"# ignore modifier pressing. Will process key pressing later",
"self",
".",
"_processingKeyPress",
"=",
"True",
"try",
":",
"ret",
"=",
"self",
".",
"_mode",
".",
"keyPressEvent",
"(",
"ev",
")",
"finally",
":",
"self",
".",
"_processingKeyPress",
"=",
"False",
"return",
"ret"
] |
Check the event. Return True if processed and False otherwise
|
[
"Check",
"the",
"event",
".",
"Return",
"True",
"if",
"processed",
"and",
"False",
"otherwise"
] |
python
|
train
|
KelSolaar/Manager
|
manager/components_manager.py
|
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1322-L1342
|
def get_interface(self, component):
"""
Gets given Component interface.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component to get the interface.
:type component: unicode
:return: Component interface.
:rtype: object
"""
profile = self.get_profile(component)
if profile:
return profile.interface
|
[
"def",
"get_interface",
"(",
"self",
",",
"component",
")",
":",
"profile",
"=",
"self",
".",
"get_profile",
"(",
"component",
")",
"if",
"profile",
":",
"return",
"profile",
".",
"interface"
] |
Gets given Component interface.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component to get the interface.
:type component: unicode
:return: Component interface.
:rtype: object
|
[
"Gets",
"given",
"Component",
"interface",
"."
] |
python
|
train
|
llazzaro/analyzerstrategies
|
analyzerstrategies/zscorePortfolioStrategy.py
|
https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L74-L81
|
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):
return account.getTotalValue() / self.__buyingRatio
else:
return 0
|
[
"def",
"__getCashToBuyStock",
"(",
"self",
")",
":",
"account",
"=",
"self",
".",
"__strategy",
".",
"getAccountCopy",
"(",
")",
"if",
"(",
"account",
".",
"buyingPower",
">=",
"account",
".",
"getTotalValue",
"(",
")",
"/",
"self",
".",
"__buyingRatio",
")",
":",
"return",
"account",
".",
"getTotalValue",
"(",
")",
"/",
"self",
".",
"__buyingRatio",
"else",
":",
"return",
"0"
] |
calculate the amount of money to buy stock
|
[
"calculate",
"the",
"amount",
"of",
"money",
"to",
"buy",
"stock"
] |
python
|
train
|
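A worked example of the position-sizing rule above: with a buying ratio of 4 and a total account value of 10,000, the target allocation is 10,000 / 4 = 2,500, granted only when at least that much buying power is free (all numbers illustrative).

total_value = 10000.0
buying_power = 3000.0
buying_ratio = 4

target = total_value / buying_ratio                     # 2500.0
cash_to_buy = target if buying_power >= target else 0   # mirrors the check above
print(cash_to_buy)                                       # 2500.0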
ilevkivskyi/typing_inspect
|
typing_inspect.py
|
https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L140-L151
|
def is_union_type(tp):
"""Test if the type is a union type. Examples::
is_union_type(int) == False
is_union_type(Union) == True
is_union_type(Union[int, int]) == False
is_union_type(Union[T, int]) == True
"""
if NEW_TYPING:
return (tp is Union or
isinstance(tp, _GenericAlias) and tp.__origin__ is Union)
return type(tp) is _Union
|
[
"def",
"is_union_type",
"(",
"tp",
")",
":",
"if",
"NEW_TYPING",
":",
"return",
"(",
"tp",
"is",
"Union",
"or",
"isinstance",
"(",
"tp",
",",
"_GenericAlias",
")",
"and",
"tp",
".",
"__origin__",
"is",
"Union",
")",
"return",
"type",
"(",
"tp",
")",
"is",
"_Union"
] |
Test if the type is a union type. Examples::
is_union_type(int) == False
is_union_type(Union) == True
is_union_type(Union[int, int]) == False
is_union_type(Union[T, int]) == True
|
[
"Test",
"if",
"the",
"type",
"is",
"a",
"union",
"type",
".",
"Examples",
"::"
] |
python
|
train
|
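One hedged addition to the docstring examples above: Optional[X] is defined as Union[X, None], so it also passes the check.

from typing import Optional, TypeVar, Union
from typing_inspect import is_union_type

T = TypeVar('T')
print(is_union_type(Optional[int]))   # True, Optional[int] is Union[int, None]
print(is_union_type(Union[T, int]))   # True, as in the docstring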
hugapi/hug
|
hug/routing.py
|
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/routing.py#L256-L277
|
def allow_origins(self, *origins, methods=None, max_age=None, credentials=None, headers=None, **overrides):
"""Convenience method for quickly allowing other resources to access this one"""
response_headers = {}
if origins:
@hug.response_middleware()
def process_data(request, response, resource):
if 'ORIGIN' in request.headers:
origin = request.headers['ORIGIN']
if origin in origins:
response.set_header('Access-Control-Allow-Origin', origin)
else:
response_headers['Access-Control-Allow-Origin'] = '*'
if methods:
response_headers['Access-Control-Allow-Methods'] = ', '.join(methods)
if max_age:
response_headers['Access-Control-Max-Age'] = max_age
if credentials:
response_headers['Access-Control-Allow-Credentials'] = str(credentials).lower()
if headers:
response_headers['Access-Control-Allow-Headers'] = headers
return self.add_response_headers(response_headers, **overrides)
|
[
"def",
"allow_origins",
"(",
"self",
",",
"*",
"origins",
",",
"methods",
"=",
"None",
",",
"max_age",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"overrides",
")",
":",
"response_headers",
"=",
"{",
"}",
"if",
"origins",
":",
"@",
"hug",
".",
"response_middleware",
"(",
")",
"def",
"process_data",
"(",
"request",
",",
"response",
",",
"resource",
")",
":",
"if",
"'ORIGIN'",
"in",
"request",
".",
"headers",
":",
"origin",
"=",
"request",
".",
"headers",
"[",
"'ORIGIN'",
"]",
"if",
"origin",
"in",
"origins",
":",
"response",
".",
"set_header",
"(",
"'Access-Control-Allow-Origin'",
",",
"origin",
")",
"else",
":",
"response_headers",
"[",
"'Access-Control-Allow-Origin'",
"]",
"=",
"'*'",
"if",
"methods",
":",
"response_headers",
"[",
"'Access-Control-Allow-Methods'",
"]",
"=",
"', '",
".",
"join",
"(",
"methods",
")",
"if",
"max_age",
":",
"response_headers",
"[",
"'Access-Control-Max-Age'",
"]",
"=",
"max_age",
"if",
"credentials",
":",
"response_headers",
"[",
"'Access-Control-Allow-Credentials'",
"]",
"=",
"str",
"(",
"credentials",
")",
".",
"lower",
"(",
")",
"if",
"headers",
":",
"response_headers",
"[",
"'Access-Control-Allow-Headers'",
"]",
"=",
"headers",
"return",
"self",
".",
"add_response_headers",
"(",
"response_headers",
",",
"*",
"*",
"overrides",
")"
] |
Convenience method for quickly allowing other resources to access this one
|
[
"Convenience",
"method",
"for",
"quickly",
"allowing",
"other",
"resources",
"to",
"access",
"this",
"one"
] |
python
|
train
|
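A hedged configuration sketch of the convenience method above; the chaining style and router factory are assumptions about hug's routing API, and the origins are placeholders.

import hug

router = hug.http()   # assumed router factory that exposes allow_origins
router = router.allow_origins('https://app.example.com', 'https://admin.example.com',
                              methods=('GET', 'POST'), max_age=3600, credentials=True)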
pyQode/pyqode.core
|
pyqode/core/panels/marker.py
|
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/marker.py#L36-L50
|
def icon(self):
"""
Gets the icon file name. Read-only.
"""
if isinstance(self._icon, str):
if QtGui.QIcon.hasThemeIcon(self._icon):
return QtGui.QIcon.fromTheme(self._icon)
else:
return QtGui.QIcon(self._icon)
elif isinstance(self._icon, tuple):
return QtGui.QIcon.fromTheme(self._icon[0],
QtGui.QIcon(self._icon[1]))
elif isinstance(self._icon, QtGui.QIcon):
return self._icon
return QtGui.QIcon()
|
[
"def",
"icon",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_icon",
",",
"str",
")",
":",
"if",
"QtGui",
".",
"QIcon",
".",
"hasThemeIcon",
"(",
"self",
".",
"_icon",
")",
":",
"return",
"QtGui",
".",
"QIcon",
".",
"fromTheme",
"(",
"self",
".",
"_icon",
")",
"else",
":",
"return",
"QtGui",
".",
"QIcon",
"(",
"self",
".",
"_icon",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_icon",
",",
"tuple",
")",
":",
"return",
"QtGui",
".",
"QIcon",
".",
"fromTheme",
"(",
"self",
".",
"_icon",
"[",
"0",
"]",
",",
"QtGui",
".",
"QIcon",
"(",
"self",
".",
"_icon",
"[",
"1",
"]",
")",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_icon",
",",
"QtGui",
".",
"QIcon",
")",
":",
"return",
"self",
".",
"_icon",
"return",
"QtGui",
".",
"QIcon",
"(",
")"
] |
Gets the icon file name. Read-only.
|
[
"Gets",
"the",
"icon",
"file",
"name",
".",
"Read",
"-",
"only",
"."
] |
python
|
train
|
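An illustrative summary of the three accepted forms handled above; each assignment shows one branch of the property (paths and theme names are placeholders, and the Qt shim import is an assumption).

from pyqode.qt import QtGui   # assumed Qt shim used by pyqode

icon = 'dialog-warning'                             # theme name, resolved via fromTheme
icon = ('dialog-warning', ':/icons/warning.png')    # theme name plus file fallback
icon = QtGui.QIcon(':/icons/warning.png')           # an already constructed QIcon
# any other value falls through to an empty QtGui.QIcon()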
shin-/dockerpy-creds
|
dockerpycreds/utils.py
|
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/utils.py#L6-L29
|
def find_executable(executable, path=None):
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
if sys.platform != 'win32':
return distutils.spawn.find_executable(executable, path)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable
|
[
"def",
"find_executable",
"(",
"executable",
",",
"path",
"=",
"None",
")",
":",
"if",
"sys",
".",
"platform",
"!=",
"'win32'",
":",
"return",
"distutils",
".",
"spawn",
".",
"find_executable",
"(",
"executable",
",",
"path",
")",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"paths",
"=",
"path",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"extensions",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATHEXT'",
",",
"'.exe'",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"executable",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"executable",
")",
":",
"for",
"p",
"in",
"paths",
":",
"for",
"ext",
"in",
"extensions",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"base",
"+",
"ext",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"return",
"f",
"return",
"None",
"else",
":",
"return",
"executable"
] |
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
|
[
"As",
"distutils",
".",
"spawn",
".",
"find_executable",
"but",
"on",
"Windows",
"look",
"up",
"every",
"extension",
"declared",
"in",
"PATHEXT",
"instead",
"of",
"just",
".",
"exe"
] |
python
|
train
|
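A hedged usage sketch for the lookup above; on Windows every extension in PATHEXT is tried, elsewhere the call defers to distutils, and the helper name below is only an example.

from dockerpycreds.utils import find_executable

helper = find_executable('docker-credential-desktop')   # example program name
if helper is None:
    print('credential helper not found on PATH')
else:
    print('using', helper)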
mozilla/mozdownload
|
mozdownload/scraper.py
|
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L528-L541
|
def build_filename(self, binary):
"""Return the proposed filename with extension for the binary."""
try:
# Get exact timestamp of the build to build the local file name
folder = self.builds[self.build_index]
timestamp = re.search(r'([\d\-]+)-\D.*', folder).group(1)
except Exception:
# If it's not available use the build's date
timestamp = self.date.strftime('%Y-%m-%d')
return '%(TIMESTAMP)s-%(BRANCH)s-%(NAME)s' % {
'TIMESTAMP': timestamp,
'BRANCH': self.branch,
'NAME': binary}
|
[
"def",
"build_filename",
"(",
"self",
",",
"binary",
")",
":",
"try",
":",
"# Get exact timestamp of the build to build the local file name",
"folder",
"=",
"self",
".",
"builds",
"[",
"self",
".",
"build_index",
"]",
"timestamp",
"=",
"re",
".",
"search",
"(",
"r'([\\d\\-]+)-\\D.*'",
",",
"folder",
")",
".",
"group",
"(",
"1",
")",
"except",
"Exception",
":",
"# If it's not available use the build's date",
"timestamp",
"=",
"self",
".",
"date",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
"return",
"'%(TIMESTAMP)s-%(BRANCH)s-%(NAME)s'",
"%",
"{",
"'TIMESTAMP'",
":",
"timestamp",
",",
"'BRANCH'",
":",
"self",
".",
"branch",
",",
"'NAME'",
":",
"binary",
"}"
] |
Return the proposed filename with extension for the binary.
|
[
"Return",
"the",
"proposed",
"filename",
"with",
"extension",
"for",
"the",
"binary",
"."
] |
python
|
train
|
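A worked illustration of the naming rule above, reproducing only the timestamp extraction with a made-up folder name and binary.

import re

folder = '2023-01-15-03-02-41-mozilla-central'             # illustrative build folder
timestamp = re.search(r'([\d\-]+)-\D.*', folder).group(1)  # '2023-01-15-03-02-41'
print('%(TIMESTAMP)s-%(BRANCH)s-%(NAME)s' % {
    'TIMESTAMP': timestamp, 'BRANCH': 'mozilla-central', 'NAME': 'firefox.tar.bz2'})
# 2023-01-15-03-02-41-mozilla-central-firefox.tar.bz2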
chimera0/accel-brain-code
|
Reinforcement-Learning/pyqlearning/annealing_model.py
|
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/annealing_model.py#L140-L145
|
def set_computed_cost_arr(self, value):
''' setter '''
if isinstance(value, np.ndarray):
self.__computed_cost_arr = value
else:
raise TypeError()
|
[
"def",
"set_computed_cost_arr",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"np",
".",
"ndarray",
")",
":",
"self",
".",
"__computed_cost_arr",
"=",
"value",
"else",
":",
"raise",
"TypeError",
"(",
")"
] |
setter
|
[
"setter"
] |
python
|
train
|
LogicalDash/LiSE
|
ELiDE/ELiDE/card.py
|
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/card.py#L241-L247
|
def on_art_image(self, *args):
"""When I get a new ``art_image``, store its texture in
``art_texture``.
"""
if self.art_image is not None:
self.art_texture = self.art_image.texture
|
[
"def",
"on_art_image",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"art_image",
"is",
"not",
"None",
":",
"self",
".",
"art_texture",
"=",
"self",
".",
"art_image",
".",
"texture"
] |
When I get a new ``art_image``, store its texture in
``art_texture``.
|
[
"When",
"I",
"get",
"a",
"new",
"art_image",
"store",
"its",
"texture",
"in",
"art_texture",
"."
] |
python
|
train
|
miguelgrinberg/python-engineio
|
engineio/async_drivers/aiohttp.py
|
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/async_drivers/aiohttp.py#L22-L72
|
def translate_request(request):
"""This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
"""
message = request._message
payload = request._payload
uri_parts = urlsplit(message.path)
environ = {
'wsgi.input': payload,
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'aiohttp',
'REQUEST_METHOD': message.method,
'QUERY_STRING': uri_parts.query or '',
'RAW_URI': message.path,
'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'aiohttp',
'SERVER_PORT': '0',
'aiohttp.request': request
}
for hdr_name, hdr_value in message.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ
|
[
"def",
"translate_request",
"(",
"request",
")",
":",
"message",
"=",
"request",
".",
"_message",
"payload",
"=",
"request",
".",
"_payload",
"uri_parts",
"=",
"urlsplit",
"(",
"message",
".",
"path",
")",
"environ",
"=",
"{",
"'wsgi.input'",
":",
"payload",
",",
"'wsgi.errors'",
":",
"sys",
".",
"stderr",
",",
"'wsgi.version'",
":",
"(",
"1",
",",
"0",
")",
",",
"'wsgi.async'",
":",
"True",
",",
"'wsgi.multithread'",
":",
"False",
",",
"'wsgi.multiprocess'",
":",
"False",
",",
"'wsgi.run_once'",
":",
"False",
",",
"'SERVER_SOFTWARE'",
":",
"'aiohttp'",
",",
"'REQUEST_METHOD'",
":",
"message",
".",
"method",
",",
"'QUERY_STRING'",
":",
"uri_parts",
".",
"query",
"or",
"''",
",",
"'RAW_URI'",
":",
"message",
".",
"path",
",",
"'SERVER_PROTOCOL'",
":",
"'HTTP/%s.%s'",
"%",
"message",
".",
"version",
",",
"'REMOTE_ADDR'",
":",
"'127.0.0.1'",
",",
"'REMOTE_PORT'",
":",
"'0'",
",",
"'SERVER_NAME'",
":",
"'aiohttp'",
",",
"'SERVER_PORT'",
":",
"'0'",
",",
"'aiohttp.request'",
":",
"request",
"}",
"for",
"hdr_name",
",",
"hdr_value",
"in",
"message",
".",
"headers",
".",
"items",
"(",
")",
":",
"hdr_name",
"=",
"hdr_name",
".",
"upper",
"(",
")",
"if",
"hdr_name",
"==",
"'CONTENT-TYPE'",
":",
"environ",
"[",
"'CONTENT_TYPE'",
"]",
"=",
"hdr_value",
"continue",
"elif",
"hdr_name",
"==",
"'CONTENT-LENGTH'",
":",
"environ",
"[",
"'CONTENT_LENGTH'",
"]",
"=",
"hdr_value",
"continue",
"key",
"=",
"'HTTP_%s'",
"%",
"hdr_name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"if",
"key",
"in",
"environ",
":",
"hdr_value",
"=",
"'%s,%s'",
"%",
"(",
"environ",
"[",
"key",
"]",
",",
"hdr_value",
")",
"environ",
"[",
"key",
"]",
"=",
"hdr_value",
"environ",
"[",
"'wsgi.url_scheme'",
"]",
"=",
"environ",
".",
"get",
"(",
"'HTTP_X_FORWARDED_PROTO'",
",",
"'http'",
")",
"path_info",
"=",
"uri_parts",
".",
"path",
"environ",
"[",
"'PATH_INFO'",
"]",
"=",
"path_info",
"environ",
"[",
"'SCRIPT_NAME'",
"]",
"=",
"''",
"return",
"environ"
] |
This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
|
[
"This",
"function",
"takes",
"the",
"arguments",
"passed",
"to",
"the",
"request",
"handler",
"and",
"uses",
"them",
"to",
"generate",
"a",
"WSGI",
"compatible",
"environ",
"dictionary",
"."
] |
python
|
train
|
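A hedged illustration of the mapping above: for a request like 'GET /socket.io/?EIO=3' carrying an 'X-Forwarded-Proto: https' header, the resulting environ would include roughly the following entries (values illustrative).

expected_environ_subset = {
    'REQUEST_METHOD': 'GET',
    'PATH_INFO': '/socket.io/',
    'QUERY_STRING': 'EIO=3',
    'HTTP_X_FORWARDED_PROTO': 'https',
    'wsgi.url_scheme': 'https',   # taken from the forwarded header when present
}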
trevorstephens/gplearn
|
gplearn/genetic.py
|
https://github.com/trevorstephens/gplearn/blob/5c0465f2ecdcd5abcdf3fe520688d24cd59e4a52/gplearn/genetic.py#L36-L152
|
def _parallel_evolve(n_programs, parents, X, y, sample_weight, seeds, params):
"""Private function used to build a batch of programs within a job."""
n_samples, n_features = X.shape
# Unpack parameters
tournament_size = params['tournament_size']
function_set = params['function_set']
arities = params['arities']
init_depth = params['init_depth']
init_method = params['init_method']
const_range = params['const_range']
metric = params['_metric']
transformer = params['_transformer']
parsimony_coefficient = params['parsimony_coefficient']
method_probs = params['method_probs']
p_point_replace = params['p_point_replace']
max_samples = params['max_samples']
feature_names = params['feature_names']
max_samples = int(max_samples * n_samples)
def _tournament():
"""Find the fittest individual from a sub-population."""
contenders = random_state.randint(0, len(parents), tournament_size)
fitness = [parents[p].fitness_ for p in contenders]
if metric.greater_is_better:
parent_index = contenders[np.argmax(fitness)]
else:
parent_index = contenders[np.argmin(fitness)]
return parents[parent_index], parent_index
# Build programs
programs = []
for i in range(n_programs):
random_state = check_random_state(seeds[i])
if parents is None:
program = None
genome = None
else:
method = random_state.uniform()
parent, parent_index = _tournament()
if method < method_probs[0]:
# crossover
donor, donor_index = _tournament()
program, removed, remains = parent.crossover(donor.program,
random_state)
genome = {'method': 'Crossover',
'parent_idx': parent_index,
'parent_nodes': removed,
'donor_idx': donor_index,
'donor_nodes': remains}
elif method < method_probs[1]:
# subtree_mutation
program, removed, _ = parent.subtree_mutation(random_state)
genome = {'method': 'Subtree Mutation',
'parent_idx': parent_index,
'parent_nodes': removed}
elif method < method_probs[2]:
# hoist_mutation
program, removed = parent.hoist_mutation(random_state)
genome = {'method': 'Hoist Mutation',
'parent_idx': parent_index,
'parent_nodes': removed}
elif method < method_probs[3]:
# point_mutation
program, mutated = parent.point_mutation(random_state)
genome = {'method': 'Point Mutation',
'parent_idx': parent_index,
'parent_nodes': mutated}
else:
# reproduction
program = parent.reproduce()
genome = {'method': 'Reproduction',
'parent_idx': parent_index,
'parent_nodes': []}
program = _Program(function_set=function_set,
arities=arities,
init_depth=init_depth,
init_method=init_method,
n_features=n_features,
metric=metric,
transformer=transformer,
const_range=const_range,
p_point_replace=p_point_replace,
parsimony_coefficient=parsimony_coefficient,
feature_names=feature_names,
random_state=random_state,
program=program)
program.parents = genome
# Draw samples, using sample weights, and then fit
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,))
else:
curr_sample_weight = sample_weight.copy()
oob_sample_weight = curr_sample_weight.copy()
indices, not_indices = program.get_all_indices(n_samples,
max_samples,
random_state)
curr_sample_weight[not_indices] = 0
oob_sample_weight[indices] = 0
program.raw_fitness_ = program.raw_fitness(X, y, curr_sample_weight)
if max_samples < n_samples:
# Calculate OOB fitness
program.oob_fitness_ = program.raw_fitness(X, y, oob_sample_weight)
programs.append(program)
return programs
|
[
"def",
"_parallel_evolve",
"(",
"n_programs",
",",
"parents",
",",
"X",
",",
"y",
",",
"sample_weight",
",",
"seeds",
",",
"params",
")",
":",
"n_samples",
",",
"n_features",
"=",
"X",
".",
"shape",
"# Unpack parameters",
"tournament_size",
"=",
"params",
"[",
"'tournament_size'",
"]",
"function_set",
"=",
"params",
"[",
"'function_set'",
"]",
"arities",
"=",
"params",
"[",
"'arities'",
"]",
"init_depth",
"=",
"params",
"[",
"'init_depth'",
"]",
"init_method",
"=",
"params",
"[",
"'init_method'",
"]",
"const_range",
"=",
"params",
"[",
"'const_range'",
"]",
"metric",
"=",
"params",
"[",
"'_metric'",
"]",
"transformer",
"=",
"params",
"[",
"'_transformer'",
"]",
"parsimony_coefficient",
"=",
"params",
"[",
"'parsimony_coefficient'",
"]",
"method_probs",
"=",
"params",
"[",
"'method_probs'",
"]",
"p_point_replace",
"=",
"params",
"[",
"'p_point_replace'",
"]",
"max_samples",
"=",
"params",
"[",
"'max_samples'",
"]",
"feature_names",
"=",
"params",
"[",
"'feature_names'",
"]",
"max_samples",
"=",
"int",
"(",
"max_samples",
"*",
"n_samples",
")",
"def",
"_tournament",
"(",
")",
":",
"\"\"\"Find the fittest individual from a sub-population.\"\"\"",
"contenders",
"=",
"random_state",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"parents",
")",
",",
"tournament_size",
")",
"fitness",
"=",
"[",
"parents",
"[",
"p",
"]",
".",
"fitness_",
"for",
"p",
"in",
"contenders",
"]",
"if",
"metric",
".",
"greater_is_better",
":",
"parent_index",
"=",
"contenders",
"[",
"np",
".",
"argmax",
"(",
"fitness",
")",
"]",
"else",
":",
"parent_index",
"=",
"contenders",
"[",
"np",
".",
"argmin",
"(",
"fitness",
")",
"]",
"return",
"parents",
"[",
"parent_index",
"]",
",",
"parent_index",
"# Build programs",
"programs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_programs",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"seeds",
"[",
"i",
"]",
")",
"if",
"parents",
"is",
"None",
":",
"program",
"=",
"None",
"genome",
"=",
"None",
"else",
":",
"method",
"=",
"random_state",
".",
"uniform",
"(",
")",
"parent",
",",
"parent_index",
"=",
"_tournament",
"(",
")",
"if",
"method",
"<",
"method_probs",
"[",
"0",
"]",
":",
"# crossover",
"donor",
",",
"donor_index",
"=",
"_tournament",
"(",
")",
"program",
",",
"removed",
",",
"remains",
"=",
"parent",
".",
"crossover",
"(",
"donor",
".",
"program",
",",
"random_state",
")",
"genome",
"=",
"{",
"'method'",
":",
"'Crossover'",
",",
"'parent_idx'",
":",
"parent_index",
",",
"'parent_nodes'",
":",
"removed",
",",
"'donor_idx'",
":",
"donor_index",
",",
"'donor_nodes'",
":",
"remains",
"}",
"elif",
"method",
"<",
"method_probs",
"[",
"1",
"]",
":",
"# subtree_mutation",
"program",
",",
"removed",
",",
"_",
"=",
"parent",
".",
"subtree_mutation",
"(",
"random_state",
")",
"genome",
"=",
"{",
"'method'",
":",
"'Subtree Mutation'",
",",
"'parent_idx'",
":",
"parent_index",
",",
"'parent_nodes'",
":",
"removed",
"}",
"elif",
"method",
"<",
"method_probs",
"[",
"2",
"]",
":",
"# hoist_mutation",
"program",
",",
"removed",
"=",
"parent",
".",
"hoist_mutation",
"(",
"random_state",
")",
"genome",
"=",
"{",
"'method'",
":",
"'Hoist Mutation'",
",",
"'parent_idx'",
":",
"parent_index",
",",
"'parent_nodes'",
":",
"removed",
"}",
"elif",
"method",
"<",
"method_probs",
"[",
"3",
"]",
":",
"# point_mutation",
"program",
",",
"mutated",
"=",
"parent",
".",
"point_mutation",
"(",
"random_state",
")",
"genome",
"=",
"{",
"'method'",
":",
"'Point Mutation'",
",",
"'parent_idx'",
":",
"parent_index",
",",
"'parent_nodes'",
":",
"mutated",
"}",
"else",
":",
"# reproduction",
"program",
"=",
"parent",
".",
"reproduce",
"(",
")",
"genome",
"=",
"{",
"'method'",
":",
"'Reproduction'",
",",
"'parent_idx'",
":",
"parent_index",
",",
"'parent_nodes'",
":",
"[",
"]",
"}",
"program",
"=",
"_Program",
"(",
"function_set",
"=",
"function_set",
",",
"arities",
"=",
"arities",
",",
"init_depth",
"=",
"init_depth",
",",
"init_method",
"=",
"init_method",
",",
"n_features",
"=",
"n_features",
",",
"metric",
"=",
"metric",
",",
"transformer",
"=",
"transformer",
",",
"const_range",
"=",
"const_range",
",",
"p_point_replace",
"=",
"p_point_replace",
",",
"parsimony_coefficient",
"=",
"parsimony_coefficient",
",",
"feature_names",
"=",
"feature_names",
",",
"random_state",
"=",
"random_state",
",",
"program",
"=",
"program",
")",
"program",
".",
"parents",
"=",
"genome",
"# Draw samples, using sample weights, and then fit",
"if",
"sample_weight",
"is",
"None",
":",
"curr_sample_weight",
"=",
"np",
".",
"ones",
"(",
"(",
"n_samples",
",",
")",
")",
"else",
":",
"curr_sample_weight",
"=",
"sample_weight",
".",
"copy",
"(",
")",
"oob_sample_weight",
"=",
"curr_sample_weight",
".",
"copy",
"(",
")",
"indices",
",",
"not_indices",
"=",
"program",
".",
"get_all_indices",
"(",
"n_samples",
",",
"max_samples",
",",
"random_state",
")",
"curr_sample_weight",
"[",
"not_indices",
"]",
"=",
"0",
"oob_sample_weight",
"[",
"indices",
"]",
"=",
"0",
"program",
".",
"raw_fitness_",
"=",
"program",
".",
"raw_fitness",
"(",
"X",
",",
"y",
",",
"curr_sample_weight",
")",
"if",
"max_samples",
"<",
"n_samples",
":",
"# Calculate OOB fitness",
"program",
".",
"oob_fitness_",
"=",
"program",
".",
"raw_fitness",
"(",
"X",
",",
"y",
",",
"oob_sample_weight",
")",
"programs",
".",
"append",
"(",
"program",
")",
"return",
"programs"
] |
Private function used to build a batch of programs within a job.
|
[
"Private",
"function",
"used",
"to",
"build",
"a",
"batch",
"of",
"programs",
"within",
"a",
"job",
"."
] |
python
|
train
|
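The sample-weight masking near the end of the _parallel_evolve record above splits the rows into in-bag and out-of-bag subsets by zeroing complementary weight vectors. A minimal standalone sketch of that idea follows; the index draw is a hypothetical stand-in for program.get_all_indices(), not gplearn's actual helper.

import numpy as np

rng = np.random.RandomState(0)
n_samples, max_samples = 10, 6

# Hypothetical stand-in for program.get_all_indices(): draw the in-bag
# rows with replacement and derive the out-of-bag complement.
indices = rng.randint(0, n_samples, max_samples)
not_indices = np.setdiff1d(np.arange(n_samples), indices)

curr_sample_weight = np.ones(n_samples)        # weights used for fitting
oob_sample_weight = curr_sample_weight.copy()  # weights used for OOB fitness
curr_sample_weight[not_indices] = 0            # only in-bag rows keep weight 1
oob_sample_weight[indices] = 0                 # only out-of-bag rows keep weight 1

# Every row contributes to exactly one of the two fitness evaluations.
assert np.all((curr_sample_weight > 0) != (oob_sample_weight > 0))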
CenturyLinkCloud/clc-python-sdk
|
src/clc/APIv1/user.py
|
https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv1/user.py#L44-L54
|
def GetUsers(alias=None):
"""Gets all of users assigned to a given account.
https://t3n.zendesk.com/entries/22427662-GetUsers
:param alias: short code for a particular account. If none will use account's default alias
"""
if alias is None: alias = clc.v1.Account.GetAlias()
r = clc.v1.API.Call('post','User/GetUsers',{'AccountAlias': alias})
if int(r['StatusCode']) == 0:
return(r['Users'])
|
[
"def",
"GetUsers",
"(",
"alias",
"=",
"None",
")",
":",
"if",
"alias",
"is",
"None",
":",
"alias",
"=",
"clc",
".",
"v1",
".",
"Account",
".",
"GetAlias",
"(",
")",
"r",
"=",
"clc",
".",
"v1",
".",
"API",
".",
"Call",
"(",
"'post'",
",",
"'User/GetUsers'",
",",
"{",
"'AccountAlias'",
":",
"alias",
"}",
")",
"if",
"int",
"(",
"r",
"[",
"'StatusCode'",
"]",
")",
"==",
"0",
":",
"return",
"(",
"r",
"[",
"'Users'",
"]",
")"
] |
Gets all of users assigned to a given account.
https://t3n.zendesk.com/entries/22427662-GetUsers
:param alias: short code for a particular account. If none will use account's default alias
|
[
"Gets",
"all",
"of",
"users",
"assigned",
"to",
"a",
"given",
"account",
".",
"https",
":",
"//",
"t3n",
".",
"zendesk",
".",
"com",
"/",
"entries",
"/",
"22427662",
"-",
"GetUsers"
] |
python
|
train
|
projectatomic/osbs-client
|
osbs/build/build_request.py
|
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/build_request.py#L488-L500
|
def remove_tag_and_push_registries(tag_and_push_registries, version):
"""
Remove matching entries from tag_and_push_registries (in-place)
:param tag_and_push_registries: dict, uri -> dict
:param version: str, 'version' to match against
"""
registries = [uri
for uri, regdict in tag_and_push_registries.items()
if regdict['version'] == version]
for registry in registries:
logger.info("removing %s registry: %s", version, registry)
del tag_and_push_registries[registry]
|
[
"def",
"remove_tag_and_push_registries",
"(",
"tag_and_push_registries",
",",
"version",
")",
":",
"registries",
"=",
"[",
"uri",
"for",
"uri",
",",
"regdict",
"in",
"tag_and_push_registries",
".",
"items",
"(",
")",
"if",
"regdict",
"[",
"'version'",
"]",
"==",
"version",
"]",
"for",
"registry",
"in",
"registries",
":",
"logger",
".",
"info",
"(",
"\"removing %s registry: %s\"",
",",
"version",
",",
"registry",
")",
"del",
"tag_and_push_registries",
"[",
"registry",
"]"
] |
Remove matching entries from tag_and_push_registries (in-place)
:param tag_and_push_registries: dict, uri -> dict
:param version: str, 'version' to match against
|
[
"Remove",
"matching",
"entries",
"from",
"tag_and_push_registries",
"(",
"in",
"-",
"place",
")"
] |
python
|
train
|
ssato/python-anytemplate
|
anytemplate/engines/cheetah.py
|
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/engines/cheetah.py#L68-L76
|
def supports(cls, template_file=None):
"""
:return: Whether the engine can process given template file or not.
"""
if anytemplate.compat.IS_PYTHON_3:
cls._priority = 99
return False # Always as it's not ported to python 3.
return super(Engine, cls).supports(template_file=template_file)
|
[
"def",
"supports",
"(",
"cls",
",",
"template_file",
"=",
"None",
")",
":",
"if",
"anytemplate",
".",
"compat",
".",
"IS_PYTHON_3",
":",
"cls",
".",
"_priority",
"=",
"99",
"return",
"False",
"# Always as it's not ported to python 3.",
"return",
"super",
"(",
"Engine",
",",
"cls",
")",
".",
"supports",
"(",
"template_file",
"=",
"template_file",
")"
] |
:return: Whether the engine can process given template file or not.
|
[
":",
"return",
":",
"Whether",
"the",
"engine",
"can",
"process",
"given",
"template",
"file",
"or",
"not",
"."
] |
python
|
train
|
SALib/SALib
|
src/SALib/analyze/sobol.py
|
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/sobol.py#L253-L302
|
def Si_to_pandas_dict(S_dict):
"""Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of (None, None)
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
"""
problem = S_dict.problem
total_order = {
'ST': S_dict['ST'],
'ST_conf': S_dict['ST_conf']
}
first_order = {
'S1': S_dict['S1'],
'S1_conf': S_dict['S1_conf']
}
idx = None
second_order = None
if 'S2' in S_dict:
names = problem['names']
idx = list(combinations(names, 2))
second_order = {
'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]
for i in idx],
'S2_conf': [S_dict['S2_conf'][names.index(i[0]), names.index(i[1])]
for i in idx]
}
return total_order, first_order, (idx, second_order)
|
[
"def",
"Si_to_pandas_dict",
"(",
"S_dict",
")",
":",
"problem",
"=",
"S_dict",
".",
"problem",
"total_order",
"=",
"{",
"'ST'",
":",
"S_dict",
"[",
"'ST'",
"]",
",",
"'ST_conf'",
":",
"S_dict",
"[",
"'ST_conf'",
"]",
"}",
"first_order",
"=",
"{",
"'S1'",
":",
"S_dict",
"[",
"'S1'",
"]",
",",
"'S1_conf'",
":",
"S_dict",
"[",
"'S1_conf'",
"]",
"}",
"idx",
"=",
"None",
"second_order",
"=",
"None",
"if",
"'S2'",
"in",
"S_dict",
":",
"names",
"=",
"problem",
"[",
"'names'",
"]",
"idx",
"=",
"list",
"(",
"combinations",
"(",
"names",
",",
"2",
")",
")",
"second_order",
"=",
"{",
"'S2'",
":",
"[",
"S_dict",
"[",
"'S2'",
"]",
"[",
"names",
".",
"index",
"(",
"i",
"[",
"0",
"]",
")",
",",
"names",
".",
"index",
"(",
"i",
"[",
"1",
"]",
")",
"]",
"for",
"i",
"in",
"idx",
"]",
",",
"'S2_conf'",
":",
"[",
"S_dict",
"[",
"'S2_conf'",
"]",
"[",
"names",
".",
"index",
"(",
"i",
"[",
"0",
"]",
")",
",",
"names",
".",
"index",
"(",
"i",
"[",
"1",
"]",
")",
"]",
"for",
"i",
"in",
"idx",
"]",
"}",
"return",
"total_order",
",",
"first_order",
",",
"(",
"idx",
",",
"second_order",
")"
] |
Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of (None, None)
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
|
[
"Convert",
"Si",
"information",
"into",
"Pandas",
"DataFrame",
"compatible",
"dict",
".",
"Parameters",
"----------",
"S_dict",
":",
"ResultDict",
"Sobol",
"sensitivity",
"indices",
"See",
"Also",
"----------",
"Si_list_to_dict",
"Returns",
"----------",
"tuple",
":",
"of",
"total",
"first",
"and",
"second",
"order",
"sensitivities",
".",
"Total",
"and",
"first",
"order",
"are",
"dicts",
".",
"Second",
"order",
"sensitivities",
"contain",
"a",
"tuple",
"of",
"parameter",
"name",
"combinations",
"for",
"use",
"as",
"the",
"DataFrame",
"index",
"and",
"second",
"order",
"sensitivities",
".",
"If",
"no",
"second",
"order",
"indices",
"found",
"then",
"returns",
"tuple",
"of",
"(",
"None",
"None",
")",
"Examples",
"--------",
">>>",
"X",
"=",
"saltelli",
".",
"sample",
"(",
"problem",
"1000",
")",
">>>",
"Y",
"=",
"Ishigami",
".",
"evaluate",
"(",
"X",
")",
">>>",
"Si",
"=",
"sobol",
".",
"analyze",
"(",
"problem",
"Y",
"print_to_console",
"=",
"True",
")",
">>>",
"T_Si",
"first_Si",
"(",
"idx",
"second_Si",
")",
"=",
"sobol",
".",
"Si_to_pandas_dict",
"(",
"Si",
"problem",
")"
] |
python
|
train
|
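The Si_to_pandas_dict record above reads second-order sensitivities out of a square S2 matrix by pairing parameter names with itertools.combinations. A small sketch of that lookup pattern follows; the matrix values are made up for illustration, not SALib output.

from itertools import combinations
import numpy as np

names = ['x1', 'x2', 'x3']
# Hypothetical S2 matrix: only the upper triangle carries pairwise indices.
S2 = np.array([[np.nan, 0.10, 0.02],
               [np.nan, np.nan, 0.25],
               [np.nan, np.nan, np.nan]])

idx = list(combinations(names, 2))   # [('x1', 'x2'), ('x1', 'x3'), ('x2', 'x3')]
second = [S2[names.index(a), names.index(b)] for a, b in idx]
print(dict(zip(idx, second)))        # {('x1', 'x2'): 0.1, ('x1', 'x3'): 0.02, ('x2', 'x3'): 0.25}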
neherlab/treetime
|
treetime/treeanc.py
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeanc.py#L1991-L2053
|
def get_tree_dict(self, keep_var_ambigs=False):
"""
For VCF-based objects, returns a nested dict with all the information required to
reconstruct sequences for all nodes (terminal and internal).
Parameters
----------
keep_var_ambigs : boolean
If true, generates dict sequences based on the *original* compressed sequences, which
may include ambiguities. Note sites that only have 1 unambiguous base and ambiguous
bases ("AAAAANN") are stripped of ambiguous bases *before* compression, so ambiguous
bases at this sites will *not* be preserved.
Returns
-------
tree_dict : dict
Format: ::
{
'reference':'AGCTCGA...A',
'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
'positions': [1,4,7,10,100...],
'inferred_const_sites': [7,100....]
}
reference: str
The reference sequence to which the variable sites are mapped
sequences: nested dict
A dict for each sequence with the position and alternative call for each variant
positions: list
All variable positions in the alignment
inferred_cost_sites: list
*(optional)* Positions that were constant except ambiguous bases, which were
converted into constant sites by TreeAnc (ex: 'AAAN' -> 'AAAA')
Raises
------
TypeError
Description
"""
if self.is_vcf:
tree_dict = {}
tree_dict['reference'] = self.ref
tree_dict['positions'] = self.nonref_positions
tree_aln = {}
for n in self.tree.find_clades():
if hasattr(n, 'sequence'):
if keep_var_ambigs: #regenerate dict to include ambig bases
tree_aln[n.name] = self.dict_sequence(n, keep_var_ambigs)
else:
tree_aln[n.name] = n.sequence
tree_dict['sequences'] = tree_aln
if len(self.inferred_const_sites) != 0:
tree_dict['inferred_const_sites'] = self.inferred_const_sites
return tree_dict
else:
raise TypeError("A dict can only be returned for trees created with VCF-input!")
|
[
"def",
"get_tree_dict",
"(",
"self",
",",
"keep_var_ambigs",
"=",
"False",
")",
":",
"if",
"self",
".",
"is_vcf",
":",
"tree_dict",
"=",
"{",
"}",
"tree_dict",
"[",
"'reference'",
"]",
"=",
"self",
".",
"ref",
"tree_dict",
"[",
"'positions'",
"]",
"=",
"self",
".",
"nonref_positions",
"tree_aln",
"=",
"{",
"}",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
")",
":",
"if",
"hasattr",
"(",
"n",
",",
"'sequence'",
")",
":",
"if",
"keep_var_ambigs",
":",
"#regenerate dict to include ambig bases",
"tree_aln",
"[",
"n",
".",
"name",
"]",
"=",
"self",
".",
"dict_sequence",
"(",
"n",
",",
"keep_var_ambigs",
")",
"else",
":",
"tree_aln",
"[",
"n",
".",
"name",
"]",
"=",
"n",
".",
"sequence",
"tree_dict",
"[",
"'sequences'",
"]",
"=",
"tree_aln",
"if",
"len",
"(",
"self",
".",
"inferred_const_sites",
")",
"!=",
"0",
":",
"tree_dict",
"[",
"'inferred_const_sites'",
"]",
"=",
"self",
".",
"inferred_const_sites",
"return",
"tree_dict",
"else",
":",
"raise",
"TypeError",
"(",
"\"A dict can only be returned for trees created with VCF-input!\"",
")"
] |
For VCF-based objects, returns a nested dict with all the information required to
reconstruct sequences for all nodes (terminal and internal).
Parameters
----------
keep_var_ambigs : boolean
If true, generates dict sequences based on the *original* compressed sequences, which
may include ambiguities. Note sites that only have 1 unambiguous base and ambiguous
bases ("AAAAANN") are stripped of ambiguous bases *before* compression, so ambiguous
bases at this sites will *not* be preserved.
Returns
-------
tree_dict : dict
Format: ::
{
'reference':'AGCTCGA...A',
'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
'positions': [1,4,7,10,100...],
'inferred_const_sites': [7,100....]
}
reference: str
The reference sequence to which the variable sites are mapped
sequences: nested dict
A dict for each sequence with the position and alternative call for each variant
positions: list
All variable positions in the alignment
inferred_cost_sites: list
*(optional)* Positions that were constant except ambiguous bases, which were
converted into constant sites by TreeAnc (ex: 'AAAN' -> 'AAAA')
Raises
------
TypeError
Description
|
[
"For",
"VCF",
"-",
"based",
"objects",
"returns",
"a",
"nested",
"dict",
"with",
"all",
"the",
"information",
"required",
"to",
"reconstruct",
"sequences",
"for",
"all",
"nodes",
"(",
"terminal",
"and",
"internal",
")",
"."
] |
python
|
test
|
Erotemic/utool
|
utool/util_regex.py
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_regex.py#L443-L469
|
def regex_parse(regex, text, fromstart=True):
r"""
regex_parse
Args:
regex (str):
text (str):
fromstart (bool):
Returns:
dict or None:
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> regex = r'(?P<string>\'[^\']*\')'
>>> text = " 'just' 'a' sentance with 'strings' in it "
>>> fromstart = False
>>> result = regex_parse(regex, text, fromstart)['string']
>>> print(result)
"""
match = regex_get_match(regex, text, fromstart=fromstart)
if match is not None:
parse_dict = match.groupdict()
return parse_dict
return None
|
[
"def",
"regex_parse",
"(",
"regex",
",",
"text",
",",
"fromstart",
"=",
"True",
")",
":",
"match",
"=",
"regex_get_match",
"(",
"regex",
",",
"text",
",",
"fromstart",
"=",
"fromstart",
")",
"if",
"match",
"is",
"not",
"None",
":",
"parse_dict",
"=",
"match",
".",
"groupdict",
"(",
")",
"return",
"parse_dict",
"return",
"None"
] |
r"""
regex_parse
Args:
regex (str):
text (str):
fromstart (bool):
Returns:
dict or None:
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> regex = r'(?P<string>\'[^\']*\')'
>>> text = " 'just' 'a' sentance with 'strings' in it "
>>> fromstart = False
>>> result = regex_parse(regex, text, fromstart)['string']
>>> print(result)
|
[
"r",
"regex_parse"
] |
python
|
train
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py#L136-L161
|
def _handle_display_data(self, msg):
""" Overridden to handle rich data types, like SVG.
"""
if not self._hidden and self._is_from_this_session(msg):
source = msg['content']['source']
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
if data.has_key('image/svg+xml'):
self.log.debug("display: %s", msg.get('content', ''))
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif data.has_key('image/png'):
self.log.debug("display: %s", msg.get('content', ''))
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True)
elif data.has_key('image/jpeg') and self._jpg_supported:
self.log.debug("display: %s", msg.get('content', ''))
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True)
else:
# Default back to the plain text representation.
return super(RichIPythonWidget, self)._handle_display_data(msg)
|
[
"def",
"_handle_display_data",
"(",
"self",
",",
"msg",
")",
":",
"if",
"not",
"self",
".",
"_hidden",
"and",
"self",
".",
"_is_from_this_session",
"(",
"msg",
")",
":",
"source",
"=",
"msg",
"[",
"'content'",
"]",
"[",
"'source'",
"]",
"data",
"=",
"msg",
"[",
"'content'",
"]",
"[",
"'data'",
"]",
"metadata",
"=",
"msg",
"[",
"'content'",
"]",
"[",
"'metadata'",
"]",
"# Try to use the svg or html representations.",
"# FIXME: Is this the right ordering of things to try?",
"if",
"data",
".",
"has_key",
"(",
"'image/svg+xml'",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"display: %s\"",
",",
"msg",
".",
"get",
"(",
"'content'",
",",
"''",
")",
")",
"svg",
"=",
"data",
"[",
"'image/svg+xml'",
"]",
"self",
".",
"_append_svg",
"(",
"svg",
",",
"True",
")",
"elif",
"data",
".",
"has_key",
"(",
"'image/png'",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"display: %s\"",
",",
"msg",
".",
"get",
"(",
"'content'",
",",
"''",
")",
")",
"# PNG data is base64 encoded as it passes over the network",
"# in a JSON structure so we decode it.",
"png",
"=",
"decodestring",
"(",
"data",
"[",
"'image/png'",
"]",
".",
"encode",
"(",
"'ascii'",
")",
")",
"self",
".",
"_append_png",
"(",
"png",
",",
"True",
")",
"elif",
"data",
".",
"has_key",
"(",
"'image/jpeg'",
")",
"and",
"self",
".",
"_jpg_supported",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"display: %s\"",
",",
"msg",
".",
"get",
"(",
"'content'",
",",
"''",
")",
")",
"jpg",
"=",
"decodestring",
"(",
"data",
"[",
"'image/jpeg'",
"]",
".",
"encode",
"(",
"'ascii'",
")",
")",
"self",
".",
"_append_jpg",
"(",
"jpg",
",",
"True",
")",
"else",
":",
"# Default back to the plain text representation.",
"return",
"super",
"(",
"RichIPythonWidget",
",",
"self",
")",
".",
"_handle_display_data",
"(",
"msg",
")"
] |
Overridden to handle rich data types, like SVG.
|
[
"Overridden",
"to",
"handle",
"rich",
"data",
"types",
"like",
"SVG",
"."
] |
python
|
test
|
scizzorz/bumpy
|
bumpy.py
|
https://github.com/scizzorz/bumpy/blob/99ed5c5ccaa61842cafe9faf8b082de44bdf01f9/bumpy.py#L317-L328
|
def age(*paths):
'''Return the minimum age of a set of files.
Returns 0 if no paths are given.
Returns time.time() if a path does not exist.'''
if not paths:
return 0
for path in paths:
if not os.path.exists(path):
return time.time()
return min([(time.time() - os.path.getmtime(path)) for path in paths])
|
[
"def",
"age",
"(",
"*",
"paths",
")",
":",
"if",
"not",
"paths",
":",
"return",
"0",
"for",
"path",
"in",
"paths",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"time",
".",
"time",
"(",
")",
"return",
"min",
"(",
"[",
"(",
"time",
".",
"time",
"(",
")",
"-",
"os",
".",
"path",
".",
"getmtime",
"(",
"path",
")",
")",
"for",
"path",
"in",
"paths",
"]",
")"
] |
Return the minimum age of a set of files.
Returns 0 if no paths are given.
Returns time.time() if a path does not exist.
|
[
"Return",
"the",
"minimum",
"age",
"of",
"a",
"set",
"of",
"files",
".",
"Returns",
"0",
"if",
"no",
"paths",
"are",
"given",
".",
"Returns",
"time",
".",
"time",
"()",
"if",
"a",
"path",
"does",
"not",
"exist",
"."
] |
python
|
train
|
pantsbuild/pex
|
pex/interpreter.py
|
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/interpreter.py#L349-L364
|
def find(cls, paths):
"""
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
"""
pythons = []
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if cls._matches_binary_name(basefile):
try:
pythons.append(cls.from_binary(fn))
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
continue
return pythons
|
[
"def",
"find",
"(",
"cls",
",",
"paths",
")",
":",
"pythons",
"=",
"[",
"]",
"for",
"path",
"in",
"paths",
":",
"for",
"fn",
"in",
"cls",
".",
"expand_path",
"(",
"path",
")",
":",
"basefile",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
"if",
"cls",
".",
"_matches_binary_name",
"(",
"basefile",
")",
":",
"try",
":",
"pythons",
".",
"append",
"(",
"cls",
".",
"from_binary",
"(",
"fn",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"TRACER",
".",
"log",
"(",
"'Could not identify %s: %s'",
"%",
"(",
"fn",
",",
"e",
")",
")",
"continue",
"return",
"pythons"
] |
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
|
[
"Given",
"a",
"list",
"of",
"files",
"or",
"directories",
"try",
"to",
"detect",
"python",
"interpreters",
"amongst",
"them",
".",
"Returns",
"a",
"list",
"of",
"PythonInterpreter",
"objects",
"."
] |
python
|
train
|
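A hedged usage sketch for the find() classmethod in the pex record above, assuming PythonInterpreter is importable from pex.interpreter as the record's path suggests; the directory names are illustrative only.

from pex.interpreter import PythonInterpreter

# Scan candidate directories for python binaries; paths that cannot be
# identified as interpreters are logged and skipped by find() itself.
candidates = ['/usr/bin', '/usr/local/bin']   # illustrative directories
for interp in PythonInterpreter.find(candidates):
    print(interp)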
gmr/rejected
|
rejected/process.py
|
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/process.py#L523-L536
|
def report_stats(self):
"""Create the dict of stats data for the MCP stats queue"""
if not self.previous:
self.previous = dict()
for key in self.counters:
self.previous[key] = 0
values = {
'name': self.name,
'consumer_name': self.consumer_name,
'counts': dict(self.counters),
'previous': dict(self.previous)
}
self.previous = dict(self.counters)
return values
|
[
"def",
"report_stats",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"previous",
":",
"self",
".",
"previous",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"self",
".",
"counters",
":",
"self",
".",
"previous",
"[",
"key",
"]",
"=",
"0",
"values",
"=",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'consumer_name'",
":",
"self",
".",
"consumer_name",
",",
"'counts'",
":",
"dict",
"(",
"self",
".",
"counters",
")",
",",
"'previous'",
":",
"dict",
"(",
"self",
".",
"previous",
")",
"}",
"self",
".",
"previous",
"=",
"dict",
"(",
"self",
".",
"counters",
")",
"return",
"values"
] |
Create the dict of stats data for the MCP stats queue
|
[
"Create",
"the",
"dict",
"of",
"stats",
"data",
"for",
"the",
"MCP",
"stats",
"queue"
] |
python
|
train
|
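The report_stats payload above pairs the current counters with the previous snapshot so a consumer can compute per-interval deltas. A tiny sketch of that consumption side follows; the field names come from the record, the numbers are illustrative.

payload = {'counts': {'processed': 120, 'errors': 3},
           'previous': {'processed': 100, 'errors': 1}}

# Delta since the last report, falling back to 0 for counters not seen before.
deltas = {key: payload['counts'][key] - payload['previous'].get(key, 0)
          for key in payload['counts']}
print(deltas)   # {'processed': 20, 'errors': 2}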
grahame/dividebatur
|
dividebatur/counter.py
|
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L568-L653
|
def determine_bulk_exclusions(self, candidate_aggregates):
"determine candidates who may be bulk excluded, under 273(13)"
# adjustment as under (13C) - seems to only apply if more than one candidate was elected in a round
continuing = self.get_continuing_candidates(candidate_aggregates)
candidate_votes = candidate_aggregates.get_candidate_votes()
by_votes = self.get_votes_to_candidates(continuing, candidate_aggregates)
adjustment = sum(excess_votes for _, _, excess_votes in self.election_distributions_pending)
candidate_notional_votes = self.get_candidate_notional_votes(candidate_aggregates, adjustment)
leading_shortfall, vacancy_shortfall = self.get_leading_and_vacancy_shortfall(candidate_aggregates)
def determine_candidate_A():
# notional votes >= vacancy shortfall
eligible = [candidate_id for candidate_id, notional in candidate_notional_votes.items() if notional >= vacancy_shortfall]
if len(eligible) == 0:
return None
# lowest in the poll: tie is irrelevant
eligible.sort(key=lambda candidate_id: candidate_votes[candidate_id])
return eligible[0]
sorted_votes = list(sorted(by_votes.keys()))
def notional_lower_than_higher(candidate_id):
"check notional votes of candidate is lower than the number of votes of the candidate standing immediately higher"
votes = candidate_votes[candidate_id]
votes_idx = sorted_votes.index(votes)
# no higher candidate
if votes_idx == len(sorted_votes) - 1:
return False
# legislation ambiguous, but if there's a tie above us let's check all the candidates on that count
notional = candidate_notional_votes[candidate_id]
higher_votes = sorted_votes[votes_idx + 1]
acceptable = all(notional < candidate_votes[t] for t in by_votes[higher_votes])
return acceptable
def highest(eligible):
"return the highest ranked candidate in candidates, by vote. if a tie, or list empty, return None"
if not eligible:
return
binned = self.get_votes_to_candidates(eligible, candidate_aggregates)
possible = binned[max(binned)]
if len(possible) == 1:
return possible[0]
def determine_candidate_B(candidate_A):
if candidate_A is not None:
A_votes = candidate_votes[candidate_A]
eligible = [candidate_id for candidate_id in continuing if candidate_votes[candidate_id] < A_votes]
else:
eligible = [candidate_id for candidate_id in continuing if candidate_notional_votes[candidate_id] < vacancy_shortfall]
eligible = [candidate_id for candidate_id in eligible if notional_lower_than_higher(candidate_id)]
return highest(eligible)
def determine_candidate_C():
eligible = [candidate_id for candidate_id in continuing if candidate_notional_votes[candidate_id] < leading_shortfall]
# the candidate of those eligible which stands highest in the poll
eligible.sort(key=lambda candidate_id: candidate_votes[candidate_id])
return highest(eligible)
def candidates_lte(candidate_id):
votes = candidate_votes[candidate_id]
lower_votes = [t for t in by_votes.keys() if t < votes]
to_exclude = [candidate_id]
for vote in lower_votes:
to_exclude += [candidate_id for candidate_id in by_votes[vote] if candidate_id in continuing]
return to_exclude
# candidate A, B, C as under (13A)(a)
to_exclude = None
candidate_A = determine_candidate_A()
candidate_B = determine_candidate_B(candidate_A)
candidate_C = None
if candidate_B:
candidate_B_votes = candidate_votes[candidate_B]
if candidate_B_votes < leading_shortfall:
to_exclude = candidates_lte(candidate_B)
else:
candidate_C = determine_candidate_C()
if candidate_C:
to_exclude = candidates_lte(candidate_C)
if to_exclude and len(to_exclude) == 1:
to_exclude = None
return to_exclude, ExclusionReason("bulk", {
"candidate_A": candidate_A,
"candidate_B": candidate_B,
"candidate_C": candidate_C
})
|
[
"def",
"determine_bulk_exclusions",
"(",
"self",
",",
"candidate_aggregates",
")",
":",
"# adjustment as under (13C) - seems to only apply if more than one candidate was elected in a round",
"continuing",
"=",
"self",
".",
"get_continuing_candidates",
"(",
"candidate_aggregates",
")",
"candidate_votes",
"=",
"candidate_aggregates",
".",
"get_candidate_votes",
"(",
")",
"by_votes",
"=",
"self",
".",
"get_votes_to_candidates",
"(",
"continuing",
",",
"candidate_aggregates",
")",
"adjustment",
"=",
"sum",
"(",
"excess_votes",
"for",
"_",
",",
"_",
",",
"excess_votes",
"in",
"self",
".",
"election_distributions_pending",
")",
"candidate_notional_votes",
"=",
"self",
".",
"get_candidate_notional_votes",
"(",
"candidate_aggregates",
",",
"adjustment",
")",
"leading_shortfall",
",",
"vacancy_shortfall",
"=",
"self",
".",
"get_leading_and_vacancy_shortfall",
"(",
"candidate_aggregates",
")",
"def",
"determine_candidate_A",
"(",
")",
":",
"# notional votes >= vacancy shortfall",
"eligible",
"=",
"[",
"candidate_id",
"for",
"candidate_id",
",",
"notional",
"in",
"candidate_notional_votes",
".",
"items",
"(",
")",
"if",
"notional",
">=",
"vacancy_shortfall",
"]",
"if",
"len",
"(",
"eligible",
")",
"==",
"0",
":",
"return",
"None",
"# lowest in the poll: tie is irrelevant",
"eligible",
".",
"sort",
"(",
"key",
"=",
"lambda",
"candidate_id",
":",
"candidate_votes",
"[",
"candidate_id",
"]",
")",
"return",
"eligible",
"[",
"0",
"]",
"sorted_votes",
"=",
"list",
"(",
"sorted",
"(",
"by_votes",
".",
"keys",
"(",
")",
")",
")",
"def",
"notional_lower_than_higher",
"(",
"candidate_id",
")",
":",
"\"check notional votes of candidate is lower than the number of votes of the candidate standing immediately higher\"",
"votes",
"=",
"candidate_votes",
"[",
"candidate_id",
"]",
"votes_idx",
"=",
"sorted_votes",
".",
"index",
"(",
"votes",
")",
"# no higher candidate",
"if",
"votes_idx",
"==",
"len",
"(",
"sorted_votes",
")",
"-",
"1",
":",
"return",
"False",
"# legislation ambiguous, but if there's a tie above us let's check all the candidates on that count",
"notional",
"=",
"candidate_notional_votes",
"[",
"candidate_id",
"]",
"higher_votes",
"=",
"sorted_votes",
"[",
"votes_idx",
"+",
"1",
"]",
"acceptable",
"=",
"all",
"(",
"notional",
"<",
"candidate_votes",
"[",
"t",
"]",
"for",
"t",
"in",
"by_votes",
"[",
"higher_votes",
"]",
")",
"return",
"acceptable",
"def",
"highest",
"(",
"eligible",
")",
":",
"\"return the highest ranked candidate in candidates, by vote. if a tie, or list empty, return None\"",
"if",
"not",
"eligible",
":",
"return",
"binned",
"=",
"self",
".",
"get_votes_to_candidates",
"(",
"eligible",
",",
"candidate_aggregates",
")",
"possible",
"=",
"binned",
"[",
"max",
"(",
"binned",
")",
"]",
"if",
"len",
"(",
"possible",
")",
"==",
"1",
":",
"return",
"possible",
"[",
"0",
"]",
"def",
"determine_candidate_B",
"(",
"candidate_A",
")",
":",
"if",
"candidate_A",
"is",
"not",
"None",
":",
"A_votes",
"=",
"candidate_votes",
"[",
"candidate_A",
"]",
"eligible",
"=",
"[",
"candidate_id",
"for",
"candidate_id",
"in",
"continuing",
"if",
"candidate_votes",
"[",
"candidate_id",
"]",
"<",
"A_votes",
"]",
"else",
":",
"eligible",
"=",
"[",
"candidate_id",
"for",
"candidate_id",
"in",
"continuing",
"if",
"candidate_notional_votes",
"[",
"candidate_id",
"]",
"<",
"vacancy_shortfall",
"]",
"eligible",
"=",
"[",
"candidate_id",
"for",
"candidate_id",
"in",
"eligible",
"if",
"notional_lower_than_higher",
"(",
"candidate_id",
")",
"]",
"return",
"highest",
"(",
"eligible",
")",
"def",
"determine_candidate_C",
"(",
")",
":",
"eligible",
"=",
"[",
"candidate_id",
"for",
"candidate_id",
"in",
"continuing",
"if",
"candidate_notional_votes",
"[",
"candidate_id",
"]",
"<",
"leading_shortfall",
"]",
"# the candidate of those eligible which stands highest in the poll",
"eligible",
".",
"sort",
"(",
"key",
"=",
"lambda",
"candidate_id",
":",
"candidate_votes",
"[",
"candidate_id",
"]",
")",
"return",
"highest",
"(",
"eligible",
")",
"def",
"candidates_lte",
"(",
"candidate_id",
")",
":",
"votes",
"=",
"candidate_votes",
"[",
"candidate_id",
"]",
"lower_votes",
"=",
"[",
"t",
"for",
"t",
"in",
"by_votes",
".",
"keys",
"(",
")",
"if",
"t",
"<",
"votes",
"]",
"to_exclude",
"=",
"[",
"candidate_id",
"]",
"for",
"vote",
"in",
"lower_votes",
":",
"to_exclude",
"+=",
"[",
"candidate_id",
"for",
"candidate_id",
"in",
"by_votes",
"[",
"vote",
"]",
"if",
"candidate_id",
"in",
"continuing",
"]",
"return",
"to_exclude",
"# candidate A, B, C as under (13A)(a)",
"to_exclude",
"=",
"None",
"candidate_A",
"=",
"determine_candidate_A",
"(",
")",
"candidate_B",
"=",
"determine_candidate_B",
"(",
"candidate_A",
")",
"candidate_C",
"=",
"None",
"if",
"candidate_B",
":",
"candidate_B_votes",
"=",
"candidate_votes",
"[",
"candidate_B",
"]",
"if",
"candidate_B_votes",
"<",
"leading_shortfall",
":",
"to_exclude",
"=",
"candidates_lte",
"(",
"candidate_B",
")",
"else",
":",
"candidate_C",
"=",
"determine_candidate_C",
"(",
")",
"if",
"candidate_C",
":",
"to_exclude",
"=",
"candidates_lte",
"(",
"candidate_C",
")",
"if",
"to_exclude",
"and",
"len",
"(",
"to_exclude",
")",
"==",
"1",
":",
"to_exclude",
"=",
"None",
"return",
"to_exclude",
",",
"ExclusionReason",
"(",
"\"bulk\"",
",",
"{",
"\"candidate_A\"",
":",
"candidate_A",
",",
"\"candidate_B\"",
":",
"candidate_B",
",",
"\"candidate_C\"",
":",
"candidate_C",
"}",
")"
] |
determine candidates who may be bulk excluded, under 273(13)
|
[
"determine",
"candidates",
"who",
"may",
"be",
"bulk",
"excluded",
"under",
"273",
"(",
"13",
")"
] |
python
|
train
|
vaexio/vaex
|
packages/vaex-core/vaex/delayed.py
|
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/delayed.py#L28-L73
|
def delayed(f):
'''Decorator to transparantly accept delayed computation.
Example:
>>> delayed_sum = ds.sum(ds.E, binby=ds.x, limits=limits,
>>> shape=4, delay=True)
>>> @vaex.delayed
>>> def total_sum(sums):
>>> return sums.sum()
>>> sum_of_sums = total_sum(delayed_sum)
>>> ds.execute()
>>> sum_of_sums.get()
See the tutorial for a more complete example https://docs.vaex.io/en/latest/tutorial.html#Parallel-computations
'''
def wrapped(*args, **kwargs):
# print "calling", f, "with", kwargs
# key_values = kwargs.items()
key_promise = list([(key, promisify(value)) for key, value in kwargs.items()])
# key_promise = [(key, promisify(value)) for key, value in key_values]
arg_promises = list([promisify(value) for value in args])
kwarg_promises = list([promise for key, promise in key_promise])
promises = arg_promises + kwarg_promises
for promise in promises:
def echo_error(exc, promise=promise):
print("error with ", promise, "exception is", exc)
# raise exc
def echo(value, promise=promise):
print("done with ", repr(promise), "value is", value)
# promise.then(echo, echo_error)
# print promises
allarguments = aplus.listPromise(*promises)
def call(_):
kwargs_real = {key: promise.get() for key, promise in key_promise}
args_real = list([promise.get() for promise in arg_promises])
return f(*args_real, **kwargs_real)
def error(exc):
print("error", exc)
raise exc
return allarguments.then(call, error)
return wrapped
|
[
"def",
"delayed",
"(",
"f",
")",
":",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# print \"calling\", f, \"with\", kwargs",
"# key_values = kwargs.items()",
"key_promise",
"=",
"list",
"(",
"[",
"(",
"key",
",",
"promisify",
"(",
"value",
")",
")",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
")",
"# key_promise = [(key, promisify(value)) for key, value in key_values]",
"arg_promises",
"=",
"list",
"(",
"[",
"promisify",
"(",
"value",
")",
"for",
"value",
"in",
"args",
"]",
")",
"kwarg_promises",
"=",
"list",
"(",
"[",
"promise",
"for",
"key",
",",
"promise",
"in",
"key_promise",
"]",
")",
"promises",
"=",
"arg_promises",
"+",
"kwarg_promises",
"for",
"promise",
"in",
"promises",
":",
"def",
"echo_error",
"(",
"exc",
",",
"promise",
"=",
"promise",
")",
":",
"print",
"(",
"\"error with \"",
",",
"promise",
",",
"\"exception is\"",
",",
"exc",
")",
"# raise exc",
"def",
"echo",
"(",
"value",
",",
"promise",
"=",
"promise",
")",
":",
"print",
"(",
"\"done with \"",
",",
"repr",
"(",
"promise",
")",
",",
"\"value is\"",
",",
"value",
")",
"# promise.then(echo, echo_error)",
"# print promises",
"allarguments",
"=",
"aplus",
".",
"listPromise",
"(",
"*",
"promises",
")",
"def",
"call",
"(",
"_",
")",
":",
"kwargs_real",
"=",
"{",
"key",
":",
"promise",
".",
"get",
"(",
")",
"for",
"key",
",",
"promise",
"in",
"key_promise",
"}",
"args_real",
"=",
"list",
"(",
"[",
"promise",
".",
"get",
"(",
")",
"for",
"promise",
"in",
"arg_promises",
"]",
")",
"return",
"f",
"(",
"*",
"args_real",
",",
"*",
"*",
"kwargs_real",
")",
"def",
"error",
"(",
"exc",
")",
":",
"print",
"(",
"\"error\"",
",",
"exc",
")",
"raise",
"exc",
"return",
"allarguments",
".",
"then",
"(",
"call",
",",
"error",
")",
"return",
"wrapped"
] |
Decorator to transparantly accept delayed computation.
Example:
>>> delayed_sum = ds.sum(ds.E, binby=ds.x, limits=limits,
>>> shape=4, delay=True)
>>> @vaex.delayed
>>> def total_sum(sums):
>>> return sums.sum()
>>> sum_of_sums = total_sum(delayed_sum)
>>> ds.execute()
>>> sum_of_sums.get()
See the tutorial for a more complete example https://docs.vaex.io/en/latest/tutorial.html#Parallel-computations
|
[
"Decorator",
"to",
"transparantly",
"accept",
"delayed",
"computation",
"."
] |
python
|
test
|