repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---
nats-io/asyncio-nats
|
nats/aio/client.py
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L478-L585
|
def subscribe(self, subject,
queue="",
cb=None,
future=None,
max_msgs=0,
is_async=False,
pending_msgs_limit=DEFAULT_SUB_PENDING_MSGS_LIMIT,
pending_bytes_limit=DEFAULT_SUB_PENDING_BYTES_LIMIT,
):
"""
Takes a subject string and optional queue string to send a SUB cmd,
and a callback to which messages (Msg) will be dispatched to
be processed sequentially by default.
"""
if subject == "":
raise ErrBadSubject
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining:
raise ErrConnectionDraining
sub = Subscription(subject=subject,
queue=queue,
max_msgs=max_msgs,
is_async=is_async,
)
if cb is not None:
if asyncio.iscoroutinefunction(cb):
sub.coro = cb
elif sub.is_async:
raise NatsError(
"nats: must use coroutine for async subscriptions")
else:
# NOTE: Consider to deprecate this eventually, it should always
# be coroutines otherwise they could affect the single thread,
# for now still allow to be flexible.
sub.cb = cb
sub.pending_msgs_limit = pending_msgs_limit
sub.pending_bytes_limit = pending_bytes_limit
sub.pending_queue = asyncio.Queue(
maxsize=pending_msgs_limit,
loop=self._loop,
)
# Close the delivery coroutine over the sub and error handler
# instead of having subscription type hold over state of the conn.
err_cb = self._error_cb
@asyncio.coroutine
def wait_for_msgs():
nonlocal sub
nonlocal err_cb
while True:
try:
msg = yield from sub.pending_queue.get()
sub.pending_size -= len(msg.data)
try:
# Invoke depending of type of handler.
if sub.coro is not None:
if sub.is_async:
# NOTE: Deprecate this usage in a next release,
# the handler implementation ought to decide
# the concurrency level at which the messages
# should be processed.
self._loop.create_task(sub.coro(msg))
else:
yield from sub.coro(msg)
elif sub.cb is not None:
if sub.is_async:
raise NatsError(
"nats: must use coroutine for async subscriptions")
else:
# Schedule regular callbacks to be processed sequentially.
self._loop.call_soon(sub.cb, msg)
except asyncio.CancelledError:
# In case the coroutine handler gets cancelled
# then stop task loop and return.
break
except Exception as e:
# All errors from calling a handler
# are async errors.
if err_cb is not None:
yield from err_cb(e)
except asyncio.CancelledError:
break
# Start task for each subscription, it should be cancelled
# on both unsubscribe and closing as well.
sub.wait_for_msgs_task = self._loop.create_task(
wait_for_msgs())
elif future is not None:
# Used to handle the single response from a request.
sub.future = future
else:
raise NatsError("nats: invalid subscription type")
self._ssid += 1
ssid = self._ssid
self._subs[ssid] = sub
yield from self._subscribe(sub, ssid)
return ssid
|
[
"def",
"subscribe",
"(",
"self",
",",
"subject",
",",
"queue",
"=",
"\"\"",
",",
"cb",
"=",
"None",
",",
"future",
"=",
"None",
",",
"max_msgs",
"=",
"0",
",",
"is_async",
"=",
"False",
",",
"pending_msgs_limit",
"=",
"DEFAULT_SUB_PENDING_MSGS_LIMIT",
",",
"pending_bytes_limit",
"=",
"DEFAULT_SUB_PENDING_BYTES_LIMIT",
",",
")",
":",
"if",
"subject",
"==",
"\"\"",
":",
"raise",
"ErrBadSubject",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"if",
"self",
".",
"is_draining",
":",
"raise",
"ErrConnectionDraining",
"sub",
"=",
"Subscription",
"(",
"subject",
"=",
"subject",
",",
"queue",
"=",
"queue",
",",
"max_msgs",
"=",
"max_msgs",
",",
"is_async",
"=",
"is_async",
",",
")",
"if",
"cb",
"is",
"not",
"None",
":",
"if",
"asyncio",
".",
"iscoroutinefunction",
"(",
"cb",
")",
":",
"sub",
".",
"coro",
"=",
"cb",
"elif",
"sub",
".",
"is_async",
":",
"raise",
"NatsError",
"(",
"\"nats: must use coroutine for async subscriptions\"",
")",
"else",
":",
"# NOTE: Consider to deprecate this eventually, it should always",
"# be coroutines otherwise they could affect the single thread,",
"# for now still allow to be flexible.",
"sub",
".",
"cb",
"=",
"cb",
"sub",
".",
"pending_msgs_limit",
"=",
"pending_msgs_limit",
"sub",
".",
"pending_bytes_limit",
"=",
"pending_bytes_limit",
"sub",
".",
"pending_queue",
"=",
"asyncio",
".",
"Queue",
"(",
"maxsize",
"=",
"pending_msgs_limit",
",",
"loop",
"=",
"self",
".",
"_loop",
",",
")",
"# Close the delivery coroutine over the sub and error handler",
"# instead of having subscription type hold over state of the conn.",
"err_cb",
"=",
"self",
".",
"_error_cb",
"@",
"asyncio",
".",
"coroutine",
"def",
"wait_for_msgs",
"(",
")",
":",
"nonlocal",
"sub",
"nonlocal",
"err_cb",
"while",
"True",
":",
"try",
":",
"msg",
"=",
"yield",
"from",
"sub",
".",
"pending_queue",
".",
"get",
"(",
")",
"sub",
".",
"pending_size",
"-=",
"len",
"(",
"msg",
".",
"data",
")",
"try",
":",
"# Invoke depending of type of handler.",
"if",
"sub",
".",
"coro",
"is",
"not",
"None",
":",
"if",
"sub",
".",
"is_async",
":",
"# NOTE: Deprecate this usage in a next release,",
"# the handler implementation ought to decide",
"# the concurrency level at which the messages",
"# should be processed.",
"self",
".",
"_loop",
".",
"create_task",
"(",
"sub",
".",
"coro",
"(",
"msg",
")",
")",
"else",
":",
"yield",
"from",
"sub",
".",
"coro",
"(",
"msg",
")",
"elif",
"sub",
".",
"cb",
"is",
"not",
"None",
":",
"if",
"sub",
".",
"is_async",
":",
"raise",
"NatsError",
"(",
"\"nats: must use coroutine for async subscriptions\"",
")",
"else",
":",
"# Schedule regular callbacks to be processed sequentially.",
"self",
".",
"_loop",
".",
"call_soon",
"(",
"sub",
".",
"cb",
",",
"msg",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"# In case the coroutine handler gets cancelled",
"# then stop task loop and return.",
"break",
"except",
"Exception",
"as",
"e",
":",
"# All errors from calling a handler",
"# are async errors.",
"if",
"err_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"err_cb",
"(",
"e",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"break",
"# Start task for each subscription, it should be cancelled",
"# on both unsubscribe and closing as well.",
"sub",
".",
"wait_for_msgs_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"wait_for_msgs",
"(",
")",
")",
"elif",
"future",
"is",
"not",
"None",
":",
"# Used to handle the single response from a request.",
"sub",
".",
"future",
"=",
"future",
"else",
":",
"raise",
"NatsError",
"(",
"\"nats: invalid subscription type\"",
")",
"self",
".",
"_ssid",
"+=",
"1",
"ssid",
"=",
"self",
".",
"_ssid",
"self",
".",
"_subs",
"[",
"ssid",
"]",
"=",
"sub",
"yield",
"from",
"self",
".",
"_subscribe",
"(",
"sub",
",",
"ssid",
")",
"return",
"ssid"
] |
Takes a subject string and optional queue string to send a SUB cmd,
and a callback to which messages (Msg) will be dispatched to
be processed sequentially by default.
|
[
"Takes",
"a",
"subject",
"string",
"and",
"optional",
"queue",
"string",
"to",
"send",
"a",
"SUB",
"cmd",
"and",
"a",
"callback",
"which",
"to",
"which",
"messages",
"(",
"Msg",
")",
"will",
"be",
"dispatched",
"to",
"be",
"processed",
"sequentially",
"by",
"default",
"."
] |
python
|
test
|
fastai/fastai
|
fastai/callbacks/tensorboard.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L345-L357
|
def write(self)->None:
"Writes model gradient statistics to Tensorboard."
if len(self.gradients) == 0: return
norms = [x.data.norm() for x in self.gradients]
self._write_avg_norm(norms=norms)
self._write_median_norm(norms=norms)
self._write_max_norm(norms=norms)
self._write_min_norm(norms=norms)
self._write_num_zeros()
self._write_avg_gradient()
self._write_median_gradient()
self._write_max_gradient()
self._write_min_gradient()
|
[
"def",
"write",
"(",
"self",
")",
"->",
"None",
":",
"if",
"len",
"(",
"self",
".",
"gradients",
")",
"==",
"0",
":",
"return",
"norms",
"=",
"[",
"x",
".",
"data",
".",
"norm",
"(",
")",
"for",
"x",
"in",
"self",
".",
"gradients",
"]",
"self",
".",
"_write_avg_norm",
"(",
"norms",
"=",
"norms",
")",
"self",
".",
"_write_median_norm",
"(",
"norms",
"=",
"norms",
")",
"self",
".",
"_write_max_norm",
"(",
"norms",
"=",
"norms",
")",
"self",
".",
"_write_min_norm",
"(",
"norms",
"=",
"norms",
")",
"self",
".",
"_write_num_zeros",
"(",
")",
"self",
".",
"_write_avg_gradient",
"(",
")",
"self",
".",
"_write_median_gradient",
"(",
")",
"self",
".",
"_write_max_gradient",
"(",
")",
"self",
".",
"_write_min_gradient",
"(",
")"
] |
Writes model gradient statistics to Tensorboard.
|
[
"Writes",
"model",
"gradient",
"statistics",
"to",
"Tensorboard",
"."
] |
python
|
train
|
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_param.py
|
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_param.py#L405-L410
|
def check_new_target_system(self):
'''handle a new target_system'''
sysid = self.get_sysid()
if sysid in self.pstate:
return
self.add_new_target_system(sysid)
|
[
"def",
"check_new_target_system",
"(",
"self",
")",
":",
"sysid",
"=",
"self",
".",
"get_sysid",
"(",
")",
"if",
"sysid",
"in",
"self",
".",
"pstate",
":",
"return",
"self",
".",
"add_new_target_system",
"(",
"sysid",
")"
] |
handle a new target_system
|
[
"handle",
"a",
"new",
"target_system"
] |
python
|
train
|
swharden/SWHLab
|
doc/oldcode/swhlab/core/common.py
|
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/common.py#L213-L218
|
def html_temp_launch(html):
"""given text, make it a temporary HTML file and launch it."""
fname = tempfile.gettempdir()+"/swhlab/temp.html"
with open(fname,'w') as f:
f.write(html)
webbrowser.open(fname)
|
[
"def",
"html_temp_launch",
"(",
"html",
")",
":",
"fname",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
"+",
"\"/swhlab/temp.html\"",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"html",
")",
"webbrowser",
".",
"open",
"(",
"fname",
")"
] |
given text, make it a temporary HTML file and launch it.
|
[
"given",
"text",
"make",
"it",
"a",
"temporary",
"HTML",
"file",
"and",
"launch",
"it",
"."
] |
python
|
valid
|
kubernetes-client/python
|
kubernetes/client/apis/core_v1_api.py
|
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L5188-L5209
|
def connect_put_node_proxy_with_path(self, name, path, **kwargs):
"""
connect PUT requests to proxy of Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_put_node_proxy_with_path(name, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NodeProxyOptions (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to node.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_put_node_proxy_with_path_with_http_info(name, path, **kwargs)
else:
(data) = self.connect_put_node_proxy_with_path_with_http_info(name, path, **kwargs)
return data
|
[
"def",
"connect_put_node_proxy_with_path",
"(",
"self",
",",
"name",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"connect_put_node_proxy_with_path_with_http_info",
"(",
"name",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"connect_put_node_proxy_with_path_with_http_info",
"(",
"name",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] |
connect PUT requests to proxy of Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_put_node_proxy_with_path(name, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NodeProxyOptions (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to node.
:return: str
If the method is called asynchronously,
returns the request thread.
|
[
"connect",
"PUT",
"requests",
"to",
"proxy",
"of",
"Node",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"pass",
"async_req",
"=",
"True",
">>>",
"thread",
"=",
"api",
".",
"connect_put_node_proxy_with_path",
"(",
"name",
"path",
"async_req",
"=",
"True",
")",
">>>",
"result",
"=",
"thread",
".",
"get",
"()"
] |
python
|
train
|
numba/llvmlite
|
llvmlite/ir/values.py
|
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/ir/values.py#L161-L166
|
def literal_struct(cls, elems):
"""
Construct a literal structure constant made of the given members.
"""
tys = [el.type for el in elems]
return cls(types.LiteralStructType(tys), elems)
|
[
"def",
"literal_struct",
"(",
"cls",
",",
"elems",
")",
":",
"tys",
"=",
"[",
"el",
".",
"type",
"for",
"el",
"in",
"elems",
"]",
"return",
"cls",
"(",
"types",
".",
"LiteralStructType",
"(",
"tys",
")",
",",
"elems",
")"
] |
Construct a literal structure constant made of the given members.
|
[
"Construct",
"a",
"literal",
"structure",
"constant",
"made",
"of",
"the",
"given",
"members",
"."
] |
python
|
train
|
bapakode/OmMongo
|
ommongo/fields/fields.py
|
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/fields/fields.py#L588-L591
|
def unwrap(self, value, session=None):
''' Validates ``value`` and unwraps it with ``ComputedField.computed_type``'''
self.validate_unwrap(value)
return self.computed_type.unwrap(value, session=session)
|
[
"def",
"unwrap",
"(",
"self",
",",
"value",
",",
"session",
"=",
"None",
")",
":",
"self",
".",
"validate_unwrap",
"(",
"value",
")",
"return",
"self",
".",
"computed_type",
".",
"unwrap",
"(",
"value",
",",
"session",
"=",
"session",
")"
] |
Validates ``value`` and unwraps it with ``ComputedField.computed_type``
|
[
"Validates",
"value",
"and",
"unwraps",
"it",
"with",
"ComputedField",
".",
"computed_type"
] |
python
|
train
|
edx/edx-organizations
|
organizations/data.py
|
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L53-L61
|
def _activate_organization(organization):
"""
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
"""
[_activate_organization_course_relationship(record) for record
in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=False)]
[_activate_record(record) for record
in internal.Organization.objects.filter(id=organization.id, active=False)]
|
[
"def",
"_activate_organization",
"(",
"organization",
")",
":",
"[",
"_activate_organization_course_relationship",
"(",
"record",
")",
"for",
"record",
"in",
"internal",
".",
"OrganizationCourse",
".",
"objects",
".",
"filter",
"(",
"organization_id",
"=",
"organization",
".",
"id",
",",
"active",
"=",
"False",
")",
"]",
"[",
"_activate_record",
"(",
"record",
")",
"for",
"record",
"in",
"internal",
".",
"Organization",
".",
"objects",
".",
"filter",
"(",
"id",
"=",
"organization",
".",
"id",
",",
"active",
"=",
"False",
")",
"]"
] |
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
|
[
"Activates",
"an",
"inactivated",
"(",
"soft",
"-",
"deleted",
")",
"organization",
"as",
"well",
"as",
"any",
"inactive",
"relationships"
] |
python
|
valid
|
PlaidWeb/Publ
|
publ/queries.py
|
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/queries.py#L145-L157
|
def where_entry_date(query, datespec):
""" Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
"""
date, interval, _ = utils.parse_date(datespec)
start_date, end_date = date.span(interval)
return orm.select(
e for e in query if
e.local_date >= start_date.naive and
e.local_date <= end_date.naive
)
|
[
"def",
"where_entry_date",
"(",
"query",
",",
"datespec",
")",
":",
"date",
",",
"interval",
",",
"_",
"=",
"utils",
".",
"parse_date",
"(",
"datespec",
")",
"start_date",
",",
"end_date",
"=",
"date",
".",
"span",
"(",
"interval",
")",
"return",
"orm",
".",
"select",
"(",
"e",
"for",
"e",
"in",
"query",
"if",
"e",
".",
"local_date",
">=",
"start_date",
".",
"naive",
"and",
"e",
".",
"local_date",
"<=",
"end_date",
".",
"naive",
")"
] |
Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
|
[
"Where",
"clause",
"for",
"entries",
"which",
"match",
"a",
"textual",
"date",
"spec"
] |
python
|
train
|
mediawiki-utilities/python-mwtypes
|
mwtypes/files/functions.py
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/files/functions.py#L118-L129
|
def writer(path):
"""
Creates a compressed file writer for a path with a specified
compression type.
"""
filename, extension = extract_extension(path)
if extension in FILE_WRITERS:
writer_func = FILE_WRITERS[extension]
return writer_func(path)
else:
raise RuntimeError("Output compression {0} not supported. Type {1}"
.format(extension, tuple(FILE_WRITERS.keys())))
|
[
"def",
"writer",
"(",
"path",
")",
":",
"filename",
",",
"extension",
"=",
"extract_extension",
"(",
"path",
")",
"if",
"extension",
"in",
"FILE_WRITERS",
":",
"writer_func",
"=",
"FILE_WRITERS",
"[",
"extension",
"]",
"return",
"writer_func",
"(",
"path",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Output compression {0} not supported. Type {1}\"",
".",
"format",
"(",
"extension",
",",
"tuple",
"(",
"FILE_WRITERS",
".",
"keys",
"(",
")",
")",
")",
")"
] |
Creates a compressed file writer for a path with a specified
compression type.
|
[
"Creates",
"a",
"compressed",
"file",
"writer",
"from",
"for",
"a",
"path",
"with",
"a",
"specified",
"compression",
"type",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/libcloud_storage.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_storage.py#L242-L286
|
def download_object(container_name, object_name, destination_path, profile,
overwrite_existing=False, delete_on_failure=True, **libcloud_kwargs):
'''
Download an object to the specified destination path.
:param container_name: Container name
:type container_name: ``str``
:param object_name: Object name
:type object_name: ``str``
:param destination_path: Full path to a file or a directory where the
incoming file will be saved.
:type destination_path: ``str``
:param profile: The profile key
:type profile: ``str``
:param overwrite_existing: True to overwrite an existing file,
defaults to False.
:type overwrite_existing: ``bool``
:param delete_on_failure: True to delete a partially downloaded file if
the download was not successful (hash
mismatch / file size).
:type delete_on_failure: ``bool``
:param libcloud_kwargs: Extra arguments for the driver's download_object method
:type libcloud_kwargs: ``dict``
:return: True if an object has been successfully downloaded, False
otherwise.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.download_object MyFolder me.jpg /tmp/me.jpg profile1
'''
conn = _get_driver(profile=profile)
obj = conn.get_object(container_name, object_name)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
return conn.download_object(obj, destination_path, overwrite_existing, delete_on_failure, **libcloud_kwargs)
|
[
"def",
"download_object",
"(",
"container_name",
",",
"object_name",
",",
"destination_path",
",",
"profile",
",",
"overwrite_existing",
"=",
"False",
",",
"delete_on_failure",
"=",
"True",
",",
"*",
"*",
"libcloud_kwargs",
")",
":",
"conn",
"=",
"_get_driver",
"(",
"profile",
"=",
"profile",
")",
"obj",
"=",
"conn",
".",
"get_object",
"(",
"container_name",
",",
"object_name",
")",
"libcloud_kwargs",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"clean_kwargs",
"(",
"*",
"*",
"libcloud_kwargs",
")",
"return",
"conn",
".",
"download_object",
"(",
"obj",
",",
"destination_path",
",",
"overwrite_existing",
",",
"delete_on_failure",
",",
"*",
"*",
"libcloud_kwargs",
")"
] |
Download an object to the specified destination path.
:param container_name: Container name
:type container_name: ``str``
:param object_name: Object name
:type object_name: ``str``
:param destination_path: Full path to a file or a directory where the
incoming file will be saved.
:type destination_path: ``str``
:param profile: The profile key
:type profile: ``str``
:param overwrite_existing: True to overwrite an existing file,
defaults to False.
:type overwrite_existing: ``bool``
:param delete_on_failure: True to delete a partially downloaded file if
the download was not successful (hash
mismatch / file size).
:type delete_on_failure: ``bool``
:param libcloud_kwargs: Extra arguments for the driver's download_object method
:type libcloud_kwargs: ``dict``
:return: True if an object has been successfully downloaded, False
otherwise.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.download_object MyFolder me.jpg /tmp/me.jpg profile1
|
[
"Download",
"an",
"object",
"to",
"the",
"specified",
"destination",
"path",
"."
] |
python
|
train
|
KelSolaar/Umbra
|
umbra/ui/widgets/notification_QLabel.py
|
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/notification_QLabel.py#L735-L742
|
def __fade_in(self):
"""
Starts the Widget fade in.
"""
self.__timer.stop()
self.__vector = self.__fade_speed
self.__timer.start()
|
[
"def",
"__fade_in",
"(",
"self",
")",
":",
"self",
".",
"__timer",
".",
"stop",
"(",
")",
"self",
".",
"__vector",
"=",
"self",
".",
"__fade_speed",
"self",
".",
"__timer",
".",
"start",
"(",
")"
] |
Starts the Widget fade in.
|
[
"Starts",
"the",
"Widget",
"fade",
"in",
"."
] |
python
|
train
|
waqasbhatti/astrobase
|
astrobase/fakelcs/recovery.py
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/fakelcs/recovery.py#L1950-L2064
|
def parallel_periodicvar_recovery(simbasedir,
period_tolerance=1.0e-3,
liststartind=None,
listmaxobjects=None,
nworkers=None):
'''This is a parallel driver for `periodicvar_recovery`.
Parameters
----------
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
The maximum difference that this function will consider between an
actual period (or its aliases) and a recovered period to consider it
as a 'recovered' period.
liststartind : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the period-finding result pickles in
`simbasedir/periodfinding`.
listmaxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartind` to effectively distribute working on a large list of
input period-finding result pickles over several sessions or machines.
nworkers : int
This is the number of parallel period-finding worker processes to use.
Returns
-------
dict
Returns a dict containing all of the period recovery results; the same
results are also written to a pickle in `simbasedir`.
'''
# figure out the periodfinding pickles directory
pfpkldir = os.path.join(simbasedir,'periodfinding')
if not os.path.exists(pfpkldir):
LOGERROR('no "periodfinding" subdirectory in %s, can\'t continue' %
simbasedir)
return None
# find all the periodfinding pickles
pfpkl_list = glob.glob(os.path.join(pfpkldir,'*periodfinding*pkl*'))
if len(pfpkl_list) > 0:
if liststartind:
pfpkl_list = pfpkl_list[liststartind:]
if listmaxobjects:
pfpkl_list = pfpkl_list[:listmaxobjects]
tasks = [(x, simbasedir, period_tolerance) for x in pfpkl_list]
pool = mp.Pool(nworkers)
results = pool.map(periodrec_worker, tasks)
pool.close()
pool.join()
resdict = {x['objectid']:x for x in results if x is not None}
actual_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and x['actual_vartype'] in PERIODIC_VARTYPES)],
dtype=np.unicode_
)
recovered_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'actual' in x['best_recovered_status'])],
dtype=np.unicode_
)
alias_twice_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'twice' in x['best_recovered_status'])],
dtype=np.unicode_
)
alias_half_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'half' in x['best_recovered_status'])],
dtype=np.unicode_
)
all_objectids = [x['objectid'] for x in results]
outdict = {'simbasedir':os.path.abspath(simbasedir),
'objectids':all_objectids,
'period_tolerance':period_tolerance,
'actual_periodicvars':actual_periodicvars,
'recovered_periodicvars':recovered_periodicvars,
'alias_twice_periodicvars':alias_twice_periodicvars,
'alias_half_periodicvars':alias_half_periodicvars,
'details':resdict}
outfile = os.path.join(simbasedir,'periodicvar-recovery.pkl')
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict
else:
LOGERROR(
'no periodfinding result pickles found in %s, can\'t continue' %
pfpkldir
)
return None
|
[
"def",
"parallel_periodicvar_recovery",
"(",
"simbasedir",
",",
"period_tolerance",
"=",
"1.0e-3",
",",
"liststartind",
"=",
"None",
",",
"listmaxobjects",
"=",
"None",
",",
"nworkers",
"=",
"None",
")",
":",
"# figure out the periodfinding pickles directory",
"pfpkldir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"simbasedir",
",",
"'periodfinding'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"pfpkldir",
")",
":",
"LOGERROR",
"(",
"'no \"periodfinding\" subdirectory in %s, can\\'t continue'",
"%",
"simbasedir",
")",
"return",
"None",
"# find all the periodfinding pickles",
"pfpkl_list",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pfpkldir",
",",
"'*periodfinding*pkl*'",
")",
")",
"if",
"len",
"(",
"pfpkl_list",
")",
">",
"0",
":",
"if",
"liststartind",
":",
"pfpkl_list",
"=",
"pfpkl_list",
"[",
"liststartind",
":",
"]",
"if",
"listmaxobjects",
":",
"pfpkl_list",
"=",
"pfpkl_list",
"[",
":",
"listmaxobjects",
"]",
"tasks",
"=",
"[",
"(",
"x",
",",
"simbasedir",
",",
"period_tolerance",
")",
"for",
"x",
"in",
"pfpkl_list",
"]",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"nworkers",
")",
"results",
"=",
"pool",
".",
"map",
"(",
"periodrec_worker",
",",
"tasks",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"resdict",
"=",
"{",
"x",
"[",
"'objectid'",
"]",
":",
"x",
"for",
"x",
"in",
"results",
"if",
"x",
"is",
"not",
"None",
"}",
"actual_periodicvars",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"'objectid'",
"]",
"for",
"x",
"in",
"results",
"if",
"(",
"x",
"is",
"not",
"None",
"and",
"x",
"[",
"'actual_vartype'",
"]",
"in",
"PERIODIC_VARTYPES",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
"recovered_periodicvars",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"'objectid'",
"]",
"for",
"x",
"in",
"results",
"if",
"(",
"x",
"is",
"not",
"None",
"and",
"'actual'",
"in",
"x",
"[",
"'best_recovered_status'",
"]",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
"alias_twice_periodicvars",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"'objectid'",
"]",
"for",
"x",
"in",
"results",
"if",
"(",
"x",
"is",
"not",
"None",
"and",
"'twice'",
"in",
"x",
"[",
"'best_recovered_status'",
"]",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
"alias_half_periodicvars",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"'objectid'",
"]",
"for",
"x",
"in",
"results",
"if",
"(",
"x",
"is",
"not",
"None",
"and",
"'half'",
"in",
"x",
"[",
"'best_recovered_status'",
"]",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
"all_objectids",
"=",
"[",
"x",
"[",
"'objectid'",
"]",
"for",
"x",
"in",
"results",
"]",
"outdict",
"=",
"{",
"'simbasedir'",
":",
"os",
".",
"path",
".",
"abspath",
"(",
"simbasedir",
")",
",",
"'objectids'",
":",
"all_objectids",
",",
"'period_tolerance'",
":",
"period_tolerance",
",",
"'actual_periodicvars'",
":",
"actual_periodicvars",
",",
"'recovered_periodicvars'",
":",
"recovered_periodicvars",
",",
"'alias_twice_periodicvars'",
":",
"alias_twice_periodicvars",
",",
"'alias_half_periodicvars'",
":",
"alias_half_periodicvars",
",",
"'details'",
":",
"resdict",
"}",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"simbasedir",
",",
"'periodicvar-recovery.pkl'",
")",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outfd",
":",
"pickle",
".",
"dump",
"(",
"outdict",
",",
"outfd",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"return",
"outdict",
"else",
":",
"LOGERROR",
"(",
"'no periodfinding result pickles found in %s, can\\'t continue'",
"%",
"pfpkldir",
")",
"return",
"None"
] |
This is a parallel driver for `periodicvar_recovery`.
Parameters
----------
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
The maximum difference that this function will consider between an
actual period (or its aliases) and a recovered period to consider it
as a 'recovered' period.
liststartind : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the period-finding result pickles in
`simbasedir/periodfinding`.
listmaxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartind` to effectively distribute working on a large list of
input period-finding result pickles over several sessions or machines.
nworkers : int
This is the number of parallel period-finding worker processes to use.
Returns
-------
dict
Returns a dict containing all of the period recovery results; the same
results are also written to a pickle in `simbasedir`.
|
[
"This",
"is",
"a",
"parallel",
"driver",
"for",
"periodicvar_recovery",
"."
] |
python
|
valid
|
fermiPy/fermipy
|
fermipy/gtanalysis.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L4982-L5001
|
def model_counts_spectrum(self, name, logemin, logemax, weighted=False):
"""Return the model counts spectrum of a source.
Parameters
----------
name : str
Source name.
"""
# EAC, we need this b/c older version of the ST don't have the right signature
try:
cs = np.array(self.like.logLike.modelCountsSpectrum(
str(name), weighted))
except (TypeError, NotImplementedError):
cs = np.array(self.like.logLike.modelCountsSpectrum(str(name)))
imin = utils.val_to_edge(self.log_energies, logemin)[0]
imax = utils.val_to_edge(self.log_energies, logemax)[0]
if imax <= imin:
raise Exception('Invalid energy range.')
return cs[imin:imax]
|
[
"def",
"model_counts_spectrum",
"(",
"self",
",",
"name",
",",
"logemin",
",",
"logemax",
",",
"weighted",
"=",
"False",
")",
":",
"# EAC, we need this b/c older version of the ST don't have the right signature",
"try",
":",
"cs",
"=",
"np",
".",
"array",
"(",
"self",
".",
"like",
".",
"logLike",
".",
"modelCountsSpectrum",
"(",
"str",
"(",
"name",
")",
",",
"weighted",
")",
")",
"except",
"(",
"TypeError",
",",
"NotImplementedError",
")",
":",
"cs",
"=",
"np",
".",
"array",
"(",
"self",
".",
"like",
".",
"logLike",
".",
"modelCountsSpectrum",
"(",
"str",
"(",
"name",
")",
")",
")",
"imin",
"=",
"utils",
".",
"val_to_edge",
"(",
"self",
".",
"log_energies",
",",
"logemin",
")",
"[",
"0",
"]",
"imax",
"=",
"utils",
".",
"val_to_edge",
"(",
"self",
".",
"log_energies",
",",
"logemax",
")",
"[",
"0",
"]",
"if",
"imax",
"<=",
"imin",
":",
"raise",
"Exception",
"(",
"'Invalid energy range.'",
")",
"return",
"cs",
"[",
"imin",
":",
"imax",
"]"
] |
Return the model counts spectrum of a source.
Parameters
----------
name : str
Source name.
|
[
"Return",
"the",
"model",
"counts",
"spectrum",
"of",
"a",
"source",
"."
] |
python
|
train
|
chrisspen/dtree
|
dtree.py
|
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1028-L1037
|
def get_gain(self, attr_name):
"""
Calculates the information gain from splitting on the given attribute.
"""
subset_entropy = 0.0
for value in iterkeys(self._attr_value_counts[attr_name]):
value_prob = self.get_value_prob(attr_name, value)
e = self.get_entropy(attr_name, value)
subset_entropy += value_prob * e
return (self.main_entropy - subset_entropy)
|
[
"def",
"get_gain",
"(",
"self",
",",
"attr_name",
")",
":",
"subset_entropy",
"=",
"0.0",
"for",
"value",
"in",
"iterkeys",
"(",
"self",
".",
"_attr_value_counts",
"[",
"attr_name",
"]",
")",
":",
"value_prob",
"=",
"self",
".",
"get_value_prob",
"(",
"attr_name",
",",
"value",
")",
"e",
"=",
"self",
".",
"get_entropy",
"(",
"attr_name",
",",
"value",
")",
"subset_entropy",
"+=",
"value_prob",
"*",
"e",
"return",
"(",
"self",
".",
"main_entropy",
"-",
"subset_entropy",
")"
] |
Calculates the information gain from splitting on the given attribute.
|
[
"Calculates",
"the",
"information",
"gain",
"from",
"splitting",
"on",
"the",
"given",
"attribute",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/bcache.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bcache.py#L211-L264
|
def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size=None):
'''
Create a backing device for attachment to a set.
Because the block size must be the same, a cache set already needs to exist.
CLI example:
.. code-block:: bash
salt '*' bcache.back_make sdc cache_mode=writeback attach=True
:param cache_mode: writethrough, writeback, writearound or none.
:param force: Overwrite existing bcaches
:param attach: Immediately attach the backing device to the set
:param bucket_size: Size of a bucket (see kernel doc)
'''
# pylint: disable=too-many-return-statements
cache = uuid()
if not cache:
log.error('No bcache set found')
return False
elif _sysfs_attr(_bcpath(dev)):
if not force:
log.error('%s already contains a bcache. Wipe it manually or use force', dev)
return False
elif uuid(dev) and not detach(dev):
return False
elif not stop(dev):
return False
dev = _devpath(dev)
block_size = _size_map(_fssys('block_size'))
# You might want to override, we pick the cache set's as sane default
if bucket_size is None:
bucket_size = _size_map(_fssys('bucket_size'))
cmd = 'make-bcache --block {0} --bucket {1} --{2} --bdev {3}'.format(block_size, bucket_size, cache_mode, dev)
if force:
cmd += ' --wipe-bcache'
if not _run_all(cmd, 'error', 'Error creating backing device {0}: %s'.format(dev)):
return False
elif not _sysfs_attr('fs/bcache/register', _devpath(dev),
'error', 'Error registering backing device {0}'.format(dev)):
return False
elif not _wait(lambda: _sysfs_attr(_bcpath(dev)) is not False,
'error', 'Backing device {0} did not register'.format(dev)):
return False
elif attach:
return attach_(dev)
return True
|
[
"def",
"back_make",
"(",
"dev",
",",
"cache_mode",
"=",
"'writeback'",
",",
"force",
"=",
"False",
",",
"attach",
"=",
"True",
",",
"bucket_size",
"=",
"None",
")",
":",
"# pylint: disable=too-many-return-statements",
"cache",
"=",
"uuid",
"(",
")",
"if",
"not",
"cache",
":",
"log",
".",
"error",
"(",
"'No bcache set found'",
")",
"return",
"False",
"elif",
"_sysfs_attr",
"(",
"_bcpath",
"(",
"dev",
")",
")",
":",
"if",
"not",
"force",
":",
"log",
".",
"error",
"(",
"'%s already contains a bcache. Wipe it manually or use force'",
",",
"dev",
")",
"return",
"False",
"elif",
"uuid",
"(",
"dev",
")",
"and",
"not",
"detach",
"(",
"dev",
")",
":",
"return",
"False",
"elif",
"not",
"stop",
"(",
"dev",
")",
":",
"return",
"False",
"dev",
"=",
"_devpath",
"(",
"dev",
")",
"block_size",
"=",
"_size_map",
"(",
"_fssys",
"(",
"'block_size'",
")",
")",
"# You might want to override, we pick the cache set's as sane default",
"if",
"bucket_size",
"is",
"None",
":",
"bucket_size",
"=",
"_size_map",
"(",
"_fssys",
"(",
"'bucket_size'",
")",
")",
"cmd",
"=",
"'make-bcache --block {0} --bucket {1} --{2} --bdev {3}'",
".",
"format",
"(",
"block_size",
",",
"bucket_size",
",",
"cache_mode",
",",
"dev",
")",
"if",
"force",
":",
"cmd",
"+=",
"' --wipe-bcache'",
"if",
"not",
"_run_all",
"(",
"cmd",
",",
"'error'",
",",
"'Error creating backing device {0}: %s'",
".",
"format",
"(",
"dev",
")",
")",
":",
"return",
"False",
"elif",
"not",
"_sysfs_attr",
"(",
"'fs/bcache/register'",
",",
"_devpath",
"(",
"dev",
")",
",",
"'error'",
",",
"'Error registering backing device {0}'",
".",
"format",
"(",
"dev",
")",
")",
":",
"return",
"False",
"elif",
"not",
"_wait",
"(",
"lambda",
":",
"_sysfs_attr",
"(",
"_bcpath",
"(",
"dev",
")",
")",
"is",
"not",
"False",
",",
"'error'",
",",
"'Backing device {0} did not register'",
".",
"format",
"(",
"dev",
")",
")",
":",
"return",
"False",
"elif",
"attach",
":",
"return",
"attach_",
"(",
"dev",
")",
"return",
"True"
] |
Create a backing device for attachment to a set.
Because the block size must be the same, a cache set already needs to exist.
CLI example:
.. code-block:: bash
salt '*' bcache.back_make sdc cache_mode=writeback attach=True
:param cache_mode: writethrough, writeback, writearound or none.
:param force: Overwrite existing bcaches
:param attach: Immediately attach the backing device to the set
:param bucket_size: Size of a bucket (see kernel doc)
|
[
"Create",
"a",
"backing",
"device",
"for",
"attachment",
"to",
"a",
"set",
".",
"Because",
"the",
"block",
"size",
"must",
"be",
"the",
"same",
"a",
"cache",
"set",
"already",
"needs",
"to",
"exist",
"."
] |
python
|
train
|
edx/django-user-tasks
|
user_tasks/tasks.py
|
https://github.com/edx/django-user-tasks/blob/6a9cf3821f4d8e202e6b48703e6a62e2a889adfb/user_tasks/tasks.py#L74-L95
|
def status(self):
"""
Get the :py:class:`~user_tasks.models.UserTaskStatus` model instance for this UserTaskMixin.
"""
task_id = self.request.id
try:
# Most calls are for existing objects, don't waste time
# preparing creation arguments unless necessary
return UserTaskStatus.objects.get(task_id=task_id)
except UserTaskStatus.DoesNotExist:
# Probably an eager task that skipped the before_task_publish
# signal (or an atomic view where the new record hasn't been
# committed yet). Create a record for it.
arguments_dict = self.arguments_as_dict(*self.request.args, **self.request.kwargs)
name = self.generate_name(arguments_dict)
task_class = '.'.join([self.__class__.__module__, self.__class__.__name__])
total_steps = self.calculate_total_steps(arguments_dict)
user_id = arguments_dict['user_id']
# Use get_or_create() again just in case another process created it in the meantime
return UserTaskStatus.objects.get_or_create(
task_id=task_id, defaults={'user_id': user_id, 'name': name, 'task_class': task_class,
'total_steps': total_steps})[0]
|
[
"def",
"status",
"(",
"self",
")",
":",
"task_id",
"=",
"self",
".",
"request",
".",
"id",
"try",
":",
"# Most calls are for existing objects, don't waste time",
"# preparing creation arguments unless necessary",
"return",
"UserTaskStatus",
".",
"objects",
".",
"get",
"(",
"task_id",
"=",
"task_id",
")",
"except",
"UserTaskStatus",
".",
"DoesNotExist",
":",
"# Probably an eager task that skipped the before_task_publish",
"# signal (or an atomic view where the new record hasn't been",
"# committed yet). Create a record for it.",
"arguments_dict",
"=",
"self",
".",
"arguments_as_dict",
"(",
"*",
"self",
".",
"request",
".",
"args",
",",
"*",
"*",
"self",
".",
"request",
".",
"kwargs",
")",
"name",
"=",
"self",
".",
"generate_name",
"(",
"arguments_dict",
")",
"task_class",
"=",
"'.'",
".",
"join",
"(",
"[",
"self",
".",
"__class__",
".",
"__module__",
",",
"self",
".",
"__class__",
".",
"__name__",
"]",
")",
"total_steps",
"=",
"self",
".",
"calculate_total_steps",
"(",
"arguments_dict",
")",
"user_id",
"=",
"arguments_dict",
"[",
"'user_id'",
"]",
"# Use get_or_create() again just in case another process created it in the meantime",
"return",
"UserTaskStatus",
".",
"objects",
".",
"get_or_create",
"(",
"task_id",
"=",
"task_id",
",",
"defaults",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'name'",
":",
"name",
",",
"'task_class'",
":",
"task_class",
",",
"'total_steps'",
":",
"total_steps",
"}",
")",
"[",
"0",
"]"
] |
Get the :py:class:`~user_tasks.models.UserTaskStatus` model instance for this UserTaskMixin.
|
[
"Get",
"the",
":",
"py",
":",
"class",
":",
"~user_tasks",
".",
"models",
".",
"UserTaskStatus",
"model",
"instance",
"for",
"this",
"UserTaskMixin",
"."
] |
python
|
train
|
apache/incubator-heron
|
heron/tools/tracker/src/python/handlers/machineshandler.py
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/machineshandler.py#L58-L103
|
def get(self):
""" get method """
clusters = self.get_arguments(constants.PARAM_CLUSTER)
environs = self.get_arguments(constants.PARAM_ENVIRON)
topology_names = self.get_arguments(constants.PARAM_TOPOLOGY)
ret = {}
if len(topology_names) > 1:
if not clusters:
message = "Missing argument" + constants.PARAM_CLUSTER
self.write_error_response(message)
return
if not environs:
message = "Missing argument" + constants.PARAM_ENVIRON
self.write_error_response(message)
return
ret = {}
topologies = self.tracker.topologies
for topology in topologies:
cluster = topology.cluster
environ = topology.environ
topology_name = topology.name
if not cluster or not environ:
continue
# This cluster is not asked for.
if clusters and cluster not in clusters:
continue
# This environ is not asked for.
if environs and environ not in environs:
continue
if topology_names and topology_name not in topology_names:
continue
if cluster not in ret:
ret[cluster] = {}
if environ not in ret[cluster]:
ret[cluster][environ] = {}
ret[cluster][environ][topology_name] = topology.get_machines()
self.write_success_response(ret)
|
[
"def",
"get",
"(",
"self",
")",
":",
"clusters",
"=",
"self",
".",
"get_arguments",
"(",
"constants",
".",
"PARAM_CLUSTER",
")",
"environs",
"=",
"self",
".",
"get_arguments",
"(",
"constants",
".",
"PARAM_ENVIRON",
")",
"topology_names",
"=",
"self",
".",
"get_arguments",
"(",
"constants",
".",
"PARAM_TOPOLOGY",
")",
"ret",
"=",
"{",
"}",
"if",
"len",
"(",
"topology_names",
")",
">",
"1",
":",
"if",
"not",
"clusters",
":",
"message",
"=",
"\"Missing argument\"",
"+",
"constants",
".",
"PARAM_CLUSTER",
"self",
".",
"write_error_response",
"(",
"message",
")",
"return",
"if",
"not",
"environs",
":",
"message",
"=",
"\"Missing argument\"",
"+",
"constants",
".",
"PARAM_ENVIRON",
"self",
".",
"write_error_response",
"(",
"message",
")",
"return",
"ret",
"=",
"{",
"}",
"topologies",
"=",
"self",
".",
"tracker",
".",
"topologies",
"for",
"topology",
"in",
"topologies",
":",
"cluster",
"=",
"topology",
".",
"cluster",
"environ",
"=",
"topology",
".",
"environ",
"topology_name",
"=",
"topology",
".",
"name",
"if",
"not",
"cluster",
"or",
"not",
"environ",
":",
"continue",
"# This cluster is not asked for.",
"if",
"clusters",
"and",
"cluster",
"not",
"in",
"clusters",
":",
"continue",
"# This environ is not asked for.",
"if",
"environs",
"and",
"environ",
"not",
"in",
"environs",
":",
"continue",
"if",
"topology_names",
"and",
"topology_name",
"not",
"in",
"topology_names",
":",
"continue",
"if",
"cluster",
"not",
"in",
"ret",
":",
"ret",
"[",
"cluster",
"]",
"=",
"{",
"}",
"if",
"environ",
"not",
"in",
"ret",
"[",
"cluster",
"]",
":",
"ret",
"[",
"cluster",
"]",
"[",
"environ",
"]",
"=",
"{",
"}",
"ret",
"[",
"cluster",
"]",
"[",
"environ",
"]",
"[",
"topology_name",
"]",
"=",
"topology",
".",
"get_machines",
"(",
")",
"self",
".",
"write_success_response",
"(",
"ret",
")"
] |
get method
|
[
"get",
"method"
] |
python
|
valid
|
cggh/scikit-allel
|
allel/stats/selection.py
|
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L661-L736
|
def xpnsl(h1, h2, use_threads=True):
"""Cross-population version of the NSL statistic.
Parameters
----------
h1 : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array for the first population.
h2 : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array for the second population.
use_threads : bool, optional
If True use multiple threads to compute.
Returns
-------
score : ndarray, float, shape (n_variants,)
Unstandardized XPNSL scores.
"""
# check inputs
h1 = asarray_ndim(h1, 2)
check_integer_dtype(h1)
h2 = asarray_ndim(h2, 2)
check_integer_dtype(h2)
check_dim0_aligned(h1, h2)
h1 = memoryview_safe(h1)
h2 = memoryview_safe(h2)
if use_threads and multiprocessing.cpu_count() > 1:
# use multiple threads
# setup threadpool
pool = ThreadPool(min(4, multiprocessing.cpu_count()))
# scan forward
res1_fwd = pool.apply_async(nsl_scan, args=(h1,))
res2_fwd = pool.apply_async(nsl_scan, args=(h2,))
# scan backward
res1_rev = pool.apply_async(nsl_scan, args=(h1[::-1],))
res2_rev = pool.apply_async(nsl_scan, args=(h2[::-1],))
# wait for both to finish
pool.close()
pool.join()
# obtain results
nsl1_fwd = res1_fwd.get()
nsl2_fwd = res2_fwd.get()
nsl1_rev = res1_rev.get()
nsl2_rev = res2_rev.get()
# cleanup
pool.terminate()
else:
# compute without threads
# scan forward
nsl1_fwd = nsl_scan(h1)
nsl2_fwd = nsl_scan(h2)
# scan backward
nsl1_rev = nsl_scan(h1[::-1])
nsl2_rev = nsl_scan(h2[::-1])
# handle reverse scans
nsl1_rev = nsl1_rev[::-1]
nsl2_rev = nsl2_rev[::-1]
# compute unstandardized score
nsl1 = nsl1_fwd + nsl1_rev
nsl2 = nsl2_fwd + nsl2_rev
score = np.log(nsl1 / nsl2)
return score
|
[
"def",
"xpnsl",
"(",
"h1",
",",
"h2",
",",
"use_threads",
"=",
"True",
")",
":",
"# check inputs",
"h1",
"=",
"asarray_ndim",
"(",
"h1",
",",
"2",
")",
"check_integer_dtype",
"(",
"h1",
")",
"h2",
"=",
"asarray_ndim",
"(",
"h2",
",",
"2",
")",
"check_integer_dtype",
"(",
"h2",
")",
"check_dim0_aligned",
"(",
"h1",
",",
"h2",
")",
"h1",
"=",
"memoryview_safe",
"(",
"h1",
")",
"h2",
"=",
"memoryview_safe",
"(",
"h2",
")",
"if",
"use_threads",
"and",
"multiprocessing",
".",
"cpu_count",
"(",
")",
">",
"1",
":",
"# use multiple threads",
"# setup threadpool",
"pool",
"=",
"ThreadPool",
"(",
"min",
"(",
"4",
",",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
")",
"# scan forward",
"res1_fwd",
"=",
"pool",
".",
"apply_async",
"(",
"nsl_scan",
",",
"args",
"=",
"(",
"h1",
",",
")",
")",
"res2_fwd",
"=",
"pool",
".",
"apply_async",
"(",
"nsl_scan",
",",
"args",
"=",
"(",
"h2",
",",
")",
")",
"# scan backward",
"res1_rev",
"=",
"pool",
".",
"apply_async",
"(",
"nsl_scan",
",",
"args",
"=",
"(",
"h1",
"[",
":",
":",
"-",
"1",
"]",
",",
")",
")",
"res2_rev",
"=",
"pool",
".",
"apply_async",
"(",
"nsl_scan",
",",
"args",
"=",
"(",
"h2",
"[",
":",
":",
"-",
"1",
"]",
",",
")",
")",
"# wait for both to finish",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"# obtain results",
"nsl1_fwd",
"=",
"res1_fwd",
".",
"get",
"(",
")",
"nsl2_fwd",
"=",
"res2_fwd",
".",
"get",
"(",
")",
"nsl1_rev",
"=",
"res1_rev",
".",
"get",
"(",
")",
"nsl2_rev",
"=",
"res2_rev",
".",
"get",
"(",
")",
"# cleanup",
"pool",
".",
"terminate",
"(",
")",
"else",
":",
"# compute without threads",
"# scan forward",
"nsl1_fwd",
"=",
"nsl_scan",
"(",
"h1",
")",
"nsl2_fwd",
"=",
"nsl_scan",
"(",
"h2",
")",
"# scan backward",
"nsl1_rev",
"=",
"nsl_scan",
"(",
"h1",
"[",
":",
":",
"-",
"1",
"]",
")",
"nsl2_rev",
"=",
"nsl_scan",
"(",
"h2",
"[",
":",
":",
"-",
"1",
"]",
")",
"# handle reverse scans",
"nsl1_rev",
"=",
"nsl1_rev",
"[",
":",
":",
"-",
"1",
"]",
"nsl2_rev",
"=",
"nsl2_rev",
"[",
":",
":",
"-",
"1",
"]",
"# compute unstandardized score",
"nsl1",
"=",
"nsl1_fwd",
"+",
"nsl1_rev",
"nsl2",
"=",
"nsl2_fwd",
"+",
"nsl2_rev",
"score",
"=",
"np",
".",
"log",
"(",
"nsl1",
"/",
"nsl2",
")",
"return",
"score"
] |
Cross-population version of the NSL statistic.
Parameters
----------
h1 : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array for the first population.
h2 : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array for the second population.
use_threads : bool, optional
If True use multiple threads to compute.
Returns
-------
score : ndarray, float, shape (n_variants,)
Unstandardized XPNSL scores.
|
[
"Cross",
"-",
"population",
"version",
"of",
"the",
"NSL",
"statistic",
"."
] |
python
|
train
|
jgrassler/mkdocs-pandoc
|
mkdocs_pandoc/filters/tables.py
|
https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L60-L169
|
def convert_table(self, block):
""""Converts a table to grid table format"""
lines_orig = block.split('\n')
lines_orig.pop() # Remove extra newline at end of block
widest_cell = [] # Will hold the width of the widest cell for each column
widest_word = [] # Will hold the width of the widest word for each column
widths = [] # Will hold the computed widths of grid table columns
rows = [] # Will hold table cells during processing
lines = [] # Will hold the finished table
has_border = False # Will be set to True if this is a bordered table
width_unit = 0.0 # This number is used to divide up self.width according
# to the following formula:
#
# self.width = width_unit * maxwidth
#
# Where maxwidth is the sum over all elements of
# widest_cell.
# Only process tables, leave everything else untouched
if not self.test(None, block):
return lines_orig
if lines_orig[0].startswith('|'):
has_border = True
# Initialize width arrays
for i in range(0, len(self._split_row(lines_orig[0], has_border))):
widest_cell.append(0)
widest_word.append(0)
widths.append(0)
# Parse lines into array of cells and record width of widest cell/word
for line in lines_orig:
row = self._split_row(line, has_border)
# pad widest_cell to account for under length first row
for i in range(0, len(row) - len(widest_cell)):
widest_cell.append(0)
widest_word.append(0)
widths.append(0)
for i in range(0, len(row)):
# Record cell width
if len(row[i]) > widest_cell[i]:
widest_cell[i] = len(row[i])
# Record longest word
words = row[i].split()
for word in words:
# Keep URLs from throwing the word length count off too badly.
match = re.match(r'\[(.*?)\]\(.*?\)', word)
if match:
word = match.group(1)
if len(word) > widest_word[i]:
widest_word[i] = len(word)
rows.append(row)
# Remove table header divider line from rows
rows.pop(1)
# Compute first approximation of column widths based on maximum cell width
for width in widest_cell:
width_unit += float(width)
width_unit = self.width / width_unit
for i in range(0, len(widest_cell)):
widths[i] = int(widest_cell[i] * width_unit)
# Add rounding errors to narrowest column
if sum(widths) < self.width:
widths[widths.index(min(widths))] += self.width - sum(widths)
# Attempt to correct first approximation of column widths based on
# words that fail to fit their cell's width (if this fails textwrap
# will break up long words but since it does not add hyphens this
# should be avoided)
for i in range(0, len(widths)):
if widths[i] < widest_word[i]:
offset = widest_word[i] - widths[i]
for j in range(0, len(widths)):
if widths[j] - widest_word[j] >= offset:
widths[j] -= offset
widths[i] += offset
offset = 0
lines.append(self.ruler_line(widths, linetype='-'))
# Only add header row if it contains more than just whitespace
if ''.join(rows[0]).strip() != '':
lines.extend(self.wrap_row(widths, rows[0]))
lines.append(self.ruler_line(widths, linetype='='))
for row in rows[1:]:
# Skip empty rows
if ''.join(row).strip() == '':
continue
lines.extend(self.wrap_row(widths, row))
lines.append(self.ruler_line(widths, linetype='-'))
# Append empty line after table
lines.append('')
return lines
|
[
"def",
"convert_table",
"(",
"self",
",",
"block",
")",
":",
"lines_orig",
"=",
"block",
".",
"split",
"(",
"'\\n'",
")",
"lines_orig",
".",
"pop",
"(",
")",
"# Remove extra newline at end of block",
"widest_cell",
"=",
"[",
"]",
"# Will hold the width of the widest cell for each column",
"widest_word",
"=",
"[",
"]",
"# Will hold the width of the widest word for each column",
"widths",
"=",
"[",
"]",
"# Will hold the computed widths of grid table columns",
"rows",
"=",
"[",
"]",
"# Will hold table cells during processing",
"lines",
"=",
"[",
"]",
"# Will hold the finished table",
"has_border",
"=",
"False",
"# Will be set to True if this is a bordered table",
"width_unit",
"=",
"0.0",
"# This number is used to divide up self.width according",
"# to the following formula:",
"#",
"# self.width = width_unit * maxwidth",
"#",
"# Where maxwidth is the sum over all elements of",
"# widest_cell.",
"# Only process tables, leave everything else untouched",
"if",
"not",
"self",
".",
"test",
"(",
"None",
",",
"block",
")",
":",
"return",
"lines_orig",
"if",
"lines_orig",
"[",
"0",
"]",
".",
"startswith",
"(",
"'|'",
")",
":",
"has_border",
"=",
"True",
"# Initialize width arrays",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_split_row",
"(",
"lines_orig",
"[",
"0",
"]",
",",
"has_border",
")",
")",
")",
":",
"widest_cell",
".",
"append",
"(",
"0",
")",
"widest_word",
".",
"append",
"(",
"0",
")",
"widths",
".",
"append",
"(",
"0",
")",
"# Parse lines into array of cells and record width of widest cell/word",
"for",
"line",
"in",
"lines_orig",
":",
"row",
"=",
"self",
".",
"_split_row",
"(",
"line",
",",
"has_border",
")",
"# pad widest_cell to account for under length first row",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
")",
"-",
"len",
"(",
"widest_cell",
")",
")",
":",
"widest_cell",
".",
"append",
"(",
"0",
")",
"widest_word",
".",
"append",
"(",
"0",
")",
"widths",
".",
"append",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
")",
")",
":",
"# Record cell width",
"if",
"len",
"(",
"row",
"[",
"i",
"]",
")",
">",
"widest_cell",
"[",
"i",
"]",
":",
"widest_cell",
"[",
"i",
"]",
"=",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"# Record longest word",
"words",
"=",
"row",
"[",
"i",
"]",
".",
"split",
"(",
")",
"for",
"word",
"in",
"words",
":",
"# Keep URLs from throwing the word length count off too badly.",
"match",
"=",
"re",
".",
"match",
"(",
"r'\\[(.*?)\\]\\(.*?\\)'",
",",
"word",
")",
"if",
"match",
":",
"word",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"len",
"(",
"word",
")",
">",
"widest_word",
"[",
"i",
"]",
":",
"widest_word",
"[",
"i",
"]",
"=",
"len",
"(",
"word",
")",
"rows",
".",
"append",
"(",
"row",
")",
"# Remove table header divider line from rows",
"rows",
".",
"pop",
"(",
"1",
")",
"# Compute first approximation of column widths based on maximum cell width",
"for",
"width",
"in",
"widest_cell",
":",
"width_unit",
"+=",
"float",
"(",
"width",
")",
"width_unit",
"=",
"self",
".",
"width",
"/",
"width_unit",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"widest_cell",
")",
")",
":",
"widths",
"[",
"i",
"]",
"=",
"int",
"(",
"widest_cell",
"[",
"i",
"]",
"*",
"width_unit",
")",
"# Add rounding errors to narrowest column",
"if",
"sum",
"(",
"widths",
")",
"<",
"self",
".",
"width",
":",
"widths",
"[",
"widths",
".",
"index",
"(",
"min",
"(",
"widths",
")",
")",
"]",
"+=",
"self",
".",
"width",
"-",
"sum",
"(",
"widths",
")",
"# Attempt to correct first approximation of column widths based on",
"# words that fail to fit their cell's width (if this fails textwrap",
"# will break up long words but since it does not add hyphens this",
"# should be avoided)",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"widths",
")",
")",
":",
"if",
"widths",
"[",
"i",
"]",
"<",
"widest_word",
"[",
"i",
"]",
":",
"offset",
"=",
"widest_word",
"[",
"i",
"]",
"-",
"widths",
"[",
"i",
"]",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"widths",
")",
")",
":",
"if",
"widths",
"[",
"j",
"]",
"-",
"widest_word",
"[",
"j",
"]",
">=",
"offset",
":",
"widths",
"[",
"j",
"]",
"-=",
"offset",
"widths",
"[",
"i",
"]",
"+=",
"offset",
"offset",
"=",
"0",
"lines",
".",
"append",
"(",
"self",
".",
"ruler_line",
"(",
"widths",
",",
"linetype",
"=",
"'-'",
")",
")",
"# Only add header row if it contains more than just whitespace",
"if",
"''",
".",
"join",
"(",
"rows",
"[",
"0",
"]",
")",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"lines",
".",
"extend",
"(",
"self",
".",
"wrap_row",
"(",
"widths",
",",
"rows",
"[",
"0",
"]",
")",
")",
"lines",
".",
"append",
"(",
"self",
".",
"ruler_line",
"(",
"widths",
",",
"linetype",
"=",
"'='",
")",
")",
"for",
"row",
"in",
"rows",
"[",
"1",
":",
"]",
":",
"# Skip empty rows",
"if",
"''",
".",
"join",
"(",
"row",
")",
".",
"strip",
"(",
")",
"==",
"''",
":",
"continue",
"lines",
".",
"extend",
"(",
"self",
".",
"wrap_row",
"(",
"widths",
",",
"row",
")",
")",
"lines",
".",
"append",
"(",
"self",
".",
"ruler_line",
"(",
"widths",
",",
"linetype",
"=",
"'-'",
")",
")",
"# Append empty line after table",
"lines",
".",
"append",
"(",
"''",
")",
"return",
"lines"
] |
Converts a table to grid table format
|
[
"Converts",
"a",
"table",
"to",
"grid",
"table",
"format"
] |
python
|
train
|
bram85/topydo
|
topydo/ui/columns/CommandLineWidget.py
|
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/CommandLineWidget.py#L63-L76
|
def _history_move(self, p_step):
"""
Changes current value of the command-line to the value obtained from
history_tmp list with index calculated by addition of p_step to the
current position in the command history (history_pos attribute).
Also saves value of the command-line (before changing it) to history_tmp
for potential later access.
"""
if len(self.history) > 0:
# don't pollute real history - use temporary storage
self.history_tmp[self.history_pos] = self.edit_text
self.history_pos = self.history_pos + p_step
self.set_edit_text(self.history_tmp[self.history_pos])
|
[
"def",
"_history_move",
"(",
"self",
",",
"p_step",
")",
":",
"if",
"len",
"(",
"self",
".",
"history",
")",
">",
"0",
":",
"# don't pollute real history - use temporary storage",
"self",
".",
"history_tmp",
"[",
"self",
".",
"history_pos",
"]",
"=",
"self",
".",
"edit_text",
"self",
".",
"history_pos",
"=",
"self",
".",
"history_pos",
"+",
"p_step",
"self",
".",
"set_edit_text",
"(",
"self",
".",
"history_tmp",
"[",
"self",
".",
"history_pos",
"]",
")"
] |
Changes current value of the command-line to the value obtained from
history_tmp list with index calculated by addition of p_step to the
current position in the command history (history_pos attribute).
Also saves value of the command-line (before changing it) to history_tmp
for potential later access.
|
[
"Changes",
"current",
"value",
"of",
"the",
"command",
"-",
"line",
"to",
"the",
"value",
"obtained",
"from",
"history_tmp",
"list",
"with",
"index",
"calculated",
"by",
"addition",
"of",
"p_step",
"to",
"the",
"current",
"position",
"in",
"the",
"command",
"history",
"(",
"history_pos",
"attribute",
")",
"."
] |
python
|
train
|
Esri/ArcREST
|
src/arcrest/manageorg/_portals.py
|
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_portals.py#L91-L100
|
def portal(self, portalID=None):
"""returns a specific reference to a portal"""
if portalID is None:
portalID = self.portalSelf.id
url = "%s/%s" % (self.root, portalID)
return Portal(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=True)
|
[
"def",
"portal",
"(",
"self",
",",
"portalID",
"=",
"None",
")",
":",
"if",
"portalID",
"is",
"None",
":",
"portalID",
"=",
"self",
".",
"portalSelf",
".",
"id",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"root",
",",
"portalID",
")",
"return",
"Portal",
"(",
"url",
"=",
"url",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
",",
"initalize",
"=",
"True",
")"
] |
returns a specific reference to a portal
|
[
"returns",
"a",
"specific",
"reference",
"to",
"a",
"portal"
] |
python
|
train
|
solvebio/solvebio-python
|
solvebio/cli/credentials.py
|
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/credentials.py#L76-L93
|
def get_credentials():
"""
Returns the user's stored API key if a valid credentials file is found.
Raises CredentialsError if no valid credentials file is found.
"""
try:
netrc_path = netrc.path()
auths = netrc(netrc_path).authenticators(
urlparse(solvebio.api_host).netloc)
except (IOError, TypeError, NetrcParseError) as e:
raise CredentialsError(
'Could not open credentials file: ' + str(e))
if auths:
# auths = (login, account, password)
return auths[2]
else:
return None
|
[
"def",
"get_credentials",
"(",
")",
":",
"try",
":",
"netrc_path",
"=",
"netrc",
".",
"path",
"(",
")",
"auths",
"=",
"netrc",
"(",
"netrc_path",
")",
".",
"authenticators",
"(",
"urlparse",
"(",
"solvebio",
".",
"api_host",
")",
".",
"netloc",
")",
"except",
"(",
"IOError",
",",
"TypeError",
",",
"NetrcParseError",
")",
"as",
"e",
":",
"raise",
"CredentialsError",
"(",
"'Could not open credentials file: '",
"+",
"str",
"(",
"e",
")",
")",
"if",
"auths",
":",
"# auths = (login, account, password)",
"return",
"auths",
"[",
"2",
"]",
"else",
":",
"return",
"None"
] |
Returns the user's stored API key if a valid credentials file is found.
Raises CredentialsError if no valid credentials file is found.
|
[
"Returns",
"the",
"user",
"s",
"stored",
"API",
"key",
"if",
"a",
"valid",
"credentials",
"file",
"is",
"found",
".",
"Raises",
"CredentialsError",
"if",
"no",
"valid",
"credentials",
"file",
"is",
"found",
"."
] |
python
|
test
|
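
The get_credentials record above relies on solvebio's own netrc wrapper (which adds a path() helper). For readers who want to try the same lookup pattern outside solvebio, here is a minimal sketch using only the standard library; the host name and the ~/.netrc location are illustrative assumptions, not solvebio's API.

import os
from netrc import netrc, NetrcParseError

def read_stored_password(host="api.solvebio.com", path=None):
    """Return the password stored for `host` in ~/.netrc, or None if absent."""
    path = path or os.path.expanduser("~/.netrc")
    try:
        # authenticators() returns (login, account, password) or None
        auths = netrc(path).authenticators(host)
    except (IOError, NetrcParseError) as exc:
        raise RuntimeError("Could not open credentials file: %s" % exc)
    return auths[2] if auths else None
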
nok/sklearn-porter
|
sklearn_porter/estimator/classifier/AdaBoostClassifier/__init__.py
|
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/AdaBoostClassifier/__init__.py#L199-L254
|
def create_branches(self, left_nodes, right_nodes, threshold,
value, features, node, depth, init=False):
"""
Parse and port a single tree estimator.
Parameters
----------
:param left_nodes : object
The left children node.
:param right_nodes : object
            The right children node.
:param threshold : object
The decision threshold.
:param value : object
The label or class.
:param features : object
The feature values.
:param node : int
The current node.
:param depth : int
The tree depth.
        :param init : bool, default: False
Whether it's the initial tree or not.
Returns
-------
:return out : string
The ported single tree as function or method.
"""
out = ''
if threshold[node] != -2.:
if not init:
out += '\n'
temp = self.temp('if', n_indents=depth, skipping=init)
out += temp.format(features[node], '<=', self.repr(threshold[node]))
if left_nodes[node] != -1.:
out += self.create_branches(
left_nodes, right_nodes, threshold, value,
features, left_nodes[node], depth + 1)
out += '\n'
out += self.temp('else', n_indents=depth)
if right_nodes[node] != -1.:
out += self.create_branches(
left_nodes, right_nodes, threshold, value,
features, right_nodes[node], depth + 1)
out += '\n'
out += self.temp('endif', n_indents=depth)
else:
clazzes = []
temp = self.temp('arr', n_indents=depth)
for i, val in enumerate(value[node][0]):
clazz = temp.format(i, self.repr(val))
clazz = '\n' + clazz
clazzes.append(clazz)
out += self.temp('join').join(clazzes) + self.temp('join')
return out
|
[
"def",
"create_branches",
"(",
"self",
",",
"left_nodes",
",",
"right_nodes",
",",
"threshold",
",",
"value",
",",
"features",
",",
"node",
",",
"depth",
",",
"init",
"=",
"False",
")",
":",
"out",
"=",
"''",
"if",
"threshold",
"[",
"node",
"]",
"!=",
"-",
"2.",
":",
"if",
"not",
"init",
":",
"out",
"+=",
"'\\n'",
"temp",
"=",
"self",
".",
"temp",
"(",
"'if'",
",",
"n_indents",
"=",
"depth",
",",
"skipping",
"=",
"init",
")",
"out",
"+=",
"temp",
".",
"format",
"(",
"features",
"[",
"node",
"]",
",",
"'<='",
",",
"self",
".",
"repr",
"(",
"threshold",
"[",
"node",
"]",
")",
")",
"if",
"left_nodes",
"[",
"node",
"]",
"!=",
"-",
"1.",
":",
"out",
"+=",
"self",
".",
"create_branches",
"(",
"left_nodes",
",",
"right_nodes",
",",
"threshold",
",",
"value",
",",
"features",
",",
"left_nodes",
"[",
"node",
"]",
",",
"depth",
"+",
"1",
")",
"out",
"+=",
"'\\n'",
"out",
"+=",
"self",
".",
"temp",
"(",
"'else'",
",",
"n_indents",
"=",
"depth",
")",
"if",
"right_nodes",
"[",
"node",
"]",
"!=",
"-",
"1.",
":",
"out",
"+=",
"self",
".",
"create_branches",
"(",
"left_nodes",
",",
"right_nodes",
",",
"threshold",
",",
"value",
",",
"features",
",",
"right_nodes",
"[",
"node",
"]",
",",
"depth",
"+",
"1",
")",
"out",
"+=",
"'\\n'",
"out",
"+=",
"self",
".",
"temp",
"(",
"'endif'",
",",
"n_indents",
"=",
"depth",
")",
"else",
":",
"clazzes",
"=",
"[",
"]",
"temp",
"=",
"self",
".",
"temp",
"(",
"'arr'",
",",
"n_indents",
"=",
"depth",
")",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"value",
"[",
"node",
"]",
"[",
"0",
"]",
")",
":",
"clazz",
"=",
"temp",
".",
"format",
"(",
"i",
",",
"self",
".",
"repr",
"(",
"val",
")",
")",
"clazz",
"=",
"'\\n'",
"+",
"clazz",
"clazzes",
".",
"append",
"(",
"clazz",
")",
"out",
"+=",
"self",
".",
"temp",
"(",
"'join'",
")",
".",
"join",
"(",
"clazzes",
")",
"+",
"self",
".",
"temp",
"(",
"'join'",
")",
"return",
"out"
] |
Parse and port a single tree estimator.
Parameters
----------
:param left_nodes : object
The left children node.
:param right_nodes : object
The right children node.
:param threshold : object
The decision threshold.
:param value : object
The label or class.
:param features : object
The feature values.
:param node : int
The current node.
:param depth : int
The tree depth.
:param init : bool, default: False
Whether it's the initial tree or not.
Returns
-------
:return out : string
The ported single tree as function or method.
|
[
"Parse",
"and",
"port",
"a",
"single",
"tree",
"estimator",
"."
] |
python
|
train
|
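
The create_branches record above recurses over the parallel arrays a fitted scikit-learn tree exposes (children_left, children_right, threshold, value). As a rough, self-contained illustration of that traversal (not sklearn-porter's template machinery), the sketch below prints nested if/else rules from a small fitted classifier; the dataset and output format are arbitrary choices.

# Walk a fitted sklearn tree's parallel arrays and print nested if/else rules.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

def print_branches(tree, node=0, depth=0):
    left, right = tree.children_left[node], tree.children_right[node]
    indent = "    " * depth
    if left == -1:  # children_left[node] == -1 marks a leaf
        print("%sreturn %s" % (indent, tree.value[node][0].tolist()))
        return
    print("%sif features[%d] <= %.4f:" % (indent, tree.feature[node], tree.threshold[node]))
    print_branches(tree, left, depth + 1)
    print("%selse:" % indent)
    print_branches(tree, right, depth + 1)

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
print_branches(clf.tree_)
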
ciena/afkak
|
afkak/kafkacodec.py
|
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L250-L265
|
def decode_produce_response(cls, data):
"""
Decode bytes to a ProduceResponse
:param bytes data: bytes to decode
:returns: iterable of `afkak.common.ProduceResponse`
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _i in range(num_topics):
topic, cur = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error, offset), cur) = relative_unpack('>ihq', data, cur)
yield ProduceResponse(topic, partition, error, offset)
|
[
"def",
"decode_produce_response",
"(",
"cls",
",",
"data",
")",
":",
"(",
"(",
"correlation_id",
",",
"num_topics",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>ii'",
",",
"data",
",",
"0",
")",
"for",
"_i",
"in",
"range",
"(",
"num_topics",
")",
":",
"topic",
",",
"cur",
"=",
"read_short_ascii",
"(",
"data",
",",
"cur",
")",
"(",
"(",
"num_partitions",
",",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>i'",
",",
"data",
",",
"cur",
")",
"for",
"_i",
"in",
"range",
"(",
"num_partitions",
")",
":",
"(",
"(",
"partition",
",",
"error",
",",
"offset",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>ihq'",
",",
"data",
",",
"cur",
")",
"yield",
"ProduceResponse",
"(",
"topic",
",",
"partition",
",",
"error",
",",
"offset",
")"
] |
Decode bytes to a ProduceResponse
:param bytes data: bytes to decode
:returns: iterable of `afkak.common.ProduceResponse`
|
[
"Decode",
"bytes",
"to",
"a",
"ProduceResponse"
] |
python
|
train
|
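
relative_unpack and read_short_ascii in the record above are afkak helpers; the sketch below re-implements just enough of them with the stdlib struct module to show the cursor-based, big-endian parsing on a hypothetical payload.

import struct

def relative_unpack(fmt, data, cur):
    # Unpack `fmt` at offset `cur` and return (values, new_cursor).
    size = struct.calcsize(fmt)
    return struct.unpack(fmt, data[cur:cur + size]), cur + size

def read_short_ascii(data, cur):
    # A short (2-byte) length prefix followed by that many ASCII bytes.
    (length,), cur = relative_unpack('>h', data, cur)
    return data[cur:cur + length].decode('ascii'), cur + length

# Hypothetical payload: correlation_id=1, one topic "t", one partition record.
payload = (struct.pack('>ii', 1, 1) + struct.pack('>h', 1) + b't'
           + struct.pack('>i', 1) + struct.pack('>ihq', 0, 0, 42))
(correlation_id, num_topics), cur = relative_unpack('>ii', payload, 0)
topic, cur = read_short_ascii(payload, cur)
(num_partitions,), cur = relative_unpack('>i', payload, cur)
(partition, error, offset), cur = relative_unpack('>ihq', payload, cur)
print(topic, partition, error, offset)  # t 0 0 42
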
pybel/pybel
|
src/pybel/struct/summary/edge_summary.py
|
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/summary/edge_summary.py#L24-L35
|
def iter_annotation_value_pairs(graph) -> Iterable[Tuple[str, str]]:
"""Iterate over the key/value pairs, with duplicates, for each annotation used in a BEL graph.
:param pybel.BELGraph graph: A BEL graph
"""
return (
(key, value)
for _, _, data in graph.edges(data=True)
if ANNOTATIONS in data
for key, values in data[ANNOTATIONS].items()
for value in values
)
|
[
"def",
"iter_annotation_value_pairs",
"(",
"graph",
")",
"->",
"Iterable",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
":",
"return",
"(",
"(",
"key",
",",
"value",
")",
"for",
"_",
",",
"_",
",",
"data",
"in",
"graph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
"if",
"ANNOTATIONS",
"in",
"data",
"for",
"key",
",",
"values",
"in",
"data",
"[",
"ANNOTATIONS",
"]",
".",
"items",
"(",
")",
"for",
"value",
"in",
"values",
")"
] |
Iterate over the key/value pairs, with duplicates, for each annotation used in a BEL graph.
:param pybel.BELGraph graph: A BEL graph
|
[
"Iterate",
"over",
"the",
"key",
"/",
"value",
"pairs",
"with",
"duplicates",
"for",
"each",
"annotation",
"used",
"in",
"a",
"BEL",
"graph",
"."
] |
python
|
train
|
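
The generator expression in the record above is compact; the equivalent explicit loops, run over a plain list of edge-data dicts instead of a BELGraph, make the flattening easier to follow (the ANNOTATIONS key below is a stand-in, not pybel's actual constant).

ANNOTATIONS = 'annotations'  # stand-in for pybel's constant

edges = [
    {ANNOTATIONS: {'Species': {'9606': True}, 'Confidence': {'High': True}}},
    {'relation': 'increases'},  # edges without annotations are skipped
]

def iter_pairs(edge_data_list):
    for data in edge_data_list:
        if ANNOTATIONS not in data:
            continue
        for key, values in data[ANNOTATIONS].items():
            for value in values:  # duplicates are kept on purpose
                yield key, value

print(sorted(iter_pairs(edges)))  # [('Confidence', 'High'), ('Species', '9606')]
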
wheerd/multiset
|
multiset.py
|
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L222-L256
|
def union(self, *others):
r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities.
>>> ms = Multiset('aab')
>>> sorted(ms.union('bc'))
['a', 'a', 'b', 'c']
You can also use the ``|`` operator for the same effect. However, the operator version
        will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms | Multiset('aaa'))
['a', 'a', 'a', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`union_update`.
Args:
*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the union.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in other.items():
old_multiplicity = _elements.get(element, 0)
if multiplicity > old_multiplicity:
_elements[element] = multiplicity
_total += multiplicity - old_multiplicity
result._total = _total
return result
|
[
"def",
"union",
"(",
"self",
",",
"*",
"others",
")",
":",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"_total",
"=",
"result",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"other",
".",
"items",
"(",
")",
":",
"old_multiplicity",
"=",
"_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"multiplicity",
">",
"old_multiplicity",
":",
"_elements",
"[",
"element",
"]",
"=",
"multiplicity",
"_total",
"+=",
"multiplicity",
"-",
"old_multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] |
r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities.
>>> ms = Multiset('aab')
>>> sorted(ms.union('bc'))
['a', 'a', 'b', 'c']
You can also use the ``|`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms | Multiset('aaa'))
['a', 'a', 'a', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`union_update`.
Args:
*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the union.
|
[
"r",
"Return",
"a",
"new",
"multiset",
"with",
"all",
"elements",
"from",
"the",
"multiset",
"and",
"the",
"others",
"with",
"maximal",
"multiplicities",
"."
] |
python
|
train
|
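
The union above keeps the maximal multiplicity of each element. The standard library's collections.Counter applies the same rule through its | operator, which gives a quick way to check the doctest values in the record:

from collections import Counter

ms = Counter('aab')                              # {'a': 2, 'b': 1}
print(sorted((ms | Counter('bc')).elements()))   # ['a', 'a', 'b', 'c']
print(sorted((ms | Counter('aaa')).elements()))  # ['a', 'a', 'a', 'b']
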
gwpy/gwpy
|
gwpy/signal/qtransform.py
|
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L482-L583
|
def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,
# but numpy-1.12.0 introduced the function `~numpy.geomspace`
logfmin = numpy.log10(self.plane.frange[0])
logfmax = numpy.log10(self.plane.frange[1])
outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new
|
[
"def",
"interpolate",
"(",
"self",
",",
"tres",
"=",
"\"<default>\"",
",",
"fres",
"=",
"\"<default>\"",
",",
"logf",
"=",
"False",
",",
"outseg",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"interpolate",
"import",
"(",
"interp2d",
",",
"InterpolatedUnivariateSpline",
")",
"from",
".",
".",
"spectrogram",
"import",
"Spectrogram",
"if",
"outseg",
"is",
"None",
":",
"outseg",
"=",
"self",
".",
"energies",
"[",
"0",
"]",
".",
"span",
"frequencies",
"=",
"self",
".",
"plane",
".",
"frequencies",
"dtype",
"=",
"self",
".",
"energies",
"[",
"0",
"]",
".",
"dtype",
"# build regular Spectrogram from peak-Q data by interpolating each",
"# (Q, frequency) `TimeSeries` to have the same time resolution",
"if",
"tres",
"==",
"\"<default>\"",
":",
"tres",
"=",
"abs",
"(",
"Segment",
"(",
"outseg",
")",
")",
"/",
"1000.",
"xout",
"=",
"numpy",
".",
"arange",
"(",
"*",
"outseg",
",",
"step",
"=",
"tres",
")",
"nx",
"=",
"xout",
".",
"size",
"ny",
"=",
"frequencies",
".",
"size",
"out",
"=",
"Spectrogram",
"(",
"numpy",
".",
"empty",
"(",
"(",
"nx",
",",
"ny",
")",
",",
"dtype",
"=",
"dtype",
")",
",",
"t0",
"=",
"outseg",
"[",
"0",
"]",
",",
"dt",
"=",
"tres",
",",
"frequencies",
"=",
"frequencies",
")",
"# record Q in output",
"out",
".",
"q",
"=",
"self",
".",
"plane",
".",
"q",
"# interpolate rows",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"energies",
")",
":",
"xrow",
"=",
"numpy",
".",
"arange",
"(",
"row",
".",
"x0",
".",
"value",
",",
"(",
"row",
".",
"x0",
"+",
"row",
".",
"duration",
")",
".",
"value",
",",
"row",
".",
"dx",
".",
"value",
")",
"interp",
"=",
"InterpolatedUnivariateSpline",
"(",
"xrow",
",",
"row",
".",
"value",
")",
"out",
"[",
":",
",",
"i",
"]",
"=",
"interp",
"(",
"xout",
")",
".",
"astype",
"(",
"dtype",
",",
"casting",
"=",
"\"same_kind\"",
",",
"copy",
"=",
"False",
")",
"if",
"fres",
"is",
"None",
":",
"return",
"out",
"# interpolate the spectrogram to increase its frequency resolution",
"# --- this is done because Duncan doesn't like interpolated images",
"# since they don't support log scaling",
"interp",
"=",
"interp2d",
"(",
"xout",
",",
"frequencies",
",",
"out",
".",
"value",
".",
"T",
",",
"kind",
"=",
"'cubic'",
")",
"if",
"not",
"logf",
":",
"if",
"fres",
"==",
"\"<default>\"",
":",
"fres",
"=",
".5",
"outfreq",
"=",
"numpy",
".",
"arange",
"(",
"self",
".",
"plane",
".",
"frange",
"[",
"0",
"]",
",",
"self",
".",
"plane",
".",
"frange",
"[",
"1",
"]",
",",
"fres",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"if",
"fres",
"==",
"\"<default>\"",
":",
"fres",
"=",
"500",
"# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,",
"# but numpy-1.12.0 introduced the function `~numpy.geomspace`",
"logfmin",
"=",
"numpy",
".",
"log10",
"(",
"self",
".",
"plane",
".",
"frange",
"[",
"0",
"]",
")",
"logfmax",
"=",
"numpy",
".",
"log10",
"(",
"self",
".",
"plane",
".",
"frange",
"[",
"1",
"]",
")",
"outfreq",
"=",
"numpy",
".",
"logspace",
"(",
"logfmin",
",",
"logfmax",
",",
"num",
"=",
"int",
"(",
"fres",
")",
")",
"new",
"=",
"type",
"(",
"out",
")",
"(",
"interp",
"(",
"xout",
",",
"outfreq",
")",
".",
"T",
".",
"astype",
"(",
"dtype",
",",
"casting",
"=",
"\"same_kind\"",
",",
"copy",
"=",
"False",
")",
",",
"t0",
"=",
"outseg",
"[",
"0",
"]",
",",
"dt",
"=",
"tres",
",",
"frequencies",
"=",
"outfreq",
",",
")",
"new",
".",
"q",
"=",
"self",
".",
"plane",
".",
"q",
"return",
"new"
] |
Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
|
[
"Interpolate",
"this",
"QGram",
"over",
"a",
"regularly",
"-",
"gridded",
"spectrogram"
] |
python
|
train
|
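
The core of interpolate() above is row-wise spline interpolation onto a common time grid. The sketch below shows just that regridding step on synthetic data with scipy; it does not touch gwpy's QGram/Spectrogram classes, and the sampling rates are made up.

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

rng = np.random.default_rng(0)
rows = []  # each row: (its own time axis, its values), sampled at different rates
for n in (50, 80, 120):
    t = np.linspace(0.0, 10.0, n)
    rows.append((t, np.sin(t) + 0.01 * rng.standard_normal(n)))

tout = np.arange(0.0, 10.0, 0.01)  # common output time grid
out = np.empty((tout.size, len(rows)))
for i, (t, values) in enumerate(rows):
    # Cast every row onto the shared time axis, as the method above does.
    out[:, i] = InterpolatedUnivariateSpline(t, values)(tout)

print(out.shape)  # (1000, 3)
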
rigetti/pyquil
|
pyquil/magic.py
|
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/magic.py#L64-L102
|
def _if_statement(test, if_function, else_function) -> None:
"""
Evaluate an if statement within a @magicquil block.
If the test value is a Quil Addr then unwind it into quil code equivalent to an if then statement using jumps. Both
sides of the if statement need to be evaluated and placed into separate Programs, which is why we create new
program contexts for their evaluation.
If the test value is not a Quil Addr then fall back to what Python would normally do with an if statement.
Params are:
if <test>:
<if_function>
else:
<else_function>
NB: This function must be named exactly _if_statement and be in scope for the ast transformer
"""
if isinstance(test, Addr):
token = _program_context.set(Program())
if_function()
if_program = _program_context.get()
_program_context.reset(token)
if else_function:
token = _program_context.set(Program())
else_function()
else_program = _program_context.get()
_program_context.reset(token)
else:
else_program = None
program = _program_context.get()
program.if_then(test, if_program, else_program)
else:
if test:
if_function()
elif else_function:
else_function()
|
[
"def",
"_if_statement",
"(",
"test",
",",
"if_function",
",",
"else_function",
")",
"->",
"None",
":",
"if",
"isinstance",
"(",
"test",
",",
"Addr",
")",
":",
"token",
"=",
"_program_context",
".",
"set",
"(",
"Program",
"(",
")",
")",
"if_function",
"(",
")",
"if_program",
"=",
"_program_context",
".",
"get",
"(",
")",
"_program_context",
".",
"reset",
"(",
"token",
")",
"if",
"else_function",
":",
"token",
"=",
"_program_context",
".",
"set",
"(",
"Program",
"(",
")",
")",
"else_function",
"(",
")",
"else_program",
"=",
"_program_context",
".",
"get",
"(",
")",
"_program_context",
".",
"reset",
"(",
"token",
")",
"else",
":",
"else_program",
"=",
"None",
"program",
"=",
"_program_context",
".",
"get",
"(",
")",
"program",
".",
"if_then",
"(",
"test",
",",
"if_program",
",",
"else_program",
")",
"else",
":",
"if",
"test",
":",
"if_function",
"(",
")",
"elif",
"else_function",
":",
"else_function",
"(",
")"
] |
Evaluate an if statement within a @magicquil block.
If the test value is a Quil Addr then unwind it into quil code equivalent to an if then statement using jumps. Both
sides of the if statement need to be evaluated and placed into separate Programs, which is why we create new
program contexts for their evaluation.
If the test value is not a Quil Addr then fall back to what Python would normally do with an if statement.
Params are:
if <test>:
<if_function>
else:
<else_function>
NB: This function must be named exactly _if_statement and be in scope for the ast transformer
|
[
"Evaluate",
"an",
"if",
"statement",
"within",
"a",
"@magicquil",
"block",
"."
] |
python
|
train
|
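
The branch capture in the record above follows a set()/reset(token) pattern on a context variable: install a fresh program, run the branch body, read the result, then restore the outer program. A minimal sketch with the stdlib contextvars module, using plain lists in place of pyquil Programs:

import contextvars

_program_context = contextvars.ContextVar('program')

def capture(branch_body):
    """Run branch_body while a fresh 'program' is active, then restore."""
    token = _program_context.set([])      # fresh program for the branch
    branch_body()
    captured = _program_context.get()
    _program_context.reset(token)         # restore the outer program
    return captured

_program_context.set(['outer'])
captured = capture(lambda: _program_context.get().append('X 0'))
print(captured)                # ['X 0']
print(_program_context.get())  # ['outer'] -- the outer program is untouched
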
saltstack/salt
|
salt/modules/zabbix.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zabbix.py#L1894-L1951
|
def usermacro_get(macro=None, hostids=None, templateids=None, hostmacroids=None,
globalmacroids=None, globalmacro=False, **kwargs):
'''
Retrieve user macros according to the given parameters.
Args:
macro: name of the usermacro
hostids: Return macros for the given hostids
templateids: Return macros for the given templateids
hostmacroids: Return macros with the given hostmacroids
globalmacroids: Return macros with the given globalmacroids (implies globalmacro=True)
globalmacro: if True, returns only global macros
optional kwargs:
_connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
Returns:
Array with usermacro details, False if no usermacro found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_get macro='{$SNMP_COMMUNITY}'
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'usermacro.get'
params = {"output": "extend", "filter": {}}
if macro:
# Python mistakenly interprets macro names starting and ending with '{' and '}' as a dict
if isinstance(macro, dict):
macro = "{" + six.text_type(macro.keys()[0]) +"}"
if not macro.startswith('{') and not macro.endswith('}'):
macro = "{" + macro + "}"
params['filter'].setdefault('macro', macro)
if hostids:
params.setdefault('hostids', hostids)
elif templateids:
                params.setdefault('templateids', templateids)
if hostmacroids:
params.setdefault('hostmacroids', hostmacroids)
elif globalmacroids:
globalmacro = True
params.setdefault('globalmacroids', globalmacroids)
if globalmacro:
params = _params_extend(params, globalmacro=True)
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result'] if ret['result'] else False
else:
raise KeyError
except KeyError:
return ret
|
[
"def",
"usermacro_get",
"(",
"macro",
"=",
"None",
",",
"hostids",
"=",
"None",
",",
"templateids",
"=",
"None",
",",
"hostmacroids",
"=",
"None",
",",
"globalmacroids",
"=",
"None",
",",
"globalmacro",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"conn_args",
"=",
"_login",
"(",
"*",
"*",
"kwargs",
")",
"ret",
"=",
"{",
"}",
"try",
":",
"if",
"conn_args",
":",
"method",
"=",
"'usermacro.get'",
"params",
"=",
"{",
"\"output\"",
":",
"\"extend\"",
",",
"\"filter\"",
":",
"{",
"}",
"}",
"if",
"macro",
":",
"# Python mistakenly interprets macro names starting and ending with '{' and '}' as a dict",
"if",
"isinstance",
"(",
"macro",
",",
"dict",
")",
":",
"macro",
"=",
"\"{\"",
"+",
"six",
".",
"text_type",
"(",
"macro",
".",
"keys",
"(",
")",
"[",
"0",
"]",
")",
"+",
"\"}\"",
"if",
"not",
"macro",
".",
"startswith",
"(",
"'{'",
")",
"and",
"not",
"macro",
".",
"endswith",
"(",
"'}'",
")",
":",
"macro",
"=",
"\"{\"",
"+",
"macro",
"+",
"\"}\"",
"params",
"[",
"'filter'",
"]",
".",
"setdefault",
"(",
"'macro'",
",",
"macro",
")",
"if",
"hostids",
":",
"params",
".",
"setdefault",
"(",
"'hostids'",
",",
"hostids",
")",
"elif",
"templateids",
":",
"params",
".",
"setdefault",
"(",
"'templateids'",
",",
"hostids",
")",
"if",
"hostmacroids",
":",
"params",
".",
"setdefault",
"(",
"'hostmacroids'",
",",
"hostmacroids",
")",
"elif",
"globalmacroids",
":",
"globalmacro",
"=",
"True",
"params",
".",
"setdefault",
"(",
"'globalmacroids'",
",",
"globalmacroids",
")",
"if",
"globalmacro",
":",
"params",
"=",
"_params_extend",
"(",
"params",
",",
"globalmacro",
"=",
"True",
")",
"params",
"=",
"_params_extend",
"(",
"params",
",",
"*",
"*",
"kwargs",
")",
"ret",
"=",
"_query",
"(",
"method",
",",
"params",
",",
"conn_args",
"[",
"'url'",
"]",
",",
"conn_args",
"[",
"'auth'",
"]",
")",
"return",
"ret",
"[",
"'result'",
"]",
"if",
"ret",
"[",
"'result'",
"]",
"else",
"False",
"else",
":",
"raise",
"KeyError",
"except",
"KeyError",
":",
"return",
"ret"
] |
Retrieve user macros according to the given parameters.
Args:
macro: name of the usermacro
hostids: Return macros for the given hostids
templateids: Return macros for the given templateids
hostmacroids: Return macros with the given hostmacroids
globalmacroids: Return macros with the given globalmacroids (implies globalmacro=True)
globalmacro: if True, returns only global macros
optional kwargs:
_connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
Returns:
Array with usermacro details, False if no usermacro found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_get macro='{$SNMP_COMMUNITY}'
|
[
"Retrieve",
"user",
"macros",
"according",
"to",
"the",
"given",
"parameters",
"."
] |
python
|
train
|
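
One easy-to-miss step above is normalising the macro name so it is always wrapped in braces, and recovering the name when the CLI has already parsed {$MACRO} into a dict. That step in isolation (Python 3 syntax; the original uses macro.keys()[0]):

def normalise_macro(macro):
    # Mirror the normalisation in usermacro_get() above.
    if isinstance(macro, dict):  # e.g. {'$SNMP_COMMUNITY': True}
        macro = "{" + str(list(macro.keys())[0]) + "}"
    if not macro.startswith('{') and not macro.endswith('}'):
        macro = "{" + macro + "}"
    return macro

print(normalise_macro('$SNMP_COMMUNITY'))          # {$SNMP_COMMUNITY}
print(normalise_macro('{$SNMP_COMMUNITY}'))        # {$SNMP_COMMUNITY}
print(normalise_macro({'$SNMP_COMMUNITY': True}))  # {$SNMP_COMMUNITY}
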
raamana/mrivis
|
mrivis/base.py
|
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/base.py#L1067-L1079
|
def _set_roi_mask(self, roi_mask):
"""Sets a new ROI mask."""
if isinstance(roi_mask,
np.ndarray): # not (roi_mask is None or roi_mask=='auto'):
self._verify_shape_compatibility(roi_mask, 'ROI set')
self.roi_mask = roi_mask
self.roi_list = np.unique(roi_mask.flatten())
np.setdiff1d(self.roi_list, cfg.background_value)
else:
self.roi_mask = np.ones(self.carpet.shape[:-1]) # last dim is self.fixed_dim already
self.roi_list = [1, ]
|
[
"def",
"_set_roi_mask",
"(",
"self",
",",
"roi_mask",
")",
":",
"if",
"isinstance",
"(",
"roi_mask",
",",
"np",
".",
"ndarray",
")",
":",
"# not (roi_mask is None or roi_mask=='auto'):",
"self",
".",
"_verify_shape_compatibility",
"(",
"roi_mask",
",",
"'ROI set'",
")",
"self",
".",
"roi_mask",
"=",
"roi_mask",
"self",
".",
"roi_list",
"=",
"np",
".",
"unique",
"(",
"roi_mask",
".",
"flatten",
"(",
")",
")",
"np",
".",
"setdiff1d",
"(",
"self",
".",
"roi_list",
",",
"cfg",
".",
"background_value",
")",
"else",
":",
"self",
".",
"roi_mask",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"carpet",
".",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"# last dim is self.fixed_dim already",
"self",
".",
"roi_list",
"=",
"[",
"1",
",",
"]"
] |
Sets a new ROI mask.
|
[
"Sets",
"a",
"new",
"ROI",
"mask",
"."
] |
python
|
train
|
techdragon/python-check-pypi-name
|
src/check_pypi_name/__init__.py
|
https://github.com/techdragon/python-check-pypi-name/blob/2abfa98878755ed9073b4f5448f4380f88e3e8f3/src/check_pypi_name/__init__.py#L7-L86
|
def check_pypi_name(pypi_package_name, pypi_registry_host=None):
"""
Check if a package name exists on pypi.
TODO: Document the Registry URL construction.
    It may not be obvious how pypi_package_name and pypi_registry_host are used:
I'm appending the simple HTTP API parts of the registry standard specification.
It will return True if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is registered in the PyPI registry.
>>> check_pypi_name('pip')
True
>>> check_pypi_name('Pip')
True
It will return False if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not registered in the PyPI registry.
>>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
False
:param pypi_package_name:
:param pypi_registry_host:
:return:
"""
if pypi_registry_host is None:
pypi_registry_host = 'pypi.python.org'
# Just a helpful reminder why this bytearray size was chosen.
# HTTP/1.1 200 OK
# HTTP/1.1 404 Not Found
receive_buffer = bytearray(b'------------')
context = ssl.create_default_context()
ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=pypi_registry_host)
ssl_http_socket.connect((pypi_registry_host, 443))
ssl_http_socket.send(b''.join([
b"HEAD /simple/", pypi_package_name.encode('ascii'), b"/ HTTP/1.0", b"\r\n",
b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
b"\r\n\r\n"
]))
ssl_http_socket.recv_into(receive_buffer)
# Early return when possible.
if b'HTTP/1.1 200' in receive_buffer:
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
return True
elif b'HTTP/1.1 404' in receive_buffer:
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
return False
remaining_bytes = ssl_http_socket.recv(2048)
redirect_path_location_start = remaining_bytes.find(b'Location:') + 10
redirect_path_location_end = remaining_bytes.find(b'\r\n', redirect_path_location_start)
# Append the trailing slash to avoid a needless extra redirect.
redirect_path = remaining_bytes[redirect_path_location_start:redirect_path_location_end] + b'/'
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
# Reset the bytearray to empty
# receive_buffer = bytearray(b'------------')
ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=pypi_registry_host)
ssl_http_socket.connect((pypi_registry_host, 443))
ssl_http_socket.send(b''.join([
b"HEAD ", redirect_path, b" HTTP/1.0", b"\r\n",
b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
b"\r\n\r\n"]))
ssl_http_socket.recv_into(receive_buffer)
if b'HTTP/1.1 200' in receive_buffer:
return True
elif b'HTTP/1.1 404' in receive_buffer:
return False
else:
        raise NotImplementedError('A definitive answer was not found by primary or secondary lookups.')
|
[
"def",
"check_pypi_name",
"(",
"pypi_package_name",
",",
"pypi_registry_host",
"=",
"None",
")",
":",
"if",
"pypi_registry_host",
"is",
"None",
":",
"pypi_registry_host",
"=",
"'pypi.python.org'",
"# Just a helpful reminder why this bytearray size was chosen.",
"# HTTP/1.1 200 OK",
"# HTTP/1.1 404 Not Found",
"receive_buffer",
"=",
"bytearray",
"(",
"b'------------'",
")",
"context",
"=",
"ssl",
".",
"create_default_context",
"(",
")",
"ssl_http_socket",
"=",
"context",
".",
"wrap_socket",
"(",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
")",
",",
"server_hostname",
"=",
"pypi_registry_host",
")",
"ssl_http_socket",
".",
"connect",
"(",
"(",
"pypi_registry_host",
",",
"443",
")",
")",
"ssl_http_socket",
".",
"send",
"(",
"b''",
".",
"join",
"(",
"[",
"b\"HEAD /simple/\"",
",",
"pypi_package_name",
".",
"encode",
"(",
"'ascii'",
")",
",",
"b\"/ HTTP/1.0\"",
",",
"b\"\\r\\n\"",
",",
"b\"Host: \"",
",",
"pypi_registry_host",
".",
"encode",
"(",
"'ascii'",
")",
",",
"b\"\\r\\n\"",
",",
"b\"\\r\\n\\r\\n\"",
"]",
")",
")",
"ssl_http_socket",
".",
"recv_into",
"(",
"receive_buffer",
")",
"# Early return when possible.",
"if",
"b'HTTP/1.1 200'",
"in",
"receive_buffer",
":",
"ssl_http_socket",
".",
"shutdown",
"(",
"1",
")",
"ssl_http_socket",
".",
"close",
"(",
")",
"return",
"True",
"elif",
"b'HTTP/1.1 404'",
"in",
"receive_buffer",
":",
"ssl_http_socket",
".",
"shutdown",
"(",
"1",
")",
"ssl_http_socket",
".",
"close",
"(",
")",
"return",
"False",
"remaining_bytes",
"=",
"ssl_http_socket",
".",
"recv",
"(",
"2048",
")",
"redirect_path_location_start",
"=",
"remaining_bytes",
".",
"find",
"(",
"b'Location:'",
")",
"+",
"10",
"redirect_path_location_end",
"=",
"remaining_bytes",
".",
"find",
"(",
"b'\\r\\n'",
",",
"redirect_path_location_start",
")",
"# Append the trailing slash to avoid a needless extra redirect.",
"redirect_path",
"=",
"remaining_bytes",
"[",
"redirect_path_location_start",
":",
"redirect_path_location_end",
"]",
"+",
"b'/'",
"ssl_http_socket",
".",
"shutdown",
"(",
"1",
")",
"ssl_http_socket",
".",
"close",
"(",
")",
"# Reset the bytearray to empty",
"# receive_buffer = bytearray(b'------------')",
"ssl_http_socket",
"=",
"context",
".",
"wrap_socket",
"(",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
")",
",",
"server_hostname",
"=",
"pypi_registry_host",
")",
"ssl_http_socket",
".",
"connect",
"(",
"(",
"pypi_registry_host",
",",
"443",
")",
")",
"ssl_http_socket",
".",
"send",
"(",
"b''",
".",
"join",
"(",
"[",
"b\"HEAD \"",
",",
"redirect_path",
",",
"b\" HTTP/1.0\"",
",",
"b\"\\r\\n\"",
",",
"b\"Host: \"",
",",
"pypi_registry_host",
".",
"encode",
"(",
"'ascii'",
")",
",",
"b\"\\r\\n\"",
",",
"b\"\\r\\n\\r\\n\"",
"]",
")",
")",
"ssl_http_socket",
".",
"recv_into",
"(",
"receive_buffer",
")",
"if",
"b'HTTP/1.1 200'",
"in",
"receive_buffer",
":",
"return",
"True",
"elif",
"b'HTTP/1.1 404'",
"in",
"receive_buffer",
":",
"return",
"False",
"else",
":",
"NotImplementedError",
"(",
"'A definitive answer was not found by primary or secondary lookups.'",
")"
] |
Check if a package name exists on pypi.
TODO: Document the Registry URL construction.
It may not be obvious how pypi_package_name and pypi_registry_host are used:
I'm appending the simple HTTP API parts of the registry standard specification.
It will return True if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is registered in the PyPI registry.
>>> check_pypi_name('pip')
True
>>> check_pypi_name('Pip')
True
It will return False if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not registered in the PyPI registry.
>>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
False
:param pypi_package_name:
:param pypi_registry_host:
:return:
|
[
"Check",
"if",
"a",
"package",
"name",
"exists",
"on",
"pypi",
"."
] |
python
|
test
|
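
The record above speaks HTTP by hand over an ssl-wrapped socket. A much shorter sketch of the same check with http.client is below; it assumes the simple index answers HEAD /simple/<name>/ with 200 for registered names and 404 otherwise, and it does not follow redirects the way the original does.

import http.client

def pypi_name_exists(name, host="pypi.org"):
    """Return True on HTTP 200 for /simple/<name>/, False on 404."""
    conn = http.client.HTTPSConnection(host, timeout=10)
    try:
        conn.request("HEAD", "/simple/%s/" % name)
        status = conn.getresponse().status
    finally:
        conn.close()
    if status == 200:
        return True
    if status == 404:
        return False
    raise NotImplementedError("Unexpected status %d for %r" % (status, name))

# pypi_name_exists("pip")  -> True (requires network access)
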
Alignak-monitoring-contrib/alignak-backend-client
|
alignak_backend_client/backend_client.py
|
https://github.com/Alignak-monitoring-contrib/alignak-backend-client/blob/1e21f6ce703e66984d1f9b20fe7866460ab50b39/alignak_backend_client/backend_client.py#L441-L458
|
def file_dump(self, data, filename): # pylint: disable=no-self-use
"""
Dump the data to a JSON formatted file
:param data: data to be dumped
:param filename: name of the file to use. Only the file name, not the full path!
:return: dumped file absolute file name
"""
dump = json.dumps(data, indent=4,
separators=(',', ': '), sort_keys=True)
path = os.path.join(self.folder or os.getcwd(), filename)
try:
dfile = open(path, "wt")
dfile.write(dump)
dfile.close()
return path
except (OSError, IndexError) as exp: # pragma: no cover, should never happen
logger.exception("Error when writing the list dump file %s : %s", path, str(exp))
return None
|
[
"def",
"file_dump",
"(",
"self",
",",
"data",
",",
"filename",
")",
":",
"# pylint: disable=no-self-use",
"dump",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
",",
"sort_keys",
"=",
"True",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"folder",
"or",
"os",
".",
"getcwd",
"(",
")",
",",
"filename",
")",
"try",
":",
"dfile",
"=",
"open",
"(",
"path",
",",
"\"wt\"",
")",
"dfile",
".",
"write",
"(",
"dump",
")",
"dfile",
".",
"close",
"(",
")",
"return",
"path",
"except",
"(",
"OSError",
",",
"IndexError",
")",
"as",
"exp",
":",
"# pragma: no cover, should never happen",
"logger",
".",
"exception",
"(",
"\"Error when writing the list dump file %s : %s\"",
",",
"path",
",",
"str",
"(",
"exp",
")",
")",
"return",
"None"
] |
Dump the data to a JSON formatted file
:param data: data to be dumped
:param filename: name of the file to use. Only the file name, not the full path!
:return: dumped file absolute file name
|
[
"Dump",
"the",
"data",
"to",
"a",
"JSON",
"formatted",
"file",
":",
"param",
"data",
":",
"data",
"to",
"be",
"dumped",
":",
"param",
"filename",
":",
"name",
"of",
"the",
"file",
"to",
"use",
".",
"Only",
"the",
"file",
"name",
"not",
"the",
"full",
"path!",
":",
"return",
":",
"dumped",
"file",
"absolute",
"file",
"name"
] |
python
|
test
|
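
The dump above is plain json.dumps with pretty-printing options; the same call on a small literal shows the output shape it produces.

import json

data = {"b": 1, "a": [1, 2]}
print(json.dumps(data, indent=4, separators=(',', ': '), sort_keys=True))
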
zetaops/zengine
|
zengine/management_commands.py
|
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/management_commands.py#L44-L92
|
def run(self):
"""
Creates new permissions.
"""
from pyoko.lib.utils import get_object_from_path
from zengine.config import settings
model = get_object_from_path(settings.PERMISSION_MODEL)
perm_provider = get_object_from_path(settings.PERMISSION_PROVIDER)
existing_perms = []
new_perms = []
for code, name, desc in perm_provider():
code = six.text_type(code)
if self.manager.args.dry:
exists = model.objects.filter(code=code, name=name)
if exists:
perm = exists[0]
new = False
else:
new = True
perm = model(code=code, name=name)
else:
try:
perm = model.objects.get(code)
existing_perms.append(perm)
except ObjectDoesNotExist:
perm = model(description=desc, code=code, name=name)
perm.key = code
perm.save()
new_perms.append(perm)
# perm, new = model.objects.get_or_create({'description': desc}, code=code, name=name)
# if new:
# new_perms.append(perm)
# else:
# existing_perms.append(perm)
report = "\n\n%s permission(s) were found in DB. " % len(existing_perms)
if new_perms:
report += "\n%s new permission record added. " % len(new_perms)
else:
report += 'No new perms added. '
if new_perms:
if not self.manager.args.dry:
SelectBoxCache.flush(model.__name__)
report += 'Total %s perms exists.' % (len(existing_perms) + len(new_perms))
report = "\n + " + "\n + ".join([p.name or p.code for p in new_perms]) + report
if self.manager.args.dry:
print("\n~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~\n")
print(report + "\n")
|
[
"def",
"run",
"(",
"self",
")",
":",
"from",
"pyoko",
".",
"lib",
".",
"utils",
"import",
"get_object_from_path",
"from",
"zengine",
".",
"config",
"import",
"settings",
"model",
"=",
"get_object_from_path",
"(",
"settings",
".",
"PERMISSION_MODEL",
")",
"perm_provider",
"=",
"get_object_from_path",
"(",
"settings",
".",
"PERMISSION_PROVIDER",
")",
"existing_perms",
"=",
"[",
"]",
"new_perms",
"=",
"[",
"]",
"for",
"code",
",",
"name",
",",
"desc",
"in",
"perm_provider",
"(",
")",
":",
"code",
"=",
"six",
".",
"text_type",
"(",
"code",
")",
"if",
"self",
".",
"manager",
".",
"args",
".",
"dry",
":",
"exists",
"=",
"model",
".",
"objects",
".",
"filter",
"(",
"code",
"=",
"code",
",",
"name",
"=",
"name",
")",
"if",
"exists",
":",
"perm",
"=",
"exists",
"[",
"0",
"]",
"new",
"=",
"False",
"else",
":",
"new",
"=",
"True",
"perm",
"=",
"model",
"(",
"code",
"=",
"code",
",",
"name",
"=",
"name",
")",
"else",
":",
"try",
":",
"perm",
"=",
"model",
".",
"objects",
".",
"get",
"(",
"code",
")",
"existing_perms",
".",
"append",
"(",
"perm",
")",
"except",
"ObjectDoesNotExist",
":",
"perm",
"=",
"model",
"(",
"description",
"=",
"desc",
",",
"code",
"=",
"code",
",",
"name",
"=",
"name",
")",
"perm",
".",
"key",
"=",
"code",
"perm",
".",
"save",
"(",
")",
"new_perms",
".",
"append",
"(",
"perm",
")",
"# perm, new = model.objects.get_or_create({'description': desc}, code=code, name=name)",
"# if new:",
"# new_perms.append(perm)",
"# else:",
"# existing_perms.append(perm)",
"report",
"=",
"\"\\n\\n%s permission(s) were found in DB. \"",
"%",
"len",
"(",
"existing_perms",
")",
"if",
"new_perms",
":",
"report",
"+=",
"\"\\n%s new permission record added. \"",
"%",
"len",
"(",
"new_perms",
")",
"else",
":",
"report",
"+=",
"'No new perms added. '",
"if",
"new_perms",
":",
"if",
"not",
"self",
".",
"manager",
".",
"args",
".",
"dry",
":",
"SelectBoxCache",
".",
"flush",
"(",
"model",
".",
"__name__",
")",
"report",
"+=",
"'Total %s perms exists.'",
"%",
"(",
"len",
"(",
"existing_perms",
")",
"+",
"len",
"(",
"new_perms",
")",
")",
"report",
"=",
"\"\\n + \"",
"+",
"\"\\n + \"",
".",
"join",
"(",
"[",
"p",
".",
"name",
"or",
"p",
".",
"code",
"for",
"p",
"in",
"new_perms",
"]",
")",
"+",
"report",
"if",
"self",
".",
"manager",
".",
"args",
".",
"dry",
":",
"print",
"(",
"\"\\n~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~\\n\"",
")",
"print",
"(",
"report",
"+",
"\"\\n\"",
")"
] |
Creates new permissions.
|
[
"Creates",
"new",
"permissions",
"."
] |
python
|
train
|
openvax/varcode
|
varcode/effects/mutate.py
|
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/mutate.py#L62-L87
|
def substitute(sequence, offset, ref, alt):
"""Mutate a sequence by substituting given `alt` at instead of `ref` at the
given `position`.
Parameters
----------
sequence : sequence
String of amino acids or DNA bases
offset : int
Base 0 offset from start of `sequence`
ref : sequence or str
What do we expect to find at the position?
alt : sequence or str
Alternate sequence to insert
"""
n_ref = len(ref)
sequence_ref = sequence[offset:offset + n_ref]
assert str(sequence_ref) == str(ref), \
"Reference %s at offset %d != expected reference %s" % \
(sequence_ref, offset, ref)
prefix = sequence[:offset]
suffix = sequence[offset + n_ref:]
return prefix + alt + suffix
|
[
"def",
"substitute",
"(",
"sequence",
",",
"offset",
",",
"ref",
",",
"alt",
")",
":",
"n_ref",
"=",
"len",
"(",
"ref",
")",
"sequence_ref",
"=",
"sequence",
"[",
"offset",
":",
"offset",
"+",
"n_ref",
"]",
"assert",
"str",
"(",
"sequence_ref",
")",
"==",
"str",
"(",
"ref",
")",
",",
"\"Reference %s at offset %d != expected reference %s\"",
"%",
"(",
"sequence_ref",
",",
"offset",
",",
"ref",
")",
"prefix",
"=",
"sequence",
"[",
":",
"offset",
"]",
"suffix",
"=",
"sequence",
"[",
"offset",
"+",
"n_ref",
":",
"]",
"return",
"prefix",
"+",
"alt",
"+",
"suffix"
] |
Mutate a sequence by substituting given `alt` instead of `ref` at the
given `offset`.
Parameters
----------
sequence : sequence
String of amino acids or DNA bases
offset : int
Base 0 offset from start of `sequence`
ref : sequence or str
What do we expect to find at the position?
alt : sequence or str
Alternate sequence to insert
|
[
"Mutate",
"a",
"sequence",
"by",
"substituting",
"given",
"alt",
"at",
"instead",
"of",
"ref",
"at",
"the",
"given",
"position",
"."
] |
python
|
train
|
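
A quick doctest-style illustration of the substitution above on a short DNA string; the function is restated so the example runs on its own.

def substitute(sequence, offset, ref, alt):
    # Same logic as the record above, without the assert message.
    n_ref = len(ref)
    assert str(sequence[offset:offset + n_ref]) == str(ref)
    return sequence[:offset] + alt + sequence[offset + n_ref:]

print(substitute("ATGC", 1, "TG", "A"))   # 'AAC'    (replace 'TG' with 'A')
print(substitute("ATGC", 3, "C", "CTT"))  # 'ATGCTT' (longer alt acts as an insertion)
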
sveetch/boussole
|
boussole/watcher.py
|
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/watcher.py#L143-L161
|
def compile_dependencies(self, sourcepath, include_self=False):
"""
Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
            include_self (bool): If ``True`` the given sourcepath is added to
items to compile, else only its dependencies are compiled.
"""
items = self.inspector.parents(sourcepath)
# Also add the current event related path
if include_self:
items.add(sourcepath)
return filter(None, [self.compile_source(item) for item in items])
|
[
"def",
"compile_dependencies",
"(",
"self",
",",
"sourcepath",
",",
"include_self",
"=",
"False",
")",
":",
"items",
"=",
"self",
".",
"inspector",
".",
"parents",
"(",
"sourcepath",
")",
"# Also add the current event related path",
"if",
"include_self",
":",
"items",
".",
"add",
"(",
"sourcepath",
")",
"return",
"filter",
"(",
"None",
",",
"[",
"self",
".",
"compile_source",
"(",
"item",
")",
"for",
"item",
"in",
"items",
"]",
")"
] |
Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is added to
items to compile, else only its dependencies are compiled.
|
[
"Apply",
"compile",
"on",
"all",
"dependencies"
] |
python
|
train
|
pybel/pybel
|
src/pybel/canonicalize.py
|
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/canonicalize.py#L178-L183
|
def _unset_annotation_to_str(keys: List[str]) -> str:
"""Return an unset annotation string."""
if len(keys) == 1:
return 'UNSET {}'.format(list(keys)[0])
return 'UNSET {{{}}}'.format(', '.join('{}'.format(key) for key in keys))
|
[
"def",
"_unset_annotation_to_str",
"(",
"keys",
":",
"List",
"[",
"str",
"]",
")",
"->",
"str",
":",
"if",
"len",
"(",
"keys",
")",
"==",
"1",
":",
"return",
"'UNSET {}'",
".",
"format",
"(",
"list",
"(",
"keys",
")",
"[",
"0",
"]",
")",
"return",
"'UNSET {{{}}}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"'{}'",
".",
"format",
"(",
"key",
")",
"for",
"key",
"in",
"keys",
")",
")"
] |
Return an unset annotation string.
|
[
"Return",
"an",
"unset",
"annotation",
"string",
"."
] |
python
|
train
|
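
For reference, the helper above behaves differently for one key versus several; it is restated here so the two cases can be run directly.

from typing import List

def _unset_annotation_to_str(keys: List[str]) -> str:
    if len(keys) == 1:
        return 'UNSET {}'.format(list(keys)[0])
    return 'UNSET {{{}}}'.format(', '.join('{}'.format(key) for key in keys))

print(_unset_annotation_to_str(['Confidence']))             # UNSET Confidence
print(_unset_annotation_to_str(['Confidence', 'Species']))  # UNSET {Confidence, Species}
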
pri22296/beautifultable
|
beautifultable/rows.py
|
https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/rows.py#L65-L99
|
def _clamp_string(self, row_item, column_index, delimiter=''):
"""Clamp `row_item` to fit in column referred by column_index.
This method considers padding and appends the delimiter if `row_item`
needs to be truncated.
Parameters
----------
row_item: str
String which should be clamped.
column_index: int
Index of the column `row_item` belongs to.
delimiter: str
String which is to be appended to the clamped string.
Returns
-------
str
            The modified string which fits in its column.
"""
width = (self._table.column_widths[column_index]
- self._table.left_padding_widths[column_index]
- self._table.right_padding_widths[column_index])
if termwidth(row_item) <= width:
return row_item
else:
if width - len(delimiter) >= 0:
clamped_string = (textwrap(row_item, width-len(delimiter))[0]
+ delimiter)
else:
clamped_string = delimiter[:width]
return clamped_string
|
[
"def",
"_clamp_string",
"(",
"self",
",",
"row_item",
",",
"column_index",
",",
"delimiter",
"=",
"''",
")",
":",
"width",
"=",
"(",
"self",
".",
"_table",
".",
"column_widths",
"[",
"column_index",
"]",
"-",
"self",
".",
"_table",
".",
"left_padding_widths",
"[",
"column_index",
"]",
"-",
"self",
".",
"_table",
".",
"right_padding_widths",
"[",
"column_index",
"]",
")",
"if",
"termwidth",
"(",
"row_item",
")",
"<=",
"width",
":",
"return",
"row_item",
"else",
":",
"if",
"width",
"-",
"len",
"(",
"delimiter",
")",
">=",
"0",
":",
"clamped_string",
"=",
"(",
"textwrap",
"(",
"row_item",
",",
"width",
"-",
"len",
"(",
"delimiter",
")",
")",
"[",
"0",
"]",
"+",
"delimiter",
")",
"else",
":",
"clamped_string",
"=",
"delimiter",
"[",
":",
"width",
"]",
"return",
"clamped_string"
] |
Clamp `row_item` to fit in column referred by column_index.
This method considers padding and appends the delimiter if `row_item`
needs to be truncated.
Parameters
----------
row_item: str
String which should be clamped.
column_index: int
Index of the column `row_item` belongs to.
delimiter: str
String which is to be appended to the clamped string.
Returns
-------
str
The modified string which fits in its column.
|
[
"Clamp",
"row_item",
"to",
"fit",
"in",
"column",
"referred",
"by",
"column_index",
"."
] |
python
|
train
|
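
The clamping above relies on beautifultable's own termwidth and textwrap helpers, which are wide-character aware. A simplified sketch of the same idea with the stdlib textwrap module and len(), valid for plain ASCII only:

import textwrap

def clamp_string(row_item, width, delimiter='...'):
    # Truncate row_item to `width`, appending `delimiter` when it is cut.
    if len(row_item) <= width:
        return row_item
    if width - len(delimiter) > 0:
        return textwrap.wrap(row_item, width - len(delimiter))[0] + delimiter
    return delimiter[:width]

print(clamp_string("a fairly long cell value", 12))  # 'a fairly...'
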
StackStorm/pybind
|
pybind/slxos/v17r_2_00/keychain/key/accept_lifetime/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/keychain/key/accept_lifetime/__init__.py#L127-L148
|
def _set_start_time(self, v, load=False):
"""
Setter method for start_time, mapped from YANG variable /keychain/key/accept_lifetime/start_time (time-format-start)
If this variable is read-only (config: false) in the
source YANG file, then _set_start_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_start_time() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\\|(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\\d{4})', 'length': [u'0..32']}), is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'enter start time in the format HH:MM:SS|MM/DD/YYYY', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='time-format-start', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """start_time must be of a type compatible with time-format-start""",
'defined-type': "brocade-keychain:time-format-start",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\\|(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\\d{4})', 'length': [u'0..32']}), is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'enter start time in the format HH:MM:SS|MM/DD/YYYY', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='time-format-start', is_config=True)""",
})
self.__start_time = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_start_time",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_dict",
"=",
"{",
"'pattern'",
":",
"u'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\\\\|(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\\\\d{4})'",
",",
"'length'",
":",
"[",
"u'0..32'",
"]",
"}",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"start-time\"",
",",
"rest_name",
"=",
"\"start-time\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'enter start time in the format HH:MM:SS|MM/DD/YYYY'",
",",
"u'cli-drop-node-name'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-keychain'",
",",
"defining_module",
"=",
"'brocade-keychain'",
",",
"yang_type",
"=",
"'time-format-start'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"start_time must be of a type compatible with time-format-start\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-keychain:time-format-start\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\\\\|(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\\\\d{4})', 'length': [u'0..32']}), is_leaf=True, yang_name=\"start-time\", rest_name=\"start-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'enter start time in the format HH:MM:SS|MM/DD/YYYY', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='time-format-start', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__start_time",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for start_time, mapped from YANG variable /keychain/key/accept_lifetime/start_time (time-format-start)
If this variable is read-only (config: false) in the
source YANG file, then _set_start_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_start_time() directly.
|
[
"Setter",
"method",
"for",
"start_time",
"mapped",
"from",
"YANG",
"variable",
"/",
"keychain",
"/",
"key",
"/",
"accept_lifetime",
"/",
"start_time",
"(",
"time",
"-",
"format",
"-",
"start",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_start_time",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_start_time",
"()",
"directly",
"."
] |
python
|
train
|
Calysto/calysto
|
calysto/ai/conx.py
|
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L4686-L4697
|
def addContext(self, layer, hiddenLayerName = 'hidden', verbosity = 0):
"""
Adds a context layer. Necessary to keep self.contextLayers dictionary up to date.
"""
# better not add context layer first if using sweep() without mapInput
SRN.add(self, layer, verbosity)
if hiddenLayerName in self.contextLayers:
raise KeyError('There is already a context layer associated with this hidden layer.', \
hiddenLayerName)
else:
self.contextLayers[hiddenLayerName] = layer
layer.kind = 'Context'
|
[
"def",
"addContext",
"(",
"self",
",",
"layer",
",",
"hiddenLayerName",
"=",
"'hidden'",
",",
"verbosity",
"=",
"0",
")",
":",
"# better not add context layer first if using sweep() without mapInput",
"SRN",
".",
"add",
"(",
"self",
",",
"layer",
",",
"verbosity",
")",
"if",
"hiddenLayerName",
"in",
"self",
".",
"contextLayers",
":",
"raise",
"KeyError",
"(",
"'There is already a context layer associated with this hidden layer.'",
",",
"hiddenLayerName",
")",
"else",
":",
"self",
".",
"contextLayers",
"[",
"hiddenLayerName",
"]",
"=",
"layer",
"layer",
".",
"kind",
"=",
"'Context'"
] |
Adds a context layer. Necessary to keep self.contextLayers dictionary up to date.
|
[
"Adds",
"a",
"context",
"layer",
".",
"Necessary",
"to",
"keep",
"self",
".",
"contextLayers",
"dictionary",
"up",
"to",
"date",
"."
] |
python
|
train
|
intelligenia/modeltranslation
|
modeltranslation/models.py
|
https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/models.py#L206-L216
|
def _load_source_object(self):
"""
Loads related object in a dynamic attribute and returns it.
"""
if hasattr(self, "source_obj"):
self.source_text = getattr(self.source_obj, self.field)
return self.source_obj
self._load_source_model()
self.source_obj = self.source_model.objects.get(id=self.object_id)
return self.source_obj
|
[
"def",
"_load_source_object",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"source_obj\"",
")",
":",
"self",
".",
"source_text",
"=",
"getattr",
"(",
"self",
".",
"source_obj",
",",
"self",
".",
"field",
")",
"return",
"self",
".",
"source_obj",
"self",
".",
"_load_source_model",
"(",
")",
"self",
".",
"source_obj",
"=",
"self",
".",
"source_model",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"self",
".",
"object_id",
")",
"return",
"self",
".",
"source_obj"
] |
Loads related object in a dynamic attribute and returns it.
|
[
"Loads",
"related",
"object",
"in",
"a",
"dynamic",
"attribute",
"and",
"returns",
"it",
"."
] |
python
|
train
|
Rapptz/discord.py
|
discord/message.py
|
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/message.py#L566-L601
|
async def delete(self, *, delay=None):
"""|coro|
Deletes the message.
Your own messages could be deleted without any proper permissions. However to
delete other people's messages, you need the :attr:`~Permissions.manage_messages`
permission.
.. versionchanged:: 1.1.0
Added the new ``delay`` keyword-only parameter.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
HTTPException
Deleting the message failed.
"""
if delay is not None:
async def delete():
await asyncio.sleep(delay, loop=self._state.loop)
try:
await self._state.http.delete_message(self.channel.id, self.id)
except HTTPException:
pass
asyncio.ensure_future(delete(), loop=self._state.loop)
else:
await self._state.http.delete_message(self.channel.id, self.id)
|
[
"async",
"def",
"delete",
"(",
"self",
",",
"*",
",",
"delay",
"=",
"None",
")",
":",
"if",
"delay",
"is",
"not",
"None",
":",
"async",
"def",
"delete",
"(",
")",
":",
"await",
"asyncio",
".",
"sleep",
"(",
"delay",
",",
"loop",
"=",
"self",
".",
"_state",
".",
"loop",
")",
"try",
":",
"await",
"self",
".",
"_state",
".",
"http",
".",
"delete_message",
"(",
"self",
".",
"channel",
".",
"id",
",",
"self",
".",
"id",
")",
"except",
"HTTPException",
":",
"pass",
"asyncio",
".",
"ensure_future",
"(",
"delete",
"(",
")",
",",
"loop",
"=",
"self",
".",
"_state",
".",
"loop",
")",
"else",
":",
"await",
"self",
".",
"_state",
".",
"http",
".",
"delete_message",
"(",
"self",
".",
"channel",
".",
"id",
",",
"self",
".",
"id",
")"
] |
|coro|
Deletes the message.
Your own messages could be deleted without any proper permissions. However to
delete other people's messages, you need the :attr:`~Permissions.manage_messages`
permission.
.. versionchanged:: 1.1.0
Added the new ``delay`` keyword-only parameter.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
HTTPException
Deleting the message failed.
|
[
"|coro|"
] |
python
|
train
|
yahoo/TensorFlowOnSpark
|
tensorflowonspark/dfutil.py
|
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/tensorflowonspark/dfutil.py#L134-L168
|
def infer_schema(example, binary_features=[]):
"""Given a tf.train.Example, infer the Spark DataFrame schema (StructFields).
Note: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to
disambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a "hint"
from the caller in the ``binary_features`` argument.
Args:
:example: a tf.train.Example
:binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays.
Returns:
A DataFrame StructType schema
"""
def _infer_sql_type(k, v):
# special handling for binary features
if k in binary_features:
return BinaryType()
if v.int64_list.value:
result = v.int64_list.value
sql_type = LongType()
elif v.float_list.value:
result = v.float_list.value
sql_type = DoubleType()
else:
result = v.bytes_list.value
sql_type = StringType()
if len(result) > 1: # represent multi-item tensors as Spark SQL ArrayType() of base types
return ArrayType(sql_type)
else: # represent everything else as base types (and empty tensors as StringType())
return sql_type
return StructType([StructField(k, _infer_sql_type(k, v), True) for k, v in sorted(example.features.feature.items())])
|
[
"def",
"infer_schema",
"(",
"example",
",",
"binary_features",
"=",
"[",
"]",
")",
":",
"def",
"_infer_sql_type",
"(",
"k",
",",
"v",
")",
":",
"# special handling for binary features",
"if",
"k",
"in",
"binary_features",
":",
"return",
"BinaryType",
"(",
")",
"if",
"v",
".",
"int64_list",
".",
"value",
":",
"result",
"=",
"v",
".",
"int64_list",
".",
"value",
"sql_type",
"=",
"LongType",
"(",
")",
"elif",
"v",
".",
"float_list",
".",
"value",
":",
"result",
"=",
"v",
".",
"float_list",
".",
"value",
"sql_type",
"=",
"DoubleType",
"(",
")",
"else",
":",
"result",
"=",
"v",
".",
"bytes_list",
".",
"value",
"sql_type",
"=",
"StringType",
"(",
")",
"if",
"len",
"(",
"result",
")",
">",
"1",
":",
"# represent multi-item tensors as Spark SQL ArrayType() of base types",
"return",
"ArrayType",
"(",
"sql_type",
")",
"else",
":",
"# represent everything else as base types (and empty tensors as StringType())",
"return",
"sql_type",
"return",
"StructType",
"(",
"[",
"StructField",
"(",
"k",
",",
"_infer_sql_type",
"(",
"k",
",",
"v",
")",
",",
"True",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"example",
".",
"features",
".",
"feature",
".",
"items",
"(",
")",
")",
"]",
")"
] |
Given a tf.train.Example, infer the Spark DataFrame schema (StructFields).
Note: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to
disambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a "hint"
from the caller in the ``binary_features`` argument.
Args:
:example: a tf.train.Example
:binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays.
Returns:
A DataFrame StructType schema
|
[
"Given",
"a",
"tf",
".",
"train",
".",
"Example",
"infer",
"the",
"Spark",
"DataFrame",
"schema",
"(",
"StructFields",
")",
"."
] |
python
|
train
|
openego/ding0
|
ding0/grid/mv_grid/util/data_input.py
|
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/util/data_input.py#L312-L376
|
def _parse_tsplib(f):
"""Parses a TSPLIB file descriptor and returns a dict containing the problem definition"""
line = ''
specs = {}
used_specs = ['NAME', 'COMMENT', 'DIMENSION', 'CAPACITY', 'TYPE', 'EDGE_WEIGHT_TYPE']
used_data = ['DEMAND_SECTION', 'DEPOT_SECTION']
# Parse specs part
for line in f:
line = strip(line)
# Arbitrary sort, so we test everything out
s = None
for s in used_specs:
if line.startswith(s):
specs[s] = line.split('{} :'.format(s))[-1].strip() # get value data part
break
if s == 'EDGE_WEIGHT_TYPE' and s in specs and specs[s] == 'EXPLICIT':
used_specs.append('EDGE_WEIGHT_FORMAT')
# All specs read
if len(specs) == len(used_specs):
break
if len(specs) != len(used_specs):
missing_specs = set(used_specs).symmetric_difference(set(specs))
raise ParseException('Error parsing TSPLIB data: specs {} missing'.format(missing_specs))
print(specs)
if specs['EDGE_WEIGHT_TYPE'] == 'EUC_2D':
used_data.append('NODE_COORD_SECTION')
elif specs['EDGE_WEIGHT_FORMAT'] == 'FULL_MATRIX':
used_data.append('EDGE_WEIGHT_SECTION')
else:
raise ParseException('EDGE_WEIGHT_TYPE or EDGE_WEIGHT_FORMAT not supported')
_post_process_specs(specs)
# Parse data part
for line in f:
line = strip(line)
for d in used_data:
if line.startswith(d):
if d == 'DEPOT_SECTION':
specs[d] = _parse_depot_section(f)
elif d in ['NODE_COORD_SECTION', 'DEMAND_SECTION']:
specs[d] = _parse_nodes_section(f, d, specs['DIMENSION'])
elif d == 'EDGE_WEIGHT_SECTION':
specs[d] = _parse_edge_weight(f, specs['DIMENSION'])
if len(specs) == len(used_specs) + len(used_data):
break
if len(specs) != len(used_specs) + len(used_data):
missing_specs = set(specs).symmetric_difference(set(used_specs).union(set(used_data)))
raise ParseException('Error parsing TSPLIB data: specs {} missing'.format(missing_specs))
_post_process_data(specs)
return specs
|
[
"def",
"_parse_tsplib",
"(",
"f",
")",
":",
"line",
"=",
"''",
"specs",
"=",
"{",
"}",
"used_specs",
"=",
"[",
"'NAME'",
",",
"'COMMENT'",
",",
"'DIMENSION'",
",",
"'CAPACITY'",
",",
"'TYPE'",
",",
"'EDGE_WEIGHT_TYPE'",
"]",
"used_data",
"=",
"[",
"'DEMAND_SECTION'",
",",
"'DEPOT_SECTION'",
"]",
"# Parse specs part",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"strip",
"(",
"line",
")",
"# Arbitrary sort, so we test everything out",
"s",
"=",
"None",
"for",
"s",
"in",
"used_specs",
":",
"if",
"line",
".",
"startswith",
"(",
"s",
")",
":",
"specs",
"[",
"s",
"]",
"=",
"line",
".",
"split",
"(",
"'{} :'",
".",
"format",
"(",
"s",
")",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"# get value data part",
"break",
"if",
"s",
"==",
"'EDGE_WEIGHT_TYPE'",
"and",
"s",
"in",
"specs",
"and",
"specs",
"[",
"s",
"]",
"==",
"'EXPLICIT'",
":",
"used_specs",
".",
"append",
"(",
"'EDGE_WEIGHT_FORMAT'",
")",
"# All specs read",
"if",
"len",
"(",
"specs",
")",
"==",
"len",
"(",
"used_specs",
")",
":",
"break",
"if",
"len",
"(",
"specs",
")",
"!=",
"len",
"(",
"used_specs",
")",
":",
"missing_specs",
"=",
"set",
"(",
"used_specs",
")",
".",
"symmetric_difference",
"(",
"set",
"(",
"specs",
")",
")",
"raise",
"ParseException",
"(",
"'Error parsing TSPLIB data: specs {} missing'",
".",
"format",
"(",
"missing_specs",
")",
")",
"print",
"(",
"specs",
")",
"if",
"specs",
"[",
"'EDGE_WEIGHT_TYPE'",
"]",
"==",
"'EUC_2D'",
":",
"used_data",
".",
"append",
"(",
"'NODE_COORD_SECTION'",
")",
"elif",
"specs",
"[",
"'EDGE_WEIGHT_FORMAT'",
"]",
"==",
"'FULL_MATRIX'",
":",
"used_data",
".",
"append",
"(",
"'EDGE_WEIGHT_SECTION'",
")",
"else",
":",
"raise",
"ParseException",
"(",
"'EDGE_WEIGHT_TYPE or EDGE_WEIGHT_FORMAT not supported'",
")",
"_post_process_specs",
"(",
"specs",
")",
"# Parse data part",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"strip",
"(",
"line",
")",
"for",
"d",
"in",
"used_data",
":",
"if",
"line",
".",
"startswith",
"(",
"d",
")",
":",
"if",
"d",
"==",
"'DEPOT_SECTION'",
":",
"specs",
"[",
"d",
"]",
"=",
"_parse_depot_section",
"(",
"f",
")",
"elif",
"d",
"in",
"[",
"'NODE_COORD_SECTION'",
",",
"'DEMAND_SECTION'",
"]",
":",
"specs",
"[",
"d",
"]",
"=",
"_parse_nodes_section",
"(",
"f",
",",
"d",
",",
"specs",
"[",
"'DIMENSION'",
"]",
")",
"elif",
"d",
"==",
"'EDGE_WEIGHT_SECTION'",
":",
"specs",
"[",
"d",
"]",
"=",
"_parse_edge_weight",
"(",
"f",
",",
"specs",
"[",
"'DIMENSION'",
"]",
")",
"if",
"len",
"(",
"specs",
")",
"==",
"len",
"(",
"used_specs",
")",
"+",
"len",
"(",
"used_data",
")",
":",
"break",
"if",
"len",
"(",
"specs",
")",
"!=",
"len",
"(",
"used_specs",
")",
"+",
"len",
"(",
"used_data",
")",
":",
"missing_specs",
"=",
"set",
"(",
"specs",
")",
".",
"symmetric_difference",
"(",
"set",
"(",
"used_specs",
")",
".",
"union",
"(",
"set",
"(",
"used_data",
")",
")",
")",
"raise",
"ParseException",
"(",
"'Error parsing TSPLIB data: specs {} missing'",
".",
"format",
"(",
"missing_specs",
")",
")",
"_post_process_data",
"(",
"specs",
")",
"return",
"specs"
] |
Parses a TSPLIB file descriptor and returns a dict containing the problem definition
|
[
"Parses",
"a",
"TSPLIB",
"file",
"descriptor",
"and",
"returns",
"a",
"dict",
"containing",
"the",
"problem",
"definition"
] |
python
|
train
|
ray-project/ray
|
python/ray/experimental/array/distributed/core.py
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/array/distributed/core.py#L58-L68
|
def assemble(self):
"""Assemble an array from a distributed array of object IDs."""
first_block = ray.get(self.objectids[(0, ) * self.ndim])
dtype = first_block.dtype
result = np.zeros(self.shape, dtype=dtype)
for index in np.ndindex(*self.num_blocks):
lower = DistArray.compute_block_lower(index, self.shape)
upper = DistArray.compute_block_upper(index, self.shape)
result[[slice(l, u) for (l, u) in zip(lower, upper)]] = ray.get(
self.objectids[index])
return result
|
[
"def",
"assemble",
"(",
"self",
")",
":",
"first_block",
"=",
"ray",
".",
"get",
"(",
"self",
".",
"objectids",
"[",
"(",
"0",
",",
")",
"*",
"self",
".",
"ndim",
"]",
")",
"dtype",
"=",
"first_block",
".",
"dtype",
"result",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"shape",
",",
"dtype",
"=",
"dtype",
")",
"for",
"index",
"in",
"np",
".",
"ndindex",
"(",
"*",
"self",
".",
"num_blocks",
")",
":",
"lower",
"=",
"DistArray",
".",
"compute_block_lower",
"(",
"index",
",",
"self",
".",
"shape",
")",
"upper",
"=",
"DistArray",
".",
"compute_block_upper",
"(",
"index",
",",
"self",
".",
"shape",
")",
"result",
"[",
"[",
"slice",
"(",
"l",
",",
"u",
")",
"for",
"(",
"l",
",",
"u",
")",
"in",
"zip",
"(",
"lower",
",",
"upper",
")",
"]",
"]",
"=",
"ray",
".",
"get",
"(",
"self",
".",
"objectids",
"[",
"index",
"]",
")",
"return",
"result"
] |
Assemble an array from a distributed array of object IDs.
|
[
"Assemble",
"an",
"array",
"from",
"a",
"distributed",
"array",
"of",
"object",
"IDs",
"."
] |
python
|
train
|
zarr-developers/zarr
|
zarr/hierarchy.py
|
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L458-L496
|
def visitvalues(self, func):
"""Run ``func`` on each object.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> g2 = g1.create_group('foo')
>>> g3 = g1.create_group('bar')
>>> g4 = g3.create_group('baz')
>>> g5 = g3.create_group('quux')
>>> def print_visitor(obj):
... print(obj)
>>> g1.visitvalues(print_visitor)
<zarr.hierarchy.Group '/bar'>
<zarr.hierarchy.Group '/bar/baz'>
<zarr.hierarchy.Group '/bar/quux'>
<zarr.hierarchy.Group '/foo'>
>>> g3.visitvalues(print_visitor)
<zarr.hierarchy.Group '/bar/baz'>
<zarr.hierarchy.Group '/bar/quux'>
"""
def _visit(obj):
yield obj
keys = sorted(getattr(obj, "keys", lambda: [])())
for k in keys:
for v in _visit(obj[k]):
yield v
for each_obj in islice(_visit(self), 1, None):
value = func(each_obj)
if value is not None:
return value
|
[
"def",
"visitvalues",
"(",
"self",
",",
"func",
")",
":",
"def",
"_visit",
"(",
"obj",
")",
":",
"yield",
"obj",
"keys",
"=",
"sorted",
"(",
"getattr",
"(",
"obj",
",",
"\"keys\"",
",",
"lambda",
":",
"[",
"]",
")",
"(",
")",
")",
"for",
"k",
"in",
"keys",
":",
"for",
"v",
"in",
"_visit",
"(",
"obj",
"[",
"k",
"]",
")",
":",
"yield",
"v",
"for",
"each_obj",
"in",
"islice",
"(",
"_visit",
"(",
"self",
")",
",",
"1",
",",
"None",
")",
":",
"value",
"=",
"func",
"(",
"each_obj",
")",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"value"
] |
Run ``func`` on each object.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> g2 = g1.create_group('foo')
>>> g3 = g1.create_group('bar')
>>> g4 = g3.create_group('baz')
>>> g5 = g3.create_group('quux')
>>> def print_visitor(obj):
... print(obj)
>>> g1.visitvalues(print_visitor)
<zarr.hierarchy.Group '/bar'>
<zarr.hierarchy.Group '/bar/baz'>
<zarr.hierarchy.Group '/bar/quux'>
<zarr.hierarchy.Group '/foo'>
>>> g3.visitvalues(print_visitor)
<zarr.hierarchy.Group '/bar/baz'>
<zarr.hierarchy.Group '/bar/quux'>
|
[
"Run",
"func",
"on",
"each",
"object",
"."
] |
python
|
train
|
senseobservationsystems/commonsense-python-lib
|
senseapi.py
|
https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L392-L410
|
def AuthenticateOauth (self, oauth_token_key, oauth_token_secret, oauth_consumer_key, oauth_consumer_secret):
"""
Authenticate using Oauth
@param oauth_token_key (string) - A valid oauth token key obtained from CommonSense
@param oauth_token_secret (string) - A valid oauth token secret obtained from CommonSense
@param oauth_consumer_key (string) - A valid oauth consumer key obtained from CommonSense
@param oauth_consumer_secret (string) - A valid oauth consumer secret obtained from CommonSense
@return (boolean) - Boolean indicating whether the provided credentials were successfully authenticated
"""
self.__oauth_consumer__ = oauth.OAuthConsumer(str(oauth_consumer_key), str(oauth_consumer_secret))
self.__oauth_token__ = oauth.OAuthToken(str(oauth_token_key), str(oauth_token_secret))
self.__authentication__ = 'oauth'
if self.__SenseApiCall__('/users/current.json', 'GET'):
return True
else:
self.__error__ = "api call unsuccessful"
return False
|
[
"def",
"AuthenticateOauth",
"(",
"self",
",",
"oauth_token_key",
",",
"oauth_token_secret",
",",
"oauth_consumer_key",
",",
"oauth_consumer_secret",
")",
":",
"self",
".",
"__oauth_consumer__",
"=",
"oauth",
".",
"OAuthConsumer",
"(",
"str",
"(",
"oauth_consumer_key",
")",
",",
"str",
"(",
"oauth_consumer_secret",
")",
")",
"self",
".",
"__oauth_token__",
"=",
"oauth",
".",
"OAuthToken",
"(",
"str",
"(",
"oauth_token_key",
")",
",",
"str",
"(",
"oauth_token_secret",
")",
")",
"self",
".",
"__authentication__",
"=",
"'oauth'",
"if",
"self",
".",
"__SenseApiCall__",
"(",
"'/users/current.json'",
",",
"'GET'",
")",
":",
"return",
"True",
"else",
":",
"self",
".",
"__error__",
"=",
"\"api call unsuccessful\"",
"return",
"False"
] |
Authenticate using Oauth
@param oauth_token_key (string) - A valid oauth token key obtained from CommonSense
@param oauth_token_secret (string) - A valid oauth token secret obtained from CommonSense
@param oauth_consumer_key (string) - A valid oauth consumer key obtained from CommonSense
@param oauth_consumer_secret (string) - A valid oauth consumer secret obtained from CommonSense
@return (boolean) - Boolean indicating whether the provided credentials were successfully authenticated
|
[
"Authenticate",
"using",
"Oauth"
] |
python
|
train
|
linkhub-sdk/popbill.py
|
popbill/htTaxinvoiceService.py
|
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L71-L85
|
def getJobState(self, CorpNum, JobID, UserID=None):
""" 수집 상태 확인
args
CorpNum : 팝빌회원 사업자번호
JobID : 작업아이디
UserID : 팝빌회원 아이디
return
수집 상태 정보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.")
return self._httpget('/HomeTax/Taxinvoice/' + JobID + '/State', CorpNum, UserID)
|
[
"def",
"getJobState",
"(",
"self",
",",
"CorpNum",
",",
"JobID",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"JobID",
"==",
"None",
"or",
"len",
"(",
"JobID",
")",
"!=",
"18",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"작업아이디(jobID)가 올바르지 않습니다.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/HomeTax/Taxinvoice/'",
"+",
"JobID",
"+",
"'/State'",
",",
"CorpNum",
",",
"UserID",
")"
] |
수집 상태 확인
args
CorpNum : 팝빌회원 사업자번호
JobID : 작업아이디
UserID : 팝빌회원 아이디
return
수집 상태 정보
raise
PopbillException
|
[
"수집",
"상태",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"JobID",
":",
"작업아이디",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"수집",
"상태",
"정보",
"raise",
"PopbillException"
] |
python
|
train
|
bitesofcode/projexui
|
projexui/widgets/xcalendarwidget/xcalendaritem.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendaritem.py#L261-L334
|
def rebuildDay( self ):
"""
Rebuilds the current item in day mode.
"""
scene = self.scene()
if ( not scene ):
return
# calculate the base information
start_date = self.dateStart()
end_date = self.dateEnd()
min_date = scene.minimumDate()
max_date = scene.maximumDate()
# make sure our item is visible
if ( not (min_date <= end_date and start_date <= max_date)):
self.hide()
self.setPath(QPainterPath())
return
# make sure we have valid range information
if ( start_date < min_date ):
start_date = min_date
start_inrange = False
else:
start_inrange = True
if ( max_date < end_date ):
end_date = max_date
end_inrange = False
else:
end_inrange = True
# rebuild the path
path = QPainterPath()
self.setPos(0, 0)
pad = 2
offset = 18
height = 16
# rebuild a timed item
if ( not self.isAllDay() ):
start_dtime = QDateTime(self.dateStart(), self.timeStart())
end_dtime = QDateTime(self.dateStart(),
self.timeEnd().addSecs(-30*60))
start_rect = scene.dateTimeRect(start_dtime)
end_rect = scene.dateTimeRect(end_dtime)
left = start_rect.left() + pad
top = start_rect.top() + pad
right = start_rect.right() - pad
bottom = end_rect.bottom() - pad
path.moveTo(left, top)
path.lineTo(right, top)
path.lineTo(right, bottom)
path.lineTo(left, bottom)
path.lineTo(left, top)
data = (left + 6,
top + 6,
right - left - 12,
bottom - top - 12,
Qt.AlignTop | Qt.AlignLeft,
'%s - %s\n(%s)' % (self.timeStart().toString('h:mmap')[:-1],
self.timeEnd().toString('h:mmap'),
self.title()))
self._textData.append(data)
self.setPath(path)
self.show()
|
[
"def",
"rebuildDay",
"(",
"self",
")",
":",
"scene",
"=",
"self",
".",
"scene",
"(",
")",
"if",
"(",
"not",
"scene",
")",
":",
"return",
"# calculate the base information\r",
"start_date",
"=",
"self",
".",
"dateStart",
"(",
")",
"end_date",
"=",
"self",
".",
"dateEnd",
"(",
")",
"min_date",
"=",
"scene",
".",
"minimumDate",
"(",
")",
"max_date",
"=",
"scene",
".",
"maximumDate",
"(",
")",
"# make sure our item is visible\r",
"if",
"(",
"not",
"(",
"min_date",
"<=",
"end_date",
"and",
"start_date",
"<=",
"max_date",
")",
")",
":",
"self",
".",
"hide",
"(",
")",
"self",
".",
"setPath",
"(",
"QPainterPath",
"(",
")",
")",
"return",
"# make sure we have valid range information\r",
"if",
"(",
"start_date",
"<",
"min_date",
")",
":",
"start_date",
"=",
"min_date",
"start_inrange",
"=",
"False",
"else",
":",
"start_inrange",
"=",
"True",
"if",
"(",
"max_date",
"<",
"end_date",
")",
":",
"end_date",
"=",
"max_date",
"end_inrange",
"=",
"False",
"else",
":",
"end_inrange",
"=",
"True",
"# rebuild the path\r",
"path",
"=",
"QPainterPath",
"(",
")",
"self",
".",
"setPos",
"(",
"0",
",",
"0",
")",
"pad",
"=",
"2",
"offset",
"=",
"18",
"height",
"=",
"16",
"# rebuild a timed item\r",
"if",
"(",
"not",
"self",
".",
"isAllDay",
"(",
")",
")",
":",
"start_dtime",
"=",
"QDateTime",
"(",
"self",
".",
"dateStart",
"(",
")",
",",
"self",
".",
"timeStart",
"(",
")",
")",
"end_dtime",
"=",
"QDateTime",
"(",
"self",
".",
"dateStart",
"(",
")",
",",
"self",
".",
"timeEnd",
"(",
")",
".",
"addSecs",
"(",
"-",
"30",
"*",
"60",
")",
")",
"start_rect",
"=",
"scene",
".",
"dateTimeRect",
"(",
"start_dtime",
")",
"end_rect",
"=",
"scene",
".",
"dateTimeRect",
"(",
"end_dtime",
")",
"left",
"=",
"start_rect",
".",
"left",
"(",
")",
"+",
"pad",
"top",
"=",
"start_rect",
".",
"top",
"(",
")",
"+",
"pad",
"right",
"=",
"start_rect",
".",
"right",
"(",
")",
"-",
"pad",
"bottom",
"=",
"end_rect",
".",
"bottom",
"(",
")",
"-",
"pad",
"path",
".",
"moveTo",
"(",
"left",
",",
"top",
")",
"path",
".",
"lineTo",
"(",
"right",
",",
"top",
")",
"path",
".",
"lineTo",
"(",
"right",
",",
"bottom",
")",
"path",
".",
"lineTo",
"(",
"left",
",",
"bottom",
")",
"path",
".",
"lineTo",
"(",
"left",
",",
"top",
")",
"data",
"=",
"(",
"left",
"+",
"6",
",",
"top",
"+",
"6",
",",
"right",
"-",
"left",
"-",
"12",
",",
"bottom",
"-",
"top",
"-",
"12",
",",
"Qt",
".",
"AlignTop",
"|",
"Qt",
".",
"AlignLeft",
",",
"'%s - %s\\n(%s)'",
"%",
"(",
"self",
".",
"timeStart",
"(",
")",
".",
"toString",
"(",
"'h:mmap'",
")",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"timeEnd",
"(",
")",
".",
"toString",
"(",
"'h:mmap'",
")",
",",
"self",
".",
"title",
"(",
")",
")",
")",
"self",
".",
"_textData",
".",
"append",
"(",
"data",
")",
"self",
".",
"setPath",
"(",
"path",
")",
"self",
".",
"show",
"(",
")"
] |
Rebuilds the current item in day mode.
|
[
"Rebuilds",
"the",
"current",
"item",
"in",
"day",
"mode",
"."
] |
python
|
train
|
ishepard/pydriller
|
pydriller/git_repository.py
|
https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L302-L318
|
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
[
"def",
"get_commits_modified_file",
"(",
"self",
",",
"filepath",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"path",
"=",
"str",
"(",
"Path",
"(",
"filepath",
")",
")",
"commits",
"=",
"[",
"]",
"try",
":",
"commits",
"=",
"self",
".",
"git",
".",
"log",
"(",
"\"--follow\"",
",",
"\"--format=%H\"",
",",
"path",
")",
".",
"split",
"(",
"'\\n'",
")",
"except",
"GitCommandError",
":",
"logger",
".",
"debug",
"(",
"\"Could not find information of file %s\"",
",",
"path",
")",
"return",
"commits"
] |
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
|
[
"Given",
"a",
"filepath",
"returns",
"all",
"the",
"commits",
"that",
"modified",
"this",
"file",
"(",
"following",
"renames",
")",
"."
] |
python
|
train
|
MoseleyBioinformaticsLab/ctfile
|
ctfile/ctfile.py
|
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L1144-L1155
|
def default(self, o):
"""Default encoder.
:param o: Atom or Bond instance.
:type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`.
:return: Dictionary that contains information required for atom and bond block of ``Ctab``.
:rtype: :py:class:`collections.OrderedDict`
"""
if isinstance(o, Atom) or isinstance(o, Bond):
return o._ctab_data
else:
return o.__dict__
|
[
"def",
"default",
"(",
"self",
",",
"o",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"Atom",
")",
"or",
"isinstance",
"(",
"o",
",",
"Bond",
")",
":",
"return",
"o",
".",
"_ctab_data",
"else",
":",
"return",
"o",
".",
"__dict__"
] |
Default encoder.
:param o: Atom or Bond instance.
:type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`.
:return: Dictionary that contains information required for atom and bond block of ``Ctab``.
:rtype: :py:class:`collections.OrderedDict`
|
[
"Default",
"encoder",
"."
] |
python
|
train
|
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py#L635-L654
|
def get_executor(self, create=1):
"""Fetch the action executor for this node. Create one if
there isn't already one, and requested to do so."""
try:
executor = self.executor
except AttributeError:
if not create:
raise
try:
act = self.builder.action
except AttributeError:
executor = SCons.Executor.Null(targets=[self])
else:
executor = SCons.Executor.Executor(act,
self.env or self.builder.env,
[self.builder.overrides],
[self],
self.sources)
self.executor = executor
return executor
|
[
"def",
"get_executor",
"(",
"self",
",",
"create",
"=",
"1",
")",
":",
"try",
":",
"executor",
"=",
"self",
".",
"executor",
"except",
"AttributeError",
":",
"if",
"not",
"create",
":",
"raise",
"try",
":",
"act",
"=",
"self",
".",
"builder",
".",
"action",
"except",
"AttributeError",
":",
"executor",
"=",
"SCons",
".",
"Executor",
".",
"Null",
"(",
"targets",
"=",
"[",
"self",
"]",
")",
"else",
":",
"executor",
"=",
"SCons",
".",
"Executor",
".",
"Executor",
"(",
"act",
",",
"self",
".",
"env",
"or",
"self",
".",
"builder",
".",
"env",
",",
"[",
"self",
".",
"builder",
".",
"overrides",
"]",
",",
"[",
"self",
"]",
",",
"self",
".",
"sources",
")",
"self",
".",
"executor",
"=",
"executor",
"return",
"executor"
] |
Fetch the action executor for this node. Create one if
there isn't already one, and requested to do so.
|
[
"Fetch",
"the",
"action",
"executor",
"for",
"this",
"node",
".",
"Create",
"one",
"if",
"there",
"isn",
"t",
"already",
"one",
"and",
"requested",
"to",
"do",
"so",
"."
] |
python
|
train
|
Qiskit/qiskit-terra
|
qiskit/tools/qi/qi.py
|
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/qi/qi.py#L249-L287
|
def choi_to_rauli(choi, order=1):
"""
Convert a Choi-matrix to a Pauli-basis superoperator.
Note that this function assumes that the Choi-matrix
is defined in the standard column-stacking convention
and is normalized to have trace 1. For a channel E this
is defined as: choi = (I \\otimes E)(bell_state).
The resulting 'rauli' R acts on input states as
|rho_out>_p = R.|rho_in>_p
where |rho> = vectorize(rho, method='pauli') for order=1
and |rho> = vectorize(rho, method='pauli_weights') for order=0.
Args:
choi (matrix): the input Choi-matrix.
order (int): ordering of the Pauli group vector.
order=1 (default) is standard lexicographic ordering.
Eg: [II, IX, IY, IZ, XI, XX, XY,...]
order=0 is ordered by weights.
Eg. [II, IX, IY, IZ, XI, XY, XZ, XX, XY,...]
Returns:
np.array: A superoperator in the Pauli basis.
"""
if order == 0:
order = 'weight'
elif order == 1:
order = 'tensor'
# get number of qubits'
num_qubits = int(np.log2(np.sqrt(len(choi))))
pgp = pauli_group(num_qubits, case=order)
rauli = []
for i in pgp:
for j in pgp:
pauliop = np.kron(j.to_matrix().T, i.to_matrix())
rauli += [np.trace(np.dot(choi, pauliop))]
return np.array(rauli).reshape(4 ** num_qubits, 4 ** num_qubits)
|
[
"def",
"choi_to_rauli",
"(",
"choi",
",",
"order",
"=",
"1",
")",
":",
"if",
"order",
"==",
"0",
":",
"order",
"=",
"'weight'",
"elif",
"order",
"==",
"1",
":",
"order",
"=",
"'tensor'",
"# get number of qubits'",
"num_qubits",
"=",
"int",
"(",
"np",
".",
"log2",
"(",
"np",
".",
"sqrt",
"(",
"len",
"(",
"choi",
")",
")",
")",
")",
"pgp",
"=",
"pauli_group",
"(",
"num_qubits",
",",
"case",
"=",
"order",
")",
"rauli",
"=",
"[",
"]",
"for",
"i",
"in",
"pgp",
":",
"for",
"j",
"in",
"pgp",
":",
"pauliop",
"=",
"np",
".",
"kron",
"(",
"j",
".",
"to_matrix",
"(",
")",
".",
"T",
",",
"i",
".",
"to_matrix",
"(",
")",
")",
"rauli",
"+=",
"[",
"np",
".",
"trace",
"(",
"np",
".",
"dot",
"(",
"choi",
",",
"pauliop",
")",
")",
"]",
"return",
"np",
".",
"array",
"(",
"rauli",
")",
".",
"reshape",
"(",
"4",
"**",
"num_qubits",
",",
"4",
"**",
"num_qubits",
")"
] |
Convert a Choi-matrix to a Pauli-basis superoperator.
Note that this function assumes that the Choi-matrix
is defined in the standard column-stacking convention
and is normalized to have trace 1. For a channel E this
is defined as: choi = (I \\otimes E)(bell_state).
The resulting 'rauli' R acts on input states as
|rho_out>_p = R.|rho_in>_p
where |rho> = vectorize(rho, method='pauli') for order=1
and |rho> = vectorize(rho, method='pauli_weights') for order=0.
Args:
choi (matrix): the input Choi-matrix.
order (int): ordering of the Pauli group vector.
order=1 (default) is standard lexicographic ordering.
Eg: [II, IX, IY, IZ, XI, XX, XY,...]
order=0 is ordered by weights.
Eg. [II, IX, IY, IZ, XI, XY, XZ, XX, XY,...]
Returns:
np.array: A superoperator in the Pauli basis.
|
[
"Convert",
"a",
"Choi",
"-",
"matrix",
"to",
"a",
"Pauli",
"-",
"basis",
"superoperator",
"."
] |
python
|
test
|
spotify/pyschema
|
pyschema/core.py
|
https://github.com/spotify/pyschema/blob/7e6c3934150bcb040c628d74ace6caf5fcf867df/pyschema/core.py#L512-L522
|
def from_json_compatible(schema, dct):
"Load from json-encodable"
kwargs = {}
for key in dct:
field_type = schema._fields.get(key)
if field_type is None:
raise ParseError("Unexpected field encountered in line for record %s: %s" % (schema.__name__, key))
kwargs[key] = field_type.load(dct[key])
return schema(**kwargs)
|
[
"def",
"from_json_compatible",
"(",
"schema",
",",
"dct",
")",
":",
"kwargs",
"=",
"{",
"}",
"for",
"key",
"in",
"dct",
":",
"field_type",
"=",
"schema",
".",
"_fields",
".",
"get",
"(",
"key",
")",
"if",
"field_type",
"is",
"None",
":",
"raise",
"ParseError",
"(",
"\"Unexpected field encountered in line for record %s: %s\"",
"%",
"(",
"schema",
".",
"__name__",
",",
"key",
")",
")",
"kwargs",
"[",
"key",
"]",
"=",
"field_type",
".",
"load",
"(",
"dct",
"[",
"key",
"]",
")",
"return",
"schema",
"(",
"*",
"*",
"kwargs",
")"
] |
Load from json-encodable
|
[
"Load",
"from",
"json",
"-",
"encodable"
] |
python
|
test
|
Jaymon/endpoints
|
endpoints/utils.py
|
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/utils.py#L211-L244
|
def _sort(self, a, b):
'''
sort the headers according to rfc 2616 so when __iter__ is called, the accept media types are
in order from most preferred to least preferred
'''
ret = 0
# first we check q, higher values win:
if a[1] != b[1]:
ret = cmp(a[1], b[1])
else:
found = False
for i in range(2):
ai = a[0][i]
bi = b[0][i]
if ai == '*':
if bi != '*':
ret = -1
found = True
break
else:
# both *, more verbose params win
ret = cmp(len(a[2]), len(b[2]))
found = True
break
elif bi == '*':
ret = 1
found = True
break
if not found:
ret = cmp(len(a[2]), len(b[2]))
return ret
|
[
"def",
"_sort",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"ret",
"=",
"0",
"# first we check q, higher values win:",
"if",
"a",
"[",
"1",
"]",
"!=",
"b",
"[",
"1",
"]",
":",
"ret",
"=",
"cmp",
"(",
"a",
"[",
"1",
"]",
",",
"b",
"[",
"1",
"]",
")",
"else",
":",
"found",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"ai",
"=",
"a",
"[",
"0",
"]",
"[",
"i",
"]",
"bi",
"=",
"b",
"[",
"0",
"]",
"[",
"i",
"]",
"if",
"ai",
"==",
"'*'",
":",
"if",
"bi",
"!=",
"'*'",
":",
"ret",
"=",
"-",
"1",
"found",
"=",
"True",
"break",
"else",
":",
"# both *, more verbose params win",
"ret",
"=",
"cmp",
"(",
"len",
"(",
"a",
"[",
"2",
"]",
")",
",",
"len",
"(",
"b",
"[",
"2",
"]",
")",
")",
"found",
"=",
"True",
"break",
"elif",
"bi",
"==",
"'*'",
":",
"ret",
"=",
"1",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"ret",
"=",
"cmp",
"(",
"len",
"(",
"a",
"[",
"2",
"]",
")",
",",
"len",
"(",
"b",
"[",
"2",
"]",
")",
")",
"return",
"ret"
] |
sort the headers according to rfc 2616 so when __iter__ is called, the accept media types are
in order from most preferred to least preferred
|
[
"sort",
"the",
"headers",
"according",
"to",
"rfc",
"2616",
"so",
"when",
"__iter__",
"is",
"called",
"the",
"accept",
"media",
"types",
"are",
"in",
"order",
"from",
"most",
"preferred",
"to",
"least",
"preferred"
] |
python
|
train
|
Microsoft/nni
|
tools/nni_cmd/launcher.py
|
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/launcher.py#L142-L155
|
def set_trial_config(experiment_config, port, config_file_name):
'''set trial configuration'''
request_data = dict()
request_data['trial_config'] = experiment_config['trial']
response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT)
if check_response(response):
return True
else:
print('Error message is {}'.format(response.text))
_, stderr_full_path = get_log_path(config_file_name)
if response:
with open(stderr_full_path, 'a+') as fout:
fout.write(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':')))
return False
|
[
"def",
"set_trial_config",
"(",
"experiment_config",
",",
"port",
",",
"config_file_name",
")",
":",
"request_data",
"=",
"dict",
"(",
")",
"request_data",
"[",
"'trial_config'",
"]",
"=",
"experiment_config",
"[",
"'trial'",
"]",
"response",
"=",
"rest_put",
"(",
"cluster_metadata_url",
"(",
"port",
")",
",",
"json",
".",
"dumps",
"(",
"request_data",
")",
",",
"REST_TIME_OUT",
")",
"if",
"check_response",
"(",
"response",
")",
":",
"return",
"True",
"else",
":",
"print",
"(",
"'Error message is {}'",
".",
"format",
"(",
"response",
".",
"text",
")",
")",
"_",
",",
"stderr_full_path",
"=",
"get_log_path",
"(",
"config_file_name",
")",
"if",
"response",
":",
"with",
"open",
"(",
"stderr_full_path",
",",
"'a+'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
")",
")",
"return",
"False"
] |
set trial configuration
|
[
"set",
"trial",
"configuration"
] |
python
|
train
|
inveniosoftware/invenio-records-rest
|
invenio_records_rest/serializers/datacite.py
|
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/serializers/datacite.py#L120-L142
|
def serialize_oaipmh(self, pid, record):
"""Serialize a single record for OAI-PMH."""
root = etree.Element(
'oai_datacite',
nsmap={
None: 'http://schema.datacite.org/oai/oai-1.0/',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xml': 'xml',
},
attrib={
'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd',
}
)
root.append(E.isReferenceQuality(self.is_reference_quality))
root.append(E.schemaVersion(self.serializer.version))
root.append(E.datacentreSymbol(self.datacentre))
root.append(E.payload(
self.serializer.serialize_oaipmh(pid, record)
))
return root
|
[
"def",
"serialize_oaipmh",
"(",
"self",
",",
"pid",
",",
"record",
")",
":",
"root",
"=",
"etree",
".",
"Element",
"(",
"'oai_datacite'",
",",
"nsmap",
"=",
"{",
"None",
":",
"'http://schema.datacite.org/oai/oai-1.0/'",
",",
"'xsi'",
":",
"'http://www.w3.org/2001/XMLSchema-instance'",
",",
"'xml'",
":",
"'xml'",
",",
"}",
",",
"attrib",
"=",
"{",
"'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'",
":",
"'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd'",
",",
"}",
")",
"root",
".",
"append",
"(",
"E",
".",
"isReferenceQuality",
"(",
"self",
".",
"is_reference_quality",
")",
")",
"root",
".",
"append",
"(",
"E",
".",
"schemaVersion",
"(",
"self",
".",
"serializer",
".",
"version",
")",
")",
"root",
".",
"append",
"(",
"E",
".",
"datacentreSymbol",
"(",
"self",
".",
"datacentre",
")",
")",
"root",
".",
"append",
"(",
"E",
".",
"payload",
"(",
"self",
".",
"serializer",
".",
"serialize_oaipmh",
"(",
"pid",
",",
"record",
")",
")",
")",
"return",
"root"
] |
Serialize a single record for OAI-PMH.
|
[
"Serialize",
"a",
"single",
"record",
"for",
"OAI",
"-",
"PMH",
"."
] |
python
|
train
|
ecell/ecell4
|
ecell4/util/viz.py
|
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L802-L825
|
def generate_html(keywords, tmpl_path, package_name='ecell4.util'):
"""
Generate static html file from JSON model and its own id.
Parameters
----------
model : dict
JSON model from which ecell4.viz generates a plot.
model_id : string
Unique id for the plot.
Returns
-------
html :
A HTML object
"""
from jinja2 import Template
import pkgutil
template = Template(pkgutil.get_data(package_name, tmpl_path).decode())
# path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path
# template = Template(open(path).read())
html = template.render(**keywords)
return html
|
[
"def",
"generate_html",
"(",
"keywords",
",",
"tmpl_path",
",",
"package_name",
"=",
"'ecell4.util'",
")",
":",
"from",
"jinja2",
"import",
"Template",
"import",
"pkgutil",
"template",
"=",
"Template",
"(",
"pkgutil",
".",
"get_data",
"(",
"package_name",
",",
"tmpl_path",
")",
".",
"decode",
"(",
")",
")",
"# path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path",
"# template = Template(open(path).read())",
"html",
"=",
"template",
".",
"render",
"(",
"*",
"*",
"keywords",
")",
"return",
"html"
] |
Generate static html file from JSON model and its own id.
Parameters
----------
model : dict
JSON model from which ecell4.viz generates a plot.
model_id : string
Unique id for the plot.
Returns
-------
html :
A HTML object
|
[
"Generate",
"static",
"html",
"file",
"from",
"JSON",
"model",
"and",
"its",
"own",
"id",
"."
] |
python
|
train
|
heronotears/lazyxml
|
lazyxml/parser.py
|
https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L160-L169
|
def get_node(self, element):
r"""Get node info.
Parse element and get the element tag info. Include tag name, value, attribute, namespace.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
"""
ns, tag = self.split_namespace(element.tag)
return {'tag': tag, 'value': (element.text or '').strip(), 'attr': element.attrib, 'namespace': ns}
|
[
"def",
"get_node",
"(",
"self",
",",
"element",
")",
":",
"ns",
",",
"tag",
"=",
"self",
".",
"split_namespace",
"(",
"element",
".",
"tag",
")",
"return",
"{",
"'tag'",
":",
"tag",
",",
"'value'",
":",
"(",
"element",
".",
"text",
"or",
"''",
")",
".",
"strip",
"(",
")",
",",
"'attr'",
":",
"element",
".",
"attrib",
",",
"'namespace'",
":",
"ns",
"}"
] |
r"""Get node info.
Parse element and get the element tag info. Include tag name, value, attribute, namespace.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
|
[
"r",
"Get",
"node",
"info",
"."
] |
python
|
train
|
pypa/setuptools
|
setuptools/msvc.py
|
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/msvc.py#L494-L522
|
def find_available_vc_vers(self):
"""
Find all available Microsoft Visual C++ versions.
"""
ms = self.ri.microsoft
vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
vc_vers = []
for hkey in self.ri.HKEYS:
for key in vckeys:
try:
bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
except (OSError, IOError):
continue
subkeys, values, _ = winreg.QueryInfoKey(bkey)
for i in range(values):
try:
ver = float(winreg.EnumValue(bkey, i)[0])
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
for i in range(subkeys):
try:
ver = float(winreg.EnumKey(bkey, i))
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
return sorted(vc_vers)
|
[
"def",
"find_available_vc_vers",
"(",
"self",
")",
":",
"ms",
"=",
"self",
".",
"ri",
".",
"microsoft",
"vckeys",
"=",
"(",
"self",
".",
"ri",
".",
"vc",
",",
"self",
".",
"ri",
".",
"vc_for_python",
",",
"self",
".",
"ri",
".",
"vs",
")",
"vc_vers",
"=",
"[",
"]",
"for",
"hkey",
"in",
"self",
".",
"ri",
".",
"HKEYS",
":",
"for",
"key",
"in",
"vckeys",
":",
"try",
":",
"bkey",
"=",
"winreg",
".",
"OpenKey",
"(",
"hkey",
",",
"ms",
"(",
"key",
")",
",",
"0",
",",
"winreg",
".",
"KEY_READ",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
":",
"continue",
"subkeys",
",",
"values",
",",
"_",
"=",
"winreg",
".",
"QueryInfoKey",
"(",
"bkey",
")",
"for",
"i",
"in",
"range",
"(",
"values",
")",
":",
"try",
":",
"ver",
"=",
"float",
"(",
"winreg",
".",
"EnumValue",
"(",
"bkey",
",",
"i",
")",
"[",
"0",
"]",
")",
"if",
"ver",
"not",
"in",
"vc_vers",
":",
"vc_vers",
".",
"append",
"(",
"ver",
")",
"except",
"ValueError",
":",
"pass",
"for",
"i",
"in",
"range",
"(",
"subkeys",
")",
":",
"try",
":",
"ver",
"=",
"float",
"(",
"winreg",
".",
"EnumKey",
"(",
"bkey",
",",
"i",
")",
")",
"if",
"ver",
"not",
"in",
"vc_vers",
":",
"vc_vers",
".",
"append",
"(",
"ver",
")",
"except",
"ValueError",
":",
"pass",
"return",
"sorted",
"(",
"vc_vers",
")"
] |
Find all available Microsoft Visual C++ versions.
|
[
"Find",
"all",
"available",
"Microsoft",
"Visual",
"C",
"++",
"versions",
"."
] |
python
|
train
|
wdecoster/nanoget
|
nanoget/nanoget.py
|
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L85-L97
|
def combine_dfs(dfs, names, method):
"""Combine dataframes.
Combination is either done simple by just concatenating the DataFrames
or performs tracking by adding the name of the dataset as a column."""
if method == "track":
res = list()
for df, identifier in zip(dfs, names):
df["dataset"] = identifier
res.append(df)
return pd.concat(res, ignore_index=True)
elif method == "simple":
return pd.concat(dfs, ignore_index=True)
|
[
"def",
"combine_dfs",
"(",
"dfs",
",",
"names",
",",
"method",
")",
":",
"if",
"method",
"==",
"\"track\"",
":",
"res",
"=",
"list",
"(",
")",
"for",
"df",
",",
"identifier",
"in",
"zip",
"(",
"dfs",
",",
"names",
")",
":",
"df",
"[",
"\"dataset\"",
"]",
"=",
"identifier",
"res",
".",
"append",
"(",
"df",
")",
"return",
"pd",
".",
"concat",
"(",
"res",
",",
"ignore_index",
"=",
"True",
")",
"elif",
"method",
"==",
"\"simple\"",
":",
"return",
"pd",
".",
"concat",
"(",
"dfs",
",",
"ignore_index",
"=",
"True",
")"
] |
Combine dataframes.
Combination is either done simple by just concatenating the DataFrames
or performs tracking by adding the name of the dataset as a column.
|
[
"Combine",
"dataframes",
"."
] |
python
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L1222-L1245
|
def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True):
"""Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.
s^i is a learnable parameter with identity initialization.
std^i is optionally learnable with identity initialization.
Args:
name: variable scope.
z: input_tensor
logscale_factor: equivalent to scaling up the learning_rate by a factor
of logscale_factor.
trainable: Whether or not std^i is learnt.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
z_shape = common_layers.shape_list(z)
latent_multiplier = tf.get_variable(
"latent_multiplier", shape=z_shape, dtype=tf.float32,
initializer=tf.ones_initializer())
log_scale = tf.get_variable(
"log_scale_latent", shape=z_shape, dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=trainable)
log_scale = log_scale * logscale_factor
return tfp.distributions.Normal(
loc=latent_multiplier * z, scale=tf.exp(log_scale))
|
[
"def",
"scale_gaussian_prior",
"(",
"name",
",",
"z",
",",
"logscale_factor",
"=",
"3.0",
",",
"trainable",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"z_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"z",
")",
"latent_multiplier",
"=",
"tf",
".",
"get_variable",
"(",
"\"latent_multiplier\"",
",",
"shape",
"=",
"z_shape",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"tf",
".",
"ones_initializer",
"(",
")",
")",
"log_scale",
"=",
"tf",
".",
"get_variable",
"(",
"\"log_scale_latent\"",
",",
"shape",
"=",
"z_shape",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"trainable",
"=",
"trainable",
")",
"log_scale",
"=",
"log_scale",
"*",
"logscale_factor",
"return",
"tfp",
".",
"distributions",
".",
"Normal",
"(",
"loc",
"=",
"latent_multiplier",
"*",
"z",
",",
"scale",
"=",
"tf",
".",
"exp",
"(",
"log_scale",
")",
")"
] |
Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.
s^i is a learnable parameter with identity initialization.
std^i is optionally learnable with identity initialization.
Args:
name: variable scope.
z: input_tensor
logscale_factor: equivalent to scaling up the learning_rate by a factor
of logscale_factor.
trainable: Whether or not std^i is learnt.
|
[
"Returns",
"N",
"(",
"s^i",
"*",
"z^i",
"std^i",
")",
"where",
"s^i",
"and",
"std^i",
"are",
"pre",
"-",
"component",
"."
] |
python
|
train
|
mommermi/callhorizons
|
callhorizons/callhorizons.py
|
https://github.com/mommermi/callhorizons/blob/fdd7ad9e87cac107c1b7f88e594d118210da3b1a/callhorizons/callhorizons.py#L372-L386
|
def set_discreteepochs(self, discreteepochs):
"""Set a list of discrete epochs, epochs have to be given as Julian
Dates
:param discreteepochs: array_like
list or 1D array of floats or strings
:return: None
:example: >>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_discreteepochs([2457446.177083, 2457446.182343])
"""
if not isinstance(discreteepochs, (list, np.ndarray)):
discreteepochs = [discreteepochs]
self.discreteepochs = list(discreteepochs)
|
[
"def",
"set_discreteepochs",
"(",
"self",
",",
"discreteepochs",
")",
":",
"if",
"not",
"isinstance",
"(",
"discreteepochs",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"discreteepochs",
"=",
"[",
"discreteepochs",
"]",
"self",
".",
"discreteepochs",
"=",
"list",
"(",
"discreteepochs",
")"
] |
Set a list of discrete epochs, epochs have to be given as Julian
Dates
:param discreteepochs: array_like
list or 1D array of floats or strings
:return: None
:example: >>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_discreteepochs([2457446.177083, 2457446.182343])
|
[
"Set",
"a",
"list",
"of",
"discrete",
"epochs",
"epochs",
"have",
"to",
"be",
"given",
"as",
"Julian",
"Dates"
] |
python
|
train
|
apache/airflow
|
airflow/bin/cli.py
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/bin/cli.py#L763-L868
|
def restart_workers(gunicorn_master_proc, num_workers_expected, master_timeout):
"""
Runs forever, monitoring the child processes of @gunicorn_master_proc and
restarting workers occasionally.
Each iteration of the loop traverses one edge of this state transition
diagram, where each state (node) represents
[ num_ready_workers_running / num_workers_running ]. We expect most time to
be spent in [n / n]. `bs` is the setting webserver.worker_refresh_batch_size.
The horizontal transition at ? happens after the new worker parses all the
dags (so it could take a while!)
V ────────────────────────────────────────────────────────────────────────┐
[n / n] ──TTIN──> [ [n, n+bs) / n + bs ] ────?───> [n + bs / n + bs] ──TTOU─┘
^ ^───────────────┘
│
│ ┌────────────────v
└──────┴────── [ [0, n) / n ] <─── start
We change the number of workers by sending TTIN and TTOU to the gunicorn
master process, which increases and decreases the number of child workers
respectively. Gunicorn guarantees that on TTOU workers are terminated
gracefully and that the oldest worker is terminated.
"""
def wait_until_true(fn, timeout=0):
"""
Sleeps until fn is true
"""
t = time.time()
while not fn():
if 0 < timeout <= time.time() - t:
raise AirflowWebServerTimeout(
"No response from gunicorn master within {0} seconds"
.format(timeout))
time.sleep(0.1)
def start_refresh(gunicorn_master_proc):
batch_size = conf.getint('webserver', 'worker_refresh_batch_size')
log.debug('%s doing a refresh of %s workers', state, batch_size)
sys.stdout.flush()
sys.stderr.flush()
excess = 0
for _ in range(batch_size):
gunicorn_master_proc.send_signal(signal.SIGTTIN)
excess += 1
wait_until_true(lambda: num_workers_expected + excess ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
try:
wait_until_true(lambda: num_workers_expected ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
while True:
num_workers_running = get_num_workers_running(gunicorn_master_proc)
num_ready_workers_running = \
get_num_ready_workers_running(gunicorn_master_proc)
state = '[{0} / {1}]'.format(num_ready_workers_running, num_workers_running)
# Whenever some workers are not ready, wait until all workers are ready
if num_ready_workers_running < num_workers_running:
log.debug('%s some workers are starting up, waiting...', state)
sys.stdout.flush()
time.sleep(1)
# Kill a worker gracefully by asking gunicorn to reduce number of workers
elif num_workers_running > num_workers_expected:
excess = num_workers_running - num_workers_expected
log.debug('%s killing %s workers', state, excess)
for _ in range(excess):
gunicorn_master_proc.send_signal(signal.SIGTTOU)
excess -= 1
wait_until_true(lambda: num_workers_expected + excess ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
# Start a new worker by asking gunicorn to increase number of workers
elif num_workers_running == num_workers_expected:
refresh_interval = conf.getint('webserver', 'worker_refresh_interval')
log.debug(
'%s sleeping for %ss starting doing a refresh...',
state, refresh_interval
)
time.sleep(refresh_interval)
start_refresh(gunicorn_master_proc)
else:
# num_ready_workers_running == num_workers_running < num_workers_expected
log.error((
"%s some workers seem to have died and gunicorn"
"did not restart them as expected"
), state)
time.sleep(10)
if len(
psutil.Process(gunicorn_master_proc.pid).children()
) < num_workers_expected:
start_refresh(gunicorn_master_proc)
except (AirflowWebServerTimeout, OSError) as err:
log.error(err)
log.error("Shutting down webserver")
try:
gunicorn_master_proc.terminate()
gunicorn_master_proc.wait()
finally:
sys.exit(1)
|
[
"def",
"restart_workers",
"(",
"gunicorn_master_proc",
",",
"num_workers_expected",
",",
"master_timeout",
")",
":",
"def",
"wait_until_true",
"(",
"fn",
",",
"timeout",
"=",
"0",
")",
":",
"\"\"\"\n Sleeps until fn is true\n \"\"\"",
"t",
"=",
"time",
".",
"time",
"(",
")",
"while",
"not",
"fn",
"(",
")",
":",
"if",
"0",
"<",
"timeout",
"<=",
"time",
".",
"time",
"(",
")",
"-",
"t",
":",
"raise",
"AirflowWebServerTimeout",
"(",
"\"No response from gunicorn master within {0} seconds\"",
".",
"format",
"(",
"timeout",
")",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"def",
"start_refresh",
"(",
"gunicorn_master_proc",
")",
":",
"batch_size",
"=",
"conf",
".",
"getint",
"(",
"'webserver'",
",",
"'worker_refresh_batch_size'",
")",
"log",
".",
"debug",
"(",
"'%s doing a refresh of %s workers'",
",",
"state",
",",
"batch_size",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"excess",
"=",
"0",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
":",
"gunicorn_master_proc",
".",
"send_signal",
"(",
"signal",
".",
"SIGTTIN",
")",
"excess",
"+=",
"1",
"wait_until_true",
"(",
"lambda",
":",
"num_workers_expected",
"+",
"excess",
"==",
"get_num_workers_running",
"(",
"gunicorn_master_proc",
")",
",",
"master_timeout",
")",
"try",
":",
"wait_until_true",
"(",
"lambda",
":",
"num_workers_expected",
"==",
"get_num_workers_running",
"(",
"gunicorn_master_proc",
")",
",",
"master_timeout",
")",
"while",
"True",
":",
"num_workers_running",
"=",
"get_num_workers_running",
"(",
"gunicorn_master_proc",
")",
"num_ready_workers_running",
"=",
"get_num_ready_workers_running",
"(",
"gunicorn_master_proc",
")",
"state",
"=",
"'[{0} / {1}]'",
".",
"format",
"(",
"num_ready_workers_running",
",",
"num_workers_running",
")",
"# Whenever some workers are not ready, wait until all workers are ready",
"if",
"num_ready_workers_running",
"<",
"num_workers_running",
":",
"log",
".",
"debug",
"(",
"'%s some workers are starting up, waiting...'",
",",
"state",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"# Kill a worker gracefully by asking gunicorn to reduce number of workers",
"elif",
"num_workers_running",
">",
"num_workers_expected",
":",
"excess",
"=",
"num_workers_running",
"-",
"num_workers_expected",
"log",
".",
"debug",
"(",
"'%s killing %s workers'",
",",
"state",
",",
"excess",
")",
"for",
"_",
"in",
"range",
"(",
"excess",
")",
":",
"gunicorn_master_proc",
".",
"send_signal",
"(",
"signal",
".",
"SIGTTOU",
")",
"excess",
"-=",
"1",
"wait_until_true",
"(",
"lambda",
":",
"num_workers_expected",
"+",
"excess",
"==",
"get_num_workers_running",
"(",
"gunicorn_master_proc",
")",
",",
"master_timeout",
")",
"# Start a new worker by asking gunicorn to increase number of workers",
"elif",
"num_workers_running",
"==",
"num_workers_expected",
":",
"refresh_interval",
"=",
"conf",
".",
"getint",
"(",
"'webserver'",
",",
"'worker_refresh_interval'",
")",
"log",
".",
"debug",
"(",
"'%s sleeping for %ss starting doing a refresh...'",
",",
"state",
",",
"refresh_interval",
")",
"time",
".",
"sleep",
"(",
"refresh_interval",
")",
"start_refresh",
"(",
"gunicorn_master_proc",
")",
"else",
":",
"# num_ready_workers_running == num_workers_running < num_workers_expected",
"log",
".",
"error",
"(",
"(",
"\"%s some workers seem to have died and gunicorn\"",
"\"did not restart them as expected\"",
")",
",",
"state",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"if",
"len",
"(",
"psutil",
".",
"Process",
"(",
"gunicorn_master_proc",
".",
"pid",
")",
".",
"children",
"(",
")",
")",
"<",
"num_workers_expected",
":",
"start_refresh",
"(",
"gunicorn_master_proc",
")",
"except",
"(",
"AirflowWebServerTimeout",
",",
"OSError",
")",
"as",
"err",
":",
"log",
".",
"error",
"(",
"err",
")",
"log",
".",
"error",
"(",
"\"Shutting down webserver\"",
")",
"try",
":",
"gunicorn_master_proc",
".",
"terminate",
"(",
")",
"gunicorn_master_proc",
".",
"wait",
"(",
")",
"finally",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Runs forever, monitoring the child processes of @gunicorn_master_proc and
restarting workers occasionally.
Each iteration of the loop traverses one edge of this state transition
diagram, where each state (node) represents
[ num_ready_workers_running / num_workers_running ]. We expect most time to
be spent in [n / n]. `bs` is the setting webserver.worker_refresh_batch_size.
The horizontal transition at ? happens after the new worker parses all the
dags (so it could take a while!)
V ────────────────────────────────────────────────────────────────────────┐
[n / n] ──TTIN──> [ [n, n+bs) / n + bs ] ────?───> [n + bs / n + bs] ──TTOU─┘
^ ^───────────────┘
│
│ ┌────────────────v
└──────┴────── [ [0, n) / n ] <─── start
We change the number of workers by sending TTIN and TTOU to the gunicorn
master process, which increases and decreases the number of child workers
respectively. Gunicorn guarantees that on TTOU workers are terminated
gracefully and that the oldest worker is terminated.
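A minimal, self-contained sketch of the signalling mechanism described above (not Airflow's implementation): it drives a gunicorn master's worker count with SIGTTIN/SIGTTOU via psutil. The pid in the usage comments is hypothetical.

import signal
import psutil

def scale_workers(master_pid, delta):
    # Send TTIN (delta > 0) or TTOU (delta < 0) to the gunicorn master
    # |delta| times; gunicorn adds or gracefully retires one worker per signal.
    master = psutil.Process(master_pid)
    sig = signal.SIGTTIN if delta > 0 else signal.SIGTTOU
    for _ in range(abs(delta)):
        master.send_signal(sig)
    # The resulting worker count shows up as the master's child processes.
    return len(master.children())

# scale_workers(12345, +2)   # hypothetical pid: ask for two more workers
# scale_workers(12345, -1)   # retire the oldest worker gracefully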
|
[
"Runs",
"forever",
"monitoring",
"the",
"child",
"processes",
"of"
] |
python
|
test
|
ciena/afkak
|
afkak/kafkacodec.py
|
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L610-L621
|
def create_snappy_message(message_set):
"""
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
"""
encoded_message_set = KafkaCodec._encode_message_set(message_set)
snapped = snappy_encode(encoded_message_set)
return Message(0, CODEC_SNAPPY, None, snapped)
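A hedged usage sketch for the helper above. The import paths and the Message field order (magic, attributes, key, value) are assumptions inferred from the Message(0, CODEC_SNAPPY, None, snapped) call, not confirmed by this snippet alone.

from afkak.kafkacodec import create_snappy_message   # assumed import path
from afkak.common import Message                     # assumed import path

# Two hypothetical plain (uncompressed, keyless) messages.
plain = [Message(0, 0, None, b"event-1"), Message(0, 0, None, b"event-2")]

wrapped = create_snappy_message(plain)
# wrapped.value holds the snappy-compressed, encoded message set, ready to be
# produced to Kafka as a single atomic message.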
|
[
"def",
"create_snappy_message",
"(",
"message_set",
")",
":",
"encoded_message_set",
"=",
"KafkaCodec",
".",
"_encode_message_set",
"(",
"message_set",
")",
"snapped",
"=",
"snappy_encode",
"(",
"encoded_message_set",
")",
"return",
"Message",
"(",
"0",
",",
"CODEC_SNAPPY",
",",
"None",
",",
"snapped",
")"
] |
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
|
[
"Construct",
"a",
"Snappy",
"-",
"compressed",
"message",
"containing",
"multiple",
"messages"
] |
python
|
train
|
pazz/alot
|
alot/buffers/thread.py
|
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L267-L278
|
def focus_property(self, prop, direction):
"""does a walk in the given direction and focuses the
first message tree that matches the given property"""
newpos = self.get_selected_mid()
newpos = direction(newpos)
while newpos is not None:
MT = self._tree[newpos]
if prop(MT):
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
break
newpos = direction(newpos)
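The method above is a directional walk: step through positions until a predicate matches, then focus that position. Below is a self-contained illustration of the same pattern outside alot/urwid; the list and the direction helpers are hypothetical stand-ins for the message-tree container.

items = ["msg-a", "msg-b", "MSG-C", "msg-d"]

def next_pos(pos):
    return pos + 1 if pos + 1 < len(items) else None

def prev_pos(pos):
    return pos - 1 if pos - 1 >= 0 else None

def focus_property(start, prop, direction):
    pos = direction(start)
    while pos is not None:
        if prop(items[pos]):
            return pos        # first position matching the predicate
        pos = direction(pos)
    return None               # walked off the end without a match

print(focus_property(0, str.isupper, next_pos))   # -> 2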
|
[
"def",
"focus_property",
"(",
"self",
",",
"prop",
",",
"direction",
")",
":",
"newpos",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"direction",
"(",
"newpos",
")",
"while",
"newpos",
"is",
"not",
"None",
":",
"MT",
"=",
"self",
".",
"_tree",
"[",
"newpos",
"]",
"if",
"prop",
"(",
"MT",
")",
":",
"newpos",
"=",
"self",
".",
"_sanitize_position",
"(",
"(",
"newpos",
",",
")",
")",
"self",
".",
"body",
".",
"set_focus",
"(",
"newpos",
")",
"break",
"newpos",
"=",
"direction",
"(",
"newpos",
")"
] |
does a walk in the given direction and focuses the
first message tree that matches the given property
|
[
"does",
"a",
"walk",
"in",
"the",
"given",
"direction",
"and",
"focuses",
"the",
"first",
"message",
"tree",
"that",
"matches",
"the",
"given",
"property"
] |
python
|
train
|
westonplatter/fast_arrow
|
fast_arrow/resources/option.py
|
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option.py#L44-L62
|
def fetch_list(cls, client, ids):
"""
fetch instruments by ids
"""
results = []
request_url = "https://api.robinhood.com/options/instruments/"
for _ids in chunked_list(ids, 50):
params = {"ids": ",".join(_ids)}
data = client.get(request_url, params=params)
partial_results = data["results"]
while data["next"]:
data = client.get(data["next"])
partial_results.extend(data["results"])
results.extend(partial_results)
return results
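fetch_list leans on a chunked_list helper to keep each request at 50 ids and then drains the paginated response by following data["next"]. The helper below is a minimal sketch of the chunking behaviour the loop assumes; fast_arrow's own implementation may differ.

def chunked_list(items, chunk_size):
    # Yield consecutive slices of at most chunk_size elements.
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

ids = ["id{}".format(n) for n in range(120)]
print([len(chunk) for chunk in chunked_list(ids, 50)])   # -> [50, 50, 20]
# Each chunk becomes one GET with params={"ids": ",".join(chunk)}.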
|
[
"def",
"fetch_list",
"(",
"cls",
",",
"client",
",",
"ids",
")",
":",
"results",
"=",
"[",
"]",
"request_url",
"=",
"\"https://api.robinhood.com/options/instruments/\"",
"for",
"_ids",
"in",
"chunked_list",
"(",
"ids",
",",
"50",
")",
":",
"params",
"=",
"{",
"\"ids\"",
":",
"\",\"",
".",
"join",
"(",
"_ids",
")",
"}",
"data",
"=",
"client",
".",
"get",
"(",
"request_url",
",",
"params",
"=",
"params",
")",
"partial_results",
"=",
"data",
"[",
"\"results\"",
"]",
"while",
"data",
"[",
"\"next\"",
"]",
":",
"data",
"=",
"client",
".",
"get",
"(",
"data",
"[",
"\"next\"",
"]",
")",
"partial_results",
".",
"extend",
"(",
"data",
"[",
"\"results\"",
"]",
")",
"results",
".",
"extend",
"(",
"partial_results",
")",
"return",
"results"
] |
fetch instruments by ids
|
[
"fetch",
"instruments",
"by",
"ids"
] |
python
|
train
|
saltstack/salt
|
salt/modules/lxc.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2706-L2733
|
def set_parameter(name, parameter, value, path=None):
'''
Set the value of a cgroup parameter for a container.
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.set_parameter name parameter value
'''
if not exists(name, path=path):
return None
cmd = 'lxc-cgroup'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
cmd += ' -n {0} {1} {2}'.format(name, parameter, value)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
return False
else:
return True
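A worked example (with hypothetical values) of the command string the function assembles before handing it to cmd.run_all:

import pipes

name, parameter, value, path = 'web01', 'memory.limit_in_bytes', '512M', '/srv/lxc'
cmd = 'lxc-cgroup'
if path:
    cmd += ' -P {0}'.format(pipes.quote(path))
cmd += ' -n {0} {1} {2}'.format(name, parameter, value)
print(cmd)   # lxc-cgroup -P /srv/lxc -n web01 memory.limit_in_bytes 512M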
|
[
"def",
"set_parameter",
"(",
"name",
",",
"parameter",
",",
"value",
",",
"path",
"=",
"None",
")",
":",
"if",
"not",
"exists",
"(",
"name",
",",
"path",
"=",
"path",
")",
":",
"return",
"None",
"cmd",
"=",
"'lxc-cgroup'",
"if",
"path",
":",
"cmd",
"+=",
"' -P {0}'",
".",
"format",
"(",
"pipes",
".",
"quote",
"(",
"path",
")",
")",
"cmd",
"+=",
"' -n {0} {1} {2}'",
".",
"format",
"(",
"name",
",",
"parameter",
",",
"value",
")",
"ret",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"if",
"ret",
"[",
"'retcode'",
"]",
"!=",
"0",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
Set the value of a cgroup parameter for a container.
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.set_parameter name parameter value
|
[
"Set",
"the",
"value",
"of",
"a",
"cgroup",
"parameter",
"for",
"a",
"container",
"."
] |
python
|
train
|
dcos/shakedown
|
shakedown/dcos/__init__.py
|
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/__init__.py#L114-L130
|
def authenticate(username, password):
"""Authenticate with a DC/OS cluster and return an ACS token.
return: ACS token
"""
url = _gen_url('acs/api/v1/auth/login')
creds = {
'uid': username,
'password': password
}
response = dcos.http.request('post', url, json=creds)
if response.status_code == 200:
return response.json()['token']
else:
return None
|
[
"def",
"authenticate",
"(",
"username",
",",
"password",
")",
":",
"url",
"=",
"_gen_url",
"(",
"'acs/api/v1/auth/login'",
")",
"creds",
"=",
"{",
"'uid'",
":",
"username",
",",
"'password'",
":",
"password",
"}",
"response",
"=",
"dcos",
".",
"http",
".",
"request",
"(",
"'post'",
",",
"url",
",",
"json",
"=",
"creds",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"response",
".",
"json",
"(",
")",
"[",
"'token'",
"]",
"else",
":",
"return",
"None"
] |
Authenticate with a DC/OS cluster and return an ACS token.
return: ACS token
|
[
"Authenticate",
"with",
"a",
"DC",
"/",
"OS",
"cluster",
"and",
"return",
"an",
"ACS",
"token",
".",
"return",
":",
"ACS",
"token"
] |
python
|
train
|
opendatateam/udata
|
udata/frontend/markdown.py
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/markdown.py#L40-L59
|
def nofollow_callback(attrs, new=False):
"""
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
    That callback is not split in order to parse the URL only once.
"""
parsed_url = urlparse(attrs[(None, 'href')])
if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
scheme='https' if request.is_secure else 'http',
netloc=current_app.config['SERVER_NAME'],
path=parsed_url.path)
return attrs
else:
rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
if 'nofollow' not in [x.lower() for x in rel]:
rel.append('nofollow')
attrs[(None, 'rel')] = ' '.join(rel)
return attrs
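This function follows bleach's linkify-callback convention, where attrs is keyed by (namespace, name) tuples. The wiring below is a hedged sketch; it has to run inside a Flask application/request context because the callback reads current_app.config and request.is_secure.

import bleach

html = bleach.linkify(
    'Read more at https://example.org/docs',
    callbacks=[nofollow_callback],
)
# Links pointing off SERVER_NAME gain rel="nofollow"; links on SERVER_NAME
# (or with an empty netloc) are rewritten to absolute URLs on that host.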
|
[
"def",
"nofollow_callback",
"(",
"attrs",
",",
"new",
"=",
"False",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"attrs",
"[",
"(",
"None",
",",
"'href'",
")",
"]",
")",
"if",
"parsed_url",
".",
"netloc",
"in",
"(",
"''",
",",
"current_app",
".",
"config",
"[",
"'SERVER_NAME'",
"]",
")",
":",
"attrs",
"[",
"(",
"None",
",",
"'href'",
")",
"]",
"=",
"'{scheme}://{netloc}{path}'",
".",
"format",
"(",
"scheme",
"=",
"'https'",
"if",
"request",
".",
"is_secure",
"else",
"'http'",
",",
"netloc",
"=",
"current_app",
".",
"config",
"[",
"'SERVER_NAME'",
"]",
",",
"path",
"=",
"parsed_url",
".",
"path",
")",
"return",
"attrs",
"else",
":",
"rel",
"=",
"[",
"x",
"for",
"x",
"in",
"attrs",
".",
"get",
"(",
"(",
"None",
",",
"'rel'",
")",
",",
"''",
")",
".",
"split",
"(",
"' '",
")",
"if",
"x",
"]",
"if",
"'nofollow'",
"not",
"in",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"rel",
"]",
":",
"rel",
".",
"append",
"(",
"'nofollow'",
")",
"attrs",
"[",
"(",
"None",
",",
"'rel'",
")",
"]",
"=",
"' '",
".",
"join",
"(",
"rel",
")",
"return",
"attrs"
] |
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
That callback is not split in order to parse the URL only once.
|
[
"Turn",
"relative",
"links",
"into",
"external",
"ones",
"and",
"avoid",
"nofollow",
"for",
"us"
] |
python
|
train
|
pmorissette/ffn
|
ffn/core.py
|
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L1223-L1235
|
def calc_cagr(prices):
"""
Calculates the `CAGR (compound annual growth rate) <https://www.investopedia.com/terms/c/cagr.asp>`_ for a given price series.
Args:
* prices (pandas.Series): A Series of prices.
Returns:
* float -- cagr.
"""
start = prices.index[0]
end = prices.index[-1]
return (prices.iloc[-1] / prices.iloc[0]) ** (1 / year_frac(start, end)) - 1
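A worked example: a series that grows from 100 to 121 over two years has a CAGR of about 10%. A plain 365.25-day year fraction stands in for ffn's year_frac helper here (an assumption about that helper's behaviour).

import pandas as pd

prices = pd.Series(
    [100.0, 121.0],
    index=pd.to_datetime(['2016-01-01', '2018-01-01']),
)
years = (prices.index[-1] - prices.index[0]).days / 365.25
cagr = (prices.iloc[-1] / prices.iloc[0]) ** (1 / years) - 1
print(round(cagr, 4))   # ~0.0999, i.e. roughly 10% per year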
|
[
"def",
"calc_cagr",
"(",
"prices",
")",
":",
"start",
"=",
"prices",
".",
"index",
"[",
"0",
"]",
"end",
"=",
"prices",
".",
"index",
"[",
"-",
"1",
"]",
"return",
"(",
"prices",
".",
"iloc",
"[",
"-",
"1",
"]",
"/",
"prices",
".",
"iloc",
"[",
"0",
"]",
")",
"**",
"(",
"1",
"/",
"year_frac",
"(",
"start",
",",
"end",
")",
")",
"-",
"1"
] |
Calculates the `CAGR (compound annual growth rate) <https://www.investopedia.com/terms/c/cagr.asp>`_ for a given price series.
Args:
* prices (pandas.Series): A Series of prices.
Returns:
* float -- cagr.
|
[
"Calculates",
"the",
"CAGR",
"(",
"compound",
"annual",
"growth",
"rate",
")",
"<https",
":",
"//",
"www",
".",
"investopedia",
".",
"com",
"/",
"terms",
"/",
"c",
"/",
"cagr",
".",
"asp",
">",
"_",
"for",
"a",
"given",
"price",
"series",
"."
] |
python
|
train
|
PGower/PyCanvas
|
pycanvas/apis/discussion_topics.py
|
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L19-L72
|
def list_discussion_topics_courses(self, course_id, exclude_context_module_locked_topics=None, include=None, only_announcements=None, order_by=None, scope=None, search_term=None):
"""
List discussion topics.
Returns the paginated list of discussion topics for this course or group.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""If "all_dates" is passed, all dates associated with graded discussions'
assignments will be included."""
if include is not None:
self._validate_enum(include, ["all_dates"])
params["include"] = include
# OPTIONAL - order_by
"""Determines the order of the discussion topic list. Defaults to "position"."""
if order_by is not None:
self._validate_enum(order_by, ["position", "recent_activity"])
params["order_by"] = order_by
# OPTIONAL - scope
"""Only return discussion topics in the given state(s). Defaults to including
all topics. Filtering is done after pagination, so pages
may be smaller than requested if topics are filtered.
Can pass multiple states as comma separated string."""
if scope is not None:
self._validate_enum(scope, ["locked", "unlocked", "pinned", "unpinned"])
params["scope"] = scope
# OPTIONAL - only_announcements
"""Return announcements instead of discussion topics. Defaults to false"""
if only_announcements is not None:
params["only_announcements"] = only_announcements
# OPTIONAL - search_term
"""The partial title of the discussion topics to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - exclude_context_module_locked_topics
"""For students, exclude topics that are locked by module progression.
Defaults to false."""
if exclude_context_module_locked_topics is not None:
params["exclude_context_module_locked_topics"] = exclude_context_module_locked_topics
self.logger.debug("GET /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, all_pages=True)
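A hedged usage sketch. Only the method itself comes from the source above; how the API object is constructed, and the course id, are assumptions.

topics = discussion_topics_api.list_discussion_topics_courses(
    course_id=1234,              # hypothetical course id
    order_by='recent_activity',  # must be one of the validated enum values
    scope='pinned',              # only pinned topics
    only_announcements=False,
)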
|
[
"def",
"list_discussion_topics_courses",
"(",
"self",
",",
"course_id",
",",
"exclude_context_module_locked_topics",
"=",
"None",
",",
"include",
"=",
"None",
",",
"only_announcements",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"search_term",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - course_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"course_id\"",
"]",
"=",
"course_id",
"# OPTIONAL - include\r",
"\"\"\"If \"all_dates\" is passed, all dates associated with graded discussions'\r\n assignments will be included.\"\"\"",
"if",
"include",
"is",
"not",
"None",
":",
"self",
".",
"_validate_enum",
"(",
"include",
",",
"[",
"\"all_dates\"",
"]",
")",
"params",
"[",
"\"include\"",
"]",
"=",
"include",
"# OPTIONAL - order_by\r",
"\"\"\"Determines the order of the discussion topic list. Defaults to \"position\".\"\"\"",
"if",
"order_by",
"is",
"not",
"None",
":",
"self",
".",
"_validate_enum",
"(",
"order_by",
",",
"[",
"\"position\"",
",",
"\"recent_activity\"",
"]",
")",
"params",
"[",
"\"order_by\"",
"]",
"=",
"order_by",
"# OPTIONAL - scope\r",
"\"\"\"Only return discussion topics in the given state(s). Defaults to including\r\n all topics. Filtering is done after pagination, so pages\r\n may be smaller than requested if topics are filtered.\r\n Can pass multiple states as comma separated string.\"\"\"",
"if",
"scope",
"is",
"not",
"None",
":",
"self",
".",
"_validate_enum",
"(",
"scope",
",",
"[",
"\"locked\"",
",",
"\"unlocked\"",
",",
"\"pinned\"",
",",
"\"unpinned\"",
"]",
")",
"params",
"[",
"\"scope\"",
"]",
"=",
"scope",
"# OPTIONAL - only_announcements\r",
"\"\"\"Return announcements instead of discussion topics. Defaults to false\"\"\"",
"if",
"only_announcements",
"is",
"not",
"None",
":",
"params",
"[",
"\"only_announcements\"",
"]",
"=",
"only_announcements",
"# OPTIONAL - search_term\r",
"\"\"\"The partial title of the discussion topics to match and return.\"\"\"",
"if",
"search_term",
"is",
"not",
"None",
":",
"params",
"[",
"\"search_term\"",
"]",
"=",
"search_term",
"# OPTIONAL - exclude_context_module_locked_topics\r",
"\"\"\"For students, exclude topics that are locked by module progression.\r\n Defaults to false.\"\"\"",
"if",
"exclude_context_module_locked_topics",
"is",
"not",
"None",
":",
"params",
"[",
"\"exclude_context_module_locked_topics\"",
"]",
"=",
"exclude_context_module_locked_topics",
"self",
".",
"logger",
".",
"debug",
"(",
"\"GET /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"GET\"",
",",
"\"/api/v1/courses/{course_id}/discussion_topics\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"all_pages",
"=",
"True",
")"
] |
List discussion topics.
Returns the paginated list of discussion topics for this course or group.
|
[
"List",
"discussion",
"topics",
".",
"Returns",
"the",
"paginated",
"list",
"of",
"discussion",
"topics",
"for",
"this",
"course",
"or",
"group",
"."
] |
python
|
train
|
NASA-AMMOS/AIT-Core
|
ait/core/dtype.py
|
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/dtype.py#L535-L541
|
def evrs(self):
"""Getter EVRs dictionary"""
if self._evrs is None:
import ait.core.evr as evr
self._evrs = evr.getDefaultDict()
return self._evrs
|
[
"def",
"evrs",
"(",
"self",
")",
":",
"if",
"self",
".",
"_evrs",
"is",
"None",
":",
"import",
"ait",
".",
"core",
".",
"evr",
"as",
"evr",
"self",
".",
"_evrs",
"=",
"evr",
".",
"getDefaultDict",
"(",
")",
"return",
"self",
".",
"_evrs"
] |
Getter EVRs dictionary
|
[
"Getter",
"EVRs",
"dictionary"
] |
python
|
train
|
boto/s3transfer
|
s3transfer/download.py
|
https://github.com/boto/s3transfer/blob/2aead638c8385d8ae0b1756b2de17e8fad45fffa/s3transfer/download.py#L488-L547
|
def _main(self, client, bucket, key, fileobj, extra_args, callbacks,
max_attempts, download_output_manager, io_chunksize,
start_index=0, bandwidth_limiter=None):
"""Downloads an object and places content into io queue
:param client: The client to use when calling GetObject
:param bucket: The bucket to download from
:param key: The key to download from
:param fileobj: The file handle to write content to
    :param extra_args: Any extra arguments to include in the GetObject request
:param callbacks: List of progress callbacks to invoke on download
:param max_attempts: The number of retries to do when downloading
:param download_output_manager: The download output manager associated
with the current download.
:param io_chunksize: The size of each io chunk to read from the
download stream and queue in the io queue.
:param start_index: The location in the file to start writing the
content of the key to.
:param bandwidth_limiter: The bandwidth limiter to use when throttling
the downloading of data in streams.
"""
last_exception = None
for i in range(max_attempts):
try:
response = client.get_object(
Bucket=bucket, Key=key, **extra_args)
streaming_body = StreamReaderProgress(
response['Body'], callbacks)
if bandwidth_limiter:
streaming_body = \
bandwidth_limiter.get_bandwith_limited_stream(
streaming_body, self._transfer_coordinator)
current_index = start_index
chunks = DownloadChunkIterator(streaming_body, io_chunksize)
for chunk in chunks:
# If the transfer is done because of a cancellation
# or error somewhere else, stop trying to submit more
# data to be written and break out of the download.
if not self._transfer_coordinator.done():
self._handle_io(
download_output_manager, fileobj, chunk,
current_index
)
current_index += len(chunk)
else:
return
return
except S3_RETRYABLE_DOWNLOAD_ERRORS as e:
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)
last_exception = e
# Also invoke the progress callbacks to indicate that we
# are trying to download the stream again and all progress
# for this GetObject has been lost.
invoke_progress_callbacks(
callbacks, start_index - current_index)
continue
raise RetriesExceededError(last_exception)
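A simplified boto3 sketch of the same retry-and-stream idea, not s3transfer's internal API: fetch the object, read it in fixed-size chunks, and retry the whole GetObject on a retryable network error. Bucket, key and destination path are hypothetical.

import boto3
from botocore.exceptions import ConnectionError as BotoConnectionError

def download_with_retries(bucket, key, path, max_attempts=3, chunk=256 * 1024):
    client = boto3.client('s3')
    last_exc = None
    for _ in range(max_attempts):
        try:
            body = client.get_object(Bucket=bucket, Key=key)['Body']
            with open(path, 'wb') as f:
                # Read fixed-size chunks until the stream is exhausted.
                for piece in iter(lambda: body.read(chunk), b''):
                    f.write(piece)
            return
        except BotoConnectionError as exc:   # retryable network failure
            last_exc = exc                   # progress is discarded, as above
    raise RuntimeError('download failed after retries: %r' % last_exc)

# download_with_retries('my-bucket', 'data/file.bin', '/tmp/file.bin')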
|
[
"def",
"_main",
"(",
"self",
",",
"client",
",",
"bucket",
",",
"key",
",",
"fileobj",
",",
"extra_args",
",",
"callbacks",
",",
"max_attempts",
",",
"download_output_manager",
",",
"io_chunksize",
",",
"start_index",
"=",
"0",
",",
"bandwidth_limiter",
"=",
"None",
")",
":",
"last_exception",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"max_attempts",
")",
":",
"try",
":",
"response",
"=",
"client",
".",
"get_object",
"(",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
",",
"*",
"*",
"extra_args",
")",
"streaming_body",
"=",
"StreamReaderProgress",
"(",
"response",
"[",
"'Body'",
"]",
",",
"callbacks",
")",
"if",
"bandwidth_limiter",
":",
"streaming_body",
"=",
"bandwidth_limiter",
".",
"get_bandwith_limited_stream",
"(",
"streaming_body",
",",
"self",
".",
"_transfer_coordinator",
")",
"current_index",
"=",
"start_index",
"chunks",
"=",
"DownloadChunkIterator",
"(",
"streaming_body",
",",
"io_chunksize",
")",
"for",
"chunk",
"in",
"chunks",
":",
"# If the transfer is done because of a cancellation",
"# or error somewhere else, stop trying to submit more",
"# data to be written and break out of the download.",
"if",
"not",
"self",
".",
"_transfer_coordinator",
".",
"done",
"(",
")",
":",
"self",
".",
"_handle_io",
"(",
"download_output_manager",
",",
"fileobj",
",",
"chunk",
",",
"current_index",
")",
"current_index",
"+=",
"len",
"(",
"chunk",
")",
"else",
":",
"return",
"return",
"except",
"S3_RETRYABLE_DOWNLOAD_ERRORS",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Retrying exception caught (%s), \"",
"\"retrying request, (attempt %s / %s)\"",
",",
"e",
",",
"i",
",",
"max_attempts",
",",
"exc_info",
"=",
"True",
")",
"last_exception",
"=",
"e",
"# Also invoke the progress callbacks to indicate that we",
"# are trying to download the stream again and all progress",
"# for this GetObject has been lost.",
"invoke_progress_callbacks",
"(",
"callbacks",
",",
"start_index",
"-",
"current_index",
")",
"continue",
"raise",
"RetriesExceededError",
"(",
"last_exception",
")"
] |
Downloads an object and places content into io queue
:param client: The client to use when calling GetObject
:param bucket: The bucket to download from
:param key: The key to download from
:param fileobj: The file handle to write content to
:param extra_args: Any extra arguments to include in the GetObject request
:param callbacks: List of progress callbacks to invoke on download
:param max_attempts: The number of retries to do when downloading
:param download_output_manager: The download output manager associated
with the current download.
:param io_chunksize: The size of each io chunk to read from the
download stream and queue in the io queue.
:param start_index: The location in the file to start writing the
content of the key to.
:param bandwidth_limiter: The bandwidth limiter to use when throttling
the downloading of data in streams.
|
[
"Downloads",
"an",
"object",
"and",
"places",
"content",
"into",
"io",
"queue"
] |
python
|
test
|
econ-ark/HARK
|
HARK/core.py
|
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/core.py#L973-L1008
|
def solve(self):
'''
"Solves" the market by finding a "dynamic rule" that governs the aggregate
market state such that when agents believe in these dynamics, their actions
collectively generate the same dynamic rule.
Parameters
----------
None
Returns
-------
None
'''
go = True
max_loops = self.max_loops # Failsafe against infinite solution loop
completed_loops = 0
old_dynamics = None
while go: # Loop until the dynamic process converges or we hit the loop cap
self.solveAgents() # Solve each AgentType's micro problem
self.makeHistory() # "Run" the model while tracking aggregate variables
new_dynamics = self.updateDynamics() # Find a new aggregate dynamic rule
# Check to see if the dynamic rule has converged (if this is not the first loop)
if completed_loops > 0:
distance = new_dynamics.distance(old_dynamics)
else:
distance = 1000000.0
# Move to the next loop if the terminal conditions are not met
old_dynamics = new_dynamics
completed_loops += 1
go = distance >= self.tolerance and completed_loops < max_loops
self.dynamics = new_dynamics
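The loop above is a fixed-point iteration: propose a rule, simulate with it, re-estimate the rule, and stop once successive rules are within tolerance or the loop cap is hit. The sketch below reproduces that structure on a plain numeric fixed point (x = cos x); it uses none of HARK's classes.

import math

def solve_fixed_point(update, guess, tolerance=1e-8, max_loops=100):
    old = guess
    for completed_loops in range(1, max_loops + 1):
        new = update(old)            # stand-in for solveAgents/makeHistory/updateDynamics
        distance = abs(new - old)    # distance between successive "rules"
        old = new
        if distance < tolerance:     # converged before the loop cap
            break
    return old, completed_loops

x, loops = solve_fixed_point(math.cos, 1.0)
print(round(x, 6), loops)   # ~0.739085, well inside the 100-loop cap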
|
[
"def",
"solve",
"(",
"self",
")",
":",
"go",
"=",
"True",
"max_loops",
"=",
"self",
".",
"max_loops",
"# Failsafe against infinite solution loop",
"completed_loops",
"=",
"0",
"old_dynamics",
"=",
"None",
"while",
"go",
":",
"# Loop until the dynamic process converges or we hit the loop cap",
"self",
".",
"solveAgents",
"(",
")",
"# Solve each AgentType's micro problem",
"self",
".",
"makeHistory",
"(",
")",
"# \"Run\" the model while tracking aggregate variables",
"new_dynamics",
"=",
"self",
".",
"updateDynamics",
"(",
")",
"# Find a new aggregate dynamic rule",
"# Check to see if the dynamic rule has converged (if this is not the first loop)",
"if",
"completed_loops",
">",
"0",
":",
"distance",
"=",
"new_dynamics",
".",
"distance",
"(",
"old_dynamics",
")",
"else",
":",
"distance",
"=",
"1000000.0",
"# Move to the next loop if the terminal conditions are not met",
"old_dynamics",
"=",
"new_dynamics",
"completed_loops",
"+=",
"1",
"go",
"=",
"distance",
">=",
"self",
".",
"tolerance",
"and",
"completed_loops",
"<",
"max_loops",
"self",
".",
"dynamics",
"=",
"new_dynamics"
] |
"Solves" the market by finding a "dynamic rule" that governs the aggregate
market state such that when agents believe in these dynamics, their actions
collectively generate the same dynamic rule.
Parameters
----------
None
Returns
-------
None
|
[
"Solves",
"the",
"market",
"by",
"finding",
"a",
"dynamic",
"rule",
"that",
"governs",
"the",
"aggregate",
"market",
"state",
"such",
"that",
"when",
"agents",
"believe",
"in",
"these",
"dynamics",
"their",
"actions",
"collectively",
"generate",
"the",
"same",
"dynamic",
"rule",
"."
] |
python
|
train
|
amadev/doan
|
doan/dataset.py
|
https://github.com/amadev/doan/blob/5adfa983ac547007a688fe7517291a432919aa3e/doan/dataset.py#L150-L157
|
def r_num(obj):
"""Read list of numbers."""
if isinstance(obj, (list, tuple)):
it = iter
else:
it = LinesIterator
dataset = Dataset([Dataset.FLOAT])
return dataset.load(it(obj))
|
[
"def",
"r_num",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"it",
"=",
"iter",
"else",
":",
"it",
"=",
"LinesIterator",
"dataset",
"=",
"Dataset",
"(",
"[",
"Dataset",
".",
"FLOAT",
"]",
")",
"return",
"dataset",
".",
"load",
"(",
"it",
"(",
"obj",
")",
")"
] |
Read list of numbers.
|
[
"Read",
"list",
"of",
"numbers",
"."
] |
python
|
train
|
openthread/openthread
|
tools/harness-thci/OpenThread_WpanCtl.py
|
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread_WpanCtl.py#L933-L956
|
def getMAC(self, bType=MacType.RandomMac):
"""get one specific type of MAC address
currently OpenThreadWpan only supports Random MAC address
Args:
bType: indicate which kind of MAC address is required
Returns:
specific type of MAC address
"""
print '%s call getMAC' % self.port
# if power down happens, return extended address assigned previously
if self.isPowerDown:
macAddr64 = self.mac
else:
if bType == MacType.FactoryMac:
macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:HardwareAddress')[0])
elif bType == MacType.HashMac:
macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:MACAddress')[0])
else:
macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:ExtendedAddress')[0])
return int(macAddr64, 16)
|
[
"def",
"getMAC",
"(",
"self",
",",
"bType",
"=",
"MacType",
".",
"RandomMac",
")",
":",
"print",
"'%s call getMAC'",
"%",
"self",
".",
"port",
"# if power down happens, return extended address assigned previously",
"if",
"self",
".",
"isPowerDown",
":",
"macAddr64",
"=",
"self",
".",
"mac",
"else",
":",
"if",
"bType",
"==",
"MacType",
".",
"FactoryMac",
":",
"macAddr64",
"=",
"self",
".",
"__stripValue",
"(",
"self",
".",
"__sendCommand",
"(",
"WPANCTL_CMD",
"+",
"'getprop -v NCP:HardwareAddress'",
")",
"[",
"0",
"]",
")",
"elif",
"bType",
"==",
"MacType",
".",
"HashMac",
":",
"macAddr64",
"=",
"self",
".",
"__stripValue",
"(",
"self",
".",
"__sendCommand",
"(",
"WPANCTL_CMD",
"+",
"'getprop -v NCP:MACAddress'",
")",
"[",
"0",
"]",
")",
"else",
":",
"macAddr64",
"=",
"self",
".",
"__stripValue",
"(",
"self",
".",
"__sendCommand",
"(",
"WPANCTL_CMD",
"+",
"'getprop -v NCP:ExtendedAddress'",
")",
"[",
"0",
"]",
")",
"return",
"int",
"(",
"macAddr64",
",",
"16",
")"
] |
get one specific type of MAC address
currently OpenThreadWpan only supports Random MAC address
Args:
bType: indicate which kind of MAC address is required
Returns:
specific type of MAC address
|
[
"get",
"one",
"specific",
"type",
"of",
"MAC",
"address",
"currently",
"OpenThreadWpan",
"only",
"supports",
"Random",
"MAC",
"address"
] |
python
|
train
|
dask/dask-ml
|
dask_ml/model_selection/_search.py
|
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L921-L956
|
def compute_n_splits(cv, X, y=None, groups=None):
"""Return the number of splits.
Parameters
----------
cv : BaseCrossValidator
X, y, groups : array_like, dask object, or None
Returns
-------
n_splits : int
"""
if not any(is_dask_collection(i) for i in (X, y, groups)):
return cv.get_n_splits(X, y, groups)
if isinstance(cv, (_BaseKFold, BaseShuffleSplit)):
return cv.n_splits
elif isinstance(cv, PredefinedSplit):
return len(cv.unique_folds)
elif isinstance(cv, _CVIterableWrapper):
return len(cv.cv)
elif isinstance(cv, (LeaveOneOut, LeavePOut)) and not is_dask_collection(X):
# Only `X` is referenced for these classes
return cv.get_n_splits(X, None, None)
elif isinstance(cv, (LeaveOneGroupOut, LeavePGroupsOut)) and not is_dask_collection(
groups
):
# Only `groups` is referenced for these classes
return cv.get_n_splits(None, None, groups)
else:
return delayed(cv).get_n_splits(X, y, groups).compute()
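A hedged usage sketch: with in-memory numpy inputs the function simply defers to cv.get_n_splits, so no dask graph is built. The import path is the private module shown in the URL above, so treating it as a public entry point is an assumption.

import numpy as np
from sklearn.model_selection import KFold
from dask_ml.model_selection._search import compute_n_splits  # assumed import path

X = np.arange(100).reshape(50, 2)
y = np.arange(50)
print(compute_n_splits(KFold(n_splits=5), X, y))   # -> 5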
|
[
"def",
"compute_n_splits",
"(",
"cv",
",",
"X",
",",
"y",
"=",
"None",
",",
"groups",
"=",
"None",
")",
":",
"if",
"not",
"any",
"(",
"is_dask_collection",
"(",
"i",
")",
"for",
"i",
"in",
"(",
"X",
",",
"y",
",",
"groups",
")",
")",
":",
"return",
"cv",
".",
"get_n_splits",
"(",
"X",
",",
"y",
",",
"groups",
")",
"if",
"isinstance",
"(",
"cv",
",",
"(",
"_BaseKFold",
",",
"BaseShuffleSplit",
")",
")",
":",
"return",
"cv",
".",
"n_splits",
"elif",
"isinstance",
"(",
"cv",
",",
"PredefinedSplit",
")",
":",
"return",
"len",
"(",
"cv",
".",
"unique_folds",
")",
"elif",
"isinstance",
"(",
"cv",
",",
"_CVIterableWrapper",
")",
":",
"return",
"len",
"(",
"cv",
".",
"cv",
")",
"elif",
"isinstance",
"(",
"cv",
",",
"(",
"LeaveOneOut",
",",
"LeavePOut",
")",
")",
"and",
"not",
"is_dask_collection",
"(",
"X",
")",
":",
"# Only `X` is referenced for these classes",
"return",
"cv",
".",
"get_n_splits",
"(",
"X",
",",
"None",
",",
"None",
")",
"elif",
"isinstance",
"(",
"cv",
",",
"(",
"LeaveOneGroupOut",
",",
"LeavePGroupsOut",
")",
")",
"and",
"not",
"is_dask_collection",
"(",
"groups",
")",
":",
"# Only `groups` is referenced for these classes",
"return",
"cv",
".",
"get_n_splits",
"(",
"None",
",",
"None",
",",
"groups",
")",
"else",
":",
"return",
"delayed",
"(",
"cv",
")",
".",
"get_n_splits",
"(",
"X",
",",
"y",
",",
"groups",
")",
".",
"compute",
"(",
")"
] |
Return the number of splits.
Parameters
----------
cv : BaseCrossValidator
X, y, groups : array_like, dask object, or None
Returns
-------
n_splits : int
|
[
"Return",
"the",
"number",
"of",
"splits",
"."
] |
python
|
train
|
chrisjrn/registrasion
|
registrasion/controllers/invoice.py
|
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/controllers/invoice.py#L361-L378
|
def update_validity(self):
''' Voids this invoice if the attached cart is no longer valid because
the cart revision has changed, or the reservations have expired. '''
is_valid = self._invoice_matches_cart()
cart = self.invoice.cart
if self.invoice.is_unpaid and is_valid and cart:
try:
CartController(cart).validate_cart()
except ValidationError:
is_valid = False
if not is_valid:
if self.invoice.total_payments() > 0:
# Free up the payments made to this invoice
self.refund()
else:
self.void()
|
[
"def",
"update_validity",
"(",
"self",
")",
":",
"is_valid",
"=",
"self",
".",
"_invoice_matches_cart",
"(",
")",
"cart",
"=",
"self",
".",
"invoice",
".",
"cart",
"if",
"self",
".",
"invoice",
".",
"is_unpaid",
"and",
"is_valid",
"and",
"cart",
":",
"try",
":",
"CartController",
"(",
"cart",
")",
".",
"validate_cart",
"(",
")",
"except",
"ValidationError",
":",
"is_valid",
"=",
"False",
"if",
"not",
"is_valid",
":",
"if",
"self",
".",
"invoice",
".",
"total_payments",
"(",
")",
">",
"0",
":",
"# Free up the payments made to this invoice",
"self",
".",
"refund",
"(",
")",
"else",
":",
"self",
".",
"void",
"(",
")"
] |
Voids this invoice if the attached cart is no longer valid because
the cart revision has changed, or the reservations have expired.
|
[
"Voids",
"this",
"invoice",
"if",
"the",
"attached",
"cart",
"is",
"no",
"longer",
"valid",
"because",
"the",
"cart",
"revision",
"has",
"changed",
"or",
"the",
"reservations",
"have",
"expired",
"."
] |
python
|
test
|
eandersson/amqpstorm
|
amqpstorm/message.py
|
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/message.py#L373-L390
|
def _try_decode_dict(self, content):
"""Decode content of a dictionary.
:param dict content:
:return:
"""
result = dict()
for key, value in content.items():
key = try_utf8_decode(key)
if isinstance(value, dict):
result[key] = self._try_decode_dict(value)
elif isinstance(value, list):
result[key] = self._try_decode_list(value)
elif isinstance(value, tuple):
result[key] = self._try_decode_tuple(value)
else:
result[key] = try_utf8_decode(value)
return result
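A standalone sketch of the same recursive decode; try_utf8_decode below is a simple stand-in for amqpstorm's helper of the same name, and the sample payload is hypothetical.

def try_utf8_decode(value):
    if isinstance(value, bytes):
        try:
            return value.decode('utf-8')
        except UnicodeDecodeError:
            return value
    return value

def try_decode_dict(content):
    result = {}
    for key, value in content.items():
        key = try_utf8_decode(key)
        if isinstance(value, dict):
            result[key] = try_decode_dict(value)
        elif isinstance(value, (list, tuple)):
            result[key] = [try_utf8_decode(v) for v in value]
        else:
            result[key] = try_utf8_decode(value)
    return result

print(try_decode_dict({b'headers': {b'x-id': b'42'}, b'tags': [b'a', b'b']}))
# -> {'headers': {'x-id': '42'}, 'tags': ['a', 'b']}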
|
[
"def",
"_try_decode_dict",
"(",
"self",
",",
"content",
")",
":",
"result",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"content",
".",
"items",
"(",
")",
":",
"key",
"=",
"try_utf8_decode",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"result",
"[",
"key",
"]",
"=",
"self",
".",
"_try_decode_dict",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"result",
"[",
"key",
"]",
"=",
"self",
".",
"_try_decode_list",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"result",
"[",
"key",
"]",
"=",
"self",
".",
"_try_decode_tuple",
"(",
"value",
")",
"else",
":",
"result",
"[",
"key",
"]",
"=",
"try_utf8_decode",
"(",
"value",
")",
"return",
"result"
] |
Decode content of a dictionary.
:param dict content:
:return:
|
[
"Decode",
"content",
"of",
"a",
"dictionary",
"."
] |
python
|
train
|
openvax/mhctools
|
mhctools/common.py
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/common.py#L24-L35
|
def seq_to_str(obj, sep=","):
"""
Given a sequence convert it to a comma separated string.
If, however, the argument is a single object, return its string
representation.
"""
if isinstance(obj, string_classes):
return obj
elif isinstance(obj, (list, tuple)):
return sep.join([str(x) for x in obj])
else:
return str(obj)
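Behaviour implied by the branches above, as a quick usage example:

print(seq_to_str(["HLA-A*02:01", "HLA-B*07:02"]))   # -> HLA-A*02:01,HLA-B*07:02
print(seq_to_str("HLA-A*02:01"))                    # -> HLA-A*02:01 (unchanged)
print(seq_to_str([9, 10, 11], sep="-"))             # -> 9-10-11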
|
[
"def",
"seq_to_str",
"(",
"obj",
",",
"sep",
"=",
"\",\"",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"string_classes",
")",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"sep",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
")",
"else",
":",
"return",
"str",
"(",
"obj",
")"
] |
Given a sequence convert it to a comma separated string.
If, however, the argument is a single object, return its string
representation.
|
[
"Given",
"a",
"sequence",
"convert",
"it",
"to",
"a",
"comma",
"separated",
"string",
".",
"If",
"however",
"the",
"argument",
"is",
"a",
"single",
"object",
"return",
"its",
"string",
"representation",
"."
] |
python
|
valid
|
LonamiWebs/Telethon
|
telethon/tl/custom/dialog.py
|
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/dialog.py#L99-L112
|
async def delete(self):
"""
Deletes the dialog from your dialog list. If you own the
channel this won't destroy it, only delete it from the list.
"""
if self.is_channel:
await self._client(functions.channels.LeaveChannelRequest(
self.input_entity))
else:
if self.is_group:
await self._client(functions.messages.DeleteChatUserRequest(
self.entity.id, types.InputPeerSelf()))
await self._client(functions.messages.DeleteHistoryRequest(
self.input_entity, 0))
|
[
"async",
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_channel",
":",
"await",
"self",
".",
"_client",
"(",
"functions",
".",
"channels",
".",
"LeaveChannelRequest",
"(",
"self",
".",
"input_entity",
")",
")",
"else",
":",
"if",
"self",
".",
"is_group",
":",
"await",
"self",
".",
"_client",
"(",
"functions",
".",
"messages",
".",
"DeleteChatUserRequest",
"(",
"self",
".",
"entity",
".",
"id",
",",
"types",
".",
"InputPeerSelf",
"(",
")",
")",
")",
"await",
"self",
".",
"_client",
"(",
"functions",
".",
"messages",
".",
"DeleteHistoryRequest",
"(",
"self",
".",
"input_entity",
",",
"0",
")",
")"
] |
Deletes the dialog from your dialog list. If you own the
channel this won't destroy it, only delete it from the list.
|
[
"Deletes",
"the",
"dialog",
"from",
"your",
"dialog",
"list",
".",
"If",
"you",
"own",
"the",
"channel",
"this",
"won",
"t",
"destroy",
"it",
"only",
"delete",
"it",
"from",
"the",
"list",
"."
] |
python
|
train
|
EpistasisLab/scikit-rebate
|
skrebate/scoring_utils.py
|
https://github.com/EpistasisLab/scikit-rebate/blob/67dab51a7525fa5d076b059f1e6f8cff7481c1ef/skrebate/scoring_utils.py#L86-L108
|
def ramp_function(data_type, attr, fname, xinstfeature, xNNifeature):
""" Our own user simplified variation of the ramp function suggested by Hong 1994, 1997. Hong's method requires the user to specifiy two thresholds
that indicate the max difference before a score of 1 is given, as well a min difference before a score of 0 is given, and any in the middle get a
score that is the normalized difference between the two continuous feature values. This was done because when discrete and continuous features were mixed,
continuous feature scores were underestimated. Towards simplicity, automation, and a dataset adaptable approach,
here we simply check whether the difference is greater than the standard deviation for the given feature; if so we assign a score of 1, otherwise we
assign the normalized feature score difference. This should help compensate for the underestimation. """
diff = 0
mmdiff = attr[fname][3] # Max/Min range of values for target feature
rawfd = abs(xinstfeature - xNNifeature) # prenormalized feature value difference
if data_type == 'mixed': # Ramp function utilized
# Check whether feature value difference is greater than the standard deviation
standDev = attr[fname][4]
if rawfd > standDev: # feature value difference is wider than a standard deviation
diff = 1
else:
diff = abs(xinstfeature - xNNifeature) / mmdiff
else: # Normal continuous feature scoring
diff = abs(xinstfeature - xNNifeature) / mmdiff
return diff
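A worked example with hypothetical numbers; the attr layout (index 3 = max/min value range, index 4 = standard deviation) is taken directly from the function above.

attr = {'age': [None, None, None, 60.0, 8.0]}   # value range 60, std dev 8

print(ramp_function('mixed', attr, 'age', 42.0, 30.0))        # 12 > 8, so 1
print(ramp_function('mixed', attr, 'age', 42.0, 37.0))        # 5 <= 8, so 5/60 ~ 0.083
print(ramp_function('continuous', attr, 'age', 42.0, 30.0))   # always 12/60 = 0.2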
|
[
"def",
"ramp_function",
"(",
"data_type",
",",
"attr",
",",
"fname",
",",
"xinstfeature",
",",
"xNNifeature",
")",
":",
"diff",
"=",
"0",
"mmdiff",
"=",
"attr",
"[",
"fname",
"]",
"[",
"3",
"]",
"# Max/Min range of values for target feature\r",
"rawfd",
"=",
"abs",
"(",
"xinstfeature",
"-",
"xNNifeature",
")",
"# prenormalized feature value difference\r",
"if",
"data_type",
"==",
"'mixed'",
":",
"# Ramp function utilized\r",
"# Check whether feature value difference is greater than the standard deviation\r",
"standDev",
"=",
"attr",
"[",
"fname",
"]",
"[",
"4",
"]",
"if",
"rawfd",
">",
"standDev",
":",
"# feature value difference is is wider than a standard deviation\r",
"diff",
"=",
"1",
"else",
":",
"diff",
"=",
"abs",
"(",
"xinstfeature",
"-",
"xNNifeature",
")",
"/",
"mmdiff",
"else",
":",
"# Normal continuous feature scoring\r",
"diff",
"=",
"abs",
"(",
"xinstfeature",
"-",
"xNNifeature",
")",
"/",
"mmdiff",
"return",
"diff"
] |
Our own user-simplified variation of the ramp function suggested by Hong 1994, 1997. Hong's method requires the user to specify two thresholds
that indicate the max difference before a score of 1 is given, as well as a min difference before a score of 0 is given, and any in the middle get a
score that is the normalized difference between the two continuous feature values. This was done because when discrete and continuous features were mixed,
continuous feature scores were underestimated. Towards simplicity, automation, and a dataset-adaptable approach,
here we simply check whether the difference is greater than the standard deviation for the given feature; if so we assign a score of 1, otherwise we
assign the normalized feature score difference. This should help compensate for the underestimation.
|
[
"Our",
"own",
"user",
"simplified",
"variation",
"of",
"the",
"ramp",
"function",
"suggested",
"by",
"Hong",
"1994",
"1997",
".",
"Hong",
"s",
"method",
"requires",
"the",
"user",
"to",
"specifiy",
"two",
"thresholds",
"that",
"indicate",
"the",
"max",
"difference",
"before",
"a",
"score",
"of",
"1",
"is",
"given",
"as",
"well",
"a",
"min",
"difference",
"before",
"a",
"score",
"of",
"0",
"is",
"given",
"and",
"any",
"in",
"the",
"middle",
"get",
"a",
"score",
"that",
"is",
"the",
"normalized",
"difference",
"between",
"the",
"two",
"continuous",
"feature",
"values",
".",
"This",
"was",
"done",
"because",
"when",
"discrete",
"and",
"continuous",
"features",
"were",
"mixed",
"continuous",
"feature",
"scores",
"were",
"underestimated",
".",
"Towards",
"simplicity",
"automation",
"and",
"a",
"dataset",
"adaptable",
"approach",
"here",
"we",
"simply",
"check",
"whether",
"the",
"difference",
"is",
"greater",
"than",
"the",
"standard",
"deviation",
"for",
"the",
"given",
"feature",
";",
"if",
"so",
"we",
"assign",
"a",
"score",
"of",
"1",
"otherwise",
"we",
"assign",
"the",
"normalized",
"feature",
"score",
"difference",
".",
"This",
"should",
"help",
"compensate",
"for",
"the",
"underestimation",
"."
] |
python
|
train
|
googlecolab/jupyter_http_over_ws
|
jupyter_http_over_ws/handlers.py
|
https://github.com/googlecolab/jupyter_http_over_ws/blob/21fe278cae6fca4e6c92f92d6d786fae8fdea9b1/jupyter_http_over_ws/handlers.py#L76-L101
|
def _validate_min_version(min_version):
"""Validates the extension version matches the requested version.
Args:
min_version: Minimum version passed as a query param when establishing the
connection.
Returns:
An ExtensionVersionResult indicating validation status. If there is a
problem, the error_reason field will be non-empty.
"""
if min_version is not None:
try:
parsed_min_version = version.StrictVersion(min_version)
except ValueError:
return ExtensionVersionResult(
error_reason=ExtensionValidationError.UNPARSEABLE_REQUESTED_VERSION,
requested_extension_version=min_version)
if parsed_min_version > HANDLER_VERSION:
return ExtensionVersionResult(
error_reason=ExtensionValidationError.OUTDATED_VERSION,
requested_extension_version=str(parsed_min_version))
return ExtensionVersionResult(
error_reason=None, requested_extension_version=min_version)
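The comparison behind the validation above, using distutils' StrictVersion; the handler version here is a hypothetical stand-in for the module-level HANDLER_VERSION.

from distutils import version

handler_version = version.StrictVersion('0.0.7')          # hypothetical
print(version.StrictVersion('0.0.6') > handler_version)   # False -> accepted
print(version.StrictVersion('0.0.8') > handler_version)   # True  -> OUTDATED_VERSION
# version.StrictVersion('not-a-version') raises ValueError,
# which maps to UNPARSEABLE_REQUESTED_VERSION above.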
|
[
"def",
"_validate_min_version",
"(",
"min_version",
")",
":",
"if",
"min_version",
"is",
"not",
"None",
":",
"try",
":",
"parsed_min_version",
"=",
"version",
".",
"StrictVersion",
"(",
"min_version",
")",
"except",
"ValueError",
":",
"return",
"ExtensionVersionResult",
"(",
"error_reason",
"=",
"ExtensionValidationError",
".",
"UNPARSEABLE_REQUESTED_VERSION",
",",
"requested_extension_version",
"=",
"min_version",
")",
"if",
"parsed_min_version",
">",
"HANDLER_VERSION",
":",
"return",
"ExtensionVersionResult",
"(",
"error_reason",
"=",
"ExtensionValidationError",
".",
"OUTDATED_VERSION",
",",
"requested_extension_version",
"=",
"str",
"(",
"parsed_min_version",
")",
")",
"return",
"ExtensionVersionResult",
"(",
"error_reason",
"=",
"None",
",",
"requested_extension_version",
"=",
"min_version",
")"
] |
Validates the extension version matches the requested version.
Args:
min_version: Minimum version passed as a query param when establishing the
connection.
Returns:
An ExtensionVersionResult indicating validation status. If there is a
problem, the error_reason field will be non-empty.
|
[
"Validates",
"the",
"extension",
"version",
"matches",
"the",
"requested",
"version",
"."
] |
python
|
train
|
kalbhor/MusicTools
|
musictools/musictools.py
|
https://github.com/kalbhor/MusicTools/blob/324159448553033173bb050458c6a56e3cfa2738/musictools/musictools.py#L166-L173
|
def revert_metadata(files):
"""
Removes all tags from a mp3 file
"""
for file_path in files:
tags = EasyMP3(file_path)
tags.delete()
tags.save()
|
[
"def",
"revert_metadata",
"(",
"files",
")",
":",
"for",
"file_path",
"in",
"files",
":",
"tags",
"=",
"EasyMP3",
"(",
"file_path",
")",
"tags",
".",
"delete",
"(",
")",
"tags",
".",
"save",
"(",
")"
] |
Removes all tags from a mp3 file
|
[
"Removes",
"all",
"tags",
"from",
"a",
"mp3",
"file"
] |
python
|
train
|
google/prettytensor
|
prettytensor/pretty_tensor_methods.py
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_methods.py#L37-L111
|
def _infer_unknown_dims(old_shape, shape_spec):
"""Attempts to replace DIM_REST (if present) with a value.
Because of `pt.DIM_SAME`, this has more information to compute a shape value
than the default reshape's shape function.
Args:
old_shape: The current shape of the Tensor as a list.
shape_spec: A shape spec, see `pt.reshape`.
Returns:
A list derived from `shape_spec` with `pt.DIM_SAME` replaced by the value
from old_shape (if possible) and `pt.DIM_REST` computed (if possible).
Raises:
ValueError: If there are too many unknown dimensions or the shape_spec
requires out of range DIM_SAME.
TypeError: If shape_spec is not iterable.
"""
# To compute the dimension of an unknown, we need to track which of the values
# from old_shape are not copied for the numerator and any values specified as
# integers for the denominator.
#
# After the loop, if any input dimension is unknown and not DIM_SAME, the
# numerator will be 0. Otherwise it is the product of all non-DIM_SAME
# dimensions. This means that the dimension of DIM_REST is
# numerator / denominator
numerator_elements = [x if x else 0 for x in old_shape]
denominator = 1
unknowns = 0
normalized_shape_spec = []
for s in shape_spec:
# Equality of tf.Dimension is broken and upstream fix hasn't been accepted.
if isinstance(s, tf.Dimension):
normalized_shape_spec.append(s.value)
elif isinstance(s, tf.TensorShape):
for dim in s:
normalized_shape_spec.append(dim.value)
else:
normalized_shape_spec.append(s)
result = []
for i, s in enumerate(normalized_shape_spec):
if s == DIM_SAME:
if i >= len(old_shape):
raise ValueError('%d exceeds the input shape' % i)
if old_shape[i] is None:
result.append(DIM_SAME)
else:
result.append(old_shape[i])
numerator_elements[i] = 1
elif s in (DIM_REST, -1, None):
result.append(-1)
unknowns += 1
else:
x = int(s)
result.append(x)
denominator *= x
numerator = 1
for x in numerator_elements:
numerator *= x
if unknowns > 1:
raise ValueError('Only one unknown value (-1 or *) is allowed: %s' %
shape_spec)
elif numerator % denominator != 0:
raise ValueError('Input (%s) cannot be reshaped to %s.' %
(old_shape, shape_spec))
elif unknowns == 0 and numerator > 0 and numerator != denominator:
raise ValueError('Input (%s) cannot be reshaped to %s.' %
(old_shape, shape_spec))
if numerator and unknowns:
unknown_elements = int(numerator / denominator)
return [unknown_elements if x == -1 else x for x in result]
else:
return result
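A worked example of the arithmetic above, independent of TensorFlow. Suppose old_shape is [2, 3, 4] and shape_spec is [DIM_SAME, -1], i.e. keep the first dimension and infer the rest:

old_shape = [2, 3, 4]
numerator_elements = [1, 3, 4]   # position 0 is DIM_SAME, so it drops out
numerator = 1 * 3 * 4            # 12 elements must be absorbed by the unknown
denominator = 1                  # no explicit integer dims in the spec
print([old_shape[0], numerator // denominator])   # -> [2, 12]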
|
[
"def",
"_infer_unknown_dims",
"(",
"old_shape",
",",
"shape_spec",
")",
":",
"# To compute the dimension of an unknown, we need to track which of the values",
"# from old_shape are not copied for the numerator and any values specified as",
"# integers for the denominator.",
"#",
"# After the loop, if any input dimension is unknown and not DIM_SAME, the",
"# numerator will be 0. Otherwise it is the product of all non-DIM_SAME",
"# dimensions. This means that the dimension of DIM_REST is",
"# numerator / denominator",
"numerator_elements",
"=",
"[",
"x",
"if",
"x",
"else",
"0",
"for",
"x",
"in",
"old_shape",
"]",
"denominator",
"=",
"1",
"unknowns",
"=",
"0",
"normalized_shape_spec",
"=",
"[",
"]",
"for",
"s",
"in",
"shape_spec",
":",
"# Equality of tf.Dimension is broken and upstream fix hasn't been accepted.",
"if",
"isinstance",
"(",
"s",
",",
"tf",
".",
"Dimension",
")",
":",
"normalized_shape_spec",
".",
"append",
"(",
"s",
".",
"value",
")",
"elif",
"isinstance",
"(",
"s",
",",
"tf",
".",
"TensorShape",
")",
":",
"for",
"dim",
"in",
"s",
":",
"normalized_shape_spec",
".",
"append",
"(",
"dim",
".",
"value",
")",
"else",
":",
"normalized_shape_spec",
".",
"append",
"(",
"s",
")",
"result",
"=",
"[",
"]",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"normalized_shape_spec",
")",
":",
"if",
"s",
"==",
"DIM_SAME",
":",
"if",
"i",
">=",
"len",
"(",
"old_shape",
")",
":",
"raise",
"ValueError",
"(",
"'%d exceeds the input shape'",
"%",
"i",
")",
"if",
"old_shape",
"[",
"i",
"]",
"is",
"None",
":",
"result",
".",
"append",
"(",
"DIM_SAME",
")",
"else",
":",
"result",
".",
"append",
"(",
"old_shape",
"[",
"i",
"]",
")",
"numerator_elements",
"[",
"i",
"]",
"=",
"1",
"elif",
"s",
"in",
"(",
"DIM_REST",
",",
"-",
"1",
",",
"None",
")",
":",
"result",
".",
"append",
"(",
"-",
"1",
")",
"unknowns",
"+=",
"1",
"else",
":",
"x",
"=",
"int",
"(",
"s",
")",
"result",
".",
"append",
"(",
"x",
")",
"denominator",
"*=",
"x",
"numerator",
"=",
"1",
"for",
"x",
"in",
"numerator_elements",
":",
"numerator",
"*=",
"x",
"if",
"unknowns",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Only one unknown value (-1 or *) is allowed: %s'",
"%",
"shape_spec",
")",
"elif",
"numerator",
"%",
"denominator",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'Input (%s) cannot be reshaped to %s.'",
"%",
"(",
"old_shape",
",",
"shape_spec",
")",
")",
"elif",
"unknowns",
"==",
"0",
"and",
"numerator",
">",
"0",
"and",
"numerator",
"!=",
"denominator",
":",
"raise",
"ValueError",
"(",
"'Input (%s) cannot be reshaped to %s.'",
"%",
"(",
"old_shape",
",",
"shape_spec",
")",
")",
"if",
"numerator",
"and",
"unknowns",
":",
"unknown_elements",
"=",
"int",
"(",
"numerator",
"/",
"denominator",
")",
"return",
"[",
"unknown_elements",
"if",
"x",
"==",
"-",
"1",
"else",
"x",
"for",
"x",
"in",
"result",
"]",
"else",
":",
"return",
"result"
] |
Attempts to replace DIM_REST (if present) with a value.
Because of `pt.DIM_SAME`, this has more information to compute a shape value
than the default reshape's shape function.
Args:
old_shape: The current shape of the Tensor as a list.
shape_spec: A shape spec, see `pt.reshape`.
Returns:
A list derived from `shape_spec` with `pt.DIM_SAME` replaced by the value
from old_shape (if possible) and `pt.DIM_REST` computed (if possible).
Raises:
ValueError: If there are too many unknown dimensions or the shape_spec
requires out of range DIM_SAME.
TypeError: If shape_spec is not iterable.
|
[
"Attempts",
"to",
"replace",
"DIM_REST",
"(",
"if",
"present",
")",
"with",
"a",
"value",
"."
] |
python
|
train
|
royi1000/py-libhdate
|
hdate/date.py
|
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/date.py#L137-L142
|
def hebrew_date(self):
"""Return the hebrew date string."""
return u"{} {} {}".format(
hebrew_number(self.hdate.day, hebrew=self.hebrew), # Day
htables.MONTHS[self.hdate.month - 1][self.hebrew], # Month
hebrew_number(self.hdate.year, hebrew=self.hebrew))
|
[
"def",
"hebrew_date",
"(",
"self",
")",
":",
"return",
"u\"{} {} {}\"",
".",
"format",
"(",
"hebrew_number",
"(",
"self",
".",
"hdate",
".",
"day",
",",
"hebrew",
"=",
"self",
".",
"hebrew",
")",
",",
"# Day",
"htables",
".",
"MONTHS",
"[",
"self",
".",
"hdate",
".",
"month",
"-",
"1",
"]",
"[",
"self",
".",
"hebrew",
"]",
",",
"# Month",
"hebrew_number",
"(",
"self",
".",
"hdate",
".",
"year",
",",
"hebrew",
"=",
"self",
".",
"hebrew",
")",
")"
] |
Return the hebrew date string.
|
[
"Return",
"the",
"hebrew",
"date",
"string",
"."
] |
python
|
train
|
spyder-ide/spyder
|
spyder/plugins/editor/panels/linenumber.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/linenumber.py#L114-L134
|
def mouseMoveEvent(self, event):
"""Override Qt method.
Show code analysis; if the left button is pressed, select lines.
"""
line_number = self.editor.get_linenumber_from_mouse_event(event)
block = self.editor.document().findBlockByNumber(line_number-1)
data = block.userData()
# this disables pyflakes messages if there is an active drag/selection
# operation
check = self._released == -1
if data and data.code_analysis and check:
self.editor.show_code_analysis_results(line_number,
data)
else:
self.editor.hide_tooltip()
if event.buttons() == Qt.LeftButton:
self._released = line_number
self.editor.select_lines(self._pressed, self._released)
|
[
"def",
"mouseMoveEvent",
"(",
"self",
",",
"event",
")",
":",
"line_number",
"=",
"self",
".",
"editor",
".",
"get_linenumber_from_mouse_event",
"(",
"event",
")",
"block",
"=",
"self",
".",
"editor",
".",
"document",
"(",
")",
".",
"findBlockByNumber",
"(",
"line_number",
"-",
"1",
")",
"data",
"=",
"block",
".",
"userData",
"(",
")",
"# this disables pyflakes messages if there is an active drag/selection",
"# operation",
"check",
"=",
"self",
".",
"_released",
"==",
"-",
"1",
"if",
"data",
"and",
"data",
".",
"code_analysis",
"and",
"check",
":",
"self",
".",
"editor",
".",
"show_code_analysis_results",
"(",
"line_number",
",",
"data",
")",
"else",
":",
"self",
".",
"editor",
".",
"hide_tooltip",
"(",
")",
"if",
"event",
".",
"buttons",
"(",
")",
"==",
"Qt",
".",
"LeftButton",
":",
"self",
".",
"_released",
"=",
"line_number",
"self",
".",
"editor",
".",
"select_lines",
"(",
"self",
".",
"_pressed",
",",
"self",
".",
"_released",
")"
] |
Override Qt method.
Show code analysis, if left button pressed select lines.
|
[
"Override",
"Qt",
"method",
"."
] |
python
|
train
|
PrefPy/prefpy
|
prefpy/gmmra.py
|
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/gmmra.py#L66-L78
|
def _adj(self, k):
"""
Description:
Adjacent breaking
Parameters:
k: not used
"""
G = np.zeros((self.m, self.m))
for i in range(self.m):
for j in range(self.m):
if i == j+1 or j == i+1:
G[i][j] = 1
return G
|
[
"def",
"_adj",
"(",
"self",
",",
"k",
")",
":",
"G",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"m",
",",
"self",
".",
"m",
")",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"m",
")",
":",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"m",
")",
":",
"if",
"i",
"==",
"j",
"+",
"1",
"or",
"j",
"==",
"i",
"+",
"1",
":",
"G",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"1",
"return",
"G"
] |
Description:
Adjacent breaking
Parameters:
k: not used
|
[
"Description",
":",
"Adjacent",
"breaking",
"Paramters",
":",
"k",
":",
"not",
"used"
] |
python
|
train
|
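The loop above builds a symmetric matrix with ones on the first super- and sub-diagonals. A short sketch of an equivalent vectorized construction with numpy, cross-checked against the nested loops; purely illustrative.

import numpy as np

def adjacent_breaking(m):
    """Ones on the first super- and sub-diagonal, zeros elsewhere."""
    return np.eye(m, k=1) + np.eye(m, k=-1)

# Cross-check against the nested-loop construction used by _adj.
m = 5
G_loop = np.zeros((m, m))
for i in range(m):
    for j in range(m):
        if i == j + 1 or j == i + 1:
            G_loop[i][j] = 1
assert np.array_equal(adjacent_breaking(m), G_loop)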
ajdavis/mongo-mockup-db
|
mockupdb/__init__.py
|
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L1401-L1497
|
def autoresponds(self, matcher, *args, **kwargs):
"""Send a canned reply to all matching client requests.
``matcher`` is a `Matcher` or a command name, or an instance of
`OpInsert`, `OpQuery`, etc.
>>> s = MockupDB()
>>> port = s.run()
>>>
>>> from pymongo import MongoClient
>>> client = MongoClient(s.uri)
>>> responder = s.autoresponds('ismaster', maxWireVersion=6)
>>> client.admin.command('ismaster') == {'ok': 1, 'maxWireVersion': 6}
True
The remaining arguments are a :ref:`message spec <message spec>`:
>>> # ok
>>> responder = s.autoresponds('bar', ok=0, errmsg='err')
>>> client.db.command('bar')
Traceback (most recent call last):
...
OperationFailure: command SON([('bar', 1)]) on namespace db.$cmd failed: err
>>> responder = s.autoresponds(OpMsg('find', 'collection'),
... {'cursor': {'id': 0, 'firstBatch': [{'_id': 1}, {'_id': 2}]}})
>>> # ok
>>> list(client.db.collection.find()) == [{'_id': 1}, {'_id': 2}]
True
>>> responder = s.autoresponds(OpMsg('find', 'collection'),
... {'cursor': {'id': 0, 'firstBatch': [{'a': 1}, {'a': 2}]}})
>>> # bad
>>> list(client.db.collection.find()) == [{'a': 1}, {'a': 2}]
True
Remove an autoresponder like:
>>> responder.cancel()
If the request currently at the head of the queue matches, it is popped
and replied to. Future matching requests skip the queue.
>>> future = go(client.db.command, 'baz')
>>> # bad
>>> responder = s.autoresponds('baz', {'key': 'value'})
>>> future() == {'ok': 1, 'key': 'value'}
True
Responders are applied in order, most recently added first, until one
matches:
>>> responder = s.autoresponds('baz')
>>> client.db.command('baz') == {'ok': 1}
True
>>> responder.cancel()
>>> # The previous responder takes over again.
>>> client.db.command('baz') == {'ok': 1, 'key': 'value'}
True
You can pass a request handler in place of the message spec. Return
True if you handled the request:
>>> responder = s.autoresponds('baz', lambda r: r.ok(a=2))
The standard `Request.ok`, `~Request.replies`, `~Request.fail`,
`~Request.hangup` and so on all return True to make them suitable
as handler functions.
>>> client.db.command('baz') == {'ok': 1, 'a': 2}
True
If the request is not handled, it is checked against the remaining
responders, or enqueued if none match.
You can pass the handler as the only argument so it receives *all*
requests. For example you could log them, then return None to allow
other handlers to run:
>>> def logger(request):
... if not request.matches('ismaster'):
... print('logging: %r' % request)
>>> responder = s.autoresponds(logger)
>>> client.db.command('baz') == {'ok': 1, 'a': 2}
logging: OpMsg({"baz": 1, "$db": "db", "$readPreference": {"mode": "primaryPreferred"}}, namespace="db")
True
The synonym `subscribe` better expresses your intent if your handler
never returns True:
>>> subscriber = s.subscribe(logger)
.. doctest:
:hide:
>>> client.close()
>>> s.stop()
"""
return self._insert_responder("top", matcher, *args, **kwargs)
|
[
"def",
"autoresponds",
"(",
"self",
",",
"matcher",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_insert_responder",
"(",
"\"top\"",
",",
"matcher",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Send a canned reply to all matching client requests.
``matcher`` is a `Matcher` or a command name, or an instance of
`OpInsert`, `OpQuery`, etc.
>>> s = MockupDB()
>>> port = s.run()
>>>
>>> from pymongo import MongoClient
>>> client = MongoClient(s.uri)
>>> responder = s.autoresponds('ismaster', maxWireVersion=6)
>>> client.admin.command('ismaster') == {'ok': 1, 'maxWireVersion': 6}
True
The remaining arguments are a :ref:`message spec <message spec>`:
>>> # ok
>>> responder = s.autoresponds('bar', ok=0, errmsg='err')
>>> client.db.command('bar')
Traceback (most recent call last):
...
OperationFailure: command SON([('bar', 1)]) on namespace db.$cmd failed: err
>>> responder = s.autoresponds(OpMsg('find', 'collection'),
... {'cursor': {'id': 0, 'firstBatch': [{'_id': 1}, {'_id': 2}]}})
>>> # ok
>>> list(client.db.collection.find()) == [{'_id': 1}, {'_id': 2}]
True
>>> responder = s.autoresponds(OpMsg('find', 'collection'),
... {'cursor': {'id': 0, 'firstBatch': [{'a': 1}, {'a': 2}]}})
>>> # bad
>>> list(client.db.collection.find()) == [{'a': 1}, {'a': 2}]
True
Remove an autoresponder like:
>>> responder.cancel()
If the request currently at the head of the queue matches, it is popped
and replied to. Future matching requests skip the queue.
>>> future = go(client.db.command, 'baz')
>>> # bad
>>> responder = s.autoresponds('baz', {'key': 'value'})
>>> future() == {'ok': 1, 'key': 'value'}
True
Responders are applied in order, most recently added first, until one
matches:
>>> responder = s.autoresponds('baz')
>>> client.db.command('baz') == {'ok': 1}
True
>>> responder.cancel()
>>> # The previous responder takes over again.
>>> client.db.command('baz') == {'ok': 1, 'key': 'value'}
True
You can pass a request handler in place of the message spec. Return
True if you handled the request:
>>> responder = s.autoresponds('baz', lambda r: r.ok(a=2))
The standard `Request.ok`, `~Request.replies`, `~Request.fail`,
`~Request.hangup` and so on all return True to make them suitable
as handler functions.
>>> client.db.command('baz') == {'ok': 1, 'a': 2}
True
If the request is not handled, it is checked against the remaining
responders, or enqueued if none match.
You can pass the handler as the only argument so it receives *all*
requests. For example you could log them, then return None to allow
other handlers to run:
>>> def logger(request):
... if not request.matches('ismaster'):
... print('logging: %r' % request)
>>> responder = s.autoresponds(logger)
>>> client.db.command('baz') == {'ok': 1, 'a': 2}
logging: OpMsg({"baz": 1, "$db": "db", "$readPreference": {"mode": "primaryPreferred"}}, namespace="db")
True
The synonym `subscribe` better expresses your intent if your handler
never returns True:
>>> subscriber = s.subscribe(logger)
.. doctest:
:hide:
>>> client.close()
>>> s.stop()
|
[
"Send",
"a",
"canned",
"reply",
"to",
"all",
"matching",
"client",
"requests",
".",
"matcher",
"is",
"a",
"Matcher",
"or",
"a",
"command",
"name",
"or",
"an",
"instance",
"of",
"OpInsert",
"OpQuery",
"etc",
"."
] |
python
|
train
|
Microsoft/botbuilder-python
|
libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
|
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py#L36-L47
|
def add_step(self, step):
"""
Adds a new step to the waterfall.
:param step: Step to add
:return: Waterfall dialog for fluent calls to `add_step()`.
"""
if not step:
raise TypeError('WaterfallDialog.add_step(): step cannot be None.')
self._steps.append(step)
return self
|
[
"def",
"add_step",
"(",
"self",
",",
"step",
")",
":",
"if",
"not",
"step",
":",
"raise",
"TypeError",
"(",
"'WaterfallDialog.add_step(): step cannot be None.'",
")",
"self",
".",
"_steps",
".",
"append",
"(",
"step",
")",
"return",
"self"
] |
Adds a new step to the waterfall.
:param step: Step to add
:return: Waterfall dialog for fluent calls to `add_step()`.
|
[
"Adds",
"a",
"new",
"step",
"to",
"the",
"waterfall",
".",
":",
"param",
"step",
":",
"Step",
"to",
"add",
":",
"return",
":",
"Waterfall",
"dialog",
"for",
"fluent",
"calls",
"to",
"add_step",
"()",
"."
] |
python
|
test
|
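Returning self is what makes the fluent chaining mentioned in the docstring possible; a minimal sketch with a stand-in class (not botbuilder's actual WaterfallDialog).

class MiniWaterfall:
    def __init__(self):
        self._steps = []

    def add_step(self, step):
        if not step:
            raise TypeError('add_step(): step cannot be None.')
        self._steps.append(step)
        return self                    # returning self enables chained calls

dialog = (MiniWaterfall()
          .add_step(lambda ctx: "ask for the name")
          .add_step(lambda ctx: "ask for the age"))
assert len(dialog._steps) == 2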
oscarbranson/latools
|
latools/latools.py
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3822-L3918
|
def export_traces(self, outdir=None, focus_stage=None, analytes=None,
samples=None, subset='All_Analyses', filt=False, zip_archive=False):
"""
Function to export raw data.
Parameters
----------
outdir : str
directory to save the traces. Defaults to 'main-dir-name_export'.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array - like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array - like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
if focus_stage in ['ratios', 'calibrated']:
analytes = [a for a in analytes if a != self.internal_standard]
if outdir is None:
outdir = os.path.join(self.export_dir, 'trace_export')
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/count {:s}',
'calibrated': 'mol/mol {:s}'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
ind = self.data[s].filt.grab_filt(filt)
out = Bunch()
for a in analytes:
out[a] = nominal_values(d[a][ind])
if focus_stage not in ['rawdata', 'despiked']:
out[a + '_std'] = std_devs(d[a][ind])
out[a + '_std'][out[a + '_std'] == 0] = np.nan
out = pd.DataFrame(out, index=self.data[s].Time[ind])
out.index.name = 'Time'
header = ['# Sample: %s' % (s),
'# Data Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'# Processed using %s configuration' % (self.config['config']),
'# Analysis Stage: %s' % (focus_stage),
'# Unit: %s' % ud[focus_stage]]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s_%s.csv' % (outdir, s, focus_stage), 'w') as f:
f.write(header)
f.write(csv)
if zip_archive:
utils.zipdir(outdir, delete=True)
return
|
[
"def",
"export_traces",
"(",
"self",
",",
"outdir",
"=",
"None",
",",
"focus_stage",
"=",
"None",
",",
"analytes",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"'All_Analyses'",
",",
"filt",
"=",
"False",
",",
"zip_archive",
"=",
"False",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"if",
"focus_stage",
"is",
"None",
":",
"focus_stage",
"=",
"self",
".",
"focus_stage",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"a",
"!=",
"self",
".",
"internal_standard",
"]",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"export_dir",
",",
"'trace_export'",
")",
"ud",
"=",
"{",
"'rawdata'",
":",
"'counts'",
",",
"'despiked'",
":",
"'counts'",
",",
"'bkgsub'",
":",
"'background corrected counts'",
",",
"'ratios'",
":",
"'counts/count {:s}'",
",",
"'calibrated'",
":",
"'mol/mol {:s}'",
"}",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"ud",
"[",
"focus_stage",
"]",
"=",
"ud",
"[",
"focus_stage",
"]",
".",
"format",
"(",
"self",
".",
"internal_standard",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"data",
"[",
"focus_stage",
"]",
"ind",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"out",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"analytes",
":",
"out",
"[",
"a",
"]",
"=",
"nominal_values",
"(",
"d",
"[",
"a",
"]",
"[",
"ind",
"]",
")",
"if",
"focus_stage",
"not",
"in",
"[",
"'rawdata'",
",",
"'despiked'",
"]",
":",
"out",
"[",
"a",
"+",
"'_std'",
"]",
"=",
"std_devs",
"(",
"d",
"[",
"a",
"]",
"[",
"ind",
"]",
")",
"out",
"[",
"a",
"+",
"'_std'",
"]",
"[",
"out",
"[",
"a",
"+",
"'_std'",
"]",
"==",
"0",
"]",
"=",
"np",
".",
"nan",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"out",
",",
"index",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"Time",
"[",
"ind",
"]",
")",
"out",
".",
"index",
".",
"name",
"=",
"'Time'",
"header",
"=",
"[",
"'# Sample: %s'",
"%",
"(",
"s",
")",
",",
"'# Data Exported from LATOOLS on %s'",
"%",
"(",
"time",
".",
"strftime",
"(",
"'%Y:%m:%d %H:%M:%S'",
")",
")",
",",
"'# Processed using %s configuration'",
"%",
"(",
"self",
".",
"config",
"[",
"'config'",
"]",
")",
",",
"'# Analysis Stage: %s'",
"%",
"(",
"focus_stage",
")",
",",
"'# Unit: %s'",
"%",
"ud",
"[",
"focus_stage",
"]",
"]",
"header",
"=",
"'\\n'",
".",
"join",
"(",
"header",
")",
"+",
"'\\n'",
"csv",
"=",
"out",
".",
"to_csv",
"(",
")",
"with",
"open",
"(",
"'%s/%s_%s.csv'",
"%",
"(",
"outdir",
",",
"s",
",",
"focus_stage",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
")",
"f",
".",
"write",
"(",
"csv",
")",
"if",
"zip_archive",
":",
"utils",
".",
"zipdir",
"(",
"outdir",
",",
"delete",
"=",
"True",
")",
"return"
] |
Function to export raw data.
Parameters
----------
outdir : str
directory to save the traces. Defaults to 'main-dir-name_export'.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array - like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array - like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
|
[
"Function",
"to",
"export",
"raw",
"data",
"."
] |
python
|
test
|
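The per-sample write at the end of export_traces (commented header lines followed by the CSV body) is easy to try in isolation. A trimmed sketch assuming only pandas; the output directory, sample name and column are invented for the example.

import os
import time
import pandas as pd

def export_trace(outdir, sample, stage, df):
    """Write '# ...' header lines followed by DataFrame.to_csv(), like export_traces."""
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    header = '\n'.join(['# Sample: %s' % sample,
                        '# Data Exported on %s' % time.strftime('%Y:%m:%d %H:%M:%S'),
                        '# Analysis Stage: %s' % stage]) + '\n'
    with open('%s/%s_%s.csv' % (outdir, sample, stage), 'w') as f:
        f.write(header)
        f.write(df.to_csv())

data = pd.DataFrame({'Mg24': [1.0, 2.0]}, index=pd.Index([0.0, 1.0], name='Time'))
export_trace('trace_export', 'Sample-1', 'rawdata', data)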
python-diamond/Diamond
|
src/collectors/openstackswiftrecon/openstackswiftrecon.py
|
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/openstackswiftrecon/openstackswiftrecon.py#L37-L49
|
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(OpenstackSwiftReconCollector, self).get_default_config()
config.update({
'path': 'swiftrecon',
'recon_account_cache': '/var/cache/swift/account.recon',
'recon_container_cache': '/var/cache/swift/container.recon',
'recon_object_cache': '/var/cache/swift/object.recon',
'interval': 300,
})
return config
|
[
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"OpenstackSwiftReconCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'swiftrecon'",
",",
"'recon_account_cache'",
":",
"'/var/cache/swift/account.recon'",
",",
"'recon_container_cache'",
":",
"'/var/cache/swift/container.recon'",
",",
"'recon_object_cache'",
":",
"'/var/cache/swift/object.recon'",
",",
"'interval'",
":",
"300",
",",
"}",
")",
"return",
"config"
] |
Returns the default collector settings
|
[
"Returns",
"the",
"default",
"collector",
"settings"
] |
python
|
train
|
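The method follows the usual "start from super().get_default_config(), then update()" pattern. A self-contained sketch of that pattern with a stand-in base class (not the real diamond.collector.Collector):

class BaseCollector:
    def get_default_config(self):
        return {'interval': 60, 'enabled': True}

class SwiftReconLikeCollector(BaseCollector):
    def get_default_config(self):
        # Inherit the parent's defaults, then layer collector-specific keys on top.
        config = super(SwiftReconLikeCollector, self).get_default_config()
        config.update({
            'path': 'swiftrecon',
            'interval': 300,    # overrides the inherited default of 60
        })
        return config

assert SwiftReconLikeCollector().get_default_config()['interval'] == 300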
calvinku96/labreporthelper
|
labreporthelper/bestfit/bestfit.py
|
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/bestfit/bestfit.py#L42-L55
|
def set_args(self, **kwargs):
"""
Set more arguments to self.args
args:
**kwargs:
key and value represent dictionary key and value
"""
try:
kwargs_items = kwargs.iteritems()
except AttributeError:
kwargs_items = kwargs.items()
for key, val in kwargs_items:
self.args[key] = val
|
[
"def",
"set_args",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"kwargs_items",
"=",
"kwargs",
".",
"iteritems",
"(",
")",
"except",
"AttributeError",
":",
"kwargs_items",
"=",
"kwargs",
".",
"items",
"(",
")",
"for",
"key",
",",
"val",
"in",
"kwargs_items",
":",
"self",
".",
"args",
"[",
"key",
"]",
"=",
"val"
] |
Set more arguments to self.args
args:
**kwargs:
key and value represent dictionary key and value
|
[
"Set",
"more",
"arguments",
"to",
"self",
".",
"args"
] |
python
|
train
|
dddomodossola/remi
|
editor/editor_widgets.py
|
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/editor/editor_widgets.py#L263-L266
|
def show(self, baseAppInstance):
"""Allows to show the widget as root window"""
self.from_dict_to_fields(self.configDict)
super(ProjectConfigurationDialog, self).show(baseAppInstance)
|
[
"def",
"show",
"(",
"self",
",",
"baseAppInstance",
")",
":",
"self",
".",
"from_dict_to_fields",
"(",
"self",
".",
"configDict",
")",
"super",
"(",
"ProjectConfigurationDialog",
",",
"self",
")",
".",
"show",
"(",
"baseAppInstance",
")"
] |
Allows to show the widget as root window
|
[
"Allows",
"to",
"show",
"the",
"widget",
"as",
"root",
"window"
] |
python
|
train
|
numenta/nupic
|
src/nupic/frameworks/opf/helpers.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L37-L48
|
def loadExperiment(path):
"""Loads the experiment description file from the path.
:param path: (string) The path to a directory containing a description.py file
or the file itself.
:returns: (config, control)
"""
if not os.path.isdir(path):
path = os.path.dirname(path)
descriptionPyModule = loadExperimentDescriptionScriptFromDir(path)
expIface = getExperimentDescriptionInterfaceFromModule(descriptionPyModule)
return expIface.getModelDescription(), expIface.getModelControl()
|
[
"def",
"loadExperiment",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"descriptionPyModule",
"=",
"loadExperimentDescriptionScriptFromDir",
"(",
"path",
")",
"expIface",
"=",
"getExperimentDescriptionInterfaceFromModule",
"(",
"descriptionPyModule",
")",
"return",
"expIface",
".",
"getModelDescription",
"(",
")",
",",
"expIface",
".",
"getModelControl",
"(",
")"
] |
Loads the experiment description file from the path.
:param path: (string) The path to a directory containing a description.py file
or the file itself.
:returns: (config, control)
|
[
"Loads",
"the",
"experiment",
"description",
"file",
"from",
"the",
"path",
"."
] |
python
|
valid
|
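The first two lines normalize the argument so callers can pass either the description.py file or its directory; that idiom in isolation (the helper name here is made up):

import os
import tempfile

def description_dir(path):
    """Accept either a description.py file path or the directory containing it."""
    return path if os.path.isdir(path) else os.path.dirname(path)

d = tempfile.mkdtemp()
assert description_dir(d) == d
assert description_dir(os.path.join(d, 'description.py')) == d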
chrisrink10/basilisp
|
src/basilisp/lang/runtime.py
|
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L788-L793
|
def concat(*seqs) -> ISeq:
"""Concatenate the sequences given by seqs into a single ISeq."""
allseqs = lseq.sequence(itertools.chain(*filter(None, map(to_seq, seqs))))
if allseqs is None:
return lseq.EMPTY
return allseqs
|
[
"def",
"concat",
"(",
"*",
"seqs",
")",
"->",
"ISeq",
":",
"allseqs",
"=",
"lseq",
".",
"sequence",
"(",
"itertools",
".",
"chain",
"(",
"*",
"filter",
"(",
"None",
",",
"map",
"(",
"to_seq",
",",
"seqs",
")",
")",
")",
")",
"if",
"allseqs",
"is",
"None",
":",
"return",
"lseq",
".",
"EMPTY",
"return",
"allseqs"
] |
Concatenate the sequences given by seqs into a single ISeq.
|
[
"Concatenate",
"the",
"sequences",
"given",
"by",
"seqs",
"into",
"a",
"single",
"ISeq",
"."
] |
python
|
test
|
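The same "chain the inputs, skipping empty ones" behaviour can be shown with plain lists (basilisp's to_seq returns None for empty collections, which is what filter(None, ...) drops); a small illustrative sketch:

import itertools

def concat_lists(*seqs):
    """Chain sequences while skipping None/empty inputs, mirroring concat above."""
    return list(itertools.chain(*filter(None, seqs)))

assert concat_lists([1, 2], None, [], [3]) == [1, 2, 3]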
bdcht/grandalf
|
grandalf/layouts.py
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L660-L682
|
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
|
[
"def",
"_coord_vertical_alignment",
"(",
"self",
")",
":",
"dirh",
",",
"dirv",
"=",
"self",
".",
"dirh",
",",
"self",
".",
"dirv",
"g",
"=",
"self",
".",
"grx",
"for",
"l",
"in",
"self",
".",
"layers",
"[",
":",
":",
"-",
"dirv",
"]",
":",
"if",
"not",
"l",
".",
"prevlayer",
"(",
")",
":",
"continue",
"r",
"=",
"None",
"for",
"vk",
"in",
"l",
"[",
":",
":",
"dirh",
"]",
":",
"for",
"m",
"in",
"l",
".",
"_medianindex",
"(",
"vk",
")",
":",
"# take the median node in dirv layer:",
"um",
"=",
"l",
".",
"prevlayer",
"(",
")",
"[",
"m",
"]",
"# if vk is \"free\" align it with um's root",
"if",
"g",
"[",
"vk",
"]",
".",
"align",
"is",
"vk",
":",
"if",
"dirv",
"==",
"1",
":",
"vpair",
"=",
"(",
"vk",
",",
"um",
")",
"else",
":",
"vpair",
"=",
"(",
"um",
",",
"vk",
")",
"# if vk<->um link is used for alignment",
"if",
"(",
"vpair",
"not",
"in",
"self",
".",
"conflicts",
")",
"and",
"(",
"r",
"==",
"None",
"or",
"dirh",
"*",
"r",
"<",
"dirh",
"*",
"m",
")",
":",
"g",
"[",
"um",
"]",
".",
"align",
"=",
"vk",
"g",
"[",
"vk",
"]",
".",
"root",
"=",
"g",
"[",
"um",
"]",
".",
"root",
"g",
"[",
"vk",
"]",
".",
"align",
"=",
"g",
"[",
"vk",
"]",
".",
"root",
"r",
"=",
"m"
] |
performs vertical alignment according to current dirvh internal state.
|
[
"performs",
"vertical",
"alignment",
"according",
"to",
"current",
"dirvh",
"internal",
"state",
"."
] |
python
|
train
|
tensorflow/probability
|
tensorflow_probability/python/math/linalg.py
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L837-L871
|
def _sparse_tensor_dense_matmul(sp_a, b, **kwargs):
"""Returns (batched) matmul of a SparseTensor with a Tensor.
Args:
sp_a: `SparseTensor` representing a (batch of) matrices.
b: `Tensor` representing a (batch of) matrices, with the same batch shape of
`sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.
Returns:
product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then
the shape is adjusted accordingly.
"""
batch_shape = _get_shape(sp_a)[:-2]
# Reshape the SparseTensor into a rank 3 SparseTensors, with the
# batch shape flattened to a single dimension. If the batch rank is 0, then
# we add a batch dimension of rank 1.
sp_a = tf.sparse.reshape(sp_a, tf.concat([[-1], _get_shape(sp_a)[-2:]],
axis=0))
# Reshape b to stack the batch dimension along the rows.
b = tf.reshape(b, tf.concat([[-1], _get_shape(b)[-1:]], axis=0))
# Convert the SparseTensor to a matrix in block diagonal form with blocks of
# matrices [M, N]. This allows us to use tf.sparse_tensor_dense_matmul which
# only accepts rank 2 (Sparse)Tensors.
out = tf.sparse.sparse_dense_matmul(_sparse_block_diag(sp_a), b, **kwargs)
# Finally retrieve the original batch shape from the resulting rank 2 Tensor.
# Note that we avoid inferring the final shape from `sp_a` or `b` because we
# might have transposed one or both of them.
return tf.reshape(
out,
tf.concat([batch_shape, [-1], _get_shape(out)[-1:]], axis=0))
|
[
"def",
"_sparse_tensor_dense_matmul",
"(",
"sp_a",
",",
"b",
",",
"*",
"*",
"kwargs",
")",
":",
"batch_shape",
"=",
"_get_shape",
"(",
"sp_a",
")",
"[",
":",
"-",
"2",
"]",
"# Reshape the SparseTensor into a rank 3 SparseTensors, with the",
"# batch shape flattened to a single dimension. If the batch rank is 0, then",
"# we add a batch dimension of rank 1.",
"sp_a",
"=",
"tf",
".",
"sparse",
".",
"reshape",
"(",
"sp_a",
",",
"tf",
".",
"concat",
"(",
"[",
"[",
"-",
"1",
"]",
",",
"_get_shape",
"(",
"sp_a",
")",
"[",
"-",
"2",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"# Reshape b to stack the batch dimension along the rows.",
"b",
"=",
"tf",
".",
"reshape",
"(",
"b",
",",
"tf",
".",
"concat",
"(",
"[",
"[",
"-",
"1",
"]",
",",
"_get_shape",
"(",
"b",
")",
"[",
"-",
"1",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"# Convert the SparseTensor to a matrix in block diagonal form with blocks of",
"# matrices [M, N]. This allow us to use tf.sparse_tensor_dense_matmul which",
"# only accepts rank 2 (Sparse)Tensors.",
"out",
"=",
"tf",
".",
"sparse",
".",
"sparse_dense_matmul",
"(",
"_sparse_block_diag",
"(",
"sp_a",
")",
",",
"b",
",",
"*",
"*",
"kwargs",
")",
"# Finally retrieve the original batch shape from the resulting rank 2 Tensor.",
"# Note that we avoid inferring the final shape from `sp_a` or `b` because we",
"# might have transposed one or both of them.",
"return",
"tf",
".",
"reshape",
"(",
"out",
",",
"tf",
".",
"concat",
"(",
"[",
"batch_shape",
",",
"[",
"-",
"1",
"]",
",",
"_get_shape",
"(",
"out",
")",
"[",
"-",
"1",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")"
] |
Returns (batched) matmul of a SparseTensor with a Tensor.
Args:
sp_a: `SparseTensor` representing a (batch of) matrices.
b: `Tensor` representing a (batch of) matrices, with the same batch shape of
`sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.
Returns:
product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then
the shape is adjusted accordingly.
|
[
"Returns",
"(",
"batched",
")",
"matmul",
"of",
"a",
"SparseTensor",
"with",
"a",
"Tensor",
"."
] |
python
|
test
|
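The core trick, multiplying a batch of sparse matrices by stacking them into one block-diagonal matrix and row-stacking the dense operand, can be checked outside TensorFlow. A sketch using scipy.sparse and numpy, verified against a per-sample loop; shapes and densities are arbitrary.

import numpy as np
import scipy.sparse as sp

batch, m, n, k = 3, 4, 5, 2
rng = np.random.default_rng(0)
a_batch = [sp.random(m, n, density=0.4, format='csr', random_state=i) for i in range(batch)]
b = rng.standard_normal((batch, n, k))

# One block-diagonal (batch*m, batch*n) sparse matrix times a (batch*n, k) dense matrix.
block = sp.block_diag(a_batch, format='csr')
out = np.asarray(block @ b.reshape(batch * n, k)).reshape(batch, m, k)

# Same result as multiplying each (m, n) x (n, k) pair in a Python loop.
expected = np.stack([np.asarray(a_batch[i] @ b[i]) for i in range(batch)])
assert np.allclose(out, expected)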
neuropsychology/NeuroKit.py
|
neurokit/signal/events.py
|
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/signal/events.py#L235-L295
|
def plot_events_in_signal(signal, events_onsets, color="red", marker=None):
"""
Plot events in signal.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
events_onsets : list or ndarray
Events location.
color : int or list
Marker color.
marker : marker or list of markers (for possible marker values, see: https://matplotlib.org/api/markers_api.html)
Marker type.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=signal, sampling_rate=1000)
>>> events_onsets = bio["ECG"]["R_Peaks"]
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], events_onsets)
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], events_onsets, color="red", marker="o")
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], [bio["ECG"]["P_Waves"], bio["ECG"]["R_Peaks"]], color=["blue", "red"], marker=["d","o"])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- `Renatosc <https://github.com/renatosc/>`_
*Dependencies*
- matplotlib
- pandas
"""
df = pd.DataFrame(signal)
ax = df.plot()
def plotOnSignal(x, color, marker=None):
if (marker is None):
plt.axvline(x=event, color=color)
else:
plt.plot(x, signal[x], marker, color=color)
events_onsets = np.array(events_onsets)
try:
len(events_onsets[0])
for index, dim in enumerate(events_onsets):
for event in dim:
plotOnSignal(x=event,
color=color[index] if isinstance(color, list) else color,
marker=marker[index] if isinstance(marker, list) else marker)
except TypeError:
for event in events_onsets:
plotOnSignal(x=event,
color=color[0] if isinstance(color, list) else color,
marker=marker[0] if isinstance(marker, list) else marker)
return ax
|
[
"def",
"plot_events_in_signal",
"(",
"signal",
",",
"events_onsets",
",",
"color",
"=",
"\"red\"",
",",
"marker",
"=",
"None",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"signal",
")",
"ax",
"=",
"df",
".",
"plot",
"(",
")",
"def",
"plotOnSignal",
"(",
"x",
",",
"color",
",",
"marker",
"=",
"None",
")",
":",
"if",
"(",
"marker",
"is",
"None",
")",
":",
"plt",
".",
"axvline",
"(",
"x",
"=",
"event",
",",
"color",
"=",
"color",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"x",
",",
"signal",
"[",
"x",
"]",
",",
"marker",
",",
"color",
"=",
"color",
")",
"events_onsets",
"=",
"np",
".",
"array",
"(",
"events_onsets",
")",
"try",
":",
"len",
"(",
"events_onsets",
"[",
"0",
"]",
")",
"for",
"index",
",",
"dim",
"in",
"enumerate",
"(",
"events_onsets",
")",
":",
"for",
"event",
"in",
"dim",
":",
"plotOnSignal",
"(",
"x",
"=",
"event",
",",
"color",
"=",
"color",
"[",
"index",
"]",
"if",
"isinstance",
"(",
"color",
",",
"list",
")",
"else",
"color",
",",
"marker",
"=",
"marker",
"[",
"index",
"]",
"if",
"isinstance",
"(",
"marker",
",",
"list",
")",
"else",
"marker",
")",
"except",
"TypeError",
":",
"for",
"event",
"in",
"events_onsets",
":",
"plotOnSignal",
"(",
"x",
"=",
"event",
",",
"color",
"=",
"color",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"color",
",",
"list",
")",
"else",
"color",
",",
"marker",
"=",
"marker",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"marker",
",",
"list",
")",
"else",
"marker",
")",
"return",
"ax"
] |
Plot events in signal.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
events_onsets : list or ndarray
Events location.
color : int or list
Marker color.
marker : marker or list of markers (for possible marker values, see: https://matplotlib.org/api/markers_api.html)
Marker type.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=signal, sampling_rate=1000)
>>> events_onsets = bio["ECG"]["R_Peaks"]
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], events_onsets)
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], events_onsets, color="red", marker="o")
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], [bio["ECG"]["P_Waves"], bio["ECG"]["R_Peaks"]], color=["blue", "red"], marker=["d","o"])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- `Renatosc <https://github.com/renatosc/>`_
*Dependencies*
- matplotlib
- pandas
|
[
"Plot",
"events",
"in",
"signal",
"."
] |
python
|
train
|
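A self-contained sketch of the same plotting idea (a vertical line per event on top of the signal, or a marker at the signal value), using only numpy and matplotlib; the signal and event indices are synthetic.

import numpy as np
import matplotlib
matplotlib.use('Agg')                     # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

signal = np.sin(np.linspace(0, 6 * np.pi, 500))
events_onsets = [50, 200, 350]            # sample indices of the events

fig, ax = plt.subplots()
ax.plot(signal)
for event in events_onsets:
    ax.axvline(x=event, color='red')      # or: ax.plot(event, signal[event], 'o', color='red')
fig.savefig('events_in_signal.png')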