A corpus of Python functions paired with their docstrings, one function per record. Schema and observed value ranges: repo (string, 7–54 chars), path (string, 4–192 chars), url (string, 87–284 chars), code (string, 78–104k chars), code_tokens (list, a tokenization of code), docstring (string, 1–46.9k chars), docstring_tokens (list, a tokenization of docstring), language (string, 1 class), partition (string, 3 classes). Each record below is shown as `repo | path | url | language | partition`, followed by its code (which includes the docstring).

PythonCharmers/python-future | src/future/backports/xmlrpc/server.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/xmlrpc/server.py#L243-L278 | python | train

```python
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
    """Dispatches an XML-RPC method from marshalled (XML) data.

    XML-RPC methods are dispatched from the marshalled (XML) data
    using the _dispatch method and the result is returned as
    marshalled data. For backwards compatibility, a dispatch
    function can be provided as an argument (see comment in
    SimpleXMLRPCRequestHandler.do_POST) but overriding the
    existing method through subclassing is the preferred means
    of changing method dispatch behavior.
    """
    try:
        params, method = loads(data, use_builtin_types=self.use_builtin_types)

        # generate response
        if dispatch_method is not None:
            response = dispatch_method(method, params)
        else:
            response = self._dispatch(method, params)
        # wrap response in a singleton tuple
        response = (response,)
        response = dumps(response, methodresponse=1,
                         allow_none=self.allow_none, encoding=self.encoding)
    except Fault as fault:
        response = dumps(fault, allow_none=self.allow_none,
                         encoding=self.encoding)
    except:
        # report exception back to server
        exc_type, exc_value, exc_tb = sys.exc_info()
        response = dumps(
            Fault(1, "%s:%s" % (exc_type, exc_value)),
            encoding=self.encoding, allow_none=self.allow_none,
        )

    return response.encode(self.encoding)
```
"def",
"_marshaled_dispatch",
"(",
"self",
",",
"data",
",",
"dispatch_method",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"try",
":",
"params",
",",
"method",
"=",
"loads",
"(",
"data",
",",
"use_builtin_types",
"=",
"self",
".",
"use_builtin_types",
")",
"# generate response",
"if",
"dispatch_method",
"is",
"not",
"None",
":",
"response",
"=",
"dispatch_method",
"(",
"method",
",",
"params",
")",
"else",
":",
"response",
"=",
"self",
".",
"_dispatch",
"(",
"method",
",",
"params",
")",
"# wrap response in a singleton tuple",
"response",
"=",
"(",
"response",
",",
")",
"response",
"=",
"dumps",
"(",
"response",
",",
"methodresponse",
"=",
"1",
",",
"allow_none",
"=",
"self",
".",
"allow_none",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"except",
"Fault",
"as",
"fault",
":",
"response",
"=",
"dumps",
"(",
"fault",
",",
"allow_none",
"=",
"self",
".",
"allow_none",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"except",
":",
"# report exception back to server",
"exc_type",
",",
"exc_value",
",",
"exc_tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"response",
"=",
"dumps",
"(",
"Fault",
"(",
"1",
",",
"\"%s:%s\"",
"%",
"(",
"exc_type",
",",
"exc_value",
")",
")",
",",
"encoding",
"=",
"self",
".",
"encoding",
",",
"allow_none",
"=",
"self",
".",
"allow_none",
",",
")",
"return",
"response",
".",
"encode",
"(",
"self",
".",
"encoding",
")"
] | Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the preferred means
of changing method dispatch behavior. | [
"Dispatches",
"an",
"XML",
"-",
"RPC",
"method",
"from",
"marshalled",
"(",
"XML",
")",
"data",
"."
] | python | train |
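
A quick sketch of what this method consumes and produces, using only the standard library's `xmlrpc.client` in place of the backport above — the payload is the same marshalled XML:

```python
# Hedged sketch: stdlib xmlrpc.client stands in for the backport's loads/dumps.
from xmlrpc.client import dumps, loads

data = dumps((2, 3), methodname="add")   # a marshalled (XML) request
params, method = loads(data)             # what _marshaled_dispatch unpacks
assert method == "add" and params == (2, 3)

# A successful result is wrapped in a singleton tuple before marshalling.
response = dumps((params[0] + params[1],), methodresponse=1)
print(response.splitlines()[0])  # <?xml version='1.0'?>
```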

opennode/waldur-core | waldur_core/core/schemas.py | https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/core/schemas.py#L250-L258 | python | train

```python
def create_view(self, callback, method, request=None):
    """
    Given a callback, return an actual view instance.
    """
    view = super(WaldurSchemaGenerator, self).create_view(callback, method, request)
    if is_disabled_action(view):
        view.exclude_from_schema = True
    return view
```
"def",
"create_view",
"(",
"self",
",",
"callback",
",",
"method",
",",
"request",
"=",
"None",
")",
":",
"view",
"=",
"super",
"(",
"WaldurSchemaGenerator",
",",
"self",
")",
".",
"create_view",
"(",
"callback",
",",
"method",
",",
"request",
")",
"if",
"is_disabled_action",
"(",
"view",
")",
":",
"view",
".",
"exclude_from_schema",
"=",
"True",
"return",
"view"
] | Given a callback, return an actual view instance. | [
"Given",
"a",
"callback",
"return",
"an",
"actual",
"view",
"instance",
"."
] | python | train |

pyhys/minimalmodbus | dummy_serial.py | https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/dummy_serial.py#L118-L127 | python | train

```python
def open(self):
    """Open a (previously initialized) port on dummy_serial."""
    if VERBOSE:
        _print_out('\nDummy_serial: Opening port\n')

    if self._isOpen:
        raise IOError('Dummy_serial: The port is already open')

    self._isOpen = True
    self.port = self.initial_port_name
```
"def",
"open",
"(",
"self",
")",
":",
"if",
"VERBOSE",
":",
"_print_out",
"(",
"'\\nDummy_serial: Opening port\\n'",
")",
"if",
"self",
".",
"_isOpen",
":",
"raise",
"IOError",
"(",
"'Dummy_serial: The port is already open'",
")",
"self",
".",
"_isOpen",
"=",
"True",
"self",
".",
"port",
"=",
"self",
".",
"initial_port_name"
] | Open a (previously initialized) port on dummy_serial. | [
"Open",
"a",
"(",
"previously",
"initialized",
")",
"port",
"on",
"dummy_serial",
"."
] | python | train |

googleapis/google-cloud-python | bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py#L1447-L1548 | python | train

```python
def update_app_profile(
    self,
    app_profile,
    update_mask,
    ignore_warnings=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Updates an app profile within an instance.

    Example:
        >>> from google.cloud import bigtable_admin_v2
        >>>
        >>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
        >>>
        >>> # TODO: Initialize `app_profile`:
        >>> app_profile = {}
        >>>
        >>> # TODO: Initialize `update_mask`:
        >>> update_mask = {}
        >>>
        >>> response = client.update_app_profile(app_profile, update_mask)
        >>>
        >>> def callback(operation_future):
        ...     # Handle result.
        ...     result = operation_future.result()
        >>>
        >>> response.add_done_callback(callback)
        >>>
        >>> # Handle metadata.
        >>> metadata = response.metadata()

    Args:
        app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile`
        update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced.
            If unset, all fields will be replaced.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask`
        ignore_warnings (bool): If true, ignore safety checks when updating the app profile.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "update_app_profile" not in self._inner_api_calls:
        self._inner_api_calls[
            "update_app_profile"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.update_app_profile,
            default_retry=self._method_configs["UpdateAppProfile"].retry,
            default_timeout=self._method_configs["UpdateAppProfile"].timeout,
            client_info=self._client_info,
        )

    request = bigtable_instance_admin_pb2.UpdateAppProfileRequest(
        app_profile=app_profile,
        update_mask=update_mask,
        ignore_warnings=ignore_warnings,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("app_profile.name", app_profile.name)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    operation = self._inner_api_calls["update_app_profile"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        instance_pb2.AppProfile,
        metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata,
    )
```
"def",
"update_app_profile",
"(",
"self",
",",
"app_profile",
",",
"update_mask",
",",
"ignore_warnings",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"update_app_profile\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"update_app_profile\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"update_app_profile",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"UpdateAppProfile\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"UpdateAppProfile\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"bigtable_instance_admin_pb2",
".",
"UpdateAppProfileRequest",
"(",
"app_profile",
"=",
"app_profile",
",",
"update_mask",
"=",
"update_mask",
",",
"ignore_warnings",
"=",
"ignore_warnings",
",",
")",
"if",
"metadata",
"is",
"None",
":",
"metadata",
"=",
"[",
"]",
"metadata",
"=",
"list",
"(",
"metadata",
")",
"try",
":",
"routing_header",
"=",
"[",
"(",
"\"app_profile.name\"",
",",
"app_profile",
".",
"name",
")",
"]",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"routing_metadata",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"routing_header",
")",
"metadata",
".",
"append",
"(",
"routing_metadata",
")",
"operation",
"=",
"self",
".",
"_inner_api_calls",
"[",
"\"update_app_profile\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"return",
"google",
".",
"api_core",
".",
"operation",
".",
"from_gapic",
"(",
"operation",
",",
"self",
".",
"transport",
".",
"_operations_client",
",",
"instance_pb2",
".",
"AppProfile",
",",
"metadata_type",
"=",
"bigtable_instance_admin_pb2",
".",
"UpdateAppProfileMetadata",
",",
")"
] | Updates an app profile within an instance.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
>>>
>>> # TODO: Initialize `app_profile`:
>>> app_profile = {}
>>>
>>> # TODO: Initialize `update_mask`:
>>> update_mask = {}
>>>
>>> response = client.update_app_profile(app_profile, update_mask)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile`
update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced.
If unset, all fields will be replaced.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask`
ignore_warnings (bool): If true, ignore safety checks when updating the app profile.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Updates",
"an",
"app",
"profile",
"within",
"an",
"instance",
"."
] | python | train |

numberoverzero/bloop | bloop/stream/shard.py | https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L206-L250 | python | train

```python
def load_children(self):
    """If the Shard doesn't have any children, tries to find some from DescribeStream.

    If the Shard is open this won't find any children, so an empty response doesn't
    mean the Shard will **never** have children.
    """
    # Child count is fixed the first time any of the following happen:
    # 0 :: stream closed or throughput decreased
    # 1 :: shard was open for ~4 hours
    # 2 :: throughput increased
    if self.children:
        return self.children

    # ParentShardId -> [Shard, ...]
    by_parent = collections.defaultdict(list)
    # ShardId -> Shard
    by_id = {}
    for shard in self.session.describe_stream(
            stream_arn=self.stream_arn,
            first_shard=self.shard_id)["Shards"]:
        parent_list = by_parent[shard.get("ParentShardId")]
        shard = Shard(
            stream_arn=self.stream_arn,
            shard_id=shard["ShardId"],
            parent=shard.get("ParentShardId"),
            session=self.session)
        parent_list.append(shard)
        by_id[shard.shard_id] = shard

    # Find this shard when looking up shards by ParentShardId
    by_id[self.shard_id] = self

    # Insert this shard's children, then handle its child's descendants etc.
    to_insert = collections.deque(by_parent[self.shard_id])
    while to_insert:
        shard = to_insert.popleft()
        # ParentShardId -> Shard
        shard.parent = by_id[shard.parent]
        shard.parent.children.append(shard)
        # Continue for any shards that have this shard as their parent
        to_insert.extend(by_parent[shard.shard_id])
    return self.children
```
"def",
"load_children",
"(",
"self",
")",
":",
"# Child count is fixed the first time any of the following happen:",
"# 0 :: stream closed or throughput decreased",
"# 1 :: shard was open for ~4 hours",
"# 2 :: throughput increased",
"if",
"self",
".",
"children",
":",
"return",
"self",
".",
"children",
"# ParentShardId -> [Shard, ...]",
"by_parent",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"# ShardId -> Shard",
"by_id",
"=",
"{",
"}",
"for",
"shard",
"in",
"self",
".",
"session",
".",
"describe_stream",
"(",
"stream_arn",
"=",
"self",
".",
"stream_arn",
",",
"first_shard",
"=",
"self",
".",
"shard_id",
")",
"[",
"\"Shards\"",
"]",
":",
"parent_list",
"=",
"by_parent",
"[",
"shard",
".",
"get",
"(",
"\"ParentShardId\"",
")",
"]",
"shard",
"=",
"Shard",
"(",
"stream_arn",
"=",
"self",
".",
"stream_arn",
",",
"shard_id",
"=",
"shard",
"[",
"\"ShardId\"",
"]",
",",
"parent",
"=",
"shard",
".",
"get",
"(",
"\"ParentShardId\"",
")",
",",
"session",
"=",
"self",
".",
"session",
")",
"parent_list",
".",
"append",
"(",
"shard",
")",
"by_id",
"[",
"shard",
".",
"shard_id",
"]",
"=",
"shard",
"# Find this shard when looking up shards by ParentShardId",
"by_id",
"[",
"self",
".",
"shard_id",
"]",
"=",
"self",
"# Insert this shard's children, then handle its child's descendants etc.",
"to_insert",
"=",
"collections",
".",
"deque",
"(",
"by_parent",
"[",
"self",
".",
"shard_id",
"]",
")",
"while",
"to_insert",
":",
"shard",
"=",
"to_insert",
".",
"popleft",
"(",
")",
"# ParentShardId -> Shard",
"shard",
".",
"parent",
"=",
"by_id",
"[",
"shard",
".",
"parent",
"]",
"shard",
".",
"parent",
".",
"children",
".",
"append",
"(",
"shard",
")",
"# Continue for any shards that have this shard as their parent",
"to_insert",
".",
"extend",
"(",
"by_parent",
"[",
"shard",
".",
"shard_id",
"]",
")",
"return",
"self",
".",
"children"
] | If the Shard doesn't have any children, tries to find some from DescribeStream.
If the Shard is open this won't find any children, so an empty response doesn't
mean the Shard will **never** have children. | [
"If",
"the",
"Shard",
"doesn",
"t",
"have",
"any",
"children",
"tries",
"to",
"find",
"some",
"from",
"DescribeStream",
"."
] | python | train |
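
The linking pass itself needs nothing from DynamoDB; a self-contained sketch of the same group-by-parent, breadth-first idea over plain dicts (the shard ids here are made up):

```python
import collections

# Hypothetical DescribeStream-style payload: a flat list with parent pointers.
shards = [
    {"ShardId": "s1", "ParentShardId": None},
    {"ShardId": "s2", "ParentShardId": "s1"},
    {"ShardId": "s3", "ParentShardId": "s1"},
    {"ShardId": "s4", "ParentShardId": "s2"},
]

by_parent = collections.defaultdict(list)
for s in shards:
    by_parent[s["ParentShardId"]].append(s["ShardId"])

tree = collections.defaultdict(list)
# Breadth-first from the roots: attach children, then enqueue their children.
to_insert = collections.deque(by_parent[None])
while to_insert:
    shard_id = to_insert.popleft()
    for child in by_parent[shard_id]:
        tree[shard_id].append(child)
        to_insert.append(child)

print(dict(tree))  # {'s1': ['s2', 's3'], 's2': ['s4']}
```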

django-leonardo/django-leonardo | leonardo/module/media/admin/folder/admin.py | https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/module/media/admin/folder/admin.py#L113-L136 | python | train

```python
def response_change(self, request, obj):
    """
    Overrides the default to be able to forward to the directory listing
    instead of the default change_list_view
    """
    r = super(FolderAdmin, self).response_change(request, obj)
    # Code borrowed from django ModelAdmin to determine changelist on the
    # fly
    if r['Location']:
        # it was a successful save
        if (r['Location'] in ['../'] or
                r['Location'] == self._get_post_url(obj)):
            if obj.parent:
                url = reverse('admin:filer-directory_listing',
                              kwargs={'folder_id': obj.parent.id})
            else:
                url = reverse('admin:filer-directory_listing-root')
            url = "%s%s%s" % (url, popup_param(request),
                              selectfolder_param(request, "&"))
            return HttpResponseRedirect(url)
        else:
            # this means it probably was a save_and_continue_editing
            pass
    return r
```
"def",
"response_change",
"(",
"self",
",",
"request",
",",
"obj",
")",
":",
"r",
"=",
"super",
"(",
"FolderAdmin",
",",
"self",
")",
".",
"response_change",
"(",
"request",
",",
"obj",
")",
"# Code borrowed from django ModelAdmin to determine changelist on the",
"# fly",
"if",
"r",
"[",
"'Location'",
"]",
":",
"# it was a successful save",
"if",
"(",
"r",
"[",
"'Location'",
"]",
"in",
"[",
"'../'",
"]",
"or",
"r",
"[",
"'Location'",
"]",
"==",
"self",
".",
"_get_post_url",
"(",
"obj",
")",
")",
":",
"if",
"obj",
".",
"parent",
":",
"url",
"=",
"reverse",
"(",
"'admin:filer-directory_listing'",
",",
"kwargs",
"=",
"{",
"'folder_id'",
":",
"obj",
".",
"parent",
".",
"id",
"}",
")",
"else",
":",
"url",
"=",
"reverse",
"(",
"'admin:filer-directory_listing-root'",
")",
"url",
"=",
"\"%s%s%s\"",
"%",
"(",
"url",
",",
"popup_param",
"(",
"request",
")",
",",
"selectfolder_param",
"(",
"request",
",",
"\"&\"",
")",
")",
"return",
"HttpResponseRedirect",
"(",
"url",
")",
"else",
":",
"# this means it probably was a save_and_continue_editing",
"pass",
"return",
"r"
] | Overrides the default to be able to forward to the directory listing
instead of the default change_list_view | [
"Overrides",
"the",
"default",
"to",
"be",
"able",
"to",
"forward",
"to",
"the",
"directory",
"listing",
"instead",
"of",
"the",
"default",
"change_list_view"
] | python | train |

aiogram/aiogram | aiogram/contrib/fsm_storage/redis.py | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/contrib/fsm_storage/redis.py#L77-L94 | python | train

```python
async def get_record(self, *,
                     chat: typing.Union[str, int, None] = None,
                     user: typing.Union[str, int, None] = None) -> typing.Dict:
    """
    Get record from storage

    :param chat:
    :param user:
    :return:
    """
    chat, user = self.check_address(chat=chat, user=user)
    addr = f"fsm:{chat}:{user}"

    conn = await self.redis()
    data = await conn.execute('GET', addr)
    if data is None:
        return {'state': None, 'data': {}}
    return json.loads(data)
```
"async",
"def",
"get_record",
"(",
"self",
",",
"*",
",",
"chat",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
",",
"user",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
")",
"->",
"typing",
".",
"Dict",
":",
"chat",
",",
"user",
"=",
"self",
".",
"check_address",
"(",
"chat",
"=",
"chat",
",",
"user",
"=",
"user",
")",
"addr",
"=",
"f\"fsm:{chat}:{user}\"",
"conn",
"=",
"await",
"self",
".",
"redis",
"(",
")",
"data",
"=",
"await",
"conn",
".",
"execute",
"(",
"'GET'",
",",
"addr",
")",
"if",
"data",
"is",
"None",
":",
"return",
"{",
"'state'",
":",
"None",
",",
"'data'",
":",
"{",
"}",
"}",
"return",
"json",
".",
"loads",
"(",
"data",
")"
] | Get record from storage
:param chat:
:param user:
:return: | [
"Get",
"record",
"from",
"storage"
] | python | train |
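
A storage-free sketch of the same convention — the `fsm:{chat}:{user}` key format and the `{'state': None, 'data': {}}` fallback come from the code above, while a plain dict stands in for Redis:

```python
import json

# The dict plays the role of the Redis connection; keys follow fsm:{chat}:{user}.
fake_redis = {"fsm:100:200": json.dumps({"state": "menu", "data": {"step": 2}})}

def get_record(chat, user):
    raw = fake_redis.get(f"fsm:{chat}:{user}")
    if raw is None:
        return {"state": None, "data": {}}   # default for unknown addresses
    return json.loads(raw)

print(get_record(100, 200))  # {'state': 'menu', 'data': {'step': 2}}
print(get_record(1, 2))      # {'state': None, 'data': {}}
```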

adafruit/Adafruit_Python_LED_Backpack | Adafruit_LED_Backpack/BicolorMatrix8x8.py | https://github.com/adafruit/Adafruit_Python_LED_Backpack/blob/7356b4dd8b4bb162d60987878c2cb752fdd017d5/Adafruit_LED_Backpack/BicolorMatrix8x8.py#L41-L51 | python | train

```python
def set_pixel(self, x, y, value):
    """Set pixel at position x, y to the given value. X and Y should be values
    of 0 to 7. Value should be OFF, GREEN, RED, or YELLOW.
    """
    if x < 0 or x > 7 or y < 0 or y > 7:
        # Ignore out of bounds pixels.
        return
    # Set green LED based on 1st bit in value.
    self.set_led(y * 16 + x, 1 if value & GREEN > 0 else 0)
    # Set red LED based on 2nd bit in value.
    self.set_led(y * 16 + x + 8, 1 if value & RED > 0 else 0)
```
"def",
"set_pixel",
"(",
"self",
",",
"x",
",",
"y",
",",
"value",
")",
":",
"if",
"x",
"<",
"0",
"or",
"x",
">",
"7",
"or",
"y",
"<",
"0",
"or",
"y",
">",
"7",
":",
"# Ignore out of bounds pixels.",
"return",
"# Set green LED based on 1st bit in value.",
"self",
".",
"set_led",
"(",
"y",
"*",
"16",
"+",
"x",
",",
"1",
"if",
"value",
"&",
"GREEN",
">",
"0",
"else",
"0",
")",
"# Set red LED based on 2nd bit in value.",
"self",
".",
"set_led",
"(",
"y",
"*",
"16",
"+",
"x",
"+",
"8",
",",
"1",
"if",
"value",
"&",
"RED",
">",
"0",
"else",
"0",
")"
] | Set pixel at position x, y to the given value. X and Y should be values
of 0 to 8. Value should be OFF, GREEN, RED, or YELLOW. | [
"Set",
"pixel",
"at",
"position",
"x",
"y",
"to",
"the",
"given",
"value",
".",
"X",
"and",
"Y",
"should",
"be",
"values",
"of",
"0",
"to",
"8",
".",
"Value",
"should",
"be",
"OFF",
"GREEN",
"RED",
"or",
"YELLOW",
"."
] | python | train |
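
Each 8-pixel row maps onto 16 LED slots: green at `y * 16 + x`, red at the same position plus 8, and YELLOW lights both. A small sketch of that index arithmetic — the flag values GREEN = 1 and RED = 2 are assumptions inferred from the "1st bit"/"2nd bit" comments above:

```python
# Assumed bit flags, per the comments in set_pixel; YELLOW lights both LEDs.
GREEN, RED = 0x01, 0x02
YELLOW = GREEN | RED

def led_indexes(x, y, value):
    """Return the LED slot numbers that set_pixel would switch on."""
    on = []
    if value & GREEN:
        on.append(y * 16 + x)       # green slots occupy offsets 0-7 of a row
    if value & RED:
        on.append(y * 16 + x + 8)   # red slots occupy offsets 8-15
    return on

print(led_indexes(3, 2, YELLOW))  # [35, 43]: both LEDs lit reads as yellow
```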

saltstack/salt | salt/runners/queue.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/queue.py#L280-L304 | python | valid

```python
def process_runner(quantity=1, queue=None, backend=None):
    '''
    Process queued runners

    quantity
        number of runners to process

    queue
        queue to insert the runner reference into

    backend
        backend to use for the queue

    CLI Example:

    .. code-block:: bash

        salt-run queue.process_runner
        salt-run queue.process_runner 5
    '''
    queue_kwargs = __get_queue_opts(queue=queue, backend=backend)
    data = process_queue(quantity=quantity, is_runner=True, **queue_kwargs)
    for job in data['items']:
        __salt__[job['fun']](*job['args'], **job['kwargs'])
```
"def",
"process_runner",
"(",
"quantity",
"=",
"1",
",",
"queue",
"=",
"None",
",",
"backend",
"=",
"None",
")",
":",
"queue_kwargs",
"=",
"__get_queue_opts",
"(",
"queue",
"=",
"queue",
",",
"backend",
"=",
"backend",
")",
"data",
"=",
"process_queue",
"(",
"quantity",
"=",
"quantity",
",",
"is_runner",
"=",
"True",
",",
"*",
"*",
"queue_kwargs",
")",
"for",
"job",
"in",
"data",
"[",
"'items'",
"]",
":",
"__salt__",
"[",
"job",
"[",
"'fun'",
"]",
"]",
"(",
"*",
"job",
"[",
"'args'",
"]",
",",
"*",
"*",
"job",
"[",
"'kwargs'",
"]",
")"
] | Process queued runners
quantity
number of runners to process
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.process_runner
salt-run queue.process_runner 5 | [
"Process",
"queued",
"runners"
] | python | train |

frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/utilities.py | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/utilities.py#L79-L99 | python | train

```python
def to_motor_distance(self, distance):
    """ Convert distance/s to motor units.

    Converts distance/s to units of motor encoder counts, which is
    what the drive requires when given move instructions.

    Parameters
    ----------
    distance : int, float, or iterable of ints and floats
        The distance/s to convert.

    Returns
    -------
    converted_distance : float or list of floats
        The converted distance/s.
    """
    if isinstance(distance, collections.Iterable):
        return [(x * self._distance_to_motor) for x in distance]
    else:
        return distance * self._distance_to_motor
```
"def",
"to_motor_distance",
"(",
"self",
",",
"distance",
")",
":",
"if",
"isinstance",
"(",
"distance",
",",
"collections",
".",
"Iterable",
")",
":",
"return",
"[",
"(",
"x",
"*",
"self",
".",
"_distance_to_motor",
")",
"for",
"x",
"in",
"distance",
"]",
"else",
":",
"return",
"distance",
"*",
"self",
".",
"_distance_to_motor"
] | Convert distance/s to motor units.
Converts distance/s to units of motor encoder counts, which is
what the drive requires when given move instructions.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s. | [
"Convert",
"distance",
"/",
"s",
"to",
"motor",
"units",
"."
] | python | train |
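
A standalone sketch of the scalar-versus-iterable branch; the 4000.0 counts-per-unit scale factor is invented, standing in for `self._distance_to_motor`, and `collections.abc.Iterable` replaces the `collections.Iterable` spelling that modern Python removed:

```python
import collections.abc

def to_motor_distance(distance, counts_per_unit=4000.0):
    # Iterables are converted element-wise; scalars are scaled directly.
    if isinstance(distance, collections.abc.Iterable):
        return [x * counts_per_unit for x in distance]
    return distance * counts_per_unit

print(to_motor_distance(1.5))         # 6000.0
print(to_motor_distance([0.5, 2.0]))  # [2000.0, 8000.0]
```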

ibis-project/ibis | ibis/expr/api.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L3402-L3502 | python | train

```python
def projection(table, exprs):
    """
    Compute new table expression with the indicated column expressions from
    this table.

    Parameters
    ----------
    exprs : column expression, or string, or list of column expressions and
        strings. If strings passed, must be columns in the table already

    Returns
    -------
    projection : TableExpr

    Notes
    -----
    Passing an aggregate function to this method will broadcast the aggregate's
    value over the number of rows in the table. See the examples section for
    more details.

    Examples
    --------
    Simple projection

    >>> import ibis
    >>> fields = [('a', 'int64'), ('b', 'double')]
    >>> t = ibis.table(fields, name='t')
    >>> proj = t.projection([t.a, (t.b + 1).name('b_plus_1')])
    >>> proj  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : float64
    <BLANKLINE>
    Selection[table]
      table:
        Table: ref_0
      selections:
        a = Column[int64*] 'a' from table
          ref_0
        b_plus_1 = Add[float64*]
          left:
            b = Column[float64*] 'b' from table
              ref_0
          right:
            Literal[int8]
              1
    >>> proj2 = t[t.a, (t.b + 1).name('b_plus_1')]
    >>> proj.equals(proj2)
    True

    Aggregate projection

    >>> agg_proj = t[t.a.sum().name('sum_a'), t.b.mean().name('mean_b')]
    >>> agg_proj  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : float64
    <BLANKLINE>
    Selection[table]
      table:
        Table: ref_0
      selections:
        sum_a = WindowOp[int64*]
          sum_a = Sum[int64]
            a = Column[int64*] 'a' from table
              ref_0
            where:
              None
          <ibis.expr.window.Window object at 0x...>
        mean_b = WindowOp[float64*]
          mean_b = Mean[float64]
            b = Column[float64*] 'b' from table
              ref_0
            where:
              None
          <ibis.expr.window.Window object at 0x...>

    Note the ``<ibis.expr.window.Window>`` objects here, their existence means
    that the result of the aggregation will be broadcast across the number of
    rows in the input column. The purpose of this expression rewrite is to make
    it easy to write column/scalar-aggregate operations like

    .. code-block:: python

       t[(t.a - t.a.mean()).name('demeaned_a')]
    """
    import ibis.expr.analysis as L

    if isinstance(exprs, (Expr, str)):
        exprs = [exprs]

    projector = L.Projector(table, exprs)
    op = projector.get_result()
    return op.to_expr()
```
"def",
"projection",
"(",
"table",
",",
"exprs",
")",
":",
"import",
"ibis",
".",
"expr",
".",
"analysis",
"as",
"L",
"if",
"isinstance",
"(",
"exprs",
",",
"(",
"Expr",
",",
"str",
")",
")",
":",
"exprs",
"=",
"[",
"exprs",
"]",
"projector",
"=",
"L",
".",
"Projector",
"(",
"table",
",",
"exprs",
")",
"op",
"=",
"projector",
".",
"get_result",
"(",
")",
"return",
"op",
".",
"to_expr",
"(",
")"
] | Compute new table expression with the indicated column expressions from
this table.
Parameters
----------
exprs : column expression, or string, or list of column expressions and
strings. If strings passed, must be columns in the table already
Returns
-------
projection : TableExpr
Notes
-----
Passing an aggregate function to this method will broadcast the aggregate's
value over the number of rows in the table. See the examples section for
more details.
Examples
--------
Simple projection
>>> import ibis
>>> fields = [('a', 'int64'), ('b', 'double')]
>>> t = ibis.table(fields, name='t')
>>> proj = t.projection([t.a, (t.b + 1).name('b_plus_1')])
>>> proj # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: t
schema:
a : int64
b : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
a = Column[int64*] 'a' from table
ref_0
b_plus_1 = Add[float64*]
left:
b = Column[float64*] 'b' from table
ref_0
right:
Literal[int8]
1
>>> proj2 = t[t.a, (t.b + 1).name('b_plus_1')]
>>> proj.equals(proj2)
True
Aggregate projection
>>> agg_proj = t[t.a.sum().name('sum_a'), t.b.mean().name('mean_b')]
>>> agg_proj # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
ref_0
UnboundTable[table]
name: t
schema:
a : int64
b : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
sum_a = WindowOp[int64*]
sum_a = Sum[int64]
a = Column[int64*] 'a' from table
ref_0
where:
None
<ibis.expr.window.Window object at 0x...>
mean_b = WindowOp[float64*]
mean_b = Mean[float64]
b = Column[float64*] 'b' from table
ref_0
where:
None
<ibis.expr.window.Window object at 0x...>
Note the ``<ibis.expr.window.Window>`` objects here, their existence means
that the result of the aggregation will be broadcast across the number of
rows in the input column. The purpose of this expression rewrite is to make
it easy to write column/scalar-aggregate operations like
.. code-block:: python
t[(t.a - t.a.mean()).name('demeaned_a')] | [
"Compute",
"new",
"table",
"expression",
"with",
"the",
"indicated",
"column",
"expressions",
"from",
"this",
"table",
"."
] | python | train |

sveetch/djangocodemirror | djangocodemirror/templatetags/djangocodemirror_tags.py | https://github.com/sveetch/djangocodemirror/blob/7d556eec59861b2f619398e837bdd089b3a8a7d7/djangocodemirror/templatetags/djangocodemirror_tags.py#L285-L328 | python | train

```python
def codemirror_instance(config_name, varname, element_id, assets=True):
    """
    Return HTML to init a CodeMirror instance for an element.

    This will output the whole HTML needed to initialize a CodeMirror instance
    with needed assets loading. Assets can be omitted with the ``assets``
    option.

    Example:
        ::

            {% load djangocodemirror_tags %}
            {% codemirror_instance 'a-config-name' 'foo_codemirror' 'foo' %}

    Arguments:
        config_name (string): A registered config name.
        varname (string): A Javascript variable name.
        element_id (string): An HTML element identifier (without
            leading ``#``) to attach to a CodeMirror instance.

    Keyword Arguments:
        assets (Bool): Adds needed assets before the HTML if ``True``, else
            only the CodeMirror instance will be output. Default value is
            ``True``.

    Returns:
        string: HTML.
    """
    output = io.StringIO()

    manifesto = CodemirrorAssetTagRender()
    manifesto.register(config_name)

    if assets:
        output.write(manifesto.css_html())
        output.write(manifesto.js_html())

    html = manifesto.codemirror_html(config_name, varname, element_id)
    output.write(html)

    content = output.getvalue()
    output.close()

    return mark_safe(content)
```
"def",
"codemirror_instance",
"(",
"config_name",
",",
"varname",
",",
"element_id",
",",
"assets",
"=",
"True",
")",
":",
"output",
"=",
"io",
".",
"StringIO",
"(",
")",
"manifesto",
"=",
"CodemirrorAssetTagRender",
"(",
")",
"manifesto",
".",
"register",
"(",
"config_name",
")",
"if",
"assets",
":",
"output",
".",
"write",
"(",
"manifesto",
".",
"css_html",
"(",
")",
")",
"output",
".",
"write",
"(",
"manifesto",
".",
"js_html",
"(",
")",
")",
"html",
"=",
"manifesto",
".",
"codemirror_html",
"(",
"config_name",
",",
"varname",
",",
"element_id",
")",
"output",
".",
"write",
"(",
"html",
")",
"content",
"=",
"output",
".",
"getvalue",
"(",
")",
"output",
".",
"close",
"(",
")",
"return",
"mark_safe",
"(",
"content",
")"
] | Return HTML to init a CodeMirror instance for an element.
This will output the whole HTML needed to initialize a CodeMirror instance
with needed assets loading. Assets can be omitted with the ``assets``
option.
Example:
::
{% load djangocodemirror_tags %}
{% codemirror_instance 'a-config-name' 'foo_codemirror' 'foo' %}
Arguments:
config_name (string): A registred config name.
varname (string): A Javascript variable name.
element_id (string): An HTML element identifier (without
leading ``#``) to attach to a CodeMirror instance.
Keyword Arguments:
assets (Bool): Adds needed assets before the HTML if ``True``, else
only CodeMirror instance will be outputed. Default value is
``True``.
Returns:
string: HTML. | [
"Return",
"HTML",
"to",
"init",
"a",
"CodeMirror",
"instance",
"for",
"an",
"element",
"."
] | python | train |

Parsl/parsl | parsl/dataflow/flow_control.py | https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/flow_control.py#L131-L140 | python | valid

```python
def make_callback(self, kind=None):
    """Makes the callback and resets the timer.

    KWargs:
        - kind (str): Default=None, used to pass information on what
          triggered the callback
    """
    self._wake_up_time = time.time() + self.interval
    self.callback(tasks=self._event_buffer, kind=kind)
    self._event_buffer = []
```
"def",
"make_callback",
"(",
"self",
",",
"kind",
"=",
"None",
")",
":",
"self",
".",
"_wake_up_time",
"=",
"time",
".",
"time",
"(",
")",
"+",
"self",
".",
"interval",
"self",
".",
"callback",
"(",
"tasks",
"=",
"self",
".",
"_event_buffer",
",",
"kind",
"=",
"kind",
")",
"self",
".",
"_event_buffer",
"=",
"[",
"]"
] | Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback | [
"Makes",
"the",
"callback",
"and",
"resets",
"the",
"timer",
"."
] | python | valid |
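
A self-contained sketch of the reset-and-flush pattern, with a stand-in class so it runs outside Parsl (the interval, callback, and buffer contents are all made up):

```python
import time

class Timer:
    """Stand-in for the surrounding FlowControl-style object."""
    def __init__(self, interval, callback):
        self.interval = interval
        self.callback = callback
        self._event_buffer = []
        self._wake_up_time = time.time() + interval

    def make_callback(self, kind=None):
        # Reset the deadline first, then hand the buffered events to the
        # callback and clear the buffer.
        self._wake_up_time = time.time() + self.interval
        self.callback(tasks=self._event_buffer, kind=kind)
        self._event_buffer = []

t = Timer(5.0, lambda tasks, kind: print(kind, tasks))
t._event_buffer.append("task-1")
t.make_callback(kind="timer")  # prints: timer ['task-1']; buffer is now empty
```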

DLR-RM/RAFCON | source/rafcon/gui/mygaphas/utils/gap_draw_helper.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/utils/gap_draw_helper.py#L33-L48 | python | train

```python
def limit_value_string_length(value):
    """This method limits the string representation of the value to MAX_VALUE_LABEL_TEXT_LENGTH + 3 characters.

    :param value: Value to limit string representation
    :return: String holding the value with a maximum length of MAX_VALUE_LABEL_TEXT_LENGTH + 3
    """
    if isinstance(value, string_types) and len(value) > constants.MAX_VALUE_LABEL_TEXT_LENGTH:
        value = value[:constants.MAX_VALUE_LABEL_TEXT_LENGTH] + "..."
        final_string = " " + value + " "
    elif isinstance(value, (dict, list)) and len(str(value)) > constants.MAX_VALUE_LABEL_TEXT_LENGTH:
        value_text = str(value)[:constants.MAX_VALUE_LABEL_TEXT_LENGTH] + "..."
        final_string = " " + value_text + " "
    else:
        final_string = " " + str(value) + " "
    return final_string
```
"def",
"limit_value_string_length",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
"and",
"len",
"(",
"value",
")",
">",
"constants",
".",
"MAX_VALUE_LABEL_TEXT_LENGTH",
":",
"value",
"=",
"value",
"[",
":",
"constants",
".",
"MAX_VALUE_LABEL_TEXT_LENGTH",
"]",
"+",
"\"...\"",
"final_string",
"=",
"\" \"",
"+",
"value",
"+",
"\" \"",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"dict",
",",
"list",
")",
")",
"and",
"len",
"(",
"str",
"(",
"value",
")",
")",
">",
"constants",
".",
"MAX_VALUE_LABEL_TEXT_LENGTH",
":",
"value_text",
"=",
"str",
"(",
"value",
")",
"[",
":",
"constants",
".",
"MAX_VALUE_LABEL_TEXT_LENGTH",
"]",
"+",
"\"...\"",
"final_string",
"=",
"\" \"",
"+",
"value_text",
"+",
"\" \"",
"else",
":",
"final_string",
"=",
"\" \"",
"+",
"str",
"(",
"value",
")",
"+",
"\" \"",
"return",
"final_string"
] | This method limits the string representation of the value to MAX_VALUE_LABEL_TEXT_LENGTH + 3 characters.
:param value: Value to limit string representation
:return: String holding the value with a maximum length of MAX_VALUE_LABEL_TEXT_LENGTH + 3 | [
"This",
"method",
"limits",
"the",
"string",
"representation",
"of",
"the",
"value",
"to",
"MAX_VALUE_LABEL_TEXT_LENGTH",
"+",
"3",
"characters",
"."
] | python | train |
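
A runnable sketch with the constant inlined — 10 stands in for `constants.MAX_VALUE_LABEL_TEXT_LENGTH`, and `str` replaces the `string_types` compatibility shim:

```python
MAX_LEN = 10  # stand-in for constants.MAX_VALUE_LABEL_TEXT_LENGTH

def limit_value_string_length(value):
    # Long strings and long dict/list reprs are truncated and marked with "...".
    if isinstance(value, str) and len(value) > MAX_LEN:
        return " " + value[:MAX_LEN] + "... "
    if isinstance(value, (dict, list)) and len(str(value)) > MAX_LEN:
        return " " + str(value)[:MAX_LEN] + "... "
    return " " + str(value) + " "

print(repr(limit_value_string_length("short")))             # ' short '
print(repr(limit_value_string_length("a-rather-long-id")))  # ' a-rather-l... '
```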

googleapis/google-cloud-python | logging/google/cloud/logging_v2/gapic/logging_service_v2_client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging_v2/gapic/logging_service_v2_client.py#L416-L544 | python | train

```python
def list_log_entries(
    self,
    resource_names,
    project_ids=None,
    filter_=None,
    order_by=None,
    page_size=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Lists log entries. Use this method to retrieve log entries from Logging.
    For ways to export log entries, see `Exporting
    Logs <https://cloud.google.com/logging/docs/export>`__.

    Example:
        >>> from google.cloud import logging_v2
        >>>
        >>> client = logging_v2.LoggingServiceV2Client()
        >>>
        >>> # TODO: Initialize `resource_names`:
        >>> resource_names = []
        >>>
        >>> # Iterate over all results
        >>> for element in client.list_log_entries(resource_names):
        ...     # process element
        ...     pass
        >>>
        >>>
        >>> # Alternatively:
        >>>
        >>> # Iterate over results one page at a time
        >>> for page in client.list_log_entries(resource_names).pages:
        ...     for element in page:
        ...         # process element
        ...         pass

    Args:
        resource_names (list[str]): Required. Names of one or more parent resources from which to retrieve
            log entries:

            ::

                "projects/[PROJECT_ID]"
                "organizations/[ORGANIZATION_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]"
                "folders/[FOLDER_ID]"

            Projects listed in the ``project_ids`` field are added to this list.
        project_ids (list[str]): Deprecated. Use ``resource_names`` instead. One or more project
            identifiers or project numbers from which to retrieve log entries.
            Example: ``"my-project-1A"``. If present, these project identifiers are
            converted to resource name format and added to the list of resources in
            ``resource_names``.
        filter_ (str): Optional. A filter that chooses which log entries to return. See
            `Advanced Logs
            Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
            Only log entries that match the filter are returned. An empty filter
            matches all log entries in the resources listed in ``resource_names``.
            Referencing a parent resource that is not listed in ``resource_names``
            will cause the filter to return no results. The maximum length of the
            filter is 20000 characters.
        order_by (str): Optional. How the results should be sorted. Presently, the only
            permitted values are ``"timestamp asc"`` (default) and
            ``"timestamp desc"``. The first option returns entries in order of
            increasing values of ``LogEntry.timestamp`` (oldest first), and the
            second option returns entries in order of decreasing timestamps (newest
            first). Entries with equal timestamps are returned in order of their
            ``insert_id`` values.
        page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed per-
            resource, this parameter does not affect the return value. If page
            streaming is performed per-page, this determines the maximum number
            of resources in a page.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.gax.PageIterator` instance. By default, this
        is an iterable of :class:`~google.cloud.logging_v2.types.LogEntry` instances.
        This object can also be configured to iterate over the pages
        of the response through the `options` parameter.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "list_log_entries" not in self._inner_api_calls:
        self._inner_api_calls[
            "list_log_entries"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.list_log_entries,
            default_retry=self._method_configs["ListLogEntries"].retry,
            default_timeout=self._method_configs["ListLogEntries"].timeout,
            client_info=self._client_info,
        )

    request = logging_pb2.ListLogEntriesRequest(
        resource_names=resource_names,
        project_ids=project_ids,
        filter=filter_,
        order_by=order_by,
        page_size=page_size,
    )
    iterator = google.api_core.page_iterator.GRPCIterator(
        client=None,
        method=functools.partial(
            self._inner_api_calls["list_log_entries"],
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        ),
        request=request,
        items_field="entries",
        request_token_field="page_token",
        response_token_field="next_page_token",
    )
    return iterator
```
"def",
"list_log_entries",
"(",
"self",
",",
"resource_names",
",",
"project_ids",
"=",
"None",
",",
"filter_",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"page_size",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"list_log_entries\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"list_log_entries\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"list_log_entries",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"ListLogEntries\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"ListLogEntries\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"logging_pb2",
".",
"ListLogEntriesRequest",
"(",
"resource_names",
"=",
"resource_names",
",",
"project_ids",
"=",
"project_ids",
",",
"filter",
"=",
"filter_",
",",
"order_by",
"=",
"order_by",
",",
"page_size",
"=",
"page_size",
",",
")",
"iterator",
"=",
"google",
".",
"api_core",
".",
"page_iterator",
".",
"GRPCIterator",
"(",
"client",
"=",
"None",
",",
"method",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"_inner_api_calls",
"[",
"\"list_log_entries\"",
"]",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
",",
"request",
"=",
"request",
",",
"items_field",
"=",
"\"entries\"",
",",
"request_token_field",
"=",
"\"page_token\"",
",",
"response_token_field",
"=",
"\"next_page_token\"",
",",
")",
"return",
"iterator"
] | Lists log entries. Use this method to retrieve log entries from Logging.
For ways to export log entries, see `Exporting
Logs <https://cloud.google.com/logging/docs/export>`__.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # TODO: Initialize `resource_names`:
>>> resource_names = []
>>>
>>> # Iterate over all results
>>> for element in client.list_log_entries(resource_names):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_log_entries(resource_names).pages:
... for element in page:
... # process element
... pass
Args:
resource_names (list[str]): Required. Names of one or more parent resources from which to retrieve
log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
Projects listed in the ``project_ids`` field are added to this list.
project_ids (list[str]): Deprecated. Use ``resource_names`` instead. One or more project
identifiers or project numbers from which to retrieve log entries.
Example: ``"my-project-1A"``. If present, these project identifiers are
converted to resource name format and added to the list of resources in
``resource_names``.
filter_ (str): Optional. A filter that chooses which log entries to return. See
`Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Only log entries that match the filter are returned. An empty filter
matches all log entries in the resources listed in ``resource_names``.
Referencing a parent resource that is not listed in ``resource_names``
will cause the filter to return no results. The maximum length of the
filter is 20000 characters.
order_by (str): Optional. How the results should be sorted. Presently, the only
permitted values are ``"timestamp asc"`` (default) and
``"timestamp desc"``. The first option returns entries in order of
increasing values of ``LogEntry.timestamp`` (oldest first), and the
second option returns entries in order of decreasing timestamps (newest
first). Entries with equal timestamps are returned in order of their
``insert_id`` values.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.logging_v2.types.LogEntry` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Lists",
"log",
"entries",
".",
"Use",
"this",
"method",
"to",
"retrieve",
"log",
"entries",
"from",
"Logging",
".",
"For",
"ways",
"to",
"export",
"log",
"entries",
"see",
"Exporting",
"Logs",
"<https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"logging",
"/",
"docs",
"/",
"export",
">",
"__",
"."
] | python | train |

openstack/python-scciclient | scciclient/irmc/ipmi.py | https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/ipmi.py#L129-L170 | python | train

```python
def get_pci_device(d_info, pci_device_ids):
    """Get quantity of PCI devices.

    Get quantity of PCI devices of the node.

    :param d_info: the list of ipmitool parameters for accessing a node.
    :param pci_device_ids: the list contains pairs of <vendorID>/<deviceID> for
        PCI devices.
    :returns: the number of PCI devices.
    """
    # note:
    # Get quantity of PCI devices:
    # ipmi cmd '0xF1'
    #
    # $ ipmitool raw 0x2E 0xF1 0x80 0x28 0x00 0x1A 0x01 0x00
    #
    # Raw response:
    # 80 28 00 00 00 05 data1 data2 34 17 76 11 00 04
    # 01
    # data1: 2 octet of VendorID
    # data2: 2 octet of DeviceID

    ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
                                   userid=d_info['irmc_username'],
                                   password=d_info['irmc_password'])

    response = itertools.takewhile(
        lambda y: (y[1]['code'] != 0xC9 and y[1].get('error') is None),
        _pci_seq(ipmicmd))

    def _pci_count(accm, v):
        out = v[1]['data']
        # if system returns value, record id will be increased.
        pci_id = "0x{:02x}{:02x}/0x{:02x}{:02x}".format(
            out[7], out[6], out[9], out[8])
        return accm + 1 if pci_id in pci_device_ids else accm

    device_count = functools.reduce(_pci_count, response, 0)

    return device_count
```
"def",
"get_pci_device",
"(",
"d_info",
",",
"pci_device_ids",
")",
":",
"# note:",
"# Get quantity of PCI devices:",
"# ipmi cmd '0xF1'",
"#",
"# $ ipmitool raw 0x2E 0xF1 0x80 0x28 0x00 0x1A 0x01 0x00",
"#",
"# Raw response:",
"# 80 28 00 00 00 05 data1 data2 34 17 76 11 00 04",
"# 01",
"# data1: 2 octet of VendorID",
"# data2: 2 octet of DeviceID",
"ipmicmd",
"=",
"ipmi_command",
".",
"Command",
"(",
"bmc",
"=",
"d_info",
"[",
"'irmc_address'",
"]",
",",
"userid",
"=",
"d_info",
"[",
"'irmc_username'",
"]",
",",
"password",
"=",
"d_info",
"[",
"'irmc_password'",
"]",
")",
"response",
"=",
"itertools",
".",
"takewhile",
"(",
"lambda",
"y",
":",
"(",
"y",
"[",
"1",
"]",
"[",
"'code'",
"]",
"!=",
"0xC9",
"and",
"y",
"[",
"1",
"]",
".",
"get",
"(",
"'error'",
")",
"is",
"None",
")",
",",
"_pci_seq",
"(",
"ipmicmd",
")",
")",
"def",
"_pci_count",
"(",
"accm",
",",
"v",
")",
":",
"out",
"=",
"v",
"[",
"1",
"]",
"[",
"'data'",
"]",
"# if system returns value, record id will be increased.",
"pci_id",
"=",
"\"0x{:02x}{:02x}/0x{:02x}{:02x}\"",
".",
"format",
"(",
"out",
"[",
"7",
"]",
",",
"out",
"[",
"6",
"]",
",",
"out",
"[",
"9",
"]",
",",
"out",
"[",
"8",
"]",
")",
"return",
"accm",
"+",
"1",
"if",
"pci_id",
"in",
"pci_device_ids",
"else",
"accm",
"device_count",
"=",
"functools",
".",
"reduce",
"(",
"_pci_count",
",",
"response",
",",
"0",
")",
"return",
"device_count"
] | Get quantity of PCI devices.
Get quantity of PCI devices of the node.
:param d_info: the list of ipmitool parameters for accessing a node.
:param pci_device_ids: the list contains pairs of <vendorID>/<deviceID> for
PCI devices.
:returns: the number of PCI devices. | [
"Get",
"quantity",
"of",
"PCI",
"devices",
"."
] | python | train |
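
The counting step is a plain `functools.reduce` over response payloads; a hardware-free sketch with canned byte lists (the vendor/device values are made up, and bytes 6–9 carry little-endian VendorID and DeviceID as described in the comments above):

```python
import functools

# Canned payloads standing in for the IPMI response stream.
responses = [
    {"data": [0x80, 0x28, 0x00, 0x00, 0x00, 0x05, 0x34, 0x17, 0x76, 0x11]},
    {"data": [0x80, 0x28, 0x00, 0x00, 0x00, 0x05, 0x86, 0x80, 0x10, 0x15]},
]
wanted = ["0x1734/0x1176"]  # <vendorID>/<deviceID> pairs, as in pci_device_ids

def count_matches(accm, out):
    d = out["data"]
    pci_id = "0x{:02x}{:02x}/0x{:02x}{:02x}".format(d[7], d[6], d[9], d[8])
    return accm + 1 if pci_id in wanted else accm

print(functools.reduce(count_matches, responses, 0))  # 1
```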

peerplays-network/python-peerplays | peerplays/cli/bookie.py | https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/cli/bookie.py#L49-L55 | python | train

```python
def events(ctx, eventgroup):
    """ [bookie] List events for an event group

    :param str eventgroup: Event Group id
    """
    eg = EventGroup(eventgroup, peerplays_instance=ctx.peerplays)
    click.echo(pretty_print(eg.events, ctx=ctx))
```
"def",
"events",
"(",
"ctx",
",",
"eventgroup",
")",
":",
"eg",
"=",
"EventGroup",
"(",
"eventgroup",
",",
"peerplays_instance",
"=",
"ctx",
".",
"peerplays",
")",
"click",
".",
"echo",
"(",
"pretty_print",
"(",
"eg",
".",
"events",
",",
"ctx",
"=",
"ctx",
")",
")"
] | [bookie] List events for an event group
:param str eventgroup: Event Group id | [
"[",
"bookie",
"]",
"List",
"events",
"for",
"an",
"event",
"group"
] | python | train |

Holzhaus/python-cmuclmtk | cmuclmtk/__init__.py | https://github.com/Holzhaus/python-cmuclmtk/blob/67a5c6713c497ca644ea1c697a70e8d930c9d4b4/cmuclmtk/__init__.py#L448-L475 | python | train

```python
def binlm2arpa(input_file, output_file, verbosity=2):
    """
    Converts a binary format language model, as generated by idngram2lm, into an ARPA format language model.
    """
    cmd = ['binlm2arpa', '-binary', input_file,
           '-arpa', output_file]

    if verbosity:
        cmd.extend(['-verbosity', verbosity])

    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]

    with tempfile.SpooledTemporaryFile() as output_f:
        with output_to_debuglogger() as err_f:
            exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f)
        output_f.seek(0)  # rewind before reading back the subprocess output
        output = output_f.read()

    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))

    if exitcode != 0:
        raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))

    if sys.version_info >= (3,) and type(output) is bytes:
        output = output.decode('utf-8')

    return output.strip()
```
"def",
"binlm2arpa",
"(",
"input_file",
",",
"output_file",
",",
"verbosity",
"=",
"2",
")",
":",
"cmd",
"=",
"[",
"'binlm2arpa'",
",",
"'-binary'",
",",
"input_file",
",",
"'-arpa'",
".",
"output_file",
"]",
"if",
"verbosity",
":",
"cmd",
".",
"extend",
"(",
"[",
"'-verbosity'",
",",
"verbosity",
"]",
")",
"# Ensure that every parameter is of type 'str'",
"cmd",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"cmd",
"]",
"with",
"tempfile",
".",
"SpooledTemporaryFile",
"(",
")",
"as",
"output_f",
":",
"with",
"output_to_debuglogger",
"(",
")",
"as",
"err_f",
":",
"exitcode",
"=",
"subprocess",
".",
"call",
"(",
"cmd",
",",
"stdout",
"=",
"output_f",
",",
"stderr",
"=",
"err_f",
")",
"output",
"=",
"output_f",
".",
"read",
"(",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Command '%s' returned with exit code '%d'.\"",
"%",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
",",
"exitcode",
")",
")",
"if",
"exitcode",
"!=",
"0",
":",
"raise",
"ConversionError",
"(",
"\"'%s' returned with non-zero exit status '%s'\"",
"%",
"(",
"cmd",
"[",
"0",
"]",
",",
"exitcode",
")",
")",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
")",
"and",
"type",
"(",
"output",
")",
"is",
"bytes",
":",
"output",
"=",
"output",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"output",
".",
"strip",
"(",
")"
] | Converts a binary format language model, as generated by idngram2lm, into an an ARPA format language model. | [
"Converts",
"a",
"binary",
"format",
"language",
"model",
"as",
"generated",
"by",
"idngram2lm",
"into",
"an",
"an",
"ARPA",
"format",
"language",
"model",
"."
] | python | train |

shonenada/flask-rbac | flask_rbac/__init__.py | https://github.com/shonenada/flask-rbac/blob/e085121ff11825114e2d6f8419f0b6de6f9ba476/flask_rbac/__init__.py#L91-L98 | python | valid

```python
def is_allowed(self, role, method, resource):
    """Check whether role is allowed to access resource

    :param role: Role to be checked.
    :param method: Method to be checked.
    :param resource: View function to be checked.
    """
    return (role, method, resource) in self._allowed
```
"def",
"is_allowed",
"(",
"self",
",",
"role",
",",
"method",
",",
"resource",
")",
":",
"return",
"(",
"role",
",",
"method",
",",
"resource",
")",
"in",
"self",
".",
"_allowed"
] | Check whether role is allowed to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked. | [
"Check",
"whether",
"role",
"is",
"allowed",
"to",
"access",
"resource"
] | python | valid |
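
A minimal self-contained sketch of the backing set this membership test relies on (the class, role, and resource names are made up):

```python
class SimpleAcl:
    """Stand-in for the ACL object holding the _allowed set of triples."""
    def __init__(self):
        self._allowed = set()

    def allow(self, role, method, resource):
        self._allowed.add((role, method, resource))

    def is_allowed(self, role, method, resource):
        return (role, method, resource) in self._allowed

acl = SimpleAcl()
acl.allow("editor", "GET", "post_view")
print(acl.is_allowed("editor", "GET", "post_view"))   # True
print(acl.is_allowed("reader", "POST", "post_view"))  # False
```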

HPAC/matchpy | matchpy/expressions/expressions.py | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/expressions.py#L755-L770 | python | train

```python
def optional(name, default) -> 'Wildcard':
    """Create a `Wildcard` that matches a single argument with a default value.

    If the wildcard does not match, the substitution will contain the
    default value instead.

    Args:
        name:
            The name for the wildcard.
        default:
            The default value of the wildcard.

    Returns:
        An optional wildcard.
    """
    return Wildcard(min_count=1, fixed_size=True, variable_name=name, optional=default)
```
"def",
"optional",
"(",
"name",
",",
"default",
")",
"->",
"'Wildcard'",
":",
"return",
"Wildcard",
"(",
"min_count",
"=",
"1",
",",
"fixed_size",
"=",
"True",
",",
"variable_name",
"=",
"name",
",",
"optional",
"=",
"default",
")"
] | Create a `Wildcard` that matches a single argument with a default value.
If the wildcard does not match, the substitution will contain the
default value instead.
Args:
name:
The name for the wildcard.
default:
The default value of the wildcard.
Returns:
A n optional wildcard. | [
"Create",
"a",
"Wildcard",
"that",
"matches",
"a",
"single",
"argument",
"with",
"a",
"default",
"value",
"."
] | python | train |

wrobstory/vincent | vincent/visualization.py | https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/visualization.py#L77-L91 | python | train

```python
def viewport(value):
    """2-element list of ints : Dimensions of the viewport

    The viewport is a bounding box containing the visualization. If the
    dimensions of the visualization are larger than the viewport, then
    the visualization will be scrollable.

    If undefined, then the full visualization is shown.
    """
    if len(value) != 2:
        raise ValueError('viewport must have 2 dimensions')
    for v in value:
        _assert_is_type('viewport dimension', v, int)
        if v < 0:
            raise ValueError('viewport dimensions cannot be negative')
```
"def",
"viewport",
"(",
"value",
")",
":",
"if",
"len",
"(",
"value",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'viewport must have 2 dimensions'",
")",
"for",
"v",
"in",
"value",
":",
"_assert_is_type",
"(",
"'viewport dimension'",
",",
"v",
",",
"int",
")",
"if",
"v",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'viewport dimensions cannot be negative'",
")"
] | 2-element list of ints : Dimensions of the viewport
The viewport is a bounding box containing the visualization. If the
dimensions of the visualization are larger than the viewport, then
the visualization will be scrollable.
If undefined, then the full visualization is shown. | [
"2",
"-",
"element",
"list",
"of",
"ints",
":",
"Dimensions",
"of",
"the",
"viewport"
] | python | train |
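A runnable sketch of the validation above; _assert_is_type is defined elsewhere in vincent, so a minimal type-checking stand-in is assumed here:

def _assert_is_type(name, value, expected_type):
    # Assumed behavior of vincent's helper: raise on a type mismatch.
    if not isinstance(value, expected_type):
        raise ValueError('{0} must be {1}'.format(name, expected_type.__name__))

def validate_viewport(value):
    if len(value) != 2:
        raise ValueError('viewport must have 2 dimensions')
    for v in value:
        _assert_is_type('viewport dimension', v, int)
        if v < 0:
            raise ValueError('viewport dimensions cannot be negative')

validate_viewport([800, 600])        # passes silently
try:
    validate_viewport([800, -1])
except ValueError as exc:
    print(exc)                       # viewport dimensions cannot be negative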
openai/universe | universe/wrappers/recording.py | https://github.com/openai/universe/blob/cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c/universe/wrappers/recording.py#L77-L88 | def _get_writer(self, i):
"""
Returns a tuple of (log_fn, log_f, bin_fn, bin_f) to be written to by vectorized env channel i
Or all Nones if recording is inactive on that channel
"""
if self._recording_dir is None:
return None
if self._log_n is None:
self._log_n = [None] * self.n
if self._log_n[i] is None:
self._log_n[i] = RecordingWriter(self._recording_dir, self._instance_id, i, async_write=self._async_write)
return self._log_n[i] | [
"def",
"_get_writer",
"(",
"self",
",",
"i",
")",
":",
"if",
"self",
".",
"_recording_dir",
"is",
"None",
":",
"return",
"None",
"if",
"self",
".",
"_log_n",
"is",
"None",
":",
"self",
".",
"_log_n",
"=",
"[",
"None",
"]",
"*",
"self",
".",
"n",
"if",
"self",
".",
"_log_n",
"[",
"i",
"]",
"is",
"None",
":",
"self",
".",
"_log_n",
"[",
"i",
"]",
"=",
"RecordingWriter",
"(",
"self",
".",
"_recording_dir",
",",
"self",
".",
"_instance_id",
",",
"i",
",",
"async_write",
"=",
"self",
".",
"_async_write",
")",
"return",
"self",
".",
"_log_n",
"[",
"i",
"]"
] | Returns a tuple of (log_fn, log_f, bin_fn, bin_f) to be written to by vectorized env channel i
Or all Nones if recording is inactive on that channel | [
"Returns",
"a",
"tuple",
"of",
"(",
"log_fn",
"log_f",
"bin_fn",
"bin_f",
")",
"to",
"be",
"written",
"to",
"by",
"vectorized",
"env",
"channel",
"i",
"Or",
"all",
"Nones",
"if",
"recording",
"is",
"inactive",
"on",
"that",
"channel"
] | python | train |
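The method above follows a lazy per-channel initialization pattern: the slot list is allocated on first use, and each slot is filled only when its channel is first requested. A generic sketch with illustrative names:

class ChannelPool:
    def __init__(self, n, factory):
        self.n = n
        self._factory = factory
        self._slots = None

    def get(self, i):
        if self._slots is None:
            self._slots = [None] * self.n      # allocate lazily, once
        if self._slots[i] is None:
            self._slots[i] = self._factory(i)  # create on first access
        return self._slots[i]

pool = ChannelPool(3, factory=lambda i: 'writer-%d' % i)
print(pool.get(1))  # writer-1 (created now)
print(pool.get(1))  # writer-1 (same cached object)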
bhmm/bhmm | bhmm/util/statistics.py | https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/util/statistics.py#L108-L151 | def confidence_interval_arr(data, conf=0.95):
r""" Computes element-wise confidence intervals from a sample of ndarrays
Given a sample of arbitrarily shaped ndarrays, computes element-wise confidence intervals
Parameters
----------
data : ndarray (K, (shape))
ndarray of ndarrays, the first index is a sample index, the remaining indexes are specific to the
array of interest
conf : float, optional, default = 0.95
confidence interval
Return
------
lower : ndarray(shape)
element-wise lower bounds
upper : ndarray(shape)
element-wise upper bounds
"""
if conf < 0 or conf > 1:
raise ValueError('Not a meaningful confidence level: '+str(conf))
# list or 1D-array? then fuse it
if types.is_list(data) or (isinstance(data, np.ndarray) and np.ndim(data) == 1):
newshape = tuple([len(data)] + list(data[0].shape))
newdata = np.zeros(newshape)
for i in range(len(data)):
newdata[i, :] = data[i]
data = newdata
# do we have an array now? if yes go, if no fail
if types.is_float_array(data):
I = _indexes(data[0])
lower = np.zeros(data[0].shape)
upper = np.zeros(data[0].shape)
for i in I:
col = _column(data, i)
m, lower[i], upper[i] = confidence_interval(col, conf)
# return
return lower, upper
else:
raise TypeError('data cannot be converted to an ndarray') | [
"def",
"confidence_interval_arr",
"(",
"data",
",",
"conf",
"=",
"0.95",
")",
":",
"if",
"conf",
"<",
"0",
"or",
"conf",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Not a meaningful confidence level: '",
"+",
"str",
"(",
"conf",
")",
")",
"# list or 1D-array? then fuse it",
"if",
"types",
".",
"is_list",
"(",
"data",
")",
"or",
"(",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"np",
".",
"ndim",
"(",
"data",
")",
"==",
"1",
")",
":",
"newshape",
"=",
"tuple",
"(",
"[",
"len",
"(",
"data",
")",
"]",
"+",
"list",
"(",
"data",
"[",
"0",
"]",
".",
"shape",
")",
")",
"newdata",
"=",
"np",
".",
"zeros",
"(",
"newshape",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"newdata",
"[",
"i",
",",
":",
"]",
"=",
"data",
"[",
"i",
"]",
"data",
"=",
"newdata",
"# do we have an array now? if yes go, if no fail",
"if",
"types",
".",
"is_float_array",
"(",
"data",
")",
":",
"I",
"=",
"_indexes",
"(",
"data",
"[",
"0",
"]",
")",
"lower",
"=",
"np",
".",
"zeros",
"(",
"data",
"[",
"0",
"]",
".",
"shape",
")",
"upper",
"=",
"np",
".",
"zeros",
"(",
"data",
"[",
"0",
"]",
".",
"shape",
")",
"for",
"i",
"in",
"I",
":",
"col",
"=",
"_column",
"(",
"data",
",",
"i",
")",
"m",
",",
"lower",
"[",
"i",
"]",
",",
"upper",
"[",
"i",
"]",
"=",
"confidence_interval",
"(",
"col",
",",
"conf",
")",
"# return",
"return",
"lower",
",",
"upper",
"else",
":",
"raise",
"TypeError",
"(",
"'data cannot be converted to an ndarray'",
")"
] | r""" Computes element-wise confidence intervals from a sample of ndarrays
Given a sample of arbitrarily shaped ndarrays, computes element-wise confidence intervals
Parameters
----------
data : ndarray (K, (shape))
ndarray of ndarrays, the first index is a sample index, the remaining indexes are specific to the
array of interest
conf : float, optional, default = 0.95
confidence interval
Return
------
lower : ndarray(shape)
element-wise lower bounds
upper : ndarray(shape)
element-wise upper bounds | [
"r",
"Computes",
"element",
"-",
"wise",
"confidence",
"intervals",
"from",
"a",
"sample",
"of",
"ndarrays"
] | python | train |
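The per-element confidence_interval helper called above is defined elsewhere in bhmm; the sketch below approximates the same element-wise bounds with sample percentiles, which is enough to show the shape handling:

import numpy as np

def confidence_interval_arr_sketch(data, conf=0.95):
    data = np.asarray(data)                   # shape (K, ...): K samples
    lo = 100 * (1.0 - conf) / 2.0
    hi = 100 - lo
    lower = np.percentile(data, lo, axis=0)   # element-wise lower bound
    upper = np.percentile(data, hi, axis=0)   # element-wise upper bound
    return lower, upper

samples = np.random.RandomState(0).normal(size=(1000, 2, 2))
lower, upper = confidence_interval_arr_sketch(samples, conf=0.95)
print(lower.shape, upper.shape)  # (2, 2) (2, 2)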
djgagne/hagelslag | hagelslag/processing/ObjectMatcher.py | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/ObjectMatcher.py#L190-L224 | def match(self, set_a, set_b):
"""
For each step in each track from set_a, identify all steps in all tracks from set_b that meet all
cost function criteria
Args:
set_a: List of STObjects
set_b: List of STObjects
Returns:
track_pairings: pandas.DataFrame
"""
track_step_matches = [[] * len(set_a)]
costs = self.cost_matrix(set_a, set_b)
valid_costs = np.all(costs < 1, axis=2)
set_a_matches, set_b_matches = np.where(valid_costs)
s = 0
track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]),
columns=["Track", "Step", "Time", "Matched", "Pairings"], dtype=object)
set_b_info = []
for trb, track_b in enumerate(set_b):
for t, time in enumerate(track_b.times):
set_b_info.append((trb, t))
set_b_info_arr = np.array(set_b_info, dtype=int)
for tr, track_a in enumerate(set_a):
for t, time in enumerate(track_a.times):
track_pairings.loc[s, ["Track", "Step", "Time"]] = [tr, t, time]
track_pairings.loc[s, "Matched"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0
if track_pairings.loc[s, "Matched"] == 1:
track_pairings.loc[s, "Pairings"] = set_b_info_arr[set_b_matches[set_a_matches == s]]
else:
track_pairings.loc[s, "Pairings"] = np.array([])
s += 1
return track_pairings | [
"def",
"match",
"(",
"self",
",",
"set_a",
",",
"set_b",
")",
":",
"track_step_matches",
"=",
"[",
"[",
"]",
"*",
"len",
"(",
"set_a",
")",
"]",
"costs",
"=",
"self",
".",
"cost_matrix",
"(",
"set_a",
",",
"set_b",
")",
"valid_costs",
"=",
"np",
".",
"all",
"(",
"costs",
"<",
"1",
",",
"axis",
"=",
"2",
")",
"set_a_matches",
",",
"set_b_matches",
"=",
"np",
".",
"where",
"(",
"valid_costs",
")",
"s",
"=",
"0",
"track_pairings",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"np",
".",
"arange",
"(",
"costs",
".",
"shape",
"[",
"0",
"]",
")",
",",
"columns",
"=",
"[",
"\"Track\"",
",",
"\"Step\"",
",",
"\"Time\"",
",",
"\"Matched\"",
",",
"\"Pairings\"",
"]",
",",
"dtype",
"=",
"object",
")",
"set_b_info",
"=",
"[",
"]",
"for",
"trb",
",",
"track_b",
"in",
"enumerate",
"(",
"set_b",
")",
":",
"for",
"t",
",",
"time",
"in",
"enumerate",
"(",
"track_b",
".",
"times",
")",
":",
"set_b_info",
".",
"append",
"(",
"(",
"trb",
",",
"t",
")",
")",
"set_b_info_arr",
"=",
"np",
".",
"array",
"(",
"set_b_info",
",",
"dtype",
"=",
"int",
")",
"for",
"tr",
",",
"track_a",
"in",
"enumerate",
"(",
"set_a",
")",
":",
"for",
"t",
",",
"time",
"in",
"enumerate",
"(",
"track_a",
".",
"times",
")",
":",
"track_pairings",
".",
"loc",
"[",
"s",
",",
"[",
"\"Track\"",
",",
"\"Step\"",
",",
"\"Time\"",
"]",
"]",
"=",
"[",
"tr",
",",
"t",
",",
"time",
"]",
"track_pairings",
".",
"loc",
"[",
"s",
",",
"\"Matched\"",
"]",
"=",
"1",
"if",
"np",
".",
"count_nonzero",
"(",
"set_a_matches",
"==",
"s",
")",
">",
"0",
"else",
"0",
"if",
"track_pairings",
".",
"loc",
"[",
"s",
",",
"\"Matched\"",
"]",
"==",
"1",
":",
"track_pairings",
".",
"loc",
"[",
"s",
",",
"\"Pairings\"",
"]",
"=",
"set_b_info_arr",
"[",
"set_b_matches",
"[",
"set_a_matches",
"==",
"s",
"]",
"]",
"else",
":",
"track_pairings",
".",
"loc",
"[",
"s",
",",
"\"Pairings\"",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"s",
"+=",
"1",
"return",
"track_pairings"
] | For each step in each track from set_a, identify all steps in all tracks from set_b that meet all
cost function criteria
Args:
set_a: List of STObjects
set_b: List of STObjects
Returns:
track_pairings: pandas.DataFrame | [
"For",
"each",
"step",
"in",
"each",
"track",
"from",
"set_a",
"identify",
"all",
"steps",
"in",
"all",
"tracks",
"from",
"set_b",
"that",
"meet",
"all",
"cost",
"function",
"criteria",
"Args",
":",
"set_a",
":",
"List",
"of",
"STObjects",
"set_b",
":",
"List",
"of",
"STObjects"
] | python | train |
Spinmob/spinmob | _data.py | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L1171-L1200 | def trim(self, *conditions):
"""
Removes data points not satisfying the supplied conditions. Conditions
can be truth arrays (having the same length as the columns!)
or scripted strings.
Example Workflow
----------------
d1 = spinmob.data.load()
d2 = d1.trim( (2<d1[0]) & (d1[0]<10) | (d1[3]==22), 'sin(d[2])*h("gain")<32.2')
Note this will not modify the databox, rather it will generate a new
one with the same header information and return it.
"""
conditions = list(conditions)
# if necessary, evaluate string scripts
for n in range(len(conditions)):
if type(conditions[n]) is str:
conditions[n] = self.execute_script(conditions[n])
# make a new databox with the same options and headers
new_databox = databox(delimiter=self.delimiter)
new_databox.copy_headers(self)
# trim it up, send it out.
cs = _s.fun.trim_data_uber(self, conditions)
for n in range(len(cs)): new_databox.append_column(cs[n], self.ckeys[n])
return new_databox | [
"def",
"trim",
"(",
"self",
",",
"*",
"conditions",
")",
":",
"conditions",
"=",
"list",
"(",
"conditions",
")",
"# if necessary, evaluate string scripts",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"conditions",
")",
")",
":",
"if",
"type",
"(",
"conditions",
"[",
"n",
"]",
")",
"is",
"str",
":",
"conditions",
"[",
"n",
"]",
"=",
"self",
".",
"execute_script",
"(",
"conditions",
"[",
"n",
"]",
")",
"# make a new databox with the same options and headers",
"new_databox",
"=",
"databox",
"(",
"delimiter",
"=",
"self",
".",
"delimiter",
")",
"new_databox",
".",
"copy_headers",
"(",
"self",
")",
"# trim it up, send it out.",
"cs",
"=",
"_s",
".",
"fun",
".",
"trim_data_uber",
"(",
"self",
",",
"conditions",
")",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"cs",
")",
")",
":",
"new_databox",
".",
"append_column",
"(",
"cs",
"[",
"n",
"]",
",",
"self",
".",
"ckeys",
"[",
"n",
"]",
")",
"return",
"new_databox"
] | Removes data points not satisfying the supplied conditions. Conditions
can be truth arrays (having the same length as the columns!)
or scripted strings.
Example Workflow
----------------
d1 = spinmob.data.load()
d2 = d1.trim( (2<d1[0]) & (d1[0]<10) | (d1[3]==22), 'sin(d[2])*h("gain")<32.2')
Note this will not modify the databox, rather it will generate a new
one with the same header information and return it. | [
"Removes",
"data",
"points",
"not",
"satisfying",
"the",
"supplied",
"conditions",
".",
"Conditions",
"can",
"be",
"truth",
"arrays",
"(",
"having",
"the",
"same",
"length",
"as",
"the",
"columns!",
")",
"or",
"scripted",
"strings",
"."
] | python | train |
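Stripped of the databox machinery (headers, scripted string conditions), the row-trimming idea reduces to AND-ing boolean masks and indexing each column, as in this sketch:

import numpy as np

def trim_columns(columns, *conditions):
    mask = np.ones(len(columns[0]), dtype=bool)
    for cond in conditions:
        mask &= np.asarray(cond)          # AND all conditions together
    return [np.asarray(c)[mask] for c in columns]

x = np.arange(10)
y = x ** 2
trimmed = trim_columns([x, y], (2 < x) & (x < 7), y % 2 == 0)
print(trimmed[0])  # [4 6]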
fastavro/fastavro | fastavro/_write_py.py | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L70-L75 | def prepare_date(data, schema):
"""Converts datetime.date to int timestamp"""
if isinstance(data, datetime.date):
return data.toordinal() - DAYS_SHIFT
else:
return data | [
"def",
"prepare_date",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"date",
")",
":",
"return",
"data",
".",
"toordinal",
"(",
")",
"-",
"DAYS_SHIFT",
"else",
":",
"return",
"data"
] | Converts datetime.date to int timestamp | [
"Converts",
"datetime",
".",
"date",
"to",
"int",
"timestamp"
] | python | train |
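DAYS_SHIFT is defined elsewhere in fastavro; since the Avro 'date' logical type counts days since the Unix epoch, it is assumed here to be the proleptic ordinal of 1970-01-01, which makes the conversion runnable:

import datetime

# Assumption: the shift is the ordinal of the Unix epoch date.
DAYS_SHIFT = datetime.date(1970, 1, 1).toordinal()

def prepare_date(data):
    if isinstance(data, datetime.date):
        return data.toordinal() - DAYS_SHIFT
    return data

print(prepare_date(datetime.date(1970, 1, 2)))  # 1 (one day after the epoch)
print(prepare_date(12345))                      # 12345 (non-dates pass through)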
odlgroup/odl | odl/tomo/analytic/filtered_back_projection.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L49-L101 | def _fbp_filter(norm_freq, filter_type, frequency_scaling):
"""Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8)
"""
filter_type, filter_type_in = str(filter_type).lower(), filter_type
if callable(filter_type):
filt = filter_type(norm_freq)
elif filter_type == 'ram-lak':
filt = np.copy(norm_freq)
elif filter_type == 'shepp-logan':
filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling))
elif filter_type == 'cosine':
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling))
elif filter_type == 'hamming':
filt = norm_freq * (
0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling)))
elif filter_type == 'hann':
filt = norm_freq * (
np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2)
else:
raise ValueError('unknown `filter_type` ({})'
''.format(filter_type_in))
indicator = (norm_freq <= frequency_scaling)
filt *= indicator
return filt | [
"def",
"_fbp_filter",
"(",
"norm_freq",
",",
"filter_type",
",",
"frequency_scaling",
")",
":",
"filter_type",
",",
"filter_type_in",
"=",
"str",
"(",
"filter_type",
")",
".",
"lower",
"(",
")",
",",
"filter_type",
"if",
"callable",
"(",
"filter_type",
")",
":",
"filt",
"=",
"filter_type",
"(",
"norm_freq",
")",
"elif",
"filter_type",
"==",
"'ram-lak'",
":",
"filt",
"=",
"np",
".",
"copy",
"(",
"norm_freq",
")",
"elif",
"filter_type",
"==",
"'shepp-logan'",
":",
"filt",
"=",
"norm_freq",
"*",
"np",
".",
"sinc",
"(",
"norm_freq",
"/",
"(",
"2",
"*",
"frequency_scaling",
")",
")",
"elif",
"filter_type",
"==",
"'cosine'",
":",
"filt",
"=",
"norm_freq",
"*",
"np",
".",
"cos",
"(",
"norm_freq",
"*",
"np",
".",
"pi",
"/",
"(",
"2",
"*",
"frequency_scaling",
")",
")",
"elif",
"filter_type",
"==",
"'hamming'",
":",
"filt",
"=",
"norm_freq",
"*",
"(",
"0.54",
"+",
"0.46",
"*",
"np",
".",
"cos",
"(",
"norm_freq",
"*",
"np",
".",
"pi",
"/",
"(",
"frequency_scaling",
")",
")",
")",
"elif",
"filter_type",
"==",
"'hann'",
":",
"filt",
"=",
"norm_freq",
"*",
"(",
"np",
".",
"cos",
"(",
"norm_freq",
"*",
"np",
".",
"pi",
"/",
"(",
"2",
"*",
"frequency_scaling",
")",
")",
"**",
"2",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'unknown `filter_type` ({})'",
"''",
".",
"format",
"(",
"filter_type_in",
")",
")",
"indicator",
"=",
"(",
"norm_freq",
"<=",
"frequency_scaling",
")",
"filt",
"*=",
"indicator",
"return",
"filt"
] | Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8) | [
"Create",
"a",
"smoothing",
"filter",
"for",
"FBP",
"."
] | python | train |
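The 'Hann' branch can be checked directly with NumPy, without ODL; this reproduces _fbp_filter(norm_freq, 'Hann', 0.8) from the docstring example:

import numpy as np

norm_freq = np.linspace(0, 1, 10)
frequency_scaling = 0.8
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2
filt *= (norm_freq <= frequency_scaling)  # zero out above the relative cutoff
print(np.round(filt, 3))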
alimanfoo/vcfnp | vcfnp/array.py | https://github.com/alimanfoo/vcfnp/blob/c3f63fb11ada56d4a88076c61c81f99b8ee78b8f/vcfnp/array.py#L34-L138 | def variants(vcf_fn, region=None, fields=None, exclude_fields=None,
dtypes=None, arities=None, fills=None, transformers=None,
vcf_types=None, count=None, progress=0, logstream=None,
condition=None, slice_args=None, flatten_filter=False,
verbose=True, cache=False, cachedir=None, skip_cached=False,
compress_cache=False, truncate=True):
"""
Load a numpy structured array with data from the fixed fields of a VCF
file (including INFO).
Parameters
----------
vcf_fn: string or list
Name of the VCF file or list of file names.
region: string, optional
Region to extract, e.g., 'chr1' or 'chr1:0-100000'.
fields: list or array-like, optional
List of fields to extract from the VCF.
exclude_fields: list or array-like, optional
Fields to exclude from extraction.
dtypes: dict or dict-like, optional
Dictionary containing dtypes to use instead of the default inferred
ones.
arities: dict or dict-like, optional
Dictionary containing field:integer mappings used to override the
number of values to expect.
fills: dict or dict-like, optional
Dictionary containing field:fillvalue mappings used to override the
defaults used for missing values.
transformers: dict or dict-like, optional
Dictionary containing field:function mappings used to preprocess
any values prior to loading into array.
vcf_types: dict or dict-like, optional
Dictionary containing field:string mappings used to override any
bogus type declarations in the VCF header (e.g., MQ0Fraction declared
as Integer).
count: int, optional
Attempt to extract a specific number of records.
progress: int, optional
If greater than 0, log progress.
logstream: file or file-like object, optional
Stream to use for logging progress.
condition: array, optional
Boolean array defining which rows to load.
slice_args: tuple or list, optional
Slice of the underlying iterator, e.g., (0, 1000, 10) takes every
10th row from the first 1000.
flatten_filter: bool, optional
Return FILTER as multiple boolean fields, e.g., FILTER_PASS,
FILTER_LowQuality, etc.
verbose: bool, optional
Log more messages.
cache: bool, optional
If True, save the resulting numpy array to disk, and load from the
cache if present rather than rebuilding from the VCF.
cachedir: string, optional
Manually specify the directory to use to store cache files.
skip_cached: bool, optional
If True and cache file is fresh, do not load and return None.
compress_cache: bool, optional
If True, compress the cache file.
truncate: bool, optional
If True (default) only include variants whose start position is within
the given region. If False, use default tabix behaviour.
Examples
--------
>>> from vcfnp import variants
>>> v = variants('fixture/sample.vcf')
>>> v
array([ (b'19', 111, b'.', b'A', b'C', 9.600000381469727, (False, False, False), 2, True, 0, b'', 0, 0.0, 0, False, 0, False, 0),
(b'19', 112, b'.', b'A', b'G', 10.0, (False, False, False), 2, True, 0, b'', 0, 0.0, 0, False, 0, False, 0),
(b'20', 14370, b'rs6054257', b'G', b'A', 29.0, (False, False, True), 2, True, 0, b'', 0, 0.5, 0, True, 14, True, 3),
(b'20', 17330, b'.', b'T', b'A', 3.0, (True, False, False), 2, True, 0, b'', 0, 0.016998291015625, 0, False, 11, False, 3),
(b'20', 1110696, b'rs6040355', b'A', b'G', 67.0, (False, False, True), 3, True, 0, b'T', 0, 0.3330078125, 0, True, 10, False, 2),
(b'20', 1230237, b'.', b'T', b'.', 47.0, (False, False, True), 2, False, 0, b'T', 0, 0.0, 0, False, 13, False, 3),
(b'20', 1234567, b'microsat1', b'G', b'GA', 50.0, (False, False, True), 3, False, 1, b'G', 3, 0.0, 6, False, 9, False, 3),
(b'20', 1235237, b'.', b'T', b'.', 0.0, (False, False, False), 2, False, 0, b'', 0, 0.0, 0, False, 0, False, 0),
(b'X', 10, b'rsTest', b'AC', b'A', 10.0, (False, False, True), 3, False, -1, b'', 0, 0.0, 0, False, 0, False, 0)],
dtype=[('CHROM', 'S12'), ('POS', '<i4'), ('ID', 'S12'), ('REF', 'S12'), ('ALT', 'S12'), ('QUAL', '<f4'), ('FILTER', [('q10', '?'), ('s50', '?'), ('PASS', '?')]), ('num_alleles', 'u1'), ('is_snp', '?'), ('svlen', '<i4'), ('AA', 'S12'), ('AC', '<u2'), ('AF', '<f2'), ('AN', '<u2'), ('DB', '?'), ('DP', '<i4'), ('H2', '?'), ('NS', '<i4')])
>>> v['QUAL']
array([ 9.60000038, 10. , 29. , 3. ,
67. , 47. , 50. , 0. , 10. ], dtype=float32)
>>> v['FILTER']['PASS']
array([False, False, True, False, True, True, True, False, True], dtype=bool)
>>> v['AF']
array([ 0. , 0. , 0.5 , 0.01699829, 0.33300781,
0. , 0. , 0. , 0. ], dtype=float16)
""" # flake8: noqa
loader = _VariantsLoader(vcf_fn, region=region, fields=fields,
exclude_fields=exclude_fields, dtypes=dtypes,
arities=arities, fills=fills,
transformers=transformers, vcf_types=vcf_types,
count=count, progress=progress,
logstream=logstream, condition=condition,
slice_args=slice_args,
flatten_filter=flatten_filter, verbose=verbose,
cache=cache, cachedir=cachedir,
skip_cached=skip_cached,
compress_cache=compress_cache, truncate=truncate)
return loader.load() | [
"def",
"variants",
"(",
"vcf_fn",
",",
"region",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"exclude_fields",
"=",
"None",
",",
"dtypes",
"=",
"None",
",",
"arities",
"=",
"None",
",",
"fills",
"=",
"None",
",",
"transformers",
"=",
"None",
",",
"vcf_types",
"=",
"None",
",",
"count",
"=",
"None",
",",
"progress",
"=",
"0",
",",
"logstream",
"=",
"None",
",",
"condition",
"=",
"None",
",",
"slice_args",
"=",
"None",
",",
"flatten_filter",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"cache",
"=",
"False",
",",
"cachedir",
"=",
"None",
",",
"skip_cached",
"=",
"False",
",",
"compress_cache",
"=",
"False",
",",
"truncate",
"=",
"True",
")",
":",
"# flake8: noqa",
"loader",
"=",
"_VariantsLoader",
"(",
"vcf_fn",
",",
"region",
"=",
"region",
",",
"fields",
"=",
"fields",
",",
"exclude_fields",
"=",
"exclude_fields",
",",
"dtypes",
"=",
"dtypes",
",",
"arities",
"=",
"arities",
",",
"fills",
"=",
"fills",
",",
"transformers",
"=",
"transformers",
",",
"vcf_types",
"=",
"vcf_types",
",",
"count",
"=",
"count",
",",
"progress",
"=",
"progress",
",",
"logstream",
"=",
"logstream",
",",
"condition",
"=",
"condition",
",",
"slice_args",
"=",
"slice_args",
",",
"flatten_filter",
"=",
"flatten_filter",
",",
"verbose",
"=",
"verbose",
",",
"cache",
"=",
"cache",
",",
"cachedir",
"=",
"cachedir",
",",
"skip_cached",
"=",
"skip_cached",
",",
"compress_cache",
"=",
"compress_cache",
",",
"truncate",
"=",
"truncate",
")",
"return",
"loader",
".",
"load",
"(",
")"
] | Load a numpy structured array with data from the fixed fields of a VCF
file (including INFO).
Parameters
----------
vcf_fn: string or list
Name of the VCF file or list of file names.
region: string, optional
Region to extract, e.g., 'chr1' or 'chr1:0-100000'.
fields: list or array-like, optional
List of fields to extract from the VCF.
exclude_fields: list or array-like, optional
Fields to exclude from extraction.
dtypes: dict or dict-like, optional
Dictionary containing dtypes to use instead of the default inferred
ones.
arities: dict or dict-like, optional
Dictionary containing field:integer mappings used to override the
number of values to expect.
fills: dict or dict-like, optional
Dictionary containing field:fillvalue mappings used to override the
defaults used for missing values.
transformers: dict or dict-like, optional
Dictionary containing field:function mappings used to preprocess
any values prior to loading into array.
vcf_types: dict or dict-like, optional
Dictionary containing field:string mappings used to override any
bogus type declarations in the VCF header (e.g., MQ0Fraction declared
as Integer).
count: int, optional
Attempt to extract a specific number of records.
progress: int, optional
If greater than 0, log progress.
logstream: file or file-like object, optional
Stream to use for logging progress.
condition: array, optional
Boolean array defining which rows to load.
slice_args: tuple or list, optional
Slice of the underlying iterator, e.g., (0, 1000, 10) takes every
10th row from the first 1000.
flatten_filter: bool, optional
Return FILTER as multiple boolean fields, e.g., FILTER_PASS,
FILTER_LowQuality, etc.
verbose: bool, optional
Log more messages.
cache: bool, optional
If True, save the resulting numpy array to disk, and load from the
cache if present rather than rebuilding from the VCF.
cachedir: string, optional
Manually specify the directory to use to store cache files.
skip_cached: bool, optional
If True and cache file is fresh, do not load and return None.
compress_cache: bool, optional
If True, compress the cache file.
truncate: bool, optional
If True (default) only include variants whose start position is within
the given region. If False, use default tabix behaviour.
Examples
--------
>>> from vcfnp import variants
>>> v = variants('fixture/sample.vcf')
>>> v
array([ (b'19', 111, b'.', b'A', b'C', 9.600000381469727, (False, False, False), 2, True, 0, b'', 0, 0.0, 0, False, 0, False, 0),
(b'19', 112, b'.', b'A', b'G', 10.0, (False, False, False), 2, True, 0, b'', 0, 0.0, 0, False, 0, False, 0),
(b'20', 14370, b'rs6054257', b'G', b'A', 29.0, (False, False, True), 2, True, 0, b'', 0, 0.5, 0, True, 14, True, 3),
(b'20', 17330, b'.', b'T', b'A', 3.0, (True, False, False), 2, True, 0, b'', 0, 0.016998291015625, 0, False, 11, False, 3),
(b'20', 1110696, b'rs6040355', b'A', b'G', 67.0, (False, False, True), 3, True, 0, b'T', 0, 0.3330078125, 0, True, 10, False, 2),
(b'20', 1230237, b'.', b'T', b'.', 47.0, (False, False, True), 2, False, 0, b'T', 0, 0.0, 0, False, 13, False, 3),
(b'20', 1234567, b'microsat1', b'G', b'GA', 50.0, (False, False, True), 3, False, 1, b'G', 3, 0.0, 6, False, 9, False, 3),
(b'20', 1235237, b'.', b'T', b'.', 0.0, (False, False, False), 2, False, 0, b'', 0, 0.0, 0, False, 0, False, 0),
(b'X', 10, b'rsTest', b'AC', b'A', 10.0, (False, False, True), 3, False, -1, b'', 0, 0.0, 0, False, 0, False, 0)],
dtype=[('CHROM', 'S12'), ('POS', '<i4'), ('ID', 'S12'), ('REF', 'S12'), ('ALT', 'S12'), ('QUAL', '<f4'), ('FILTER', [('q10', '?'), ('s50', '?'), ('PASS', '?')]), ('num_alleles', 'u1'), ('is_snp', '?'), ('svlen', '<i4'), ('AA', 'S12'), ('AC', '<u2'), ('AF', '<f2'), ('AN', '<u2'), ('DB', '?'), ('DP', '<i4'), ('H2', '?'), ('NS', '<i4')])
>>> v['QUAL']
array([ 9.60000038, 10. , 29. , 3. ,
67. , 47. , 50. , 0. , 10. ], dtype=float32)
>>> v['FILTER']['PASS']
array([False, False, True, False, True, True, True, False, True], dtype=bool)
>>> v['AF']
array([ 0. , 0. , 0.5 , 0.01699829, 0.33300781,
0. , 0. , 0. , 0. ], dtype=float16) | [
"Load",
"an",
"numpy",
"structured",
"array",
"with",
"data",
"from",
"the",
"fixed",
"fields",
"of",
"a",
"VCF",
"file",
"(",
"including",
"INFO",
")",
"."
] | python | train |
iotile/coretools | transport_plugins/websocket/iotile_transport_websocket/device_adapter.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/websocket/iotile_transport_websocket/device_adapter.py#L178-L188 | async def _on_report_notification(self, event):
"""Callback function called when a report event is received.
Args:
event (dict): The report_event
"""
conn_string = event.get('connection_string')
report = self._report_parser.deserialize_report(event.get('serialized_report'))
self.notify_event(conn_string, 'report', report) | [
"async",
"def",
"_on_report_notification",
"(",
"self",
",",
"event",
")",
":",
"conn_string",
"=",
"event",
".",
"get",
"(",
"'connection_string'",
")",
"report",
"=",
"self",
".",
"_report_parser",
".",
"deserialize_report",
"(",
"event",
".",
"get",
"(",
"'serialized_report'",
")",
")",
"self",
".",
"notify_event",
"(",
"conn_string",
",",
"'report'",
",",
"report",
")"
] | Callback function called when a report event is received.
Args:
event (dict): The report_event | [
"Callback",
"function",
"called",
"when",
"a",
"report",
"event",
"is",
"received",
"."
] | python | train |
googleapis/google-cloud-python | datastore/google/cloud/datastore/query.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/query.py#L262-L271 | def projection(self, projection):
"""Set the fields returned the query.
:type projection: str or sequence of strings
:param projection: Each value is a string giving the name of a
property to be included in the projection query.
"""
if isinstance(projection, str):
projection = [projection]
self._projection[:] = projection | [
"def",
"projection",
"(",
"self",
",",
"projection",
")",
":",
"if",
"isinstance",
"(",
"projection",
",",
"str",
")",
":",
"projection",
"=",
"[",
"projection",
"]",
"self",
".",
"_projection",
"[",
":",
"]",
"=",
"projection"
] | Set the fields returned the query.
:type projection: str or sequence of strings
:param projection: Each value is a string giving the name of a
property to be included in the projection query. | [
"Set",
"the",
"fields",
"returned",
"the",
"query",
"."
] | python | train |
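A minimal sketch of the setter above. Note the slice assignment self._projection[:] = projection: it mutates the existing list in place rather than rebinding the attribute, so any external reference to that list sees the update:

class Query:
    def __init__(self):
        self._projection = []

    def set_projection(self, projection):
        if isinstance(projection, str):
            projection = [projection]     # normalize a single field name
        self._projection[:] = projection  # in-place update, identity preserved

q = Query()
q.set_projection('name')
print(q._projection)              # ['name']
q.set_projection(['name', 'age'])
print(q._projection)              # ['name', 'age']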
unt-libraries/pyuntl | pyuntl/highwire_structure.py | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L77-L84 | def get_publisher_name(self, **kwargs):
"""Get the publisher name."""
children = kwargs.get('children', [])
# Find the creator type in children.
for child in children:
if child.tag == 'name':
return child.content
return None | [
"def",
"get_publisher_name",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"children",
"=",
"kwargs",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"# Find the creator type in children.",
"for",
"child",
"in",
"children",
":",
"if",
"child",
".",
"tag",
"==",
"'name'",
":",
"return",
"child",
".",
"content",
"return",
"None"
] | Get the publisher name. | [
"Get",
"the",
"publisher",
"name",
"."
] | python | train |
fossasia/knittingpattern | knittingpattern/convert/AYABPNGBuilder.py | https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/AYABPNGBuilder.py#L75-L80 | def _set_pixel_and_convert_color(self, x, y, color):
"""set the pixel but convert the color before."""
if color is None:
return
color = self._convert_color_to_rrggbb(color)
self._set_pixel(x, y, color) | [
"def",
"_set_pixel_and_convert_color",
"(",
"self",
",",
"x",
",",
"y",
",",
"color",
")",
":",
"if",
"color",
"is",
"None",
":",
"return",
"color",
"=",
"self",
".",
"_convert_color_to_rrggbb",
"(",
"color",
")",
"self",
".",
"_set_pixel",
"(",
"x",
",",
"y",
",",
"color",
")"
] | set the pixel but convert the color before. | [
"set",
"the",
"pixel",
"but",
"convert",
"the",
"color",
"before",
"."
] | python | valid |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/external/mathjax.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/external/mathjax.py#L32-L79 | def install_mathjax(tag='v1.1', replace=False):
"""Download and install MathJax for offline use.
This will install mathjax to the 'static' dir in the IPython notebook
package, so it will fail if the caller does not have write access
to that location.
MathJax is a ~15MB download, and ~150MB installed.
Parameters
----------
replace : bool [False]
Whether to remove and replace an existing install.
tag : str ['v1.1']
Which tag to download. Default is 'v1.1', the current stable release,
but alternatives include 'v1.1a' and 'master'.
"""
mathjax_url = "https://github.com/mathjax/MathJax/tarball/%s"%tag
nbdir = os.path.dirname(os.path.abspath(nbmod.__file__))
static = os.path.join(nbdir, 'static')
dest = os.path.join(static, 'mathjax')
# check for existence and permissions
if not os.access(static, os.W_OK):
raise IOError("Need have write access to %s"%static)
if os.path.exists(dest):
if replace:
if not os.access(dest, os.W_OK):
raise IOError("Need have write access to %s"%dest)
print "removing previous MathJax install"
shutil.rmtree(dest)
else:
print "offline MathJax apparently already installed"
return
# download mathjax
print "Downloading mathjax source..."
response = urllib2.urlopen(mathjax_url)
print "done"
# use 'r|gz' stream mode, because socket file-like objects can't seek:
tar = tarfile.open(fileobj=response.fp, mode='r|gz')
topdir = tar.firstmember.path
print "Extracting to %s"%dest
tar.extractall(static)
# it will be mathjax-MathJax-<sha>, rename to just mathjax
os.rename(os.path.join(static, topdir), dest) | [
"def",
"install_mathjax",
"(",
"tag",
"=",
"'v1.1'",
",",
"replace",
"=",
"False",
")",
":",
"mathjax_url",
"=",
"\"https://github.com/mathjax/MathJax/tarball/%s\"",
"%",
"tag",
"nbdir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"nbmod",
".",
"__file__",
")",
")",
"static",
"=",
"os",
".",
"path",
".",
"join",
"(",
"nbdir",
",",
"'static'",
")",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"static",
",",
"'mathjax'",
")",
"# check for existence and permissions",
"if",
"not",
"os",
".",
"access",
"(",
"static",
",",
"os",
".",
"W_OK",
")",
":",
"raise",
"IOError",
"(",
"\"Need have write access to %s\"",
"%",
"static",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"if",
"replace",
":",
"if",
"not",
"os",
".",
"access",
"(",
"dest",
",",
"os",
".",
"W_OK",
")",
":",
"raise",
"IOError",
"(",
"\"Need have write access to %s\"",
"%",
"dest",
")",
"print",
"\"removing previous MathJax install\"",
"shutil",
".",
"rmtree",
"(",
"dest",
")",
"else",
":",
"print",
"\"offline MathJax apparently already installed\"",
"return",
"# download mathjax",
"print",
"\"Downloading mathjax source...\"",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"mathjax_url",
")",
"print",
"\"done\"",
"# use 'r|gz' stream mode, because socket file-like objects can't seek:",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"response",
".",
"fp",
",",
"mode",
"=",
"'r|gz'",
")",
"topdir",
"=",
"tar",
".",
"firstmember",
".",
"path",
"print",
"\"Extracting to %s\"",
"%",
"dest",
"tar",
".",
"extractall",
"(",
"static",
")",
"# it will be mathjax-MathJax-<sha>, rename to just mathjax",
"os",
".",
"rename",
"(",
"os",
".",
"path",
".",
"join",
"(",
"static",
",",
"topdir",
")",
",",
"dest",
")"
] | Download and install MathJax for offline use.
This will install mathjax to the 'static' dir in the IPython notebook
package, so it will fail if the caller does not have write access
to that location.
MathJax is a ~15MB download, and ~150MB installed.
Parameters
----------
replace : bool [False]
Whether to remove and replace an existing install.
tag : str ['v1.1']
Which tag to download. Default is 'v1.1', the current stable release,
but alternatives include 'v1.1a' and 'master'. | [
"Download",
"and",
"install",
"MathJax",
"for",
"offline",
"use",
".",
"This",
"will",
"install",
"mathjax",
"to",
"the",
"static",
"dir",
"in",
"the",
"IPython",
"notebook",
"package",
"so",
"it",
"will",
"fail",
"if",
"the",
"caller",
"does",
"not",
"have",
"write",
"access",
"to",
"that",
"location",
".",
"MathJax",
"is",
"a",
"~15MB",
"download",
"and",
"~150MB",
"installed",
".",
"Parameters",
"----------",
"replace",
":",
"bool",
"[",
"False",
"]",
"Whether",
"to",
"remove",
"and",
"replace",
"an",
"existing",
"install",
".",
"tag",
":",
"str",
"[",
"v1",
".",
"1",
"]",
"Which",
"tag",
"to",
"download",
".",
"Default",
"is",
"v1",
".",
"1",
"the",
"current",
"stable",
"release",
"but",
"alternatives",
"include",
"v1",
".",
"1a",
"and",
"master",
"."
] | python | test |
gesellkammer/sndfileio | sndfileio/dsp.py | https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/dsp.py#L32-L60 | def lowpass_cheby2_coeffs(freq, sr, maxorder=12):
# type: (float, int, int) -> Tuple[np.ndarray, np.ndarray, float]
"""
freq : The frequency above which signals are attenuated
with 95 dB
sr : Sampling rate in Hz.
maxorder: Maximal order of the designed cheby2 filter
Returns --> (b coeffs, a coeffs, freq_passband)
"""
nyquist = sr * 0.5
# rp - maximum ripple of passband, rs - attenuation of stopband
rp, rs, order = 1, 96, 1e99
ws = freq / nyquist # stop band frequency
wp = ws # pass band frequency
# raise for some bad scenarios
if ws > 1:
ws = 1.0
msg = "Selected corner frequency is above Nyquist. " + \
"Setting Nyquist as high corner."
warnings.warn(msg)
while True:
if order <= maxorder:
break
wp = wp * 0.99
order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)
b, a = signal.cheby2(order, rs, wn, btype='low', analog=0, output='ba')
return b, a, wp*nyquist | [
"def",
"lowpass_cheby2_coeffs",
"(",
"freq",
",",
"sr",
",",
"maxorder",
"=",
"12",
")",
":",
"# type: (float, int, int) -> Tuple[np.ndarray, np.ndarray, float]",
"nyquist",
"=",
"sr",
"*",
"0.5",
"# rp - maximum ripple of passband, rs - attenuation of stopband",
"rp",
",",
"rs",
",",
"order",
"=",
"1",
",",
"96",
",",
"1e99",
"ws",
"=",
"freq",
"/",
"nyquist",
"# stop band frequency",
"wp",
"=",
"ws",
"# pass band frequency",
"# raise for some bad scenarios",
"if",
"ws",
">",
"1",
":",
"ws",
"=",
"1.0",
"msg",
"=",
"\"Selected corner frequency is above Nyquist. \"",
"+",
"\"Setting Nyquist as high corner.\"",
"warnings",
".",
"warn",
"(",
"msg",
")",
"while",
"True",
":",
"if",
"order",
"<=",
"maxorder",
":",
"break",
"wp",
"=",
"wp",
"*",
"0.99",
"order",
",",
"wn",
"=",
"signal",
".",
"cheb2ord",
"(",
"wp",
",",
"ws",
",",
"rp",
",",
"rs",
",",
"analog",
"=",
"0",
")",
"b",
",",
"a",
"=",
"signal",
".",
"cheby2",
"(",
"order",
",",
"rs",
",",
"wn",
",",
"btype",
"=",
"'low'",
",",
"analog",
"=",
"0",
",",
"output",
"=",
"'ba'",
")",
"return",
"b",
",",
"a",
",",
"wp",
"*",
"nyquist"
] | freq : The frequency above which signals are attenuated
with 95 dB
sr : Sampling rate in Hz.
maxorder: Maximal order of the designed cheby2 filter
Returns --> (b coeffs, a coeffs, freq_passband) | [
"freq",
":",
"The",
"frequency",
"above",
"which",
"signals",
"are",
"attenuated",
"with",
"95",
"dB",
"sr",
":",
"Sampling",
"rate",
"in",
"Hz",
".",
"maxorder",
":",
"Maximal",
"order",
"of",
"the",
"designed",
"cheby2",
"filter"
] | python | train |
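A hedged usage sketch: design the coefficients as above (same rp/rs values and order-capping loop, restated) and apply them with scipy.signal.lfilter:

import numpy as np
from scipy import signal

sr, freq = 44100, 4000.0
nyquist = sr * 0.5
rp, rs = 1, 96
ws = wp = freq / nyquist
order = 1e99
while order > 12:                        # shrink passband until order fits
    wp = wp * 0.99
    order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)
b, a = signal.cheby2(order, rs, wn, btype='low', analog=0, output='ba')

noise = np.random.RandomState(0).randn(sr)   # one second of white noise
filtered = signal.lfilter(b, a, noise)       # energy above ~4 kHz attenuated
print(order, round(wp * nyquist, 1))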
biocore/burrito-fillings | bfillings/seqprep.py | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L145-L153 | def _discarded_reads1_out_file_name(self):
"""Checks if file name is set for discarded reads1 output.
Returns absolute path."""
if self.Parameters['-3'].isOn():
discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value))
else:
raise ValueError(
"No discarded-reads1 (flag -3) output path specified")
return discarded_reads1 | [
"def",
"_discarded_reads1_out_file_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"Parameters",
"[",
"'-3'",
"]",
".",
"isOn",
"(",
")",
":",
"discarded_reads1",
"=",
"self",
".",
"_absolute",
"(",
"str",
"(",
"self",
".",
"Parameters",
"[",
"'-3'",
"]",
".",
"Value",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"No discarded-reads1 (flag -3) output path specified\"",
")",
"return",
"discarded_reads1"
] | Checks if file name is set for discarded reads1 output.
Returns absolute path. | [
"Checks",
"if",
"file",
"name",
"is",
"set",
"for",
"discarded",
"reads1",
"output",
".",
"Returns",
"absolute",
"path",
"."
] | python | train |
CivicSpleen/ambry | ambry/bundle/bundle.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L1419-L1440 | def sync_in(self, force=False):
"""Synchronize from files to records, and records to objects"""
self.log('---- Sync In ----')
self.dstate = self.STATES.BUILDING
for path_name in self.source_fs.listdir():
f = self.build_source_files.instance_from_name(path_name)
if not f:
self.warn('Ignoring unknown file: {}'.format(path_name))
continue
if f and f.exists and (f.fs_is_newer or force):
self.log('Sync: {}'.format(f.record.path))
f.fs_to_record()
f.record_to_objects()
self.commit()
self.library.search.index_bundle(self, force=True) | [
"def",
"sync_in",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"self",
".",
"log",
"(",
"'---- Sync In ----'",
")",
"self",
".",
"dstate",
"=",
"self",
".",
"STATES",
".",
"BUILDING",
"for",
"path_name",
"in",
"self",
".",
"source_fs",
".",
"listdir",
"(",
")",
":",
"f",
"=",
"self",
".",
"build_source_files",
".",
"instance_from_name",
"(",
"path_name",
")",
"if",
"not",
"f",
":",
"self",
".",
"warn",
"(",
"'Ignoring unknown file: {}'",
".",
"format",
"(",
"path_name",
")",
")",
"continue",
"if",
"f",
"and",
"f",
".",
"exists",
"and",
"(",
"f",
".",
"fs_is_newer",
"or",
"force",
")",
":",
"self",
".",
"log",
"(",
"'Sync: {}'",
".",
"format",
"(",
"f",
".",
"record",
".",
"path",
")",
")",
"f",
".",
"fs_to_record",
"(",
")",
"f",
".",
"record_to_objects",
"(",
")",
"self",
".",
"commit",
"(",
")",
"self",
".",
"library",
".",
"search",
".",
"index_bundle",
"(",
"self",
",",
"force",
"=",
"True",
")"
] | Synchronize from files to records, and records to objects | [
"Synchronize",
"from",
"files",
"to",
"records",
"and",
"records",
"to",
"objects"
] | python | train |
SheffieldML/GPy | GPy/kern/src/stationary.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/stationary.py#L118-L123 | def dK_dr_via_X(self, X, X2):
"""
compute the derivative of K wrt X going through X
"""
#a convenience function, so we can cache dK_dr
return self.dK_dr(self._scaled_dist(X, X2)) | [
"def",
"dK_dr_via_X",
"(",
"self",
",",
"X",
",",
"X2",
")",
":",
"#a convenience function, so we can cache dK_dr",
"return",
"self",
".",
"dK_dr",
"(",
"self",
".",
"_scaled_dist",
"(",
"X",
",",
"X2",
")",
")"
] | compute the derivative of K wrt X going through X | [
"compute",
"the",
"derivative",
"of",
"K",
"wrt",
"X",
"going",
"through",
"X"
] | python | train |
dropbox/stone | stone/backends/python_types.py | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_types.py#L660-L713 | def _generate_custom_annotation_processors(self, ns, data_type, extra_annotations=()):
"""
Generates code that will run a custom function 'processor' on every
field with a custom annotation, no matter how deep (recursively) it
might be located in data_type (incl. in elements of lists or maps).
If extra_annotations is passed, it's assumed to be a list of custom
annotations applied directly onto data_type (e.g. because it's a field
in a struct).
Yields pairs of (annotation_type, code) where code is code that
evaluates to a function that should be executed with an instance of
data_type as the only parameter, and whose return value should replace
that instance.
"""
# annotations applied to members of this type
dt, _, _ = unwrap(data_type)
if is_struct_type(dt) or is_union_type(dt):
annotation_types_seen = set()
for annotation in get_custom_annotations_recursive(dt):
if annotation.annotation_type not in annotation_types_seen:
yield (annotation.annotation_type,
generate_func_call(
'bb.make_struct_annotation_processor',
args=[class_name_for_annotation_type(annotation.annotation_type, ns),
'processor']
))
annotation_types_seen.add(annotation.annotation_type)
elif is_list_type(dt):
for annotation_type, recursive_processor in self._generate_custom_annotation_processors(
ns, dt.data_type):
# every member needs to be replaced---use handwritten processor
yield (annotation_type,
generate_func_call(
'bb.make_list_annotation_processor',
args=[recursive_processor]
))
elif is_map_type(dt):
for annotation_type, recursive_processor in self._generate_custom_annotation_processors(
ns, dt.value_data_type):
# every value needs to be replaced---use handwritten processor
yield (annotation_type,
generate_func_call(
'bb.make_map_value_annotation_processor',
args=[recursive_processor]
))
# annotations applied directly to this type (through aliases or
# passed in from the caller)
for annotation in itertools.chain(get_custom_annotations_for_alias(data_type),
extra_annotations):
yield (annotation.annotation_type,
generate_func_call(
'bb.partially_apply',
args=['processor', self._generate_custom_annotation_instance(ns, annotation)]
)) | [
"def",
"_generate_custom_annotation_processors",
"(",
"self",
",",
"ns",
",",
"data_type",
",",
"extra_annotations",
"=",
"(",
")",
")",
":",
"# annotations applied to members of this type",
"dt",
",",
"_",
",",
"_",
"=",
"unwrap",
"(",
"data_type",
")",
"if",
"is_struct_type",
"(",
"dt",
")",
"or",
"is_union_type",
"(",
"dt",
")",
":",
"annotation_types_seen",
"=",
"set",
"(",
")",
"for",
"annotation",
"in",
"get_custom_annotations_recursive",
"(",
"dt",
")",
":",
"if",
"annotation",
".",
"annotation_type",
"not",
"in",
"annotation_types_seen",
":",
"yield",
"(",
"annotation",
".",
"annotation_type",
",",
"generate_func_call",
"(",
"'bb.make_struct_annotation_processor'",
",",
"args",
"=",
"[",
"class_name_for_annotation_type",
"(",
"annotation",
".",
"annotation_type",
",",
"ns",
")",
",",
"'processor'",
"]",
")",
")",
"annotation_types_seen",
".",
"add",
"(",
"annotation",
".",
"annotation_type",
")",
"elif",
"is_list_type",
"(",
"dt",
")",
":",
"for",
"annotation_type",
",",
"recursive_processor",
"in",
"self",
".",
"_generate_custom_annotation_processors",
"(",
"ns",
",",
"dt",
".",
"data_type",
")",
":",
"# every member needs to be replaced---use handwritten processor",
"yield",
"(",
"annotation_type",
",",
"generate_func_call",
"(",
"'bb.make_list_annotation_processor'",
",",
"args",
"=",
"[",
"recursive_processor",
"]",
")",
")",
"elif",
"is_map_type",
"(",
"dt",
")",
":",
"for",
"annotation_type",
",",
"recursive_processor",
"in",
"self",
".",
"_generate_custom_annotation_processors",
"(",
"ns",
",",
"dt",
".",
"value_data_type",
")",
":",
"# every value needs to be replaced---use handwritten processor",
"yield",
"(",
"annotation_type",
",",
"generate_func_call",
"(",
"'bb.make_map_value_annotation_processor'",
",",
"args",
"=",
"[",
"recursive_processor",
"]",
")",
")",
"# annotations applied directly to this type (through aliases or",
"# passed in from the caller)",
"for",
"annotation",
"in",
"itertools",
".",
"chain",
"(",
"get_custom_annotations_for_alias",
"(",
"data_type",
")",
",",
"extra_annotations",
")",
":",
"yield",
"(",
"annotation",
".",
"annotation_type",
",",
"generate_func_call",
"(",
"'bb.partially_apply'",
",",
"args",
"=",
"[",
"'processor'",
",",
"self",
".",
"_generate_custom_annotation_instance",
"(",
"ns",
",",
"annotation",
")",
"]",
")",
")"
] | Generates code that will run a custom function 'processor' on every
field with a custom annotation, no matter how deep (recursively) it
might be located in data_type (incl. in elements of lists or maps).
If extra_annotations is passed, it's assumed to be a list of custom
annotations applied directly onto data_type (e.g. because it's a field
in a struct).
Yields pairs of (annotation_type, code) where code is code that
evaluates to a function that should be executed with an instance of
data_type as the only parameter, and whose return value should replace
that instance. | [
"Generates",
"code",
"that",
"will",
"run",
"a",
"custom",
"function",
"processor",
"on",
"every",
"field",
"with",
"a",
"custom",
"annotation",
"no",
"matter",
"how",
"deep",
"(",
"recursively",
")",
"it",
"might",
"be",
"located",
"in",
"data_type",
"(",
"incl",
".",
"in",
"elements",
"of",
"lists",
"or",
"maps",
")",
".",
"If",
"extra_annotations",
"is",
"passed",
"it",
"s",
"assumed",
"to",
"be",
"a",
"list",
"of",
"custom",
"annotation",
"applied",
"directly",
"onto",
"data_type",
"(",
"e",
".",
"g",
".",
"because",
"it",
"s",
"a",
"field",
"in",
"a",
"struct",
")",
".",
"Yields",
"pairs",
"of",
"(",
"annotation_type",
"code",
")",
"where",
"code",
"is",
"code",
"that",
"evaluates",
"to",
"a",
"function",
"that",
"should",
"be",
"executed",
"with",
"an",
"instance",
"of",
"data_type",
"as",
"the",
"only",
"parameter",
"and",
"whose",
"return",
"value",
"should",
"replace",
"that",
"instance",
"."
] | python | train |
tanghaibao/goatools | goatools/gosubdag/rpt/wr_xlsx.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L51-L55 | def get_nts_sections(self, sections, sortby=None):
"""Given a list of sections containing GO IDs, get a list of sections w/GO nts."""
goids = self.get_goids_sections(sections)
gosubdag = GoSubDag(goids, self.go2obj)
return [(sec, gosubdag.get_nts(gos, sortby)) for sec, gos in sections] | [
"def",
"get_nts_sections",
"(",
"self",
",",
"sections",
",",
"sortby",
"=",
"None",
")",
":",
"goids",
"=",
"self",
".",
"get_goids_sections",
"(",
"sections",
")",
"gosubdag",
"=",
"GoSubDag",
"(",
"goids",
",",
"self",
".",
"go2obj",
")",
"return",
"[",
"(",
"sec",
",",
"gosubdag",
".",
"get_nts",
"(",
"gos",
",",
"sortby",
")",
")",
"for",
"sec",
",",
"gos",
"in",
"sections",
"]"
] | Given a list of sections containing GO IDs, get a list of sections w/GO nts. | [
"Given",
"a",
"list",
"of",
"sections",
"containing",
"GO",
"IDs",
"get",
"a",
"list",
"of",
"sections",
"w",
"/",
"GO",
"nts",
"."
] | python | train |
saltstack/salt | salt/states/memcached.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/memcached.py#L34-L101 | def managed(name,
value=None,
host=DEFAULT_HOST,
port=DEFAULT_PORT,
time=DEFAULT_TIME,
min_compress_len=DEFAULT_MIN_COMPRESS_LEN):
'''
Manage a memcached key.
name
The key to manage
value
The value to set for that key
host
The memcached server IP address
port
The memcached server port
.. code-block:: yaml
foo:
memcached.managed:
- value: bar
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
try:
cur = __salt__['memcached.get'](name, host, port)
except CommandExecutionError as exc:
ret['comment'] = six.text_type(exc)
return ret
if cur == value:
ret['result'] = True
ret['comment'] = 'Key \'{0}\' does not need to be updated'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
if cur is None:
ret['comment'] = 'Key \'{0}\' would be added'.format(name)
else:
ret['comment'] = 'Value of key \'{0}\' would be changed'.format(name)
return ret
try:
ret['result'] = __salt__['memcached.set'](
name, value, host, port, time, min_compress_len
)
except (CommandExecutionError, SaltInvocationError) as exc:
ret['comment'] = six.text_type(exc)
else:
if ret['result']:
ret['comment'] = 'Successfully set key \'{0}\''.format(name)
if cur is not None:
ret['changes'] = {'old': cur, 'new': value}
else:
ret['changes'] = {'key added': name, 'value': value}
else:
ret['comment'] = 'Failed to set key \'{0}\''.format(name)
return ret | [
"def",
"managed",
"(",
"name",
",",
"value",
"=",
"None",
",",
"host",
"=",
"DEFAULT_HOST",
",",
"port",
"=",
"DEFAULT_PORT",
",",
"time",
"=",
"DEFAULT_TIME",
",",
"min_compress_len",
"=",
"DEFAULT_MIN_COMPRESS_LEN",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"try",
":",
"cur",
"=",
"__salt__",
"[",
"'memcached.get'",
"]",
"(",
"name",
",",
"host",
",",
"port",
")",
"except",
"CommandExecutionError",
"as",
"exc",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"six",
".",
"text_type",
"(",
"exc",
")",
"return",
"ret",
"if",
"cur",
"==",
"value",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Key \\'{0}\\' does not need to be updated'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"if",
"cur",
"is",
"None",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Key \\'{0}\\' would be added'",
".",
"format",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Value of key \\'{0}\\' would be changed'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"try",
":",
"ret",
"[",
"'result'",
"]",
"=",
"__salt__",
"[",
"'memcached.set'",
"]",
"(",
"name",
",",
"value",
",",
"host",
",",
"port",
",",
"time",
",",
"min_compress_len",
")",
"except",
"(",
"CommandExecutionError",
",",
"SaltInvocationError",
")",
"as",
"exc",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"six",
".",
"text_type",
"(",
"exc",
")",
"else",
":",
"if",
"ret",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Successfully set key \\'{0}\\''",
".",
"format",
"(",
"name",
")",
"if",
"cur",
"is",
"not",
"None",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"cur",
",",
"'new'",
":",
"value",
"}",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'key added'",
":",
"name",
",",
"'value'",
":",
"value",
"}",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to set key \\'{0}\\''",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] | Manage a memcached key.
name
The key to manage
value
The value to set for that key
host
The memcached server IP address
port
The memcached server port
.. code-block:: yaml
foo:
memcached.managed:
- value: bar | [
"Manage",
"a",
"memcached",
"key",
"."
] | python | train |
keon/algorithms | algorithms/bit/subsets.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/bit/subsets.py#L21-L34 | def subsets(nums):
"""
:param nums: List[int]
:return: Set[tuple]
"""
n = len(nums)
total = 1 << n
res = set()
for i in range(total):
subset = tuple(num for j, num in enumerate(nums) if i & 1 << j)
res.add(subset)
return res | [
"def",
"subsets",
"(",
"nums",
")",
":",
"n",
"=",
"len",
"(",
"nums",
")",
"total",
"=",
"1",
"<<",
"n",
"res",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"total",
")",
":",
"subset",
"=",
"tuple",
"(",
"num",
"for",
"j",
",",
"num",
"in",
"enumerate",
"(",
"nums",
")",
"if",
"i",
"&",
"1",
"<<",
"j",
")",
"res",
".",
"add",
"(",
"subset",
")",
"return",
"res"
] | :param nums: List[int]
:return: Set[tuple] | [
":",
"param",
"nums",
":",
"List",
"[",
"int",
"]",
":",
"return",
":",
"Set",
"[",
"tuple",
"]"
] | python | train |
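A quick check of the bitmask enumeration above: with n items there are 2**n masks, and bit j of mask i decides whether nums[j] joins subset i. The comprehension below is a compact restatement of the record's function:

def subsets(nums):
    n = len(nums)
    return {tuple(num for j, num in enumerate(nums) if i & 1 << j)
            for i in range(1 << n)}

print(sorted(subsets([1, 2, 3])))
# [(), (1,), (1, 2), (1, 2, 3), (1, 3), (2,), (2, 3), (3,)]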
Bogdanp/dramatiq | dramatiq/worker.py | https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/worker.py#L102-L106 | def resume(self):
"""Resumes all the worker threads.
"""
for child in chain(self.consumers.values(), self.workers):
child.resume() | [
"def",
"resume",
"(",
"self",
")",
":",
"for",
"child",
"in",
"chain",
"(",
"self",
".",
"consumers",
".",
"values",
"(",
")",
",",
"self",
".",
"workers",
")",
":",
"child",
".",
"resume",
"(",
")"
] | Resumes all the worker threads. | [
"Resumes",
"all",
"the",
"worker",
"threads",
"."
] | python | train |
pauleveritt/kaybee | kaybee/plugins/resources/base_resource.py | https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/resources/base_resource.py#L76-L102 | def acquire(self, resources, prop_name):
""" Starting with self, walk until you find prop or None """
# Instance
custom_prop = getattr(self.props, prop_name, None)
if custom_prop:
return custom_prop
# Parents...can't use acquire as have to keep going on acquireds
for parent in self.parents(resources):
acquireds = parent.props.acquireds
if acquireds:
# First try in the per-type acquireds
rtype_acquireds = acquireds.get(self.rtype)
if rtype_acquireds:
prop_acquired = rtype_acquireds.get(prop_name)
if prop_acquired:
return prop_acquired
# Next in the "all" section of acquireds
all_acquireds = acquireds.get('all')
if all_acquireds:
prop_acquired = all_acquireds.get(prop_name)
if prop_acquired:
return prop_acquired
return | [
"def",
"acquire",
"(",
"self",
",",
"resources",
",",
"prop_name",
")",
":",
"# Instance",
"custom_prop",
"=",
"getattr",
"(",
"self",
".",
"props",
",",
"prop_name",
",",
"None",
")",
"if",
"custom_prop",
":",
"return",
"custom_prop",
"# Parents...can't use acquire as have to keep going on acquireds",
"for",
"parent",
"in",
"self",
".",
"parents",
"(",
"resources",
")",
":",
"acquireds",
"=",
"parent",
".",
"props",
".",
"acquireds",
"if",
"acquireds",
":",
"# First try in the per-type acquireds",
"rtype_acquireds",
"=",
"acquireds",
".",
"get",
"(",
"self",
".",
"rtype",
")",
"if",
"rtype_acquireds",
":",
"prop_acquired",
"=",
"rtype_acquireds",
".",
"get",
"(",
"prop_name",
")",
"if",
"prop_acquired",
":",
"return",
"prop_acquired",
"# Next in the \"all\" section of acquireds",
"all_acquireds",
"=",
"acquireds",
".",
"get",
"(",
"'all'",
")",
"if",
"all_acquireds",
":",
"prop_acquired",
"=",
"all_acquireds",
".",
"get",
"(",
"prop_name",
")",
"if",
"prop_acquired",
":",
"return",
"prop_acquired",
"return"
] | Starting with self, walk until you find prop or None | [
"Starting",
"with",
"self",
"walk",
"until",
"you",
"find",
"prop",
"or",
"None"
] | python | train |
10gen/mongo-orchestration | mongo_orchestration/apps/links.py | https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/apps/links.py#L112-L127 | def all_base_links(rel_to=None):
"""Get a list of all links to be included to base (/) API requests."""
links = [
base_link('get-releases'),
base_link('service'),
server_link('get-servers'),
server_link('add-server'),
replica_set_link('add-replica-set'),
replica_set_link('get-replica-sets'),
sharded_cluster_link('add-sharded-cluster'),
sharded_cluster_link('get-sharded-clusters')
]
for link in links:
if link['rel'] == rel_to:
link['rel'] = 'self'
return links | [
"def",
"all_base_links",
"(",
"rel_to",
"=",
"None",
")",
":",
"links",
"=",
"[",
"base_link",
"(",
"'get-releases'",
")",
",",
"base_link",
"(",
"'service'",
")",
",",
"server_link",
"(",
"'get-servers'",
")",
",",
"server_link",
"(",
"'add-server'",
")",
",",
"replica_set_link",
"(",
"'add-replica-set'",
")",
",",
"replica_set_link",
"(",
"'get-replica-sets'",
")",
",",
"sharded_cluster_link",
"(",
"'add-sharded-cluster'",
")",
",",
"sharded_cluster_link",
"(",
"'get-sharded-clusters'",
")",
"]",
"for",
"link",
"in",
"links",
":",
"if",
"link",
"[",
"'rel'",
"]",
"==",
"rel_to",
":",
"link",
"[",
"'rel'",
"]",
"=",
"'self'",
"return",
"links"
] | Get a list of all links to be included to base (/) API requests. | [
"Get",
"a",
"list",
"of",
"all",
"links",
"to",
"be",
"included",
"to",
"base",
"(",
"/",
")",
"API",
"requests",
"."
] | python | train |
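The final loop rewrites whichever link matches rel_to so that clients see it as 'self'. Its effect in isolation, with hypothetical link dicts:

links = [{'rel': 'get-servers'}, {'rel': 'add-server'}]
rel_to = 'get-servers'
for link in links:
    if link['rel'] == rel_to:
        link['rel'] = 'self'
print(links)  # [{'rel': 'self'}, {'rel': 'add-server'}]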
tcalmant/python-javaobj | javaobj/core.py | https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L941-L952 | def do_string(self, parent=None, ident=0):
"""
Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[string]", ident)
ba = JavaString(self._readString())
self._add_reference(ba, ident)
return ba | [
"def",
"do_string",
"(",
"self",
",",
"parent",
"=",
"None",
",",
"ident",
"=",
"0",
")",
":",
"log_debug",
"(",
"\"[string]\"",
",",
"ident",
")",
"ba",
"=",
"JavaString",
"(",
"self",
".",
"_readString",
"(",
")",
")",
"self",
".",
"_add_reference",
"(",
"ba",
",",
"ident",
")",
"return",
"ba"
] | Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string | [
"Handles",
"a",
"TC_STRING",
"opcode"
] | python | train |
decryptus/httpdis | httpdis/httpdis.py | https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L489-L523 | def send_error_explain(self, code, message=None, headers=None, content_type=None):
"do not use directly"
if headers is None:
headers = {}
if code in self.responses:
if message is None:
message = self.responses[code][0]
explain = self.responses[code][1]
else:
explain = ""
if message is None:
message = ""
if not isinstance(headers, dict):
headers = {}
if not content_type:
if self._cmd and self._cmd.content_type:
content_type = self._cmd.content_type
else:
content_type = self._DEFAULT_CONTENT_TYPE
if self._cmd and self._cmd.charset:
charset = self._cmd.charset
else:
charset = DEFAULT_CHARSET
headers['Content-type'] = "%s; charset=%s" % (content_type, charset)
data = self._mk_error_explain_data(code, message, explain)
self.end_response(self.build_response(code, data, headers)) | [
"def",
"send_error_explain",
"(",
"self",
",",
"code",
",",
"message",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"content_type",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"if",
"code",
"in",
"self",
".",
"responses",
":",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"self",
".",
"responses",
"[",
"code",
"]",
"[",
"0",
"]",
"explain",
"=",
"self",
".",
"responses",
"[",
"code",
"]",
"[",
"1",
"]",
"else",
":",
"explain",
"=",
"\"\"",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"\"\"",
"if",
"not",
"isinstance",
"(",
"headers",
",",
"dict",
")",
":",
"headers",
"=",
"{",
"}",
"if",
"not",
"content_type",
":",
"if",
"self",
".",
"_cmd",
"and",
"self",
".",
"_cmd",
".",
"content_type",
":",
"content_type",
"=",
"self",
".",
"_cmd",
".",
"content_type",
"else",
":",
"content_type",
"=",
"self",
".",
"_DEFAULT_CONTENT_TYPE",
"if",
"self",
".",
"_cmd",
"and",
"self",
".",
"_cmd",
".",
"charset",
":",
"charset",
"=",
"self",
".",
"_cmd",
".",
"charset",
"else",
":",
"charset",
"=",
"DEFAULT_CHARSET",
"headers",
"[",
"'Content-type'",
"]",
"=",
"\"%s; charset=%s\"",
"%",
"(",
"content_type",
",",
"charset",
")",
"data",
"=",
"self",
".",
"_mk_error_explain_data",
"(",
"code",
",",
"message",
",",
"explain",
")",
"self",
".",
"end_response",
"(",
"self",
".",
"build_response",
"(",
"code",
",",
"data",
",",
"headers",
")",
")"
] | do not use directly | [
"do",
"not",
"use",
"directly"
] | python | train |
kisom/pypcapfile | pcapfile/protocols/linklayer/ethernet.py | https://github.com/kisom/pypcapfile/blob/67520cfbb6c2e9ab3e7c181a8012ddc56ec5cad8/pcapfile/protocols/linklayer/ethernet.py#L31-L47 | def load_network(self, layers=1):
"""
Given an Ethernet frame, determine the appropriate sub-protocol;
If layers is greater than zero, determine the type of the payload
and load the appropriate type of network packet. It is expected
that the payload be a hexified string. The layers argument determines
how many layers to descend while parsing the packet.
"""
if layers:
ctor = payload_type(self.type)[0]
if ctor:
ctor = ctor
payload = self.payload
self.payload = ctor(payload, layers - 1)
else:
# if no type is found, do not touch the packet.
pass | [
"def",
"load_network",
"(",
"self",
",",
"layers",
"=",
"1",
")",
":",
"if",
"layers",
":",
"ctor",
"=",
"payload_type",
"(",
"self",
".",
"type",
")",
"[",
"0",
"]",
"if",
"ctor",
":",
"ctor",
"=",
"ctor",
"payload",
"=",
"self",
".",
"payload",
"self",
".",
"payload",
"=",
"ctor",
"(",
"payload",
",",
"layers",
"-",
"1",
")",
"else",
":",
"# if no type is found, do not touch the packet.",
"pass"
] | Given an Ethernet frame, determine the appropriate sub-protocol;
If layers is greater than zero, determine the type of the payload
and load the appropriate type of network packet. It is expected
that the payload be a hexified string. The layers argument determines
how many layers to descend while parsing the packet. | [
"Given",
"an",
"Ethernet",
"frame",
"determine",
"the",
"appropriate",
"sub",
"-",
"protocol",
";",
"If",
"layers",
"is",
"greater",
"than",
"zerol",
"determine",
"the",
"type",
"of",
"the",
"payload",
"and",
"load",
"the",
"appropriate",
"type",
"of",
"network",
"packet",
".",
"It",
"is",
"expected",
"that",
"the",
"payload",
"be",
"a",
"hexified",
"string",
".",
"The",
"layers",
"argument",
"determines",
"how",
"many",
"layers",
"to",
"descend",
"while",
"parsing",
"the",
"packet",
"."
] | python | valid |
rigetti/pyquil | pyquil/reference_simulator.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/reference_simulator.py#L120-L130 | def expectation(self, operator: Union[PauliTerm, PauliSum]):
"""
Compute the expectation of an operator.
:param operator: The operator
:return: The operator's expectation value
"""
if not isinstance(operator, PauliSum):
operator = PauliSum([operator])
return sum(_term_expectation(self.wf, term, n_qubits=self.n_qubits) for term in operator) | [
"def",
"expectation",
"(",
"self",
",",
"operator",
":",
"Union",
"[",
"PauliTerm",
",",
"PauliSum",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"operator",
",",
"PauliSum",
")",
":",
"operator",
"=",
"PauliSum",
"(",
"[",
"operator",
"]",
")",
"return",
"sum",
"(",
"_term_expectation",
"(",
"self",
".",
"wf",
",",
"term",
",",
"n_qubits",
"=",
"self",
".",
"n_qubits",
")",
"for",
"term",
"in",
"operator",
")"
] | Compute the expectation of an operator.
:param operator: The operator
:return: The operator's expectation value | [
"Compute",
"the",
"expectation",
"of",
"an",
"operator",
"."
] | python | train |
WoLpH/mailjet | mailjet/contrib/django_mailjet/forms.py | https://github.com/WoLpH/mailjet/blob/f7f5102bf52be6a4a9c62afe474387481c806e27/mailjet/contrib/django_mailjet/forms.py#L54-L68 | def list_id(self):
""" Get or create the list id. """
list_id = getattr(self, '_list_id', None)
if list_id is None:
for l in self.api.lists.all()['lists']:
if l['name'] == self.list_name:
self._list_id = l['id']
if not getattr(self, '_list_id', None):
self._list_id = self.api.lists.create(
label=self.list_label, name=self.list_name,
method='POST')['list_id']
return self._list_id | [
"def",
"list_id",
"(",
"self",
")",
":",
"list_id",
"=",
"getattr",
"(",
"self",
",",
"'_list_id'",
",",
"None",
")",
"if",
"list_id",
"is",
"None",
":",
"for",
"l",
"in",
"self",
".",
"api",
".",
"lists",
".",
"all",
"(",
")",
"[",
"'lists'",
"]",
":",
"if",
"l",
"[",
"'name'",
"]",
"==",
"self",
".",
"list_name",
":",
"self",
".",
"_list_id",
"=",
"l",
"[",
"'id'",
"]",
"if",
"not",
"getattr",
"(",
"self",
",",
"'_list_id'",
",",
"None",
")",
":",
"self",
".",
"_list_id",
"=",
"self",
".",
"api",
".",
"lists",
".",
"create",
"(",
"label",
"=",
"self",
".",
"list_label",
",",
"name",
"=",
"self",
".",
"list_name",
",",
"method",
"=",
"'POST'",
")",
"[",
"'list_id'",
"]",
"return",
"self",
".",
"_list_id"
] | Get or create the list id. | [
"Get",
"or",
"create",
"the",
"list",
"id",
"."
] | python | train |
portfors-lab/sparkle | sparkle/gui/dialogs/saving_dlg.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/saving_dlg.py#L43-L59 | def getfile(self):
"""Gets the full file path of the entered/selected file
:returns: str -- the name of the data file to open/create
"""
current_file = str(self.selectedFiles()[0])
if os.path.isfile(current_file):
print 'current_file', current_file
if current_file.endswith('.raw') or current_file.endswith('.pst'):
fmode = 'r'
else:
fmode = 'a'
else:
if not current_file.endswith('.hdf5') and not current_file.endswith('.h5'):
current_file += '.hdf5'
fmode = 'w-'
return current_file, fmode | [
"def",
"getfile",
"(",
"self",
")",
":",
"current_file",
"=",
"str",
"(",
"self",
".",
"selectedFiles",
"(",
")",
"[",
"0",
"]",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"current_file",
")",
":",
"print",
"'current_file'",
",",
"current_file",
"if",
"current_file",
".",
"endswith",
"(",
"'.raw'",
")",
"or",
"current_file",
".",
"endswith",
"(",
"'.pst'",
")",
":",
"fmode",
"=",
"'r'",
"else",
":",
"fmode",
"=",
"'a'",
"else",
":",
"if",
"not",
"current_file",
".",
"endswith",
"(",
"'.hdf5'",
")",
"and",
"not",
"current_file",
".",
"endswith",
"(",
"'.h5'",
")",
":",
"current_file",
"+=",
"'.hdf5'",
"fmode",
"=",
"'w-'",
"return",
"current_file",
",",
"fmode"
] | Gets the full file path of the entered/selected file
:returns: str -- the name of the data file to open/create | [
"Gets",
"the",
"full",
"file",
"path",
"of",
"the",
"entered",
"/",
"selected",
"file"
] | python | train |
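The branching above amounts to picking an h5py-style file mode: read-only for legacy .raw/.pst files, append for an existing HDF5 file, exclusive-create ('w-') otherwise. A standalone sketch of just that decision (the function name and sample path are illustrative, and the printed result assumes no such file exists):

import os

def pick_file_mode(current_file):
    if os.path.isfile(current_file):
        # existing legacy files are read-only; existing HDF5 files are appended to
        fmode = 'r' if current_file.endswith(('.raw', '.pst')) else 'a'
    else:
        if not current_file.endswith(('.hdf5', '.h5')):
            current_file += '.hdf5'
        fmode = 'w-'  # create, failing if the file already exists
    return current_file, fmode

print(pick_file_mode('session1'))  # ('session1.hdf5', 'w-')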
cokelaer/spectrum | src/spectrum/mtm.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/mtm.py#L359-L418 | def _other_dpss_method(N, NW, Kmax):
"""Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
See the dpss function, which is the official version. This version is independent
of the C code and relies on SciPy functions. However, it is slower by a factor of 3.
Tridiagonal form of DPSS calculation from:
"""
# here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian sequences,
# or discrete prolate spheroidal sequences (DPSS). Only the first K,
# K = 2NW/dt orders of DPSS will exhibit good spectral concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here I set up an alternative symmetric tri-diagonal eigenvalue problem
# such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
from scipy import linalg as la
Kmax = int(Kmax)
W = float(NW)/N
ab = np.zeros((2,N), 'd')
nidx = np.arange(N)
ab[0,1:] = nidx[1:]*(N-nidx[1:])/2.
ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W)
# only calculate the highest Kmax-1 eigenvectors
l,v = la.eig_banded(ab, select='i', select_range=(N-Kmax, N-1))
dpss = v.transpose()[::-1]
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2*i] *= -1
fix_skew = (dpss[1::2,1] < 0)
for i, f in enumerate(fix_skew):
if f:
dpss[2*i+1] *= -1
# Now find the eigenvalues of the original
# Use the autocovariance sequence technique from Percival and Walden, 1993
# pg 390
# XXX : why debias false? it's all messed up o.w., even with means
# on the order of 1e-2
acvs = _autocov(dpss, debias=False) * N
r = 4*W*np.sinc(2*W*nidx)
r[0] = 2*W
eigvals = np.dot(acvs, r)
return dpss, eigvals | [
"def",
"_other_dpss_method",
"(",
"N",
",",
"NW",
",",
"Kmax",
")",
":",
"# here we want to set up an optimization problem to find a sequence",
"# whose energy is maximally concentrated within band [-W,W].",
"# Thus, the measure lambda(T,W) is the ratio between the energy within",
"# that band, and the total energy. This leads to the eigen-system",
"# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest",
"# eigenvalue is the sequence with maximally concentrated energy. The",
"# collection of eigenvectors of this system are called Slepian sequences,",
"# or discrete prolate spheroidal sequences (DPSS). Only the first K,",
"# K = 2NW/dt orders of DPSS will exhibit good spectral concentration",
"# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]",
"# Here I set up an alternative symmetric tri-diagonal eigenvalue problem",
"# such that",
"# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)",
"# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]",
"# and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1]",
"# [see Percival and Walden, 1993]",
"from",
"scipy",
"import",
"linalg",
"as",
"la",
"Kmax",
"=",
"int",
"(",
"Kmax",
")",
"W",
"=",
"float",
"(",
"NW",
")",
"/",
"N",
"ab",
"=",
"np",
".",
"zeros",
"(",
"(",
"2",
",",
"N",
")",
",",
"'d'",
")",
"nidx",
"=",
"np",
".",
"arange",
"(",
"N",
")",
"ab",
"[",
"0",
",",
"1",
":",
"]",
"=",
"nidx",
"[",
"1",
":",
"]",
"*",
"(",
"N",
"-",
"nidx",
"[",
"1",
":",
"]",
")",
"/",
"2.",
"ab",
"[",
"1",
"]",
"=",
"(",
"(",
"N",
"-",
"1",
"-",
"2",
"*",
"nidx",
")",
"/",
"2.",
")",
"**",
"2",
"*",
"np",
".",
"cos",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"W",
")",
"# only calculate the highest Kmax-1 eigenvectors",
"l",
",",
"v",
"=",
"la",
".",
"eig_banded",
"(",
"ab",
",",
"select",
"=",
"'i'",
",",
"select_range",
"=",
"(",
"N",
"-",
"Kmax",
",",
"N",
"-",
"1",
")",
")",
"dpss",
"=",
"v",
".",
"transpose",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"# By convention (Percival and Walden, 1993 pg 379)",
"# * symmetric tapers (k=0,2,4,...) should have a positive average.",
"# * antisymmetric tapers should begin with a positive lobe",
"fix_symmetric",
"=",
"(",
"dpss",
"[",
"0",
":",
":",
"2",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"<",
"0",
")",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fix_symmetric",
")",
":",
"if",
"f",
":",
"dpss",
"[",
"2",
"*",
"i",
"]",
"*=",
"-",
"1",
"fix_skew",
"=",
"(",
"dpss",
"[",
"1",
":",
":",
"2",
",",
"1",
"]",
"<",
"0",
")",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fix_skew",
")",
":",
"if",
"f",
":",
"dpss",
"[",
"2",
"*",
"i",
"+",
"1",
"]",
"*=",
"-",
"1",
"# Now find the eigenvalues of the original",
"# Use the autocovariance sequence technique from Percival and Walden, 1993",
"# pg 390",
"# XXX : why debias false? it's all messed up o.w., even with means",
"# on the order of 1e-2",
"acvs",
"=",
"_autocov",
"(",
"dpss",
",",
"debias",
"=",
"False",
")",
"*",
"N",
"r",
"=",
"4",
"*",
"W",
"*",
"np",
".",
"sinc",
"(",
"2",
"*",
"W",
"*",
"nidx",
")",
"r",
"[",
"0",
"]",
"=",
"2",
"*",
"W",
"eigvals",
"=",
"np",
".",
"dot",
"(",
"acvs",
",",
"r",
")",
"return",
"dpss",
",",
"eigvals"
] | Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
See the dpss function, which is the official version. This version is independent
of the C code and relies on SciPy functions. However, it is slower by a factor of 3.
Tridiagonal form of DPSS calculation from: | [
"Returns",
"the",
"Discrete",
"Prolate",
"Spheroidal",
"Sequences",
"of",
"orders",
"[",
"0",
"Kmax",
"-",
"1",
"]",
"for",
"a",
"given",
"frequency",
"-",
"spacing",
"multiple",
"NW",
"and",
"sequence",
"length",
"N",
"."
] | python | valid |
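As a point of comparison, SciPy has since gained an equivalent routine (scipy.signal.windows.dpss, available from SciPy 1.1), which makes a quick cross-check of this implementation straightforward:

from scipy.signal.windows import dpss

tapers, ratios = dpss(64, 2.5, Kmax=4, return_ratios=True)
print(tapers.shape, ratios.shape)  # (4, 64) (4,)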
vtkiorg/vtki | vtki/utilities.py | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L263-L283 | def is_inside_bounds(point, bounds):
""" Checks if a point is inside a set of bounds. This is implemented
through recursion so that this is N-dimensional.
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, collections.Iterable) and not isinstance(point, collections.deque):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
raise AssertionError('Bounds mismatch point dimensionality')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError('Unknown input data type ({}).'.format(type(point)))
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False | [
"def",
"is_inside_bounds",
"(",
"point",
",",
"bounds",
")",
":",
"if",
"isinstance",
"(",
"point",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"point",
"=",
"[",
"point",
"]",
"if",
"isinstance",
"(",
"point",
",",
"collections",
".",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"point",
",",
"collections",
".",
"deque",
")",
":",
"if",
"len",
"(",
"bounds",
")",
"<",
"2",
"*",
"len",
"(",
"point",
")",
"or",
"len",
"(",
"bounds",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"AssertionError",
"(",
"'Bounds mismatch point dimensionality'",
")",
"point",
"=",
"collections",
".",
"deque",
"(",
"point",
")",
"bounds",
"=",
"collections",
".",
"deque",
"(",
"bounds",
")",
"return",
"is_inside_bounds",
"(",
"point",
",",
"bounds",
")",
"if",
"not",
"isinstance",
"(",
"point",
",",
"collections",
".",
"deque",
")",
":",
"raise",
"TypeError",
"(",
"'Unknown input data type ({}).'",
".",
"format",
"(",
"type",
"(",
"point",
")",
")",
")",
"if",
"len",
"(",
"point",
")",
"<",
"1",
":",
"return",
"True",
"p",
"=",
"point",
".",
"popleft",
"(",
")",
"lower",
",",
"upper",
"=",
"bounds",
".",
"popleft",
"(",
")",
",",
"bounds",
".",
"popleft",
"(",
")",
"if",
"lower",
"<=",
"p",
"<=",
"upper",
":",
"return",
"is_inside_bounds",
"(",
"point",
",",
"bounds",
")",
"return",
"False"
] | Checks if a point is inside a set of bounds. This is implemented
through recursion so that this is N-dimensional. | [
"Checks",
"if",
"a",
"point",
"is",
"inside",
"a",
"set",
"of",
"bounds",
".",
"This",
"is",
"implemented",
"through",
"recursion",
"so",
"that",
"this",
"is",
"N",
"-",
"dimensional",
"."
] | python | train |
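The deque-based recursion above can be flattened into an equivalent loop; a self-contained sketch for iterable points, with the scalar-wrapping and validation checks omitted:

from collections import deque

def is_inside_bounds(point, bounds):
    point, bounds = deque(point), deque(bounds)
    while point:
        p = point.popleft()
        lower, upper = bounds.popleft(), bounds.popleft()
        if not (lower <= p <= upper):
            return False
    return True

bounds = (0.0, 1.0, 0.0, 2.0)  # (xmin, xmax, ymin, ymax)
print(is_inside_bounds((0.5, 1.5), bounds))  # True
print(is_inside_bounds((0.5, 2.5), bounds))  # False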
saltstack/salt | salt/proxy/fx2.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/fx2.py#L351-L368 | def ping():
'''
Is the chassis responding?
:return: Returns False if the chassis didn't respond, True otherwise.
'''
r = __salt__['dracr.system_info'](host=DETAILS['host'],
admin_username=DETAILS['admin_username'],
admin_password=DETAILS['admin_password'])
if r.get('retcode', 0) == 1:
return False
else:
return True
try:
return r['dict'].get('ret', False)
except Exception:
return False | [
"def",
"ping",
"(",
")",
":",
"r",
"=",
"__salt__",
"[",
"'dracr.system_info'",
"]",
"(",
"host",
"=",
"DETAILS",
"[",
"'host'",
"]",
",",
"admin_username",
"=",
"DETAILS",
"[",
"'admin_username'",
"]",
",",
"admin_password",
"=",
"DETAILS",
"[",
"'admin_password'",
"]",
")",
"if",
"r",
".",
"get",
"(",
"'retcode'",
",",
"0",
")",
"==",
"1",
":",
"return",
"False",
"else",
":",
"return",
"True",
"try",
":",
"return",
"r",
"[",
"'dict'",
"]",
".",
"get",
"(",
"'ret'",
",",
"False",
")",
"except",
"Exception",
":",
"return",
"False"
] | Is the chassis responding?
:return: Returns False if the chassis didn't respond, True otherwise. | [
"Is",
"the",
"chassis",
"responding?"
] | python | train |
matthiask/django-cte-forest | cte_forest/models.py | https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L335-L346 | def branches(self):
""" Returns a :class:`QuerySet` of all branch nodes (nodes with at least
one child).
:return: A :class:`QuerySet` of all branch nodes (nodes with at least
one child).
"""
# We need to read the _cte_node_children attribute, so ensure it exists.
self._ensure_parameters()
return self.filter(
**{"%s__id__in" % self.model._cte_node_children: self.all()}
).distinct() | [
"def",
"branches",
"(",
"self",
")",
":",
"# We need to read the _cte_node_children attribute, so ensure it exists.",
"self",
".",
"_ensure_parameters",
"(",
")",
"return",
"self",
".",
"filter",
"(",
"*",
"*",
"{",
"\"%s__id__in\"",
"%",
"self",
".",
"model",
".",
"_cte_node_children",
":",
"self",
".",
"all",
"(",
")",
"}",
")",
".",
"distinct",
"(",
")"
] | Returns a :class:`QuerySet` of all branch nodes (nodes with at least
one child).
:return: A :class:`QuerySet` of all branch nodes (nodes with at least
one child). | [
"Returns",
"a",
":",
"class",
":",
"QuerySet",
"of",
"all",
"branch",
"nodes",
"(",
"nodes",
"with",
"at",
"least",
"one",
"child",
")",
"."
] | python | train |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L181-L195 | def split_header(fp):
"""
Read file pointer and return pair of lines lists:
first - header, second - the rest.
"""
body_start, header_ended = 0, False
lines = []
for line in fp:
if line.startswith('#') and not header_ended:
# Header text
body_start += 1
else:
header_ended = True
lines.append(line)
return lines[:body_start], lines[body_start:] | [
"def",
"split_header",
"(",
"fp",
")",
":",
"body_start",
",",
"header_ended",
"=",
"0",
",",
"False",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"fp",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
"and",
"not",
"header_ended",
":",
"# Header text",
"body_start",
"+=",
"1",
"else",
":",
"header_ended",
"=",
"True",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines",
"[",
":",
"body_start",
"]",
",",
"lines",
"[",
"body_start",
":",
"]"
] | Read file pointer and return pair of lines lists:
first - header, second - the rest. | [
"Read",
"file",
"pointer",
"and",
"return",
"pair",
"of",
"lines",
"lists",
":",
"first",
"-",
"header",
"second",
"-",
"the",
"rest",
"."
] | python | train |
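With the function defined exactly as above, feeding it a typical pip-compile output shows the split landing on the first non-comment line (the file contents below are hypothetical):

import io

reqs = io.StringIO('# generated\n# by pip-compile\nrequests==2.0\n')
header, body = split_header(reqs)
print(header)  # ['# generated\n', '# by pip-compile\n']
print(body)    # ['requests==2.0\n']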
PyCQA/pylint | pylint/checkers/typecheck.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/typecheck.py#L1434-L1439 | def visit_unaryop(self, node):
"""Detect TypeErrors for unary operands."""
for error in node.type_errors():
# Let the error customize its output.
self.add_message("invalid-unary-operand-type", args=str(error), node=node) | [
"def",
"visit_unaryop",
"(",
"self",
",",
"node",
")",
":",
"for",
"error",
"in",
"node",
".",
"type_errors",
"(",
")",
":",
"# Let the error customize its output.",
"self",
".",
"add_message",
"(",
"\"invalid-unary-operand-type\"",
",",
"args",
"=",
"str",
"(",
"error",
")",
",",
"node",
"=",
"node",
")"
] | Detect TypeErrors for unary operands. | [
"Detect",
"TypeErrors",
"for",
"unary",
"operands",
"."
] | python | test |
littlemo/moear-spider-zhihudaily | moear_spider_zhihudaily/spiders/zhihu_daily.py | https://github.com/littlemo/moear-spider-zhihudaily/blob/1e4e60b547afe3e2fbb3bbcb7d07a75dca608149/moear_spider_zhihudaily/spiders/zhihu_daily.py#L126-L166 | def parse_post(self, response):
'''
Fetch the article content from the concrete article URLs generated in
:meth:`.ZhihuDailySpider.parse`, format it, and fill the results into the object attribute ``item_list``
:param Response response: the request Response object passed in by ``Scrapy``
'''
content = json.loads(response.body.decode(), encoding='UTF-8')
post = response.meta['post']
post['origin_url'] = content.get('share_url', '')
if not all([post['origin_url']]):
raise ValueError('原文地址为空')
post['title'] = html.escape(content.get('title', ''))
if not all([post['title']]):
raise ValueError('文章标题为空 - {}'.format(post.get('origin_url')))
# Handle type == 1 separately: the article is reposted from an external site
if content.get('type') == 1:
self.logger.warn('遇到站外文章,单独处理 - {}'.format(post['title']))
return post
soup = BeautifulSoup(content.get('body', ''), 'lxml')
author_obj = soup.select('span.author')
self.logger.debug(author_obj)
if author_obj:
author_list = []
for author in author_obj:
author_list.append(
author.string.rstrip(',, ').replace(',', ','))
author_list = list(set(author_list))
post['author'] = html.escape(','.join(author_list))
post['content'] = str(soup.div)
# Continue filling the post data
image_back = content.get('images', [None])[0]
if image_back:
post['meta']['moear.cover_image_slug'] = \
content.get('image', image_back)
self.logger.debug(post) | [
"def",
"parse_post",
"(",
"self",
",",
"response",
")",
":",
"content",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"body",
".",
"decode",
"(",
")",
",",
"encoding",
"=",
"'UTF-8'",
")",
"post",
"=",
"response",
".",
"meta",
"[",
"'post'",
"]",
"post",
"[",
"'origin_url'",
"]",
"=",
"content",
".",
"get",
"(",
"'share_url'",
",",
"''",
")",
"if",
"not",
"all",
"(",
"[",
"post",
"[",
"'origin_url'",
"]",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'原文地址为空')",
"",
"post",
"[",
"'title'",
"]",
"=",
"html",
".",
"escape",
"(",
"content",
".",
"get",
"(",
"'title'",
",",
"''",
")",
")",
"if",
"not",
"all",
"(",
"[",
"post",
"[",
"'title'",
"]",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'文章标题为空 - {}'.format(post",
".",
"get('o",
"r",
"igin",
"_",
"url",
"'",
")))",
"",
"",
"",
"# 单独处理type字段为1的情况,即该文章为站外转发文章",
"if",
"content",
".",
"get",
"(",
"'type'",
")",
"==",
"1",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"'遇到站外文章,单独处理 - {}'.format(post['title'])",
")",
"",
"",
"",
"",
"",
"",
"",
"",
"return",
"post",
"soup",
"=",
"BeautifulSoup",
"(",
"content",
".",
"get",
"(",
"'body'",
",",
"''",
")",
",",
"'lxml'",
")",
"author_obj",
"=",
"soup",
".",
"select",
"(",
"'span.author'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"author_obj",
")",
"if",
"author_obj",
":",
"author_list",
"=",
"[",
"]",
"for",
"author",
"in",
"author_obj",
":",
"author_list",
".",
"append",
"(",
"author",
".",
"string",
".",
"rstrip",
"(",
"',, ').",
"r",
"e",
"place('",
",",
"', ',",
"'",
")",
"",
"",
"author_list",
"=",
"list",
"(",
"set",
"(",
"author_list",
")",
")",
"post",
"[",
"'author'",
"]",
"=",
"html",
".",
"escape",
"(",
"','.j",
"o",
"in(a",
"u",
"thor_list))",
"",
"",
"post",
"[",
"'content'",
"]",
"=",
"str",
"(",
"soup",
".",
"div",
")",
"# 继续填充post数据",
"image_back",
"=",
"content",
".",
"get",
"(",
"'images'",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"if",
"image_back",
":",
"post",
"[",
"'meta'",
"]",
"[",
"'moear.cover_image_slug'",
"]",
"=",
"content",
".",
"get",
"(",
"'image'",
",",
"image_back",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"post",
")"
] | Fetch the article content from the concrete article URLs generated in
:meth:`.ZhihuDailySpider.parse`, format it, and fill the results into the object attribute ``item_list``
:param Response response: the request Response object passed in by ``Scrapy`` | [
"根据",
":",
"meth",
":",
".",
"ZhihuDailySpider",
".",
"parse",
"中生成的具体文章地址,获取到文章内容,",
"并对其进行格式化处理,结果填充到对象属性",
"item_list",
"中"
] | python | train |
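The author clean-up step above (strip trailing fullwidth/ASCII commas and spaces, normalise fullwidth commas, then dedupe) can be exercised on its own; this requires bs4 with lxml installed, and the markup below is made up:

from bs4 import BeautifulSoup

body = '<div><span class="author">张三,</span><span class="author">李四 </span></div>'
soup = BeautifulSoup(body, 'lxml')
authors = {a.string.rstrip(',, ').replace(',', ',')
           for a in soup.select('span.author')}
print(','.join(sorted(authors)))  # 张三,李四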
SoCo/SoCo | soco/services.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/services.py#L373-L424 | def build_command(self, action, args=None):
"""Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending.
"""
# A complete request should look something like this:
# POST path of control URL HTTP/1.1
# HOST: host of control URL:port of control URL
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName"
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionName
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>in arg value</argumentName>
# ... other in args and their values go here, if any
# </u:actionName>
# </s:Body>
# </s:Envelope>
arguments = self.wrap_arguments(args)
body = self.soap_body_template.format(
arguments=arguments, action=action, service_type=self.service_type,
version=self.version)
soap_action_template = \
"urn:schemas-upnp-org:service:{service_type}:{version}#{action}"
soap_action = soap_action_template.format(
service_type=self.service_type, version=self.version,
action=action)
headers = {'Content-Type': 'text/xml; charset="utf-8"',
'SOAPACTION': soap_action}
# Note that although we set the charset to utf-8 here, in fact the
# body is still unicode. It will only be converted to bytes when it
# is sent over the network
return (headers, body) | [
"def",
"build_command",
"(",
"self",
",",
"action",
",",
"args",
"=",
"None",
")",
":",
"# A complete request should look something like this:",
"# POST path of control URL HTTP/1.1",
"# HOST: host of control URL:port of control URL",
"# CONTENT-LENGTH: bytes in body",
"# CONTENT-TYPE: text/xml; charset=\"utf-8\"",
"# SOAPACTION: \"urn:schemas-upnp-org:service:serviceType:v#actionName\"",
"#",
"# <?xml version=\"1.0\"?>",
"# <s:Envelope",
"# xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\"",
"# s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">",
"# <s:Body>",
"# <u:actionName",
"# xmlns:u=\"urn:schemas-upnp-org:service:serviceType:v\">",
"# <argumentName>in arg value</argumentName>",
"# ... other in args and their values go here, if any",
"# </u:actionName>",
"# </s:Body>",
"# </s:Envelope>",
"arguments",
"=",
"self",
".",
"wrap_arguments",
"(",
"args",
")",
"body",
"=",
"self",
".",
"soap_body_template",
".",
"format",
"(",
"arguments",
"=",
"arguments",
",",
"action",
"=",
"action",
",",
"service_type",
"=",
"self",
".",
"service_type",
",",
"version",
"=",
"self",
".",
"version",
")",
"soap_action_template",
"=",
"\"urn:schemas-upnp-org:service:{service_type}:{version}#{action}\"",
"soap_action",
"=",
"soap_action_template",
".",
"format",
"(",
"service_type",
"=",
"self",
".",
"service_type",
",",
"version",
"=",
"self",
".",
"version",
",",
"action",
"=",
"action",
")",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'text/xml; charset=\"utf-8\"'",
",",
"'SOAPACTION'",
":",
"soap_action",
"}",
"# Note that although we set the charset to utf-8 here, in fact the",
"# body is still unicode. It will only be converted to bytes when it",
"# is set over the network",
"return",
"(",
"headers",
",",
"body",
")"
] | Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending. | [
"Build",
"a",
"SOAP",
"request",
"."
] | python | train |
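How the SOAPACTION header comes out for one concrete call; AVTransport/Play are illustrative UPnP values, not something this record asserts:

service_type, version, action = 'AVTransport', 1, 'Play'
soap_action = ('urn:schemas-upnp-org:service:'
               '{service_type}:{version}#{action}').format(
    service_type=service_type, version=version, action=action)
print(soap_action)  # urn:schemas-upnp-org:service:AVTransport:1#Play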
spacetelescope/drizzlepac | drizzlepac/imageObject.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L1151-L1168 | def compute_wcslin(self,undistort=True):
""" Compute the undistorted WCS based solely on the known distortion
model information associated with the WCS.
"""
for chip in range(1,self._numchips+1,1):
sci_chip = self._image[self.scienceExt,chip]
chip_wcs = sci_chip.wcs.copy()
if chip_wcs.sip is None or not undistort or chip_wcs.instrument=='DEFAULT':
chip_wcs.sip = None
chip_wcs.cpdis1 = None
chip_wcs.cpdis2 = None
chip_wcs.det2im = None
undistort=False
# compute the undistorted 'natural' plate scale for this chip
wcslin = distortion.utils.output_wcs([chip_wcs],undistort=undistort)
sci_chip.wcslin_pscale = wcslin.pscale | [
"def",
"compute_wcslin",
"(",
"self",
",",
"undistort",
"=",
"True",
")",
":",
"for",
"chip",
"in",
"range",
"(",
"1",
",",
"self",
".",
"_numchips",
"+",
"1",
",",
"1",
")",
":",
"sci_chip",
"=",
"self",
".",
"_image",
"[",
"self",
".",
"scienceExt",
",",
"chip",
"]",
"chip_wcs",
"=",
"sci_chip",
".",
"wcs",
".",
"copy",
"(",
")",
"if",
"chip_wcs",
".",
"sip",
"is",
"None",
"or",
"not",
"undistort",
"or",
"chip_wcs",
".",
"instrument",
"==",
"'DEFAULT'",
":",
"chip_wcs",
".",
"sip",
"=",
"None",
"chip_wcs",
".",
"cpdis1",
"=",
"None",
"chip_wcs",
".",
"cpdis2",
"=",
"None",
"chip_wcs",
".",
"det2im",
"=",
"None",
"undistort",
"=",
"False",
"# compute the undistorted 'natural' plate scale for this chip",
"wcslin",
"=",
"distortion",
".",
"utils",
".",
"output_wcs",
"(",
"[",
"chip_wcs",
"]",
",",
"undistort",
"=",
"undistort",
")",
"sci_chip",
".",
"wcslin_pscale",
"=",
"wcslin",
".",
"pscale"
] | Compute the undistorted WCS based solely on the known distortion
model information associated with the WCS. | [
"Compute",
"the",
"undistorted",
"WCS",
"based",
"solely",
"on",
"the",
"known",
"distortion",
"model",
"information",
"associated",
"with",
"the",
"WCS",
"."
] | python | train |
biocore/mustached-octo-ironman | moi/job.py | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L57-L80 | def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status | [
"def",
"_status_change",
"(",
"id",
",",
"new_status",
")",
":",
"job_info",
"=",
"json",
".",
"loads",
"(",
"r_client",
".",
"get",
"(",
"id",
")",
")",
"old_status",
"=",
"job_info",
"[",
"'status'",
"]",
"job_info",
"[",
"'status'",
"]",
"=",
"new_status",
"_deposit_payload",
"(",
"job_info",
")",
"return",
"old_status"
] | Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status | [
"Update",
"the",
"status",
"of",
"a",
"job"
] | python | train |
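The read-modify-write cycle above, reduced to its shape with a plain dict standing in for the redis client (r_client) and for the pubsub deposit:

import json

store = {'job-1': json.dumps({'status': 'Queued'})}

def status_change(job_id, new_status):
    job_info = json.loads(store[job_id])
    old_status, job_info['status'] = job_info['status'], new_status
    store[job_id] = json.dumps(job_info)  # stands in for _deposit_payload
    return old_status

print(status_change('job-1', 'Running'))  # Queued
print(json.loads(store['job-1']))         # {'status': 'Running'}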
Jajcus/pyxmpp2 | pyxmpp2/ext/disco.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/disco.py#L687-L697 | def add_feature(self,var):
"""Add a feature to `self`.
:Parameters:
- `var`: the feature name.
:Types:
- `var`: `unicode`"""
if self.has_feature(var):
return
n=self.xmlnode.newChild(None, "feature", None)
n.setProp("var", to_utf8(var)) | [
"def",
"add_feature",
"(",
"self",
",",
"var",
")",
":",
"if",
"self",
".",
"has_feature",
"(",
"var",
")",
":",
"return",
"n",
"=",
"self",
".",
"xmlnode",
".",
"newChild",
"(",
"None",
",",
"\"feature\"",
",",
"None",
")",
"n",
".",
"setProp",
"(",
"\"var\"",
",",
"to_utf8",
"(",
"var",
")",
")"
] | Add a feature to `self`.
:Parameters:
- `var`: the feature name.
:Types:
- `var`: `unicode` | [
"Add",
"a",
"feature",
"to",
"self",
"."
] | python | valid |
inveniosoftware/invenio-search | invenio_search/utils.py | https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/invenio_search/utils.py#L39-L60 | def schema_to_index(schema, index_names=None):
"""Get index/doc_type given a schema URL.
:param schema: The schema name
:param index_names: A list of index name.
:returns: A tuple containing (index, doc_type).
"""
parts = schema.split('/')
doc_type = os.path.splitext(parts[-1])
if doc_type[1] not in {'.json', }:
return (None, None)
if index_names is None:
return (build_index_name(current_app, *parts), doc_type[0])
for start in range(len(parts)):
index_name = build_index_name(current_app, *parts[start:])
if index_name in index_names:
return (index_name, doc_type[0])
return (None, None) | [
"def",
"schema_to_index",
"(",
"schema",
",",
"index_names",
"=",
"None",
")",
":",
"parts",
"=",
"schema",
".",
"split",
"(",
"'/'",
")",
"doc_type",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"parts",
"[",
"-",
"1",
"]",
")",
"if",
"doc_type",
"[",
"1",
"]",
"not",
"in",
"{",
"'.json'",
",",
"}",
":",
"return",
"(",
"None",
",",
"None",
")",
"if",
"index_names",
"is",
"None",
":",
"return",
"(",
"build_index_name",
"(",
"current_app",
",",
"*",
"parts",
")",
",",
"doc_type",
"[",
"0",
"]",
")",
"for",
"start",
"in",
"range",
"(",
"len",
"(",
"parts",
")",
")",
":",
"index_name",
"=",
"build_index_name",
"(",
"current_app",
",",
"*",
"parts",
"[",
"start",
":",
"]",
")",
"if",
"index_name",
"in",
"index_names",
":",
"return",
"(",
"index_name",
",",
"doc_type",
"[",
"0",
"]",
")",
"return",
"(",
"None",
",",
"None",
")"
] | Get index/doc_type given a schema URL.
:param schema: The schema name
:param index_names: A list of index name.
:returns: A tuple containing (index, doc_type). | [
"Get",
"index",
"/",
"doc_type",
"given",
"a",
"schema",
"URL",
"."
] | python | train |
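A rough standalone approximation of the suffix search: try progressively shorter tails of the schema path until one names a known index. build_index_name is app-configurable in Invenio; joining path parts with '-' and dropping the extension, as below, is only an assumption made for illustration:

import os

def schema_to_index(schema, index_names):
    parts = schema.split('/')
    name, ext = os.path.splitext(parts[-1])
    if ext != '.json':
        return (None, None)
    for start in range(len(parts)):
        candidate = '-'.join(parts[start:])[:-len('.json')]  # stand-in for build_index_name
        if candidate in index_names:
            return (candidate, name)
    return (None, None)

print(schema_to_index('records/record-v1.0.0.json', {'records-record-v1.0.0'}))
# ('records-record-v1.0.0', 'record-v1.0.0')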
santoshphilip/eppy | eppy/modeleditor.py | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L636-L652 | def initreadtxt(self, idftxt):
"""
Use the current IDD and read an IDF from text data. If the IDD has not
yet been initialised then this is done first.
Parameters
----------
idftxt : str
Text representing an IDF file.
"""
iddfhandle = StringIO(iddcurrent.iddtxt)
if self.getiddname() == None:
self.setiddname(iddfhandle)
idfhandle = StringIO(idftxt)
self.idfname = idfhandle
self.read() | [
"def",
"initreadtxt",
"(",
"self",
",",
"idftxt",
")",
":",
"iddfhandle",
"=",
"StringIO",
"(",
"iddcurrent",
".",
"iddtxt",
")",
"if",
"self",
".",
"getiddname",
"(",
")",
"==",
"None",
":",
"self",
".",
"setiddname",
"(",
"iddfhandle",
")",
"idfhandle",
"=",
"StringIO",
"(",
"idftxt",
")",
"self",
".",
"idfname",
"=",
"idfhandle",
"self",
".",
"read",
"(",
")"
] | Use the current IDD and read an IDF from text data. If the IDD has not
yet been initialised then this is done first.
Parameters
----------
idftxt : str
Text representing an IDF file. | [
"Use",
"the",
"current",
"IDD",
"and",
"read",
"an",
"IDF",
"from",
"text",
"data",
".",
"If",
"the",
"IDD",
"has",
"not",
"yet",
"been",
"initialised",
"then",
"this",
"is",
"done",
"first",
"."
] | python | train |
hydpy-dev/hydpy | hydpy/models/hland/hland_model.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hland/hland_model.py#L437-L523 | def calc_tf_ic_v1(self):
"""Calculate throughfall and update the interception storage
accordingly.
Required control parameters:
|NmbZones|
|ZoneType|
|IcMax|
Required flux sequences:
|PC|
Calculated fluxes sequences:
|TF|
Updated state sequence:
|Ic|
Basic equation:
:math:`TF = \\Bigl \\lbrace
{
{PC \\ | \\ Ic = IcMax}
\\atop
{0 \\ | \\ Ic < IcMax}
}`
Examples:
Initialize six zones of different types. Assume a
general maximum interception capacity of 2 mm. All zones receive
a 0.5 mm input of precipitation:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(6)
>>> zonetype(GLACIER, ILAKE, FIELD, FOREST, FIELD, FIELD)
>>> icmax(2.0)
>>> fluxes.pc = 0.5
>>> states.ic = 0.0, 0.0, 0.0, 0.0, 1.0, 2.0
>>> model.calc_tf_ic_v1()
For glaciers (first zone) and internal lakes (second zone) the
interception routine does not apply. Hence, all precipitation is
routed as throughfall. For fields and forests, the interception
routine is identical (usually, only larger capacities for forests
are assumed, due to their higher leaf area index). Hence, the
results of the third and the fourth zone are equal. The last
three zones demonstrate that all precipitation is stored until
the interception capacity is reached; afterwards, all precipitation
is routed as throughfall. Initial storage reduces the effective
capacity of the respective simulation step:
>>> states.ic
ic(0.0, 0.0, 0.5, 0.5, 1.5, 2.0)
>>> fluxes.tf
tf(0.5, 0.5, 0.0, 0.0, 0.0, 0.5)
A zero precipitation example:
>>> fluxes.pc = 0.0
>>> states.ic = 0.0, 0.0, 0.0, 0.0, 1.0, 2.0
>>> model.calc_tf_ic_v1()
>>> states.ic
ic(0.0, 0.0, 0.0, 0.0, 1.0, 2.0)
>>> fluxes.tf
tf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
A high precipitation example:
>>> fluxes.pc = 5.0
>>> states.ic = 0.0, 0.0, 0.0, 0.0, 1.0, 2.0
>>> model.calc_tf_ic_v1()
>>> states.ic
ic(0.0, 0.0, 2.0, 2.0, 2.0, 2.0)
>>> fluxes.tf
tf(5.0, 5.0, 3.0, 3.0, 4.0, 5.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nmbzones):
if con.zonetype[k] in (FIELD, FOREST):
flu.tf[k] = max(flu.pc[k]-(con.icmax[k]-sta.ic[k]), 0.)
sta.ic[k] += flu.pc[k]-flu.tf[k]
else:
flu.tf[k] = flu.pc[k]
sta.ic[k] = 0. | [
"def",
"calc_tf_ic_v1",
"(",
"self",
")",
":",
"con",
"=",
"self",
".",
"parameters",
".",
"control",
".",
"fastaccess",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"sta",
"=",
"self",
".",
"sequences",
".",
"states",
".",
"fastaccess",
"for",
"k",
"in",
"range",
"(",
"con",
".",
"nmbzones",
")",
":",
"if",
"con",
".",
"zonetype",
"[",
"k",
"]",
"in",
"(",
"FIELD",
",",
"FOREST",
")",
":",
"flu",
".",
"tf",
"[",
"k",
"]",
"=",
"max",
"(",
"flu",
".",
"pc",
"[",
"k",
"]",
"-",
"(",
"con",
".",
"icmax",
"[",
"k",
"]",
"-",
"sta",
".",
"ic",
"[",
"k",
"]",
")",
",",
"0.",
")",
"sta",
".",
"ic",
"[",
"k",
"]",
"+=",
"flu",
".",
"pc",
"[",
"k",
"]",
"-",
"flu",
".",
"tf",
"[",
"k",
"]",
"else",
":",
"flu",
".",
"tf",
"[",
"k",
"]",
"=",
"flu",
".",
"pc",
"[",
"k",
"]",
"sta",
".",
"ic",
"[",
"k",
"]",
"=",
"0."
] | Calculate throughfall and update the interception storage
accordingly.
Required control parameters:
|NmbZones|
|ZoneType|
|IcMax|
Required flux sequences:
|PC|
Calculated fluxes sequences:
|TF|
Updated state sequence:
|Ic|
Basic equation:
:math:`TF = \\Bigl \\lbrace
{
{PC \\ | \\ Ic = IcMax}
\\atop
{0 \\ | \\ Ic < IcMax}
}`
Examples:
Initialize six zones of different types. Assume a
general maximum interception capacity of 2 mm. All zones receive
a 0.5 mm input of precipitation:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(6)
>>> zonetype(GLACIER, ILAKE, FIELD, FOREST, FIELD, FIELD)
>>> icmax(2.0)
>>> fluxes.pc = 0.5
>>> states.ic = 0.0, 0.0, 0.0, 0.0, 1.0, 2.0
>>> model.calc_tf_ic_v1()
For glaciers (first zone) and internal lakes (second zone) the
interception routine does not apply. Hence, all precipitation is
routed as throughfall. For fields and forests, the interception
routine is identical (usually, only larger capacities for forests
are assumed, due to their higher leaf area index). Hence, the
results of the third and the second zone are equal. The last
three zones demonstrate, that all precipitation is stored until
the interception capacity is reached; afterwards, all precepitation
is routed as throughfall. Initial storage reduces the effective
capacity of the respective simulation step:
>>> states.ic
ic(0.0, 0.0, 0.5, 0.5, 1.5, 2.0)
>>> fluxes.tf
tf(0.5, 0.5, 0.0, 0.0, 0.0, 0.5)
A zero precipitation example:
>>> fluxes.pc = 0.0
>>> states.ic = 0.0, 0.0, 0.0, 0.0, 1.0, 2.0
>>> model.calc_tf_ic_v1()
>>> states.ic
ic(0.0, 0.0, 0.0, 0.0, 1.0, 2.0)
>>> fluxes.tf
tf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
A high precipitation example:
>>> fluxes.pc = 5.0
>>> states.ic = 0.0, 0.0, 0.0, 0.0, 1.0, 2.0
>>> model.calc_tf_ic_v1()
>>> states.ic
ic(0.0, 0.0, 2.0, 2.0, 2.0, 2.0)
>>> fluxes.tf
tf(5.0, 5.0, 3.0, 3.0, 4.0, 5.0) | [
"Calculate",
"throughfall",
"and",
"update",
"the",
"interception",
"storage",
"accordingly",
"."
] | python | train |
msmbuilder/msmbuilder | msmbuilder/cluster/__init__.py | https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/cluster/__init__.py#L37-L60 | def _replace_labels(doc):
"""Really hacky find-and-replace method that modifies one of the sklearn
docstrings to change the semantics of labels_ for the subclasses"""
lines = doc.splitlines()
labelstart, labelend = None, None
foundattributes = False
for i, line in enumerate(lines):
stripped = line.strip()
if stripped == 'Attributes':
foundattributes = True
if foundattributes and not labelstart and stripped.startswith('labels_'):
labelstart = len('\n'.join(lines[:i])) + 1
if labelstart and not labelend and stripped == '':
labelend = len('\n'.join(lines[:i + 1]))
if labelstart is None or labelend is None:
return doc
replace = '\n'.join([
' labels_ : list of arrays, each of shape [sequence_length, ]',
' The label of each point is an integer in [0, n_clusters).',
'',
])
return doc[:labelstart] + replace + doc[labelend:] | [
"def",
"_replace_labels",
"(",
"doc",
")",
":",
"lines",
"=",
"doc",
".",
"splitlines",
"(",
")",
"labelstart",
",",
"labelend",
"=",
"None",
",",
"None",
"foundattributes",
"=",
"False",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"stripped",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"stripped",
"==",
"'Attributes'",
":",
"foundattributes",
"=",
"True",
"if",
"foundattributes",
"and",
"not",
"labelstart",
"and",
"stripped",
".",
"startswith",
"(",
"'labels_'",
")",
":",
"labelstart",
"=",
"len",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
":",
"i",
"]",
")",
")",
"+",
"1",
"if",
"labelstart",
"and",
"not",
"labelend",
"and",
"stripped",
"==",
"''",
":",
"labelend",
"=",
"len",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
":",
"i",
"+",
"1",
"]",
")",
")",
"if",
"labelstart",
"is",
"None",
"or",
"labelend",
"is",
"None",
":",
"return",
"doc",
"replace",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"' labels_ : list of arrays, each of shape [sequence_length, ]'",
",",
"' The label of each point is an integer in [0, n_clusters).'",
",",
"''",
",",
"]",
")",
"return",
"doc",
"[",
":",
"labelstart",
"]",
"+",
"replace",
"+",
"doc",
"[",
"labelend",
":",
"]"
] | Really hacky find-and-replace method that modifies one of the sklearn
docstrings to change the semantics of labels_ for the subclasses | [
"Really",
"hacky",
"find",
"-",
"and",
"-",
"replace",
"method",
"that",
"modifies",
"one",
"of",
"the",
"sklearn",
"docstrings",
"to",
"change",
"the",
"semantics",
"of",
"labels_",
"for",
"the",
"subclasses"
] | python | train |
davidfokkema/artist | artist/multi_plot.py | https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L491-L521 | def render(self, template=None):
"""Render the plot using a template.
Once the plot is complete, it needs to be rendered. Artist uses
the Jinja2 templating engine. The default template results in a
LaTeX file which can be included in your document.
:param template: a user-supplied template or None.
:type template: string or None.
:returns: the rendered template as string.
"""
if not template:
template = self.template
for subplot in self.subplots:
subplot._prepare_data()
response = template.render(rows=self.rows, columns=self.columns,
xmode=self.xmode, ymode=self.ymode,
width=self.width, height=self.height,
xlabel=self.xlabel, ylabel=self.ylabel,
limits=self.limits, ticks=self.ticks,
colorbar=self.colorbar,
colormap=self.colormap,
external_filename=self.external_filename,
font_options=self.font_options,
axis_options=self.axis_options,
subplots=self.subplots,
plot_template=self.template)
return response | [
"def",
"render",
"(",
"self",
",",
"template",
"=",
"None",
")",
":",
"if",
"not",
"template",
":",
"template",
"=",
"self",
".",
"template",
"for",
"subplot",
"in",
"self",
".",
"subplots",
":",
"subplot",
".",
"_prepare_data",
"(",
")",
"response",
"=",
"template",
".",
"render",
"(",
"rows",
"=",
"self",
".",
"rows",
",",
"columns",
"=",
"self",
".",
"columns",
",",
"xmode",
"=",
"self",
".",
"xmode",
",",
"ymode",
"=",
"self",
".",
"ymode",
",",
"width",
"=",
"self",
".",
"width",
",",
"height",
"=",
"self",
".",
"height",
",",
"xlabel",
"=",
"self",
".",
"xlabel",
",",
"ylabel",
"=",
"self",
".",
"ylabel",
",",
"limits",
"=",
"self",
".",
"limits",
",",
"ticks",
"=",
"self",
".",
"ticks",
",",
"colorbar",
"=",
"self",
".",
"colorbar",
",",
"colormap",
"=",
"self",
".",
"colormap",
",",
"external_filename",
"=",
"self",
".",
"external_filename",
",",
"font_options",
"=",
"self",
".",
"font_options",
",",
"axis_options",
"=",
"self",
".",
"axis_options",
",",
"subplots",
"=",
"self",
".",
"subplots",
",",
"plot_template",
"=",
"self",
".",
"template",
")",
"return",
"response"
] | Render the plot using a template.
Once the plot is complete, it needs to be rendered. Artist uses
the Jinja2 templating engine. The default template results in a
LaTeX file which can be included in your document.
:param template: a user-supplied template or None.
:type template: string or None.
:returns: the rendered template as string. | [
"Render",
"the",
"plot",
"using",
"a",
"template",
"."
] | python | train |
samgiles/slumber | slumber/utils.py | https://github.com/samgiles/slumber/blob/af0f9ef7bd8df8bde6b47088630786c737869bce/slumber/utils.py#L9-L16 | def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment]) | [
"def",
"url_join",
"(",
"base",
",",
"*",
"args",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"urlsplit",
"(",
"base",
")",
"path",
"=",
"path",
"if",
"len",
"(",
"path",
")",
"else",
"\"/\"",
"path",
"=",
"posixpath",
".",
"join",
"(",
"path",
",",
"*",
"[",
"(",
"'%s'",
"%",
"x",
")",
"for",
"x",
"in",
"args",
"]",
")",
"return",
"urlunsplit",
"(",
"[",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"]",
")"
] | Helper function to join an arbitrary number of url segments together. | [
"Helper",
"function",
"to",
"join",
"an",
"arbitrary",
"number",
"of",
"url",
"segments",
"together",
"."
] | python | train |
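The same helper, made self-contained with the stdlib imports it needs (slumber itself resolves urlsplit/urlunsplit through a py2/py3 compatibility layer):

import posixpath
from urllib.parse import urlsplit, urlunsplit

def url_join(base, *args):
    scheme, netloc, path, query, fragment = urlsplit(base)
    path = path if len(path) else '/'
    path = posixpath.join(path, *[('%s' % x) for x in args])
    return urlunsplit([scheme, netloc, path, query, fragment])

print(url_join('http://api.example.com', 'v1', 'users', 42))
# http://api.example.com/v1/users/42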
saltstack/salt | salt/modules/nxos_api.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nxos_api.py#L257-L314 | def show(commands,
raw_text=True,
**kwargs):
'''
Execute one or more show (non-configuration) commands.
commands
The commands to be executed.
raw_text: ``True``
Whether to return raw text or structured data.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the NX-API connection.
password
The password to pass to the device to authenticate the NX-API connection.
port
The TCP port of the endpoint for the NX-API connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
timeout: ``60``
Time in seconds to wait for the device to respond. Default: 60 seconds.
verify: ``True``
Either a boolean, in which case it controls whether we verify the NX-API
TLS certificate, or a string, in which case it must be a path to a CA bundle
to use. Defaults to ``True``.
CLI Example:
.. code-block:: bash
salt-call --local nxos_api.show 'show version'
salt '*' nxos_api.show 'show bgp sessions' 'show processes' raw_text=False
salt 'regular-minion' nxos_api.show 'show interfaces' host=sw01.example.com username=test password=test
'''
ret = []
if raw_text:
method = 'cli_ascii'
key = 'msg'
else:
method = 'cli'
key = 'body'
response_list = _cli_command(commands,
method=method,
**kwargs)
ret = [response[key] for response in response_list if response]
return ret | [
"def",
"show",
"(",
"commands",
",",
"raw_text",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"[",
"]",
"if",
"raw_text",
":",
"method",
"=",
"'cli_ascii'",
"key",
"=",
"'msg'",
"else",
":",
"method",
"=",
"'cli'",
"key",
"=",
"'body'",
"response_list",
"=",
"_cli_command",
"(",
"commands",
",",
"method",
"=",
"method",
",",
"*",
"*",
"kwargs",
")",
"ret",
"=",
"[",
"response",
"[",
"key",
"]",
"for",
"response",
"in",
"response_list",
"if",
"response",
"]",
"return",
"ret"
] | Execute one or more show (non-configuration) commands.
commands
The commands to be executed.
raw_text: ``True``
Whether to return raw text or structured data.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the NX-API connection.
password
The password to pass to the device to authenticate the NX-API connection.
port
The TCP port of the endpoint for the NX-API connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
timeout: ``60``
Time in seconds to wait for the device to respond. Default: 60 seconds.
verify: ``True``
Either a boolean, in which case it controls whether we verify the NX-API
TLS certificate, or a string, in which case it must be a path to a CA bundle
to use. Defaults to ``True``.
CLI Example:
.. code-block:: bash
salt-call --local nxos_api.show 'show version'
salt '*' nxos_api.show 'show bgp sessions' 'show processes' raw_text=False
salt 'regular-minion' nxos_api.show 'show interfaces' host=sw01.example.com username=test password=test | [
"Execute",
"one",
"or",
"more",
"show",
"(",
"non",
"-",
"configuration",
")",
"commands",
"."
] | python | train |
wakatime/wakatime | wakatime/packages/pygments/lexers/__init__.py | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L209-L222 | def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime) | [
"def",
"get_lexer_for_mimetype",
"(",
"_mime",
",",
"*",
"*",
"options",
")",
":",
"for",
"modname",
",",
"name",
",",
"_",
",",
"_",
",",
"mimetypes",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"_mime",
"in",
"mimetypes",
":",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"modname",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"(",
"*",
"*",
"options",
")",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"_mime",
"in",
"cls",
".",
"mimetypes",
":",
"return",
"cls",
"(",
"*",
"*",
"options",
")",
"raise",
"ClassNotFound",
"(",
"'no lexer for mimetype %r found'",
"%",
"_mime",
")"
] | Get a lexer for a mimetype.
Raises ClassNotFound if not found. | [
"Get",
"a",
"lexer",
"for",
"a",
"mimetype",
"."
] | python | train |
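A hedged usage sketch for the record above, via the upstream pygments distribution that this module vendors (public API, same function name):

# Look up a lexer by MIME type; ClassNotFound signals no match.
from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

try:
    lexer = get_lexer_for_mimetype('text/x-python')
    print(lexer.name)  # e.g. "Python"
except ClassNotFound:
    print('no lexer registered for that mimetype')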
msmbuilder/msmbuilder | msmbuilder/utils/progressbar/widgets.py | https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/utils/progressbar/widgets.py#L147-L163 | def update(self, pbar):
"""Updates the widget to show the ETA or total time when finished."""
if pbar.currval == 0:
return 'ETA: --:--:--'
elif pbar.finished:
return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
else:
elapsed = pbar.seconds_elapsed
currval1, elapsed1 = self._update_samples(pbar.currval, elapsed)
eta = self._eta(pbar.maxval, pbar.currval, elapsed)
if pbar.currval > currval1:
etasamp = self._eta(pbar.maxval - currval1,
pbar.currval - currval1,
elapsed - elapsed1)
weight = (pbar.currval / float(pbar.maxval)) ** 0.5
eta = (1 - weight) * eta + weight * etasamp
return 'ETA: %s' % self.format_time(eta) | [
"def",
"update",
"(",
"self",
",",
"pbar",
")",
":",
"if",
"pbar",
".",
"currval",
"==",
"0",
":",
"return",
"'ETA: --:--:--'",
"elif",
"pbar",
".",
"finished",
":",
"return",
"'Time: %s'",
"%",
"self",
".",
"format_time",
"(",
"pbar",
".",
"seconds_elapsed",
")",
"else",
":",
"elapsed",
"=",
"pbar",
".",
"seconds_elapsed",
"currval1",
",",
"elapsed1",
"=",
"self",
".",
"_update_samples",
"(",
"pbar",
".",
"currval",
",",
"elapsed",
")",
"eta",
"=",
"self",
".",
"_eta",
"(",
"pbar",
".",
"maxval",
",",
"pbar",
".",
"currval",
",",
"elapsed",
")",
"if",
"pbar",
".",
"currval",
">",
"currval1",
":",
"etasamp",
"=",
"self",
".",
"_eta",
"(",
"pbar",
".",
"maxval",
"-",
"currval1",
",",
"pbar",
".",
"currval",
"-",
"currval1",
",",
"elapsed",
"-",
"elapsed1",
")",
"weight",
"=",
"(",
"pbar",
".",
"currval",
"/",
"float",
"(",
"pbar",
".",
"maxval",
")",
")",
"**",
"0.5",
"eta",
"=",
"(",
"1",
"-",
"weight",
")",
"*",
"eta",
"+",
"weight",
"*",
"etasamp",
"return",
"'ETA: %s'",
"%",
"self",
".",
"format_time",
"(",
"eta",
")"
] | Updates the widget to show the ETA or total time when finished. | [
"Updates",
"the",
"widget",
"to",
"show",
"the",
"ETA",
"or",
"total",
"time",
"when",
"finished",
"."
] | python | train |
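A standalone sketch of the adaptive-ETA blend in the record above, assuming the conventional linear-extrapolation formula for _eta (elapsed * remaining / done); the sample values are made up:

# Blend a whole-run ETA with a recent-sample ETA, weighted by sqrt(progress),
# so early estimates lean on overall pace and late ones on recent pace.
def blended_eta(maxval, currval, elapsed, currval1, elapsed1):
    def eta(total, done, secs):
        return secs * total / done - secs  # assumed _eta formula
    global_eta = eta(maxval, currval, elapsed)
    sample_eta = eta(maxval - currval1, currval - currval1, elapsed - elapsed1)
    weight = (currval / float(maxval)) ** 0.5
    return (1 - weight) * global_eta + weight * sample_eta

print(blended_eta(100, 50, 60.0, 40, 45.0))  # ~70.6 seconds remaining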
gofed/gofedlib | gofedlib/distribution/distributionsnapshot.py | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/distribution/distributionsnapshot.py#L42-L45 | def setRpms(self, package, build, build_ts, rpms):
"""Add/Update package rpm
"""
self._builds[package] = {"build": build, "build_ts": build_ts, "rpms": rpms} | [
"def",
"setRpms",
"(",
"self",
",",
"package",
",",
"build",
",",
"build_ts",
",",
"rpms",
")",
":",
"self",
".",
"_builds",
"[",
"package",
"]",
"=",
"{",
"\"build\"",
":",
"build",
",",
"\"build_ts\"",
":",
"build_ts",
",",
"\"rpms\"",
":",
"rpms",
"}"
] | Add/Update package rpm | [
"Add",
"/",
"Update",
"package",
"rpm"
] | python | train |
rigetti/grove | grove/deutsch_jozsa/deutsch_jozsa.py | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/deutsch_jozsa/deutsch_jozsa.py#L71-L89 | def _init_attr(self, bitstring_map: Dict[str, str]):
"""
Acts instead of __init__ method to instantiate the necessary Deutsch-Jozsa state.
:param Dict[String, String] bitstring_map: truth-table of the input bitstring map in
dictionary format, used to construct the oracle in the Deutsch-Jozsa algorithm.
:return: None
:rtype: NoneType
"""
self.bit_map = bitstring_map
self.n_qubits = len(list(bitstring_map.keys())[0])
# We use one extra qubit for making the oracle,
# and one for storing the answer of the oracle.
self.n_ancillas = 2
self._qubits = list(range(self.n_qubits + self.n_ancillas))
self.computational_qubits = self._qubits[:self.n_qubits]
self.ancillas = self._qubits[self.n_qubits:]
self.unitary_matrix = self.unitary_function(bitstring_map)
self.deutsch_jozsa_circuit = self._construct_deutsch_jozsa_circuit() | [
"def",
"_init_attr",
"(",
"self",
",",
"bitstring_map",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
")",
":",
"self",
".",
"bit_map",
"=",
"bitstring_map",
"self",
".",
"n_qubits",
"=",
"len",
"(",
"list",
"(",
"bitstring_map",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
")",
"# We use one extra qubit for making the oracle,",
"# and one for storing the answer of the oracle.",
"self",
".",
"n_ancillas",
"=",
"2",
"self",
".",
"_qubits",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"n_qubits",
"+",
"self",
".",
"n_ancillas",
")",
")",
"self",
".",
"computational_qubits",
"=",
"self",
".",
"_qubits",
"[",
":",
"self",
".",
"n_qubits",
"]",
"self",
".",
"ancillas",
"=",
"self",
".",
"_qubits",
"[",
"self",
".",
"n_qubits",
":",
"]",
"self",
".",
"unitary_matrix",
"=",
"self",
".",
"unitary_function",
"(",
"bitstring_map",
")",
"self",
".",
"deutsch_jozsa_circuit",
"=",
"self",
".",
"_construct_deutsch_jozsa_circuit",
"(",
")"
] | Acts instead of __init__ method to instantiate the necessary Deutsch-Jozsa state.
:param Dict[String, String] bitstring_map: truth-table of the input bitstring map in
dictionary format, used to construct the oracle in the Deutsch-Jozsa algorithm.
:return: None
:rtype: NoneType | [
"Acts",
"instead",
"of",
"__init__",
"method",
"to",
"instantiate",
"the",
"necessary",
"Deutsch",
"-",
"Jozsa",
"state",
"."
] | python | train |
python-wink/python-wink | src/pywink/devices/cloud_clock.py | https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/cloud_clock.py#L183-L187 | def update_state(self):
""" Update state with latest info from Wink API. """
response = self.api_interface.get_device_state(self, id_override=self.parent_id(),
type_override=self.parent_object_type())
self._update_state_from_response(response) | [
"def",
"update_state",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"api_interface",
".",
"get_device_state",
"(",
"self",
",",
"id_override",
"=",
"self",
".",
"parent_id",
"(",
")",
",",
"type_override",
"=",
"self",
".",
"parent_object_type",
"(",
")",
")",
"self",
".",
"_update_state_from_response",
"(",
"response",
")"
] | Update state with latest info from Wink API. | [
"Update",
"state",
"with",
"latest",
"info",
"from",
"Wink",
"API",
"."
] | python | train |
kxgames/glooey | glooey/drawing/grid.py | https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/drawing/grid.py#L531-L551 | def make_grid(rect, cells={}, num_rows=0, num_cols=0, padding=None,
inner_padding=None, outer_padding=None, row_heights={}, col_widths={},
default_row_height='expand', default_col_width='expand'):
"""
Return rectangles for each cell in the specified grid. The rectangles are
returned in a dictionary where the keys are (row, col) tuples.
"""
grid = Grid(
bounding_rect=rect,
min_cell_rects=cells,
num_rows=num_rows,
num_cols=num_cols,
padding=padding,
inner_padding=inner_padding,
outer_padding=outer_padding,
row_heights=row_heights,
col_widths=col_widths,
default_row_height=default_row_height,
default_col_width=default_col_width,
)
return grid.make_cells() | [
"def",
"make_grid",
"(",
"rect",
",",
"cells",
"=",
"{",
"}",
",",
"num_rows",
"=",
"0",
",",
"num_cols",
"=",
"0",
",",
"padding",
"=",
"None",
",",
"inner_padding",
"=",
"None",
",",
"outer_padding",
"=",
"None",
",",
"row_heights",
"=",
"{",
"}",
",",
"col_widths",
"=",
"{",
"}",
",",
"default_row_height",
"=",
"'expand'",
",",
"default_col_width",
"=",
"'expand'",
")",
":",
"grid",
"=",
"Grid",
"(",
"bounding_rect",
"=",
"rect",
",",
"min_cell_rects",
"=",
"cells",
",",
"num_rows",
"=",
"num_rows",
",",
"num_cols",
"=",
"num_cols",
",",
"padding",
"=",
"padding",
",",
"inner_padding",
"=",
"inner_padding",
",",
"outer_padding",
"=",
"outer_padding",
",",
"row_heights",
"=",
"row_heights",
",",
"col_widths",
"=",
"col_widths",
",",
"default_row_height",
"=",
"default_row_height",
",",
"default_col_width",
"=",
"default_col_width",
",",
")",
"return",
"grid",
".",
"make_cells",
"(",
")"
] | Return rectangles for each cell in the specified grid. The rectangles are
returned in a dictionary where the keys are (row, col) tuples. | [
"Return",
"rectangles",
"for",
"each",
"cell",
"in",
"the",
"specified",
"grid",
".",
"The",
"rectangles",
"are",
"returned",
"in",
"a",
"dictionary",
"where",
"the",
"keys",
"are",
"(",
"row",
"col",
")",
"tuples",
"."
] | python | train |
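A standalone sketch of the core idea in the record above (not glooey's Rect/Grid API): split a bounding box into equal cells keyed by (row, col):

def even_grid(x, y, width, height, num_rows, num_cols):
    # Equal-sized cells; glooey's real Grid also honors padding and
    # per-row/column sizing, which this sketch omits.
    cell_w, cell_h = width / num_cols, height / num_rows
    return {
        (r, c): (x + c * cell_w, y + r * cell_h, cell_w, cell_h)
        for r in range(num_rows)
        for c in range(num_cols)
    }

cells = even_grid(0, 0, 300, 200, num_rows=2, num_cols=3)
print(cells[(1, 2)])  # (200.0, 100.0, 100.0, 100.0)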
gitpython-developers/GitPython | git/index/fun.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/fun.py#L171-L177 | def entry_key(*entry):
""":return: Key suitable to be used for the index.entries dictionary
:param entry: One instance of type BaseIndexEntry or the path and the stage"""
if len(entry) == 1:
return (entry[0].path, entry[0].stage)
else:
return tuple(entry) | [
"def",
"entry_key",
"(",
"*",
"entry",
")",
":",
"if",
"len",
"(",
"entry",
")",
"==",
"1",
":",
"return",
"(",
"entry",
"[",
"0",
"]",
".",
"path",
",",
"entry",
"[",
"0",
"]",
".",
"stage",
")",
"else",
":",
"return",
"tuple",
"(",
"entry",
")"
] | :return: Key suitable to be used for the index.entries dictionary
:param entry: One instance of type BaseIndexEntry or the path and the stage | [
":",
"return",
":",
"Key",
"suitable",
"to",
"be",
"used",
"for",
"the",
"index",
".",
"entries",
"dictionary",
":",
"param",
"entry",
":",
"One",
"instance",
"of",
"type",
"BaseIndexEntry",
"or",
"the",
"path",
"and",
"the",
"stage"
] | python | train |
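The two call shapes entry_key accepts, sketched with a stand-in for BaseIndexEntry (which carries .path and .stage attributes):

from collections import namedtuple
from git.index.fun import entry_key

FakeEntry = namedtuple('FakeEntry', 'path stage')

print(entry_key(FakeEntry('README.md', 0)))  # ('README.md', 0)
print(entry_key('README.md', 0))             # ('README.md', 0)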
abilian/abilian-core | abilian/cli/base.py | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/cli/base.py#L41-L66 | def createuser(email, password, role=None, name=None, first_name=None):
"""Create new user."""
if User.query.filter(User.email == email).count() > 0:
print(f"A user with email '{email}' already exists, aborting.")
return
# if password is None:
# password = prompt_pass("Password")
user = User(
email=email,
password=password,
last_name=name,
first_name=first_name,
can_login=True,
)
db.session.add(user)
if role in ("admin",):
# FIXME: add other valid roles
security = get_service("security")
security.grant_role(user, role)
db.session.commit()
print(f"User {email} added") | [
"def",
"createuser",
"(",
"email",
",",
"password",
",",
"role",
"=",
"None",
",",
"name",
"=",
"None",
",",
"first_name",
"=",
"None",
")",
":",
"if",
"User",
".",
"query",
".",
"filter",
"(",
"User",
".",
"email",
"==",
"email",
")",
".",
"count",
"(",
")",
">",
"0",
":",
"print",
"(",
"f\"A user with email '{email}' already exists, aborting.\"",
")",
"return",
"# if password is None:",
"# password = prompt_pass(\"Password\")",
"user",
"=",
"User",
"(",
"email",
"=",
"email",
",",
"password",
"=",
"password",
",",
"last_name",
"=",
"name",
",",
"first_name",
"=",
"first_name",
",",
"can_login",
"=",
"True",
",",
")",
"db",
".",
"session",
".",
"add",
"(",
"user",
")",
"if",
"role",
"in",
"(",
"\"admin\"",
",",
")",
":",
"# FIXME: add other valid roles",
"security",
"=",
"get_service",
"(",
"\"security\"",
")",
"security",
".",
"grant_role",
"(",
"user",
",",
"role",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"print",
"(",
"f\"User {email} added\"",
")"
] | Create new user. | [
"Create",
"new",
"user",
"."
] | python | train |
pzs741/TEDT | TEDT/candidate_title.py | https://github.com/pzs741/TEDT/blob/6b6663227b755005fe1a1e3e807a05bdb521e066/TEDT/candidate_title.py#L93-L135 | def vote(self):
"""选举新闻标题
Return:
title -- 新闻标题,str类型
"""
# initialization
weight_queue = []
sameKV = 0
count = 0
# similarity computation
for unit in self._queue:
unit_set = convert_to_set(unit)
for i in unit_set:
if i in self.wordvector_word:
sameKV += self.wordvector_weight[self.wordvector_word.index(i)]
if len(self._queue) >= 5:
# k is the position weight: the closer a text line is to the corpus, the larger its weight, in the interval [0, 1]
k = (count + 1) / len(self._queue)
beta = normalized(self.beta_list[count], self.beta_list)
count += 1
else:
k = 1
beta = normalized(self.beta_list[count], self.beta_list)
count += 1
jaccard = sameKV / len(
(set(unit_set) | set(self.wordvector_word)) - (set(unit_set) & set(self.wordvector_word)))
unit_weight = k * beta * jaccard
weight_queue.append(unit_weight)
sameKV = 0
log('debug',
'text line [{}]\nsimilarity parameters: unit_weight [{}], k [{}], beta [{}], jaccard [{}]\n'.format(unit, unit_weight, k, beta,
jaccard))
# filtering
try:
title = self._queue[weight_queue.index(sorted(weight_queue, reverse=True)[0])]
except:
title = ''
return title | [
"def",
"vote",
"(",
"self",
")",
":",
"# 初始化",
"weight_queue",
"=",
"[",
"]",
"sameKV",
"=",
"0",
"count",
"=",
"0",
"# 相似度计算",
"for",
"unit",
"in",
"self",
".",
"_queue",
":",
"unit_set",
"=",
"convert_to_set",
"(",
"unit",
")",
"for",
"i",
"in",
"unit_set",
":",
"if",
"i",
"in",
"self",
".",
"wordvector_word",
":",
"sameKV",
"+=",
"self",
".",
"wordvector_weight",
"[",
"self",
".",
"wordvector_word",
".",
"index",
"(",
"i",
")",
"]",
"if",
"len",
"(",
"self",
".",
"_queue",
")",
">=",
"5",
":",
"# k是位置权重,离语料库越近的文本行,权重越大,区间【0,1】",
"k",
"=",
"(",
"count",
"+",
"1",
")",
"/",
"len",
"(",
"self",
".",
"_queue",
")",
"beta",
"=",
"normalized",
"(",
"self",
".",
"beta_list",
"[",
"count",
"]",
",",
"self",
".",
"beta_list",
")",
"count",
"+=",
"1",
"else",
":",
"k",
"=",
"1",
"beta",
"=",
"normalized",
"(",
"self",
".",
"beta_list",
"[",
"count",
"]",
",",
"self",
".",
"beta_list",
")",
"count",
"+=",
"1",
"jaccard",
"=",
"sameKV",
"/",
"len",
"(",
"(",
"set",
"(",
"unit_set",
")",
"|",
"set",
"(",
"self",
".",
"wordvector_word",
")",
")",
"-",
"(",
"set",
"(",
"unit_set",
")",
"&",
"set",
"(",
"self",
".",
"wordvector_word",
")",
")",
")",
"unit_weight",
"=",
"k",
"*",
"beta",
"*",
"jaccard",
"weight_queue",
".",
"append",
"(",
"unit_weight",
")",
"sameKV",
"=",
"0",
"log",
"(",
"'debug'",
",",
"'文本行【{}】\\n相似度计算参数,unit_weight:【{}】,k:【{}】,beta:【{}】,jaccard:【{}】\\n'.format(unit, unit_weight, k, beta,",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"jaccard",
")",
")",
"# 过滤",
"try",
":",
"title",
"=",
"self",
".",
"_queue",
"[",
"weight_queue",
".",
"index",
"(",
"sorted",
"(",
"weight_queue",
",",
"reverse",
"=",
"True",
")",
"[",
"0",
"]",
")",
"]",
"except",
":",
"title",
"=",
"''",
"return",
"title"
] | Select the news title by voting.
Return:
title -- the news title, of type str | [
"选举新闻标题"
] | python | train |
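A standalone sketch of the scoring inside vote() above: shared keyword weight, scaled by position weight k and normalized beta, divided by the size of the symmetric difference of the two keyword sets (the (A|B)-(A&B) denominator in the code); the sample numbers are made up:

def line_score(line_words, corpus_words, corpus_weights, k, beta):
    # Sum the weights of corpus keywords that also appear in the line.
    shared = sum(w for word, w in zip(corpus_words, corpus_weights)
                 if word in line_words)
    denom = len(set(line_words) ^ set(corpus_words)) or 1
    return k * beta * shared / denom

score = line_score({'market', 'stocks'}, ['stocks', 'rally'], [0.7, 0.3],
                   k=1.0, beta=0.5)
print(score)  # 0.175: shared weight 0.7 over 2 differing words, times k*beta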
scanny/python-pptx | pptx/oxml/chart/chart.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/chart.py#L177-L193 | def iter_xCharts(self):
"""
Generate each xChart child element in document.
"""
plot_tags = (
qn('c:area3DChart'), qn('c:areaChart'), qn('c:bar3DChart'),
qn('c:barChart'), qn('c:bubbleChart'), qn('c:doughnutChart'),
qn('c:line3DChart'), qn('c:lineChart'), qn('c:ofPieChart'),
qn('c:pie3DChart'), qn('c:pieChart'), qn('c:radarChart'),
qn('c:scatterChart'), qn('c:stockChart'), qn('c:surface3DChart'),
qn('c:surfaceChart')
)
for child in self.iterchildren():
if child.tag not in plot_tags:
continue
yield child | [
"def",
"iter_xCharts",
"(",
"self",
")",
":",
"plot_tags",
"=",
"(",
"qn",
"(",
"'c:area3DChart'",
")",
",",
"qn",
"(",
"'c:areaChart'",
")",
",",
"qn",
"(",
"'c:bar3DChart'",
")",
",",
"qn",
"(",
"'c:barChart'",
")",
",",
"qn",
"(",
"'c:bubbleChart'",
")",
",",
"qn",
"(",
"'c:doughnutChart'",
")",
",",
"qn",
"(",
"'c:line3DChart'",
")",
",",
"qn",
"(",
"'c:lineChart'",
")",
",",
"qn",
"(",
"'c:ofPieChart'",
")",
",",
"qn",
"(",
"'c:pie3DChart'",
")",
",",
"qn",
"(",
"'c:pieChart'",
")",
",",
"qn",
"(",
"'c:radarChart'",
")",
",",
"qn",
"(",
"'c:scatterChart'",
")",
",",
"qn",
"(",
"'c:stockChart'",
")",
",",
"qn",
"(",
"'c:surface3DChart'",
")",
",",
"qn",
"(",
"'c:surfaceChart'",
")",
")",
"for",
"child",
"in",
"self",
".",
"iterchildren",
"(",
")",
":",
"if",
"child",
".",
"tag",
"not",
"in",
"plot_tags",
":",
"continue",
"yield",
"child"
] | Generate each xChart child element in document. | [
"Generate",
"each",
"xChart",
"child",
"element",
"in",
"document",
"."
] | python | train |
yfpeng/bioc | bioc/biocxml/decoder.py | https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/biocxml/decoder.py#L39-L55 | def decode(self, fp: TextIO) -> BioCCollection:
"""
Deserialize ``fp`` to a BioC collection object.
Args:
fp: a ``.read()``-supporting file-like object containing a BioC collection
Returns:
an object of BioCollection
"""
# utf8_parser = etree.XMLParser(encoding='utf-8')
tree = etree.parse(fp)
collection = self.__parse_collection(tree.getroot())
collection.encoding = tree.docinfo.encoding
collection.standalone = tree.docinfo.standalone
collection.version = tree.docinfo.xml_version
return collection | [
"def",
"decode",
"(",
"self",
",",
"fp",
":",
"TextIO",
")",
"->",
"BioCCollection",
":",
"# utf8_parser = etree.XMLParser(encoding='utf-8')",
"tree",
"=",
"etree",
".",
"parse",
"(",
"fp",
")",
"collection",
"=",
"self",
".",
"__parse_collection",
"(",
"tree",
".",
"getroot",
"(",
")",
")",
"collection",
".",
"encoding",
"=",
"tree",
".",
"docinfo",
".",
"encoding",
"collection",
".",
"standalone",
"=",
"tree",
".",
"docinfo",
".",
"standalone",
"collection",
".",
"version",
"=",
"tree",
".",
"docinfo",
".",
"xml_version",
"return",
"collection"
] | Deserialize ``fp`` to a BioC collection object.
Args:
fp: a ``.read()``-supporting file-like object containing a BioC collection
Returns:
an object of BioCollection | [
"Deserialize",
"fp",
"to",
"a",
"BioC",
"collection",
"object",
"."
] | python | train |
jalanb/pysyte | pysyte/paths.py | https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/paths.py#L103-L116 | def dirpaths(self):
"""Split the dirname into individual directory names
An absolute path starts with an empty string, a relative path does not
>>> p = DotPath(u'/path/to/x.py')
>>> p.paths == p.dirpaths()
True
"""
parts = self.parts()
result = [DotPath(parts[0] or '/')]
for name in parts[1:]:
result.append(result[-1] / name)
return result | [
"def",
"dirpaths",
"(",
"self",
")",
":",
"parts",
"=",
"self",
".",
"parts",
"(",
")",
"result",
"=",
"[",
"DotPath",
"(",
"parts",
"[",
"0",
"]",
"or",
"'/'",
")",
"]",
"for",
"name",
"in",
"parts",
"[",
"1",
":",
"]",
":",
"result",
".",
"append",
"(",
"result",
"[",
"-",
"1",
"]",
"/",
"name",
")",
"return",
"result"
] | Split the dirname into individual directory names
An absolute path starts with an empty string, a relative path does not
>>> p = DotPath(u'/path/to/x.py')
>>> p.paths == p.dirpaths()
True | [
"Split",
"the",
"dirname",
"into",
"individual",
"directory",
"names"
] | python | train |
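The same cumulative-prefix idea using only the standard library, for comparison with the DotPath version above:

import os

def dirpaths(path):
    parts = path.split(os.sep)
    result = [parts[0] or os.sep]  # '' means the path was absolute
    for name in parts[1:]:
        result.append(os.path.join(result[-1], name))
    return result

print(dirpaths('/path/to/x.py'))
# ['/', '/path', '/path/to', '/path/to/x.py'] (with POSIX separators)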
apple/turicreate | src/unity/python/turicreate/toolkits/image_classifier/image_classifier.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_classifier/image_classifier.py#L546-L609 | def predict_topk(self, dataset, output_type="probability", k=3, batch_size=64):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
Images to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns]
"""
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
dataset, _ = self._canonize_input(dataset)
extracted_features = self._extract_features(dataset)
return self.classifier.predict_topk(extracted_features, output_type = output_type, k = k) | [
"def",
"predict_topk",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"\"probability\"",
",",
"k",
"=",
"3",
",",
"batch_size",
"=",
"64",
")",
":",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"(",
"_tc",
".",
"SFrame",
",",
"_tc",
".",
"SArray",
",",
"_tc",
".",
"Image",
")",
")",
":",
"raise",
"TypeError",
"(",
"'dataset must be either an SFrame, SArray or turicreate.Image'",
")",
"if",
"(",
"batch_size",
"<",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"'batch_size' must be greater than or equal to 1\"",
")",
"dataset",
",",
"_",
"=",
"self",
".",
"_canonize_input",
"(",
"dataset",
")",
"extracted_features",
"=",
"self",
".",
"_extract_features",
"(",
"dataset",
")",
"return",
"self",
".",
"classifier",
".",
"predict_topk",
"(",
"extracted_features",
",",
"output_type",
"=",
"output_type",
",",
"k",
"=",
"k",
")"
] | Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
Images to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns] | [
"Return",
"top",
"-",
"k",
"predictions",
"for",
"the",
"dataset",
"using",
"the",
"trained",
"model",
".",
"Predictions",
"are",
"returned",
"as",
"an",
"SFrame",
"with",
"three",
"columns",
":",
"id",
"class",
"and",
"probability",
"margin",
"or",
"rank",
"depending",
"on",
"the",
"output_type",
"parameter",
".",
"Input",
"dataset",
"size",
"must",
"be",
"the",
"same",
"as",
"for",
"training",
"of",
"the",
"model",
"."
] | python | train |
aktaylor08/RosbagPandas | scripts/bag2csv.py | https://github.com/aktaylor08/RosbagPandas/blob/c2af9f22537102696dffdf2e61790362726a8403/scripts/bag2csv.py#L11-L32 | def buildParser():
''' Builds the parser for reading the command line arguments'''
parser = argparse.ArgumentParser(
description='Script to parse bagfile to csv file')
parser.add_argument('bag', help='Bag file to read',
type=str)
parser.add_argument('-i', '--include',
help='list or regex for topics to include',
nargs='*')
parser.add_argument('-e', '--exclude',
help='list or regex for topics to exclude',
nargs='*')
parser.add_argument('-o', '--output',
help='name of the output file',
nargs='*')
parser.add_argument('-f', '--fill',
help='Fill the bag forward and backwards so no missing values when present',
action='store_true')
parser.add_argument('--include-header',
help='Include the header fields. By default they are excluded',
action='store_true')
return parser | [
"def",
"buildParser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Script to parse bagfile to csv file'",
")",
"parser",
".",
"add_argument",
"(",
"'bag'",
",",
"help",
"=",
"'Bag file to read'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--include'",
",",
"help",
"=",
"'list or regex for topics to include'",
",",
"nargs",
"=",
"'*'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--exclude'",
",",
"help",
"=",
"'list or regex for topics to exclude'",
",",
"nargs",
"=",
"'*'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output'",
",",
"help",
"=",
"'name of the output file'",
",",
"nargs",
"=",
"'*'",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--fill'",
",",
"help",
"=",
"'Fill the bag forward and backwards so no missing values when present'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--include-header'",
",",
"help",
"=",
"'Include the header fields. By default they are excluded'",
",",
"action",
"=",
"'store_true'",
")",
"return",
"parser"
] | Builds the parser for reading the command line arguments | [
"Builds",
"the",
"parser",
"for",
"reading",
"the",
"command",
"line",
"arguments"
] | python | train |
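An example invocation of the parser built above, assuming buildParser is in scope; the bag name and topics are made up:

parser = buildParser()
args = parser.parse_args(['flight.bag', '-i', '/imu', '/gps', '-f'])
print(args.bag, args.include, args.fill)  # flight.bag ['/imu', '/gps'] True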
tensorlayer/tensorlayer | tensorlayer/layers/core.py | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L191-L197 | def print_layers(self):
"""Print all info of layers in the network."""
for i, layer in enumerate(self.all_layers):
# logging.info(" layer %d: %s" % (i, str(layer)))
logging.info(
" layer {:3}: {:20} {:15} {}".format(i, layer.name, str(layer.get_shape()), layer.dtype.name)
) | [
"def",
"print_layers",
"(",
"self",
")",
":",
"for",
"i",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"all_layers",
")",
":",
"# logging.info(\" layer %d: %s\" % (i, str(layer)))",
"logging",
".",
"info",
"(",
"\" layer {:3}: {:20} {:15} {}\"",
".",
"format",
"(",
"i",
",",
"layer",
".",
"name",
",",
"str",
"(",
"layer",
".",
"get_shape",
"(",
")",
")",
",",
"layer",
".",
"dtype",
".",
"name",
")",
")"
] | Print all info of layers in the network. | [
"Print",
"all",
"info",
"of",
"layers",
"in",
"the",
"network",
"."
] | python | valid |
sentinel-hub/sentinelhub-py | sentinelhub/download.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/download.py#L159-L167 | def is_downloaded(self):
""" Checks if data for this request has already been downloaded and is saved to disk.
:return: returns ``True`` if data for this request has already been downloaded and is saved to disk.
:rtype: bool
"""
if self.file_path is None:
return False
return os.path.exists(self.file_path) | [
"def",
"is_downloaded",
"(",
"self",
")",
":",
"if",
"self",
".",
"file_path",
"is",
"None",
":",
"return",
"False",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"file_path",
")"
] | Checks if data for this request has already been downloaded and is saved to disk.
:return: returns ``True`` if data for this request has already been downloaded and is saved to disk.
:rtype: bool | [
"Checks",
"if",
"data",
"for",
"this",
"request",
"has",
"already",
"been",
"downloaded",
"and",
"is",
"saved",
"to",
"disk",
"."
] | python | train |
googleapis/google-cloud-python | core/google/cloud/operation.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/operation.py#L233-L250 | def _update_state(self, operation_pb):
"""Update the state of the current object based on operation.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed.
"""
if operation_pb.done:
self._complete = True
if operation_pb.HasField("metadata"):
self.metadata = _from_any(operation_pb.metadata)
result_type = operation_pb.WhichOneof("result")
if result_type == "error":
self.error = operation_pb.error
elif result_type == "response":
self.response = _from_any(operation_pb.response) | [
"def",
"_update_state",
"(",
"self",
",",
"operation_pb",
")",
":",
"if",
"operation_pb",
".",
"done",
":",
"self",
".",
"_complete",
"=",
"True",
"if",
"operation_pb",
".",
"HasField",
"(",
"\"metadata\"",
")",
":",
"self",
".",
"metadata",
"=",
"_from_any",
"(",
"operation_pb",
".",
"metadata",
")",
"result_type",
"=",
"operation_pb",
".",
"WhichOneof",
"(",
"\"result\"",
")",
"if",
"result_type",
"==",
"\"error\"",
":",
"self",
".",
"error",
"=",
"operation_pb",
".",
"error",
"elif",
"result_type",
"==",
"\"response\"",
":",
"self",
".",
"response",
"=",
"_from_any",
"(",
"operation_pb",
".",
"response",
")"
] | Update the state of the current object based on operation.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed. | [
"Update",
"the",
"state",
"of",
"the",
"current",
"object",
"based",
"on",
"operation",
"."
] | python | train |
treycucco/bidon | bidon/util/__init__.py | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/__init__.py#L123-L138 | def get_value(obj, name, fallback=None):
"""Calls through to has_value. If has_value[0] is True, return has_value[1] otherwise returns
fallback() if fallback is callable, else just fallback.
:obj: the object to pull values from
:name: the name to use when getting the value
:fallback: the value to return when has_value(:obj:, :name:) returns False
"""
present, value = has_value(obj, name)
if present:
return value
else:
if callable(fallback):
return fallback()
else:
return fallback | [
"def",
"get_value",
"(",
"obj",
",",
"name",
",",
"fallback",
"=",
"None",
")",
":",
"present",
",",
"value",
"=",
"has_value",
"(",
"obj",
",",
"name",
")",
"if",
"present",
":",
"return",
"value",
"else",
":",
"if",
"callable",
"(",
"fallback",
")",
":",
"return",
"fallback",
"(",
")",
"else",
":",
"return",
"fallback"
] | Calls through to has_value. If has_value[0] is True, return has_value[1] otherwise returns
fallback() if fallback is callable, else just fallback.
:obj: the object to pull values from
:name: the name to use when getting the value
:fallback: the value to return when has_value(:obj:, :name:) returns False | [
"Calls",
"through",
"to",
"has_value",
".",
"If",
"has_value",
"[",
"0",
"]",
"is",
"True",
"return",
"has_value",
"[",
"1",
"]",
"otherwise",
"returns",
"fallback",
"()",
"if",
"fallback",
"is",
"callable",
"else",
"just",
"fallback",
"."
] | python | train |
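A minimal usage sketch for the record above, assuming has_value() (defined alongside get_value in bidon.util) treats attribute access as presence:

from bidon.util import get_value

class Point:
    x = 1

print(get_value(Point(), 'x', fallback=0))           # 1
print(get_value(Point(), 'missing', fallback=0))     # 0
print(get_value(Point(), 'missing', fallback=list))  # [] (callable fallback)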
pytroll/satpy | satpy/readers/viirs_edr_active_fires.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/viirs_edr_active_fires.py#L35-L52 | def get_dataset(self, dsid, dsinfo):
"""Get dataset function
Args:
dsid: Dataset ID
dsinfo: Dataset information
Returns:
Dask DataArray: Data
"""
data = self[dsinfo.get('file_key', dsid.name)]
data.attrs.update(dsinfo)
data.attrs["platform_name"] = self['/attr/satellite_name']
data.attrs["sensor"] = self['/attr/instrument_name']
return data | [
"def",
"get_dataset",
"(",
"self",
",",
"dsid",
",",
"dsinfo",
")",
":",
"data",
"=",
"self",
"[",
"dsinfo",
".",
"get",
"(",
"'file_key'",
",",
"dsid",
".",
"name",
")",
"]",
"data",
".",
"attrs",
".",
"update",
"(",
"dsinfo",
")",
"data",
".",
"attrs",
"[",
"\"platform_name\"",
"]",
"=",
"self",
"[",
"'/attr/satellite_name'",
"]",
"data",
".",
"attrs",
"[",
"\"sensor\"",
"]",
"=",
"self",
"[",
"'/attr/instrument_name'",
"]",
"return",
"data"
] | Get dataset function
Args:
dsid: Dataset ID
dsinfo: Dataset information
Returns:
Dask DataArray: Data | [
"Get",
"dataset",
"function"
] | python | train |
elsampsa/valkka-live | valkka/live/container/video.py | https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/live/container/video.py#L234-L261 | def setDevice(self, device):
"""Sets the video stream
:param device: A rather generic device class. In this case DataModel.RTSPCameraDevice.
"""
print(self.pre, "setDevice :", device)
if (not device and not self.device): # None can be passed as an argument when the device has not been set yet
return
if (self.device):
if self.device == device:
print(self.pre, "setDevice : same device")
return
if self.filterchain: # there's video already
self.clearDevice()
self.device = device
self.video.setDevice(self.device) # inform the video widget so it can start drags
# ManagedFilterChain.addViewPort accepts ViewPort instance
self.filterchain = self.filterchain_group.get(_id = self.device._id)
if self.filterchain:
self.viewport.setXScreenNum(self.n_xscreen)
self.viewport.setWindowId (int(self.video.winId()))
self.filterchain.addViewPort(self.viewport) | [
"def",
"setDevice",
"(",
"self",
",",
"device",
")",
":",
"print",
"(",
"self",
".",
"pre",
",",
"\"setDevice :\"",
",",
"device",
")",
"if",
"(",
"not",
"device",
"and",
"not",
"self",
".",
"device",
")",
":",
"# None can be passed as an argument when the device has not been set yet",
"return",
"if",
"(",
"self",
".",
"device",
")",
":",
"if",
"self",
".",
"device",
"==",
"device",
":",
"print",
"(",
"self",
".",
"pre",
",",
"\"setDevice : same device\"",
")",
"return",
"if",
"self",
".",
"filterchain",
":",
"# there's video already",
"self",
".",
"clearDevice",
"(",
")",
"self",
".",
"device",
"=",
"device",
"self",
".",
"video",
".",
"setDevice",
"(",
"self",
".",
"device",
")",
"# inform the video widget so it can start drags",
"# ManagedFilterChain.addViewPort accepts ViewPort instance",
"self",
".",
"filterchain",
"=",
"self",
".",
"filterchain_group",
".",
"get",
"(",
"_id",
"=",
"self",
".",
"device",
".",
"_id",
")",
"if",
"self",
".",
"filterchain",
":",
"self",
".",
"viewport",
".",
"setXScreenNum",
"(",
"self",
".",
"n_xscreen",
")",
"self",
".",
"viewport",
".",
"setWindowId",
"(",
"int",
"(",
"self",
".",
"video",
".",
"winId",
"(",
")",
")",
")",
"self",
".",
"filterchain",
".",
"addViewPort",
"(",
"self",
".",
"viewport",
")"
] | Sets the video stream
:param device: A rather generic device class. In this case DataModel.RTSPCameraDevice. | [
"Sets",
"the",
"video",
"stream",
":",
"param",
"device",
":",
"A",
"rather",
"generic",
"device",
"class",
".",
"In",
"this",
"case",
"DataModel",
".",
"RTSPCameraDevice",
"."
] | python | train |
inveniosoftware/invenio-records-rest | invenio_records_rest/views.py | https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/views.py#L650-L683 | def delete(self, pid, record, **kwargs):
"""Delete a record.
Permissions: ``delete_permission_factory``
Procedure description:
#. The record is resolved reading the pid value from the url.
#. The ETag is checked.
#. The record is deleted.
#. All PIDs are marked as DELETED.
:param pid: Persistent identifier for record.
:param record: Record object.
"""
self.check_etag(str(record.model.version_id))
record.delete()
# mark all PIDs as DELETED
all_pids = PersistentIdentifier.query.filter(
PersistentIdentifier.object_type == pid.object_type,
PersistentIdentifier.object_uuid == pid.object_uuid,
).all()
for rec_pid in all_pids:
if not rec_pid.is_deleted():
rec_pid.delete()
db.session.commit()
if self.indexer_class:
self.indexer_class().delete(record)
return '', 204 | [
"def",
"delete",
"(",
"self",
",",
"pid",
",",
"record",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"check_etag",
"(",
"str",
"(",
"record",
".",
"model",
".",
"version_id",
")",
")",
"record",
".",
"delete",
"(",
")",
"# mark all PIDs as DELETED",
"all_pids",
"=",
"PersistentIdentifier",
".",
"query",
".",
"filter",
"(",
"PersistentIdentifier",
".",
"object_type",
"==",
"pid",
".",
"object_type",
",",
"PersistentIdentifier",
".",
"object_uuid",
"==",
"pid",
".",
"object_uuid",
",",
")",
".",
"all",
"(",
")",
"for",
"rec_pid",
"in",
"all_pids",
":",
"if",
"not",
"rec_pid",
".",
"is_deleted",
"(",
")",
":",
"rec_pid",
".",
"delete",
"(",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"if",
"self",
".",
"indexer_class",
":",
"self",
".",
"indexer_class",
"(",
")",
".",
"delete",
"(",
"record",
")",
"return",
"''",
",",
"204"
] | Delete a record.
Permissions: ``delete_permission_factory``
Procedure description:
#. The record is resolved reading the pid value from the url.
#. The ETag is checked.
#. The record is deleted.
#. All PIDs are marked as DELETED.
:param pid: Persistent identifier for record.
:param record: Record object. | [
"Delete",
"a",
"record",
"."
] | python | train |
PyCQA/pylint | pylint/checkers/strings.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/strings.py#L744-L747 | def register(linter):
"""required method to auto register this checker """
linter.register_checker(StringFormatChecker(linter))
linter.register_checker(StringConstantChecker(linter)) | [
"def",
"register",
"(",
"linter",
")",
":",
"linter",
".",
"register_checker",
"(",
"StringFormatChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"StringConstantChecker",
"(",
"linter",
")",
")"
] | required method to auto register this checker | [
"required",
"method",
"to",
"auto",
"register",
"this",
"checker"
] | python | test |
EpistasisLab/scikit-rebate | skrebate/surf.py | https://github.com/EpistasisLab/scikit-rebate/blob/67dab51a7525fa5d076b059f1e6f8cff7481c1ef/skrebate/surf.py#L71-L87 | def _find_neighbors(self, inst, avg_dist):
""" Identify nearest hits and misses within radius defined by average distance over whole distance array.
This works the same regardless of endpoint type. """
NN = []
min_indicies = []
for i in range(self._datalen):
if inst != i:
locator = [inst, i]
if i > inst:
locator.reverse()
d = self._distance_array[locator[0]][locator[1]]
if d < avg_dist: # Defining the neighborhood with an average distance radius.
min_indicies.append(i)
for i in range(len(min_indicies)):
NN.append(min_indicies[i])
return np.array(NN, dtype=np.int32) | [
"def",
"_find_neighbors",
"(",
"self",
",",
"inst",
",",
"avg_dist",
")",
":",
"NN",
"=",
"[",
"]",
"min_indicies",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_datalen",
")",
":",
"if",
"inst",
"!=",
"i",
":",
"locator",
"=",
"[",
"inst",
",",
"i",
"]",
"if",
"i",
">",
"inst",
":",
"locator",
".",
"reverse",
"(",
")",
"d",
"=",
"self",
".",
"_distance_array",
"[",
"locator",
"[",
"0",
"]",
"]",
"[",
"locator",
"[",
"1",
"]",
"]",
"if",
"d",
"<",
"avg_dist",
":",
"# Defining the neighborhood with an average distance radius.",
"min_indicies",
".",
"append",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"min_indicies",
")",
")",
":",
"NN",
".",
"append",
"(",
"min_indicies",
"[",
"i",
"]",
")",
"return",
"np",
".",
"array",
"(",
"NN",
",",
"dtype",
"=",
"np",
".",
"int32",
")"
] | Identify nearest hits and misses within radius defined by average distance over whole distance array.
This works the same regardless of endpoint type. | [
"Identify",
"nearest",
"hits",
"and",
"misses",
"within",
"radius",
"defined",
"by",
"average",
"distance",
"over",
"whole",
"distance",
"array",
".",
"This",
"works",
"the",
"same",
"regardless",
"of",
"endpoint",
"type",
"."
] | python | train |
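A standalone numpy sketch of the same neighborhood rule as above: every other instance whose pairwise distance falls below the average distance is a neighbor:

import numpy as np

def find_neighbors(dist, inst):
    avg = dist[np.triu_indices_from(dist, k=1)].mean()  # mean pairwise distance
    mask = dist[inst] < avg
    mask[inst] = False  # an instance is not its own neighbor
    return np.flatnonzero(mask)

dist = np.array([[0.0, 1.0, 4.0],
                 [1.0, 0.0, 5.0],
                 [4.0, 5.0, 0.0]])
print(find_neighbors(dist, 0))  # [1]: only instance 1 is closer than average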
mbr/latex | latex/jinja2.py | https://github.com/mbr/latex/blob/f96cb9125b4f570fc2ffc5ae628e2f4069b2f3cf/latex/jinja2.py#L41-L60 | def make_env(*args, **kwargs):
"""Creates an :py:class:`~jinja2.Environment` with different defaults.
Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled.
All start/end/prefix strings will be changed for a more LaTeX-friendly
version (see the docs for details).
Any arguments will be passed on to the :py:class:`~jinja2.Environment`
constructor and override new values.
Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be
replaced with a call to :func:`latex.escape`."""
ka = ENV_ARGS.copy()
ka.update(kwargs)
env = Environment(*args, **ka)
env.filters['e'] = LatexMarkup.escape
env.filters['escape'] = LatexMarkup.escape
env.filters['forceescape'] = LatexMarkup.escape # FIXME: this is a bug
return env | [
"def",
"make_env",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ka",
"=",
"ENV_ARGS",
".",
"copy",
"(",
")",
"ka",
".",
"update",
"(",
"kwargs",
")",
"env",
"=",
"Environment",
"(",
"*",
"args",
",",
"*",
"*",
"ka",
")",
"env",
".",
"filters",
"[",
"'e'",
"]",
"=",
"LatexMarkup",
".",
"escape",
"env",
".",
"filters",
"[",
"'escape'",
"]",
"=",
"LatexMarkup",
".",
"escape",
"env",
".",
"filters",
"[",
"'forceescape'",
"]",
"=",
"LatexMarkup",
".",
"escape",
"# FIXME: this is a bug",
"return",
"env"
] | Creates an :py:class:`~jinja2.Environment` with different defaults.
Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled.
All start/end/prefix strings will be changed for a more LaTeX-friendly
version (see the docs for details).
Any arguments will be passed on to the :py:class:`~jinja2.Environment`
constructor and override new values.
Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be
replaced with a call to :func:`latex.escape`. | [
"Creates",
"an",
":",
"py",
":",
"class",
":",
"~jinja2",
".",
"Environment",
"with",
"different",
"defaults",
"."
] | python | train |
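A usage sketch; \VAR{...} is the variable delimiter the latex package's ENV_ARGS are understood to configure (an assumption worth checking), and |e routes through latex.escape as the docstring states:

from latex.jinja2 import make_env

env = make_env()
template = env.from_string(r'\section{\VAR{title|e}}')
print(template.render(title='Results & Discussion'))
# expected: \section{Results \& Discussion}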
Nic30/hwt | hwt/interfaces/agents/rdSynced.py | https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/interfaces/agents/rdSynced.py#L47-L49 | def doWrite(self, sim, data):
"""write data to interface"""
sim.write(data, self.intf.data) | [
"def",
"doWrite",
"(",
"self",
",",
"sim",
",",
"data",
")",
":",
"sim",
".",
"write",
"(",
"data",
",",
"self",
".",
"intf",
".",
"data",
")"
] | write data to interface | [
"write",
"data",
"to",
"interface"
] | python | test |
wheerd/multiset | multiset.py | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L365-L405 | def symmetric_difference(self, other):
r"""Return a new set with elements in either the set or other but not both.
>>> ms = Multiset('aab')
>>> sorted(ms.symmetric_difference('abc'))
['a', 'c']
You can also use the ``^`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms ^ Multiset('aaac'))
['a', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`symmetric_difference_update`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting symmetric difference multiset.
"""
other = self._as_multiset(other)
result = self.__class__()
_total = 0
_elements = result._elements
self_elements = self._elements
other_elements = other._elements
dist_elements = set(self_elements.keys()) | set(other_elements.keys())
for element in dist_elements:
multiplicity = self_elements.get(element, 0)
other_multiplicity = other_elements.get(element, 0)
new_multiplicity = (multiplicity - other_multiplicity
if multiplicity > other_multiplicity else other_multiplicity - multiplicity)
_total += new_multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
result._total = _total
return result | [
"def",
"symmetric_difference",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"self",
".",
"_as_multiset",
"(",
"other",
")",
"result",
"=",
"self",
".",
"__class__",
"(",
")",
"_total",
"=",
"0",
"_elements",
"=",
"result",
".",
"_elements",
"self_elements",
"=",
"self",
".",
"_elements",
"other_elements",
"=",
"other",
".",
"_elements",
"dist_elements",
"=",
"set",
"(",
"self_elements",
".",
"keys",
"(",
")",
")",
"|",
"set",
"(",
"other_elements",
".",
"keys",
"(",
")",
")",
"for",
"element",
"in",
"dist_elements",
":",
"multiplicity",
"=",
"self_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"other_multiplicity",
"=",
"other_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"new_multiplicity",
"=",
"(",
"multiplicity",
"-",
"other_multiplicity",
"if",
"multiplicity",
">",
"other_multiplicity",
"else",
"other_multiplicity",
"-",
"multiplicity",
")",
"_total",
"+=",
"new_multiplicity",
"if",
"new_multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"=",
"new_multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] | r"""Return a new set with elements in either the set or other but not both.
>>> ms = Multiset('aab')
>>> sorted(ms.symmetric_difference('abc'))
['a', 'c']
You can also use the ``^`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms ^ Multiset('aaac'))
['a', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`symmetric_difference_update`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting symmetric difference multiset. | [
"r",
"Return",
"a",
"new",
"set",
"with",
"elements",
"in",
"either",
"the",
"set",
"or",
"other",
"but",
"not",
"both",
"."
] | python | train |
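The same multiset semantics expressed with collections.Counter, for comparison with the record above: (a - b) + (b - a) keeps each element's multiplicity difference:

from collections import Counter

a, b = Counter('aab'), Counter('abc')
sym = (a - b) + (b - a)
print(sorted(sym.elements()))  # ['a', 'c'], matching the docstring example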
pneff/wsgiservice | wsgiservice/status.py | https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/status.py#L208-L220 | def raise_402(instance, msg=None):
"""Abort the current request with a 402 (Payment Required) response code.
If the message is given it's output as an error message in the response
body (correctly converted to the requested MIME type).
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 402
"""
instance.response.status = 402
if msg:
instance.response.body_raw = {'error': msg}
raise ResponseException(instance.response) | [
"def",
"raise_402",
"(",
"instance",
",",
"msg",
"=",
"None",
")",
":",
"instance",
".",
"response",
".",
"status",
"=",
"402",
"if",
"msg",
":",
"instance",
".",
"response",
".",
"body_raw",
"=",
"{",
"'error'",
":",
"msg",
"}",
"raise",
"ResponseException",
"(",
"instance",
".",
"response",
")"
] | Abort the current request with a 402 (Payment Required) response code.
If the message is given it's output as an error message in the response
body (correctly converted to the requested MIME type).
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 402 | [
"Abort",
"the",
"current",
"request",
"with",
"a",
"402",
"(",
"Payment",
"Required",
")",
"response",
"code",
".",
"If",
"the",
"message",
"is",
"given",
"it",
"s",
"output",
"as",
"an",
"error",
"message",
"in",
"the",
"response",
"body",
"(",
"correctly",
"converted",
"to",
"the",
"requested",
"MIME",
"type",
")",
"."
] | python | train |
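A framework-free sketch of the same abort pattern as above: mutate a response object, then raise it wrapped in an exception for the dispatcher to catch; the dict response here is a stand-in for wsgiservice's real response object:

class ResponseException(Exception):
    def __init__(self, response):
        self.response = response

def abort(response, status, msg=None):
    response['status'] = status
    if msg:
        response['body'] = {'error': msg}
    raise ResponseException(response)

try:
    abort({}, 402, 'payment required')
except ResponseException as exc:
    print(exc.response)  # {'status': 402, 'body': {'error': 'payment required'}}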
rocky/python-uncompyle6 | uncompyle6/semantics/pysource.py | https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/semantics/pysource.py#L1033-L1172 | def comprehension_walk_newer(self, node, iter_index, code_index=-5):
"""Non-closure-based comprehensions the way they are done in Python3
and some Python 2.7. Note: there are also other set comprehensions.
"""
p = self.prec
self.prec = 27
code = node[code_index].attr
assert iscode(code), node[code_index]
code = Code(code, self.scanner, self.currentclass)
ast = self.build_ast(code._tokens, code._customize)
self.customize(code._customize)
# skip over: sstmt, stmt, return, ret_expr
# and other singleton derivations
while (len(ast) == 1
or (ast in ('sstmt', 'return')
and ast[-1] in ('RETURN_LAST', 'RETURN_VALUE'))):
self.prec = 100
ast = ast[0]
# Pick out important parts of the comprehension:
# * the variable we interate over: "store"
# * the results we accumulate: "n"
is_30_dict_comp = False
store = None
n = ast[iter_index]
if ast in ('set_comp_func', 'dict_comp_func',
'list_comp', 'set_comp_func_header'):
for k in ast:
if k == 'comp_iter':
n = k
elif k == 'store':
store = k
pass
pass
pass
elif ast in ('dict_comp', 'set_comp'):
assert self.version == 3.0
for k in ast:
if k in ('dict_comp_header', 'set_comp_header'):
n = k
elif k == 'store':
store = k
elif k == 'dict_comp_iter':
is_30_dict_comp = True
n = (k[3], k[1])
pass
elif k == 'comp_iter':
n = k[0]
pass
pass
else:
assert n == 'list_iter', n
# FIXME: I'm not totally sure this is right.
# Find the list comprehension body. It is the inner-most
# node that is not list_.. .
if_node = None
comp_for = None
comp_store = None
if n == 'comp_iter':
comp_for = n
comp_store = ast[3]
have_not = False
while n in ('list_iter', 'comp_iter'):
# iterate one nesting deeper
if self.version == 3.0 and len(n) == 3:
assert n[0] == 'expr' and n[1] == 'expr'
n = n[1]
else:
n = n[0]
if n in ('list_for', 'comp_for'):
if n[2] == 'store':
store = n[2]
n = n[3]
elif n in ('list_if', 'list_if_not', 'comp_if', 'comp_if_not'):
have_not = n in ('list_if_not', 'comp_if_not')
if_node = n[0]
if n[1] == 'store':
store = n[1]
n = n[2]
pass
pass
# Python 2.7+ starts including set_comp_body
# Python 3.5+ starts including set_comp_func
# Python 3.0 is yet another snowflake
if self.version != 3.0:
assert n.kind in ('lc_body', 'comp_body', 'set_comp_func', 'set_comp_body'), ast
assert store, "Couldn't find store in list/set comprehension"
# A problem created with later Python code generation is that there
# is a lambda set up with a dummy argument name that is then called
# So we can't just translate that as is but need to replace the
# dummy name. Below we are picking out the variable name as seen
# in the code. And trying to generate code for the other parts
# that don't have the dummy argument name in it.
# Another approach might be to be able to pass in the source name
# for the dummy argument.
if is_30_dict_comp:
self.preorder(n[0])
self.write(': ')
self.preorder(n[1])
else:
self.preorder(n[0])
self.write(' for ')
if comp_store:
self.preorder(comp_store)
else:
self.preorder(store)
# FIXME this is all merely approximate
self.write(' in ')
self.preorder(node[-3])
if ast == 'list_comp' and self.version != 3.0:
list_iter = ast[1]
assert list_iter == 'list_iter'
if list_iter == 'list_for':
self.preorder(list_iter[3])
self.prec = p
return
pass
if comp_store:
self.preorder(comp_for)
elif if_node:
self.write(' if ')
if have_not:
self.write('not ')
self.preorder(if_node)
pass
self.prec = p | [
"def",
"comprehension_walk_newer",
"(",
"self",
",",
"node",
",",
"iter_index",
",",
"code_index",
"=",
"-",
"5",
")",
":",
"p",
"=",
"self",
".",
"prec",
"self",
".",
"prec",
"=",
"27",
"code",
"=",
"node",
"[",
"code_index",
"]",
".",
"attr",
"assert",
"iscode",
"(",
"code",
")",
",",
"node",
"[",
"code_index",
"]",
"code",
"=",
"Code",
"(",
"code",
",",
"self",
".",
"scanner",
",",
"self",
".",
"currentclass",
")",
"ast",
"=",
"self",
".",
"build_ast",
"(",
"code",
".",
"_tokens",
",",
"code",
".",
"_customize",
")",
"self",
".",
"customize",
"(",
"code",
".",
"_customize",
")",
"# skip over: sstmt, stmt, return, ret_expr",
"# and other singleton derivations",
"while",
"(",
"len",
"(",
"ast",
")",
"==",
"1",
"or",
"(",
"ast",
"in",
"(",
"'sstmt'",
",",
"'return'",
")",
"and",
"ast",
"[",
"-",
"1",
"]",
"in",
"(",
"'RETURN_LAST'",
",",
"'RETURN_VALUE'",
")",
")",
")",
":",
"self",
".",
"prec",
"=",
"100",
"ast",
"=",
"ast",
"[",
"0",
"]",
"# Pick out important parts of the comprehension:",
"# * the variable we interate over: \"store\"",
"# * the results we accumulate: \"n\"",
"is_30_dict_comp",
"=",
"False",
"store",
"=",
"None",
"n",
"=",
"ast",
"[",
"iter_index",
"]",
"if",
"ast",
"in",
"(",
"'set_comp_func'",
",",
"'dict_comp_func'",
",",
"'list_comp'",
",",
"'set_comp_func_header'",
")",
":",
"for",
"k",
"in",
"ast",
":",
"if",
"k",
"==",
"'comp_iter'",
":",
"n",
"=",
"k",
"elif",
"k",
"==",
"'store'",
":",
"store",
"=",
"k",
"pass",
"pass",
"pass",
"elif",
"ast",
"in",
"(",
"'dict_comp'",
",",
"'set_comp'",
")",
":",
"assert",
"self",
".",
"version",
"==",
"3.0",
"for",
"k",
"in",
"ast",
":",
"if",
"k",
"in",
"(",
"'dict_comp_header'",
",",
"'set_comp_header'",
")",
":",
"n",
"=",
"k",
"elif",
"k",
"==",
"'store'",
":",
"store",
"=",
"k",
"elif",
"k",
"==",
"'dict_comp_iter'",
":",
"is_30_dict_comp",
"=",
"True",
"n",
"=",
"(",
"k",
"[",
"3",
"]",
",",
"k",
"[",
"1",
"]",
")",
"pass",
"elif",
"k",
"==",
"'comp_iter'",
":",
"n",
"=",
"k",
"[",
"0",
"]",
"pass",
"pass",
"else",
":",
"assert",
"n",
"==",
"'list_iter'",
",",
"n",
"# FIXME: I'm not totally sure this is right.",
"# Find the list comprehension body. It is the inner-most",
"# node that is not list_.. .",
"if_node",
"=",
"None",
"comp_for",
"=",
"None",
"comp_store",
"=",
"None",
"if",
"n",
"==",
"'comp_iter'",
":",
"comp_for",
"=",
"n",
"comp_store",
"=",
"ast",
"[",
"3",
"]",
"have_not",
"=",
"False",
"while",
"n",
"in",
"(",
"'list_iter'",
",",
"'comp_iter'",
")",
":",
"# iterate one nesting deeper",
"if",
"self",
".",
"version",
"==",
"3.0",
"and",
"len",
"(",
"n",
")",
"==",
"3",
":",
"assert",
"n",
"[",
"0",
"]",
"==",
"'expr'",
"and",
"n",
"[",
"1",
"]",
"==",
"'expr'",
"n",
"=",
"n",
"[",
"1",
"]",
"else",
":",
"n",
"=",
"n",
"[",
"0",
"]",
"if",
"n",
"in",
"(",
"'list_for'",
",",
"'comp_for'",
")",
":",
"if",
"n",
"[",
"2",
"]",
"==",
"'store'",
":",
"store",
"=",
"n",
"[",
"2",
"]",
"n",
"=",
"n",
"[",
"3",
"]",
"elif",
"n",
"in",
"(",
"'list_if'",
",",
"'list_if_not'",
",",
"'comp_if'",
",",
"'comp_if_not'",
")",
":",
"have_not",
"=",
"n",
"in",
"(",
"'list_if_not'",
",",
"'comp_if_not'",
")",
"if_node",
"=",
"n",
"[",
"0",
"]",
"if",
"n",
"[",
"1",
"]",
"==",
"'store'",
":",
"store",
"=",
"n",
"[",
"1",
"]",
"n",
"=",
"n",
"[",
"2",
"]",
"pass",
"pass",
"# Python 2.7+ starts including set_comp_body",
"# Python 3.5+ starts including set_comp_func",
"# Python 3.0 is yet another snowflake",
"if",
"self",
".",
"version",
"!=",
"3.0",
":",
"assert",
"n",
".",
"kind",
"in",
"(",
"'lc_body'",
",",
"'comp_body'",
",",
"'set_comp_func'",
",",
"'set_comp_body'",
")",
",",
"ast",
"assert",
"store",
",",
"\"Couldn't find store in list/set comprehension\"",
"# A problem created with later Python code generation is that there",
"# is a lamda set up with a dummy argument name that is then called",
"# So we can't just translate that as is but need to replace the",
"# dummy name. Below we are picking out the variable name as seen",
"# in the code. And trying to generate code for the other parts",
"# that don't have the dummy argument name in it.",
"# Another approach might be to be able to pass in the source name",
"# for the dummy argument.",
"if",
"is_30_dict_comp",
":",
"self",
".",
"preorder",
"(",
"n",
"[",
"0",
"]",
")",
"self",
".",
"write",
"(",
"': '",
")",
"self",
".",
"preorder",
"(",
"n",
"[",
"1",
"]",
")",
"else",
":",
"self",
".",
"preorder",
"(",
"n",
"[",
"0",
"]",
")",
"self",
".",
"write",
"(",
"' for '",
")",
"if",
"comp_store",
":",
"self",
".",
"preorder",
"(",
"comp_store",
")",
"else",
":",
"self",
".",
"preorder",
"(",
"store",
")",
"# FIXME this is all merely approximate",
"self",
".",
"write",
"(",
"' in '",
")",
"self",
".",
"preorder",
"(",
"node",
"[",
"-",
"3",
"]",
")",
"if",
"ast",
"==",
"'list_comp'",
"and",
"self",
".",
"version",
"!=",
"3.0",
":",
"list_iter",
"=",
"ast",
"[",
"1",
"]",
"assert",
"list_iter",
"==",
"'list_iter'",
"if",
"list_iter",
"==",
"'list_for'",
":",
"self",
".",
"preorder",
"(",
"list_iter",
"[",
"3",
"]",
")",
"self",
".",
"prec",
"=",
"p",
"return",
"pass",
"if",
"comp_store",
":",
"self",
".",
"preorder",
"(",
"comp_for",
")",
"elif",
"if_node",
":",
"self",
".",
"write",
"(",
"' if '",
")",
"if",
"have_not",
":",
"self",
".",
"write",
"(",
"'not '",
")",
"self",
".",
"preorder",
"(",
"if_node",
")",
"pass",
"self",
".",
"prec",
"=",
"p"
] | Non-closure-based comprehensions the way they are done in Python3
and some Python 2.7. Note: there are also other set comprehensions. | [
"Non",
"-",
"closure",
"-",
"based",
"comprehensions",
"the",
"way",
"they",
"are",
"done",
"in",
"Python3",
"and",
"some",
"Python",
"2",
".",
"7",
".",
"Note",
":",
"there",
"are",
"also",
"other",
"set",
"comprehensions",
"."
] | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_bin_validate.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_validate.py#L273-L353 | def check_layout_params(self):
"""Check that the layout.json is consistent with install.json.
The layout.json file references the params.name values from the install.json file. The method
validates that layout.json does not reference inputs that do not exist in install.json.
"""
ij_input_names = []
ij_output_names = []
if os.path.isfile('install.json'):
try:
with open('install.json') as fh:
ij = json.loads(fh.read())
for p in ij.get('params', []):
ij_input_names.append(p.get('name'))
for o in ij.get('playbook', {}).get('outputVariables', []):
ij_output_names.append(o.get('name'))
except Exception:
# checking parameters isn't possible if install.json can't be parsed
return
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.db_create_table(self.input_table, ij_input_names)
# inputs
status = True
for i in self.layout_json.get('inputs', []):
for p in i.get('parameters'):
if p.get('name') not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed ("{}" is defined in '
'layout.json, but not found in install.json).'.format(p.get('name'))
)
status = False
if 'sqlite3' in sys.modules:
if p.get('display'):
display_query = 'SELECT * FROM {} WHERE {}'.format(
self.input_table, p.get('display')
)
try:
self.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'Layouts input.parameters[].display validations failed ("{}" query '
'is an invalid statement).'.format(p.get('display'))
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
# outputs
status = True
for o in self.layout_json.get('outputs', []):
if o.get('name') not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts output validations failed ({} is defined in layout.json, but not '
'found in install.json).'.format(o.get('name'))
)
status = False
if 'sqlite3' in sys.modules:
if o.get('display'):
display_query = 'SELECT * FROM {} WHERE {}'.format(
self.input_table, o.get('display')
)
try:
self.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'Layouts outputs.display validations failed ("{}" query is '
'an invalid statement).'.format(o.get('display'))
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status}) | [
"def",
"check_layout_params",
"(",
"self",
")",
":",
"ij_input_names",
"=",
"[",
"]",
"ij_output_names",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"'install.json'",
")",
":",
"try",
":",
"with",
"open",
"(",
"'install.json'",
")",
"as",
"fh",
":",
"ij",
"=",
"json",
".",
"loads",
"(",
"fh",
".",
"read",
"(",
")",
")",
"for",
"p",
"in",
"ij",
".",
"get",
"(",
"'params'",
",",
"[",
"]",
")",
":",
"ij_input_names",
".",
"append",
"(",
"p",
".",
"get",
"(",
"'name'",
")",
")",
"for",
"o",
"in",
"ij",
".",
"get",
"(",
"'playbook'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'outputVariables'",
",",
"[",
"]",
")",
":",
"ij_output_names",
".",
"append",
"(",
"o",
".",
"get",
"(",
"'name'",
")",
")",
"except",
"Exception",
":",
"# checking parameters isn't possible if install.json can't be parsed",
"return",
"if",
"'sqlite3'",
"in",
"sys",
".",
"modules",
":",
"# create temporary inputs tables",
"self",
".",
"db_create_table",
"(",
"self",
".",
"input_table",
",",
"ij_input_names",
")",
"# inputs",
"status",
"=",
"True",
"for",
"i",
"in",
"self",
".",
"layout_json",
".",
"get",
"(",
"'inputs'",
",",
"[",
"]",
")",
":",
"for",
"p",
"in",
"i",
".",
"get",
"(",
"'parameters'",
")",
":",
"if",
"p",
".",
"get",
"(",
"'name'",
")",
"not",
"in",
"ij_input_names",
":",
"# update validation data errors",
"self",
".",
"validation_data",
"[",
"'errors'",
"]",
".",
"append",
"(",
"'Layouts input.parameters[].name validations failed (\"{}\" is defined in '",
"'layout.json, but not found in install.json).'",
".",
"format",
"(",
"p",
".",
"get",
"(",
"'name'",
")",
")",
")",
"status",
"=",
"False",
"if",
"'sqlite3'",
"in",
"sys",
".",
"modules",
":",
"if",
"p",
".",
"get",
"(",
"'display'",
")",
":",
"display_query",
"=",
"'SELECT * FROM {} WHERE {}'",
".",
"format",
"(",
"self",
".",
"input_table",
",",
"p",
".",
"get",
"(",
"'display'",
")",
")",
"try",
":",
"self",
".",
"db_conn",
".",
"execute",
"(",
"display_query",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
")",
"except",
"sqlite3",
".",
"Error",
":",
"self",
".",
"validation_data",
"[",
"'errors'",
"]",
".",
"append",
"(",
"'Layouts input.parameters[].display validations failed (\"{}\" query '",
"'is an invalid statement).'",
".",
"format",
"(",
"p",
".",
"get",
"(",
"'display'",
")",
")",
")",
"status",
"=",
"False",
"# update validation data for module",
"self",
".",
"validation_data",
"[",
"'layouts'",
"]",
".",
"append",
"(",
"{",
"'params'",
":",
"'inputs'",
",",
"'status'",
":",
"status",
"}",
")",
"# outputs",
"status",
"=",
"True",
"for",
"o",
"in",
"self",
".",
"layout_json",
".",
"get",
"(",
"'outputs'",
",",
"[",
"]",
")",
":",
"if",
"o",
".",
"get",
"(",
"'name'",
")",
"not",
"in",
"ij_output_names",
":",
"# update validation data errors",
"self",
".",
"validation_data",
"[",
"'errors'",
"]",
".",
"append",
"(",
"'Layouts output validations failed ({} is defined in layout.json, but not '",
"'found in install.json).'",
".",
"format",
"(",
"o",
".",
"get",
"(",
"'name'",
")",
")",
")",
"status",
"=",
"False",
"if",
"'sqlite3'",
"in",
"sys",
".",
"modules",
":",
"if",
"o",
".",
"get",
"(",
"'display'",
")",
":",
"display_query",
"=",
"'SELECT * FROM {} WHERE {}'",
".",
"format",
"(",
"self",
".",
"input_table",
",",
"o",
".",
"get",
"(",
"'display'",
")",
")",
"try",
":",
"self",
".",
"db_conn",
".",
"execute",
"(",
"display_query",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
")",
"except",
"sqlite3",
".",
"Error",
":",
"self",
".",
"validation_data",
"[",
"'errors'",
"]",
".",
"append",
"(",
"'Layouts outputs.display validations failed (\"{}\" query is '",
"'an invalid statement).'",
".",
"format",
"(",
"o",
".",
"get",
"(",
"'display'",
")",
")",
")",
"status",
"=",
"False",
"# update validation data for module",
"self",
".",
"validation_data",
"[",
"'layouts'",
"]",
".",
"append",
"(",
"{",
"'params'",
":",
"'outputs'",
",",
"'status'",
":",
"status",
"}",
")"
] | Check that the layout.json is consistent with install.json.
The layout.json file references the params.name values from the install.json file. The method
validates that layout.json does not reference inputs that do not exist in install.json. | [
"Check",
"that",
"the",
"layout",
".",
"json",
"is",
"consistent",
"with",
"install",
".",
"json",
"."
] | python | train |
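The `display` validation in this record relies on a neat trick: build a throwaway in-memory sqlite table whose columns are the install.json input names, then run each `display` clause as a WHERE filter; a malformed clause or an unknown name raises `sqlite3.Error`. A self-contained sketch of that idea follows, with illustrative input names (the record does not show `db_create_table`, so the table setup below is an assumption):

```python
import sqlite3

# Illustrative input names; in tcex these come from install.json params.
input_names = ['tc_action', 'api_token', 'confidence']

conn = sqlite3.connect(':memory:')
columns = ', '.join('{} TEXT'.format(n) for n in input_names)
conn.execute('CREATE TABLE inputs ({})'.format(columns))

def display_clause_is_valid(clause):
    """Run a layout.json display clause as a WHERE filter."""
    try:
        conn.execute('SELECT * FROM inputs WHERE {}'.format(clause))
        return True
    except sqlite3.Error:  # bad syntax or unknown input name
        return False

print(display_clause_is_valid("tc_action in ('Create', 'Delete')"))  # True
print(display_clause_is_valid("no_such_input = 'x'"))                # False
```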
mwshinn/paranoidscientist | paranoid/decorators.py | https://github.com/mwshinn/paranoidscientist/blob/a5e9198bc40b0a985174ad643cc5d6d0c46efdcd/paranoid/decorators.py#L288-L351 | def ensures(condition):
"""A function decorator to specify exit conditions for the function.
Exit conditions should be a string, which will be evaluated as
Python code. Arguments of the function may be accessed by their
name. The return value of the function may be accessed using the
special variable name "return".
The special syntax "-->" and "<-->" may be used to mean "if" and
"if and only if", respectively. They may not be contained within
sub-expressions.
Values may be compared to previous executions of the function by
including a "`" or "``" after them to check for higher order
properties of the function.
Note that globals will not be included by default, and must be
manually included using the "namespace" setting, set via
settings.Settings.
Example usage:
| @ensures("lower_bound <= return <= upper_bound")
| def search(lower_bound, upper_bound):
| ...
| @ensures("x <= x` --> return <= return`")
| def monotonic(x):
| ...
"""
def _decorator(func, condition=condition):
# @ensures decorator
if U.has_fun_prop(func, "ensures"):
if not isinstance(U.get_fun_prop(func, "ensures"), list):
raise E.InternalError("Invalid ensures strucutre")
ensures_statements = U.get_fun_prop(func, "ensures")
else:
ensures_statements = []
e = condition.replace("return", "__RETURN__")
if "<-->" in e:
e_parts = e.split("<-->")
assert len(e_parts) == 2, "Only one implies per statement in %s condition %s" % (condition, func.__qualname__)
e = "((%s) if (%s) else True) and ((%s) if (%s) else True)" % (e_parts[1], e_parts[0], e_parts[0], e_parts[1])
assert "-->" not in e, "Only one implies per statement in %s condition %s" % (ensurement, func.__qualname__)
if "-->" in e:
e_parts = e.split("-->")
assert len(e_parts) == 2, "Only one implies per statement in %s condition %s" % (condition, func.__qualname__)
e = "(%s) if (%s) else True" % (e_parts[1], e_parts[0])
_bt = "__BACKTICK__"
_dbt = "__DOUBLEBACKTICK__"
if "``" in e:
e = e.replace("``", _dbt)
e = e.replace("`", _bt)
compiled = compile(e, '', 'eval')
U.set_fun_prop(func, "ensures", [(2, compiled, condition)]+ensures_statements)
elif "`" in e:
e = e.replace("`", _bt)
compiled = compile(e, '', 'eval')
U.set_fun_prop(func, "ensures", [(1, compiled, condition)]+ensures_statements)
else:
compiled = compile(e, '', 'eval')
U.set_fun_prop(func, "ensures", [(0, compiled, condition)]+ensures_statements)
return _wrap(func)
return _decorator | [
"def",
"ensures",
"(",
"condition",
")",
":",
"def",
"_decorator",
"(",
"func",
",",
"condition",
"=",
"condition",
")",
":",
"# @ensures decorator",
"if",
"U",
".",
"has_fun_prop",
"(",
"func",
",",
"\"ensures\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"U",
".",
"get_fun_prop",
"(",
"func",
",",
"\"ensures\"",
")",
",",
"list",
")",
":",
"raise",
"E",
".",
"InternalError",
"(",
"\"Invalid ensures strucutre\"",
")",
"ensures_statements",
"=",
"U",
".",
"get_fun_prop",
"(",
"func",
",",
"\"ensures\"",
")",
"else",
":",
"ensures_statements",
"=",
"[",
"]",
"e",
"=",
"condition",
".",
"replace",
"(",
"\"return\"",
",",
"\"__RETURN__\"",
")",
"if",
"\"<-->\"",
"in",
"e",
":",
"e_parts",
"=",
"e",
".",
"split",
"(",
"\"<-->\"",
")",
"assert",
"len",
"(",
"e_parts",
")",
"==",
"2",
",",
"\"Only one implies per statement in %s condition %s\"",
"%",
"(",
"ensurement",
",",
"func",
".",
"__qualname__",
")",
"e",
"=",
"\"((%s) if (%s) else True) and ((%s) if (%s) else True)\"",
"%",
"(",
"e_parts",
"[",
"1",
"]",
",",
"e_parts",
"[",
"0",
"]",
",",
"e_parts",
"[",
"0",
"]",
",",
"e_parts",
"[",
"1",
"]",
")",
"assert",
"\"-->\"",
"not",
"in",
"e",
",",
"\"Only one implies per statement in %s condition %s\"",
"%",
"(",
"ensurement",
",",
"func",
".",
"__qualname__",
")",
"if",
"\"-->\"",
"in",
"e",
":",
"e_parts",
"=",
"e",
".",
"split",
"(",
"\"-->\"",
")",
"assert",
"len",
"(",
"e_parts",
")",
"==",
"2",
",",
"\"Only one implies per statement in %s condition %s\"",
"%",
"(",
"ensurement",
",",
"func",
".",
"__qualname__",
")",
"e",
"=",
"\"(%s) if (%s) else True\"",
"%",
"(",
"e_parts",
"[",
"1",
"]",
",",
"e_parts",
"[",
"0",
"]",
")",
"_bt",
"=",
"\"__BACKTICK__\"",
"_dbt",
"=",
"\"__DOUBLEBACKTICK__\"",
"if",
"\"``\"",
"in",
"e",
":",
"e",
"=",
"e",
".",
"replace",
"(",
"\"``\"",
",",
"_dbt",
")",
"e",
"=",
"e",
".",
"replace",
"(",
"\"`\"",
",",
"_bt",
")",
"compiled",
"=",
"compile",
"(",
"e",
",",
"''",
",",
"'eval'",
")",
"U",
".",
"set_fun_prop",
"(",
"func",
",",
"\"ensures\"",
",",
"[",
"(",
"2",
",",
"compiled",
",",
"condition",
")",
"]",
"+",
"ensures_statements",
")",
"elif",
"\"`\"",
"in",
"e",
":",
"e",
"=",
"e",
".",
"replace",
"(",
"\"`\"",
",",
"_bt",
")",
"compiled",
"=",
"compile",
"(",
"e",
",",
"''",
",",
"'eval'",
")",
"U",
".",
"set_fun_prop",
"(",
"func",
",",
"\"ensures\"",
",",
"[",
"(",
"1",
",",
"compiled",
",",
"condition",
")",
"]",
"+",
"ensures_statements",
")",
"else",
":",
"compiled",
"=",
"compile",
"(",
"e",
",",
"''",
",",
"'eval'",
")",
"U",
".",
"set_fun_prop",
"(",
"func",
",",
"\"ensures\"",
",",
"[",
"(",
"0",
",",
"compiled",
",",
"condition",
")",
"]",
"+",
"ensures_statements",
")",
"return",
"_wrap",
"(",
"func",
")",
"return",
"_decorator"
] | A function decorator to specify exit conditions for the function.
Exit conditions should be a string, which will be evaluated as
Python code. Arguments of the function may be accessed by their
name. The return value of the function may be accessed using the
special variable name "return".
The special syntax "-->" and "<-->" may be used to mean "if" and
"if and only if", respectively. They may not be contained within
sub-expressions.
Values may be compared to previous executions of the function by
including a "`" or "``" after them to check for higher order
properties of the function.
Note that globals will not be included by default, and must be
manually included using the "namespace" setting, set via
settings.Settings.
Example usage:
| @ensures("lower_bound <= return <= upper_bound")
| def search(lower_bound, upper_bound):
| ...
| @ensures("x <= x` --> return <= return`")
| def monotonic(x):
| ... | [
"A",
"function",
"decorator",
"to",
"specify",
"exit",
"conditions",
"for",
"the",
"function",
"."
] | python | train |
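At call time the compiled condition is evaluated with the function's arguments plus `__RETURN__` bound in the namespace, and the `-->` rewrite turns an implication into a conditional expression that is vacuously True when the antecedent fails. A stripped-down sketch of that runtime check follows; the wrapper is an assumption for illustration (the library's real `_wrap` also handles backtick history, `<-->`, and more):

```python
import functools

def ensures_sketch(condition):
    # Naive "return" substitution, mirroring the decorator above.
    e = condition.replace("return", "__RETURN__")
    if "-->" in e:
        # "A --> B" becomes "(B) if (A) else True": vacuously true
        # whenever the antecedent A is false.
        ante, cons = e.split("-->")
        e = "({}) if ({}) else True".format(cons, ante)
    compiled = compile(e, '<ensures>', 'eval')

    def decorator(func):
        @functools.wraps(func)
        def wrapper(**kwargs):  # keyword-only, to keep the sketch short
            result = func(**kwargs)
            namespace = dict(kwargs, __RETURN__=result)
            assert eval(compiled, {}, namespace), condition
            return result
        return wrapper
    return decorator

@ensures_sketch("x >= 0 --> return >= 0")
def half(x):
    return x / 2

half(x=4)  # postcondition holds; a violation raises AssertionError
```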
mottosso/be | be/vendor/requests/sessions.py | https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/sessions.py#L538-L615 | def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if not isinstance(request, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
timeout = kwargs.get('timeout')
verify = kwargs.get('verify')
cert = kwargs.get('cert')
proxies = kwargs.get('proxies')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r | [
"def",
"send",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"# Set defaults that the hooks can utilize to ensure they always have",
"# the correct parameters to reproduce the previous request.",
"kwargs",
".",
"setdefault",
"(",
"'stream'",
",",
"self",
".",
"stream",
")",
"kwargs",
".",
"setdefault",
"(",
"'verify'",
",",
"self",
".",
"verify",
")",
"kwargs",
".",
"setdefault",
"(",
"'cert'",
",",
"self",
".",
"cert",
")",
"kwargs",
".",
"setdefault",
"(",
"'proxies'",
",",
"self",
".",
"proxies",
")",
"# It's possible that users might accidentally send a Request object.",
"# Guard against that specific failure case.",
"if",
"not",
"isinstance",
"(",
"request",
",",
"PreparedRequest",
")",
":",
"raise",
"ValueError",
"(",
"'You can only send PreparedRequests.'",
")",
"checked_urls",
"=",
"set",
"(",
")",
"while",
"request",
".",
"url",
"in",
"self",
".",
"redirect_cache",
":",
"checked_urls",
".",
"add",
"(",
"request",
".",
"url",
")",
"new_url",
"=",
"self",
".",
"redirect_cache",
".",
"get",
"(",
"request",
".",
"url",
")",
"if",
"new_url",
"in",
"checked_urls",
":",
"break",
"request",
".",
"url",
"=",
"new_url",
"# Set up variables needed for resolve_redirects and dispatching of hooks",
"allow_redirects",
"=",
"kwargs",
".",
"pop",
"(",
"'allow_redirects'",
",",
"True",
")",
"stream",
"=",
"kwargs",
".",
"get",
"(",
"'stream'",
")",
"timeout",
"=",
"kwargs",
".",
"get",
"(",
"'timeout'",
")",
"verify",
"=",
"kwargs",
".",
"get",
"(",
"'verify'",
")",
"cert",
"=",
"kwargs",
".",
"get",
"(",
"'cert'",
")",
"proxies",
"=",
"kwargs",
".",
"get",
"(",
"'proxies'",
")",
"hooks",
"=",
"request",
".",
"hooks",
"# Get the appropriate adapter to use",
"adapter",
"=",
"self",
".",
"get_adapter",
"(",
"url",
"=",
"request",
".",
"url",
")",
"# Start time (approximately) of the request",
"start",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"# Send the request",
"r",
"=",
"adapter",
".",
"send",
"(",
"request",
",",
"*",
"*",
"kwargs",
")",
"# Total elapsed time of the request (approximately)",
"r",
".",
"elapsed",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"start",
"# Response manipulation hooks",
"r",
"=",
"dispatch_hook",
"(",
"'response'",
",",
"hooks",
",",
"r",
",",
"*",
"*",
"kwargs",
")",
"# Persist cookies",
"if",
"r",
".",
"history",
":",
"# If the hooks create history then we want those cookies too",
"for",
"resp",
"in",
"r",
".",
"history",
":",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"resp",
".",
"request",
",",
"resp",
".",
"raw",
")",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"request",
",",
"r",
".",
"raw",
")",
"# Redirect resolving generator.",
"gen",
"=",
"self",
".",
"resolve_redirects",
"(",
"r",
",",
"request",
",",
"stream",
"=",
"stream",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"cert",
"=",
"cert",
",",
"proxies",
"=",
"proxies",
")",
"# Resolve redirects if allowed.",
"history",
"=",
"[",
"resp",
"for",
"resp",
"in",
"gen",
"]",
"if",
"allow_redirects",
"else",
"[",
"]",
"# Shuffle things around if there's history.",
"if",
"history",
":",
"# Insert the first (original) request at the start",
"history",
".",
"insert",
"(",
"0",
",",
"r",
")",
"# Get the last request made",
"r",
"=",
"history",
".",
"pop",
"(",
")",
"r",
".",
"history",
"=",
"history",
"if",
"not",
"stream",
":",
"r",
".",
"content",
"return",
"r"
] | Send a given PreparedRequest. | [
"Send",
"a",
"given",
"PreparedRequest",
"."
] | python | train |
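`send` is normally reached through `Session.request`, but it can be called directly when the prepared request needs to be inspected or altered first. A short usage sketch; the URL is a placeholder:

```python
import requests

session = requests.Session()
req = requests.Request('GET', 'https://example.com/api', params={'q': 'test'})
prepped = session.prepare_request(req)   # merges session headers/cookies
resp = session.send(prepped, timeout=5, allow_redirects=True)
print(resp.status_code, resp.elapsed)    # elapsed is set by send() itself
```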
bashu/django-easy-maps | easy_maps/geocode.py | https://github.com/bashu/django-easy-maps/blob/32f4f3274443219e8828d93d09a406bf2a126982/easy_maps/geocode.py#L15-L30 | def google_v3(address):
"""
Given an address, return ``(computed_address, (latitude, longitude))``
tuple using Google Geocoding API v3.
"""
try:
g = geocoders.GoogleV3(api_key=settings.EASY_MAPS_GOOGLE_KEY or settings.EASY_MAPS_GOOGLE_MAPS_API_KEY)
results = g.geocode(smart_str(address), exactly_one=False)
if results is not None:
return results[0]
raise Error("No results found for '%s'" % address)
except (UnboundLocalError, ValueError, GeocoderServiceError) as e:
raise Error(e) | [
"def",
"google_v3",
"(",
"address",
")",
":",
"try",
":",
"g",
"=",
"geocoders",
".",
"GoogleV3",
"(",
"api_key",
"=",
"settings",
".",
"EASY_MAPS_GOOGLE_KEY",
"or",
"settings",
".",
"EASY_MAPS_GOOGLE_MAPS_API_KEY",
")",
"results",
"=",
"g",
".",
"geocode",
"(",
"smart_str",
"(",
"address",
")",
",",
"exactly_one",
"=",
"False",
")",
"if",
"results",
"is",
"not",
"None",
":",
"return",
"results",
"[",
"0",
"]",
"raise",
"Error",
"(",
"\"No results found for '%s'\"",
"%",
"address",
")",
"except",
"(",
"UnboundLocalError",
",",
"ValueError",
",",
"GeocoderServiceError",
")",
"as",
"e",
":",
"raise",
"Error",
"(",
"e",
")"
] | Given an address, return ``(computed_address, (latitude, longitude))``
tuple using Google Geocoding API v3. | [
"Given",
"an",
"address",
"return",
"(",
"computed_address",
"(",
"latitude",
"longitude",
"))",
"tuple",
"using",
"Google",
"Geocoding",
"API",
"v3",
"."
] | python | train |
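The wrapper above delegates to geopy's `GoogleV3` geocoder, takes the first candidate, and folds every failure mode into the app's own `Error`. A minimal sketch of the underlying geopy call; the API key and address are placeholders:

```python
from geopy import geocoders

g = geocoders.GoogleV3(api_key='YOUR_API_KEY')  # placeholder key
results = g.geocode('1600 Amphitheatre Parkway, Mountain View',
                    exactly_one=False)          # list of Locations, or None
if results is not None:
    first = results[0]
    print(first.address, (first.latitude, first.longitude))
```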