repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class: python) | partition (3 classes: train/valid/test) |
---|---|---|---|---|---|---|---|---|
openpermissions/chub | chub/api.py | https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/api.py#L61-L69 | def _sub_resource(self, path):
    """
    get or create sub resource
    """
    if path not in self.resource_map:
        self.resource_map[path] = Resource(
            path, self.fetch, self.resource_map,
            default_headers=self.default_headers)
    return self.resource_map[path] | [
"def",
"_sub_resource",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"not",
"in",
"self",
".",
"resource_map",
":",
"self",
".",
"resource_map",
"[",
"path",
"]",
"=",
"Resource",
"(",
"path",
",",
"self",
".",
"fetch",
",",
"self",
".",
"resource_map",
",",
"default_headers",
"=",
"self",
".",
"default_headers",
")",
"return",
"self",
".",
"resource_map",
"[",
"path",
"]"
] | get or create sub resource | [
"get",
"or",
"create",
"sub",
"resource"
] | python | train |
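Editor's note: the `_sub_resource` method in the row above is a memoising factory, so repeated lookups of the same path return the same object. A minimal standalone sketch of that pattern follows; the `Resource` stub here is a hypothetical stand-in, not chub's real class.

```python
class Resource:
    # Hypothetical stand-in for chub's Resource, reduced to what the cache needs.
    def __init__(self, path, fetch, resource_map, default_headers=None):
        self.path, self.fetch = path, fetch
        self.resource_map, self.default_headers = resource_map, default_headers


resource_map = {}

def sub_resource(path, fetch=lambda *a, **kw: None, default_headers=None):
    # Create the Resource on first access, reuse it afterwards.
    if path not in resource_map:
        resource_map[path] = Resource(path, fetch, resource_map,
                                      default_headers=default_headers)
    return resource_map[path]

assert sub_resource("/users") is sub_resource("/users")  # same cached object
```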
basho/riak-python-client | riak/codecs/pbuf.py | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/pbuf.py#L847-L895 | def decode_timeseries_row(self, tsrow, tscols=None,
convert_timestamp=False):
"""
Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list
"""
row = []
for i, cell in enumerate(tsrow.cells):
col = None
if tscols is not None:
col = tscols[i]
if cell.HasField('varchar_value'):
if col and not (col.type == TsColumnType.Value('VARCHAR') or
col.type == TsColumnType.Value('BLOB')):
raise TypeError('expected VARCHAR or BLOB column')
else:
row.append(cell.varchar_value)
elif cell.HasField('sint64_value'):
if col and col.type != TsColumnType.Value('SINT64'):
raise TypeError('expected SINT64 column')
else:
row.append(cell.sint64_value)
elif cell.HasField('double_value'):
if col and col.type != TsColumnType.Value('DOUBLE'):
raise TypeError('expected DOUBLE column')
else:
row.append(cell.double_value)
elif cell.HasField('timestamp_value'):
if col and col.type != TsColumnType.Value('TIMESTAMP'):
raise TypeError('expected TIMESTAMP column')
else:
dt = cell.timestamp_value
if convert_timestamp:
dt = datetime_from_unix_time_millis(
cell.timestamp_value)
row.append(dt)
elif cell.HasField('boolean_value'):
if col and col.type != TsColumnType.Value('BOOLEAN'):
raise TypeError('expected BOOLEAN column')
else:
row.append(cell.boolean_value)
else:
row.append(None)
return row | [
"def",
"decode_timeseries_row",
"(",
"self",
",",
"tsrow",
",",
"tscols",
"=",
"None",
",",
"convert_timestamp",
"=",
"False",
")",
":",
"row",
"=",
"[",
"]",
"for",
"i",
",",
"cell",
"in",
"enumerate",
"(",
"tsrow",
".",
"cells",
")",
":",
"col",
"=",
"None",
"if",
"tscols",
"is",
"not",
"None",
":",
"col",
"=",
"tscols",
"[",
"i",
"]",
"if",
"cell",
".",
"HasField",
"(",
"'varchar_value'",
")",
":",
"if",
"col",
"and",
"not",
"(",
"col",
".",
"type",
"==",
"TsColumnType",
".",
"Value",
"(",
"'VARCHAR'",
")",
"or",
"col",
".",
"type",
"==",
"TsColumnType",
".",
"Value",
"(",
"'BLOB'",
")",
")",
":",
"raise",
"TypeError",
"(",
"'expected VARCHAR or BLOB column'",
")",
"else",
":",
"row",
".",
"append",
"(",
"cell",
".",
"varchar_value",
")",
"elif",
"cell",
".",
"HasField",
"(",
"'sint64_value'",
")",
":",
"if",
"col",
"and",
"col",
".",
"type",
"!=",
"TsColumnType",
".",
"Value",
"(",
"'SINT64'",
")",
":",
"raise",
"TypeError",
"(",
"'expected SINT64 column'",
")",
"else",
":",
"row",
".",
"append",
"(",
"cell",
".",
"sint64_value",
")",
"elif",
"cell",
".",
"HasField",
"(",
"'double_value'",
")",
":",
"if",
"col",
"and",
"col",
".",
"type",
"!=",
"TsColumnType",
".",
"Value",
"(",
"'DOUBLE'",
")",
":",
"raise",
"TypeError",
"(",
"'expected DOUBLE column'",
")",
"else",
":",
"row",
".",
"append",
"(",
"cell",
".",
"double_value",
")",
"elif",
"cell",
".",
"HasField",
"(",
"'timestamp_value'",
")",
":",
"if",
"col",
"and",
"col",
".",
"type",
"!=",
"TsColumnType",
".",
"Value",
"(",
"'TIMESTAMP'",
")",
":",
"raise",
"TypeError",
"(",
"'expected TIMESTAMP column'",
")",
"else",
":",
"dt",
"=",
"cell",
".",
"timestamp_value",
"if",
"convert_timestamp",
":",
"dt",
"=",
"datetime_from_unix_time_millis",
"(",
"cell",
".",
"timestamp_value",
")",
"row",
".",
"append",
"(",
"dt",
")",
"elif",
"cell",
".",
"HasField",
"(",
"'boolean_value'",
")",
":",
"if",
"col",
"and",
"col",
".",
"type",
"!=",
"TsColumnType",
".",
"Value",
"(",
"'BOOLEAN'",
")",
":",
"raise",
"TypeError",
"(",
"'expected BOOLEAN column'",
")",
"else",
":",
"row",
".",
"append",
"(",
"cell",
".",
"boolean_value",
")",
"else",
":",
"row",
".",
"append",
"(",
"None",
")",
"return",
"row"
] | Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list | [
"Decodes",
"a",
"TsRow",
"into",
"a",
"list"
] | python | train |
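Editor's note: the decoder above dispatches on whichever protobuf field is set and, when `convert_timestamp` is true, converts `timestamp_value` (integer milliseconds since the Unix epoch) via `datetime_from_unix_time_millis`. A hedged sketch of what that helper amounts to; the real Riak helper may differ in timezone handling.

```python
from datetime import datetime, timezone

def datetime_from_unix_time_millis(millis):
    # Riak TS timestamps are integer milliseconds since the Unix epoch.
    return datetime.fromtimestamp(millis / 1000.0, tz=timezone.utc)

print(datetime_from_unix_time_millis(1500000000000))  # 2017-07-14 02:40:00+00:00
```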
yohell/python-tui | tui/__init__.py | https://github.com/yohell/python-tui/blob/de2e678e2f00e5940de52c000214dbcb8812a222/tui/__init__.py#L1513-L1533 | def strsettings(self, indent=0, maxindent=25, width=0):
"""Return user friendly help on positional arguments.
indent is the number of spaces preceeding the text on each line.
The indent of the documentation is dependent on the length of the
longest label that is shorter than maxindent. A label longer than
maxindent will be printed on its own line.
width is maximum allowed page width, use self.width if 0.
"""
out = []
makelabel = lambda name: ' ' * indent + name + ': '
settingsindent = _autoindent([makelabel(s) for s in self.options], indent, maxindent)
for name in self.option_order:
option = self.options[name]
label = makelabel(name)
settingshelp = "%s(%s): %s" % (option.formatname, option.strvalue, option.location)
wrapped = self._wrap_labelled(label, settingshelp, settingsindent, width)
out.extend(wrapped)
return '\n'.join(out) | [
"def",
"strsettings",
"(",
"self",
",",
"indent",
"=",
"0",
",",
"maxindent",
"=",
"25",
",",
"width",
"=",
"0",
")",
":",
"out",
"=",
"[",
"]",
"makelabel",
"=",
"lambda",
"name",
":",
"' '",
"*",
"indent",
"+",
"name",
"+",
"': '",
"settingsindent",
"=",
"_autoindent",
"(",
"[",
"makelabel",
"(",
"s",
")",
"for",
"s",
"in",
"self",
".",
"options",
"]",
",",
"indent",
",",
"maxindent",
")",
"for",
"name",
"in",
"self",
".",
"option_order",
":",
"option",
"=",
"self",
".",
"options",
"[",
"name",
"]",
"label",
"=",
"makelabel",
"(",
"name",
")",
"settingshelp",
"=",
"\"%s(%s): %s\"",
"%",
"(",
"option",
".",
"formatname",
",",
"option",
".",
"strvalue",
",",
"option",
".",
"location",
")",
"wrapped",
"=",
"self",
".",
"_wrap_labelled",
"(",
"label",
",",
"settingshelp",
",",
"settingsindent",
",",
"width",
")",
"out",
".",
"extend",
"(",
"wrapped",
")",
"return",
"'\\n'",
".",
"join",
"(",
"out",
")"
] | Return user friendly help on positional arguments.
indent is the number of spaces preceeding the text on each line.
The indent of the documentation is dependent on the length of the
longest label that is shorter than maxindent. A label longer than
maxindent will be printed on its own line.
width is maximum allowed page width, use self.width if 0. | [
"Return",
"user",
"friendly",
"help",
"on",
"positional",
"arguments",
"."
] | python | valid |
saltstack/salt | salt/states/pagerduty_service.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pagerduty_service.py#L87-L98 | def absent(profile='pagerduty', subdomain=None, api_key=None, **kwargs):
    '''
    Ensure a pagerduty service does not exist.
    Name can be the service name or pagerduty service id.
    '''
    r = __salt__['pagerduty_util.resource_absent']('services',
                                                   ['name', 'id'],
                                                   profile,
                                                   subdomain,
                                                   api_key,
                                                   **kwargs)
    return r | [
"def",
"absent",
"(",
"profile",
"=",
"'pagerduty'",
",",
"subdomain",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"__salt__",
"[",
"'pagerduty_util.resource_absent'",
"]",
"(",
"'services'",
",",
"[",
"'name'",
",",
"'id'",
"]",
",",
"profile",
",",
"subdomain",
",",
"api_key",
",",
"*",
"*",
"kwargs",
")",
"return",
"r"
] | Ensure a pagerduty service does not exist.
Name can be the service name or pagerduty service id. | [
"Ensure",
"a",
"pagerduty",
"service",
"does",
"not",
"exist",
".",
"Name",
"can",
"be",
"the",
"service",
"name",
"or",
"pagerduty",
"service",
"id",
"."
] | python | train |
twilio/twilio-python | twilio/rest/notify/v1/service/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/notify/v1/service/__init__.py#L38-L87 | def create(self, friendly_name=values.unset, apn_credential_sid=values.unset,
gcm_credential_sid=values.unset, messaging_service_sid=values.unset,
facebook_messenger_page_id=values.unset,
default_apn_notification_protocol_version=values.unset,
default_gcm_notification_protocol_version=values.unset,
fcm_credential_sid=values.unset,
default_fcm_notification_protocol_version=values.unset,
log_enabled=values.unset, alexa_skill_id=values.unset,
default_alexa_notification_protocol_version=values.unset):
"""
Create a new ServiceInstance
:param unicode friendly_name: A string to describe the resource
:param unicode apn_credential_sid: The SID of the Credential to use for APN Bindings
:param unicode gcm_credential_sid: The SID of the Credential to use for GCM Bindings
:param unicode messaging_service_sid: The SID of the Messaging Service to use for SMS Bindings
:param unicode facebook_messenger_page_id: Deprecated
:param unicode default_apn_notification_protocol_version: The protocol version to use for sending APNS notifications
:param unicode default_gcm_notification_protocol_version: The protocol version to use for sending GCM notifications
:param unicode fcm_credential_sid: The SID of the Credential to use for FCM Bindings
:param unicode default_fcm_notification_protocol_version: The protocol version to use for sending FCM notifications
:param bool log_enabled: Whether to log notifications
:param unicode alexa_skill_id: Deprecated
:param unicode default_alexa_notification_protocol_version: Deprecated
:returns: Newly created ServiceInstance
:rtype: twilio.rest.notify.v1.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'ApnCredentialSid': apn_credential_sid,
'GcmCredentialSid': gcm_credential_sid,
'MessagingServiceSid': messaging_service_sid,
'FacebookMessengerPageId': facebook_messenger_page_id,
'DefaultApnNotificationProtocolVersion': default_apn_notification_protocol_version,
'DefaultGcmNotificationProtocolVersion': default_gcm_notification_protocol_version,
'FcmCredentialSid': fcm_credential_sid,
'DefaultFcmNotificationProtocolVersion': default_fcm_notification_protocol_version,
'LogEnabled': log_enabled,
'AlexaSkillId': alexa_skill_id,
'DefaultAlexaNotificationProtocolVersion': default_alexa_notification_protocol_version,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ServiceInstance(self._version, payload, ) | [
"def",
"create",
"(",
"self",
",",
"friendly_name",
"=",
"values",
".",
"unset",
",",
"apn_credential_sid",
"=",
"values",
".",
"unset",
",",
"gcm_credential_sid",
"=",
"values",
".",
"unset",
",",
"messaging_service_sid",
"=",
"values",
".",
"unset",
",",
"facebook_messenger_page_id",
"=",
"values",
".",
"unset",
",",
"default_apn_notification_protocol_version",
"=",
"values",
".",
"unset",
",",
"default_gcm_notification_protocol_version",
"=",
"values",
".",
"unset",
",",
"fcm_credential_sid",
"=",
"values",
".",
"unset",
",",
"default_fcm_notification_protocol_version",
"=",
"values",
".",
"unset",
",",
"log_enabled",
"=",
"values",
".",
"unset",
",",
"alexa_skill_id",
"=",
"values",
".",
"unset",
",",
"default_alexa_notification_protocol_version",
"=",
"values",
".",
"unset",
")",
":",
"data",
"=",
"values",
".",
"of",
"(",
"{",
"'FriendlyName'",
":",
"friendly_name",
",",
"'ApnCredentialSid'",
":",
"apn_credential_sid",
",",
"'GcmCredentialSid'",
":",
"gcm_credential_sid",
",",
"'MessagingServiceSid'",
":",
"messaging_service_sid",
",",
"'FacebookMessengerPageId'",
":",
"facebook_messenger_page_id",
",",
"'DefaultApnNotificationProtocolVersion'",
":",
"default_apn_notification_protocol_version",
",",
"'DefaultGcmNotificationProtocolVersion'",
":",
"default_gcm_notification_protocol_version",
",",
"'FcmCredentialSid'",
":",
"fcm_credential_sid",
",",
"'DefaultFcmNotificationProtocolVersion'",
":",
"default_fcm_notification_protocol_version",
",",
"'LogEnabled'",
":",
"log_enabled",
",",
"'AlexaSkillId'",
":",
"alexa_skill_id",
",",
"'DefaultAlexaNotificationProtocolVersion'",
":",
"default_alexa_notification_protocol_version",
",",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"create",
"(",
"'POST'",
",",
"self",
".",
"_uri",
",",
"data",
"=",
"data",
",",
")",
"return",
"ServiceInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
")"
] | Create a new ServiceInstance
:param unicode friendly_name: A string to describe the resource
:param unicode apn_credential_sid: The SID of the Credential to use for APN Bindings
:param unicode gcm_credential_sid: The SID of the Credential to use for GCM Bindings
:param unicode messaging_service_sid: The SID of the Messaging Service to use for SMS Bindings
:param unicode facebook_messenger_page_id: Deprecated
:param unicode default_apn_notification_protocol_version: The protocol version to use for sending APNS notifications
:param unicode default_gcm_notification_protocol_version: The protocol version to use for sending GCM notifications
:param unicode fcm_credential_sid: The SID of the Credential to use for FCM Bindings
:param unicode default_fcm_notification_protocol_version: The protocol version to use for sending FCM notifications
:param bool log_enabled: Whether to log notifications
:param unicode alexa_skill_id: Deprecated
:param unicode default_alexa_notification_protocol_version: Deprecated
:returns: Newly created ServiceInstance
:rtype: twilio.rest.notify.v1.service.ServiceInstance | [
"Create",
"a",
"new",
"ServiceInstance"
] | python | train |
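Editor's note: the `create()` method above maps its keyword arguments onto POST form fields and returns a `ServiceInstance`. A hedged usage sketch follows; the credentials are placeholders and the exact client wiring can vary between twilio-python versions.

```python
from twilio.rest import Client

# Placeholder credentials -- substitute a real account SID and auth token.
client = Client("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "your_auth_token")
service = client.notify.v1.services.create(friendly_name="My Notify Service")
print(service.sid)
```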
Josef-Friedrich/tmep | tmep/doc.py | https://github.com/Josef-Friedrich/tmep/blob/326de14f5b9498696a1f06a8be3d39e33e376102/tmep/doc.py#L33-L52 | def extract_value(self, string, key, inline_code=True):
    """Extract strings from the docstrings
    .. code-block:: text
        * synopsis: ``%shorten{text, max_size}``
        * example: ``%shorten{$title, 32}``
        * description: Shorten “text” on word boundarys.
    """
    regex = r'\* ' + key + ': '
    if inline_code:
        regex = regex + '``(.*)``'
    else:
        regex = regex + '(.*)'
    value = re.findall(regex, string)
    if value:
        return value[0].replace('``', '')
    else:
        return False | [
"def",
"extract_value",
"(",
"self",
",",
"string",
",",
"key",
",",
"inline_code",
"=",
"True",
")",
":",
"regex",
"=",
"r'\\* '",
"+",
"key",
"+",
"': '",
"if",
"inline_code",
":",
"regex",
"=",
"regex",
"+",
"'``(.*)``'",
"else",
":",
"regex",
"=",
"regex",
"+",
"'(.*)'",
"value",
"=",
"re",
".",
"findall",
"(",
"regex",
",",
"string",
")",
"if",
"value",
":",
"return",
"value",
"[",
"0",
"]",
".",
"replace",
"(",
"'``'",
",",
"''",
")",
"else",
":",
"return",
"False"
] | Extract strings from the docstrings
.. code-block:: text
* synopsis: ``%shorten{text, max_size}``
* example: ``%shorten{$title, 32}``
* description: Shorten “text” on word boundarys. | [
"Extract",
"strings",
"from",
"the",
"docstrings"
] | python | train |
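Editor's note: `extract_value` pulls a single annotated field (e.g. `synopsis`) out of a reStructuredText-style docstring. A condensed, self-contained version of the same regex logic, shown only for illustration:

```python
import re

def extract_value(string, key, inline_code=True):
    regex = r'\* ' + key + ': '
    regex += '``(.*)``' if inline_code else '(.*)'
    value = re.findall(regex, string)
    return value[0].replace('``', '') if value else False

docstring = """
* synopsis: ``%shorten{text, max_size}``
* example: ``%shorten{$title, 32}``
"""
print(extract_value(docstring, 'synopsis'))  # %shorten{text, max_size}
```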
jstitch/MambuPy | MambuPy/mambuutil.py | https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/mambuutil.py#L548-L562 | def getproductsurl(idproduct, *args, **kwargs):
    """Request loan Products URL.
    If idproduct is set, you'll get a response adequate for a MambuProduct object.
    If not set, you'll get a response adequate for a MambuProducts object.
    See mambuproduct module and pydoc for further information.
    No current implemented filter parameters.
    See Mambu official developer documentation for further details, and
    info on parameters that may be implemented here in the future.
    """
    productidparam = "" if idproduct == "" else "/"+idproduct
    url = getmambuurl(*args,**kwargs) + "loanproducts" + productidparam
    return url | [
"def",
"getproductsurl",
"(",
"idproduct",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"productidparam",
"=",
"\"\"",
"if",
"idproduct",
"==",
"\"\"",
"else",
"\"/\"",
"+",
"idproduct",
"url",
"=",
"getmambuurl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"+",
"\"loanproducts\"",
"+",
"productidparam",
"return",
"url"
] | Request loan Products URL.
If idproduct is set, you'll get a response adequate for a MambuProduct object.
If not set, you'll get a response adequate for a MambuProducts object.
See mambuproduct module and pydoc for further information.
No current implemented filter parameters.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future. | [
"Request",
"loan",
"Products",
"URL",
"."
] | python | train |
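Editor's note: the function simply appends `loanproducts[/<id>]` to the base API URL. A hedged illustration follows; the real `getmambuurl()` builds the base URL from configured credentials, and the domain used below is made up.

```python
def getmambuurl(domain="example.mambu.com"):
    # Hypothetical simplification of MambuPy's real base-URL builder.
    return "https://%s/api/" % domain

def getproductsurl(idproduct, *args, **kwargs):
    productidparam = "" if idproduct == "" else "/" + idproduct
    return getmambuurl(*args, **kwargs) + "loanproducts" + productidparam

print(getproductsurl(""))        # https://example.mambu.com/api/loanproducts
print(getproductsurl("PROD-1"))  # https://example.mambu.com/api/loanproducts/PROD-1
```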
pandas-dev/pandas | pandas/core/groupby/base.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/base.py#L33-L67 | def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = {attr: getattr(self, attr) for attr in self._attributes}
# Try to select from a DataFrame, falling back to a Series
try:
groupby = self._groupby[key]
except IndexError:
groupby = self._groupby
self = self.__class__(subset,
groupby=groupby,
parent=self,
**kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self | [
"def",
"_gotitem",
"(",
"self",
",",
"key",
",",
"ndim",
",",
"subset",
"=",
"None",
")",
":",
"# create a new object to prevent aliasing",
"if",
"subset",
"is",
"None",
":",
"subset",
"=",
"self",
".",
"obj",
"# we need to make a shallow copy of ourselves",
"# with the same groupby",
"kwargs",
"=",
"{",
"attr",
":",
"getattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"self",
".",
"_attributes",
"}",
"# Try to select from a DataFrame, falling back to a Series",
"try",
":",
"groupby",
"=",
"self",
".",
"_groupby",
"[",
"key",
"]",
"except",
"IndexError",
":",
"groupby",
"=",
"self",
".",
"_groupby",
"self",
"=",
"self",
".",
"__class__",
"(",
"subset",
",",
"groupby",
"=",
"groupby",
",",
"parent",
"=",
"self",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_reset_cache",
"(",
")",
"if",
"subset",
".",
"ndim",
"==",
"2",
":",
"if",
"is_scalar",
"(",
"key",
")",
"and",
"key",
"in",
"subset",
"or",
"is_list_like",
"(",
"key",
")",
":",
"self",
".",
"_selection",
"=",
"key",
"return",
"self"
] | Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on | [
"Sub",
"-",
"classes",
"to",
"define",
".",
"Return",
"a",
"sliced",
"object",
"."
] | python | train |
ajyoon/blur | blur/markov/graph.py | https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L152-L206 | def feather_links(self, factor=0.01, include_self=False):
"""
Feather the links of connected nodes.
Go through every node in the network and make it inherit the links
of the other nodes it is connected to. Because the link weight sum
for any given node can be very different within a graph, the weights
of inherited links are made proportional to the sum weight of the
parent nodes.
Args:
factor (float): multiplier of neighbor links
include_self (bool): whether nodes can inherit links pointing
to themselves
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_2, 1)
>>> node_2.add_link(node_1, 1)
>>> graph = Graph([node_1, node_2])
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
Two 1
>>> graph.feather_links(include_self=True)
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
Two 1
One 0.01
"""
def feather_node(node):
node_weight_sum = sum(l.weight for l in node.link_list)
# Iterate over a copy of the original link list since we will
# need to refer to this while modifying node.link_list
for original_link in node.link_list[:]:
neighbor_node = original_link.target
neighbor_weight = original_link.weight
feather_weight = neighbor_weight / node_weight_sum
neighbor_node_weight_sum = sum(l.weight for
l in neighbor_node.link_list)
# Iterate over the links belonging to the neighbor_node,
# copying its links to ``node`` with proportional weights
for neighbor_link in neighbor_node.link_list:
if (not include_self) and (neighbor_link.target == node):
continue
relative_link_weight = (neighbor_link.weight /
neighbor_node_weight_sum)
feathered_link_weight = round((relative_link_weight *
feather_weight * factor), 2)
node.add_link(neighbor_link.target, feathered_link_weight)
for n in self.node_list:
feather_node(n) | [
"def",
"feather_links",
"(",
"self",
",",
"factor",
"=",
"0.01",
",",
"include_self",
"=",
"False",
")",
":",
"def",
"feather_node",
"(",
"node",
")",
":",
"node_weight_sum",
"=",
"sum",
"(",
"l",
".",
"weight",
"for",
"l",
"in",
"node",
".",
"link_list",
")",
"# Iterate over a copy of the original link list since we will",
"# need to refer to this while modifying node.link_list",
"for",
"original_link",
"in",
"node",
".",
"link_list",
"[",
":",
"]",
":",
"neighbor_node",
"=",
"original_link",
".",
"target",
"neighbor_weight",
"=",
"original_link",
".",
"weight",
"feather_weight",
"=",
"neighbor_weight",
"/",
"node_weight_sum",
"neighbor_node_weight_sum",
"=",
"sum",
"(",
"l",
".",
"weight",
"for",
"l",
"in",
"neighbor_node",
".",
"link_list",
")",
"# Iterate over the links belonging to the neighbor_node,",
"# copying its links to ``node`` with proportional weights",
"for",
"neighbor_link",
"in",
"neighbor_node",
".",
"link_list",
":",
"if",
"(",
"not",
"include_self",
")",
"and",
"(",
"neighbor_link",
".",
"target",
"==",
"node",
")",
":",
"continue",
"relative_link_weight",
"=",
"(",
"neighbor_link",
".",
"weight",
"/",
"neighbor_node_weight_sum",
")",
"feathered_link_weight",
"=",
"round",
"(",
"(",
"relative_link_weight",
"*",
"feather_weight",
"*",
"factor",
")",
",",
"2",
")",
"node",
".",
"add_link",
"(",
"neighbor_link",
".",
"target",
",",
"feathered_link_weight",
")",
"for",
"n",
"in",
"self",
".",
"node_list",
":",
"feather_node",
"(",
"n",
")"
] | Feather the links of connected nodes.
Go through every node in the network and make it inherit the links
of the other nodes it is connected to. Because the link weight sum
for any given node can be very different within a graph, the weights
of inherited links are made proportional to the sum weight of the
parent nodes.
Args:
factor (float): multiplier of neighbor links
include_self (bool): whether nodes can inherit links pointing
to themselves
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_2, 1)
>>> node_2.add_link(node_1, 1)
>>> graph = Graph([node_1, node_2])
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
Two 1
>>> graph.feather_links(include_self=True)
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
Two 1
One 0.01 | [
"Feather",
"the",
"links",
"of",
"connected",
"nodes",
"."
] | python | train |
pmichali/whodunit | whodunit/__init__.py | https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L306-L319 | def sort(self):
    """Sort by commit size, per author."""
    # First sort commits by author email
    users = []
    # Group commits by author email, so they can be merged
    for _, group in itertools.groupby(sorted(self.commits),
                                      operator.attrgetter('author_mail')):
        if group:
            users.append(self.merge_user_commits(group))
    # Finally sort by the (aggregated) commits' line counts
    self.sorted_commits = sorted(users,
                                 key=operator.attrgetter('line_count'),
                                 reverse=True)
    return self.sorted_commits | [
"def",
"sort",
"(",
"self",
")",
":",
"# First sort commits by author email",
"users",
"=",
"[",
"]",
"# Group commits by author email, so they can be merged",
"for",
"_",
",",
"group",
"in",
"itertools",
".",
"groupby",
"(",
"sorted",
"(",
"self",
".",
"commits",
")",
",",
"operator",
".",
"attrgetter",
"(",
"'author_mail'",
")",
")",
":",
"if",
"group",
":",
"users",
".",
"append",
"(",
"self",
".",
"merge_user_commits",
"(",
"group",
")",
")",
"# Finally sort by the (aggregated) commits' line counts",
"self",
".",
"sorted_commits",
"=",
"sorted",
"(",
"users",
",",
"key",
"=",
"operator",
".",
"attrgetter",
"(",
"'line_count'",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"self",
".",
"sorted_commits"
] | Sort by commit size, per author. | [
"Sort",
"by",
"commit",
"size",
"per",
"author",
"."
] | python | train |
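Editor's note: `sort()` groups blame commits by author e-mail, merges each author's commits, and orders authors by total line count. The same idea in a self-contained sketch; the namedtuple is only a stand-in for whodunit's commit records.

```python
import itertools
import operator
from collections import namedtuple

Commit = namedtuple('Commit', 'author_mail line_count')

def merge_user_commits(group):
    group = list(group)
    return Commit(group[0].author_mail, sum(c.line_count for c in group))

commits = [Commit('a@x', 3), Commit('b@x', 10), Commit('a@x', 4)]
users = [merge_user_commits(g) for _, g in
         itertools.groupby(sorted(commits), operator.attrgetter('author_mail'))]
print(sorted(users, key=operator.attrgetter('line_count'), reverse=True))
# [Commit(author_mail='b@x', line_count=10), Commit(author_mail='a@x', line_count=7)]
```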
hollenstein/maspy | maspy/sil.py | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/sil.py#L271-L327 | def expectedLabelPosition(peptide, labelStateInfo, sequence=None,
modPositions=None):
"""Returns a modification description of a certain label state of a peptide.
:param peptide: Peptide sequence used to calculat the expected label state
modifications
:param labelStateInfo: An entry of :attr:`LabelDescriptor.labels` that
describes a label state
:param sequence: unmodified amino acid sequence of :var:`peptide`, if None
it is generated by :func:`maspy.peptidemethods.removeModifications()`
:param modPositions: dictionary describing the modification state of
"peptide", if None it is generated by
:func:`maspy.peptidemethods.returnModPositions()`
:returns: {sequence position: sorted list of expected label modifications
on that position, ...
}
"""
if modPositions is None:
modPositions = maspy.peptidemethods.returnModPositions(peptide,
indexStart=0
)
if sequence is None:
sequence = maspy.peptidemethods.removeModifications(peptide)
currLabelMods = dict()
for labelPosition, labelSymbols in viewitems(labelStateInfo['aminoAcidLabels']):
labelSymbols = aux.toList(labelSymbols)
if labelSymbols == ['']:
pass
elif labelPosition == 'nTerm':
currLabelMods.setdefault(0, list())
currLabelMods[0].extend(labelSymbols)
else:
for sequencePosition in aux.findAllSubstrings(sequence,
labelPosition):
currLabelMods.setdefault(sequencePosition, list())
currLabelMods[sequencePosition].extend(labelSymbols)
if labelStateInfo['excludingModifications'] is not None:
for excludingMod, excludedLabelSymbol in viewitems(labelStateInfo['excludingModifications']):
if excludingMod not in modPositions:
continue
for excludingModPos in modPositions[excludingMod]:
if excludingModPos not in currLabelMods:
continue
if excludedLabelSymbol not in currLabelMods[excludingModPos]:
continue
if len(currLabelMods[excludingModPos]) == 1:
del(currLabelMods[excludingModPos])
else:
excludedModIndex = currLabelMods[excludingModPos].index(excludedLabelSymbol)
currLabelMods[excludingModPos].pop(excludedModIndex)
for sequencePosition in list(viewkeys(currLabelMods)):
currLabelMods[sequencePosition] = sorted(currLabelMods[sequencePosition])
return currLabelMods | [
"def",
"expectedLabelPosition",
"(",
"peptide",
",",
"labelStateInfo",
",",
"sequence",
"=",
"None",
",",
"modPositions",
"=",
"None",
")",
":",
"if",
"modPositions",
"is",
"None",
":",
"modPositions",
"=",
"maspy",
".",
"peptidemethods",
".",
"returnModPositions",
"(",
"peptide",
",",
"indexStart",
"=",
"0",
")",
"if",
"sequence",
"is",
"None",
":",
"sequence",
"=",
"maspy",
".",
"peptidemethods",
".",
"removeModifications",
"(",
"peptide",
")",
"currLabelMods",
"=",
"dict",
"(",
")",
"for",
"labelPosition",
",",
"labelSymbols",
"in",
"viewitems",
"(",
"labelStateInfo",
"[",
"'aminoAcidLabels'",
"]",
")",
":",
"labelSymbols",
"=",
"aux",
".",
"toList",
"(",
"labelSymbols",
")",
"if",
"labelSymbols",
"==",
"[",
"''",
"]",
":",
"pass",
"elif",
"labelPosition",
"==",
"'nTerm'",
":",
"currLabelMods",
".",
"setdefault",
"(",
"0",
",",
"list",
"(",
")",
")",
"currLabelMods",
"[",
"0",
"]",
".",
"extend",
"(",
"labelSymbols",
")",
"else",
":",
"for",
"sequencePosition",
"in",
"aux",
".",
"findAllSubstrings",
"(",
"sequence",
",",
"labelPosition",
")",
":",
"currLabelMods",
".",
"setdefault",
"(",
"sequencePosition",
",",
"list",
"(",
")",
")",
"currLabelMods",
"[",
"sequencePosition",
"]",
".",
"extend",
"(",
"labelSymbols",
")",
"if",
"labelStateInfo",
"[",
"'excludingModifications'",
"]",
"is",
"not",
"None",
":",
"for",
"excludingMod",
",",
"excludedLabelSymbol",
"in",
"viewitems",
"(",
"labelStateInfo",
"[",
"'excludingModifications'",
"]",
")",
":",
"if",
"excludingMod",
"not",
"in",
"modPositions",
":",
"continue",
"for",
"excludingModPos",
"in",
"modPositions",
"[",
"excludingMod",
"]",
":",
"if",
"excludingModPos",
"not",
"in",
"currLabelMods",
":",
"continue",
"if",
"excludedLabelSymbol",
"not",
"in",
"currLabelMods",
"[",
"excludingModPos",
"]",
":",
"continue",
"if",
"len",
"(",
"currLabelMods",
"[",
"excludingModPos",
"]",
")",
"==",
"1",
":",
"del",
"(",
"currLabelMods",
"[",
"excludingModPos",
"]",
")",
"else",
":",
"excludedModIndex",
"=",
"currLabelMods",
"[",
"excludingModPos",
"]",
".",
"index",
"(",
"excludedLabelSymbol",
")",
"currLabelMods",
"[",
"excludingModPos",
"]",
".",
"pop",
"(",
"excludedModIndex",
")",
"for",
"sequencePosition",
"in",
"list",
"(",
"viewkeys",
"(",
"currLabelMods",
")",
")",
":",
"currLabelMods",
"[",
"sequencePosition",
"]",
"=",
"sorted",
"(",
"currLabelMods",
"[",
"sequencePosition",
"]",
")",
"return",
"currLabelMods"
] | Returns a modification description of a certain label state of a peptide.
:param peptide: Peptide sequence used to calculat the expected label state
modifications
:param labelStateInfo: An entry of :attr:`LabelDescriptor.labels` that
describes a label state
:param sequence: unmodified amino acid sequence of :var:`peptide`, if None
it is generated by :func:`maspy.peptidemethods.removeModifications()`
:param modPositions: dictionary describing the modification state of
"peptide", if None it is generated by
:func:`maspy.peptidemethods.returnModPositions()`
:returns: {sequence position: sorted list of expected label modifications
on that position, ...
} | [
"Returns",
"a",
"modification",
"description",
"of",
"a",
"certain",
"label",
"state",
"of",
"a",
"peptide",
"."
] | python | train |
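Editor's note: the label-placement step above relies on `aux.findAllSubstrings()` to locate every residue position that can carry a label. A hypothetical stand-in for that helper, just to make the placement step concrete:

```python
def findAllSubstrings(sequence, substring):
    # Yield every start position of `substring` in `sequence`
    # (hypothetical equivalent of maspy's aux.findAllSubstrings).
    start = 0
    while True:
        pos = sequence.find(substring, start)
        if pos == -1:
            return
        yield pos
        start = pos + 1

print(list(findAllSubstrings("PEPTIDEK", "P")))  # [0, 2]
print(list(findAllSubstrings("PEPTIDEK", "K")))  # [7]
```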
ask/carrot | carrot/messaging.py | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L712-L772 | def send(self, message_data, routing_key=None, delivery_mode=None,
mandatory=False, immediate=False, priority=0, content_type=None,
content_encoding=None, serializer=None, exchange=None):
"""Send a message.
:param message_data: The message data to send. Can be a list,
dictionary or a string.
:keyword routing_key: A custom routing key for the message.
If not set, the default routing key set in the :attr:`routing_key`
attribute is used.
:keyword mandatory: If set, the message has mandatory routing.
By default the message is silently dropped by the server if it
can't be routed to a queue. However - If the message is mandatory,
an exception will be raised instead.
:keyword immediate: Request immediate delivery.
If the message cannot be routed to a queue consumer immediately,
an exception will be raised. This is instead of the default
behaviour, where the server will accept and queue the message,
but with no guarantee that the message will ever be consumed.
:keyword delivery_mode: Override the default :attr:`delivery_mode`.
:keyword priority: The message priority, ``0`` to ``9``.
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword serializer: Override the default :attr:`serializer`.
:keyword exchange: Override the exchange to publish to.
Note that this exchange must have been declared.
"""
headers = None
routing_key = routing_key or self.routing_key
if self.exchange_type == "headers":
headers, routing_key = routing_key, ""
exchange = exchange or self.exchange
message = self.create_message(message_data, priority=priority,
delivery_mode=delivery_mode,
content_type=content_type,
content_encoding=content_encoding,
serializer=serializer)
self.backend.publish(message,
exchange=exchange, routing_key=routing_key,
mandatory=mandatory, immediate=immediate,
headers=headers) | [
"def",
"send",
"(",
"self",
",",
"message_data",
",",
"routing_key",
"=",
"None",
",",
"delivery_mode",
"=",
"None",
",",
"mandatory",
"=",
"False",
",",
"immediate",
"=",
"False",
",",
"priority",
"=",
"0",
",",
"content_type",
"=",
"None",
",",
"content_encoding",
"=",
"None",
",",
"serializer",
"=",
"None",
",",
"exchange",
"=",
"None",
")",
":",
"headers",
"=",
"None",
"routing_key",
"=",
"routing_key",
"or",
"self",
".",
"routing_key",
"if",
"self",
".",
"exchange_type",
"==",
"\"headers\"",
":",
"headers",
",",
"routing_key",
"=",
"routing_key",
",",
"\"\"",
"exchange",
"=",
"exchange",
"or",
"self",
".",
"exchange",
"message",
"=",
"self",
".",
"create_message",
"(",
"message_data",
",",
"priority",
"=",
"priority",
",",
"delivery_mode",
"=",
"delivery_mode",
",",
"content_type",
"=",
"content_type",
",",
"content_encoding",
"=",
"content_encoding",
",",
"serializer",
"=",
"serializer",
")",
"self",
".",
"backend",
".",
"publish",
"(",
"message",
",",
"exchange",
"=",
"exchange",
",",
"routing_key",
"=",
"routing_key",
",",
"mandatory",
"=",
"mandatory",
",",
"immediate",
"=",
"immediate",
",",
"headers",
"=",
"headers",
")"
] | Send a message.
:param message_data: The message data to send. Can be a list,
dictionary or a string.
:keyword routing_key: A custom routing key for the message.
If not set, the default routing key set in the :attr:`routing_key`
attribute is used.
:keyword mandatory: If set, the message has mandatory routing.
By default the message is silently dropped by the server if it
can't be routed to a queue. However - If the message is mandatory,
an exception will be raised instead.
:keyword immediate: Request immediate delivery.
If the message cannot be routed to a queue consumer immediately,
an exception will be raised. This is instead of the default
behaviour, where the server will accept and queue the message,
but with no guarantee that the message will ever be consumed.
:keyword delivery_mode: Override the default :attr:`delivery_mode`.
:keyword priority: The message priority, ``0`` to ``9``.
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword serializer: Override the default :attr:`serializer`.
:keyword exchange: Override the exchange to publish to.
Note that this exchange must have been declared. | [
"Send",
"a",
"message",
"."
] | python | train |
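Editor's note: a hedged usage sketch for the publisher documented above, following the style of carrot's own examples; the broker settings, exchange, and routing key are illustrative, and carrot itself is a legacy AMQP client (superseded by kombu).

```python
from carrot.connection import BrokerConnection
from carrot.messaging import Publisher

conn = BrokerConnection(hostname="localhost", port=5672, userid="guest",
                        password="guest", virtual_host="/")
publisher = Publisher(connection=conn, exchange="feed",
                      routing_key="importer", serializer="json")
publisher.send({"import_feed": "http://cnn.com/rss/edition.rss"})
publisher.close()
```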
apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L683-L709 | def install_toolset(self, toolset):
'''
Installs specific toolset on CI system.
'''
info = toolset_info[toolset]
if sys.platform.startswith('linux'):
os.chdir(self.work_dir)
if 'ppa' in info:
for ppa in info['ppa']:
utils.check_call(
'sudo','add-apt-repository','--yes',ppa)
if 'deb' in info:
utils.make_file('sources.list',
"deb %s"%(' '.join(info['deb'])),
"deb-src %s"%(' '.join(info['deb'])))
utils.check_call('sudo','bash','-c','cat sources.list >> /etc/apt/sources.list')
if 'apt-key' in info:
for key in info['apt-key']:
utils.check_call('wget',key,'-O','apt.key')
utils.check_call('sudo','apt-key','add','apt.key')
utils.check_call(
'sudo','apt-get','update','-qq')
utils.check_call(
'sudo','apt-get','install','-qq',info['package'])
if 'debugpackage' in info and info['debugpackage']:
utils.check_call(
'sudo','apt-get','install','-qq',info['debugpackage']) | [
"def",
"install_toolset",
"(",
"self",
",",
"toolset",
")",
":",
"info",
"=",
"toolset_info",
"[",
"toolset",
"]",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
":",
"os",
".",
"chdir",
"(",
"self",
".",
"work_dir",
")",
"if",
"'ppa'",
"in",
"info",
":",
"for",
"ppa",
"in",
"info",
"[",
"'ppa'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'add-apt-repository'",
",",
"'--yes'",
",",
"ppa",
")",
"if",
"'deb'",
"in",
"info",
":",
"utils",
".",
"make_file",
"(",
"'sources.list'",
",",
"\"deb %s\"",
"%",
"(",
"' '",
".",
"join",
"(",
"info",
"[",
"'deb'",
"]",
")",
")",
",",
"\"deb-src %s\"",
"%",
"(",
"' '",
".",
"join",
"(",
"info",
"[",
"'deb'",
"]",
")",
")",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'bash'",
",",
"'-c'",
",",
"'cat sources.list >> /etc/apt/sources.list'",
")",
"if",
"'apt-key'",
"in",
"info",
":",
"for",
"key",
"in",
"info",
"[",
"'apt-key'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'wget'",
",",
"key",
",",
"'-O'",
",",
"'apt.key'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-key'",
",",
"'add'",
",",
"'apt.key'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'update'",
",",
"'-qq'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'install'",
",",
"'-qq'",
",",
"info",
"[",
"'package'",
"]",
")",
"if",
"'debugpackage'",
"in",
"info",
"and",
"info",
"[",
"'debugpackage'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'install'",
",",
"'-qq'",
",",
"info",
"[",
"'debugpackage'",
"]",
")"
] | Installs specific toolset on CI system. | [
"Installs",
"specific",
"toolset",
"on",
"CI",
"system",
"."
] | python | train |
tensorflow/probability | tensorflow_probability/python/distributions/poisson_lognormal.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/poisson_lognormal.py#L45-L85 | def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associate with each `grid` value.
"""
with tf.name_scope(
name or "vector_diffeomixture_quadrature_gauss_hermite"):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
npdt = dtype_util.as_numpy_dtype(loc.dtype)
grid = grid.astype(npdt)
probs = probs.astype(npdt)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)
return grid, probs | [
"def",
"quadrature_scheme_lognormal_gauss_hermite",
"(",
"loc",
",",
"scale",
",",
"quadrature_size",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"vector_diffeomixture_quadrature_gauss_hermite\"",
")",
":",
"grid",
",",
"probs",
"=",
"np",
".",
"polynomial",
".",
"hermite",
".",
"hermgauss",
"(",
"deg",
"=",
"quadrature_size",
")",
"npdt",
"=",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"loc",
".",
"dtype",
")",
"grid",
"=",
"grid",
".",
"astype",
"(",
"npdt",
")",
"probs",
"=",
"probs",
".",
"astype",
"(",
"npdt",
")",
"probs",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"probs",
",",
"ord",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"probs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"probs",
",",
"name",
"=",
"\"probs\"",
",",
"dtype",
"=",
"loc",
".",
"dtype",
")",
"# The following maps the broadcast of `loc` and `scale` to each grid",
"# point, i.e., we are creating several log-rates that correspond to the",
"# different Gauss-Hermite quadrature points and (possible) batches of",
"# `loc` and `scale`.",
"grid",
"=",
"(",
"loc",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"+",
"np",
".",
"sqrt",
"(",
"2.",
")",
"*",
"scale",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"*",
"grid",
")",
"return",
"grid",
",",
"probs"
] | Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associate with each `grid` value. | [
"Use",
"Gauss",
"-",
"Hermite",
"quadrature",
"to",
"form",
"quadrature",
"on",
"positive",
"-",
"reals",
"."
] | python | test |
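Editor's note: the quadrature scheme above is ordinary Gauss-Hermite quadrature shifted into log-rate space: nodes become `loc + sqrt(2) * scale * grid` and the weights are L1-normalised. A NumPy-only sketch of the same computation (no TensorFlow, scalar `loc`/`scale` only):

```python
import numpy as np

loc, scale, quadrature_size = 0.0, 0.5, 7
grid, probs = np.polynomial.hermite.hermgauss(quadrature_size)
probs = probs / np.linalg.norm(probs, ord=1)      # weights now sum to 1
log_rates = loc + np.sqrt(2.0) * scale * grid     # quadrature nodes in log-rate space
print(log_rates)
print(probs, probs.sum())
```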
cbrand/vpnchooser | src/vpnchooser/helpers/parser.py | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/helpers/parser.py#L9-L44 | def id_from_url(url, param_name: str) -> int:
    """
    Parses an object and tries to extract a url.
    Tries to parse if a resource_url has been given
    it as a url.
    :raise ValueError: If no id could be extracted.
    """
    if url is None:
        raise ValueError('url is none')
    elif isinstance(url, int):
        # Seems to already be the url.
        return url
    if not url:
        raise ValueError('Seems to be empty')
    try:
        return int(url)
    except ValueError:
        pass
    parsed = urlparse(url)
    try:
        resource_url = app.url_map.bind(parsed.netloc).match(
            parsed.path
        )
    except NotFound:
        raise ValueError('No URL found')
    if param_name in resource_url[1]:
        return resource_url[1][param_name]
    else:
        raise ValueError(
            'Parameter {name} could not be extracted'.format(
                name=param_name
            )
        ) | [
"def",
"id_from_url",
"(",
"url",
",",
"param_name",
":",
"str",
")",
"->",
"int",
":",
"if",
"url",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'url is none'",
")",
"elif",
"isinstance",
"(",
"url",
",",
"int",
")",
":",
"# Seems to already be the url.",
"return",
"url",
"if",
"not",
"url",
":",
"raise",
"ValueError",
"(",
"'Seems to be empty'",
")",
"try",
":",
"return",
"int",
"(",
"url",
")",
"except",
"ValueError",
":",
"pass",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"try",
":",
"resource_url",
"=",
"app",
".",
"url_map",
".",
"bind",
"(",
"parsed",
".",
"netloc",
")",
".",
"match",
"(",
"parsed",
".",
"path",
")",
"except",
"NotFound",
":",
"raise",
"ValueError",
"(",
"'No URL found'",
")",
"if",
"param_name",
"in",
"resource_url",
"[",
"1",
"]",
":",
"return",
"resource_url",
"[",
"1",
"]",
"[",
"param_name",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Parameter {name} could not be extracted'",
".",
"format",
"(",
"name",
"=",
"param_name",
")",
")"
] | Parses an object and tries to extract a url.
Tries to parse if a resource_url has been given
it as a url.
:raise ValueError: If no id could be extracted. | [
"Parses",
"an",
"object",
"and",
"tries",
"to",
"extract",
"a",
"url",
".",
"Tries",
"to",
"parse",
"if",
"a",
"resource_url",
"has",
"been",
"given",
"it",
"as",
"a",
"url",
".",
":",
"raise",
"ValueError",
":",
"If",
"no",
"id",
"could",
"be",
"extracted",
"."
] | python | train |
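Editor's note: outside the Flask application the same idea can be sketched without `app.url_map`. The real code asks Flask's URL map to match the path and pull out `param_name`; this simplified illustration just takes the last path segment.

```python
from urllib.parse import urlparse

def id_from_url(url, param_name):
    # Simplified, Flask-free illustration of the extraction logic above.
    if isinstance(url, int):
        return url
    try:
        return int(url)
    except (TypeError, ValueError):
        pass
    path = urlparse(url).path.rstrip('/')
    return int(path.rsplit('/', 1)[-1])

print(id_from_url(7, 'device_id'),
      id_from_url('7', 'device_id'),
      id_from_url('http://host/api/devices/7', 'device_id'))  # 7 7 7
```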
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L85-L112 | def update_req(req):
    """Updates a given req object with the latest version."""
    if not req.name:
        return req, None
    info = get_package_info(req.name)
    if info['info'].get('_pypi_hidden'):
        print('{} is hidden on PyPI and will not be updated.'.format(req))
        return req, None
    if _is_pinned(req) and _is_version_range(req):
        print('{} is pinned to a range and will not be updated.'.format(req))
        return req, None
    newest_version = _get_newest_version(info)
    current_spec = next(iter(req.specifier)) if req.specifier else None
    current_version = current_spec.version if current_spec else None
    new_spec = Specifier(u'=={}'.format(newest_version))
    if not current_spec or current_spec._spec != new_spec._spec:
        req.specifier = new_spec
        update_info = (
            req.name,
            current_version,
            newest_version)
        return req, update_info
    return req, None | [
"def",
"update_req",
"(",
"req",
")",
":",
"if",
"not",
"req",
".",
"name",
":",
"return",
"req",
",",
"None",
"info",
"=",
"get_package_info",
"(",
"req",
".",
"name",
")",
"if",
"info",
"[",
"'info'",
"]",
".",
"get",
"(",
"'_pypi_hidden'",
")",
":",
"print",
"(",
"'{} is hidden on PyPI and will not be updated.'",
".",
"format",
"(",
"req",
")",
")",
"return",
"req",
",",
"None",
"if",
"_is_pinned",
"(",
"req",
")",
"and",
"_is_version_range",
"(",
"req",
")",
":",
"print",
"(",
"'{} is pinned to a range and will not be updated.'",
".",
"format",
"(",
"req",
")",
")",
"return",
"req",
",",
"None",
"newest_version",
"=",
"_get_newest_version",
"(",
"info",
")",
"current_spec",
"=",
"next",
"(",
"iter",
"(",
"req",
".",
"specifier",
")",
")",
"if",
"req",
".",
"specifier",
"else",
"None",
"current_version",
"=",
"current_spec",
".",
"version",
"if",
"current_spec",
"else",
"None",
"new_spec",
"=",
"Specifier",
"(",
"u'=={}'",
".",
"format",
"(",
"newest_version",
")",
")",
"if",
"not",
"current_spec",
"or",
"current_spec",
".",
"_spec",
"!=",
"new_spec",
".",
"_spec",
":",
"req",
".",
"specifier",
"=",
"new_spec",
"update_info",
"=",
"(",
"req",
".",
"name",
",",
"current_version",
",",
"newest_version",
")",
"return",
"req",
",",
"update_info",
"return",
"req",
",",
"None"
] | Updates a given req object with the latest version. | [
"Updates",
"a",
"given",
"req",
"object",
"with",
"the",
"latest",
"version",
"."
] | python | train |
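Editor's note: the pinning step above uses the `packaging` library's requirement and specifier objects. An illustration of that step in isolation; the "newest" version below is an assumed value standing in for the PyPI lookup that `get_package_info()` performs.

```python
from packaging.requirements import Requirement
from packaging.specifiers import Specifier

req = Requirement("requests>=2.0")
newest_version = "2.31.0"              # assumed value, normally fetched from PyPI
req.specifier = Specifier("==" + newest_version)
print(str(req))                        # requests==2.31.0
```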
markuskiller/textblob-de | textblob_de/ext/_pattern/text/search.py | https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L571-L650 | def match(self, word):
""" Return True if the given Word is part of the constraint:
- the word (or lemma) occurs in Constraint.words, OR
- the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND
- the word and/or chunk tags match those defined in the constraint.
Individual terms in Constraint.words or the taxonomy can contain wildcards (*).
Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB*
If the given word contains spaces (e.g., proper noun),
the entire chunk will also be compared.
For example: Constraint(words=["Mac OS X*"])
matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5").
"""
# If the constraint has a custom function it must return True.
if self.custom is not None and self.custom(word) is False:
return False
# If the constraint can only match the first word, Word.index must be 0.
if self.first and word.index > 0:
return False
# If the constraint defines excluded options, Word can not match any of these.
if self.exclude and self.exclude.match(word):
return False
# If the constraint defines allowed tags, Word.tag needs to match one of these.
if self.tags:
if find(lambda w: _match(word.tag, w), self.tags) is None:
return False
# If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these.
if self.chunks:
ch = word.chunk and word.chunk.tag or None
if find(lambda w: _match(ch, w), self.chunks) is None:
return False
# If the constraint defines allowed role, Word.chunk.tag needs to match one of these.
if self.roles:
R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or []
if find(lambda w: w in R, self.roles) is None:
return False
# If the constraint defines allowed words,
# Word.string.lower() OR Word.lemma needs to match one of these.
b = True # b==True when word in constraint (or Constraints.words=[]).
if len(self.words) + len(self.taxa) > 0:
s1 = word.string.lower()
s2 = word.lemma
b = False
for w in itertools.chain(self.words, self.taxa):
# If the constraint has a word with spaces (e.g., a proper noun),
# compare it to the entire chunk.
try:
if " " in w and (s1 in w or s2 and s2 in w or "*" in w):
s1 = word.chunk and word.chunk.string.lower() or s1
s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2
except:
s1 = s1
s2 = None
# Compare the word to the allowed words (which can contain wildcards).
if _match(s1, w):
b=True; break
# Compare the word lemma to the allowed words, e.g.,
# if "was" is not in the constraint, perhaps "be" is, which is a good match.
if s2 and _match(s2, w):
b=True; break
# If the constraint defines allowed taxonomy terms,
# and the given word did not match an allowed word, traverse the taxonomy.
# The search goes up from the given word to its parents in the taxonomy.
# This is faster than traversing all the children of terms in Constraint.taxa.
# The drawback is that:
# 1) Wildcards in the taxonomy are not detected (use classifiers instead),
# 2) Classifier.children() has no effect, only Classifier.parent().
if self.taxa and (not self.words or (self.words and not b)):
for s in (
word.string, # "ants"
word.lemma, # "ant"
word.chunk and word.chunk.string or None, # "army ants"
word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant"
if s is not None:
if self.taxonomy.case_sensitive is False:
s = s.lower()
# Compare ancestors of the word to each term in Constraint.taxa.
for p in self.taxonomy.parents(s, recursive=True):
if find(lambda s: p==s, self.taxa): # No wildcards.
return True
return b | [
"def",
"match",
"(",
"self",
",",
"word",
")",
":",
"# If the constraint has a custom function it must return True.",
"if",
"self",
".",
"custom",
"is",
"not",
"None",
"and",
"self",
".",
"custom",
"(",
"word",
")",
"is",
"False",
":",
"return",
"False",
"# If the constraint can only match the first word, Word.index must be 0.",
"if",
"self",
".",
"first",
"and",
"word",
".",
"index",
">",
"0",
":",
"return",
"False",
"# If the constraint defines excluded options, Word can not match any of these.",
"if",
"self",
".",
"exclude",
"and",
"self",
".",
"exclude",
".",
"match",
"(",
"word",
")",
":",
"return",
"False",
"# If the constraint defines allowed tags, Word.tag needs to match one of these.",
"if",
"self",
".",
"tags",
":",
"if",
"find",
"(",
"lambda",
"w",
":",
"_match",
"(",
"word",
".",
"tag",
",",
"w",
")",
",",
"self",
".",
"tags",
")",
"is",
"None",
":",
"return",
"False",
"# If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these.",
"if",
"self",
".",
"chunks",
":",
"ch",
"=",
"word",
".",
"chunk",
"and",
"word",
".",
"chunk",
".",
"tag",
"or",
"None",
"if",
"find",
"(",
"lambda",
"w",
":",
"_match",
"(",
"ch",
",",
"w",
")",
",",
"self",
".",
"chunks",
")",
"is",
"None",
":",
"return",
"False",
"# If the constraint defines allowed role, Word.chunk.tag needs to match one of these.",
"if",
"self",
".",
"roles",
":",
"R",
"=",
"word",
".",
"chunk",
"and",
"[",
"r2",
"for",
"r1",
",",
"r2",
"in",
"word",
".",
"chunk",
".",
"relations",
"]",
"or",
"[",
"]",
"if",
"find",
"(",
"lambda",
"w",
":",
"w",
"in",
"R",
",",
"self",
".",
"roles",
")",
"is",
"None",
":",
"return",
"False",
"# If the constraint defines allowed words,",
"# Word.string.lower() OR Word.lemma needs to match one of these.",
"b",
"=",
"True",
"# b==True when word in constraint (or Constraints.words=[]).",
"if",
"len",
"(",
"self",
".",
"words",
")",
"+",
"len",
"(",
"self",
".",
"taxa",
")",
">",
"0",
":",
"s1",
"=",
"word",
".",
"string",
".",
"lower",
"(",
")",
"s2",
"=",
"word",
".",
"lemma",
"b",
"=",
"False",
"for",
"w",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"words",
",",
"self",
".",
"taxa",
")",
":",
"# If the constraint has a word with spaces (e.g., a proper noun),",
"# compare it to the entire chunk.",
"try",
":",
"if",
"\" \"",
"in",
"w",
"and",
"(",
"s1",
"in",
"w",
"or",
"s2",
"and",
"s2",
"in",
"w",
"or",
"\"*\"",
"in",
"w",
")",
":",
"s1",
"=",
"word",
".",
"chunk",
"and",
"word",
".",
"chunk",
".",
"string",
".",
"lower",
"(",
")",
"or",
"s1",
"s2",
"=",
"word",
".",
"chunk",
"and",
"\" \"",
".",
"join",
"(",
"[",
"x",
"or",
"\"\"",
"for",
"x",
"in",
"word",
".",
"chunk",
".",
"lemmata",
"]",
")",
"or",
"s2",
"except",
":",
"s1",
"=",
"s1",
"s2",
"=",
"None",
"# Compare the word to the allowed words (which can contain wildcards).",
"if",
"_match",
"(",
"s1",
",",
"w",
")",
":",
"b",
"=",
"True",
"break",
"# Compare the word lemma to the allowed words, e.g.,",
"# if \"was\" is not in the constraint, perhaps \"be\" is, which is a good match.",
"if",
"s2",
"and",
"_match",
"(",
"s2",
",",
"w",
")",
":",
"b",
"=",
"True",
"break",
"# If the constraint defines allowed taxonomy terms,",
"# and the given word did not match an allowed word, traverse the taxonomy.",
"# The search goes up from the given word to its parents in the taxonomy.",
"# This is faster than traversing all the children of terms in Constraint.taxa.",
"# The drawback is that:",
"# 1) Wildcards in the taxonomy are not detected (use classifiers instead),",
"# 2) Classifier.children() has no effect, only Classifier.parent().",
"if",
"self",
".",
"taxa",
"and",
"(",
"not",
"self",
".",
"words",
"or",
"(",
"self",
".",
"words",
"and",
"not",
"b",
")",
")",
":",
"for",
"s",
"in",
"(",
"word",
".",
"string",
",",
"# \"ants\"",
"word",
".",
"lemma",
",",
"# \"ant\"",
"word",
".",
"chunk",
"and",
"word",
".",
"chunk",
".",
"string",
"or",
"None",
",",
"# \"army ants\"",
"word",
".",
"chunk",
"and",
"\" \"",
".",
"join",
"(",
"[",
"x",
"or",
"\"\"",
"for",
"x",
"in",
"word",
".",
"chunk",
".",
"lemmata",
"]",
")",
"or",
"None",
")",
":",
"# \"army ant\"",
"if",
"s",
"is",
"not",
"None",
":",
"if",
"self",
".",
"taxonomy",
".",
"case_sensitive",
"is",
"False",
":",
"s",
"=",
"s",
".",
"lower",
"(",
")",
"# Compare ancestors of the word to each term in Constraint.taxa.",
"for",
"p",
"in",
"self",
".",
"taxonomy",
".",
"parents",
"(",
"s",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"find",
"(",
"lambda",
"s",
":",
"p",
"==",
"s",
",",
"self",
".",
"taxa",
")",
":",
"# No wildcards.",
"return",
"True",
"return",
"b"
] | Return True if the given Word is part of the constraint:
- the word (or lemma) occurs in Constraint.words, OR
- the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND
- the word and/or chunk tags match those defined in the constraint.
Individual terms in Constraint.words or the taxonomy can contain wildcards (*).
Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB*
If the given word contains spaces (e.g., proper noun),
the entire chunk will also be compared.
For example: Constraint(words=["Mac OS X*"])
matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5"). | [
"Return",
"True",
"if",
"the",
"given",
"Word",
"is",
"part",
"of",
"the",
"constraint",
":",
"-",
"the",
"word",
"(",
"or",
"lemma",
")",
"occurs",
"in",
"Constraint",
".",
"words",
"OR",
"-",
"the",
"word",
"(",
"or",
"lemma",
")",
"occurs",
"in",
"Constraint",
".",
"taxa",
"taxonomy",
"tree",
"AND",
"-",
"the",
"word",
"and",
"/",
"or",
"chunk",
"tags",
"match",
"those",
"defined",
"in",
"the",
"constraint",
".",
"Individual",
"terms",
"in",
"Constraint",
".",
"words",
"or",
"the",
"taxonomy",
"can",
"contain",
"wildcards",
"(",
"*",
")",
".",
"Some",
"part",
"-",
"of",
"-",
"speech",
"-",
"tags",
"can",
"also",
"contain",
"wildcards",
":",
"NN",
"*",
"VB",
"*",
"JJ",
"*",
"RB",
"*",
"If",
"the",
"given",
"word",
"contains",
"spaces",
"(",
"e",
".",
"g",
".",
"proper",
"noun",
")",
"the",
"entire",
"chunk",
"will",
"also",
"be",
"compared",
".",
"For",
"example",
":",
"Constraint",
"(",
"words",
"=",
"[",
"Mac",
"OS",
"X",
"*",
"]",
")",
"matches",
"the",
"word",
"Mac",
"if",
"the",
"word",
"occurs",
"in",
"a",
"Chunk",
"(",
"Mac",
"OS",
"X",
"10",
".",
"5",
")",
"."
] | python | train |
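The constraint-matching record above compares words and lemmas against allowed words that may contain wildcards (*), via a helper called _match. A minimal standalone sketch of how such a wildcard comparison could behave; this is an illustration, not the library's actual _match implementation.

import fnmatch

def _match_sketch(string, pattern):
    # Case-insensitive wildcard comparison: "*" matches any run of characters,
    # e.g. "Mac OS X*" matches the chunk string "mac os x 10.5".
    return fnmatch.fnmatch(string.lower(), pattern.lower())

assert _match_sketch("mac os x 10.5", "Mac OS X*")
assert not _match_sketch("windows", "Mac OS X*")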
ThreatConnect-Inc/tcex | tcex/tcex_ti/mappings/group/group_types/document.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/mappings/group/group_types/document.py#L55-L67 | def file_name(self, file_name):
"""
Updates the file_name.
Args:
file_name:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
self._data['fileName'] = file_name
request = {'fileName': file_name}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request) | [
"def",
"file_name",
"(",
"self",
",",
"file_name",
")",
":",
"if",
"not",
"self",
".",
"can_update",
"(",
")",
":",
"self",
".",
"_tcex",
".",
"handle_error",
"(",
"910",
",",
"[",
"self",
".",
"type",
"]",
")",
"self",
".",
"_data",
"[",
"'fileName'",
"]",
"=",
"file_name",
"request",
"=",
"{",
"'fileName'",
":",
"file_name",
"}",
"return",
"self",
".",
"tc_requests",
".",
"update",
"(",
"self",
".",
"api_type",
",",
"self",
".",
"api_sub_type",
",",
"self",
".",
"unique_id",
",",
"request",
")"
] | Updates the file_name.
Args:
file_name: | [
"Updates",
"the",
"file_name",
"."
] | python | train |
saltstack/salt | salt/modules/inspectlib/fsdb.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/fsdb.py#L104-L113 | def flush(self, table):
'''
Flush table.
:param table:
:return:
'''
table_path = os.path.join(self.db_path, table)
if os.path.exists(table_path):
os.unlink(table_path) | [
"def",
"flush",
"(",
"self",
",",
"table",
")",
":",
"table_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"db_path",
",",
"table",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"table_path",
")",
":",
"os",
".",
"unlink",
"(",
"table_path",
")"
] | Flush table.
:param table:
:return: | [
"Flush",
"table",
"."
] | python | train |
zarr-developers/zarr | zarr/core.py | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2244-L2302 | def astype(self, dtype):
"""Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
"""
dtype = np.dtype(dtype)
filters = []
if self._filters:
filters.extend(self._filters)
filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
return self.view(filters=filters, dtype=dtype, read_only=True) | [
"def",
"astype",
"(",
"self",
",",
"dtype",
")",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"filters",
"=",
"[",
"]",
"if",
"self",
".",
"_filters",
":",
"filters",
".",
"extend",
"(",
"self",
".",
"_filters",
")",
"filters",
".",
"insert",
"(",
"0",
",",
"AsType",
"(",
"encode_dtype",
"=",
"self",
".",
"_dtype",
",",
"decode_dtype",
"=",
"dtype",
")",
")",
"return",
"self",
".",
"view",
"(",
"filters",
"=",
"filters",
",",
"dtype",
"=",
"dtype",
",",
"read_only",
"=",
"True",
")"
] | Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32) | [
"Returns",
"a",
"view",
"that",
"does",
"on",
"the",
"fly",
"type",
"conversion",
"of",
"the",
"underlying",
"data",
"."
] | python | train |
stephantul/somber | somber/plsom.py | https://github.com/stephantul/somber/blob/b7a13e646239500cc393668c01a7169c3e50b7b5/somber/plsom.py#L139-L147 | def _update_params(self, constants):
"""Update the params."""
constants = np.max(np.min(constants, 1))
self.params['r']['value'] = max([self.params['r']['value'],
constants])
epsilon = constants / self.params['r']['value']
influence = self._calculate_influence(epsilon)
# Account for learning rate
return influence * epsilon | [
"def",
"_update_params",
"(",
"self",
",",
"constants",
")",
":",
"constants",
"=",
"np",
".",
"max",
"(",
"np",
".",
"min",
"(",
"constants",
",",
"1",
")",
")",
"self",
".",
"params",
"[",
"'r'",
"]",
"[",
"'value'",
"]",
"=",
"max",
"(",
"[",
"self",
".",
"params",
"[",
"'r'",
"]",
"[",
"'value'",
"]",
",",
"constants",
"]",
")",
"epsilon",
"=",
"constants",
"/",
"self",
".",
"params",
"[",
"'r'",
"]",
"[",
"'value'",
"]",
"influence",
"=",
"self",
".",
"_calculate_influence",
"(",
"epsilon",
")",
"# Account for learning rate",
"return",
"influence",
"*",
"epsilon"
] | Update the params. | [
"Update",
"the",
"params",
"."
] | python | train |
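A small numeric illustration (not taken from the library) of the PLSOM-style scaling in _update_params above: a running maximum error r is kept, and epsilon is the current error relative to that maximum. A scalar error is assumed for simplicity.

r = 0.0
errors = [0.4, 0.9, 0.5]          # quantization errors over successive steps
for err in errors:
    r = max(r, err)               # running maximum, as in params['r']['value']
    epsilon = err / r             # relative error in (0, 1]
    print(round(epsilon, 2))      # 1.0, 1.0, 0.56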
pybel/pybel-tools | src/pybel_tools/mutation/bound.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/mutation/bound.py#L29-L38 | def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:
"""Make a delete function that's bound to the manager."""
@in_place_transformation
def delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:
"""Remove a node by identifier."""
node = manager.get_dsl_by_hash(node_hash)
graph.remove_node(node)
return delete_node_by_hash | [
"def",
"build_delete_node_by_hash",
"(",
"manager",
":",
"Manager",
")",
"->",
"Callable",
"[",
"[",
"BELGraph",
",",
"str",
"]",
",",
"None",
"]",
":",
"@",
"in_place_transformation",
"def",
"delete_node_by_hash",
"(",
"graph",
":",
"BELGraph",
",",
"node_hash",
":",
"str",
")",
"->",
"None",
":",
"\"\"\"Remove a node by identifier.\"\"\"",
"node",
"=",
"manager",
".",
"get_dsl_by_hash",
"(",
"node_hash",
")",
"graph",
".",
"remove_node",
"(",
"node",
")",
"return",
"delete_node_by_hash"
] | Make a delete function that's bound to the manager. | [
"Make",
"a",
"delete",
"function",
"that",
"s",
"bound",
"to",
"the",
"manager",
"."
] | python | valid |
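The record above returns a graph-mutation function with a database manager bound in a closure. A generic sketch of that binding pattern, using hypothetical names rather than PyBEL's API.

def build_remove_by_key(lookup):
    # "lookup" stays bound in the closure, so callers only pass the graph and key.
    def remove_by_key(graph, key):
        node = lookup[key]        # stand-in for manager.get_dsl_by_hash(key)
        graph.remove(node)        # stand-in for graph.remove_node(node)
    return remove_by_key

graph = {"a", "b"}
remover = build_remove_by_key({"k1": "a"})
remover(graph, "k1")
print(graph)                      # {'b'}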
ellisonbg/vizarray | vizarray/__init__.py | https://github.com/ellisonbg/vizarray/blob/3221a232ecf54e8348094aacfc5719b40d89a451/vizarray/__init__.py#L129-L137 | def disable_notebook():
"""Disable automatic visualization of NumPy arrays in the IPython Notebook."""
try:
from IPython.core.getipython import get_ipython
except ImportError:
raise ImportError('This feature requires IPython 1.0+')
ip = get_ipython()
f = ip.display_formatter.formatters['text/html']
f.type_printers.pop(np.ndarray, None) | [
"def",
"disable_notebook",
"(",
")",
":",
"try",
":",
"from",
"IPython",
".",
"core",
".",
"getipython",
"import",
"get_ipython",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'This feature requires IPython 1.0+'",
")",
"ip",
"=",
"get_ipython",
"(",
")",
"f",
"=",
"ip",
".",
"display_formatter",
".",
"formatters",
"[",
"'text/html'",
"]",
"f",
".",
"type_printers",
".",
"pop",
"(",
"np",
".",
"ndarray",
",",
"None",
")"
] | Disable automatic visualization of NumPy arrays in the IPython Notebook. | [
"Disable",
"automatic",
"visualization",
"of",
"NumPy",
"arrays",
"in",
"the",
"IPython",
"Notebook",
"."
] | python | train |
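disable_notebook above unregisters the HTML printer for ndarrays. A hedged sketch of the complementary registration step, assuming IPython's for_type API on the text/html formatter (a common way such printers are installed); it only runs inside an IPython kernel.

from IPython.core.getipython import get_ipython
import numpy as np

def enable_notebook_sketch(to_html):
    # "to_html" is any callable mapping an ndarray to an HTML string.
    ip = get_ipython()
    if ip is None:
        raise RuntimeError("not running inside IPython")
    ip.display_formatter.formatters['text/html'].for_type(np.ndarray, to_html)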
mogproject/mog-commons-python | src/mog_commons/command.py | https://github.com/mogproject/mog-commons-python/blob/951cf0fa9a56248b4d45be720be25f1d4b7e1bff/src/mog_commons/command.py#L41-L49 | def __convert_env(env, encoding):
"""Environment variables should be bytes not unicode on Windows."""
d = dict(os.environ, **(oget(env, {})))
# workaround for Windows+Python3 environment
if not SHOULD_NOT_ENCODE_ARGS:
return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
else:
return d | [
"def",
"__convert_env",
"(",
"env",
",",
"encoding",
")",
":",
"d",
"=",
"dict",
"(",
"os",
".",
"environ",
",",
"*",
"*",
"(",
"oget",
"(",
"env",
",",
"{",
"}",
")",
")",
")",
"# workaround for Windows+Python3 environment",
"if",
"not",
"SHOULD_NOT_ENCODE_ARGS",
":",
"return",
"dict",
"(",
"(",
"k",
".",
"encode",
"(",
"encoding",
")",
",",
"v",
".",
"encode",
"(",
"encoding",
")",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
")",
"else",
":",
"return",
"d"
] | Environment variables should be bytes not unicode on Windows. | [
"Environment",
"variables",
"should",
"be",
"bytes",
"not",
"unicode",
"on",
"Windows",
"."
] | python | train |
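__convert_env above merges an override dict into os.environ and byte-encodes keys and values when needed. A small standalone illustration of that merge-and-encode step, independent of the library's SHOULD_NOT_ENCODE_ARGS flag.

import os

def merged_env(overrides, encoding=None):
    merged = dict(os.environ, **(overrides or {}))
    if encoding is None:
        return merged
    return {k.encode(encoding): v.encode(encoding) for k, v in merged.items()}

env = merged_env({"MY_FLAG": "1"})
print(env["MY_FLAG"])             # '1', plus everything already in os.environ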
arne-cl/discoursegraphs | src/discoursegraphs/readwrite/rst/dplp.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/dplp.py#L93-L114 | def dplptree2dgparentedtree(self):
"""Convert the tree from DPLP's format into a conventional binary tree,
which can be easily converted into output formats like RS3.
"""
def transform(dplp_tree):
"""Transform a DPLP parse tree into a more conventional parse tree."""
if isinstance(dplp_tree, basestring) or not hasattr(dplp_tree, 'label'):
return dplp_tree
assert len(dplp_tree) == 2, "We can only handle binary trees."
match = DPLP_REL_RE.match(dplp_tree.label())
assert match, "Relation '{}' does not match regex '{}'".format(dplp_tree.label(), DPLP_REL_RE)
left_child_nuc, right_child_nuc, relname = match.groups()
dplp_tree._label = relname
for i, child_nuclearity in enumerate([left_child_nuc, right_child_nuc]):
child = dplp_tree[i]
dplp_tree[i] = Tree(child_nuclearity, [transform(child)])
return dplp_tree
tree = transform(self.parsetree)
return DGParentedTree.convert(tree) | [
"def",
"dplptree2dgparentedtree",
"(",
"self",
")",
":",
"def",
"transform",
"(",
"dplp_tree",
")",
":",
"\"\"\"Transform a DPLP parse tree into a more conventional parse tree.\"\"\"",
"if",
"isinstance",
"(",
"dplp_tree",
",",
"basestring",
")",
"or",
"not",
"hasattr",
"(",
"dplp_tree",
",",
"'label'",
")",
":",
"return",
"dplp_tree",
"assert",
"len",
"(",
"dplp_tree",
")",
"==",
"2",
",",
"\"We can only handle binary trees.\"",
"match",
"=",
"DPLP_REL_RE",
".",
"match",
"(",
"dplp_tree",
".",
"label",
"(",
")",
")",
"assert",
"match",
",",
"\"Relation '{}' does not match regex '{}'\"",
".",
"format",
"(",
"dplp_tree",
".",
"label",
"(",
")",
",",
"DPLP_REL_RE",
")",
"left_child_nuc",
",",
"right_child_nuc",
",",
"relname",
"=",
"match",
".",
"groups",
"(",
")",
"dplp_tree",
".",
"_label",
"=",
"relname",
"for",
"i",
",",
"child_nuclearity",
"in",
"enumerate",
"(",
"[",
"left_child_nuc",
",",
"right_child_nuc",
"]",
")",
":",
"child",
"=",
"dplp_tree",
"[",
"i",
"]",
"dplp_tree",
"[",
"i",
"]",
"=",
"Tree",
"(",
"child_nuclearity",
",",
"[",
"transform",
"(",
"child",
")",
"]",
")",
"return",
"dplp_tree",
"tree",
"=",
"transform",
"(",
"self",
".",
"parsetree",
")",
"return",
"DGParentedTree",
".",
"convert",
"(",
"tree",
")"
] | Convert the tree from DPLP's format into a conventional binary tree,
which can be easily converted into output formats like RS3. | [
"Convert",
"the",
"tree",
"from",
"DPLP",
"s",
"format",
"into",
"a",
"conventional",
"binary",
"tree",
"which",
"can",
"be",
"easily",
"converted",
"into",
"output",
"formats",
"like",
"RS3",
"."
] | python | train |
dossier/dossier.label | dossier/label/label.py | https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L852-L884 | def everything(self, include_deleted=False, content_id=None,
subtopic_id=None, prefix=None):
'''Returns a generator of all labels in the store.
If `include_deleted` is :const:`True`, labels that have been
overwritten with more recent labels are also included. If
`content_id` is not :const:`None`, only labels for that
content ID are retrieved; and then if `subtopic_id` is not
:const:`None`, only that subtopic is retrieved, else all
subtopics are retrieved. If `content_id` is :const:`None` but
`prefix` is not, then only labels with at least one content ID
beginning with `prefix` will be returned. The returned labels
will always be q in sorted order, content IDs first, and with
those with the same content, subtopic, and annotator IDs
sorted newest first.
:rtype: generator of :class:`Label`
'''
if content_id is not None:
ranges = [((content_id,), (content_id,))]
elif prefix is not None:
# This is the cheap, easy, and wrong way to do this
ranges = [((prefix,), (prefix + b'\xff',))]
else:
ranges = []
labels = self.kvl.scan(self.TABLE, *ranges)
labels = ifilter(self._filter_keys(content_id, prefix, subtopic_id),
labels)
labels = imap(lambda p: self._label_from_kvlayer(*p), labels)
if not include_deleted:
labels = Label.most_recent(labels)
return labels | [
"def",
"everything",
"(",
"self",
",",
"include_deleted",
"=",
"False",
",",
"content_id",
"=",
"None",
",",
"subtopic_id",
"=",
"None",
",",
"prefix",
"=",
"None",
")",
":",
"if",
"content_id",
"is",
"not",
"None",
":",
"ranges",
"=",
"[",
"(",
"(",
"content_id",
",",
")",
",",
"(",
"content_id",
",",
")",
")",
"]",
"elif",
"prefix",
"is",
"not",
"None",
":",
"# This is the cheap, easy, and wrong way to do this",
"ranges",
"=",
"[",
"(",
"(",
"prefix",
",",
")",
",",
"(",
"prefix",
"+",
"b'\\xff'",
",",
")",
")",
"]",
"else",
":",
"ranges",
"=",
"[",
"]",
"labels",
"=",
"self",
".",
"kvl",
".",
"scan",
"(",
"self",
".",
"TABLE",
",",
"*",
"ranges",
")",
"labels",
"=",
"ifilter",
"(",
"self",
".",
"_filter_keys",
"(",
"content_id",
",",
"prefix",
",",
"subtopic_id",
")",
",",
"labels",
")",
"labels",
"=",
"imap",
"(",
"lambda",
"p",
":",
"self",
".",
"_label_from_kvlayer",
"(",
"*",
"p",
")",
",",
"labels",
")",
"if",
"not",
"include_deleted",
":",
"labels",
"=",
"Label",
".",
"most_recent",
"(",
"labels",
")",
"return",
"labels"
] | Returns a generator of all labels in the store.
If `include_deleted` is :const:`True`, labels that have been
overwritten with more recent labels are also included. If
`content_id` is not :const:`None`, only labels for that
content ID are retrieved; and then if `subtopic_id` is not
:const:`None`, only that subtopic is retrieved, else all
subtopics are retrieved. If `content_id` is :const:`None` but
`prefix` is not, then only labels with at least one content ID
beginning with `prefix` will be returned. The returned labels
will always be in sorted order, content IDs first, and with
those with the same content, subtopic, and annotator IDs
sorted newest first.
:rtype: generator of :class:`Label` | [
"Returns",
"a",
"generator",
"of",
"all",
"labels",
"in",
"the",
"store",
"."
] | python | train |
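The key-range trick in everything above, scanning from (prefix,) to (prefix + b'\xff',), approximates a prefix scan over byte-string keys. A toy illustration over a sorted list, with the same caveat the code's own comment makes (keys whose next byte is 0xff are missed).

from bisect import bisect_left, bisect_right

keys = [b"doc-1", b"doc-2", b"img-1"]     # sorted content IDs
prefix = b"doc-"
lo = bisect_left(keys, prefix)
hi = bisect_right(keys, prefix + b"\xff")
print(keys[lo:hi])                        # [b'doc-1', b'doc-2']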
foremast/foremast | src/foremast/utils/generate_s3_tags.py | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/utils/generate_s3_tags.py#L4-L20 | def generated_tag_data(tags):
"""Convert :obj:`dict` to S3 Tag list.
Args:
tags (dict): Dictionary of tag key and tag value passed.
Returns:
list: List of dictionaries.
"""
generated_tags = []
for key, value in tags.items():
generated_tags.append({
'Key': key,
'Value': value,
})
return generated_tags | [
"def",
"generated_tag_data",
"(",
"tags",
")",
":",
"generated_tags",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"tags",
".",
"items",
"(",
")",
":",
"generated_tags",
".",
"append",
"(",
"{",
"'Key'",
":",
"key",
",",
"'Value'",
":",
"value",
",",
"}",
")",
"return",
"generated_tags"
] | Convert :obj:`dict` to S3 Tag list.
Args:
tags (dict): Dictionary of tag key and tag value passed.
Returns:
list: List of dictionaries. | [
"Convert",
":",
"obj",
":",
"dict",
"to",
"S3",
"Tag",
"list",
"."
] | python | train |
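A short usage illustration of the dict-to-tag-list conversion above; the tag names and values are made up, and the helper below mirrors generated_tag_data so the snippet is self-contained.

def to_s3_tags(tags):              # same shape as generated_tag_data above
    return [{"Key": k, "Value": v} for k, v in tags.items()]

print(to_s3_tags({"app": "myapp", "env": "prod"}))
# [{'Key': 'app', 'Value': 'myapp'}, {'Key': 'env', 'Value': 'prod'}]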
andycasey/sick | sick/models/create.py | https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/create.py#L49-L161 | def create(output_prefix, grid_flux_filename, wavelength_filenames,
clobber=False, grid_flux_filename_format="csv", **kwargs):
"""
Create a new *sick* model from files describing the parameter names, fluxes,
and wavelengths.
"""
if not clobber:
# Check to make sure the output files won't exist already.
output_suffixes = (".yaml", ".pkl", "-wavelengths.memmap",
"-intensities.memmap")
for path in [output_prefix + suffix for suffix in output_suffixes]:
if os.path.exists(path):
raise IOError("output filename {} already exists".format(path))
# Read the grid_flux filename.
# param1 param2 param3 param4 channelname1 channelname2
kwds = kwargs.pop("__grid_flux_filename_kwargs", {})
kwds.update({"format": grid_flux_filename_format})
grid_flux_tbl = Table.read(grid_flux_filename, **kwds)
# Distinguish column names between parameters (real numbers) and filenames
str_columns = \
np.array([_[1].startswith("|S") for _ in grid_flux_tbl.dtype.descr])
# Check the number of channels provided.
if str_columns.sum() != len(wavelength_filenames):
raise ValueError("expected {0} wavelength filenames because {1} has {0}"
" string columns ({2}) but found {3} wavelength filenames".format(
sum(str_columns), grid_flux_filename,
", ".join(np.array(grid_flux_tbl.colnames)[str_columns]),
len(wavelength_filenames)))
# Create a record array of the grid points.
grid_points = \
grid_flux_tbl.as_array()[np.array(grid_flux_tbl.colnames)[~str_columns]]
# To-do: make sure they are all floats.
# Sort the grid points.
grid_indices = grid_points.argsort(order=grid_points.dtype.names)
grid_points = grid_points[grid_indices]
grid_flux_tbl = grid_flux_tbl[grid_indices]
# Check the wavelength filenames.
channel_wavelengths = np.array(map(load_simple_data, wavelength_filenames))
# Sort the channels by starting wavelength.
c_indices = np.argsort([each.min() for each in channel_wavelengths])
channel_names = np.array(grid_flux_tbl.colnames)[str_columns][c_indices]
channel_wavelengths = channel_wavelengths[c_indices]
channel_sizes = [len(_) for _ in channel_wavelengths]
num_pixels = sum(channel_sizes)
# Create the model YAML file.
with open(output_prefix + ".yaml", "w") as fp:
header = "\n".join([
"# Model created on {0}".format(strftime("%Y-%m-%d %H:%M:%S")),
"# Grid parameters: {0}".format(", ".join(grid_points.dtype.names)),
"# Channel names: {0}".format(", ".join(channel_names))
])
fp.write(header + "\n" + yaml.safe_dump({ "model_grid": {
"grid_points": output_prefix + ".pkl",
"intensities": output_prefix + "-intensities.memmap",
"wavelengths": output_prefix + "-wavelengths.memmap"
}}, stream=None, allow_unicode=True, default_flow_style=False))
# Create the pickled model file, with meta data.
metadata = {
"grid_flux_filename": grid_flux_filename,
"wavelength_filenames": wavelength_filenames,
"channel_names": channel_names,
"channel_sizes": channel_sizes,
"channel_resolutions": [float("inf")] * len(channel_names),
"sick_version": sick_version
}
logger.debug("Dumping grid points and metadata to file")
with open(output_prefix + ".pkl", "wb") as fp:
pickle.dump((grid_points, metadata), fp, -1)
# Create the memory-mapped dispersion file.
logger.debug("Creating memory-mapped dispersion file.")
wavelengths_memmap = np.memmap(output_prefix + "-wavelengths.memmap",
dtype="float32", mode="w+", shape=(num_pixels, ))
wavelengths_memmap[:] = np.hstack(channel_wavelengths)
wavelengths_memmap.flush()
del wavelengths_memmap
# Create the memory-mapped intensities file.
logger.debug("Creating memory-mapped intensities file.")
intensities_memmap = np.memmap(output_prefix + "-intensities.memmap",
shape=(grid_points.size, num_pixels), dtype="float32",
mode="w+")
n = len(grid_flux_tbl)
for i, row in enumerate(grid_flux_tbl):
logger.debug("Loading point {0}/{1} into the intensities map"\
.format(i + 1, n))
j = 0
for channel_name in channel_names:
try:
data = load_simple_data(row[channel_name])
except:
logger.exception("Could not load data from {0} for channel {1}"\
.format(row[channel_name], channel_name))
raise
intensities_memmap[i, j:j + data.size] = data
j += data.size
intensities_memmap.flush()
del intensities_memmap
return True | [
"def",
"create",
"(",
"output_prefix",
",",
"grid_flux_filename",
",",
"wavelength_filenames",
",",
"clobber",
"=",
"False",
",",
"grid_flux_filename_format",
"=",
"\"csv\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"clobber",
":",
"# Check to make sure the output files won't exist already.",
"output_suffixes",
"=",
"(",
"\".yaml\"",
",",
"\".pkl\"",
",",
"\"-wavelengths.memmap\"",
",",
"\"-intensities.memmap\"",
")",
"for",
"path",
"in",
"[",
"output_prefix",
"+",
"suffix",
"for",
"suffix",
"in",
"output_suffixes",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"IOError",
"(",
"\"output filename {} already exists\"",
".",
"format",
"(",
"path",
")",
")",
"# Read the grid_flux filename.",
"# param1 param2 param3 param4 channelname1 channelname2",
"kwds",
"=",
"kwargs",
".",
"pop",
"(",
"\"__grid_flux_filename_kwargs\"",
",",
"{",
"}",
")",
"kwds",
".",
"update",
"(",
"{",
"\"format\"",
":",
"grid_flux_filename_format",
"}",
")",
"grid_flux_tbl",
"=",
"Table",
".",
"read",
"(",
"grid_flux_filename",
",",
"*",
"*",
"kwds",
")",
"# Distinguish column names between parameters (real numbers) and filenames",
"str_columns",
"=",
"np",
".",
"array",
"(",
"[",
"_",
"[",
"1",
"]",
".",
"startswith",
"(",
"\"|S\"",
")",
"for",
"_",
"in",
"grid_flux_tbl",
".",
"dtype",
".",
"descr",
"]",
")",
"# Check the number of channels provided.",
"if",
"str_columns",
".",
"sum",
"(",
")",
"!=",
"len",
"(",
"wavelength_filenames",
")",
":",
"raise",
"ValueError",
"(",
"\"expected {0} wavelength filenames because {1} has {0}\"",
"\" string columns ({2}) but found {3} wavelength filenames\"",
".",
"format",
"(",
"sum",
"(",
"str_columns",
")",
",",
"grid_flux_filename",
",",
"\", \"",
".",
"join",
"(",
"np",
".",
"array",
"(",
"grid_flux_tbl",
".",
"colnames",
")",
"[",
"str_columns",
"]",
")",
",",
"len",
"(",
"wavelength_filenames",
")",
")",
")",
"# Create a record array of the grid points.",
"grid_points",
"=",
"grid_flux_tbl",
".",
"as_array",
"(",
")",
"[",
"np",
".",
"array",
"(",
"grid_flux_tbl",
".",
"colnames",
")",
"[",
"~",
"str_columns",
"]",
"]",
"# To-do: make sure they are all floats.",
"# Sort the grid points.",
"grid_indices",
"=",
"grid_points",
".",
"argsort",
"(",
"order",
"=",
"grid_points",
".",
"dtype",
".",
"names",
")",
"grid_points",
"=",
"grid_points",
"[",
"grid_indices",
"]",
"grid_flux_tbl",
"=",
"grid_flux_tbl",
"[",
"grid_indices",
"]",
"# Check the wavelength filenames.",
"channel_wavelengths",
"=",
"np",
".",
"array",
"(",
"map",
"(",
"load_simple_data",
",",
"wavelength_filenames",
")",
")",
"# Sort the channels by starting wavelength.",
"c_indices",
"=",
"np",
".",
"argsort",
"(",
"[",
"each",
".",
"min",
"(",
")",
"for",
"each",
"in",
"channel_wavelengths",
"]",
")",
"channel_names",
"=",
"np",
".",
"array",
"(",
"grid_flux_tbl",
".",
"colnames",
")",
"[",
"str_columns",
"]",
"[",
"c_indices",
"]",
"channel_wavelengths",
"=",
"channel_wavelengths",
"[",
"c_indices",
"]",
"channel_sizes",
"=",
"[",
"len",
"(",
"_",
")",
"for",
"_",
"in",
"channel_wavelengths",
"]",
"num_pixels",
"=",
"sum",
"(",
"channel_sizes",
")",
"# Create the model YAML file.",
"with",
"open",
"(",
"output_prefix",
"+",
"\".yaml\"",
",",
"\"w\"",
")",
"as",
"fp",
":",
"header",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"# Model created on {0}\"",
".",
"format",
"(",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")",
",",
"\"# Grid parameters: {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"grid_points",
".",
"dtype",
".",
"names",
")",
")",
",",
"\"# Channel names: {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"channel_names",
")",
")",
"]",
")",
"fp",
".",
"write",
"(",
"header",
"+",
"\"\\n\"",
"+",
"yaml",
".",
"safe_dump",
"(",
"{",
"\"model_grid\"",
":",
"{",
"\"grid_points\"",
":",
"output_prefix",
"+",
"\".pkl\"",
",",
"\"intensities\"",
":",
"output_prefix",
"+",
"\"-intensities.memmap\"",
",",
"\"wavelengths\"",
":",
"output_prefix",
"+",
"\"-wavelengths.memmap\"",
"}",
"}",
",",
"stream",
"=",
"None",
",",
"allow_unicode",
"=",
"True",
",",
"default_flow_style",
"=",
"False",
")",
")",
"# Create the pickled model file, with meta data.",
"metadata",
"=",
"{",
"\"grid_flux_filename\"",
":",
"grid_flux_filename",
",",
"\"wavelength_filenames\"",
":",
"wavelength_filenames",
",",
"\"channel_names\"",
":",
"channel_names",
",",
"\"channel_sizes\"",
":",
"channel_sizes",
",",
"\"channel_resolutions\"",
":",
"[",
"float",
"(",
"\"inf\"",
")",
"]",
"*",
"len",
"(",
"channel_names",
")",
",",
"\"sick_version\"",
":",
"sick_version",
"}",
"logger",
".",
"debug",
"(",
"\"Dumping grid points and metadata to file\"",
")",
"with",
"open",
"(",
"output_prefix",
"+",
"\".pkl\"",
",",
"\"wb\"",
")",
"as",
"fp",
":",
"pickle",
".",
"dump",
"(",
"(",
"grid_points",
",",
"metadata",
")",
",",
"fp",
",",
"-",
"1",
")",
"# Create the memory-mapped dispersion file.",
"logger",
".",
"debug",
"(",
"\"Creating memory-mapped dispersion file.\"",
")",
"wavelengths_memmap",
"=",
"np",
".",
"memmap",
"(",
"output_prefix",
"+",
"\"-wavelengths.memmap\"",
",",
"dtype",
"=",
"\"float32\"",
",",
"mode",
"=",
"\"w+\"",
",",
"shape",
"=",
"(",
"num_pixels",
",",
")",
")",
"wavelengths_memmap",
"[",
":",
"]",
"=",
"np",
".",
"hstack",
"(",
"channel_wavelengths",
")",
"wavelengths_memmap",
".",
"flush",
"(",
")",
"del",
"wavelengths_memmap",
"# Create the memory-mapped intensities file.",
"logger",
".",
"debug",
"(",
"\"Creating memory-mapped intensities file.\"",
")",
"intensities_memmap",
"=",
"np",
".",
"memmap",
"(",
"output_prefix",
"+",
"\"-intensities.memmap\"",
",",
"shape",
"=",
"(",
"grid_points",
".",
"size",
",",
"num_pixels",
")",
",",
"dtype",
"=",
"\"float32\"",
",",
"mode",
"=",
"\"w+\"",
")",
"n",
"=",
"len",
"(",
"grid_flux_tbl",
")",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"grid_flux_tbl",
")",
":",
"logger",
".",
"debug",
"(",
"\"Loading point {0}/{1} into the intensities map\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"n",
")",
")",
"j",
"=",
"0",
"for",
"channel_name",
"in",
"channel_names",
":",
"try",
":",
"data",
"=",
"load_simple_data",
"(",
"row",
"[",
"channel_name",
"]",
")",
"except",
":",
"logger",
".",
"exception",
"(",
"\"Could not load data from {0} for channel {1}\"",
".",
"format",
"(",
"row",
"[",
"channel_name",
"]",
",",
"channel_name",
")",
")",
"raise",
"intensities_memmap",
"[",
"i",
",",
"j",
":",
"j",
"+",
"data",
".",
"size",
"]",
"=",
"data",
"j",
"+=",
"data",
".",
"size",
"intensities_memmap",
".",
"flush",
"(",
")",
"del",
"intensities_memmap",
"return",
"True"
] | Create a new *sick* model from files describing the parameter names, fluxes,
and wavelengths. | [
"Create",
"a",
"new",
"*",
"sick",
"*",
"model",
"from",
"files",
"describing",
"the",
"parameter",
"names",
"fluxes",
"and",
"wavelengths",
"."
] | python | train |
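create above writes wavelengths and intensities as float32 memory-mapped arrays plus a pickled (grid_points, metadata) pair. A hedged sketch of reading such files back with NumPy and pickle; the prefix is a placeholder and the snippet only runs where files written by create exist.

import pickle
import numpy as np

prefix = "mygrid"                                   # placeholder output_prefix
with open(prefix + ".pkl", "rb") as fp:
    grid_points, metadata = pickle.load(fp)

num_pixels = sum(metadata["channel_sizes"])
wavelengths = np.memmap(prefix + "-wavelengths.memmap",
                        dtype="float32", mode="r", shape=(num_pixels,))
intensities = np.memmap(prefix + "-intensities.memmap",
                        dtype="float32", mode="r",
                        shape=(grid_points.size, num_pixels))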
Parquery/icontract | icontract/_recompute.py | https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_recompute.py#L275-L285 | def visit_IfExp(self, node: ast.IfExp) -> Any:
"""Visit the ``test``, and depending on its outcome, the ``body`` or ``orelse``."""
test = self.visit(node=node.test)
if test:
result = self.visit(node=node.body)
else:
result = self.visit(node=node.orelse)
self.recomputed_values[node] = result
return result | [
"def",
"visit_IfExp",
"(",
"self",
",",
"node",
":",
"ast",
".",
"IfExp",
")",
"->",
"Any",
":",
"test",
"=",
"self",
".",
"visit",
"(",
"node",
"=",
"node",
".",
"test",
")",
"if",
"test",
":",
"result",
"=",
"self",
".",
"visit",
"(",
"node",
"=",
"node",
".",
"body",
")",
"else",
":",
"result",
"=",
"self",
".",
"visit",
"(",
"node",
"=",
"node",
".",
"orelse",
")",
"self",
".",
"recomputed_values",
"[",
"node",
"]",
"=",
"result",
"return",
"result"
] | Visit the ``test``, and depending on its outcome, the ``body`` or ``orelse``. | [
"Visit",
"the",
"test",
"and",
"depending",
"on",
"its",
"outcome",
"the",
"body",
"or",
"orelse",
"."
] | python | train |
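visit_IfExp above recomputes a conditional expression by evaluating only the branch selected by the test. A tiny standalone look at the ast.IfExp node it handles.

import ast

node = ast.parse("x if flag else y", mode="eval").body
print(type(node).__name__)                 # IfExp
print(ast.dump(node.test), ast.dump(node.body), ast.dump(node.orelse))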
robotpy/pyfrc | lib/pyfrc/physics/visionsim.py | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/physics/visionsim.py#L190-L236 | def compute(self, now, x, y, angle):
"""
Call this when vision processing should be enabled
:param now: The value passed to ``update_sim``
:param x: Returned from physics_controller.get_position
:param y: Returned from physics_controller.get_position
:param angle: Returned from physics_controller.get_position
:returns: None or list of tuples of (found=0 or 1, capture_time, offset_degrees, distance).
The tuples are ordered by absolute offset from the
target. If a list is returned, it is guaranteed to have at
least one element in it.
Note: If your vision targeting doesn't have the ability
to focus on multiple targets, then you should ignore
the other elements.
"""
# Normalize angle to [-180,180]
output = []
angle = ((angle + math.pi) % (math.pi * 2)) - math.pi
for target in self.targets:
proposed = target.compute(now, x, y, angle)
if proposed:
output.append(proposed)
if not output:
output.append((0, now, inf, 0))
self.distance = None
else:
# order by absolute offset
output.sort(key=lambda i: abs(i[2]))
self.distance = output[-1][3]
# Only store stuff every once in awhile
if now - self.last_compute_time > self.update_period:
self.last_compute_time = now
self.send_queue.appendleft(output)
# simulate latency by delaying camera output
if self.send_queue:
output = self.send_queue[-1]
if now - output[0][1] > self.data_lag:
return self.send_queue.pop() | [
"def",
"compute",
"(",
"self",
",",
"now",
",",
"x",
",",
"y",
",",
"angle",
")",
":",
"# Normalize angle to [-180,180]",
"output",
"=",
"[",
"]",
"angle",
"=",
"(",
"(",
"angle",
"+",
"math",
".",
"pi",
")",
"%",
"(",
"math",
".",
"pi",
"*",
"2",
")",
")",
"-",
"math",
".",
"pi",
"for",
"target",
"in",
"self",
".",
"targets",
":",
"proposed",
"=",
"target",
".",
"compute",
"(",
"now",
",",
"x",
",",
"y",
",",
"angle",
")",
"if",
"proposed",
":",
"output",
".",
"append",
"(",
"proposed",
")",
"if",
"not",
"output",
":",
"output",
".",
"append",
"(",
"(",
"0",
",",
"now",
",",
"inf",
",",
"0",
")",
")",
"self",
".",
"distance",
"=",
"None",
"else",
":",
"# order by absolute offset",
"output",
".",
"sort",
"(",
"key",
"=",
"lambda",
"i",
":",
"abs",
"(",
"i",
"[",
"2",
"]",
")",
")",
"self",
".",
"distance",
"=",
"output",
"[",
"-",
"1",
"]",
"[",
"3",
"]",
"# Only store stuff every once in awhile",
"if",
"now",
"-",
"self",
".",
"last_compute_time",
">",
"self",
".",
"update_period",
":",
"self",
".",
"last_compute_time",
"=",
"now",
"self",
".",
"send_queue",
".",
"appendleft",
"(",
"output",
")",
"# simulate latency by delaying camera output",
"if",
"self",
".",
"send_queue",
":",
"output",
"=",
"self",
".",
"send_queue",
"[",
"-",
"1",
"]",
"if",
"now",
"-",
"output",
"[",
"0",
"]",
"[",
"1",
"]",
">",
"self",
".",
"data_lag",
":",
"return",
"self",
".",
"send_queue",
".",
"pop",
"(",
")"
] | Call this when vision processing should be enabled
:param now: The value passed to ``update_sim``
:param x: Returned from physics_controller.get_position
:param y: Returned from physics_controller.get_position
:param angle: Returned from physics_controller.get_position
:returns: None or list of tuples of (found=0 or 1, capture_time, offset_degrees, distance).
The tuples are ordered by absolute offset from the
target. If a list is returned, it is guaranteed to have at
least one element in it.
Note: If your vision targeting doesn't have the ability
to focus on multiple targets, then you should ignore
the other elements. | [
"Call",
"this",
"when",
"vision",
"processing",
"should",
"be",
"enabled",
":",
"param",
"now",
":",
"The",
"value",
"passed",
"to",
"update_sim",
":",
"param",
"x",
":",
"Returned",
"from",
"physics_controller",
".",
"get_position",
":",
"param",
"y",
":",
"Returned",
"from",
"physics_controller",
".",
"get_position",
":",
"param",
"angle",
":",
"Returned",
"from",
"physics_controller",
".",
"get_position",
":",
"returns",
":",
"None",
"or",
"list",
"of",
"tuples",
"of",
"(",
"found",
"=",
"0",
"or",
"1",
"capture_time",
"offset_degrees",
"distance",
")",
".",
"The",
"tuples",
"are",
"ordered",
"by",
"absolute",
"offset",
"from",
"the",
"target",
".",
"If",
"a",
"list",
"is",
"returned",
"it",
"is",
"guaranteed",
"to",
"have",
"at",
"least",
"one",
"element",
"in",
"it",
".",
"Note",
":",
"If",
"your",
"vision",
"targeting",
"doesn",
"t",
"have",
"the",
"ability",
"to",
"focus",
"on",
"multiple",
"targets",
"then",
"you",
"should",
"ignore",
"the",
"other",
"elements",
"."
] | python | train |
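Two details of compute above in isolation: the wrap of the robot angle into [-pi, pi), and the deque used to hold camera output back until data_lag seconds have passed. The numbers below are arbitrary.

import math
from collections import deque

def wrap_angle(angle):
    # Same normalization as in compute(): maps any angle into [-pi, pi).
    return ((angle + math.pi) % (2 * math.pi)) - math.pi

print(round(wrap_angle(3 * math.pi / 2), 3))        # -1.571

send_queue = deque()
send_queue.appendleft(("sample", 0.0))               # (payload, capture_time)
now, data_lag = 0.3, 0.2
if send_queue and now - send_queue[-1][1] > data_lag:
    print(send_queue.pop())                          # released after the lag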
caffeinehit/django-oauth2-provider | provider/views.py | https://github.com/caffeinehit/django-oauth2-provider/blob/6b5bc0d3ad706d2aaa47fa476f38406cddd01236/provider/views.py#L469-L492 | def access_token_response(self, access_token):
"""
Returns a successful response after creating the access token
as defined in :rfc:`5.1`.
"""
response_data = {
'access_token': access_token.token,
'token_type': constants.TOKEN_TYPE,
'expires_in': access_token.get_expire_delta(),
'scope': ' '.join(scope.names(access_token.scope)),
}
# Not all access_tokens are given a refresh_token
# (for example, public clients doing password auth)
try:
rt = access_token.refresh_token
response_data['refresh_token'] = rt.token
except ObjectDoesNotExist:
pass
return HttpResponse(
json.dumps(response_data), mimetype='application/json'
) | [
"def",
"access_token_response",
"(",
"self",
",",
"access_token",
")",
":",
"response_data",
"=",
"{",
"'access_token'",
":",
"access_token",
".",
"token",
",",
"'token_type'",
":",
"constants",
".",
"TOKEN_TYPE",
",",
"'expires_in'",
":",
"access_token",
".",
"get_expire_delta",
"(",
")",
",",
"'scope'",
":",
"' '",
".",
"join",
"(",
"scope",
".",
"names",
"(",
"access_token",
".",
"scope",
")",
")",
",",
"}",
"# Not all access_tokens are given a refresh_token",
"# (for example, public clients doing password auth)",
"try",
":",
"rt",
"=",
"access_token",
".",
"refresh_token",
"response_data",
"[",
"'refresh_token'",
"]",
"=",
"rt",
".",
"token",
"except",
"ObjectDoesNotExist",
":",
"pass",
"return",
"HttpResponse",
"(",
"json",
".",
"dumps",
"(",
"response_data",
")",
",",
"mimetype",
"=",
"'application/json'",
")"
] | Returns a successful response after creating the access token
as defined in :rfc:`5.1`. | [
"Returns",
"a",
"successful",
"response",
"after",
"creating",
"the",
"access",
"token",
"as",
"defined",
"in",
":",
"rfc",
":",
"5",
".",
"1",
"."
] | python | train |
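The JSON body produced by access_token_response above has roughly the shape below. The token strings are placeholders, the token type actually comes from constants.TOKEN_TYPE, and refresh_token is present only when one was issued.

example_response = {
    "access_token": "2YotnFZFEjr1zCsicMWpAA",    # placeholder token
    "token_type": "Bearer",                      # illustrative; see constants.TOKEN_TYPE
    "expires_in": 3600,                          # seconds until expiry
    "scope": "read write",
    "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",   # omitted for public password-auth clients
}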
JoelBender/bacpypes | py25/bacpypes/debugging.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/debugging.py#L91-L212 | def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
"""Debug the contents of an object."""
if _debug: _log.debug("debug_contents indent=%r file=%r _ids=%r", indent, file, _ids)
klasses = list(self.__class__.__mro__)
klasses.reverse()
if _debug: _log.debug(" - klasses: %r", klasses)
# loop through the classes and look for _debug_contents
attrs = []
cids = []
ownFn = []
for klass in klasses:
if klass is DebugContents:
continue
if not issubclass(klass, DebugContents) and hasattr(klass, 'debug_contents'):
for i, seenAlready in enumerate(ownFn):
if issubclass(klass, seenAlready):
del ownFn[i]
break
ownFn.append(klass)
continue
# look for a tuple of attribute names
if not hasattr(klass, '_debug_contents'):
continue
debugContents = klass._debug_contents
if not isinstance(debugContents, tuple):
raise RuntimeError("%s._debug_contents must be a tuple" % (klass.__name__,))
# already seen it?
if id(debugContents) in cids:
continue
cids.append(id(debugContents))
for attr in debugContents:
if attr not in attrs:
attrs.append(attr)
# a bit of debugging
if _debug:
_log.debug(" - attrs: %r", attrs)
_log.debug(" - ownFn: %r", ownFn)
# make/extend the list of objects already seen
if _ids is None:
_ids = []
# loop through the attributes
for attr in attrs:
# assume you're going deep, but not into lists and dicts
goDeep = True
goListDict = False
goHexed = False
# attribute list might want to go deep
if attr.endswith("-"):
goDeep = False
attr = attr[:-1]
elif attr.endswith("*"):
goHexed = True
attr = attr[:-1]
elif attr.endswith("+"):
goDeep = False
goListDict = True
attr = attr[:-1]
if attr.endswith("+"):
goDeep = True
attr = attr[:-1]
value = getattr(self, attr, None)
# skip None
if value is None:
continue
# standard output
if goListDict and isinstance(value, list) and value:
file.write("%s%s = [\n" % (' ' * indent, attr))
indent += 1
for i, elem in enumerate(value):
file.write("%s[%d] %r\n" % (' ' * indent, i, elem))
if goDeep and hasattr(elem, 'debug_contents'):
if id(elem) not in _ids:
_ids.append(id(elem))
elem.debug_contents(indent + 1, file, _ids)
indent -= 1
file.write("%s ]\n" % (' ' * indent,))
elif goListDict and isinstance(value, dict) and value:
file.write("%s%s = {\n" % (' ' * indent, attr))
indent += 1
for key, elem in value.items():
file.write("%s%r : %r\n" % (' ' * indent, key, elem))
if goDeep and hasattr(elem, 'debug_contents'):
if id(elem) not in _ids:
_ids.append(id(elem))
elem.debug_contents(indent + 1, file, _ids)
indent -= 1
file.write("%s }\n" % (' ' * indent,))
elif goHexed and isinstance(value, str):
if len(value) > 20:
hexed = btox(value[:20], '.') + "..."
else:
hexed = btox(value, '.')
file.write("%s%s = x'%s'\n" % (' ' * indent, attr, hexed))
# elif goHexed and isinstance(value, int):
# file.write("%s%s = 0x%X\n" % (' ' * indent, attr, value))
else:
file.write("%s%s = %r\n" % (' ' * indent, attr, value))
# go nested if it is debugable
if goDeep and hasattr(value, 'debug_contents'):
if id(value) not in _ids:
_ids.append(id(value))
value.debug_contents(indent + 1, file, _ids)
# go through the functions
ownFn.reverse()
for klass in ownFn:
klass.debug_contents(self, indent, file, _ids) | [
"def",
"debug_contents",
"(",
"self",
",",
"indent",
"=",
"1",
",",
"file",
"=",
"sys",
".",
"stdout",
",",
"_ids",
"=",
"None",
")",
":",
"if",
"_debug",
":",
"_log",
".",
"debug",
"(",
"\"debug_contents indent=%r file=%r _ids=%r\"",
",",
"indent",
",",
"file",
",",
"_ids",
")",
"klasses",
"=",
"list",
"(",
"self",
".",
"__class__",
".",
"__mro__",
")",
"klasses",
".",
"reverse",
"(",
")",
"if",
"_debug",
":",
"_log",
".",
"debug",
"(",
"\" - klasses: %r\"",
",",
"klasses",
")",
"# loop through the classes and look for _debug_contents",
"attrs",
"=",
"[",
"]",
"cids",
"=",
"[",
"]",
"ownFn",
"=",
"[",
"]",
"for",
"klass",
"in",
"klasses",
":",
"if",
"klass",
"is",
"DebugContents",
":",
"continue",
"if",
"not",
"issubclass",
"(",
"klass",
",",
"DebugContents",
")",
"and",
"hasattr",
"(",
"klass",
",",
"'debug_contents'",
")",
":",
"for",
"i",
",",
"seenAlready",
"in",
"enumerate",
"(",
"ownFn",
")",
":",
"if",
"issubclass",
"(",
"klass",
",",
"seenAlready",
")",
":",
"del",
"ownFn",
"[",
"i",
"]",
"break",
"ownFn",
".",
"append",
"(",
"klass",
")",
"continue",
"# look for a tuple of attribute names",
"if",
"not",
"hasattr",
"(",
"klass",
",",
"'_debug_contents'",
")",
":",
"continue",
"debugContents",
"=",
"klass",
".",
"_debug_contents",
"if",
"not",
"isinstance",
"(",
"debugContents",
",",
"tuple",
")",
":",
"raise",
"RuntimeError",
"(",
"\"%s._debug_contents must be a tuple\"",
"%",
"(",
"klass",
".",
"__name__",
",",
")",
")",
"# already seen it?",
"if",
"id",
"(",
"debugContents",
")",
"in",
"cids",
":",
"continue",
"cids",
".",
"append",
"(",
"id",
"(",
"debugContents",
")",
")",
"for",
"attr",
"in",
"debugContents",
":",
"if",
"attr",
"not",
"in",
"attrs",
":",
"attrs",
".",
"append",
"(",
"attr",
")",
"# a bit of debugging",
"if",
"_debug",
":",
"_log",
".",
"debug",
"(",
"\" - attrs: %r\"",
",",
"attrs",
")",
"_log",
".",
"debug",
"(",
"\" - ownFn: %r\"",
",",
"ownFn",
")",
"# make/extend the list of objects already seen",
"if",
"_ids",
"is",
"None",
":",
"_ids",
"=",
"[",
"]",
"# loop through the attributes",
"for",
"attr",
"in",
"attrs",
":",
"# assume you're going deep, but not into lists and dicts",
"goDeep",
"=",
"True",
"goListDict",
"=",
"False",
"goHexed",
"=",
"False",
"# attribute list might want to go deep",
"if",
"attr",
".",
"endswith",
"(",
"\"-\"",
")",
":",
"goDeep",
"=",
"False",
"attr",
"=",
"attr",
"[",
":",
"-",
"1",
"]",
"elif",
"attr",
".",
"endswith",
"(",
"\"*\"",
")",
":",
"goHexed",
"=",
"True",
"attr",
"=",
"attr",
"[",
":",
"-",
"1",
"]",
"elif",
"attr",
".",
"endswith",
"(",
"\"+\"",
")",
":",
"goDeep",
"=",
"False",
"goListDict",
"=",
"True",
"attr",
"=",
"attr",
"[",
":",
"-",
"1",
"]",
"if",
"attr",
".",
"endswith",
"(",
"\"+\"",
")",
":",
"goDeep",
"=",
"True",
"attr",
"=",
"attr",
"[",
":",
"-",
"1",
"]",
"value",
"=",
"getattr",
"(",
"self",
",",
"attr",
",",
"None",
")",
"# skip None",
"if",
"value",
"is",
"None",
":",
"continue",
"# standard output",
"if",
"goListDict",
"and",
"isinstance",
"(",
"value",
",",
"list",
")",
"and",
"value",
":",
"file",
".",
"write",
"(",
"\"%s%s = [\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
"attr",
")",
")",
"indent",
"+=",
"1",
"for",
"i",
",",
"elem",
"in",
"enumerate",
"(",
"value",
")",
":",
"file",
".",
"write",
"(",
"\"%s[%d] %r\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
"i",
",",
"elem",
")",
")",
"if",
"goDeep",
"and",
"hasattr",
"(",
"elem",
",",
"'debug_contents'",
")",
":",
"if",
"id",
"(",
"elem",
")",
"not",
"in",
"_ids",
":",
"_ids",
".",
"append",
"(",
"id",
"(",
"elem",
")",
")",
"elem",
".",
"debug_contents",
"(",
"indent",
"+",
"1",
",",
"file",
",",
"_ids",
")",
"indent",
"-=",
"1",
"file",
".",
"write",
"(",
"\"%s ]\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
")",
")",
"elif",
"goListDict",
"and",
"isinstance",
"(",
"value",
",",
"dict",
")",
"and",
"value",
":",
"file",
".",
"write",
"(",
"\"%s%s = {\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
"attr",
")",
")",
"indent",
"+=",
"1",
"for",
"key",
",",
"elem",
"in",
"value",
".",
"items",
"(",
")",
":",
"file",
".",
"write",
"(",
"\"%s%r : %r\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
"key",
",",
"elem",
")",
")",
"if",
"goDeep",
"and",
"hasattr",
"(",
"elem",
",",
"'debug_contents'",
")",
":",
"if",
"id",
"(",
"elem",
")",
"not",
"in",
"_ids",
":",
"_ids",
".",
"append",
"(",
"id",
"(",
"elem",
")",
")",
"elem",
".",
"debug_contents",
"(",
"indent",
"+",
"1",
",",
"file",
",",
"_ids",
")",
"indent",
"-=",
"1",
"file",
".",
"write",
"(",
"\"%s }\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
")",
")",
"elif",
"goHexed",
"and",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"if",
"len",
"(",
"value",
")",
">",
"20",
":",
"hexed",
"=",
"btox",
"(",
"value",
"[",
":",
"20",
"]",
",",
"'.'",
")",
"+",
"\"...\"",
"else",
":",
"hexed",
"=",
"btox",
"(",
"value",
",",
"'.'",
")",
"file",
".",
"write",
"(",
"\"%s%s = x'%s'\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
"attr",
",",
"hexed",
")",
")",
"# elif goHexed and isinstance(value, int):",
"# file.write(\"%s%s = 0x%X\\n\" % (' ' * indent, attr, value))",
"else",
":",
"file",
".",
"write",
"(",
"\"%s%s = %r\\n\"",
"%",
"(",
"' '",
"*",
"indent",
",",
"attr",
",",
"value",
")",
")",
"# go nested if it is debugable",
"if",
"goDeep",
"and",
"hasattr",
"(",
"value",
",",
"'debug_contents'",
")",
":",
"if",
"id",
"(",
"value",
")",
"not",
"in",
"_ids",
":",
"_ids",
".",
"append",
"(",
"id",
"(",
"value",
")",
")",
"value",
".",
"debug_contents",
"(",
"indent",
"+",
"1",
",",
"file",
",",
"_ids",
")",
"# go through the functions",
"ownFn",
".",
"reverse",
"(",
")",
"for",
"klass",
"in",
"ownFn",
":",
"klass",
".",
"debug_contents",
"(",
"self",
",",
"indent",
",",
"file",
",",
"_ids",
")"
] | Debug the contents of an object. | [
"Debug",
"the",
"contents",
"of",
"an",
"object",
"."
] | python | train |
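debug_contents above drives its output from a per-class _debug_contents tuple, where a trailing "-" suppresses recursion, "*" hex-dumps a string value, and "+" expands a list or dict. A minimal example class using that convention; it assumes the DebugContents mixin is importable from the installed bacpypes package.

from bacpypes.debugging import DebugContents   # same mixin as in the record above

class Widget(DebugContents):
    # suffixes: "-" do not recurse, "*" hex-dump strings, "+" expand lists/dicts
    _debug_contents = ('name', 'payload*', 'children+')

    def __init__(self, name, payload=b'', children=None):
        self.name = name
        self.payload = payload
        self.children = children or []

# Widget('root', b'\x01\x02', [Widget('leaf')]).debug_contents()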
jobovy/galpy | galpy/util/bovy_coords.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L372-L415 | def sphergal_to_rectgal(l,b,d,vr,pmll,pmbb,degree=False):
"""
NAME:
sphergal_to_rectgal
PURPOSE:
transform phase-space coordinates in spherical Galactic coordinates to rectangular Galactic coordinates (can take vector inputs)
INPUT:
l - Galactic longitude (rad)
b - Galactic latitude (rad)
d - distance (kpc)
vr - line-of-sight velocity (km/s)
pmll - proper motion in the Galactic longitude direction (mu_l*cos(b) ) (mas/yr)
pmbb - proper motion in the Galactic latitude (mas/yr)
degree - (bool) if True, l and b are in degrees
OUTPUT:
(X,Y,Z,vx,vy,vz) in (kpc,kpc,kpc,km/s,km/s,km/s)
HISTORY:
2009-10-25 - Written - Bovy (NYU)
"""
XYZ= lbd_to_XYZ(l,b,d,degree=degree)
vxvyvz= vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,XYZ=False,degree=degree)
if sc.array(l).shape == ():
return sc.array([XYZ[0],XYZ[1],XYZ[2],vxvyvz[0],vxvyvz[1],vxvyvz[2]])
else:
out=sc.zeros((len(l),6))
out[:,0:3]= XYZ
out[:,3:6]= vxvyvz
return out | [
"def",
"sphergal_to_rectgal",
"(",
"l",
",",
"b",
",",
"d",
",",
"vr",
",",
"pmll",
",",
"pmbb",
",",
"degree",
"=",
"False",
")",
":",
"XYZ",
"=",
"lbd_to_XYZ",
"(",
"l",
",",
"b",
",",
"d",
",",
"degree",
"=",
"degree",
")",
"vxvyvz",
"=",
"vrpmllpmbb_to_vxvyvz",
"(",
"vr",
",",
"pmll",
",",
"pmbb",
",",
"l",
",",
"b",
",",
"d",
",",
"XYZ",
"=",
"False",
",",
"degree",
"=",
"degree",
")",
"if",
"sc",
".",
"array",
"(",
"l",
")",
".",
"shape",
"==",
"(",
")",
":",
"return",
"sc",
".",
"array",
"(",
"[",
"XYZ",
"[",
"0",
"]",
",",
"XYZ",
"[",
"1",
"]",
",",
"XYZ",
"[",
"2",
"]",
",",
"vxvyvz",
"[",
"0",
"]",
",",
"vxvyvz",
"[",
"1",
"]",
",",
"vxvyvz",
"[",
"2",
"]",
"]",
")",
"else",
":",
"out",
"=",
"sc",
".",
"zeros",
"(",
"(",
"len",
"(",
"l",
")",
",",
"6",
")",
")",
"out",
"[",
":",
",",
"0",
":",
"3",
"]",
"=",
"XYZ",
"out",
"[",
":",
",",
"3",
":",
"6",
"]",
"=",
"vxvyvz",
"return",
"out"
] | NAME:
sphergal_to_rectgal
PURPOSE:
transform phase-space coordinates in spherical Galactic coordinates to rectangular Galactic coordinates (can take vector inputs)
INPUT:
l - Galactic longitude (rad)
b - Galactic latitude (rad)
d - distance (kpc)
vr - line-of-sight velocity (km/s)
pmll - proper motion in the Galactic longitude direction (mu_l*cos(b) ) (mas/yr)
pmbb - proper motion in the Galactic lattitude (mas/yr)
degree - (bool) if True, l and b are in degrees
OUTPUT:
(X,Y,Z,vx,vy,vz) in (kpc,kpc,kpc,km/s,km/s,km/s)
HISTORY:
2009-10-25 - Written - Bovy (NYU) | [
"NAME",
":"
] | python | train |
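A short usage sketch of the transform above, assuming the module path shown in the record; the input numbers are arbitrary and the scalar-input result is a 6-vector (X, Y, Z, vx, vy, vz).

import numpy
from galpy.util import bovy_coords

out = bovy_coords.sphergal_to_rectgal(
    l=numpy.pi / 4.,   # Galactic longitude [rad]
    b=0.1,             # Galactic latitude [rad]
    d=1.5,             # distance [kpc]
    vr=20.,            # line-of-sight velocity [km/s]
    pmll=3.,           # mu_l * cos(b) [mas/yr]
    pmbb=-2.)          # mu_b [mas/yr]
print(out.shape)       # (6,)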
ianmiell/shutit | emailer.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/emailer.py#L88-L125 | def __set_config(self, cfg_section):
"""Set a local config array up according to
defaults and main shutit configuration
cfg_section - see __init__
"""
defaults = [
'shutit.core.alerting.emailer.mailto', None,
'shutit.core.alerting.emailer.mailfrom', '[email protected]',
'shutit.core.alerting.emailer.smtp_server', 'localhost',
'shutit.core.alerting.emailer.smtp_port', 25,
'shutit.core.alerting.emailer.use_tls', True,
'shutit.core.alerting.emailer.send_mail', True,
'shutit.core.alerting.emailer.subject', 'Shutit Report',
'shutit.core.alerting.emailer.signature', '--Angry Shutit',
'shutit.core.alerting.emailer.compress', True,
'shutit.core.alerting.emailer.username', '',
'shutit.core.alerting.emailer.password', '',
'shutit.core.alerting.emailer.safe_mode', True,
'shutit.core.alerting.emailer.maintainer','',
'shutit.core.alerting.emailer.mailto_maintainer', True
]
for cfg_name, cfg_default in zip(defaults[0::2], defaults[1::2]):
try:
self.config[cfg_name] = self.shutit.cfg[cfg_section][cfg_name]
except KeyError:
if cfg_default is None:
raise Exception(cfg_section + ' ' + cfg_name + ' must be set')
else:
self.config[cfg_name] = cfg_default
# only send a mail to the module's maintainer if configured correctly
if self.config['shutit.core.alerting.emailer.mailto_maintainer'] and \
(self.config['shutit.core.alerting.emailer.maintainer'] == "" or \
self.config['shutit.core.alerting.emailer.maintainer'] == self.config['shutit.core.alerting.emailer.mailto']):
self.config['shutit.core.alerting.emailer.mailto_maintainer'] = False
self.config['shutit.core.alerting.emailer.maintainer'] = "" | [
"def",
"__set_config",
"(",
"self",
",",
"cfg_section",
")",
":",
"defaults",
"=",
"[",
"'shutit.core.alerting.emailer.mailto'",
",",
"None",
",",
"'shutit.core.alerting.emailer.mailfrom'",
",",
"'[email protected]'",
",",
"'shutit.core.alerting.emailer.smtp_server'",
",",
"'localhost'",
",",
"'shutit.core.alerting.emailer.smtp_port'",
",",
"25",
",",
"'shutit.core.alerting.emailer.use_tls'",
",",
"True",
",",
"'shutit.core.alerting.emailer.send_mail'",
",",
"True",
",",
"'shutit.core.alerting.emailer.subject'",
",",
"'Shutit Report'",
",",
"'shutit.core.alerting.emailer.signature'",
",",
"'--Angry Shutit'",
",",
"'shutit.core.alerting.emailer.compress'",
",",
"True",
",",
"'shutit.core.alerting.emailer.username'",
",",
"''",
",",
"'shutit.core.alerting.emailer.password'",
",",
"''",
",",
"'shutit.core.alerting.emailer.safe_mode'",
",",
"True",
",",
"'shutit.core.alerting.emailer.maintainer'",
",",
"''",
",",
"'shutit.core.alerting.emailer.mailto_maintainer'",
",",
"True",
"]",
"for",
"cfg_name",
",",
"cfg_default",
"in",
"zip",
"(",
"defaults",
"[",
"0",
":",
":",
"2",
"]",
",",
"defaults",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"try",
":",
"self",
".",
"config",
"[",
"cfg_name",
"]",
"=",
"self",
".",
"shutit",
".",
"cfg",
"[",
"cfg_section",
"]",
"[",
"cfg_name",
"]",
"except",
"KeyError",
":",
"if",
"cfg_default",
"is",
"None",
":",
"raise",
"Exception",
"(",
"cfg_section",
"+",
"' '",
"+",
"cfg_name",
"+",
"' must be set'",
")",
"else",
":",
"self",
".",
"config",
"[",
"cfg_name",
"]",
"=",
"cfg_default",
"# only send a mail to the module's maintainer if configured correctly",
"if",
"self",
".",
"config",
"[",
"'shutit.core.alerting.emailer.mailto_maintainer'",
"]",
"and",
"(",
"self",
".",
"config",
"[",
"'shutit.core.alerting.emailer.maintainer'",
"]",
"==",
"\"\"",
"or",
"self",
".",
"config",
"[",
"'shutit.core.alerting.emailer.maintainer'",
"]",
"==",
"self",
".",
"config",
"[",
"'shutit.core.alerting.emailer.mailto'",
"]",
")",
":",
"self",
".",
"config",
"[",
"'shutit.core.alerting.emailer.mailto_maintainer'",
"]",
"=",
"False",
"self",
".",
"config",
"[",
"'shutit.core.alerting.emailer.maintainer'",
"]",
"=",
"\"\""
] | Set a local config array up according to
defaults and main shutit configuration
cfg_section - see __init__ | [
"Set",
"a",
"local",
"config",
"array",
"up",
"according",
"to",
"defaults",
"and",
"main",
"shutit",
"configuration"
] | python | train |
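The defaults list in __set_config above interleaves configuration names and default values, and zip(defaults[0::2], defaults[1::2]) walks them as pairs. A tiny standalone illustration of that idiom.

defaults = ['smtp_port', 25, 'use_tls', True, 'subject', 'Shutit Report']
for name, value in zip(defaults[0::2], defaults[1::2]):
    print(name, '->', value)
# smtp_port -> 25
# use_tls -> True
# subject -> Shutit Report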
hydpy-dev/hydpy | hydpy/core/sequencetools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/sequencetools.py#L198-L222 | def load_conditions(self, filename=None):
"""Read the initial conditions from a file and assign them to the
respective |StateSequence| and/or |LogSequence| objects handled by
the actual |Sequences| object.
If no filename or dirname is passed, the ones defined by the
|ConditionManager| stored in module |pub| are used.
"""
if self.hasconditions:
if not filename:
filename = self._conditiondefaultfilename
namespace = locals()
for seq in self.conditionsequences:
namespace[seq.name] = seq
namespace['model'] = self
code = hydpy.pub.conditionmanager.load_file(filename)
try:
# ToDo: raises an escape sequence deprecation sometimes
# ToDo: use runpy instead?
# ToDo: Move functionality to filetools.py?
exec(code)
except BaseException:
objecttools.augment_excmessage(
'While trying to gather initial conditions of element %s'
% objecttools.devicename(self)) | [
"def",
"load_conditions",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"self",
".",
"hasconditions",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"_conditiondefaultfilename",
"namespace",
"=",
"locals",
"(",
")",
"for",
"seq",
"in",
"self",
".",
"conditionsequences",
":",
"namespace",
"[",
"seq",
".",
"name",
"]",
"=",
"seq",
"namespace",
"[",
"'model'",
"]",
"=",
"self",
"code",
"=",
"hydpy",
".",
"pub",
".",
"conditionmanager",
".",
"load_file",
"(",
"filename",
")",
"try",
":",
"# ToDo: raises an escape sequence deprecation sometimes",
"# ToDo: use runpy instead?",
"# ToDo: Move functionality to filetools.py?",
"exec",
"(",
"code",
")",
"except",
"BaseException",
":",
"objecttools",
".",
"augment_excmessage",
"(",
"'While trying to gather initial conditions of element %s'",
"%",
"objecttools",
".",
"devicename",
"(",
"self",
")",
")"
] | Read the initial conditions from a file and assign them to the
respective |StateSequence| and/or |LogSequence| objects handled by
the actual |Sequences| object.
If no filename or dirname is passed, the ones defined by the
|ConditionManager| stored in module |pub| are used. | [
"Read",
"the",
"initial",
"conditions",
"from",
"a",
"file",
"and",
"assign",
"them",
"to",
"the",
"respective",
"|StateSequence|",
"and",
"/",
"or",
"|LogSequence|",
"objects",
"handled",
"by",
"the",
"actual",
"|Sequences|",
"object",
"."
] | python | train |
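load_conditions above injects the model's condition sequences into a local namespace and then exec()s the loaded condition file, so the file can simply call the sequences by name. A generic sketch of that exec-with-injected-names pattern; the toy "condition file" string below is made up, not HydPy's real file format.

class Sequence:
    def __init__(self, name):
        self.name, self.values = name, None
    def __call__(self, *values):
        self.values = values

sm = Sequence('sm')
namespace = {'sm': sm}                  # names the condition file may use
code = "sm(10.0, 12.5)"                 # stand-in for the loaded file contents
exec(code, {}, namespace)
print(sm.values)                        # (10.0, 12.5)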
deepmind/sonnet | sonnet/python/modules/base_info.py | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/base_info.py#L119-L142 | def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):
"""Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.
Args:
sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
process_leafs: A function to be applied to the leaf valued of the nested
structure.
Returns:
An instance of `tf.SparseTensor`.
"""
if not sparse_tensor_proto.HasField("named_tuple"):
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: expected proto tuple.")
if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: The name of the tuple "
"should have been {} but was {}.".format(
_SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))
named_tuple_map = sparse_tensor_proto.named_tuple.map
return tf.SparseTensor(
indices=process_leafs(named_tuple_map["indices"].value),
values=process_leafs(named_tuple_map["values"].value),
dense_shape=process_leafs(named_tuple_map["dense_shape"].value)) | [
"def",
"_from_proto_sparse_tensor",
"(",
"sparse_tensor_proto",
",",
"process_leafs",
")",
":",
"if",
"not",
"sparse_tensor_proto",
".",
"HasField",
"(",
"\"named_tuple\"",
")",
":",
"raise",
"base_errors",
".",
"ModuleInfoError",
"(",
"\"Error while deserializing a SparseTensor: expected proto tuple.\"",
")",
"if",
"sparse_tensor_proto",
".",
"named_tuple",
".",
"name",
"!=",
"_SPARSE_TENSOR_NAME",
":",
"raise",
"base_errors",
".",
"ModuleInfoError",
"(",
"\"Error while deserializing a SparseTensor: The name of the tuple \"",
"\"should have been {} but was {}.\"",
".",
"format",
"(",
"_SPARSE_TENSOR_NAME",
",",
"sparse_tensor_proto",
".",
"named_tuple",
".",
"name",
")",
")",
"named_tuple_map",
"=",
"sparse_tensor_proto",
".",
"named_tuple",
".",
"map",
"return",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"process_leafs",
"(",
"named_tuple_map",
"[",
"\"indices\"",
"]",
".",
"value",
")",
",",
"values",
"=",
"process_leafs",
"(",
"named_tuple_map",
"[",
"\"values\"",
"]",
".",
"value",
")",
",",
"dense_shape",
"=",
"process_leafs",
"(",
"named_tuple_map",
"[",
"\"dense_shape\"",
"]",
".",
"value",
")",
")"
] | Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.
Args:
sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
    sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
    process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `tf.SparseTensor`. | [
"Deserializes",
"a",
"tf",
".",
"SparseTensor",
"from",
"sparse_tensor_proto",
"."
] | python | train |
RJT1990/pyflux | pyflux/tsm.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/tsm.py#L187-L221 | def _laplace_fit(self,obj_type):
""" Performs a Laplace approximation to the posterior
Parameters
----------
obj_type : method
Whether a likelihood or a posterior
Returns
----------
None (plots posterior)
"""
# Get Mode and Inverse Hessian information
y = self.fit(method='PML',printer=False)
if y.ihessian is None:
raise Exception("No Hessian information - Laplace approximation cannot be performed")
else:
self.latent_variables.estimation_method = 'Laplace'
theta, Y, scores, states, states_var, X_names = self._categorize_model_output(self.latent_variables.get_z_values())
# Change this in future
try:
latent_variables_store = self.latent_variables.copy()
except:
latent_variables_store = self.latent_variables
return LaplaceResults(data_name=self.data_name,X_names=X_names,model_name=self.model_name,
model_type=self.model_type, latent_variables=latent_variables_store,data=Y,index=self.index,
multivariate_model=self.multivariate_model,objective_object=obj_type,
method='Laplace',ihessian=y.ihessian,signal=theta,scores=scores,
z_hide=self._z_hide,max_lag=self.max_lag,states=states,states_var=states_var) | [
"def",
"_laplace_fit",
"(",
"self",
",",
"obj_type",
")",
":",
"# Get Mode and Inverse Hessian information",
"y",
"=",
"self",
".",
"fit",
"(",
"method",
"=",
"'PML'",
",",
"printer",
"=",
"False",
")",
"if",
"y",
".",
"ihessian",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"No Hessian information - Laplace approximation cannot be performed\"",
")",
"else",
":",
"self",
".",
"latent_variables",
".",
"estimation_method",
"=",
"'Laplace'",
"theta",
",",
"Y",
",",
"scores",
",",
"states",
",",
"states_var",
",",
"X_names",
"=",
"self",
".",
"_categorize_model_output",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
")",
"# Change this in future",
"try",
":",
"latent_variables_store",
"=",
"self",
".",
"latent_variables",
".",
"copy",
"(",
")",
"except",
":",
"latent_variables_store",
"=",
"self",
".",
"latent_variables",
"return",
"LaplaceResults",
"(",
"data_name",
"=",
"self",
".",
"data_name",
",",
"X_names",
"=",
"X_names",
",",
"model_name",
"=",
"self",
".",
"model_name",
",",
"model_type",
"=",
"self",
".",
"model_type",
",",
"latent_variables",
"=",
"latent_variables_store",
",",
"data",
"=",
"Y",
",",
"index",
"=",
"self",
".",
"index",
",",
"multivariate_model",
"=",
"self",
".",
"multivariate_model",
",",
"objective_object",
"=",
"obj_type",
",",
"method",
"=",
"'Laplace'",
",",
"ihessian",
"=",
"y",
".",
"ihessian",
",",
"signal",
"=",
"theta",
",",
"scores",
"=",
"scores",
",",
"z_hide",
"=",
"self",
".",
"_z_hide",
",",
"max_lag",
"=",
"self",
".",
"max_lag",
",",
"states",
"=",
"states",
",",
"states_var",
"=",
"states_var",
")"
] | Performs a Laplace approximation to the posterior
Parameters
----------
obj_type : method
Whether a likelihood or a posterior
Returns
----------
None (plots posterior) | [
"Performs",
"a",
"Laplace",
"approximation",
"to",
"the",
"posterior"
] | python | train |
ministryofjustice/money-to-prisoners-common | mtp_common/build_tasks/executor.py | https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/executor.py#L206-L214 | def update_from(self, mapping):
"""
Updates the set of parameters from a mapping for keys that already exist
"""
for key, value in mapping.items():
if key in self:
if isinstance(value, Parameter):
value = value.value
self[key].value = value | [
"def",
"update_from",
"(",
"self",
",",
"mapping",
")",
":",
"for",
"key",
",",
"value",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"self",
":",
"if",
"isinstance",
"(",
"value",
",",
"Parameter",
")",
":",
"value",
"=",
"value",
".",
"value",
"self",
"[",
"key",
"]",
".",
"value",
"=",
"value"
] | Updates the set of parameters from a mapping for keys that already exist | [
"Updates",
"the",
"set",
"of",
"parameters",
"from",
"a",
"mapping",
"for",
"keys",
"that",
"already",
"exist"
] | python | train |
trailofbits/manticore | manticore/ethereum/solidity.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/ethereum/solidity.py#L196-L205 | def get_func_argument_types(self, hsh: bytes):
"""Returns the tuple type signature for the arguments of the function associated with the selector ``hsh``.
If no normal contract function has the specified selector,
the empty tuple type signature ``'()'`` is returned.
"""
if not isinstance(hsh, (bytes, bytearray)):
raise TypeError('The selector argument must be a concrete byte array')
sig = self._function_signatures_by_selector.get(hsh)
return '()' if sig is None else sig[sig.find('('):] | [
"def",
"get_func_argument_types",
"(",
"self",
",",
"hsh",
":",
"bytes",
")",
":",
"if",
"not",
"isinstance",
"(",
"hsh",
",",
"(",
"bytes",
",",
"bytearray",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The selector argument must be a concrete byte array'",
")",
"sig",
"=",
"self",
".",
"_function_signatures_by_selector",
".",
"get",
"(",
"hsh",
")",
"return",
"'()'",
"if",
"sig",
"is",
"None",
"else",
"sig",
"[",
"sig",
".",
"find",
"(",
"'('",
")",
":",
"]"
] | Returns the tuple type signature for the arguments of the function associated with the selector ``hsh``.
If no normal contract function has the specified selector,
the empty tuple type signature ``'()'`` is returned. | [
"Returns",
"the",
"tuple",
"type",
"signature",
"for",
"the",
"arguments",
"of",
"the",
"function",
"associated",
"with",
"the",
"selector",
"hsh",
"."
] | python | valid |
larsyencken/csvdiff | csvdiff/patch.py | https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L326-L337 | def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance) | [
"def",
"_is_significant",
"(",
"change",
",",
"significance",
")",
":",
"try",
":",
"a",
"=",
"float",
"(",
"change",
"[",
"'from'",
"]",
")",
"b",
"=",
"float",
"(",
"change",
"[",
"'to'",
"]",
")",
"except",
"ValueError",
":",
"return",
"True",
"return",
"abs",
"(",
"a",
"-",
"b",
")",
">",
"10",
"**",
"(",
"-",
"significance",
")"
] | Return True if a change is genuinely significant given our tolerance. | [
"Return",
"True",
"if",
"a",
"change",
"is",
"genuinely",
"significant",
"given",
"our",
"tolerance",
"."
] | python | train |
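A minimal usage sketch for the `_is_significant` helper documented in the record above; it assumes the csvdiff package is installed and that the private helper is reached via `csvdiff.patch`, and the change values are purely illustrative:

# Illustrative sketch only; csvdiff.patch._is_significant is assumed importable.
from csvdiff import patch

change = {'from': '1.0001', 'to': '1.0002'}
# abs(1.0001 - 1.0002) = 0.0001 is not greater than 10**-3, so it is ignored.
print(patch._is_significant(change, significance=3))                        # False
# Non-numeric values cannot be compared numerically, so they always count.
print(patch._is_significant({'from': 'abc', 'to': 'abd'}, significance=3))  # True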
muatik/naive-bayes-classifier | naiveBayesClassifier/trainer.py | https://github.com/muatik/naive-bayes-classifier/blob/cdc1d8681ef6674e946cff38e87ce3b00c732fbb/naiveBayesClassifier/trainer.py#L11-L21 | def train(self, text, className):
"""
enhances trained data using the given text and class
"""
self.data.increaseClass(className)
tokens = self.tokenizer.tokenize(text)
for token in tokens:
token = self.tokenizer.remove_stop_words(token)
token = self.tokenizer.remove_punctuation(token)
self.data.increaseToken(token, className) | [
"def",
"train",
"(",
"self",
",",
"text",
",",
"className",
")",
":",
"self",
".",
"data",
".",
"increaseClass",
"(",
"className",
")",
"tokens",
"=",
"self",
".",
"tokenizer",
".",
"tokenize",
"(",
"text",
")",
"for",
"token",
"in",
"tokens",
":",
"token",
"=",
"self",
".",
"tokenizer",
".",
"remove_stop_words",
"(",
"token",
")",
"token",
"=",
"self",
".",
"tokenizer",
".",
"remove_punctuation",
"(",
"token",
")",
"self",
".",
"data",
".",
"increaseToken",
"(",
"token",
",",
"className",
")"
] | enhances trained data using the given text and class | [
"enhances",
"trained",
"data",
"using",
"the",
"given",
"text",
"and",
"class"
] | python | train |
TkTech/Jawa | jawa/util/utf.py | https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/util/utf.py#L12-L52 | def decode_modified_utf8(s: bytes) -> str:
"""
Decodes a bytestring containing modified UTF-8 as defined in section
4.4.7 of the JVM specification.
:param s: bytestring to be converted.
:returns: A unicode representation of the original string.
"""
s = bytearray(s)
buff = []
buffer_append = buff.append
ix = 0
while ix < len(s):
x = s[ix]
ix += 1
if x >> 7 == 0:
# Just an ASCII character, nothing else to do.
pass
elif x >> 6 == 6:
y = s[ix]
ix += 1
x = ((x & 0x1F) << 6) + (y & 0x3F)
elif x >> 4 == 14:
y, z = s[ix:ix+2]
ix += 2
x = ((x & 0xF) << 12) + ((y & 0x3F) << 6) + (z & 0x3F)
elif x == 0xED:
v, w, x, y, z = s[ix:ix+6]
ix += 5
x = 0x10000 + (
((v & 0x0F) << 16) +
((w & 0x3F) << 10) +
((y & 0x0F) << 6) +
(z & 0x3F)
)
elif x == 0xC0 and s[ix] == 0x80:
ix += 1
x = 0
buffer_append(x)
return u''.join(chr(b) for b in buff) | [
"def",
"decode_modified_utf8",
"(",
"s",
":",
"bytes",
")",
"->",
"str",
":",
"s",
"=",
"bytearray",
"(",
"s",
")",
"buff",
"=",
"[",
"]",
"buffer_append",
"=",
"buff",
".",
"append",
"ix",
"=",
"0",
"while",
"ix",
"<",
"len",
"(",
"s",
")",
":",
"x",
"=",
"s",
"[",
"ix",
"]",
"ix",
"+=",
"1",
"if",
"x",
">>",
"7",
"==",
"0",
":",
"# Just an ASCII character, nothing else to do.",
"pass",
"elif",
"x",
">>",
"6",
"==",
"6",
":",
"y",
"=",
"s",
"[",
"ix",
"]",
"ix",
"+=",
"1",
"x",
"=",
"(",
"(",
"x",
"&",
"0x1F",
")",
"<<",
"6",
")",
"+",
"(",
"y",
"&",
"0x3F",
")",
"elif",
"x",
">>",
"4",
"==",
"14",
":",
"y",
",",
"z",
"=",
"s",
"[",
"ix",
":",
"ix",
"+",
"2",
"]",
"ix",
"+=",
"2",
"x",
"=",
"(",
"(",
"x",
"&",
"0xF",
")",
"<<",
"12",
")",
"+",
"(",
"(",
"y",
"&",
"0x3F",
")",
"<<",
"6",
")",
"+",
"(",
"z",
"&",
"0x3F",
")",
"elif",
"x",
"==",
"0xED",
":",
"v",
",",
"w",
",",
"x",
",",
"y",
",",
"z",
"=",
"s",
"[",
"ix",
":",
"ix",
"+",
"6",
"]",
"ix",
"+=",
"5",
"x",
"=",
"0x10000",
"+",
"(",
"(",
"(",
"v",
"&",
"0x0F",
")",
"<<",
"16",
")",
"+",
"(",
"(",
"w",
"&",
"0x3F",
")",
"<<",
"10",
")",
"+",
"(",
"(",
"y",
"&",
"0x0F",
")",
"<<",
"6",
")",
"+",
"(",
"z",
"&",
"0x3F",
")",
")",
"elif",
"x",
"==",
"0xC0",
"and",
"s",
"[",
"ix",
"]",
"==",
"0x80",
":",
"ix",
"+=",
"1",
"x",
"=",
"0",
"buffer_append",
"(",
"x",
")",
"return",
"u''",
".",
"join",
"(",
"chr",
"(",
"b",
")",
"for",
"b",
"in",
"buff",
")"
] | Decodes a bytestring containing modified UTF-8 as defined in section
4.4.7 of the JVM specification.
:param s: bytestring to be converted.
:returns: A unicode representation of the original string. | [
"Decodes",
"a",
"bytestring",
"containing",
"modified",
"UTF",
"-",
"8",
"as",
"defined",
"in",
"section",
"4",
".",
"4",
".",
"7",
"of",
"the",
"JVM",
"specification",
"."
] | python | train |
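A short usage sketch for `decode_modified_utf8` from the record above, assuming the jawa package is installed and that the function is importable from `jawa.util.utf` as the record's file path suggests:

# Illustrative sketch only; the import path follows the record's file path.
from jawa.util.utf import decode_modified_utf8

print(decode_modified_utf8(b'Hello'))        # plain ASCII passes through: 'Hello'
# In modified UTF-8 the NUL character is encoded as the two bytes 0xC0 0x80.
print(decode_modified_utf8(b'a\xc0\x80b'))   # 'a\x00b'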
google/apitools | apitools/base/py/transfer.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L286-L297 | def __SetTotal(self, info):
"""Sets the total size based off info if possible otherwise 0."""
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0 | [
"def",
"__SetTotal",
"(",
"self",
",",
"info",
")",
":",
"if",
"'content-range'",
"in",
"info",
":",
"_",
",",
"_",
",",
"total",
"=",
"info",
"[",
"'content-range'",
"]",
".",
"rpartition",
"(",
"'/'",
")",
"if",
"total",
"!=",
"'*'",
":",
"self",
".",
"__total_size",
"=",
"int",
"(",
"total",
")",
"# Note \"total_size is None\" means we don't know it; if no size",
"# info was returned on our initial range request, that means we",
"# have a 0-byte file. (That last statement has been verified",
"# empirically, but is not clearly documented anywhere.)",
"if",
"self",
".",
"total_size",
"is",
"None",
":",
"self",
".",
"__total_size",
"=",
"0"
] | Sets the total size based off info if possible otherwise 0. | [
"Sets",
"the",
"total",
"size",
"based",
"off",
"info",
"if",
"possible",
"otherwise",
"0",
"."
] | python | train |
astropy/photutils | photutils/psf/sandbox.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/sandbox.py#L365-L383 | def to_rectified(self, x, y):
"""
Convert the input (x, y) positions from the original
(unrectified) image to the rectified image.
Parameters
----------
x, y: float or array-like of float
The zero-index pixel coordinates in the original
(unrectified) image.
Returns
-------
x, y: float or array-like
The zero-index pixel coordinates in the rectified image.
"""
return self._reproject(self.wcs_original,
self.wcs_rectified)(x, y) | [
"def",
"to_rectified",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"return",
"self",
".",
"_reproject",
"(",
"self",
".",
"wcs_original",
",",
"self",
".",
"wcs_rectified",
")",
"(",
"x",
",",
"y",
")"
] | Convert the input (x, y) positions from the original
(unrectified) image to the rectified image.
Parameters
----------
x, y: float or array-like of float
The zero-index pixel coordinates in the original
(unrectified) image.
Returns
-------
x, y: float or array-like
The zero-index pixel coordinates in the rectified image. | [
"Convert",
"the",
"input",
"(",
"x",
"y",
")",
"positions",
"from",
"the",
"original",
"(",
"unrectified",
")",
"image",
"to",
"the",
"rectified",
"image",
"."
] | python | train |
linkhub-sdk/popbill.py | popbill/faxService.py | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L389-L417 | def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
""" 팩스 단건 전송
args
CorpNum : 팝빌회원 사업자번호
OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호
ReceiptNum : 팩스 접수번호
SenderNum : 발신자 번호
SenderName : 발신자명
ReceiverNum : 수신번호
ReceiverName : 수신자명
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT,
UserID, title, RequestNum) | [
"def",
"resendFaxRN",
"(",
"self",
",",
"CorpNum",
",",
"OrgRequestNum",
",",
"SenderNum",
",",
"SenderName",
",",
"ReceiverNum",
",",
"ReceiverName",
",",
"ReserveDT",
"=",
"None",
",",
"UserID",
"=",
"None",
",",
"title",
"=",
"None",
",",
"RequestNum",
"=",
"None",
")",
":",
"receivers",
"=",
"None",
"if",
"ReceiverNum",
"!=",
"\"\"",
"or",
"ReceiverName",
"!=",
"\"\"",
":",
"receivers",
"=",
"[",
"]",
"receivers",
".",
"append",
"(",
"FaxReceiver",
"(",
"receiveNum",
"=",
"ReceiverNum",
",",
"receiveName",
"=",
"ReceiverName",
")",
")",
"return",
"self",
".",
"resendFaxRN_multi",
"(",
"CorpNum",
",",
"OrgRequestNum",
",",
"SenderNum",
",",
"SenderName",
",",
"receivers",
",",
"ReserveDT",
",",
"UserID",
",",
"title",
",",
"RequestNum",
")"
] | 팩스 단건 전송
args
CorpNum : 팝빌회원 사업자번호
OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호
ReceiptNum : 팩스 접수번호
SenderNum : 발신자 번호
SenderName : 발신자명
ReceiverNum : 수신번호
ReceiverName : 수신자명
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException | [
"팩스",
"단건",
"전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"OrgRequestNum",
":",
"원본",
"팩스",
"전송시",
"할당한",
"전송요청번호",
"ReceiptNum",
":",
"팩스",
"접수번호",
"SenderNum",
":",
"발신자",
"번호",
"SenderName",
":",
"발신자명",
"ReceiverNum",
":",
"수신번호",
"ReceiverName",
":",
"수신자명",
"ReserveDT",
":",
"예약시간",
"(",
"형식",
"yyyyMMddHHmmss",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"title",
":",
"팩스제목",
"RequestNum",
":",
"전송요청시",
"할당한",
"전송요청번호",
"return",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | python | train |
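The docstring in the record above is Korean; roughly, it documents resending a single fax: CorpNum is the Popbill member's business registration number, OrgRequestNum the request number assigned to the original transmission, SenderNum/SenderName and ReceiverNum/ReceiverName the sender and receiver details, ReserveDT an optional reservation time (format yyyyMMddHHmmss), and the call returns a receipt number or raises PopbillException. A heavily hedged usage sketch, in which the client construction and every identifier are placeholders and assumptions, not values from the source:

# Illustrative sketch only; all identifiers are placeholders and the client
# construction follows the SDK's usual LinkID/SecretKey pattern (an assumption).
from popbill import FaxService

fax_service = FaxService('LinkID-placeholder', 'SecretKey-placeholder')
receipt_num = fax_service.resendFaxRN(
    CorpNum='1234567890',            # placeholder business registration number
    OrgRequestNum='20190101-001',    # request number of the original fax
    SenderNum='07000000000',
    SenderName='Sender',
    ReceiverNum='01000000000',
    ReceiverName='Receiver',
)
print(receipt_num)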
mottosso/be | be/vendor/requests/sessions.py | https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/sessions.py#L92-L201 | def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
method = req.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if (resp.status_code == codes.see_other and
method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp | [
"def",
"resolve_redirects",
"(",
"self",
",",
"resp",
",",
"req",
",",
"stream",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"cert",
"=",
"None",
",",
"proxies",
"=",
"None",
")",
":",
"i",
"=",
"0",
"hist",
"=",
"[",
"]",
"# keep track of history",
"while",
"resp",
".",
"is_redirect",
":",
"prepared_request",
"=",
"req",
".",
"copy",
"(",
")",
"if",
"i",
">",
"0",
":",
"# Update history and keep track of redirects.",
"hist",
".",
"append",
"(",
"resp",
")",
"new_hist",
"=",
"list",
"(",
"hist",
")",
"resp",
".",
"history",
"=",
"new_hist",
"try",
":",
"resp",
".",
"content",
"# Consume socket so it can be released",
"except",
"(",
"ChunkedEncodingError",
",",
"ContentDecodingError",
",",
"RuntimeError",
")",
":",
"resp",
".",
"raw",
".",
"read",
"(",
"decode_content",
"=",
"False",
")",
"if",
"i",
">=",
"self",
".",
"max_redirects",
":",
"raise",
"TooManyRedirects",
"(",
"'Exceeded %s redirects.'",
"%",
"self",
".",
"max_redirects",
")",
"# Release the connection back into the pool.",
"resp",
".",
"close",
"(",
")",
"url",
"=",
"resp",
".",
"headers",
"[",
"'location'",
"]",
"method",
"=",
"req",
".",
"method",
"# Handle redirection without scheme (see: RFC 1808 Section 4)",
"if",
"url",
".",
"startswith",
"(",
"'//'",
")",
":",
"parsed_rurl",
"=",
"urlparse",
"(",
"resp",
".",
"url",
")",
"url",
"=",
"'%s:%s'",
"%",
"(",
"parsed_rurl",
".",
"scheme",
",",
"url",
")",
"# The scheme should be lower case...",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"url",
"=",
"parsed",
".",
"geturl",
"(",
")",
"# Facilitate relative 'location' headers, as allowed by RFC 7231.",
"# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')",
"# Compliant with RFC3986, we percent encode the url.",
"if",
"not",
"parsed",
".",
"netloc",
":",
"url",
"=",
"urljoin",
"(",
"resp",
".",
"url",
",",
"requote_uri",
"(",
"url",
")",
")",
"else",
":",
"url",
"=",
"requote_uri",
"(",
"url",
")",
"prepared_request",
".",
"url",
"=",
"to_native_string",
"(",
"url",
")",
"# Cache the url, unless it redirects to itself.",
"if",
"resp",
".",
"is_permanent_redirect",
"and",
"req",
".",
"url",
"!=",
"prepared_request",
".",
"url",
":",
"self",
".",
"redirect_cache",
"[",
"req",
".",
"url",
"]",
"=",
"prepared_request",
".",
"url",
"# http://tools.ietf.org/html/rfc7231#section-6.4.4",
"if",
"(",
"resp",
".",
"status_code",
"==",
"codes",
".",
"see_other",
"and",
"method",
"!=",
"'HEAD'",
")",
":",
"method",
"=",
"'GET'",
"# Do what the browsers do, despite standards...",
"# First, turn 302s into GETs.",
"if",
"resp",
".",
"status_code",
"==",
"codes",
".",
"found",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Second, if a POST is responded to with a 301, turn it into a GET.",
"# This bizarre behaviour is explained in Issue 1704.",
"if",
"resp",
".",
"status_code",
"==",
"codes",
".",
"moved",
"and",
"method",
"==",
"'POST'",
":",
"method",
"=",
"'GET'",
"prepared_request",
".",
"method",
"=",
"method",
"# https://github.com/kennethreitz/requests/issues/1084",
"if",
"resp",
".",
"status_code",
"not",
"in",
"(",
"codes",
".",
"temporary_redirect",
",",
"codes",
".",
"permanent_redirect",
")",
":",
"if",
"'Content-Length'",
"in",
"prepared_request",
".",
"headers",
":",
"del",
"prepared_request",
".",
"headers",
"[",
"'Content-Length'",
"]",
"prepared_request",
".",
"body",
"=",
"None",
"headers",
"=",
"prepared_request",
".",
"headers",
"try",
":",
"del",
"headers",
"[",
"'Cookie'",
"]",
"except",
"KeyError",
":",
"pass",
"# Extract any cookies sent on the response to the cookiejar",
"# in the new request. Because we've mutated our copied prepared",
"# request, use the old one that we haven't yet touched.",
"extract_cookies_to_jar",
"(",
"prepared_request",
".",
"_cookies",
",",
"req",
",",
"resp",
".",
"raw",
")",
"prepared_request",
".",
"_cookies",
".",
"update",
"(",
"self",
".",
"cookies",
")",
"prepared_request",
".",
"prepare_cookies",
"(",
"prepared_request",
".",
"_cookies",
")",
"# Rebuild auth and proxy information.",
"proxies",
"=",
"self",
".",
"rebuild_proxies",
"(",
"prepared_request",
",",
"proxies",
")",
"self",
".",
"rebuild_auth",
"(",
"prepared_request",
",",
"resp",
")",
"# Override the original request.",
"req",
"=",
"prepared_request",
"resp",
"=",
"self",
".",
"send",
"(",
"req",
",",
"stream",
"=",
"stream",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"cert",
"=",
"cert",
",",
"proxies",
"=",
"proxies",
",",
"allow_redirects",
"=",
"False",
",",
")",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"prepared_request",
",",
"resp",
".",
"raw",
")",
"i",
"+=",
"1",
"yield",
"resp"
] | Receives a Response. Returns a generator of Responses. | [
"Receives",
"a",
"Response",
".",
"Returns",
"a",
"generator",
"of",
"Responses",
"."
] | python | train |
NORDUnet/python-norduniclient | norduniclient/core.py | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L117-L135 | def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust) | [
"def",
"get_db_driver",
"(",
"uri",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"encrypted",
"=",
"True",
",",
"max_pool_size",
"=",
"50",
",",
"trust",
"=",
"0",
")",
":",
"return",
"GraphDatabase",
".",
"driver",
"(",
"uri",
",",
"auth",
"=",
"basic_auth",
"(",
"username",
",",
"password",
")",
",",
"encrypted",
"=",
"encrypted",
",",
"max_pool_size",
"=",
"max_pool_size",
",",
"trust",
"=",
"trust",
")"
] | :param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver | [
":",
"param",
"uri",
":",
"Bolt",
"uri",
":",
"type",
"uri",
":",
"str",
":",
"param",
"username",
":",
"Neo4j",
"username",
":",
"type",
"username",
":",
"str",
":",
"param",
"password",
":",
"Neo4j",
"password",
":",
"type",
"password",
":",
"str",
":",
"param",
"encrypted",
":",
"Use",
"TLS",
":",
"type",
"encrypted",
":",
"Boolean",
":",
"param",
"max_pool_size",
":",
"Maximum",
"number",
"of",
"idle",
"sessions",
":",
"type",
"max_pool_size",
":",
"Integer",
":",
"param",
"trust",
":",
"Trust",
"cert",
"on",
"first",
"use",
"(",
"0",
")",
"or",
"do",
"not",
"accept",
"unknown",
"cert",
"(",
"1",
")",
":",
"type",
"trust",
":",
"Integer",
":",
"return",
":",
"Neo4j",
"driver",
":",
"rtype",
":",
"neo4j",
".",
"v1",
".",
"session",
".",
"Driver"
] | python | train |
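A minimal connection sketch for `get_db_driver` from the record above; the Bolt URI and credentials are placeholders, and it assumes a reachable Neo4j instance plus the `norduniclient.core` import path shown in the record:

# Illustrative sketch only; URI and credentials are placeholders.
from norduniclient import core

driver = core.get_db_driver(
    'bolt://localhost:7687',
    username='neo4j',
    password='secret',
    encrypted=False,
)
with driver.session() as session:
    print(session.run('RETURN 1 AS ok').single()['ok'])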
chemlab/chemlab | chemlab/core/base.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/core/base.py#L575-L634 | def where(self, inplace=False, **kwargs):
"""Return indices over every dimension that met the conditions.
Condition syntax:
*attribute* = value
Return indices that satisfy the condition where the attribute is equal
to the value
e.g. type_array = 'H'
*attribute* = list(value1, value2)
Return indices that satisfy the condition where the attribute is equal
to any of the value in the list.
e.g. type_array = ['H', 'O']
*dimension_index* = value: int
*dimension_index* = value: list(int)
Return only elements that correspond to the index in the specified dimension:
atom_index = 0
atom_index = [0, 1]
"""
masks = {k: np.ones(v, dtype='bool') for k,v in self.dimensions.items()}
def index_to_mask(index, n):
val = np.zeros(n, dtype='bool')
val[index] = True
return val
def masks_and(dict1, dict2):
return {k: dict1[k] & index_to_mask(dict2[k], len(dict1[k])) for k in dict1 }
for key in kwargs:
value = kwargs[key]
if key.endswith('_index'):
if isinstance(value, int):
value = [value]
dim = key[:-len('_index')]
m = self._propagate_dim(value, dim)
masks = masks_and(masks, m)
else:
attribute = self.get_attribute(key)
if isinstance(value, list):
mask = reduce(operator.or_, [attribute.value == m for m in value])
else:
mask = attribute.value == value
m = self._propagate_dim(mask, attribute.dim)
masks = masks_and(masks, m)
return masks | [
"def",
"where",
"(",
"self",
",",
"inplace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"masks",
"=",
"{",
"k",
":",
"np",
".",
"ones",
"(",
"v",
",",
"dtype",
"=",
"'bool'",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"dimensions",
".",
"items",
"(",
")",
"}",
"def",
"index_to_mask",
"(",
"index",
",",
"n",
")",
":",
"val",
"=",
"np",
".",
"zeros",
"(",
"n",
",",
"dtype",
"=",
"'bool'",
")",
"val",
"[",
"index",
"]",
"=",
"True",
"return",
"val",
"def",
"masks_and",
"(",
"dict1",
",",
"dict2",
")",
":",
"return",
"{",
"k",
":",
"dict1",
"[",
"k",
"]",
"&",
"index_to_mask",
"(",
"dict2",
"[",
"k",
"]",
",",
"len",
"(",
"dict1",
"[",
"k",
"]",
")",
")",
"for",
"k",
"in",
"dict1",
"}",
"for",
"key",
"in",
"kwargs",
":",
"value",
"=",
"kwargs",
"[",
"key",
"]",
"if",
"key",
".",
"endswith",
"(",
"'_index'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"value",
"=",
"[",
"value",
"]",
"dim",
"=",
"key",
"[",
":",
"-",
"len",
"(",
"'_index'",
")",
"]",
"m",
"=",
"self",
".",
"_propagate_dim",
"(",
"value",
",",
"dim",
")",
"masks",
"=",
"masks_and",
"(",
"masks",
",",
"m",
")",
"else",
":",
"attribute",
"=",
"self",
".",
"get_attribute",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"mask",
"=",
"reduce",
"(",
"operator",
".",
"or_",
",",
"[",
"attribute",
".",
"value",
"==",
"m",
"for",
"m",
"in",
"value",
"]",
")",
"else",
":",
"mask",
"=",
"attribute",
".",
"value",
"==",
"value",
"m",
"=",
"self",
".",
"_propagate_dim",
"(",
"mask",
",",
"attribute",
".",
"dim",
")",
"masks",
"=",
"masks_and",
"(",
"masks",
",",
"m",
")",
"return",
"masks"
] | Return indices over every dimension that met the conditions.
Condition syntax:
*attribute* = value
Return indices that satisfy the condition where the attribute is equal
to the value
e.g. type_array = 'H'
*attribute* = list(value1, value2)
Return indices that satisfy the condition where the attribute is equal
to any of the value in the list.
e.g. type_array = ['H', 'O']
*dimension_index* = value: int
*dimension_index* = value: list(int)
Return only elements that correspond to the index in the specified dimension:
atom_index = 0
atom_index = [0, 1] | [
"Return",
"indices",
"over",
"every",
"dimension",
"that",
"met",
"the",
"conditions",
".",
"Condition",
"syntax",
":",
"*",
"attribute",
"*",
"=",
"value",
"Return",
"indices",
"that",
"satisfy",
"the",
"condition",
"where",
"the",
"attribute",
"is",
"equal",
"to",
"the",
"value",
"e",
".",
"g",
".",
"type_array",
"=",
"H",
"*",
"attribute",
"*",
"=",
"list",
"(",
"value1",
"value2",
")",
"Return",
"indices",
"that",
"satisfy",
"the",
"condition",
"where",
"the",
"attribute",
"is",
"equal",
"to",
"any",
"of",
"the",
"value",
"in",
"the",
"list",
".",
"e",
".",
"g",
".",
"type_array",
"=",
"[",
"H",
"O",
"]",
"*",
"dimension_index",
"*",
"=",
"value",
":",
"int",
"*",
"dimension_index",
"*",
"=",
"value",
":",
"list",
"(",
"int",
")",
"Return",
"only",
"elements",
"that",
"correspond",
"to",
"the",
"index",
"in",
"the",
"specified",
"dimension",
":",
"atom_index",
"=",
"0",
"atom_index",
"=",
"[",
"0",
"1",
"]"
] | python | train |
DataDog/integrations-core | datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py#L25-L59 | def integrations(since, to, write, force):
"""
Generates a markdown file containing the list of integrations shipped in a
given Agent release. Agent version numbers are derived inspecting tags on
`integrations-core` so running this tool might provide unexpected results
if the repo is not up to date with the Agent release process.
If neither `--since` or `--to` are passed (the most common use case), the
tool will generate the list for every Agent since version 6.3.0
(before that point we don't have enough information to build the log).
"""
agent_tags = get_agent_tags(since, to)
# get the list of integrations shipped with the agent from the requirements file
req_file_name = os.path.basename(get_agent_release_requirements())
integrations_contents = StringIO()
for tag in agent_tags:
integrations_contents.write('## Datadog Agent version {}\n\n'.format(tag))
# Requirements for current tag
file_contents = git_show_file(req_file_name, tag)
for name, ver in iteritems(parse_agent_req_file(file_contents)):
integrations_contents.write('* {}: {}\n'.format(name, ver))
integrations_contents.write('\n')
# save the changelog on disk if --write was passed
if write:
dest = get_agent_integrations_file()
# don't overwrite an existing file
if os.path.exists(dest) and not force:
msg = "Output file {} already exists, run the command again with --force to overwrite"
abort(msg.format(dest))
write_file(dest, integrations_contents.getvalue())
else:
echo_info(integrations_contents.getvalue()) | [
"def",
"integrations",
"(",
"since",
",",
"to",
",",
"write",
",",
"force",
")",
":",
"agent_tags",
"=",
"get_agent_tags",
"(",
"since",
",",
"to",
")",
"# get the list of integrations shipped with the agent from the requirements file",
"req_file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"get_agent_release_requirements",
"(",
")",
")",
"integrations_contents",
"=",
"StringIO",
"(",
")",
"for",
"tag",
"in",
"agent_tags",
":",
"integrations_contents",
".",
"write",
"(",
"'## Datadog Agent version {}\\n\\n'",
".",
"format",
"(",
"tag",
")",
")",
"# Requirements for current tag",
"file_contents",
"=",
"git_show_file",
"(",
"req_file_name",
",",
"tag",
")",
"for",
"name",
",",
"ver",
"in",
"iteritems",
"(",
"parse_agent_req_file",
"(",
"file_contents",
")",
")",
":",
"integrations_contents",
".",
"write",
"(",
"'* {}: {}\\n'",
".",
"format",
"(",
"name",
",",
"ver",
")",
")",
"integrations_contents",
".",
"write",
"(",
"'\\n'",
")",
"# save the changelog on disk if --write was passed",
"if",
"write",
":",
"dest",
"=",
"get_agent_integrations_file",
"(",
")",
"# don't overwrite an existing file",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
"and",
"not",
"force",
":",
"msg",
"=",
"\"Output file {} already exists, run the command again with --force to overwrite\"",
"abort",
"(",
"msg",
".",
"format",
"(",
"dest",
")",
")",
"write_file",
"(",
"dest",
",",
"integrations_contents",
".",
"getvalue",
"(",
")",
")",
"else",
":",
"echo_info",
"(",
"integrations_contents",
".",
"getvalue",
"(",
")",
")"
] | Generates a markdown file containing the list of integrations shipped in a
given Agent release. Agent version numbers are derived inspecting tags on
`integrations-core` so running this tool might provide unexpected results
if the repo is not up to date with the Agent release process.
If neither `--since` or `--to` are passed (the most common use case), the
tool will generate the list for every Agent since version 6.3.0
(before that point we don't have enough information to build the log). | [
"Generates",
"a",
"markdown",
"file",
"containing",
"the",
"list",
"of",
"integrations",
"shipped",
"in",
"a",
"given",
"Agent",
"release",
".",
"Agent",
"version",
"numbers",
"are",
"derived",
"inspecting",
"tags",
"on",
"integrations",
"-",
"core",
"so",
"running",
"this",
"tool",
"might",
"provide",
"unexpected",
"results",
"if",
"the",
"repo",
"is",
"not",
"up",
"to",
"date",
"with",
"the",
"Agent",
"release",
"process",
"."
] | python | train |
log2timeline/plaso | plaso/analysis/windows_services.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/windows_services.py#L171-L186 | def AddService(self, new_service):
"""Add a new service to the list of ones we know about.
Args:
new_service (WindowsService): the service to add.
"""
for service in self._services:
if new_service == service:
# If this service is the same as one we already know about, we
# just want to add where it came from.
service.sources.append(new_service.sources[0])
return
# We only add a new object to our list if we don't have
# an identical one already.
self._services.append(new_service) | [
"def",
"AddService",
"(",
"self",
",",
"new_service",
")",
":",
"for",
"service",
"in",
"self",
".",
"_services",
":",
"if",
"new_service",
"==",
"service",
":",
"# If this service is the same as one we already know about, we",
"# just want to add where it came from.",
"service",
".",
"sources",
".",
"append",
"(",
"new_service",
".",
"sources",
"[",
"0",
"]",
")",
"return",
"# We only add a new object to our list if we don't have",
"# an identical one already.",
"self",
".",
"_services",
".",
"append",
"(",
"new_service",
")"
] | Add a new service to the list of ones we know about.
Args:
new_service (WindowsService): the service to add. | [
"Add",
"a",
"new",
"service",
"to",
"the",
"list",
"of",
"ones",
"we",
"know",
"about",
"."
] | python | train |
leancloud/python-sdk | leancloud/message.py | https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/message.py#L75-L95 | def find_by_client(cls, from_client, limit=None, before_time=None, before_message_id=None):
# type: (str, Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取某个 client 的聊天记录
:param from_client: 要获取聊天记录的 client id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param before_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param before_message_id: 起始的消息 id,使用时必须加上对应消息的时间 before_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
query_params['from'] = from_client
if limit is not None:
query_params['limit'] = limit
if isinstance(before_time, datetime):
query_params['max_ts'] = round(before_time.timestamp() * 1000)
elif isinstance(before_time, six.integer_types) or isinstance(before_time, float):
query_params['max_ts'] = round(before_time * 1000)
if before_message_id is not None:
query_params['msgid'] = before_message_id
return list(cls._find(query_params)) | [
"def",
"find_by_client",
"(",
"cls",
",",
"from_client",
",",
"limit",
"=",
"None",
",",
"before_time",
"=",
"None",
",",
"before_message_id",
"=",
"None",
")",
":",
"# type: (str, Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]",
"query_params",
"=",
"{",
"}",
"# type: Dict[str, Any]",
"query_params",
"[",
"'from'",
"]",
"=",
"from_client",
"if",
"limit",
"is",
"not",
"None",
":",
"query_params",
"[",
"'limit'",
"]",
"=",
"limit",
"if",
"isinstance",
"(",
"before_time",
",",
"datetime",
")",
":",
"query_params",
"[",
"'max_ts'",
"]",
"=",
"round",
"(",
"before_time",
".",
"timestamp",
"(",
")",
"*",
"1000",
")",
"elif",
"isinstance",
"(",
"before_time",
",",
"six",
".",
"integer_types",
")",
"or",
"isinstance",
"(",
"before_time",
",",
"float",
")",
":",
"query_params",
"[",
"'max_ts'",
"]",
"=",
"round",
"(",
"before_time",
"*",
"1000",
")",
"if",
"before_message_id",
"is",
"not",
"None",
":",
"query_params",
"[",
"'msgid'",
"]",
"=",
"before_message_id",
"return",
"list",
"(",
"cls",
".",
"_find",
"(",
"query_params",
")",
")"
] | 获取某个 client 的聊天记录
:param from_client: 要获取聊天记录的 client id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param before_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param before_message_id: 起始的消息 id,使用时必须加上对应消息的时间 before_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录 | [
"获取某个",
"client",
"的聊天记录"
] | python | train |
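The docstring in the record above is Chinese; roughly, `find_by_client` fetches a client's message history, where `limit` caps the number of returned records (server default 100, maximum 1000), `before_time` returns records strictly earlier than the given timestamp, and `before_message_id` must be combined with the matching `before_time` to set the paging start point. A minimal usage sketch, assuming the app has been initialised with placeholder credentials and that the class is importable from `leancloud.message` as the record's path suggests:

# Illustrative sketch only; the app id, key and client id are placeholders.
import leancloud
from leancloud.message import Message

leancloud.init('APP-ID-placeholder', master_key='MASTER-KEY-placeholder')
# Up to 50 of the most recent messages sent from this client id.
for message in Message.find_by_client('client-id-123', limit=50):
    print(message)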
DeepHorizons/iarm | iarm/arm_instructions/data_movement.py | https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/data_movement.py#L11-L25 | def MOV(self, params):
"""
MOV Rx, Ry
MOV PC, Ry
Move the value of Ry into Rx or PC
"""
Rx, Ry = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
self.check_arguments(any_registers=(Rx, Ry))
def MOV_func():
self.register[Rx] = self.register[Ry]
return MOV_func | [
"def",
"MOV",
"(",
"self",
",",
"params",
")",
":",
"Rx",
",",
"Ry",
"=",
"self",
".",
"get_two_parameters",
"(",
"self",
".",
"TWO_PARAMETER_COMMA_SEPARATED",
",",
"params",
")",
"self",
".",
"check_arguments",
"(",
"any_registers",
"=",
"(",
"Rx",
",",
"Ry",
")",
")",
"def",
"MOV_func",
"(",
")",
":",
"self",
".",
"register",
"[",
"Rx",
"]",
"=",
"self",
".",
"register",
"[",
"Ry",
"]",
"return",
"MOV_func"
] | MOV Rx, Ry
MOV PC, Ry
Move the value of Ry into Rx or PC | [
"MOV",
"Rx",
"Ry",
"MOV",
"PC",
"Ry"
] | python | train |
dlintott/gns3-converter | gns3converter/converter.py | https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/converter.py#L318-L332 | def device_id_from_name(device_name, nodes):
"""
Get the device ID when given a device name
:param str device_name: device name
:param list nodes: list of nodes from :py:meth:`generate_nodes`
:return: device ID
:rtype: int
"""
device_id = None
for node in nodes:
if device_name == node['properties']['name']:
device_id = node['id']
break
return device_id | [
"def",
"device_id_from_name",
"(",
"device_name",
",",
"nodes",
")",
":",
"device_id",
"=",
"None",
"for",
"node",
"in",
"nodes",
":",
"if",
"device_name",
"==",
"node",
"[",
"'properties'",
"]",
"[",
"'name'",
"]",
":",
"device_id",
"=",
"node",
"[",
"'id'",
"]",
"break",
"return",
"device_id"
] | Get the device ID when given a device name
:param str device_name: device name
:param list nodes: list of nodes from :py:meth:`generate_nodes`
:return: device ID
:rtype: int | [
"Get",
"the",
"device",
"ID",
"when",
"given",
"a",
"device",
"name"
] | python | train |
google/apitools | apitools/base/py/encoding_helper.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/encoding_helper.py#L137-L147 | def DictToAdditionalPropertyMessage(properties, additional_property_type,
sort_items=False):
"""Convert the given dictionary to an AdditionalProperty message."""
items = properties.items()
if sort_items:
items = sorted(items)
map_ = []
for key, value in items:
map_.append(additional_property_type.AdditionalProperty(
key=key, value=value))
return additional_property_type(additionalProperties=map_) | [
"def",
"DictToAdditionalPropertyMessage",
"(",
"properties",
",",
"additional_property_type",
",",
"sort_items",
"=",
"False",
")",
":",
"items",
"=",
"properties",
".",
"items",
"(",
")",
"if",
"sort_items",
":",
"items",
"=",
"sorted",
"(",
"items",
")",
"map_",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"items",
":",
"map_",
".",
"append",
"(",
"additional_property_type",
".",
"AdditionalProperty",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
")",
")",
"return",
"additional_property_type",
"(",
"additionalProperties",
"=",
"map_",
")"
] | Convert the given dictionary to an AdditionalProperty message. | [
"Convert",
"the",
"given",
"dictionary",
"to",
"an",
"AdditionalProperty",
"message",
"."
] | python | train |
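A small sketch of `DictToAdditionalPropertyMessage` from the record above; the message class is defined inline purely for illustration (real code normally uses generated API message classes), and the `encoding_helper` import path mirrors the record's file path:

# Illustrative sketch only; MetadataValue is a hand-written stand-in for a
# generated message type that exposes a nested AdditionalProperty class.
from apitools.base.protorpclite import messages
from apitools.base.py import encoding_helper


class MetadataValue(messages.Message):
    class AdditionalProperty(messages.Message):
        key = messages.StringField(1)
        value = messages.StringField(2)

    additionalProperties = messages.MessageField(AdditionalProperty, 1,
                                                 repeated=True)


msg = encoding_helper.DictToAdditionalPropertyMessage(
    {'b': '2', 'a': '1'}, MetadataValue, sort_items=True)
for prop in msg.additionalProperties:
    print(prop.key, prop.value)      # prints a 1, then b 2 (sorted by key)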
jbeluch/xbmcswift2 | xbmcswift2/cli/create.py | https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/cli/create.py#L60-L63 | def validate_pluginid(value):
'''Returns True if the provided value is a valid plugin id'''
valid = string.ascii_letters + string.digits + '.'
return all(c in valid for c in value) | [
"def",
"validate_pluginid",
"(",
"value",
")",
":",
"valid",
"=",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
"+",
"'.'",
"return",
"all",
"(",
"c",
"in",
"valid",
"for",
"c",
"in",
"value",
")"
] | Returns True if the provided value is a valid plugin id | [
"Returns",
"True",
"if",
"the",
"provided",
"value",
"is",
"a",
"valid",
"pluglin",
"id"
] | python | train |
Hackerfleet/hfos | hfos/ui/auth.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/auth.py#L277-L302 | def _get_profile(self, user_account):
"""Retrieves a user's profile"""
try:
# TODO: Load active profile, not just any
user_profile = objectmodels['profile'].find_one(
{'owner': str(user_account.uuid)})
self.log("Profile: ", user_profile,
user_account.uuid, lvl=debug)
except Exception as e:
self.log("No profile due to error: ", e, type(e),
lvl=error)
user_profile = None
if not user_profile:
default = {
'uuid': std_uuid(),
'owner': user_account.uuid,
'userdata': {
'notes': 'Default profile of ' + user_account.name
}
}
user_profile = objectmodels['profile'](default)
user_profile.save()
return user_profile | [
"def",
"_get_profile",
"(",
"self",
",",
"user_account",
")",
":",
"try",
":",
"# TODO: Load active profile, not just any",
"user_profile",
"=",
"objectmodels",
"[",
"'profile'",
"]",
".",
"find_one",
"(",
"{",
"'owner'",
":",
"str",
"(",
"user_account",
".",
"uuid",
")",
"}",
")",
"self",
".",
"log",
"(",
"\"Profile: \"",
",",
"user_profile",
",",
"user_account",
".",
"uuid",
",",
"lvl",
"=",
"debug",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"No profile due to error: \"",
",",
"e",
",",
"type",
"(",
"e",
")",
",",
"lvl",
"=",
"error",
")",
"user_profile",
"=",
"None",
"if",
"not",
"user_profile",
":",
"default",
"=",
"{",
"'uuid'",
":",
"std_uuid",
"(",
")",
",",
"'owner'",
":",
"user_account",
".",
"uuid",
",",
"'userdata'",
":",
"{",
"'notes'",
":",
"'Default profile of '",
"+",
"user_account",
".",
"name",
"}",
"}",
"user_profile",
"=",
"objectmodels",
"[",
"'profile'",
"]",
"(",
"default",
")",
"user_profile",
".",
"save",
"(",
")",
"return",
"user_profile"
] | Retrieves a user's profile | [
"Retrieves",
"a",
"user",
"s",
"profile"
] | python | train |
saltstack/salt | salt/modules/pyenv.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pyenv.py#L105-L117 | def install(runas=None, path=None):
'''
Install pyenv systemwide
CLI Example:
.. code-block:: bash
salt '*' pyenv.install
'''
path = path or _pyenv_path(runas)
path = os.path.expanduser(path)
return _install_pyenv(path, runas) | [
"def",
"install",
"(",
"runas",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"path",
"or",
"_pyenv_path",
"(",
"runas",
")",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"return",
"_install_pyenv",
"(",
"path",
",",
"runas",
")"
] | Install pyenv systemwide
CLI Example:
.. code-block:: bash
salt '*' pyenv.install | [
"Install",
"pyenv",
"systemwide"
] | python | train |
rosenbrockc/fortpy | fortpy/isense/classes.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/classes.py#L155-L165 | def _type_description(self):
"""Gets the completion description for a TypeExecutable."""
        #This is a little trickier because the docstring is housed
#inside of the module that contains the actual executable.
#These TypeExecutables are just pointers.
iexec = self._element.target
if iexec is not None:
result = "method() | " + iexec.summary
else:
result = "Type Method: points to executable in module."
return result | [
"def",
"_type_description",
"(",
"self",
")",
":",
"#This is a little tricker because the docstring is housed",
"#inside of the module that contains the actual executable.",
"#These TypeExecutables are just pointers.",
"iexec",
"=",
"self",
".",
"_element",
".",
"target",
"if",
"iexec",
"is",
"not",
"None",
":",
"result",
"=",
"\"method() | \"",
"+",
"iexec",
".",
"summary",
"else",
":",
"result",
"=",
"\"Type Method: points to executable in module.\"",
"return",
"result"
] | Gets the completion description for a TypeExecutable. | [
"Gets",
"the",
"completion",
"description",
"for",
"a",
"TypeExecutable",
"."
] | python | train |
vtemian/buffpy | buffpy/managers/profiles.py | https://github.com/vtemian/buffpy/blob/6c9236fd3b6a8f9e2d70dbf1bc01529242b73075/buffpy/managers/profiles.py#L27-L40 | def filter(self, **kwargs):
'''
Based on some criteria, filter the profiles and return a new Profiles
Manager containing only the chosen items
        If the manager doesn't have any items, get all the profiles from Buffer
'''
if not len(self):
self.all()
new_list = filter(lambda item: [True for arg in kwargs if item[arg] == kwargs[arg]] != [], self)
return Profiles(self.api, new_list) | [
"def",
"filter",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"len",
"(",
"self",
")",
":",
"self",
".",
"all",
"(",
")",
"new_list",
"=",
"filter",
"(",
"lambda",
"item",
":",
"[",
"True",
"for",
"arg",
"in",
"kwargs",
"if",
"item",
"[",
"arg",
"]",
"==",
"kwargs",
"[",
"arg",
"]",
"]",
"!=",
"[",
"]",
",",
"self",
")",
"return",
"Profiles",
"(",
"self",
".",
"api",
",",
"new_list",
")"
] | Based on some criteria, filter the profiles and return a new Profiles
Manager containing only the chosen items
If the manager doesn't have any items, get all the profiles from Buffer | [
"Based",
"on",
"some",
"criteria",
"filter",
"the",
"profiles",
"and",
"return",
"a",
"new",
"Profiles",
"Manager",
"containing",
"only",
"the",
"chosen",
"items"
] | python | valid |
limodou/uliweb | uliweb/contrib/auth/__init__.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/auth/__init__.py#L40-L51 | def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
l = enc_password.split('$')
#only password of built-in user can split to 3
if len(l)==3:
algo, salt, hsh = l
return hsh == get_hexdigest(algo, salt, raw_password)
else:
return False | [
"def",
"check_password",
"(",
"raw_password",
",",
"enc_password",
")",
":",
"l",
"=",
"enc_password",
".",
"split",
"(",
"'$'",
")",
"#only password of built-in user can split to 3\r",
"if",
"len",
"(",
"l",
")",
"==",
"3",
":",
"algo",
",",
"salt",
",",
"hsh",
"=",
"l",
"return",
"hsh",
"==",
"get_hexdigest",
"(",
"algo",
",",
"salt",
",",
"raw_password",
")",
"else",
":",
"return",
"False"
] | Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes. | [
"Returns",
"a",
"boolean",
"of",
"whether",
"the",
"raw_password",
"was",
"correct",
".",
"Handles",
"encryption",
"formats",
"behind",
"the",
"scenes",
"."
] | python | train |
sdispater/eloquent | eloquent/schema/grammars/grammar.py | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/schema/grammars/grammar.py#L125-L135 | def _add_modifiers(self, sql, blueprint, column):
"""
        Add the column modifiers to the definition
"""
for modifier in self._modifiers:
method = '_modify_%s' % modifier
if hasattr(self, method):
sql += getattr(self, method)(blueprint, column)
return sql | [
"def",
"_add_modifiers",
"(",
"self",
",",
"sql",
",",
"blueprint",
",",
"column",
")",
":",
"for",
"modifier",
"in",
"self",
".",
"_modifiers",
":",
"method",
"=",
"'_modify_%s'",
"%",
"modifier",
"if",
"hasattr",
"(",
"self",
",",
"method",
")",
":",
"sql",
"+=",
"getattr",
"(",
"self",
",",
"method",
")",
"(",
"blueprint",
",",
"column",
")",
"return",
"sql"
] | Add the column modifiers to the definition | [
"Add",
"the",
"column",
"modifiers",
"to",
"the",
"deifinition"
] | python | train |
pylast/pylast | src/pylast/__init__.py | https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L976-L984 | def execute(self, cacheable=False):
"""Returns the XML DOM response of the POST Request from the server"""
if self.network.is_caching_enabled() and cacheable:
response = self._get_cached_response()
else:
response = self._download_response()
return minidom.parseString(_string(response).replace("opensearch:", "")) | [
"def",
"execute",
"(",
"self",
",",
"cacheable",
"=",
"False",
")",
":",
"if",
"self",
".",
"network",
".",
"is_caching_enabled",
"(",
")",
"and",
"cacheable",
":",
"response",
"=",
"self",
".",
"_get_cached_response",
"(",
")",
"else",
":",
"response",
"=",
"self",
".",
"_download_response",
"(",
")",
"return",
"minidom",
".",
"parseString",
"(",
"_string",
"(",
"response",
")",
".",
"replace",
"(",
"\"opensearch:\"",
",",
"\"\"",
")",
")"
] | Returns the XML DOM response of the POST Request from the server | [
"Returns",
"the",
"XML",
"DOM",
"response",
"of",
"the",
"POST",
"Request",
"from",
"the",
"server"
] | python | train |
pgjones/quart | quart/blueprints.py | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/blueprints.py#L220-L235 | def add_app_template_filter(self, func: Callable, name: Optional[str]=None) -> None:
"""Add an application wide template filter.
This is designed to be used on the blueprint directly, and
has the same arguments as
:meth:`~quart.Quart.add_template_filter`. An example usage,
.. code-block:: python
def filter():
...
blueprint = Blueprint(__name__)
blueprint.add_app_template_filter(filter)
"""
self.record_once(lambda state: state.register_template_filter(func, name)) | [
"def",
"add_app_template_filter",
"(",
"self",
",",
"func",
":",
"Callable",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"record_once",
"(",
"lambda",
"state",
":",
"state",
".",
"register_template_filter",
"(",
"func",
",",
"name",
")",
")"
] | Add an application wide template filter.
This is designed to be used on the blueprint directly, and
has the same arguments as
:meth:`~quart.Quart.add_template_filter`. An example usage,
.. code-block:: python
def filter():
...
blueprint = Blueprint(__name__)
blueprint.add_app_template_filter(filter) | [
"Add",
"an",
"application",
"wide",
"template",
"filter",
"."
] | python | train |
cosven/feeluown-core | fuocore/player.py | https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/player.py#L296-L303 | def state(self, value):
"""set player state, emit state changed signal
outer object should not set state directly,
use ``pause`` / ``resume`` / ``stop`` / ``play`` method instead.
"""
self._state = value
self.state_changed.emit(value) | [
"def",
"state",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_state",
"=",
"value",
"self",
".",
"state_changed",
".",
"emit",
"(",
"value",
")"
] | set player state, emit state changed signal
outer object should not set state directly,
use ``pause`` / ``resume`` / ``stop`` / ``play`` method instead. | [
"set",
"player",
"state",
"emit",
"state",
"changed",
"signal"
] | python | train |
spyder-ide/spyder | spyder/utils/workers.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/workers.py#L249-L289 | def _start(self, worker=None):
"""Start threads and check for inactive workers."""
if worker:
self._queue_workers.append(worker)
if self._queue_workers and self._running_threads < self._max_threads:
#print('Queue: {0} Running: {1} Workers: {2} '
# 'Threads: {3}'.format(len(self._queue_workers),
# self._running_threads,
# len(self._workers),
# len(self._threads)))
self._running_threads += 1
worker = self._queue_workers.popleft()
thread = QThread()
if isinstance(worker, PythonWorker):
worker.moveToThread(thread)
worker.sig_finished.connect(thread.quit)
thread.started.connect(worker._start)
thread.start()
elif isinstance(worker, ProcessWorker):
thread.quit()
worker._start()
self._threads.append(thread)
else:
self._timer.start()
if self._workers:
for w in self._workers:
if w.is_finished():
self._bag_collector.append(w)
self._workers.remove(w)
if self._threads:
for t in self._threads:
if t.isFinished():
self._threads.remove(t)
self._running_threads -= 1
if len(self._threads) == 0 and len(self._workers) == 0:
self._timer.stop()
self._timer_worker_delete.start() | [
"def",
"_start",
"(",
"self",
",",
"worker",
"=",
"None",
")",
":",
"if",
"worker",
":",
"self",
".",
"_queue_workers",
".",
"append",
"(",
"worker",
")",
"if",
"self",
".",
"_queue_workers",
"and",
"self",
".",
"_running_threads",
"<",
"self",
".",
"_max_threads",
":",
"#print('Queue: {0} Running: {1} Workers: {2} '",
"# 'Threads: {3}'.format(len(self._queue_workers),",
"# self._running_threads,",
"# len(self._workers),",
"# len(self._threads)))",
"self",
".",
"_running_threads",
"+=",
"1",
"worker",
"=",
"self",
".",
"_queue_workers",
".",
"popleft",
"(",
")",
"thread",
"=",
"QThread",
"(",
")",
"if",
"isinstance",
"(",
"worker",
",",
"PythonWorker",
")",
":",
"worker",
".",
"moveToThread",
"(",
"thread",
")",
"worker",
".",
"sig_finished",
".",
"connect",
"(",
"thread",
".",
"quit",
")",
"thread",
".",
"started",
".",
"connect",
"(",
"worker",
".",
"_start",
")",
"thread",
".",
"start",
"(",
")",
"elif",
"isinstance",
"(",
"worker",
",",
"ProcessWorker",
")",
":",
"thread",
".",
"quit",
"(",
")",
"worker",
".",
"_start",
"(",
")",
"self",
".",
"_threads",
".",
"append",
"(",
"thread",
")",
"else",
":",
"self",
".",
"_timer",
".",
"start",
"(",
")",
"if",
"self",
".",
"_workers",
":",
"for",
"w",
"in",
"self",
".",
"_workers",
":",
"if",
"w",
".",
"is_finished",
"(",
")",
":",
"self",
".",
"_bag_collector",
".",
"append",
"(",
"w",
")",
"self",
".",
"_workers",
".",
"remove",
"(",
"w",
")",
"if",
"self",
".",
"_threads",
":",
"for",
"t",
"in",
"self",
".",
"_threads",
":",
"if",
"t",
".",
"isFinished",
"(",
")",
":",
"self",
".",
"_threads",
".",
"remove",
"(",
"t",
")",
"self",
".",
"_running_threads",
"-=",
"1",
"if",
"len",
"(",
"self",
".",
"_threads",
")",
"==",
"0",
"and",
"len",
"(",
"self",
".",
"_workers",
")",
"==",
"0",
":",
"self",
".",
"_timer",
".",
"stop",
"(",
")",
"self",
".",
"_timer_worker_delete",
".",
"start",
"(",
")"
] | Start threads and check for inactive workers. | [
"Start",
"threads",
"and",
"check",
"for",
"inactive",
"workers",
"."
] | python | train |
ZELLMECHANIK-DRESDEN/dclab | dclab/downsampling.py | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L72-L167 | def downsample_grid(a, b, samples, ret_idx=False):
"""Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
samples = int(samples)
if samples and samples < a.size:
# The events to keep
keep = np.zeros_like(a, dtype=bool)
# 1. Produce evenly distributed samples
# Choosing grid-size:
# - large numbers tend to show actual structures of the sample,
# which is not desired for plotting
# - small numbers tend to result in too few samples and,
# in order to reach the desired samples, the data must be
# upsampled again.
# 300 is about the size of the plot in marker sizes and yields
# good results.
grid_size = 300
xpx = norm(a, a, b) * grid_size
ypx = norm(b, b, a) * grid_size
# The events on the grid to process
toproc = np.ones((grid_size, grid_size), dtype=bool)
for ii in range(xpx.size):
xi = xpx[ii]
yi = ypx[ii]
# filter for overlapping events
if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]:
toproc[int(xi-1), int(yi-1)] = False
# include event
keep[ii] = True
# 2. Make sure that we reach `samples` by adding or
# removing events.
diff = np.sum(keep) - samples
if diff > 0:
# Too many samples
rem_indices = np.where(keep)[0]
np.random.set_state(rs)
rem = np.random.choice(rem_indices,
size=diff,
replace=False)
keep[rem] = False
elif diff < 0:
# Not enough samples
add_indices = np.where(~keep)[0]
np.random.set_state(rs)
add = np.random.choice(add_indices,
size=abs(diff),
replace=False)
keep[add] = True
assert np.sum(keep) == samples, "sanity check"
asd = a[keep]
bsd = b[keep]
assert np.allclose(a[keep], asd, equal_nan=True), "sanity check"
assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check"
else:
keep = np.ones_like(a, dtype=bool)
asd = a
bsd = b
if ret_idx:
return asd, bsd, keep
else:
return asd, bsd | [
"def",
"downsample_grid",
"(",
"a",
",",
"b",
",",
"samples",
",",
"ret_idx",
"=",
"False",
")",
":",
"# fixed random state for this method",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
"=",
"47",
")",
".",
"get_state",
"(",
")",
"samples",
"=",
"int",
"(",
"samples",
")",
"if",
"samples",
"and",
"samples",
"<",
"a",
".",
"size",
":",
"# The events to keep",
"keep",
"=",
"np",
".",
"zeros_like",
"(",
"a",
",",
"dtype",
"=",
"bool",
")",
"# 1. Produce evenly distributed samples",
"# Choosing grid-size:",
"# - large numbers tend to show actual structures of the sample,",
"# which is not desired for plotting",
"# - small numbers tend will not result in too few samples and,",
"# in order to reach the desired samples, the data must be",
"# upsampled again.",
"# 300 is about the size of the plot in marker sizes and yields",
"# good results.",
"grid_size",
"=",
"300",
"xpx",
"=",
"norm",
"(",
"a",
",",
"a",
",",
"b",
")",
"*",
"grid_size",
"ypx",
"=",
"norm",
"(",
"b",
",",
"b",
",",
"a",
")",
"*",
"grid_size",
"# The events on the grid to process",
"toproc",
"=",
"np",
".",
"ones",
"(",
"(",
"grid_size",
",",
"grid_size",
")",
",",
"dtype",
"=",
"bool",
")",
"for",
"ii",
"in",
"range",
"(",
"xpx",
".",
"size",
")",
":",
"xi",
"=",
"xpx",
"[",
"ii",
"]",
"yi",
"=",
"ypx",
"[",
"ii",
"]",
"# filter for overlapping events",
"if",
"valid",
"(",
"xi",
",",
"yi",
")",
"and",
"toproc",
"[",
"int",
"(",
"xi",
"-",
"1",
")",
",",
"int",
"(",
"yi",
"-",
"1",
")",
"]",
":",
"toproc",
"[",
"int",
"(",
"xi",
"-",
"1",
")",
",",
"int",
"(",
"yi",
"-",
"1",
")",
"]",
"=",
"False",
"# include event",
"keep",
"[",
"ii",
"]",
"=",
"True",
"# 2. Make sure that we reach `samples` by adding or",
"# removing events.",
"diff",
"=",
"np",
".",
"sum",
"(",
"keep",
")",
"-",
"samples",
"if",
"diff",
">",
"0",
":",
"# Too many samples",
"rem_indices",
"=",
"np",
".",
"where",
"(",
"keep",
")",
"[",
"0",
"]",
"np",
".",
"random",
".",
"set_state",
"(",
"rs",
")",
"rem",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"rem_indices",
",",
"size",
"=",
"diff",
",",
"replace",
"=",
"False",
")",
"keep",
"[",
"rem",
"]",
"=",
"False",
"elif",
"diff",
"<",
"0",
":",
"# Not enough samples",
"add_indices",
"=",
"np",
".",
"where",
"(",
"~",
"keep",
")",
"[",
"0",
"]",
"np",
".",
"random",
".",
"set_state",
"(",
"rs",
")",
"add",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"add_indices",
",",
"size",
"=",
"abs",
"(",
"diff",
")",
",",
"replace",
"=",
"False",
")",
"keep",
"[",
"add",
"]",
"=",
"True",
"assert",
"np",
".",
"sum",
"(",
"keep",
")",
"==",
"samples",
",",
"\"sanity check\"",
"asd",
"=",
"a",
"[",
"keep",
"]",
"bsd",
"=",
"b",
"[",
"keep",
"]",
"assert",
"np",
".",
"allclose",
"(",
"a",
"[",
"keep",
"]",
",",
"asd",
",",
"equal_nan",
"=",
"True",
")",
",",
"\"sanity check\"",
"assert",
"np",
".",
"allclose",
"(",
"b",
"[",
"keep",
"]",
",",
"bsd",
",",
"equal_nan",
"=",
"True",
")",
",",
"\"sanity check\"",
"else",
":",
"keep",
"=",
"np",
".",
"ones_like",
"(",
"a",
",",
"dtype",
"=",
"bool",
")",
"asd",
"=",
"a",
"bsd",
"=",
"b",
"if",
"ret_idx",
":",
"return",
"asd",
",",
"bsd",
",",
"keep",
"else",
":",
"return",
"asd",
",",
"bsd"
] | Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa` | [
"Content",
"-",
"based",
"downsampling",
"for",
"faster",
"visualization"
] | python | train |
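A minimal usage sketch for downsample_grid, assuming the function is importable from dclab.downsampling as the file path above suggests; the array sizes and seed are arbitrary illustration values.

import numpy as np
from dclab.downsampling import downsample_grid  # assumed import location

rng = np.random.RandomState(0)
a = rng.normal(size=5000)  # first scatter-plot feature, one value per event
b = rng.normal(size=5000)  # second scatter-plot feature
dsa, dsb, idx = downsample_grid(a, b, samples=1000, ret_idx=True)
# exactly `samples` events survive, and `idx` maps them back into `a` and `b`
assert dsa.size == 1000 and idx.sum() == 1000
assert np.allclose(a[idx], dsa)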
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/util/fourier.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/fourier.py#L59-L69 | def fft_freqs(n_fft, fs):
"""Return frequencies for DFT
Parameters
----------
n_fft : int
Number of points in the FFT.
fs : float
The sampling rate.
"""
return np.arange(0, (n_fft // 2 + 1)) / float(n_fft) * float(fs) | [
"def",
"fft_freqs",
"(",
"n_fft",
",",
"fs",
")",
":",
"return",
"np",
".",
"arange",
"(",
"0",
",",
"(",
"n_fft",
"//",
"2",
"+",
"1",
")",
")",
"/",
"float",
"(",
"n_fft",
")",
"*",
"float",
"(",
"fs",
")"
] | Return frequencies for DFT
Parameters
----------
n_fft : int
Number of points in the FFT.
fs : float
The sampling rate. | [
"Return",
"frequencies",
"for",
"DFT"
] | python | train |
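A small check of fft_freqs, assuming it is importable from vispy.util.fourier as the path above suggests; for these arguments the result should coincide with NumPy's rfftfreq helper.

import numpy as np
from vispy.util.fourier import fft_freqs  # assumed import location

freqs = fft_freqs(n_fft=1024, fs=44100.0)
# arange(0, n_fft // 2 + 1) / n_fft * fs matches rfftfreq(n_fft, d=1/fs)
np.testing.assert_allclose(freqs, np.fft.rfftfreq(1024, d=1.0 / 44100.0))
print(freqs[0], freqs[-1])  # 0.0 and fs / 2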
smdabdoub/phylotoast | bin/diversity.py | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L54-L66 | def print_MannWhitneyU(div_calc):
"""
Compute the Mann-Whitney U test for unequal group sample sizes.
"""
try:
x = div_calc.values()[0].values()
y = div_calc.values()[1].values()
except:
return "Error setting up input arrays for Mann-Whitney U Test. Skipping "\
"significance testing."
T, p = stats.mannwhitneyu(x, y)
print "\nMann-Whitney U test statistic:", T
print "Two-tailed p-value: {}".format(2 * p) | [
"def",
"print_MannWhitneyU",
"(",
"div_calc",
")",
":",
"try",
":",
"x",
"=",
"div_calc",
".",
"values",
"(",
")",
"[",
"0",
"]",
".",
"values",
"(",
")",
"y",
"=",
"div_calc",
".",
"values",
"(",
")",
"[",
"1",
"]",
".",
"values",
"(",
")",
"except",
":",
"return",
"\"Error setting up input arrays for Mann-Whitney U Test. Skipping \"",
"\"significance testing.\"",
"T",
",",
"p",
"=",
"stats",
".",
"mannwhitneyu",
"(",
"x",
",",
"y",
")",
"print",
"\"\\nMann-Whitney U test statistic:\"",
",",
"T",
"print",
"\"Two-tailed p-value: {}\"",
".",
"format",
"(",
"2",
"*",
"p",
")"
] | Compute the Mann-Whitney U test for unequal group sample sizes. | [
"Compute",
"the",
"Mann",
"-",
"Whitney",
"U",
"test",
"for",
"unequal",
"group",
"sample",
"sizes",
"."
] | python | train |
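The snippet above is Python 2 (print statements, direct indexing of dict views). A rough Python 3 rendering of the same computation might look as follows; it is a sketch rather than the project's API, and scipy's mannwhitneyu defaults vary between versions, so the two-sided alternative is requested explicitly.

from scipy import stats

def print_mannwhitneyu_py3(div_calc):
    groups = list(div_calc.values())
    try:
        x = list(groups[0].values())
        y = list(groups[1].values())
    except (IndexError, AttributeError):
        return ("Error setting up input arrays for Mann-Whitney U Test. "
                "Skipping significance testing.")
    T, p = stats.mannwhitneyu(x, y, alternative="two-sided")
    print("\nMann-Whitney U test statistic:", T)
    print("Two-tailed p-value: {}".format(p))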
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/httpbakery/_client.py | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/_client.py#L123-L156 | def acquire_discharge(self, cav, payload):
''' Request a discharge macaroon from the caveat location
as an HTTP URL.
@param cav Third party {pymacaroons.Caveat} to be discharged.
@param payload External caveat data {bytes}.
@return The acquired macaroon {macaroonbakery.Macaroon}
'''
resp = self._acquire_discharge_with_token(cav, payload, None)
# TODO Fabrice what is the other http response possible ??
if resp.status_code == 200:
return bakery.Macaroon.from_dict(resp.json().get('Macaroon'))
cause = Error.from_dict(resp.json())
if cause.code != ERR_INTERACTION_REQUIRED:
raise DischargeError(cause.message)
if cause.info is None:
raise DischargeError(
'interaction-required response with no info: {}'.format(
resp.json())
)
loc = cav.location
if not loc.endswith('/'):
loc = loc + '/'
token, m = self._interact(loc, cause, payload)
if m is not None:
# We've acquired the macaroon directly via legacy interaction.
return m
# Try to acquire the discharge again, but this time with
# the token acquired by the interaction method.
resp = self._acquire_discharge_with_token(cav, payload, token)
if resp.status_code == 200:
return bakery.Macaroon.from_dict(resp.json().get('Macaroon'))
else:
raise DischargeError(
'discharge failed with code {}'.format(resp.status_code)) | [
"def",
"acquire_discharge",
"(",
"self",
",",
"cav",
",",
"payload",
")",
":",
"resp",
"=",
"self",
".",
"_acquire_discharge_with_token",
"(",
"cav",
",",
"payload",
",",
"None",
")",
"# TODO Fabrice what is the other http response possible ??",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"return",
"bakery",
".",
"Macaroon",
".",
"from_dict",
"(",
"resp",
".",
"json",
"(",
")",
".",
"get",
"(",
"'Macaroon'",
")",
")",
"cause",
"=",
"Error",
".",
"from_dict",
"(",
"resp",
".",
"json",
"(",
")",
")",
"if",
"cause",
".",
"code",
"!=",
"ERR_INTERACTION_REQUIRED",
":",
"raise",
"DischargeError",
"(",
"cause",
".",
"message",
")",
"if",
"cause",
".",
"info",
"is",
"None",
":",
"raise",
"DischargeError",
"(",
"'interaction-required response with no info: {}'",
".",
"format",
"(",
"resp",
".",
"json",
"(",
")",
")",
")",
"loc",
"=",
"cav",
".",
"location",
"if",
"not",
"loc",
".",
"endswith",
"(",
"'/'",
")",
":",
"loc",
"=",
"loc",
"+",
"'/'",
"token",
",",
"m",
"=",
"self",
".",
"_interact",
"(",
"loc",
",",
"cause",
",",
"payload",
")",
"if",
"m",
"is",
"not",
"None",
":",
"# We've acquired the macaroon directly via legacy interaction.",
"return",
"m",
"# Try to acquire the discharge again, but this time with",
"# the token acquired by the interaction method.",
"resp",
"=",
"self",
".",
"_acquire_discharge_with_token",
"(",
"cav",
",",
"payload",
",",
"token",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"return",
"bakery",
".",
"Macaroon",
".",
"from_dict",
"(",
"resp",
".",
"json",
"(",
")",
".",
"get",
"(",
"'Macaroon'",
")",
")",
"else",
":",
"raise",
"DischargeError",
"(",
"'discharge failed with code {}'",
".",
"format",
"(",
"resp",
".",
"status_code",
")",
")"
] | Request a discharge macaroon from the caveat location
as an HTTP URL.
@param cav Third party {pymacaroons.Caveat} to be discharged.
@param payload External caveat data {bytes}.
@return The acquired macaroon {macaroonbakery.Macaroon} | [
"Request",
"a",
"discharge",
"macaroon",
"from",
"the",
"caveat",
"location",
"as",
"an",
"HTTP",
"URL",
"."
] | python | train |
wummel/linkchecker | linkcheck/__init__.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/__init__.py#L114-L133 | def init_i18n (loc=None):
"""Initialize i18n with the configured locale dir. The environment
variable LOCPATH can also specify a locale dir.
@return: None
"""
if 'LOCPATH' in os.environ:
locdir = os.environ['LOCPATH']
else:
locdir = os.path.join(get_install_data(), 'share', 'locale')
i18n.init(configdata.name.lower(), locdir, loc=loc)
# install translated log level names
import logging
logging.addLevelName(logging.CRITICAL, _('CRITICAL'))
logging.addLevelName(logging.ERROR, _('ERROR'))
logging.addLevelName(logging.WARN, _('WARN'))
logging.addLevelName(logging.WARNING, _('WARNING'))
logging.addLevelName(logging.INFO, _('INFO'))
logging.addLevelName(logging.DEBUG, _('DEBUG'))
logging.addLevelName(logging.NOTSET, _('NOTSET')) | [
"def",
"init_i18n",
"(",
"loc",
"=",
"None",
")",
":",
"if",
"'LOCPATH'",
"in",
"os",
".",
"environ",
":",
"locdir",
"=",
"os",
".",
"environ",
"[",
"'LOCPATH'",
"]",
"else",
":",
"locdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_install_data",
"(",
")",
",",
"'share'",
",",
"'locale'",
")",
"i18n",
".",
"init",
"(",
"configdata",
".",
"name",
".",
"lower",
"(",
")",
",",
"locdir",
",",
"loc",
"=",
"loc",
")",
"# install translated log level names",
"import",
"logging",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"CRITICAL",
",",
"_",
"(",
"'CRITICAL'",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"ERROR",
",",
"_",
"(",
"'ERROR'",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"WARN",
",",
"_",
"(",
"'WARN'",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"WARNING",
",",
"_",
"(",
"'WARNING'",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"INFO",
",",
"_",
"(",
"'INFO'",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"DEBUG",
",",
"_",
"(",
"'DEBUG'",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"NOTSET",
",",
"_",
"(",
"'NOTSET'",
")",
")"
] | Initialize i18n with the configured locale dir. The environment
variable LOCPATH can also specify a locale dir.
@return: None | [
"Initialize",
"i18n",
"with",
"the",
"configured",
"locale",
"dir",
".",
"The",
"environment",
"variable",
"LOCPATH",
"can",
"also",
"specify",
"a",
"locale",
"dir",
"."
] | python | train |
mathiasertl/django-ca | ca/django_ca/models.py | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L598-L606 | def get_authority_key_identifier(self):
"""Return the AuthorityKeyIdentifier extension used in certificates signed by this CA."""
try:
ski = self.x509.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except x509.ExtensionNotFound:
return x509.AuthorityKeyIdentifier.from_issuer_public_key(self.x509.public_key())
else:
return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski) | [
"def",
"get_authority_key_identifier",
"(",
"self",
")",
":",
"try",
":",
"ski",
"=",
"self",
".",
"x509",
".",
"extensions",
".",
"get_extension_for_class",
"(",
"x509",
".",
"SubjectKeyIdentifier",
")",
"except",
"x509",
".",
"ExtensionNotFound",
":",
"return",
"x509",
".",
"AuthorityKeyIdentifier",
".",
"from_issuer_public_key",
"(",
"self",
".",
"x509",
".",
"public_key",
"(",
")",
")",
"else",
":",
"return",
"x509",
".",
"AuthorityKeyIdentifier",
".",
"from_issuer_subject_key_identifier",
"(",
"ski",
")"
] | Return the AuthorityKeyIdentifier extension used in certificates signed by this CA. | [
"Return",
"the",
"AuthorityKeyIdentifier",
"extension",
"used",
"in",
"certificates",
"signed",
"by",
"this",
"CA",
"."
] | python | train |
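A hedged sketch of where the returned extension is typically attached when issuing a child certificate with the cryptography package; the builder, private key and hash choice are illustrative assumptions, not part of the snippet above.

from cryptography.hazmat.primitives import hashes

def sign_child_certificate(ca, builder, ca_private_key):
    # `ca` is assumed to be the certificate authority model defined above
    aki = ca.get_authority_key_identifier()
    builder = builder.add_extension(aki, critical=False)
    return builder.sign(private_key=ca_private_key, algorithm=hashes.SHA256())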
dbcli/athenacli | athenacli/packages/prompt_utils.py | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/prompt_utils.py#L22-L27 | def confirm(*args, **kwargs):
"""Prompt for confirmation (yes/no) and handle any abort exceptions."""
try:
return click.confirm(*args, **kwargs)
except click.Abort:
return False | [
"def",
"confirm",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"click",
".",
"confirm",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"click",
".",
"Abort",
":",
"return",
"False"
] | Prompt for confirmation (yes/no) and handle any abort exceptions. | [
"Prompt",
"for",
"confirmation",
"(",
"yes",
"/",
"no",
")",
"and",
"handle",
"any",
"abort",
"exceptions",
"."
] | python | train |
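A minimal sketch of how such a wrapper is typically called from a click-based command; the point of the wrapper is that an aborted prompt (Ctrl-C) simply falls through to the False branch.

def maybe_drop_table(table_name):
    if confirm("Drop table {}?".format(table_name), default=False):
        print("dropping", table_name)
    else:
        print("cancelled")  # also reached when the user aborts the prompt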
spookylukey/django-paypal | paypal/pro/helpers.py | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/helpers.py#L66-L75 | def express_endpoint_for_token(token, commit=False):
"""
Returns the PayPal Express Checkout endpoint for a token.
Pass 'commit=True' if you will not prompt for confirmation when the user
returns to your site.
"""
pp_params = dict(token=token)
if commit:
pp_params['useraction'] = 'commit'
return express_endpoint() % urlencode(pp_params) | [
"def",
"express_endpoint_for_token",
"(",
"token",
",",
"commit",
"=",
"False",
")",
":",
"pp_params",
"=",
"dict",
"(",
"token",
"=",
"token",
")",
"if",
"commit",
":",
"pp_params",
"[",
"'useraction'",
"]",
"=",
"'commit'",
"return",
"express_endpoint",
"(",
")",
"%",
"urlencode",
"(",
"pp_params",
")"
] | Returns the PayPal Express Checkout endpoint for a token.
Pass 'commit=True' if you will not prompt for confirmation when the user
returns to your site. | [
"Returns",
"the",
"PayPal",
"Express",
"Checkout",
"endpoint",
"for",
"a",
"token",
".",
"Pass",
"commit",
"=",
"True",
"if",
"you",
"will",
"not",
"prompt",
"for",
"confirmation",
"when",
"the",
"user",
"returns",
"to",
"your",
"site",
"."
] | python | train |
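An illustration of the query-string handling only; the real base URL comes from express_endpoint(), which is not part of the snippet, so the sandbox template below is an assumption.

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

def fake_express_endpoint():
    # placeholder template standing in for express_endpoint()
    return "https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&%s"

print(fake_express_endpoint() % urlencode({"token": "EC-123", "useraction": "commit"}))
# e.g. ...webscr?cmd=_express-checkout&token=EC-123&useraction=commit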
apache/incubator-mxnet | python/mxnet/gluon/trainer.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L429-L456 | def save_states(self, fname):
"""Saves trainer states (e.g. optimizer, momentum) to a file.
Parameters
----------
fname : str
Path to output states file.
Note
----
`optimizer.param_dict`, which contains Parameter information (such as
`lr_mult` and `wd_mult`) will not be saved.
"""
assert self._optimizer is not None
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
if self._update_on_kvstore:
assert not self._params_to_init, "Cannot save trainer states when some " \
"parameters are not yet initialized in kvstore."
self._kvstore.save_optimizer_states(fname, dump_optimizer=True)
else:
with open(fname, 'wb') as fout:
fout.write(self._updaters[0].get_states(dump_optimizer=True)) | [
"def",
"save_states",
"(",
"self",
",",
"fname",
")",
":",
"assert",
"self",
".",
"_optimizer",
"is",
"not",
"None",
"if",
"not",
"self",
".",
"_kv_initialized",
":",
"self",
".",
"_init_kvstore",
"(",
")",
"if",
"self",
".",
"_params_to_init",
":",
"self",
".",
"_init_params",
"(",
")",
"if",
"self",
".",
"_update_on_kvstore",
":",
"assert",
"not",
"self",
".",
"_params_to_init",
",",
"\"Cannot save trainer states when some \"",
"\"parameters are not yet initialized in kvstore.\"",
"self",
".",
"_kvstore",
".",
"save_optimizer_states",
"(",
"fname",
",",
"dump_optimizer",
"=",
"True",
")",
"else",
":",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"self",
".",
"_updaters",
"[",
"0",
"]",
".",
"get_states",
"(",
"dump_optimizer",
"=",
"True",
")",
")"
] | Saves trainer states (e.g. optimizer, momentum) to a file.
Parameters
----------
fname : str
Path to output states file.
Note
----
`optimizer.param_dict`, which contains Parameter information (such as
`lr_mult` and `wd_mult`) will not be saved. | [
"Saves",
"trainer",
"states",
"(",
"e",
".",
"g",
".",
"optimizer",
"momentum",
")",
"to",
"a",
"file",
"."
] | python | train |
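A hedged checkpointing sketch with the Gluon API; the network and optimizer hyper-parameters are placeholders, and load_states is the counterpart used when resuming training.

from mxnet import gluon

net = gluon.nn.Dense(10)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})

# ... run a few training iterations, then checkpoint the optimizer state ...
trainer.save_states("trainer.states")
trainer.load_states("trainer.states")  # restores momentum etc. when resuming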
praekeltfoundation/seed-stage-based-messaging | subscriptions/views.py | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/views.py#L125-L153 | def post(self, request, *args, **kwargs):
""" Validates subscription data before creating Subscription message
"""
# Ensure that we check for the 'data' key in the request object before
# attempting to reference it
if "data" in request.data:
# This is a workaround for JSONField not liking blank/null refs
if "metadata" not in request.data["data"]:
request.data["data"]["metadata"] = {}
if "initial_sequence_number" not in request.data["data"]:
request.data["data"]["initial_sequence_number"] = request.data[
"data"
].get("next_sequence_number")
subscription = SubscriptionSerializer(data=request.data["data"])
if subscription.is_valid():
subscription.save()
# Return
status = 201
accepted = {"accepted": True}
return Response(accepted, status=status)
else:
status = 400
return Response(subscription.errors, status=status)
else:
status = 400
message = {"data": ["This field is required."]}
return Response(message, status=status) | [
"def",
"post",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Ensure that we check for the 'data' key in the request object before",
"# attempting to reference it",
"if",
"\"data\"",
"in",
"request",
".",
"data",
":",
"# This is a workaround for JSONField not liking blank/null refs",
"if",
"\"metadata\"",
"not",
"in",
"request",
".",
"data",
"[",
"\"data\"",
"]",
":",
"request",
".",
"data",
"[",
"\"data\"",
"]",
"[",
"\"metadata\"",
"]",
"=",
"{",
"}",
"if",
"\"initial_sequence_number\"",
"not",
"in",
"request",
".",
"data",
"[",
"\"data\"",
"]",
":",
"request",
".",
"data",
"[",
"\"data\"",
"]",
"[",
"\"initial_sequence_number\"",
"]",
"=",
"request",
".",
"data",
"[",
"\"data\"",
"]",
".",
"get",
"(",
"\"next_sequence_number\"",
")",
"subscription",
"=",
"SubscriptionSerializer",
"(",
"data",
"=",
"request",
".",
"data",
"[",
"\"data\"",
"]",
")",
"if",
"subscription",
".",
"is_valid",
"(",
")",
":",
"subscription",
".",
"save",
"(",
")",
"# Return",
"status",
"=",
"201",
"accepted",
"=",
"{",
"\"accepted\"",
":",
"True",
"}",
"return",
"Response",
"(",
"accepted",
",",
"status",
"=",
"status",
")",
"else",
":",
"status",
"=",
"400",
"return",
"Response",
"(",
"subscription",
".",
"errors",
",",
"status",
"=",
"status",
")",
"else",
":",
"status",
"=",
"400",
"message",
"=",
"{",
"\"data\"",
":",
"[",
"\"This field is required.\"",
"]",
"}",
"return",
"Response",
"(",
"message",
",",
"status",
"=",
"status",
")"
] | Validates subscription data before creating Subscription message | [
"Validates",
"subscription",
"data",
"before",
"creating",
"Subscription",
"message"
] | python | train |
hobson/pug-invest | pug/invest/sandbox/sim.py | https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L103-L140 | def chart(
symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
start=datetime.datetime(2008, 1, 1),
end=datetime.datetime(2009, 12, 31), # data stops at 2013/1/1
normalize=True,
):
"""Display a graph of the price history for the list of ticker symbols provided
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
"""
start = util.normalize_date(start or datetime.date(2008, 1, 1))
end = util.normalize_date(end or datetime.date(2009, 12, 31))
symbols = [s.upper() for s in symbols]
timeofday = datetime.timedelta(hours=16)
timestamps = du.getNYSEdays(start, end, timeofday)
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = da.get_data(timestamps, symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
na_price = d_data['close'].values
if normalize:
na_price /= na_price[0, :]
plt.clf()
plt.plot(timestamps, na_price)
plt.legend(symbols)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
plt.savefig('chart.pdf', format='pdf')
plt.grid(True)
plt.show()
return na_price | [
"def",
"chart",
"(",
"symbols",
"=",
"(",
"\"AAPL\"",
",",
"\"GLD\"",
",",
"\"GOOG\"",
",",
"\"$SPX\"",
",",
"\"XOM\"",
",",
"\"msft\"",
")",
",",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"2008",
",",
"1",
",",
"1",
")",
",",
"end",
"=",
"datetime",
".",
"datetime",
"(",
"2009",
",",
"12",
",",
"31",
")",
",",
"# data stops at 2013/1/1",
"normalize",
"=",
"True",
",",
")",
":",
"start",
"=",
"util",
".",
"normalize_date",
"(",
"start",
"or",
"datetime",
".",
"date",
"(",
"2008",
",",
"1",
",",
"1",
")",
")",
"end",
"=",
"util",
".",
"normalize_date",
"(",
"end",
"or",
"datetime",
".",
"date",
"(",
"2009",
",",
"12",
",",
"31",
")",
")",
"symbols",
"=",
"[",
"s",
".",
"upper",
"(",
")",
"for",
"s",
"in",
"symbols",
"]",
"timeofday",
"=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"16",
")",
"timestamps",
"=",
"du",
".",
"getNYSEdays",
"(",
"start",
",",
"end",
",",
"timeofday",
")",
"ls_keys",
"=",
"[",
"'open'",
",",
"'high'",
",",
"'low'",
",",
"'close'",
",",
"'volume'",
",",
"'actual_close'",
"]",
"ldf_data",
"=",
"da",
".",
"get_data",
"(",
"timestamps",
",",
"symbols",
",",
"ls_keys",
")",
"d_data",
"=",
"dict",
"(",
"zip",
"(",
"ls_keys",
",",
"ldf_data",
")",
")",
"na_price",
"=",
"d_data",
"[",
"'close'",
"]",
".",
"values",
"if",
"normalize",
":",
"na_price",
"/=",
"na_price",
"[",
"0",
",",
":",
"]",
"plt",
".",
"clf",
"(",
")",
"plt",
".",
"plot",
"(",
"timestamps",
",",
"na_price",
")",
"plt",
".",
"legend",
"(",
"symbols",
")",
"plt",
".",
"ylabel",
"(",
"'Adjusted Close'",
")",
"plt",
".",
"xlabel",
"(",
"'Date'",
")",
"plt",
".",
"savefig",
"(",
"'chart.pdf'",
",",
"format",
"=",
"'pdf'",
")",
"plt",
".",
"grid",
"(",
"True",
")",
"plt",
".",
"show",
"(",
")",
"return",
"na_price"
] | Display a graph of the price history for the list of ticker symbols provided
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series. | [
"Display",
"a",
"graph",
"of",
"the",
"price",
"history",
"for",
"the",
"list",
"of",
"ticker",
"symbols",
"provided"
] | python | train |
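A usage sketch for chart(); the import path is taken from the file path above and is an assumption, and the QSTK-style data access layer it relies on must be available.

import datetime
from pug.invest.sandbox.sim import chart  # assumed import location

prices = chart(
    symbols=("AAPL", "GOOG", "$SPX"),
    start=datetime.datetime(2008, 1, 1),
    end=datetime.datetime(2009, 12, 31),
    normalize=True,
)
# `prices` holds the adjusted closes (time x symbol), scaled to 1.0 on the first day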
HPAC/matchpy | matchpy/matching/one_to_one.py | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/one_to_one.py#L179-L216 | def _build_full_partition(
optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression], operation: Operation
) -> List[Sequence[Expression]]:
"""Distribute subject operands among pattern operands.
Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands each sequence
variable gets assigned).
"""
i = 0
var_index = 0
opt_index = 0
result = []
for operand in op_iter(operation):
wrap_associative = False
if isinstance(operand, Wildcard):
count = operand.min_count if operand.optional is None else 0
if not operand.fixed_size or isinstance(operation, AssociativeOperation):
count += sequence_var_partition[var_index]
var_index += 1
wrap_associative = operand.fixed_size and operand.min_count
elif operand.optional is not None:
count = optional_parts[opt_index]
opt_index += 1
else:
count = 1
operand_expressions = list(op_iter(subjects))[i:i + count]
i += count
if wrap_associative and len(operand_expressions) > wrap_associative:
fixed = wrap_associative - 1
operand_expressions = tuple(operand_expressions[:fixed]) + (
create_operation_expression(operation, operand_expressions[fixed:]),
)
result.append(operand_expressions)
return result | [
"def",
"_build_full_partition",
"(",
"optional_parts",
",",
"sequence_var_partition",
":",
"Sequence",
"[",
"int",
"]",
",",
"subjects",
":",
"Sequence",
"[",
"Expression",
"]",
",",
"operation",
":",
"Operation",
")",
"->",
"List",
"[",
"Sequence",
"[",
"Expression",
"]",
"]",
":",
"i",
"=",
"0",
"var_index",
"=",
"0",
"opt_index",
"=",
"0",
"result",
"=",
"[",
"]",
"for",
"operand",
"in",
"op_iter",
"(",
"operation",
")",
":",
"wrap_associative",
"=",
"False",
"if",
"isinstance",
"(",
"operand",
",",
"Wildcard",
")",
":",
"count",
"=",
"operand",
".",
"min_count",
"if",
"operand",
".",
"optional",
"is",
"None",
"else",
"0",
"if",
"not",
"operand",
".",
"fixed_size",
"or",
"isinstance",
"(",
"operation",
",",
"AssociativeOperation",
")",
":",
"count",
"+=",
"sequence_var_partition",
"[",
"var_index",
"]",
"var_index",
"+=",
"1",
"wrap_associative",
"=",
"operand",
".",
"fixed_size",
"and",
"operand",
".",
"min_count",
"elif",
"operand",
".",
"optional",
"is",
"not",
"None",
":",
"count",
"=",
"optional_parts",
"[",
"opt_index",
"]",
"opt_index",
"+=",
"1",
"else",
":",
"count",
"=",
"1",
"operand_expressions",
"=",
"list",
"(",
"op_iter",
"(",
"subjects",
")",
")",
"[",
"i",
":",
"i",
"+",
"count",
"]",
"i",
"+=",
"count",
"if",
"wrap_associative",
"and",
"len",
"(",
"operand_expressions",
")",
">",
"wrap_associative",
":",
"fixed",
"=",
"wrap_associative",
"-",
"1",
"operand_expressions",
"=",
"tuple",
"(",
"operand_expressions",
"[",
":",
"fixed",
"]",
")",
"+",
"(",
"create_operation_expression",
"(",
"operation",
",",
"operand_expressions",
"[",
"fixed",
":",
"]",
")",
",",
")",
"result",
".",
"append",
"(",
"operand_expressions",
")",
"return",
"result"
] | Distribute subject operands among pattern operands.
Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands each sequence
variable gets assigned). | [
"Distribute",
"subject",
"operands",
"among",
"pattern",
"operands",
"."
] | python | train |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1508-L1585 | def _scroll_without_linewrapping(self, ui_content, width, height, cli):
"""
Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`.
"""
cursor_position = ui_content.cursor_position or Point(0, 0)
# Without line wrapping, we will never have to scroll vertically inside
# a single line.
self.vertical_scroll_2 = 0
if ui_content.line_count == 0:
self.vertical_scroll = 0
self.horizontal_scroll = 0
return
else:
current_line_text = token_list_to_text(ui_content.get_line(cursor_position.y))
def do_scroll(current_scroll, scroll_offset_start, scroll_offset_end,
cursor_pos, window_size, content_size):
" Scrolling algorithm. Used for both horizontal and vertical scrolling. "
# Calculate the scroll offset to apply.
# This can obviously never be more than half the screen size. Also, when the
# cursor appears at the top or bottom, we don't apply the offset.
scroll_offset_start = int(min(scroll_offset_start, window_size / 2, cursor_pos))
scroll_offset_end = int(min(scroll_offset_end, window_size / 2,
content_size - 1 - cursor_pos))
# Prevent negative scroll offsets.
if current_scroll < 0:
current_scroll = 0
# Scroll back if we scrolled too much and there's still space to show more of the document.
if (not self.allow_scroll_beyond_bottom(cli) and
current_scroll > content_size - window_size):
current_scroll = max(0, content_size - window_size)
# Scroll up if cursor is before visible part.
if current_scroll > cursor_pos - scroll_offset_start:
current_scroll = max(0, cursor_pos - scroll_offset_start)
# Scroll down if cursor is after visible part.
if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end:
current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end
return current_scroll
# When a preferred scroll is given, take that first into account.
if self.get_vertical_scroll:
self.vertical_scroll = self.get_vertical_scroll(self)
assert isinstance(self.vertical_scroll, int)
if self.get_horizontal_scroll:
self.horizontal_scroll = self.get_horizontal_scroll(self)
assert isinstance(self.horizontal_scroll, int)
# Update horizontal/vertical scroll to make sure that the cursor
# remains visible.
offsets = self.scroll_offsets
self.vertical_scroll = do_scroll(
current_scroll=self.vertical_scroll,
scroll_offset_start=offsets.top,
scroll_offset_end=offsets.bottom,
cursor_pos=ui_content.cursor_position.y,
window_size=height,
content_size=ui_content.line_count)
self.horizontal_scroll = do_scroll(
current_scroll=self.horizontal_scroll,
scroll_offset_start=offsets.left,
scroll_offset_end=offsets.right,
cursor_pos=get_cwidth(current_line_text[:ui_content.cursor_position.x]),
window_size=width,
# We can only analyse the current line. Calculating the width off
# all the lines is too expensive.
content_size=max(get_cwidth(current_line_text), self.horizontal_scroll + width)) | [
"def",
"_scroll_without_linewrapping",
"(",
"self",
",",
"ui_content",
",",
"width",
",",
"height",
",",
"cli",
")",
":",
"cursor_position",
"=",
"ui_content",
".",
"cursor_position",
"or",
"Point",
"(",
"0",
",",
"0",
")",
"# Without line wrapping, we will never have to scroll vertically inside",
"# a single line.",
"self",
".",
"vertical_scroll_2",
"=",
"0",
"if",
"ui_content",
".",
"line_count",
"==",
"0",
":",
"self",
".",
"vertical_scroll",
"=",
"0",
"self",
".",
"horizontal_scroll",
"=",
"0",
"return",
"else",
":",
"current_line_text",
"=",
"token_list_to_text",
"(",
"ui_content",
".",
"get_line",
"(",
"cursor_position",
".",
"y",
")",
")",
"def",
"do_scroll",
"(",
"current_scroll",
",",
"scroll_offset_start",
",",
"scroll_offset_end",
",",
"cursor_pos",
",",
"window_size",
",",
"content_size",
")",
":",
"\" Scrolling algorithm. Used for both horizontal and vertical scrolling. \"",
"# Calculate the scroll offset to apply.",
"# This can obviously never be more than have the screen size. Also, when the",
"# cursor appears at the top or bottom, we don't apply the offset.",
"scroll_offset_start",
"=",
"int",
"(",
"min",
"(",
"scroll_offset_start",
",",
"window_size",
"/",
"2",
",",
"cursor_pos",
")",
")",
"scroll_offset_end",
"=",
"int",
"(",
"min",
"(",
"scroll_offset_end",
",",
"window_size",
"/",
"2",
",",
"content_size",
"-",
"1",
"-",
"cursor_pos",
")",
")",
"# Prevent negative scroll offsets.",
"if",
"current_scroll",
"<",
"0",
":",
"current_scroll",
"=",
"0",
"# Scroll back if we scrolled to much and there's still space to show more of the document.",
"if",
"(",
"not",
"self",
".",
"allow_scroll_beyond_bottom",
"(",
"cli",
")",
"and",
"current_scroll",
">",
"content_size",
"-",
"window_size",
")",
":",
"current_scroll",
"=",
"max",
"(",
"0",
",",
"content_size",
"-",
"window_size",
")",
"# Scroll up if cursor is before visible part.",
"if",
"current_scroll",
">",
"cursor_pos",
"-",
"scroll_offset_start",
":",
"current_scroll",
"=",
"max",
"(",
"0",
",",
"cursor_pos",
"-",
"scroll_offset_start",
")",
"# Scroll down if cursor is after visible part.",
"if",
"current_scroll",
"<",
"(",
"cursor_pos",
"+",
"1",
")",
"-",
"window_size",
"+",
"scroll_offset_end",
":",
"current_scroll",
"=",
"(",
"cursor_pos",
"+",
"1",
")",
"-",
"window_size",
"+",
"scroll_offset_end",
"return",
"current_scroll",
"# When a preferred scroll is given, take that first into account.",
"if",
"self",
".",
"get_vertical_scroll",
":",
"self",
".",
"vertical_scroll",
"=",
"self",
".",
"get_vertical_scroll",
"(",
"self",
")",
"assert",
"isinstance",
"(",
"self",
".",
"vertical_scroll",
",",
"int",
")",
"if",
"self",
".",
"get_horizontal_scroll",
":",
"self",
".",
"horizontal_scroll",
"=",
"self",
".",
"get_horizontal_scroll",
"(",
"self",
")",
"assert",
"isinstance",
"(",
"self",
".",
"horizontal_scroll",
",",
"int",
")",
"# Update horizontal/vertical scroll to make sure that the cursor",
"# remains visible.",
"offsets",
"=",
"self",
".",
"scroll_offsets",
"self",
".",
"vertical_scroll",
"=",
"do_scroll",
"(",
"current_scroll",
"=",
"self",
".",
"vertical_scroll",
",",
"scroll_offset_start",
"=",
"offsets",
".",
"top",
",",
"scroll_offset_end",
"=",
"offsets",
".",
"bottom",
",",
"cursor_pos",
"=",
"ui_content",
".",
"cursor_position",
".",
"y",
",",
"window_size",
"=",
"height",
",",
"content_size",
"=",
"ui_content",
".",
"line_count",
")",
"self",
".",
"horizontal_scroll",
"=",
"do_scroll",
"(",
"current_scroll",
"=",
"self",
".",
"horizontal_scroll",
",",
"scroll_offset_start",
"=",
"offsets",
".",
"left",
",",
"scroll_offset_end",
"=",
"offsets",
".",
"right",
",",
"cursor_pos",
"=",
"get_cwidth",
"(",
"current_line_text",
"[",
":",
"ui_content",
".",
"cursor_position",
".",
"x",
"]",
")",
",",
"window_size",
"=",
"width",
",",
"# We can only analyse the current line. Calculating the width off",
"# all the lines is too expensive.",
"content_size",
"=",
"max",
"(",
"get_cwidth",
"(",
"current_line_text",
")",
",",
"self",
".",
"horizontal_scroll",
"+",
"width",
")",
")"
] | Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`. | [
"Scroll",
"to",
"make",
"sure",
"the",
"cursor",
"position",
"is",
"visible",
"and",
"that",
"we",
"maintain",
"the",
"requested",
"scroll",
"offset",
"."
] | python | train |
twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_statistics.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_statistics.py#L36-L47 | def get(self):
"""
Constructs a TaskQueueStatisticsContext
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext
"""
return TaskQueueStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_queue_sid=self._solution['task_queue_sid'],
) | [
"def",
"get",
"(",
"self",
")",
":",
"return",
"TaskQueueStatisticsContext",
"(",
"self",
".",
"_version",
",",
"workspace_sid",
"=",
"self",
".",
"_solution",
"[",
"'workspace_sid'",
"]",
",",
"task_queue_sid",
"=",
"self",
".",
"_solution",
"[",
"'task_queue_sid'",
"]",
",",
")"
] | Constructs a TaskQueueStatisticsContext
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext | [
"Constructs",
"a",
"TaskQueueStatisticsContext"
] | python | train |
fabioz/PyDev.Debugger | third_party/pep8/lib2to3/lib2to3/pgen2/conv.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pgen2/conv.py#L47-L51 | def run(self, graminit_h, graminit_c):
"""Load the grammar tables from the text files written by pgen."""
self.parse_graminit_h(graminit_h)
self.parse_graminit_c(graminit_c)
self.finish_off() | [
"def",
"run",
"(",
"self",
",",
"graminit_h",
",",
"graminit_c",
")",
":",
"self",
".",
"parse_graminit_h",
"(",
"graminit_h",
")",
"self",
".",
"parse_graminit_c",
"(",
"graminit_c",
")",
"self",
".",
"finish_off",
"(",
")"
] | Load the grammar tables from the text files written by pgen. | [
"Load",
"the",
"grammar",
"tables",
"from",
"the",
"text",
"files",
"written",
"by",
"pgen",
"."
] | python | train |
ANTsX/ANTsPy | ants/utils/crop_image.py | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/crop_image.py#L14-L56 | def crop_image(image, label_image=None, label=1):
"""
Use a label image to crop a smaller ANTsImage from within a larger ANTsImage
ANTsR function: `cropImage`
Arguments
---------
image : ANTsImage
image to crop
label_image : ANTsImage
image with label values. If not supplied, estimated from data.
label : integer
the label value to use
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> fi = ants.image_read( ants.get_ants_data('r16') )
>>> cropped = ants.crop_image(fi)
>>> cropped = ants.crop_image(fi, fi, 100 )
"""
inpixeltype = image.pixeltype
ndim = image.dimension
if image.pixeltype != 'float':
image = image.clone('float')
if label_image is None:
label_image = get_mask(image)
if label_image.pixeltype != 'float':
label_image = label_image.clone('float')
libfn = utils.get_lib_fn('cropImageF%i' % ndim)
itkimage = libfn(image.pointer, label_image.pointer, label, 0, [], [])
return iio.ANTsImage(pixeltype='float', dimension=ndim,
components=image.components, pointer=itkimage).clone(inpixeltype) | [
"def",
"crop_image",
"(",
"image",
",",
"label_image",
"=",
"None",
",",
"label",
"=",
"1",
")",
":",
"inpixeltype",
"=",
"image",
".",
"pixeltype",
"ndim",
"=",
"image",
".",
"dimension",
"if",
"image",
".",
"pixeltype",
"!=",
"'float'",
":",
"image",
"=",
"image",
".",
"clone",
"(",
"'float'",
")",
"if",
"label_image",
"is",
"None",
":",
"label_image",
"=",
"get_mask",
"(",
"image",
")",
"if",
"label_image",
".",
"pixeltype",
"!=",
"'float'",
":",
"label_image",
"=",
"label_image",
".",
"clone",
"(",
"'float'",
")",
"libfn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'cropImageF%i'",
"%",
"ndim",
")",
"itkimage",
"=",
"libfn",
"(",
"image",
".",
"pointer",
",",
"label_image",
".",
"pointer",
",",
"label",
",",
"0",
",",
"[",
"]",
",",
"[",
"]",
")",
"return",
"iio",
".",
"ANTsImage",
"(",
"pixeltype",
"=",
"'float'",
",",
"dimension",
"=",
"ndim",
",",
"components",
"=",
"image",
".",
"components",
",",
"pointer",
"=",
"itkimage",
")",
".",
"clone",
"(",
"inpixeltype",
")"
] | Use a label image to crop a smaller ANTsImage from within a larger ANTsImage
ANTsR function: `cropImage`
Arguments
---------
image : ANTsImage
image to crop
label_image : ANTsImage
image with label values. If not supplied, estimated from data.
label : integer
the label value to use
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> fi = ants.image_read( ants.get_ants_data('r16') )
>>> cropped = ants.crop_image(fi)
>>> cropped = ants.crop_image(fi, fi, 100 ) | [
"Use",
"a",
"label",
"image",
"to",
"crop",
"a",
"smaller",
"ANTsImage",
"from",
"within",
"a",
"larger",
"ANTsImage"
] | python | train |
lago-project/lago | lago/plugins/vm.py | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/plugins/vm.py#L464-L468 | def extract_paths(self, paths, *args, **kwargs):
"""
Thin method that just uses the provider
"""
return self.provider.extract_paths(paths, *args, **kwargs) | [
"def",
"extract_paths",
"(",
"self",
",",
"paths",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"provider",
".",
"extract_paths",
"(",
"paths",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Thin method that just uses the provider | [
"Thin",
"method",
"that",
"just",
"uses",
"the",
"provider"
] | python | train |
totalgood/nlpia | src/nlpia/book/examples/ch09.py | https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L479-L486 | def create_dicts(data):
""" Modified from Keras LSTM example"""
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char | [
"def",
"create_dicts",
"(",
"data",
")",
":",
"chars",
"=",
"set",
"(",
")",
"for",
"sample",
"in",
"data",
":",
"chars",
".",
"update",
"(",
"set",
"(",
"sample",
")",
")",
"char_indices",
"=",
"dict",
"(",
"(",
"c",
",",
"i",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"chars",
")",
")",
"indices_char",
"=",
"dict",
"(",
"(",
"i",
",",
"c",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"chars",
")",
")",
"return",
"char_indices",
",",
"indices_char"
] | Modified from Keras LSTM example | [
"Modified",
"from",
"Keras",
"LSTM",
"example"
] | python | train |
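A small illustration of the two lookup tables built above; set iteration order is arbitrary, so the particular integer assigned to each character may differ between runs.

char_indices, indices_char = create_dicts(["hello", "world"])
assert set(char_indices) == set("helowrd")
assert all(indices_char[i] == c for c, i in char_indices.items())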
KarchinLab/probabilistic2020 | prob2020/python/permutation.py | https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/permutation.py#L99-L207 | def position_permutation(obs_stat,
context_counts,
context_to_mut,
seq_context,
gene_seq,
gene_vest=None,
num_permutations=10000,
stop_criteria=100,
pseudo_count=0,
max_batch=25000):
"""Performs null-permutations for position-based mutation statistics
in a single gene.
Parameters
----------
obs_stat : tuple, (recur ct, entropy, delta entropy, mean vest)
tuple containing the observed statistics
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
stop_criteria : int
stop after stop_criteria iterations are more significant
than the observed statistic.
pseudo_count : int, default: 0
Pseudo-count for number of recurrent missense mutations for each
permutation for the null distribution. Increasing pseudo_count
makes the statistical test more stringent.
Returns
-------
num_recur_list : list
list of recurrent mutation counts under the null
entropy_list : list
list of position entropy values under the null
"""
# get contexts and somatic base
mycontexts = context_counts.index.tolist()
somatic_base = [base
for one_context in mycontexts
for base in context_to_mut[one_context]]
# calculate the # of batches for simulations
max_batch = min(num_permutations, max_batch)
num_batches = num_permutations // max_batch
remainder = num_permutations % max_batch
batch_sizes = [max_batch] * num_batches
if remainder:
batch_sizes += [remainder]
obs_recur, obs_ent, obs_delta_ent, obs_vest = obs_stat
num_sim = 0 # number of simulations
null_num_recur_ct, null_entropy_ct, null_delta_entropy_ct, null_vest_ct = 0, 0, 0, 0
for j, batch_size in enumerate(batch_sizes):
# stop iterations if reached sufficient precision
if null_vest_ct >= stop_criteria and null_entropy_ct >= stop_criteria:
break
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
batch_size)
tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
# calculate position-based statistics as a result of random positions
for i, row in enumerate(tmp_mut_pos):
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# calculate position info
tmp_recur_ct, tmp_entropy, tmp_delta_entropy, _ = cutils.calc_pos_info(tmp_mut_info['Codon Pos'],
tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
pseudo_count=pseudo_count,
is_obs=0)
# get vest scores
if gene_vest:
tmp_vest = scores.compute_vest_stat(gene_vest,
tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
tmp_mut_info['Codon Pos'])
else:
tmp_vest = 0.0
# update empirical null distribution counts
if tmp_entropy-utils.epsilon <= obs_ent: null_entropy_ct += 1
if tmp_vest+utils.epsilon >= obs_vest: null_vest_ct += 1
# stop iterations if reached sufficient precision
if null_vest_ct >= stop_criteria and null_entropy_ct >= stop_criteria:
break
# update the number of simulations
num_sim += i+1
# calculate p-value from empirical null-distribution
ent_pval = float(null_entropy_ct) / (num_sim)
vest_pval = float(null_vest_ct) / (num_sim)
return ent_pval, vest_pval | [
"def",
"position_permutation",
"(",
"obs_stat",
",",
"context_counts",
",",
"context_to_mut",
",",
"seq_context",
",",
"gene_seq",
",",
"gene_vest",
"=",
"None",
",",
"num_permutations",
"=",
"10000",
",",
"stop_criteria",
"=",
"100",
",",
"pseudo_count",
"=",
"0",
",",
"max_batch",
"=",
"25000",
")",
":",
"# get contexts and somatic base",
"mycontexts",
"=",
"context_counts",
".",
"index",
".",
"tolist",
"(",
")",
"somatic_base",
"=",
"[",
"base",
"for",
"one_context",
"in",
"mycontexts",
"for",
"base",
"in",
"context_to_mut",
"[",
"one_context",
"]",
"]",
"# calculate the # of batches for simulations",
"max_batch",
"=",
"min",
"(",
"num_permutations",
",",
"max_batch",
")",
"num_batches",
"=",
"num_permutations",
"//",
"max_batch",
"remainder",
"=",
"num_permutations",
"%",
"max_batch",
"batch_sizes",
"=",
"[",
"max_batch",
"]",
"*",
"num_batches",
"if",
"remainder",
":",
"batch_sizes",
"+=",
"[",
"remainder",
"]",
"obs_recur",
",",
"obs_ent",
",",
"obs_delta_ent",
",",
"obs_vest",
"=",
"obs_stat",
"num_sim",
"=",
"0",
"# number of simulations",
"null_num_recur_ct",
",",
"null_entropy_ct",
",",
"null_delta_entropy_ct",
",",
"null_vest_ct",
"=",
"0",
",",
"0",
",",
"0",
",",
"0",
"for",
"j",
",",
"batch_size",
"in",
"enumerate",
"(",
"batch_sizes",
")",
":",
"# stop iterations if reached sufficient precision",
"if",
"null_vest_ct",
">=",
"stop_criteria",
"and",
"null_entropy_ct",
">=",
"stop_criteria",
":",
"break",
"# get random positions determined by sequence context",
"tmp_contxt_pos",
"=",
"seq_context",
".",
"random_pos",
"(",
"context_counts",
".",
"iteritems",
"(",
")",
",",
"batch_size",
")",
"tmp_mut_pos",
"=",
"np",
".",
"hstack",
"(",
"pos_array",
"for",
"base",
",",
"pos_array",
"in",
"tmp_contxt_pos",
")",
"# calculate position-based statistics as a result of random positions",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"tmp_mut_pos",
")",
":",
"# get info about mutations",
"tmp_mut_info",
"=",
"mc",
".",
"get_aa_mut_info",
"(",
"row",
",",
"somatic_base",
",",
"gene_seq",
")",
"# calculate position info",
"tmp_recur_ct",
",",
"tmp_entropy",
",",
"tmp_delta_entropy",
",",
"_",
"=",
"cutils",
".",
"calc_pos_info",
"(",
"tmp_mut_info",
"[",
"'Codon Pos'",
"]",
",",
"tmp_mut_info",
"[",
"'Reference AA'",
"]",
",",
"tmp_mut_info",
"[",
"'Somatic AA'",
"]",
",",
"pseudo_count",
"=",
"pseudo_count",
",",
"is_obs",
"=",
"0",
")",
"# get vest scores",
"if",
"gene_vest",
":",
"tmp_vest",
"=",
"scores",
".",
"compute_vest_stat",
"(",
"gene_vest",
",",
"tmp_mut_info",
"[",
"'Reference AA'",
"]",
",",
"tmp_mut_info",
"[",
"'Somatic AA'",
"]",
",",
"tmp_mut_info",
"[",
"'Codon Pos'",
"]",
")",
"else",
":",
"tmp_vest",
"=",
"0.0",
"# update empirical null distribution counts",
"if",
"tmp_entropy",
"-",
"utils",
".",
"epsilon",
"<=",
"obs_ent",
":",
"null_entropy_ct",
"+=",
"1",
"if",
"tmp_vest",
"+",
"utils",
".",
"epsilon",
">=",
"obs_vest",
":",
"null_vest_ct",
"+=",
"1",
"# stop iterations if reached sufficient precision",
"if",
"null_vest_ct",
">=",
"stop_criteria",
"and",
"null_entropy_ct",
">=",
"stop_criteria",
":",
"break",
"# update the number of simulations",
"num_sim",
"+=",
"i",
"+",
"1",
"# calculate p-value from empirical null-distribution",
"ent_pval",
"=",
"float",
"(",
"null_entropy_ct",
")",
"/",
"(",
"num_sim",
")",
"vest_pval",
"=",
"float",
"(",
"null_vest_ct",
")",
"/",
"(",
"num_sim",
")",
"return",
"ent_pval",
",",
"vest_pval"
] | Performs null-permutations for position-based mutation statistics
in a single gene.
Parameters
----------
obs_stat : tuple, (recur ct, entropy, delta entropy, mean vest)
tuple containing the observed statistics
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
stop_criteria : int
stop after stop_criteria iterations are more significant
than the observed statistic.
pseudo_count : int, default: 0
Pseudo-count for number of recurrent missense mutations for each
permutation for the null distribution. Increasing pseudo_count
makes the statistical test more stringent.
Returns
-------
num_recur_list : list
list of recurrent mutation counts under the null
entropy_list : list
list of position entropy values under the null | [
"Performs",
"null",
"-",
"permutations",
"for",
"position",
"-",
"based",
"mutation",
"statistics",
"in",
"a",
"single",
"gene",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xviewwidget/xviewprofilemanagermenu.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofilemanagermenu.py#L68-L79 | def saveProfileAs( self ):
"""
Saves the current profile as a new profile to the manager.
"""
name, ok = QInputDialog.getText(self, 'Create Profile', 'Name:')
if ( not name ):
return
manager = self.parent()
prof = manager.viewWidget().saveProfile()
prof.setName(nativestring(name))
self.parent().addProfile(prof) | [
"def",
"saveProfileAs",
"(",
"self",
")",
":",
"name",
",",
"ok",
"=",
"QInputDialog",
".",
"getText",
"(",
"self",
",",
"'Create Profile'",
",",
"'Name:'",
")",
"if",
"(",
"not",
"name",
")",
":",
"return",
"manager",
"=",
"self",
".",
"parent",
"(",
")",
"prof",
"=",
"manager",
".",
"viewWidget",
"(",
")",
".",
"saveProfile",
"(",
")",
"prof",
".",
"setName",
"(",
"nativestring",
"(",
"name",
")",
")",
"self",
".",
"parent",
"(",
")",
".",
"addProfile",
"(",
"prof",
")"
] | Saves the current profile as a new profile to the manager. | [
"Saves",
"the",
"current",
"profile",
"as",
"a",
"new",
"profile",
"to",
"the",
"manager",
"."
] | python | train |
apacha/OMR-Datasets | omrdatasettools/converters/csv_to_crop_object_conversion.py | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/converters/csv_to_crop_object_conversion.py#L14-L48 | def convert_csv_annotations_to_cropobject(annotations_path: str, image_path: str) -> List[CropObject]:
"""
Converts a normalized dataset of objects into crop-objects.
:param annotations_path: Path to the csv-file that contains bounding boxes in the following
format for a single image:
image_name,top,left,bottom,right,class_name,confidence
CVC-MUSCIMA_W-01_N-10_D-ideal_1.png,138.93,2286.36,185.20,2316.52,8th_flag,1.00
:param image_path: Image that is being described by the file given under the annotations_path
:return: A list of CropObjects as being used by the MUSCIMA++ dataset including the binary image-masks
"""
annotations = pd.read_csv(annotations_path)
image = Image.open(image_path) # type: Image.Image
crop_objects = []
node_id = 0
for index, annotation in annotations.iterrows():
# Annotation example:
# image_name,top,left,bottom,right,class_name,confidence
# CVC-MUSCIMA_W-01_N-10_D-ideal_1.png,138.93,2286.36,185.20,2316.52,8th_flag,1.00
image_name = annotation["image_name"]
class_name = annotation["class_name"]
top = round(annotation["top"])
left = round(annotation["left"])
width = round(annotation["right"] - annotation["left"])
heigth = round(annotation["bottom"] - annotation["top"])
crop_object = CropObject(node_id, class_name, top, left, width, heigth)
crop_object.set_doc(image_name)
crop_image = image.crop((left, top, crop_object.right, crop_object.bottom)).convert("1")
# noinspection PyTypeChecker
cropped_image_mask = np.array(crop_image)
crop_object.set_mask(cropped_image_mask)
crop_objects.append(crop_object)
node_id += 1
return crop_objects | [
"def",
"convert_csv_annotations_to_cropobject",
"(",
"annotations_path",
":",
"str",
",",
"image_path",
":",
"str",
")",
"->",
"List",
"[",
"CropObject",
"]",
":",
"annotations",
"=",
"pd",
".",
"read_csv",
"(",
"annotations_path",
")",
"image",
"=",
"Image",
".",
"open",
"(",
"image_path",
")",
"# type: Image.Image",
"crop_objects",
"=",
"[",
"]",
"node_id",
"=",
"0",
"for",
"index",
",",
"annotation",
"in",
"annotations",
".",
"iterrows",
"(",
")",
":",
"# Annotation example:",
"# image_name,top,left,bottom,right,class_name,confidence",
"# CVC-MUSCIMA_W-01_N-10_D-ideal_1.png,138.93,2286.36,185.20,2316.52,8th_flag,1.00",
"image_name",
"=",
"annotation",
"[",
"\"image_name\"",
"]",
"class_name",
"=",
"annotation",
"[",
"\"class_name\"",
"]",
"top",
"=",
"round",
"(",
"annotation",
"[",
"\"top\"",
"]",
")",
"left",
"=",
"round",
"(",
"annotation",
"[",
"\"left\"",
"]",
")",
"width",
"=",
"round",
"(",
"annotation",
"[",
"\"right\"",
"]",
"-",
"annotation",
"[",
"\"left\"",
"]",
")",
"heigth",
"=",
"round",
"(",
"annotation",
"[",
"\"bottom\"",
"]",
"-",
"annotation",
"[",
"\"top\"",
"]",
")",
"crop_object",
"=",
"CropObject",
"(",
"node_id",
",",
"class_name",
",",
"top",
",",
"left",
",",
"width",
",",
"heigth",
")",
"crop_object",
".",
"set_doc",
"(",
"image_name",
")",
"crop_image",
"=",
"image",
".",
"crop",
"(",
"(",
"left",
",",
"top",
",",
"crop_object",
".",
"right",
",",
"crop_object",
".",
"bottom",
")",
")",
".",
"convert",
"(",
"\"1\"",
")",
"# noinspection PyTypeChecker",
"cropped_image_mask",
"=",
"np",
".",
"array",
"(",
"crop_image",
")",
"crop_object",
".",
"set_mask",
"(",
"cropped_image_mask",
")",
"crop_objects",
".",
"append",
"(",
"crop_object",
")",
"node_id",
"+=",
"1",
"return",
"crop_objects"
] | Converts a normalized dataset of objects into crop-objects.
:param annotations_path: Path to the csv-file that contains bounding boxes in the following
format for a single image:
image_name,top,left,bottom,right,class_name,confidence
CVC-MUSCIMA_W-01_N-10_D-ideal_1.png,138.93,2286.36,185.20,2316.52,8th_flag,1.00
:param image_path: Image that is being described by the file given under the annotations_path
:return: A list of CropObjects as being used by the MUSCIMA++ dataset including the binary image-masks | [
"Converts",
"a",
"normalized",
"dataset",
"of",
"objects",
"into",
"crop",
"-",
"objects",
".",
":",
"param",
"annotations_path",
":",
"Path",
"to",
"the",
"csv",
"-",
"file",
"that",
"contains",
"bounding",
"boxes",
"in",
"the",
"following",
"format",
"for",
"a",
"single",
"image",
":",
"image_name",
"top",
"left",
"bottom",
"right",
"class_name",
"confidence",
"CVC",
"-",
"MUSCIMA_W",
"-",
"01_N",
"-",
"10_D",
"-",
"ideal_1",
".",
"png",
"138",
".",
"93",
"2286",
".",
"36",
"185",
".",
"20",
"2316",
".",
"52",
"8th_flag",
"1",
".",
"00",
":",
"param",
"image_path",
":",
"Image",
"that",
"is",
"being",
"described",
"by",
"the",
"file",
"given",
"under",
"the",
"annotations_path",
":",
"return",
":",
"A",
"list",
"of",
"CropObjects",
"as",
"being",
"used",
"by",
"the",
"MUSCIMA",
"++",
"dataset",
"including",
"the",
"binary",
"image",
"-",
"masks"
] | python | train |
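The heart of the conversion is cropping each annotated bounding box out of the page image and binarizing it into the mask that CropObject.set_mask expects. A standalone sketch of just that step, using a synthetic image instead of a CVC-MUSCIMA page and made-up box coordinates:

import numpy as np
from PIL import Image

# Synthetic 100x100 white page with a black blob standing in for a glyph.
page = Image.new("L", (100, 100), color=255)
page.paste(0, (40, 30, 60, 55))

# One CSV row's geometry: top, left, bottom, right (floats in the annotation file).
top, left, bottom, right = 28.7, 38.2, 56.9, 61.4
top, left, bottom, right = round(top), round(left), round(bottom), round(right)

# Crop the box and binarize it; the boolean array is what becomes the object's mask.
crop = page.crop((left, top, right, bottom)).convert("1")
mask = np.array(crop)
print(mask.shape)  # (height, width) == (bottom - top, right - left)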
ThreatConnect-Inc/tcex | tcex/tcex_playbook.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_playbook.py#L1089-L1101 | def indicator_arrays(tc_entity_array):
"""Convert TCEntityArray to Indicator Type dictionary.
Args:
        tc_entity_array (list): The TCEntityArray to convert.
Returns:
(dictionary): Dictionary containing arrays of indicators for each indicator type.
"""
type_dict = {}
for ea in tc_entity_array:
type_dict.setdefault(ea['type'], []).append(ea['value'])
return type_dict | [
"def",
"indicator_arrays",
"(",
"tc_entity_array",
")",
":",
"type_dict",
"=",
"{",
"}",
"for",
"ea",
"in",
"tc_entity_array",
":",
"type_dict",
".",
"setdefault",
"(",
"ea",
"[",
"'type'",
"]",
",",
"[",
"]",
")",
".",
"append",
"(",
"ea",
"[",
"'value'",
"]",
")",
"return",
"type_dict"
] | Convert TCEntityArray to Indicator Type dictionary.
Args:
        tc_entity_array (list): The TCEntityArray to convert.
Returns:
(dictionary): Dictionary containing arrays of indicators for each indicator type. | [
"Convert",
"TCEntityArray",
"to",
"Indicator",
"Type",
"dictionary",
"."
] | python | train |
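For illustration, a TCEntityArray is just a list of {'type', 'value'} dictionaries, so the grouping the function performs can be shown inline with made-up indicators:

tc_entity_array = [
    {'type': 'Address', 'value': '1.2.3.4'},
    {'type': 'Host', 'value': 'example.com'},
    {'type': 'Address', 'value': '5.6.7.8'},
]

type_dict = {}
for ea in tc_entity_array:
    # setdefault creates the per-type list the first time a type is seen.
    type_dict.setdefault(ea['type'], []).append(ea['value'])

print(type_dict)  # {'Address': ['1.2.3.4', '5.6.7.8'], 'Host': ['example.com']}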
svetlyak40wt/python-repr | src/magic_repr/__init__.py | https://github.com/svetlyak40wt/python-repr/blob/49e358e77b97d74f29f4977ea009ab2d64c254e8/src/magic_repr/__init__.py#L125-L177 | def format_value(value):
"""This function should return unicode representation of the value
"""
value_id = id(value)
if value_id in recursion_breaker.processed:
return u'<recursion>'
recursion_breaker.processed.add(value_id)
try:
if isinstance(value, six.binary_type):
# suppose, all byte strings are in unicode
# don't know if everybody in the world uses anything else?
return u"'{0}'".format(value.decode('utf-8'))
elif isinstance(value, six.text_type):
return u"u'{0}'".format(value)
elif isinstance(value, (list, tuple)):
# long lists or lists with multiline items
# will be shown vertically
values = list(map(format_value, value))
result = serialize_list(u'[', values, delimiter=u',') + u']'
return force_unicode(result)
elif isinstance(value, dict):
items = six.iteritems(value)
# format each key/value pair as a text,
# calling format_value recursively
items = (tuple(map(format_value, item))
for item in items)
items = list(items)
# sort by keys for readability
items.sort()
# for each item value
items = [
serialize_text(
u'{0}: '.format(key),
item_value)
for key, item_value in items]
# and serialize these pieces as a list, enclosing
# them into a curve brackets
result = serialize_list(u'{', items, delimiter=u',') + u'}'
return force_unicode(result)
return force_unicode(repr(value))
finally:
recursion_breaker.processed.remove(value_id) | [
"def",
"format_value",
"(",
"value",
")",
":",
"value_id",
"=",
"id",
"(",
"value",
")",
"if",
"value_id",
"in",
"recursion_breaker",
".",
"processed",
":",
"return",
"u'<recursion>'",
"recursion_breaker",
".",
"processed",
".",
"add",
"(",
"value_id",
")",
"try",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"binary_type",
")",
":",
"# suppose, all byte strings are in unicode",
"# don't know if everybody in the world uses anything else?",
"return",
"u\"'{0}'\"",
".",
"format",
"(",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"six",
".",
"text_type",
")",
":",
"return",
"u\"u'{0}'\"",
".",
"format",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# long lists or lists with multiline items",
"# will be shown vertically",
"values",
"=",
"list",
"(",
"map",
"(",
"format_value",
",",
"value",
")",
")",
"result",
"=",
"serialize_list",
"(",
"u'['",
",",
"values",
",",
"delimiter",
"=",
"u','",
")",
"+",
"u']'",
"return",
"force_unicode",
"(",
"result",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"items",
"=",
"six",
".",
"iteritems",
"(",
"value",
")",
"# format each key/value pair as a text,",
"# calling format_value recursively",
"items",
"=",
"(",
"tuple",
"(",
"map",
"(",
"format_value",
",",
"item",
")",
")",
"for",
"item",
"in",
"items",
")",
"items",
"=",
"list",
"(",
"items",
")",
"# sort by keys for readability",
"items",
".",
"sort",
"(",
")",
"# for each item value",
"items",
"=",
"[",
"serialize_text",
"(",
"u'{0}: '",
".",
"format",
"(",
"key",
")",
",",
"item_value",
")",
"for",
"key",
",",
"item_value",
"in",
"items",
"]",
"# and serialize these pieces as a list, enclosing",
"# them into a curve brackets",
"result",
"=",
"serialize_list",
"(",
"u'{'",
",",
"items",
",",
"delimiter",
"=",
"u','",
")",
"+",
"u'}'",
"return",
"force_unicode",
"(",
"result",
")",
"return",
"force_unicode",
"(",
"repr",
"(",
"value",
")",
")",
"finally",
":",
"recursion_breaker",
".",
"processed",
".",
"remove",
"(",
"value_id",
")"
] | This function should return unicode representation of the value | [
"This",
"function",
"should",
"return",
"unicode",
"representation",
"of",
"the",
"value"
] | python | valid |
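The interesting part of format_value is the recursion breaker: container ids are recorded while they are being formatted so self-referencing structures render as '<recursion>' instead of overflowing the stack. A simplified standalone sketch of that idea (not magic_repr's actual API, and without its multiline serialization helpers):

def safe_format(value, _seen=None):
    _seen = set() if _seen is None else _seen
    if id(value) in _seen:
        return '<recursion>'
    if isinstance(value, dict):
        inner = _seen | {id(value)}
        items = sorted('%s: %s' % (safe_format(k, inner), safe_format(v, inner))
                       for k, v in value.items())
        return '{' + ', '.join(items) + '}'
    if isinstance(value, (list, tuple)):
        inner = _seen | {id(value)}
        return '[' + ', '.join(safe_format(v, inner) for v in value) + ']'
    return repr(value)

cycle = []
cycle.append(cycle)
print(safe_format(cycle))             # [<recursion>]
print(safe_format({'b': 2, 'a': 1}))  # {'a': 1, 'b': 2}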
hhatto/pgmagick | pgmagick/api.py | https://github.com/hhatto/pgmagick/blob/5dce5fa4681400b4c059431ad69233e6a3e5799a/pgmagick/api.py#L868-L875 | def matte(self, x, y, paint_method):
"""
:param paint_method: 'point' or 'replace' or 'floodfill' or
'filltoborder' or 'reset'
:type paint_method: str or pgmagick.PaintMethod
"""
paint_method = _convert_paintmethod(paint_method)
self.drawer.append(pgmagick.DrawableMatte(x, y, paint_method)) | [
"def",
"matte",
"(",
"self",
",",
"x",
",",
"y",
",",
"paint_method",
")",
":",
"paint_method",
"=",
"_convert_paintmethod",
"(",
"paint_method",
")",
"self",
".",
"drawer",
".",
"append",
"(",
"pgmagick",
".",
"DrawableMatte",
"(",
"x",
",",
"y",
",",
"paint_method",
")",
")"
] | :param paint_method: 'point' or 'replace' or 'floodfill' or
'filltoborder' or 'reset'
:type paint_method: str or pgmagick.PaintMethod | [
":",
"param",
"paint_method",
":",
"point",
"or",
"replace",
"or",
"floodfill",
"or",
"filltoborder",
"or",
"reset",
":",
"type",
"paint_method",
":",
"str",
"or",
"pgmagick",
".",
"PaintMethod"
] | python | valid |
gem/oq-engine | openquake/calculators/export/hazard.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/hazard.py#L223-L235 | def export_hmaps_csv(key, dest, sitemesh, array, comment):
"""
Export the hazard maps of the given realization into CSV.
:param key: output_type and export_type
:param dest: name of the exported file
:param sitemesh: site collection
:param array: a composite array of dtype hmap_dt
:param comment: comment to use as header of the exported CSV file
"""
curves = util.compose_arrays(sitemesh, array)
writers.write_csv(dest, curves, comment=comment)
return [dest] | [
"def",
"export_hmaps_csv",
"(",
"key",
",",
"dest",
",",
"sitemesh",
",",
"array",
",",
"comment",
")",
":",
"curves",
"=",
"util",
".",
"compose_arrays",
"(",
"sitemesh",
",",
"array",
")",
"writers",
".",
"write_csv",
"(",
"dest",
",",
"curves",
",",
"comment",
"=",
"comment",
")",
"return",
"[",
"dest",
"]"
] | Export the hazard maps of the given realization into CSV.
:param key: output_type and export_type
:param dest: name of the exported file
:param sitemesh: site collection
:param array: a composite array of dtype hmap_dt
:param comment: comment to use as header of the exported CSV file | [
"Export",
"the",
"hazard",
"maps",
"of",
"the",
"given",
"realization",
"into",
"CSV",
"."
] | python | train |
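The exporter pairs each site's coordinates with its hazard-map values and writes them as one CSV row under a comment header. A rough equivalent using only numpy structured arrays and the csv module (field names and values here are invented; openquake's util.compose_arrays and writers.write_csv do this with more bookkeeping):

import csv
import numpy as np

sites = np.array([(13.40, 42.30), (13.50, 42.40)],
                 dtype=[('lon', float), ('lat', float)])
hmap = np.array([(0.21,), (0.35,)], dtype=[('PGA-0.1', float)])

with open('hazard_map.csv', 'w', newline='') as f:
    f.write('# kind=mean, investigation_time=50.0\n')  # comment header line
    writer = csv.writer(f)
    writer.writerow(sites.dtype.names + hmap.dtype.names)
    for site, values in zip(sites, hmap):
        writer.writerow(list(site) + list(values))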
cocaine/cocaine-tools | cocaine/tools/cli.py | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/cli.py#L245-L250 | def loop(self):
"""Lazy event loop initialization"""
if not self._loop:
self._loop = IOLoop.current()
return self._loop
return self._loop | [
"def",
"loop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_loop",
":",
"self",
".",
"_loop",
"=",
"IOLoop",
".",
"current",
"(",
")",
"return",
"self",
".",
"_loop",
"return",
"self",
".",
"_loop"
] | Lazy event loop initialization | [
"Lazy",
"event",
"loop",
"initialization"
] | python | train |
saltstack/salt | salt/modules/pip.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pip.py#L259-L274 | def _resolve_requirements_chain(requirements):
'''
Return an array of requirements file paths that can be used to complete
the no_chown==False && user != None conundrum
'''
chain = []
if isinstance(requirements, six.string_types):
requirements = [requirements]
for req_file in requirements:
chain.append(req_file)
chain.extend(_resolve_requirements_chain(_find_req(req_file)))
return chain | [
"def",
"_resolve_requirements_chain",
"(",
"requirements",
")",
":",
"chain",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"requirements",
",",
"six",
".",
"string_types",
")",
":",
"requirements",
"=",
"[",
"requirements",
"]",
"for",
"req_file",
"in",
"requirements",
":",
"chain",
".",
"append",
"(",
"req_file",
")",
"chain",
".",
"extend",
"(",
"_resolve_requirements_chain",
"(",
"_find_req",
"(",
"req_file",
")",
")",
")",
"return",
"chain"
] | Return an array of requirements file paths that can be used to complete
the no_chown==False && user != None conundrum | [
"Return",
"an",
"array",
"of",
"requirements",
"file",
"paths",
"that",
"can",
"be",
"used",
"to",
"complete",
"the",
"no_chown",
"==",
"False",
"&&",
"user",
"!",
"=",
"None",
"conundrum"
] | python | train |
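The recursion simply follows every nested "-r other.txt" include until the whole chain of requirements files has been collected. A self-contained sketch with a simple stand-in for _find_req (salt's real helper handles more pip options than the bare -r/--requirement matched here):

import os
import re

def find_included_requirements(req_file):
    # Return paths referenced via "-r file" / "--requirement file" lines.
    included = []
    base = os.path.dirname(req_file)
    with open(req_file) as handle:
        for line in handle:
            match = re.match(r'\s*(?:-r|--requirement)\s+(\S+)', line)
            if match:
                included.append(os.path.join(base, match.group(1)))
    return included

def resolve_requirements_chain(requirements):
    if isinstance(requirements, str):
        requirements = [requirements]
    chain = []
    for req_file in requirements:
        chain.append(req_file)
        chain.extend(resolve_requirements_chain(find_included_requirements(req_file)))
    return chain

# resolve_requirements_chain('requirements.txt') would list it plus every file it pulls in.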
casebeer/audiogen | audiogen/util.py | https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/util.py#L252-L266 | def channelize(gen, channels):
'''
Break multi-channel generator into one sub-generator per channel
Takes a generator producing n-tuples of samples and returns n generators,
each producing samples for a single channel.
Since multi-channel generators are the only reasonable way to synchronize samples
across channels, and the sampler functions only take tuples of generators,
you must use this function to process synchronized streams for output.
'''
def pick(g, channel):
for samples in g:
yield samples[channel]
return [pick(gen_copy, channel) for channel, gen_copy in enumerate(itertools.tee(gen, channels))] | [
"def",
"channelize",
"(",
"gen",
",",
"channels",
")",
":",
"def",
"pick",
"(",
"g",
",",
"channel",
")",
":",
"for",
"samples",
"in",
"g",
":",
"yield",
"samples",
"[",
"channel",
"]",
"return",
"[",
"pick",
"(",
"gen_copy",
",",
"channel",
")",
"for",
"channel",
",",
"gen_copy",
"in",
"enumerate",
"(",
"itertools",
".",
"tee",
"(",
"gen",
",",
"channels",
")",
")",
"]"
] | Break multi-channel generator into one sub-generator per channel
Takes a generator producing n-tuples of samples and returns n generators,
each producing samples for a single channel.
Since multi-channel generators are the only reasonable way to synchronize samples
across channels, and the sampler functions only take tuples of generators,
you must use this function to process synchronized streams for output. | [
"Break",
"multi",
"-",
"channel",
"generator",
"into",
"one",
"sub",
"-",
"generator",
"per",
"channel"
] | python | train |
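A usage sketch, restating the function so it runs standalone: a toy two-channel generator of (left, right) pairs is split into two independent single-channel generators via itertools.tee:

import itertools

def channelize(gen, channels):
    def pick(g, channel):
        for samples in g:
            yield samples[channel]
    return [pick(copy, ch) for ch, copy in enumerate(itertools.tee(gen, channels))]

def stereo():
    # Toy synchronized source: 4 frames of (left, right) samples.
    for n in range(4):
        yield (n, -n)

left, right = channelize(stereo(), 2)
print(list(left))   # [0, 1, 2, 3]
print(list(right))  # [0, -1, -2, -3]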
quantmind/pulsar | pulsar/apps/wsgi/content.py | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/content.py#L168-L182 | def http_response(self, request):
'''Return a :class:`.WsgiResponse` or a :class:`~asyncio.Future`.
        This method asynchronously waits for :meth:`stream` and subsequently
returns a :class:`.WsgiResponse`.
'''
content_types = request.content_types
if not content_types or self._content_type in content_types:
response = request.response
response.content_type = self._content_type
response.encoding = self.charset
response.content = self.to_bytes()
return response
else:
raise HttpException(status=415, msg=request.content_types) | [
"def",
"http_response",
"(",
"self",
",",
"request",
")",
":",
"content_types",
"=",
"request",
".",
"content_types",
"if",
"not",
"content_types",
"or",
"self",
".",
"_content_type",
"in",
"content_types",
":",
"response",
"=",
"request",
".",
"response",
"response",
".",
"content_type",
"=",
"self",
".",
"_content_type",
"response",
".",
"encoding",
"=",
"self",
".",
"charset",
"response",
".",
"content",
"=",
"self",
".",
"to_bytes",
"(",
")",
"return",
"response",
"else",
":",
"raise",
"HttpException",
"(",
"status",
"=",
"415",
",",
"msg",
"=",
"request",
".",
"content_types",
")"
] | Return a :class:`.WsgiResponse` or a :class:`~asyncio.Future`.
    This method asynchronously waits for :meth:`stream` and subsequently
returns a :class:`.WsgiResponse`. | [
"Return",
"a",
":",
"class",
":",
".",
"WsgiResponse",
"or",
"a",
":",
"class",
":",
"~asyncio",
".",
"Future",
"."
] | python | train |
knipknap/SpiffWorkflow | SpiffWorkflow/bpmn/parser/TaskParser.py | https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/parser/TaskParser.py#L158-L170 | def connect_outgoing(self, outgoing_task, outgoing_task_node,
sequence_flow_node, is_default):
"""
        Connects this task to the indicated outgoing task, with the details in
the sequence flow. A subclass can override this method to get extra
information from the node.
"""
self.task.connect_outgoing(
outgoing_task, sequence_flow_node.get('id'),
sequence_flow_node.get(
'name', None),
self.parser._parse_documentation(sequence_flow_node,
task_parser=self)) | [
"def",
"connect_outgoing",
"(",
"self",
",",
"outgoing_task",
",",
"outgoing_task_node",
",",
"sequence_flow_node",
",",
"is_default",
")",
":",
"self",
".",
"task",
".",
"connect_outgoing",
"(",
"outgoing_task",
",",
"sequence_flow_node",
".",
"get",
"(",
"'id'",
")",
",",
"sequence_flow_node",
".",
"get",
"(",
"'name'",
",",
"None",
")",
",",
"self",
".",
"parser",
".",
"_parse_documentation",
"(",
"sequence_flow_node",
",",
"task_parser",
"=",
"self",
")",
")"
] | Connects this task to the indicated outgoing task, with the details in
the sequence flow. A subclass can override this method to get extra
information from the node. | [
"Connects",
"this",
"task",
"to",
"the",
"indicating",
"outgoing",
"task",
"with",
"the",
"details",
"in",
"the",
"sequence",
"flow",
".",
"A",
"subclass",
"can",
"override",
"this",
"method",
"to",
"get",
"extra",
"information",
"from",
"the",
"node",
"."
] | python | valid |
pyblish/pyblish-nuke | pyblish_nuke/vendor/Qt.py | https://github.com/pyblish/pyblish-nuke/blob/5fbd766774e999e5e3015201094a07a92d800c4f/pyblish_nuke/vendor/Qt.py#L77-L108 | def _remap(object, name, value, safe=True):
"""Prevent accidental assignment of existing members
Arguments:
object (object): Parent of new attribute
name (str): Name of new attribute
value (object): Value of new attribute
safe (bool): Whether or not to guarantee that
the new attribute was not overwritten.
Can be set to False under condition that
it is superseded by extensive testing.
"""
if os.getenv("QT_TESTING") is not None and safe:
# Cannot alter original binding.
if hasattr(object, name):
raise AttributeError("Cannot override existing name: "
"%s.%s" % (object.__name__, name))
# Cannot alter classes of functions
if type(object).__name__ != "module":
raise AttributeError("%s != 'module': Cannot alter "
"anything but modules" % object)
elif hasattr(object, name):
# Keep track of modifications
self.__modified__.append(name)
self.__remapped__.append(name)
setattr(object, name, value) | [
"def",
"_remap",
"(",
"object",
",",
"name",
",",
"value",
",",
"safe",
"=",
"True",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"\"QT_TESTING\"",
")",
"is",
"not",
"None",
"and",
"safe",
":",
"# Cannot alter original binding.",
"if",
"hasattr",
"(",
"object",
",",
"name",
")",
":",
"raise",
"AttributeError",
"(",
"\"Cannot override existing name: \"",
"\"%s.%s\"",
"%",
"(",
"object",
".",
"__name__",
",",
"name",
")",
")",
"# Cannot alter classes of functions",
"if",
"type",
"(",
"object",
")",
".",
"__name__",
"!=",
"\"module\"",
":",
"raise",
"AttributeError",
"(",
"\"%s != 'module': Cannot alter \"",
"\"anything but modules\"",
"%",
"object",
")",
"elif",
"hasattr",
"(",
"object",
",",
"name",
")",
":",
"# Keep track of modifications",
"self",
".",
"__modified__",
".",
"append",
"(",
"name",
")",
"self",
".",
"__remapped__",
".",
"append",
"(",
"name",
")",
"setattr",
"(",
"object",
",",
"name",
",",
"value",
")"
] | Prevent accidental assignment of existing members
Arguments:
object (object): Parent of new attribute
name (str): Name of new attribute
value (object): Value of new attribute
safe (bool): Whether or not to guarantee that
the new attribute was not overwritten.
Can be set to False under condition that
it is superseded by extensive testing. | [
"Prevent",
"accidental",
"assignment",
"of",
"existing",
"members"
] | python | train |
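Stripped of the Qt-specific testing switch, the guard boils down to refusing to overwrite an existing module attribute and only ever patching module objects. A standalone sketch with a throwaway module (the attribute names below are made up):

import types

def remap(module, name, value, safe=True):
    if safe and hasattr(module, name):
        # Refuse to clobber anything the module already defines.
        raise AttributeError('Cannot override existing name: %s.%s'
                             % (module.__name__, name))
    if not isinstance(module, types.ModuleType):
        raise AttributeError('%r is not a module' % (module,))
    setattr(module, name, value)

fake = types.ModuleType('fake_binding')
remap(fake, 'QtWidgets', object())       # fine: attribute did not exist yet
try:
    remap(fake, 'QtWidgets', object())   # raises: attribute already present
except AttributeError as error:
    print(error)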
ddorn/GUI | GUI/gui_examples/bezier.py | https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/gui_examples/bezier.py#L17-L105 | def gui():
"""Main function"""
# #######
# setup all objects
# #######
zones = [ALL]
last_zones = []
COLORS.remove(WHITE)
screen = pygame.display.set_mode(SCREEN_SIZE, DOUBLEBUF)
pygame.display.set_caption('Bezier simulator')
pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])
points = [
(40, 40),
(100, 400),
(200, 100),
(650, 420)
]
bezier = Bezier((0, 0), SCREEN_SIZE, points, ORANGE, 8)
points = [Point(p, 24, choice(COLORS)) for p in points]
clock = pygame.time.Clock()
fps = FPSIndicator(clock)
dragging = None
render = True
while True:
# #######
# Input loop
# #######
mouse = pygame.mouse.get_pos()
for e in pygame.event.get():
if e.type == QUIT:
return 0
elif e.type == KEYDOWN:
if e.key == K_ESCAPE:
return 0
if e.key == K_F4 and e.mod & KMOD_ALT:
return 0
elif e.type == MOUSEBUTTONDOWN:
if e.button == 1:
dragging = not dragging
if e.button == 3:
points.append(Point(mouse, 24, choice(COLORS)))
bezier.points.append(V2(mouse))
render = True
if dragging:
mdist = 10000
the_p = None
for i, p in enumerate(points):
if p.dist_to(mouse) < mdist:
mdist = p.dist_to(mouse)
the_p = i
render = points[the_p].pos != mouse
points[the_p].pos = mouse
bezier.points[the_p] = V2(mouse)
# #######
# Draw all
# #######
if render:
render = False
screen.fill(WHITE)
bezier.render(screen)
for p in points:
p.render(screen)
zones.append(ALL)
_ = fps.render(screen)
zones.append(_)
pygame.display.update(zones + last_zones)
last_zones = zones[:]
zones.clear()
clock.tick(FPS) | [
"def",
"gui",
"(",
")",
":",
"# #######",
"# setup all objects",
"# #######",
"zones",
"=",
"[",
"ALL",
"]",
"last_zones",
"=",
"[",
"]",
"COLORS",
".",
"remove",
"(",
"WHITE",
")",
"screen",
"=",
"pygame",
".",
"display",
".",
"set_mode",
"(",
"SCREEN_SIZE",
",",
"DOUBLEBUF",
")",
"pygame",
".",
"display",
".",
"set_caption",
"(",
"'Bezier simulator'",
")",
"pygame",
".",
"event",
".",
"set_allowed",
"(",
"[",
"QUIT",
",",
"KEYDOWN",
",",
"MOUSEBUTTONDOWN",
"]",
")",
"points",
"=",
"[",
"(",
"40",
",",
"40",
")",
",",
"(",
"100",
",",
"400",
")",
",",
"(",
"200",
",",
"100",
")",
",",
"(",
"650",
",",
"420",
")",
"]",
"bezier",
"=",
"Bezier",
"(",
"(",
"0",
",",
"0",
")",
",",
"SCREEN_SIZE",
",",
"points",
",",
"ORANGE",
",",
"8",
")",
"points",
"=",
"[",
"Point",
"(",
"p",
",",
"24",
",",
"choice",
"(",
"COLORS",
")",
")",
"for",
"p",
"in",
"points",
"]",
"clock",
"=",
"pygame",
".",
"time",
".",
"Clock",
"(",
")",
"fps",
"=",
"FPSIndicator",
"(",
"clock",
")",
"dragging",
"=",
"None",
"render",
"=",
"True",
"while",
"True",
":",
"# #######",
"# Input loop",
"# #######",
"mouse",
"=",
"pygame",
".",
"mouse",
".",
"get_pos",
"(",
")",
"for",
"e",
"in",
"pygame",
".",
"event",
".",
"get",
"(",
")",
":",
"if",
"e",
".",
"type",
"==",
"QUIT",
":",
"return",
"0",
"elif",
"e",
".",
"type",
"==",
"KEYDOWN",
":",
"if",
"e",
".",
"key",
"==",
"K_ESCAPE",
":",
"return",
"0",
"if",
"e",
".",
"key",
"==",
"K_F4",
"and",
"e",
".",
"mod",
"&",
"KMOD_ALT",
":",
"return",
"0",
"elif",
"e",
".",
"type",
"==",
"MOUSEBUTTONDOWN",
":",
"if",
"e",
".",
"button",
"==",
"1",
":",
"dragging",
"=",
"not",
"dragging",
"if",
"e",
".",
"button",
"==",
"3",
":",
"points",
".",
"append",
"(",
"Point",
"(",
"mouse",
",",
"24",
",",
"choice",
"(",
"COLORS",
")",
")",
")",
"bezier",
".",
"points",
".",
"append",
"(",
"V2",
"(",
"mouse",
")",
")",
"render",
"=",
"True",
"if",
"dragging",
":",
"mdist",
"=",
"10000",
"the_p",
"=",
"None",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"points",
")",
":",
"if",
"p",
".",
"dist_to",
"(",
"mouse",
")",
"<",
"mdist",
":",
"mdist",
"=",
"p",
".",
"dist_to",
"(",
"mouse",
")",
"the_p",
"=",
"i",
"render",
"=",
"points",
"[",
"the_p",
"]",
".",
"pos",
"!=",
"mouse",
"points",
"[",
"the_p",
"]",
".",
"pos",
"=",
"mouse",
"bezier",
".",
"points",
"[",
"the_p",
"]",
"=",
"V2",
"(",
"mouse",
")",
"# #######",
"# Draw all",
"# #######",
"if",
"render",
":",
"render",
"=",
"False",
"screen",
".",
"fill",
"(",
"WHITE",
")",
"bezier",
".",
"render",
"(",
"screen",
")",
"for",
"p",
"in",
"points",
":",
"p",
".",
"render",
"(",
"screen",
")",
"zones",
".",
"append",
"(",
"ALL",
")",
"_",
"=",
"fps",
".",
"render",
"(",
"screen",
")",
"zones",
".",
"append",
"(",
"_",
")",
"pygame",
".",
"display",
".",
"update",
"(",
"zones",
"+",
"last_zones",
")",
"last_zones",
"=",
"zones",
"[",
":",
"]",
"zones",
".",
"clear",
"(",
")",
"clock",
".",
"tick",
"(",
"FPS",
")"
] | Main function | [
"Main",
"function"
] | python | train |
google/grr | grr/core/grr_response_core/lib/util/filesystem.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/util/filesystem.py#L187-L211 | def Get(self, path, follow_symlink = True):
"""Stats given file or returns a cached result if available.
Args:
path: A path to the file to perform `stat` on.
follow_symlink: True if `stat` of a symlink should be returned instead of
a file that it points to. For non-symlinks this setting has no effect.
Returns:
`Stat` object corresponding to the given path.
"""
key = self._Key(path=path, follow_symlink=follow_symlink)
try:
return self._cache[key]
except KeyError:
value = Stat.FromPath(path, follow_symlink=follow_symlink)
self._cache[key] = value
# If we are not following symlinks and the file is a not symlink then
# the stat result for this file stays the same even if we want to follow
# symlinks.
if not follow_symlink and not value.IsSymlink():
self._cache[self._Key(path=path, follow_symlink=True)] = value
return value | [
"def",
"Get",
"(",
"self",
",",
"path",
",",
"follow_symlink",
"=",
"True",
")",
":",
"key",
"=",
"self",
".",
"_Key",
"(",
"path",
"=",
"path",
",",
"follow_symlink",
"=",
"follow_symlink",
")",
"try",
":",
"return",
"self",
".",
"_cache",
"[",
"key",
"]",
"except",
"KeyError",
":",
"value",
"=",
"Stat",
".",
"FromPath",
"(",
"path",
",",
"follow_symlink",
"=",
"follow_symlink",
")",
"self",
".",
"_cache",
"[",
"key",
"]",
"=",
"value",
"# If we are not following symlinks and the file is a not symlink then",
"# the stat result for this file stays the same even if we want to follow",
"# symlinks.",
"if",
"not",
"follow_symlink",
"and",
"not",
"value",
".",
"IsSymlink",
"(",
")",
":",
"self",
".",
"_cache",
"[",
"self",
".",
"_Key",
"(",
"path",
"=",
"path",
",",
"follow_symlink",
"=",
"True",
")",
"]",
"=",
"value",
"return",
"value"
] | Stats given file or returns a cached result if available.
Args:
path: A path to the file to perform `stat` on.
follow_symlink: True if `stat` of a symlink should be returned instead of
a file that it points to. For non-symlinks this setting has no effect.
Returns:
`Stat` object corresponding to the given path. | [
"Stats",
"given",
"file",
"or",
"returns",
"a",
"cached",
"result",
"if",
"available",
"."
] | python | train |
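The same caching rule can be sketched with plain os.stat/os.lstat in place of GRR's Stat wrapper: results are keyed by (path, follow_symlink), and a non-symlink's lstat result is shared with the follow_symlink=True key because both calls would return identical data:

import os
import stat


class StatCache(object):

    def __init__(self):
        self._cache = {}

    def Get(self, path, follow_symlink=True):
        key = (path, follow_symlink)
        try:
            return self._cache[key]
        except KeyError:
            value = os.stat(path) if follow_symlink else os.lstat(path)
            self._cache[key] = value
            # For non-symlinks both variants are identical, so reuse the result.
            if not follow_symlink and not stat.S_ISLNK(value.st_mode):
                self._cache[(path, True)] = value
            return value


cache = StatCache()
first = cache.Get(__file__, follow_symlink=False)
second = cache.Get(__file__)   # served from the shared cache entry
print(first is second)         # True unless this file is itself a symlink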
seequent/properties | properties/images.py | https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/images.py#L86-L97 | def to_json(value, **kwargs):
"""Convert a PNG Image to base64-encoded JSON
to_json assumes that value has passed validation.
"""
b64rep = base64.b64encode(value.read())
value.seek(0)
jsonrep = '{preamble}{b64}'.format(
preamble=PNG_PREAMBLE,
b64=b64rep.decode(),
)
return jsonrep | [
"def",
"to_json",
"(",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"b64rep",
"=",
"base64",
".",
"b64encode",
"(",
"value",
".",
"read",
"(",
")",
")",
"value",
".",
"seek",
"(",
"0",
")",
"jsonrep",
"=",
"'{preamble}{b64}'",
".",
"format",
"(",
"preamble",
"=",
"PNG_PREAMBLE",
",",
"b64",
"=",
"b64rep",
".",
"decode",
"(",
")",
",",
")",
"return",
"jsonrep"
] | Convert a PNG Image to base64-encoded JSON
to_json assumes that value has passed validation. | [
"Convert",
"a",
"PNG",
"Image",
"to",
"base64",
"-",
"encoded",
"JSON"
] | python | train |
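The JSON form is simply a preamble string followed by the base64 of the raw PNG bytes. A round-trip sketch; the preamble value below is an assumption standing in for the package's PNG_PREAMBLE constant, and the payload is arbitrary bytes rather than a real image:

import base64
import io

PNG_PREAMBLE = 'data:image/png;base64,'   # assumed stand-in for the real constant

def png_to_json(fileobj):
    b64 = base64.b64encode(fileobj.read()).decode()
    fileobj.seek(0)                        # leave the file usable again, as to_json does
    return PNG_PREAMBLE + b64

def json_to_png(jsonrep):
    assert jsonrep.startswith(PNG_PREAMBLE)
    return io.BytesIO(base64.b64decode(jsonrep[len(PNG_PREAMBLE):]))

payload = io.BytesIO(b'\x89PNG\r\n\x1a\n fake image bytes')
assert json_to_png(png_to_json(payload)).read() == payload.read()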
belbio/bel | bel/lang/semantics.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L46-L88 | def validate_functions(ast: BELAst, bo):
"""Recursively validate function signatures
Determine if function matches one of the available signatures. Also,
1. Add entity types to AST NSArg, e.g. Abundance, ...
2. Add optional to AST Arg (optional means it is not a
fixed, required argument and needs to be sorted for
canonicalization, e.g. reactants(A, B, C) )
Args:
        ast: BEL AST or component Function node being validated
        bo: bel object
Returns:
bel object
"""
if isinstance(ast, Function):
log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}")
function_signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"]
function_name = ast.name
(valid_function, messages) = check_function_args(
ast.args, function_signatures, function_name
)
if not valid_function:
message = ", ".join(messages)
bo.validation_messages.append(
(
"ERROR",
"Invalid BEL Statement function {} - problem with function signatures: {}".format(
ast.to_string(), message
),
)
)
bo.parse_valid = False
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
validate_functions(arg, bo)
return bo | [
"def",
"validate_functions",
"(",
"ast",
":",
"BELAst",
",",
"bo",
")",
":",
"if",
"isinstance",
"(",
"ast",
",",
"Function",
")",
":",
"log",
".",
"debug",
"(",
"f\"Validating: {ast.name}, {ast.function_type}, {ast.args}\"",
")",
"function_signatures",
"=",
"bo",
".",
"spec",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"ast",
".",
"name",
"]",
"[",
"\"signatures\"",
"]",
"function_name",
"=",
"ast",
".",
"name",
"(",
"valid_function",
",",
"messages",
")",
"=",
"check_function_args",
"(",
"ast",
".",
"args",
",",
"function_signatures",
",",
"function_name",
")",
"if",
"not",
"valid_function",
":",
"message",
"=",
"\", \"",
".",
"join",
"(",
"messages",
")",
"bo",
".",
"validation_messages",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"\"Invalid BEL Statement function {} - problem with function signatures: {}\"",
".",
"format",
"(",
"ast",
".",
"to_string",
"(",
")",
",",
"message",
")",
",",
")",
")",
"bo",
".",
"parse_valid",
"=",
"False",
"# Recursively process every NSArg by processing BELAst and Functions",
"if",
"hasattr",
"(",
"ast",
",",
"\"args\"",
")",
":",
"for",
"arg",
"in",
"ast",
".",
"args",
":",
"validate_functions",
"(",
"arg",
",",
"bo",
")",
"return",
"bo"
] | Recursively validate function signatures
Determine if function matches one of the available signatures. Also,
1. Add entity types to AST NSArg, e.g. Abundance, ...
2. Add optional to AST Arg (optional means it is not a
fixed, required argument and needs to be sorted for
canonicalization, e.g. reactants(A, B, C) )
Args:
ast: BEL AST or component Function node being validated
bo: bel object
Returns:
bel object | [
"Recursively",
"validate",
"function",
"signatures"
] | python | train |
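The validation itself is a depth-first walk: visit each Function node, check its args against the known signatures, then recurse into the args. A stripped-down sketch with stand-in node classes and a visitor that only records what it saw (the real checker consults bo.spec and appends to bo.validation_messages):

class Function(object):
    def __init__(self, name, args=()):
        self.name = name
        self.args = list(args)

def walk_functions(node, visit):
    # Depth-first: visit every Function, then descend into whatever args it has.
    if isinstance(node, Function):
        visit(node)
    for arg in getattr(node, 'args', []):
        walk_functions(arg, visit)

seen = []
ast = Function('activity', [Function('proteinAbundance', ['HGNC:AKT1'])])
walk_functions(ast, lambda fn: seen.append('%s(%d args)' % (fn.name, len(fn.args))))
print(seen)  # ['activity(1 args)', 'proteinAbundance(1 args)']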