repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses: 1 value) | partition (stringclasses: 3 values) |
---|---|---|---|---|---|---|---|---|
DLR-RM/RAFCON | source/rafcon/gui/controllers/utils/tree_view_controller.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L141-L144 | def state_machine_selection_changed(self, state_machine_m, signal_name, signal_msg):
"""Notify tree view about state machine selection"""
if self.CORE_ELEMENT_CLASS in signal_msg.arg.affected_core_element_classes:
self.update_selection_sm_prior() | [
"def",
"state_machine_selection_changed",
"(",
"self",
",",
"state_machine_m",
",",
"signal_name",
",",
"signal_msg",
")",
":",
"if",
"self",
".",
"CORE_ELEMENT_CLASS",
"in",
"signal_msg",
".",
"arg",
".",
"affected_core_element_classes",
":",
"self",
".",
"update_selection_sm_prior",
"(",
")"
] | Notify tree view about state machine selection | [
"Notify",
"tree",
"view",
"about",
"state",
"machine",
"selection"
] | python | train |
what-studio/gauge | recipes/namedgauge.py | https://github.com/what-studio/gauge/blob/4624602c87c9287de1a8a0bcb2a68f827af49ccb/recipes/namedgauge.py#L36-L51 | def get_momentum_by_name(self, name):
"""Gets a momentum by the given name.
:param name: the momentum name.
:returns: a momentum found.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`.
"""
if name is None:
raise TypeError('\'name\' should not be None')
for momentum in self.momenta:
if momentum.name == name:
return momentum
raise KeyError('No such momentum named {0}'.format(name)) | [
"def",
"get_momentum_by_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'\\'name\\' should not be None'",
")",
"for",
"momentum",
"in",
"self",
".",
"momenta",
":",
"if",
"momentum",
".",
"name",
"==",
"name",
":",
"return",
"momentum",
"raise",
"KeyError",
"(",
"'No such momentum named {0}'",
".",
"format",
"(",
"name",
")",
")"
] | Gets a momentum by the given name.
:param name: the momentum name.
:returns: a momentum found.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`. | [
"Gets",
"a",
"momentum",
"by",
"the",
"given",
"name",
"."
] | python | train |
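The lookup pattern in `get_momentum_by_name` above is easy to exercise outside the class; the sketch below is a standalone adaptation, and the `Momentum` namedtuple is a hypothetical stand-in for the library's real momentum type, not part of gauge itself.

```python
from collections import namedtuple

Momentum = namedtuple('Momentum', 'name velocity')  # hypothetical stand-in

def get_momentum_by_name(momenta, name):
    # Same logic as the method in the row above, written as a free function.
    if name is None:
        raise TypeError("'name' should not be None")
    for momentum in momenta:
        if momentum.name == name:
            return momentum
    raise KeyError('No such momentum named {0}'.format(name))

print(get_momentum_by_name([Momentum('boost', 2.0)], 'boost'))
# Momentum(name='boost', velocity=2.0)
```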
datastax/python-driver | cassandra/io/twistedreactor.py | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/io/twistedreactor.py#L283-L300 | def close(self):
"""
Disconnect and error-out all requests.
"""
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
reactor.callFromThread(self.connector.disconnect)
log.debug("Closed socket to %s", self.endpoint)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
# don't leave in-progress operations hanging
self.connected_event.set() | [
"def",
"close",
"(",
"self",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"is_closed",
":",
"return",
"self",
".",
"is_closed",
"=",
"True",
"log",
".",
"debug",
"(",
"\"Closing connection (%s) to %s\"",
",",
"id",
"(",
"self",
")",
",",
"self",
".",
"endpoint",
")",
"reactor",
".",
"callFromThread",
"(",
"self",
".",
"connector",
".",
"disconnect",
")",
"log",
".",
"debug",
"(",
"\"Closed socket to %s\"",
",",
"self",
".",
"endpoint",
")",
"if",
"not",
"self",
".",
"is_defunct",
":",
"self",
".",
"error_all_requests",
"(",
"ConnectionShutdown",
"(",
"\"Connection to %s was closed\"",
"%",
"self",
".",
"endpoint",
")",
")",
"# don't leave in-progress operations hanging",
"self",
".",
"connected_event",
".",
"set",
"(",
")"
] | Disconnect and error-out all requests. | [
"Disconnect",
"and",
"error",
"-",
"out",
"all",
"requests",
"."
] | python | train |
quantopian/zipline | zipline/data/hdf5_daily_bars.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L562-L583 | def _validate_assets(self, assets):
"""Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars.
"""
missing_sids = np.setdiff1d(assets, self.sids)
if len(missing_sids):
raise NoDataForSid(
'Assets not contained in daily pricing file: {}'.format(
missing_sids
)
) | [
"def",
"_validate_assets",
"(",
"self",
",",
"assets",
")",
":",
"missing_sids",
"=",
"np",
".",
"setdiff1d",
"(",
"assets",
",",
"self",
".",
"sids",
")",
"if",
"len",
"(",
"missing_sids",
")",
":",
"raise",
"NoDataForSid",
"(",
"'Assets not contained in daily pricing file: {}'",
".",
"format",
"(",
"missing_sids",
")",
")"
] | Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars. | [
"Validate",
"that",
"asset",
"identifiers",
"are",
"contained",
"in",
"the",
"daily",
"bars",
"."
] | python | train |
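The only non-obvious call in `_validate_assets` above is `np.setdiff1d`, which returns the requested identifiers that are absent from the stored ones; a quick standalone check (the sids are made up and unrelated to zipline's own data) looks like this:

```python
import numpy as np

requested = np.array([1, 2, 5])      # sids asked for (illustrative values)
available = np.array([1, 2, 3, 4])   # sids present in the pricing file
missing = np.setdiff1d(requested, available)
print(missing)           # [5]
print(len(missing) > 0)  # True -> would trigger the NoDataForSid branch above
```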
eighthave/pyvendapin | vendapin.py | https://github.com/eighthave/pyvendapin/blob/270c4da5c31ab4a0435660b25b655692fdffcf01/vendapin.py#L180-L188 | def parsedata(self, packet):
'''parse the data section of a packet, it can range from 0 to many bytes'''
data = []
datalength = ord(packet[3])
position = 4
while position < datalength + 4:
data.append(packet[position])
position += 1
return data | [
"def",
"parsedata",
"(",
"self",
",",
"packet",
")",
":",
"data",
"=",
"[",
"]",
"datalength",
"=",
"ord",
"(",
"packet",
"[",
"3",
"]",
")",
"position",
"=",
"4",
"while",
"position",
"<",
"datalength",
"+",
"4",
":",
"data",
".",
"append",
"(",
"packet",
"[",
"position",
"]",
")",
"position",
"+=",
"1",
"return",
"data"
] | parse the data section of a packet, it can range from 0 to many bytes | [
"parse",
"the",
"data",
"section",
"of",
"a",
"packet",
"it",
"can",
"range",
"from",
"0",
"to",
"many",
"bytes"
] | python | train |
mitsei/dlkit | dlkit/handcar/osid/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/objects.py#L984-L1005 | def set_display_name(self, display_name=None):
"""Sets a display name.
A display name is required and if not set, will be set by the
provider.
arg: displayName (string): the new display name
raise: InvalidArgument - displayName is invalid
raise: NoAccess - metadata.is_readonly() is true
raise: NullArgument - displayName is null
compliance: mandatory - This method must be implemented.
"""
if display_name is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['display_name'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(display_name, metadata, array=False):
self._my_map['displayName']['text'] = display_name
else:
raise InvalidArgument | [
"def",
"set_display_name",
"(",
"self",
",",
"display_name",
"=",
"None",
")",
":",
"if",
"display_name",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"metadata",
"=",
"Metadata",
"(",
"*",
"*",
"settings",
".",
"METADATA",
"[",
"'display_name'",
"]",
")",
"if",
"metadata",
".",
"is_read_only",
"(",
")",
":",
"raise",
"NoAccess",
"(",
")",
"if",
"self",
".",
"_is_valid_input",
"(",
"display_name",
",",
"metadata",
",",
"array",
"=",
"False",
")",
":",
"self",
".",
"_my_map",
"[",
"'displayName'",
"]",
"[",
"'text'",
"]",
"=",
"display_name",
"else",
":",
"raise",
"InvalidArgument"
] | Sets a display name.
A display name is required and if not set, will be set by the
provider.
arg: displayName (string): the new display name
raise: InvalidArgument - displayName is invalid
raise: NoAccess - metadata.is_readonly() is true
raise: NullArgument - displayName is null
compliance: mandatory - This method must be implemented. | [
"Sets",
"a",
"display",
"name",
"."
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12085-L12095 | def terrain_request_encode(self, lat, lon, grid_spacing, mask):
'''
Request for terrain data and terrain status
lat : Latitude of SW corner of first grid (degrees *10^7) (int32_t)
lon : Longitude of SW corner of first grid (in degrees *10^7) (int32_t)
grid_spacing : Grid spacing in meters (uint16_t)
mask : Bitmask of requested 4x4 grids (row major 8x7 array of grids, 56 bits) (uint64_t)
'''
return MAVLink_terrain_request_message(lat, lon, grid_spacing, mask) | [
"def",
"terrain_request_encode",
"(",
"self",
",",
"lat",
",",
"lon",
",",
"grid_spacing",
",",
"mask",
")",
":",
"return",
"MAVLink_terrain_request_message",
"(",
"lat",
",",
"lon",
",",
"grid_spacing",
",",
"mask",
")"
] | Request for terrain data and terrain status
lat : Latitude of SW corner of first grid (degrees *10^7) (int32_t)
lon : Longitude of SW corner of first grid (in degrees *10^7) (int32_t)
grid_spacing : Grid spacing in meters (uint16_t)
mask : Bitmask of requested 4x4 grids (row major 8x7 array of grids, 56 bits) (uint64_t) | [
"Request",
"for",
"terrain",
"data",
"and",
"terrain",
"status"
] | python | train |
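The docstring above notes that latitude and longitude travel as degrees scaled by 10^7 in an int32; a small sketch of that fixed-point convention follows. The coordinates are invented for illustration, and the real encoder simply forwards already-scaled integers.

```python
# Convert decimal degrees to the "degrees * 1e7" integer wire format and back.
lat_deg, lon_deg = 47.3977419, 8.5455938        # example coordinates (made up)
lat_e7 = int(round(lat_deg * 1e7))              # 473977419
lon_e7 = int(round(lon_deg * 1e7))              # 85455938
print(lat_e7, lon_e7)
print(lat_e7 / 1e7, lon_e7 / 1e7)               # back to decimal degrees
```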
SchroterQuentin/django-search-listview | fabfile.py | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/fabfile.py#L68-L85 | def dev():
"""Define dev stage"""
env.roledefs = {
'web': ['192.168.1.2'],
'lb': ['192.168.1.2'],
}
env.user = 'vagrant'
env.backends = env.roledefs['web']
env.server_name = 'django_search_model-dev.net'
env.short_server_name = 'django_search_model-dev'
env.static_folder = '/site_media/'
env.server_ip = '192.168.1.2'
env.no_shared_sessions = False
env.server_ssl_on = False
env.goal = 'dev'
env.socket_port = '8001'
env.map_settings = {}
execute(build_env) | [
"def",
"dev",
"(",
")",
":",
"env",
".",
"roledefs",
"=",
"{",
"'web'",
":",
"[",
"'192.168.1.2'",
"]",
",",
"'lb'",
":",
"[",
"'192.168.1.2'",
"]",
",",
"}",
"env",
".",
"user",
"=",
"'vagrant'",
"env",
".",
"backends",
"=",
"env",
".",
"roledefs",
"[",
"'web'",
"]",
"env",
".",
"server_name",
"=",
"'django_search_model-dev.net'",
"env",
".",
"short_server_name",
"=",
"'django_search_model-dev'",
"env",
".",
"static_folder",
"=",
"'/site_media/'",
"env",
".",
"server_ip",
"=",
"'192.168.1.2'",
"env",
".",
"no_shared_sessions",
"=",
"False",
"env",
".",
"server_ssl_on",
"=",
"False",
"env",
".",
"goal",
"=",
"'dev'",
"env",
".",
"socket_port",
"=",
"'8001'",
"env",
".",
"map_settings",
"=",
"{",
"}",
"execute",
"(",
"build_env",
")"
] | Define dev stage | [
"Define",
"dev",
"stage"
] | python | train |
Josef-Friedrich/tmep | tmep/functions.py | https://github.com/Josef-Friedrich/tmep/blob/326de14f5b9498696a1f06a8be3d39e33e376102/tmep/functions.py#L159-L177 | def tmpl_ifdef(self, field, trueval=u'', falseval=u''):
"""If field exists return trueval or the field (default) otherwise,
emit return falseval (if provided).
* synopsis: ``%ifdef{field}``, ``%ifdef{field,text}`` or \
``%ifdef{field,text,falsetext}``
* description: If field exists, then return truetext or field \
(default). Otherwise, returns falsetext. The field should be \
entered without $.
:param field: The name of the field
:param trueval: The string if the condition is true
:param falseval: The string if the condition is false
:return: The string, based on condition
"""
if field in self.values:
return trueval
else:
return falseval | [
"def",
"tmpl_ifdef",
"(",
"self",
",",
"field",
",",
"trueval",
"=",
"u''",
",",
"falseval",
"=",
"u''",
")",
":",
"if",
"field",
"in",
"self",
".",
"values",
":",
"return",
"trueval",
"else",
":",
"return",
"falseval"
] | If field exists return trueval or the field (default) otherwise,
emit return falseval (if provided).
* synopsis: ``%ifdef{field}``, ``%ifdef{field,text}`` or \
``%ifdef{field,text,falsetext}``
* description: If field exists, then return truetext or field \
(default). Otherwise, returns falsetext. The field should be \
entered without $.
:param field: The name of the field
:param trueval: The string if the condition is true
:param falseval: The string if the condition is false
:return: The string, based on condition | [
"If",
"field",
"exists",
"return",
"trueval",
"or",
"the",
"field",
"(",
"default",
")",
"otherwise",
"emit",
"return",
"falseval",
"(",
"if",
"provided",
")",
"."
] | python | train |
vilmibm/done | parsedatetime/parsedatetime_consts.py | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L1087-L1118 | def buildSources(self, sourceTime=None):
"""
Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned.
"""
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
sources = {}
defaults = { 'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec, }
for item in self.re_sources:
values = {}
source = self.re_sources[item]
for key in defaults.keys():
if key in source:
values[key] = source[key]
else:
values[key] = defaults[key]
sources[item] = ( values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'], wd, yd, isdst )
return sources | [
"def",
"buildSources",
"(",
"self",
",",
"sourceTime",
"=",
"None",
")",
":",
"if",
"sourceTime",
"is",
"None",
":",
"(",
"yr",
",",
"mth",
",",
"dy",
",",
"hr",
",",
"mn",
",",
"sec",
",",
"wd",
",",
"yd",
",",
"isdst",
")",
"=",
"time",
".",
"localtime",
"(",
")",
"else",
":",
"(",
"yr",
",",
"mth",
",",
"dy",
",",
"hr",
",",
"mn",
",",
"sec",
",",
"wd",
",",
"yd",
",",
"isdst",
")",
"=",
"sourceTime",
"sources",
"=",
"{",
"}",
"defaults",
"=",
"{",
"'yr'",
":",
"yr",
",",
"'mth'",
":",
"mth",
",",
"'dy'",
":",
"dy",
",",
"'hr'",
":",
"hr",
",",
"'mn'",
":",
"mn",
",",
"'sec'",
":",
"sec",
",",
"}",
"for",
"item",
"in",
"self",
".",
"re_sources",
":",
"values",
"=",
"{",
"}",
"source",
"=",
"self",
".",
"re_sources",
"[",
"item",
"]",
"for",
"key",
"in",
"defaults",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"source",
":",
"values",
"[",
"key",
"]",
"=",
"source",
"[",
"key",
"]",
"else",
":",
"values",
"[",
"key",
"]",
"=",
"defaults",
"[",
"key",
"]",
"sources",
"[",
"item",
"]",
"=",
"(",
"values",
"[",
"'yr'",
"]",
",",
"values",
"[",
"'mth'",
"]",
",",
"values",
"[",
"'dy'",
"]",
",",
"values",
"[",
"'hr'",
"]",
",",
"values",
"[",
"'mn'",
"]",
",",
"values",
"[",
"'sec'",
"]",
",",
"wd",
",",
"yd",
",",
"isdst",
")",
"return",
"sources"
] | Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned. | [
"Return",
"a",
"dictionary",
"of",
"date",
"/",
"time",
"tuples",
"based",
"on",
"the",
"keys",
"found",
"in",
"self",
".",
"re_sources",
"."
] | python | train |
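The heart of `buildSources` above is a defaults-merge: every key missing from a `re_sources` entry is filled from the current (or supplied) time. Below is a compact standalone rendering of that merge, using a made-up one-entry `re_sources` table rather than the real parsedatetime constants.

```python
import time

re_sources = {'noon': {'hr': 12, 'mn': 0, 'sec': 0}}   # illustrative entry only
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
defaults = {'yr': yr, 'mth': mth, 'dy': dy, 'hr': hr, 'mn': mn, 'sec': sec}

sources = {}
for item, source in re_sources.items():
    # Take the value from the source entry when present, otherwise the default.
    values = {key: source.get(key, defaults[key]) for key in defaults}
    sources[item] = (values['yr'], values['mth'], values['dy'],
                     values['hr'], values['mn'], values['sec'], wd, yd, isdst)
print(sources['noon'])   # today's date with the time forced to 12:00:00
```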
mozilla-services/python-dockerflow | src/dockerflow/flask/app.py | https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L203-L230 | def user_id(self):
"""
Return the ID of the current request's user
"""
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id() | [
"def",
"user_id",
"(",
"self",
")",
":",
"# This needs flask-login to be installed",
"if",
"not",
"has_flask_login",
":",
"return",
"# and the actual login manager installed",
"if",
"not",
"hasattr",
"(",
"current_app",
",",
"'login_manager'",
")",
":",
"return",
"# fail if no current_user was attached to the request context",
"try",
":",
"is_authenticated",
"=",
"current_user",
".",
"is_authenticated",
"except",
"AttributeError",
":",
"return",
"# because is_authenticated could be a callable, call it",
"if",
"callable",
"(",
"is_authenticated",
")",
":",
"is_authenticated",
"=",
"is_authenticated",
"(",
")",
"# and fail if the user isn't authenticated",
"if",
"not",
"is_authenticated",
":",
"return",
"# finally return the user id",
"return",
"current_user",
".",
"get_id",
"(",
")"
] | Return the ID of the current request's user | [
"Return",
"the",
"ID",
"of",
"the",
"current",
"request",
"s",
"user"
] | python | train |
anteater/anteater | anteater/src/virus_total.py | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L138-L154 | def send_ip(self, ipaddr, apikey):
"""
Send IP address for list of past malicious domain associations
"""
url = self.base_url + "ip-address/report"
parameters = {"ip": ipaddr, "apikey": apikey}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.get(url, params=parameters)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", ipaddr, response.status_code)
time.sleep(self.public_api_sleep_time) | [
"def",
"send_ip",
"(",
"self",
",",
"ipaddr",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"ip-address/report\"",
"parameters",
"=",
"{",
"\"ip\"",
":",
"ipaddr",
",",
"\"apikey\"",
":",
"apikey",
"}",
"rate_limit_clear",
"=",
"self",
".",
"rate_limit",
"(",
")",
"if",
"rate_limit_clear",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"parameters",
")",
"if",
"response",
".",
"status_code",
"==",
"self",
".",
"HTTP_OK",
":",
"json_response",
"=",
"response",
".",
"json",
"(",
")",
"return",
"json_response",
"elif",
"response",
".",
"status_code",
"==",
"self",
".",
"HTTP_RATE_EXCEEDED",
":",
"time",
".",
"sleep",
"(",
"20",
")",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"sent: %s, HTTP: %d\"",
",",
"ipaddr",
",",
"response",
".",
"status_code",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"public_api_sleep_time",
")"
] | Send IP address for list of past malicious domain associations | [
"Send",
"IP",
"address",
"for",
"list",
"of",
"past",
"malicous",
"domain",
"associations"
] | python | train |
YosaiProject/yosai | yosai/web/session/session.py | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/web/session/session.py#L281-L288 | def pop_flash(self, queue='default'):
"""
:rtype: list
"""
flash_messages = self.get_internal_attribute('flash_messages')
messages = flash_messages.pop(queue, None)
self.set_internal_attribute('flash_messages', flash_messages)
return messages | [
"def",
"pop_flash",
"(",
"self",
",",
"queue",
"=",
"'default'",
")",
":",
"flash_messages",
"=",
"self",
".",
"get_internal_attribute",
"(",
"'flash_messages'",
")",
"messages",
"=",
"flash_messages",
".",
"pop",
"(",
"queue",
",",
"None",
")",
"self",
".",
"set_internal_attribute",
"(",
"'flash_messages'",
",",
"flash_messages",
")",
"return",
"messages"
] | :rtype: list | [
":",
"rtype",
":",
"list"
] | python | train |
fozzle/python-brotherprint | brotherprint/brotherprint.py | https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L363-L377 | def abs_horz_pos(self, amount):
'''Calling this function sets the absolute print position for the next data, this is
the position from the left margin.
Args:
amount: desired positioning. Can be a number from 0 to 2362. The actual positioning
is calculated as (amount/60)inches from the left margin.
Returns:
None
Raises:
None
'''
n1 = amount%256
n2 = amount/256
self.send(chr(27)+'${n1}{n2}'.format(n1=chr(n1), n2=chr(n2))) | [
"def",
"abs_horz_pos",
"(",
"self",
",",
"amount",
")",
":",
"n1",
"=",
"amount",
"%",
"256",
"n2",
"=",
"amount",
"/",
"256",
"self",
".",
"send",
"(",
"chr",
"(",
"27",
")",
"+",
"'${n1}{n2}'",
".",
"format",
"(",
"n1",
"=",
"chr",
"(",
"n1",
")",
",",
"n2",
"=",
"chr",
"(",
"n2",
")",
")",
")"
] | Calling this function sets the absolute print position for the next data, this is
the position from the left margin.
Args:
amount: desired positioning. Can be a number from 0 to 2362. The actual positioning
is calculated as (amount/60)inches from the left margin.
Returns:
None
Raises:
None | [
"Calling",
"this",
"function",
"sets",
"the",
"absoulte",
"print",
"position",
"for",
"the",
"next",
"data",
"this",
"is",
"the",
"position",
"from",
"the",
"left",
"margin",
".",
"Args",
":",
"amount",
":",
"desired",
"positioning",
".",
"Can",
"be",
"a",
"number",
"from",
"0",
"to",
"2362",
".",
"The",
"actual",
"positioning",
"is",
"calculated",
"as",
"(",
"amount",
"/",
"60",
")",
"inches",
"from",
"the",
"left",
"margin",
".",
"Returns",
":",
"None",
"Raises",
":",
"None"
] | python | train |
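The byte arithmetic in `abs_horz_pos` above (low byte `amount % 256`, high byte `amount / 256`, sent after `ESC $`) is easy to check in isolation. The sketch below is illustrative only and uses Python 3 integer division, whereas the original is Python 2 style.

```python
# Standalone illustration of the ESC $ absolute-position encoding described above.
def encode_abs_horz_pos(amount: int) -> bytes:
    if not 0 <= amount <= 2362:                  # range given in the docstring
        raise ValueError('amount must be between 0 and 2362')
    n1 = amount % 256                            # low byte
    n2 = amount // 256                           # high byte
    return b'\x1b$' + bytes([n1, n2])

print(encode_abs_horz_pos(600).hex())            # 1b245802 (600 = 2*256 + 88)
```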
yyuu/botornado | boto/ec2/connection.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L709-L723 | def start_instances(self, instance_ids=None):
"""
Start the instances specified
:type instance_ids: list
:param instance_ids: A list of strings of the Instance IDs to start
:rtype: list
:return: A list of the instances started
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('StartInstances', params,
[('item', Instance)], verb='POST') | [
"def",
"start_instances",
"(",
"self",
",",
"instance_ids",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"instance_ids",
":",
"self",
".",
"build_list_params",
"(",
"params",
",",
"instance_ids",
",",
"'InstanceId'",
")",
"return",
"self",
".",
"get_list",
"(",
"'StartInstances'",
",",
"params",
",",
"[",
"(",
"'item'",
",",
"Instance",
")",
"]",
",",
"verb",
"=",
"'POST'",
")"
] | Start the instances specified
:type instance_ids: list
:param instance_ids: A list of strings of the Instance IDs to start
:rtype: list
:return: A list of the instances started | [
"Start",
"the",
"instances",
"specified"
] | python | train |
hollenstein/maspy | maspy/featuremethods.py | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/featuremethods.py#L257-L379 | def rtCalibration(fiContainer, allowedRtDev=60, allowedMzDev=2.5,
reference=None, specfiles=None, showPlots=False,
plotDir=None, minIntensity=1e5):
"""Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles.
:ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles`
:ivar allowedRtDev: maximum retention time difference of two features in two runs to be matched
:ivar allowedMzDev: maximum relative m/z difference (in ppm) of two features in two runs to be matched
:ivar showPlots: boolean, True if a plot should be generated which shows the results of the calibration
:ivar plotDir: if not None and showPlots is True, the plots are saved to
this location.
:ivar reference: Can be used to specifically specify a reference specfile
:ivar specfiles: Limit alignment to those specfiles in the fiContainer
:ivar minIntensity: consider only features with an intensity above this value
"""
#TODO: long function, maybe split into subfunctions
specfiles = [_ for _ in viewkeys(fiContainer.info)] if specfiles is None else specfiles
matchCharge = True
refMzKey = 'mz'
mzKey = 'mz'
if reference is not None:
if reference in specfiles:
specfiles = [reference] + list(set(specfiles).difference(set([reference])))
else:
print('Specified reference specfile not present, using reference: ', specfiles[0])
for featureItem in fiContainer.getItems(specfiles=specfiles):
if not hasattr(featureItem, 'obsRt'):
setattr(featureItem, 'obsRt', featureItem.rt)
referenceArrays = None
for specfile in specfiles:
featureArrays = fiContainer.getArrays(['rt', 'charge', 'mz', 'intensity'],
specfiles=specfile, sort='rt'
)
if minIntensity is not None:
intensityMask = (featureArrays['intensity'] > minIntensity)
for key in list(viewkeys(featureArrays)):
featureArrays[key] = featureArrays[key][intensityMask]
if referenceArrays is None:
referenceArrays = featureArrays
if showPlots:
print('Reference: '+specfile)
continue
rtPosList = list()
rtDevList = list()
mzDevRelList = list()
mzDevAbsList = list()
for featurePos in range(len(featureArrays[mzKey])):
currRt = featureArrays['rt'][featurePos]
currMz = featureArrays[mzKey][featurePos]
currZ = featureArrays['charge'][featurePos]
mzLimitUp = currMz*(1+allowedMzDev*1E-6)
mzLimitLow = currMz*(1-allowedMzDev*1E-6)
rtLimitUp = currRt+allowedRtDev
rtLimitLow = currRt-allowedRtDev
posL = bisect.bisect_left(referenceArrays['rt'], rtLimitLow)
posU = bisect.bisect_right(referenceArrays['rt'], rtLimitUp)
refMask = (referenceArrays[refMzKey][posL:posU] <= mzLimitUp) & (referenceArrays[refMzKey][posL:posU] >= mzLimitLow)
if matchCharge:
refMask = refMask & (referenceArrays['charge'][posL:posU] == currZ)
currMzDev = abs(referenceArrays[refMzKey][posL:posU][refMask] - currMz)
bestHitMask = currMzDev.argsort()
for refRt, refMz in zip(referenceArrays['rt'][posL:posU][refMask][bestHitMask],
referenceArrays[refMzKey][posL:posU][refMask][bestHitMask]):
rtPosList.append(currRt)
rtDevList.append(currRt - refRt)
mzDevRelList.append((1 - currMz / refMz)*1E6)
mzDevAbsList.append(currMz - refMz)
break
rtPosList = numpy.array(rtPosList)
rtDevList = numpy.array(rtDevList)
splineInitialKnots = int(max(rtPosList) - min(rtPosList))
dataFit = aux.DataFit(rtDevList, rtPosList)
dataFit.splineInitialKnots = splineInitialKnots
dataFit.splineTerminalExpansion = 0.2
dataFit.processInput(dataAveraging='median', windowSize=10)
dataFit.generateSplines()
if showPlots:
corrDevArr = rtDevList - dataFit.corrArray(rtPosList)
timePoints = [min(rtPosList) + x for x in range(int(max(rtPosList)-min(rtPosList)))]
corrValues = dataFit.corrArray(timePoints)
fig, ax = plt.subplots(3, 2, sharex=False, sharey=False, figsize=(20, 18))
fig.suptitle(specfile)
ax[0][0].hist(rtDevList, bins=100, color='grey', alpha=0.5, label='observed')
ax[0][0].hist(corrDevArr, bins=100, color='red', alpha=0.5, label='corrected')
ax[0][0].set_title('Retention time deviation')
ax[0][0].legend()
ax[0][0].set_xlim(allowedRtDev*-1, allowedRtDev)
ax[0][1].hist(mzDevRelList, bins=100, color='grey')
ax[0][1].set_title('Mz deviation [ppm]')
ax[1][0].scatter(rtPosList, rtDevList, color='grey', alpha=0.1, label='observed')
ax[1][0].plot(timePoints,corrValues, color='red', alpha=0.5, label='correction function')
ax[1][0].set_title('Retention time deviation over time')
ax[1][0].legend()
ax[1][0].set_ylim(allowedRtDev*-1, allowedRtDev)
ax[1][1].scatter(rtPosList, mzDevRelList, color='grey', alpha=0.1)
ax[1][1].set_title('Mz deviation over time')
ax[1][1].set_ylim(allowedMzDev*-1, allowedMzDev)
ax[2][0].scatter(rtPosList, corrDevArr, color='grey', alpha=0.1)
ax[2][0].set_title('Aligned retention time deviation over time')
ax[2][0].set_ylim(allowedRtDev*-1, allowedRtDev)
if plotDir is not None:
plotloc = aux.joinpath(plotDir, specfile+'.rtAlign.png')
fig.savefig(plotloc)
else:
fig.show()
featureArrays = fiContainer.getArrays(['rt'], specfiles=specfile, sort='rt')
featureArrays['corrRt'] = featureArrays['rt'] - dataFit.corrArray(featureArrays['rt'])
for featureId, corrRt, rt in zip(featureArrays['id'], featureArrays['corrRt'], featureArrays['rt']):
fiContainer.container[specfile][featureId].rt = corrRt | [
"def",
"rtCalibration",
"(",
"fiContainer",
",",
"allowedRtDev",
"=",
"60",
",",
"allowedMzDev",
"=",
"2.5",
",",
"reference",
"=",
"None",
",",
"specfiles",
"=",
"None",
",",
"showPlots",
"=",
"False",
",",
"plotDir",
"=",
"None",
",",
"minIntensity",
"=",
"1e5",
")",
":",
"#TODO: long function, maybe split into subfunctions",
"specfiles",
"=",
"[",
"_",
"for",
"_",
"in",
"viewkeys",
"(",
"fiContainer",
".",
"info",
")",
"]",
"if",
"specfiles",
"is",
"None",
"else",
"specfiles",
"matchCharge",
"=",
"True",
"refMzKey",
"=",
"'mz'",
"mzKey",
"=",
"'mz'",
"if",
"reference",
"is",
"not",
"None",
":",
"if",
"reference",
"in",
"specfiles",
":",
"specfiles",
"=",
"[",
"reference",
"]",
"+",
"list",
"(",
"set",
"(",
"specfiles",
")",
".",
"difference",
"(",
"set",
"(",
"[",
"reference",
"]",
")",
")",
")",
"else",
":",
"print",
"(",
"'Specified reference specfile not present, using reference: '",
",",
"specfiles",
"[",
"0",
"]",
")",
"for",
"featureItem",
"in",
"fiContainer",
".",
"getItems",
"(",
"specfiles",
"=",
"specfiles",
")",
":",
"if",
"not",
"hasattr",
"(",
"featureItem",
",",
"'obsRt'",
")",
":",
"setattr",
"(",
"featureItem",
",",
"'obsRt'",
",",
"featureItem",
".",
"rt",
")",
"referenceArrays",
"=",
"None",
"for",
"specfile",
"in",
"specfiles",
":",
"featureArrays",
"=",
"fiContainer",
".",
"getArrays",
"(",
"[",
"'rt'",
",",
"'charge'",
",",
"'mz'",
",",
"'intensity'",
"]",
",",
"specfiles",
"=",
"specfile",
",",
"sort",
"=",
"'rt'",
")",
"if",
"minIntensity",
"is",
"not",
"None",
":",
"intensityMask",
"=",
"(",
"featureArrays",
"[",
"'intensity'",
"]",
">",
"minIntensity",
")",
"for",
"key",
"in",
"list",
"(",
"viewkeys",
"(",
"featureArrays",
")",
")",
":",
"featureArrays",
"[",
"key",
"]",
"=",
"featureArrays",
"[",
"key",
"]",
"[",
"intensityMask",
"]",
"if",
"referenceArrays",
"is",
"None",
":",
"referenceArrays",
"=",
"featureArrays",
"if",
"showPlots",
":",
"print",
"(",
"'Reference: '",
"+",
"specfile",
")",
"continue",
"rtPosList",
"=",
"list",
"(",
")",
"rtDevList",
"=",
"list",
"(",
")",
"mzDevRelList",
"=",
"list",
"(",
")",
"mzDevAbsList",
"=",
"list",
"(",
")",
"for",
"featurePos",
"in",
"range",
"(",
"len",
"(",
"featureArrays",
"[",
"mzKey",
"]",
")",
")",
":",
"currRt",
"=",
"featureArrays",
"[",
"'rt'",
"]",
"[",
"featurePos",
"]",
"currMz",
"=",
"featureArrays",
"[",
"mzKey",
"]",
"[",
"featurePos",
"]",
"currZ",
"=",
"featureArrays",
"[",
"'charge'",
"]",
"[",
"featurePos",
"]",
"mzLimitUp",
"=",
"currMz",
"*",
"(",
"1",
"+",
"allowedMzDev",
"*",
"1E-6",
")",
"mzLimitLow",
"=",
"currMz",
"*",
"(",
"1",
"-",
"allowedMzDev",
"*",
"1E-6",
")",
"rtLimitUp",
"=",
"currRt",
"+",
"allowedRtDev",
"rtLimitLow",
"=",
"currRt",
"-",
"allowedRtDev",
"posL",
"=",
"bisect",
".",
"bisect_left",
"(",
"referenceArrays",
"[",
"'rt'",
"]",
",",
"rtLimitLow",
")",
"posU",
"=",
"bisect",
".",
"bisect_right",
"(",
"referenceArrays",
"[",
"'rt'",
"]",
",",
"rtLimitUp",
")",
"refMask",
"=",
"(",
"referenceArrays",
"[",
"refMzKey",
"]",
"[",
"posL",
":",
"posU",
"]",
"<=",
"mzLimitUp",
")",
"&",
"(",
"referenceArrays",
"[",
"refMzKey",
"]",
"[",
"posL",
":",
"posU",
"]",
">=",
"mzLimitLow",
")",
"if",
"matchCharge",
":",
"refMask",
"=",
"refMask",
"&",
"(",
"referenceArrays",
"[",
"'charge'",
"]",
"[",
"posL",
":",
"posU",
"]",
"==",
"currZ",
")",
"currMzDev",
"=",
"abs",
"(",
"referenceArrays",
"[",
"refMzKey",
"]",
"[",
"posL",
":",
"posU",
"]",
"[",
"refMask",
"]",
"-",
"currMz",
")",
"bestHitMask",
"=",
"currMzDev",
".",
"argsort",
"(",
")",
"for",
"refRt",
",",
"refMz",
"in",
"zip",
"(",
"referenceArrays",
"[",
"'rt'",
"]",
"[",
"posL",
":",
"posU",
"]",
"[",
"refMask",
"]",
"[",
"bestHitMask",
"]",
",",
"referenceArrays",
"[",
"refMzKey",
"]",
"[",
"posL",
":",
"posU",
"]",
"[",
"refMask",
"]",
"[",
"bestHitMask",
"]",
")",
":",
"rtPosList",
".",
"append",
"(",
"currRt",
")",
"rtDevList",
".",
"append",
"(",
"currRt",
"-",
"refRt",
")",
"mzDevRelList",
".",
"append",
"(",
"(",
"1",
"-",
"currMz",
"/",
"refMz",
")",
"*",
"1E6",
")",
"mzDevAbsList",
".",
"append",
"(",
"currMz",
"-",
"refMz",
")",
"break",
"rtPosList",
"=",
"numpy",
".",
"array",
"(",
"rtPosList",
")",
"rtDevList",
"=",
"numpy",
".",
"array",
"(",
"rtDevList",
")",
"splineInitialKnots",
"=",
"int",
"(",
"max",
"(",
"rtPosList",
")",
"-",
"min",
"(",
"rtPosList",
")",
")",
"dataFit",
"=",
"aux",
".",
"DataFit",
"(",
"rtDevList",
",",
"rtPosList",
")",
"dataFit",
".",
"splineInitialKnots",
"=",
"splineInitialKnots",
"dataFit",
".",
"splineTerminalExpansion",
"=",
"0.2",
"dataFit",
".",
"processInput",
"(",
"dataAveraging",
"=",
"'median'",
",",
"windowSize",
"=",
"10",
")",
"dataFit",
".",
"generateSplines",
"(",
")",
"if",
"showPlots",
":",
"corrDevArr",
"=",
"rtDevList",
"-",
"dataFit",
".",
"corrArray",
"(",
"rtPosList",
")",
"timePoints",
"=",
"[",
"min",
"(",
"rtPosList",
")",
"+",
"x",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"max",
"(",
"rtPosList",
")",
"-",
"min",
"(",
"rtPosList",
")",
")",
")",
"]",
"corrValues",
"=",
"dataFit",
".",
"corrArray",
"(",
"timePoints",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"3",
",",
"2",
",",
"sharex",
"=",
"False",
",",
"sharey",
"=",
"False",
",",
"figsize",
"=",
"(",
"20",
",",
"18",
")",
")",
"fig",
".",
"suptitle",
"(",
"specfile",
")",
"ax",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"hist",
"(",
"rtDevList",
",",
"bins",
"=",
"100",
",",
"color",
"=",
"'grey'",
",",
"alpha",
"=",
"0.5",
",",
"label",
"=",
"'observed'",
")",
"ax",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"hist",
"(",
"corrDevArr",
",",
"bins",
"=",
"100",
",",
"color",
"=",
"'red'",
",",
"alpha",
"=",
"0.5",
",",
"label",
"=",
"'corrected'",
")",
"ax",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"set_title",
"(",
"'Retention time deviation'",
")",
"ax",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"legend",
"(",
")",
"ax",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"set_xlim",
"(",
"allowedRtDev",
"*",
"-",
"1",
",",
"allowedRtDev",
")",
"ax",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"hist",
"(",
"mzDevRelList",
",",
"bins",
"=",
"100",
",",
"color",
"=",
"'grey'",
")",
"ax",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"set_title",
"(",
"'Mz deviation [ppm]'",
")",
"ax",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"scatter",
"(",
"rtPosList",
",",
"rtDevList",
",",
"color",
"=",
"'grey'",
",",
"alpha",
"=",
"0.1",
",",
"label",
"=",
"'observed'",
")",
"ax",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"plot",
"(",
"timePoints",
",",
"corrValues",
",",
"color",
"=",
"'red'",
",",
"alpha",
"=",
"0.5",
",",
"label",
"=",
"'correction function'",
")",
"ax",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"set_title",
"(",
"'Retention time deviation over time'",
")",
"ax",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"legend",
"(",
")",
"ax",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"set_ylim",
"(",
"allowedRtDev",
"*",
"-",
"1",
",",
"allowedRtDev",
")",
"ax",
"[",
"1",
"]",
"[",
"1",
"]",
".",
"scatter",
"(",
"rtPosList",
",",
"mzDevRelList",
",",
"color",
"=",
"'grey'",
",",
"alpha",
"=",
"0.1",
")",
"ax",
"[",
"1",
"]",
"[",
"1",
"]",
".",
"set_title",
"(",
"'Mz deviation over time'",
")",
"ax",
"[",
"1",
"]",
"[",
"1",
"]",
".",
"set_ylim",
"(",
"allowedMzDev",
"*",
"-",
"1",
",",
"allowedMzDev",
")",
"ax",
"[",
"2",
"]",
"[",
"0",
"]",
".",
"scatter",
"(",
"rtPosList",
",",
"corrDevArr",
",",
"color",
"=",
"'grey'",
",",
"alpha",
"=",
"0.1",
")",
"ax",
"[",
"2",
"]",
"[",
"0",
"]",
".",
"set_title",
"(",
"'Aligned retention time deviation over time'",
")",
"ax",
"[",
"2",
"]",
"[",
"0",
"]",
".",
"set_ylim",
"(",
"allowedRtDev",
"*",
"-",
"1",
",",
"allowedRtDev",
")",
"if",
"plotDir",
"is",
"not",
"None",
":",
"plotloc",
"=",
"aux",
".",
"joinpath",
"(",
"plotDir",
",",
"specfile",
"+",
"'.rtAlign.png'",
")",
"fig",
".",
"savefig",
"(",
"plotloc",
")",
"else",
":",
"fig",
".",
"show",
"(",
")",
"featureArrays",
"=",
"fiContainer",
".",
"getArrays",
"(",
"[",
"'rt'",
"]",
",",
"specfiles",
"=",
"specfile",
",",
"sort",
"=",
"'rt'",
")",
"featureArrays",
"[",
"'corrRt'",
"]",
"=",
"featureArrays",
"[",
"'rt'",
"]",
"-",
"dataFit",
".",
"corrArray",
"(",
"featureArrays",
"[",
"'rt'",
"]",
")",
"for",
"featureId",
",",
"corrRt",
",",
"rt",
"in",
"zip",
"(",
"featureArrays",
"[",
"'id'",
"]",
",",
"featureArrays",
"[",
"'corrRt'",
"]",
",",
"featureArrays",
"[",
"'rt'",
"]",
")",
":",
"fiContainer",
".",
"container",
"[",
"specfile",
"]",
"[",
"featureId",
"]",
".",
"rt",
"=",
"corrRt"
] | Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles.
:ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles`
:ivar allowedRtDev: maximum retention time difference of two features in two runs to be matched
:ivar allowedMzDev: maximum relative m/z difference (in ppm) of two features in two runs to be matched
:ivar showPlots: boolean, True if a plot should be generated which shows the results of the calibration
:ivar plotDir: if not None and showPlots is True, the plots are saved to
this location.
:ivar reference: Can be used to specifically specify a reference specfile
:ivar specfiles: Limit alignment to those specfiles in the fiContainer
:ivar minIntensity: consider only features with an intensity above this value | [
"Performs",
"a",
"retention",
"time",
"calibration",
"between",
":",
"class",
":",
"FeatureItem",
"of",
"multiple",
"specfiles",
"."
] | python | train |
grycap/cpyutils | evaluate.py | https://github.com/grycap/cpyutils/blob/fa966fc6d2ae1e1e799e19941561aa79b617f1b1/evaluate.py#L599-L605 | def p_l_expression(self, p):
''' l : expression
'''
_LOGGER.debug("l -> expresion")
l = TypedList( [ p[1] ] )
p[0] = l | [
"def",
"p_l_expression",
"(",
"self",
",",
"p",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"l -> expresion\"",
")",
"l",
"=",
"TypedList",
"(",
"[",
"p",
"[",
"1",
"]",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"l"
] | l : expression | [
"l",
":",
"expression"
] | python | train |
taborlab/FlowCal | FlowCal/transform.py | https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/transform.py#L81-L247 | def to_rfi(data,
channels=None,
amplification_type=None,
amplifier_gain=None,
resolution=None):
"""
Transform flow cytometry data to Relative Fluorescence Units (RFI).
If ``amplification_type[0]`` is different from zero, data has been
taken using a log amplifier. Therefore, to transform to RFI, the
following operation is applied::
y = a[1]*10^(a[0] * (x/r))
Where ``x`` and ``y`` are the original and transformed data,
respectively; ``a`` is `amplification_type` argument, and ``r`` is
`resolution`. This will transform flow cytometry data taken with a log
amplifier and an ADC of range ``r`` to linear RFIs, such
that it covers ``a[0]`` decades of signal with a minimum value of
``a[1]``.
If ``amplification_type[0]==0``, however, a linear amplifier has been
used and the following operation is applied instead::
y = x/g
Where ``g`` is `amplifier_gain`. This will transform flow cytometry
data taken with a linear amplifier of gain ``g`` back to RFIs.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str, optional
Channels on which to perform the transformation. If `channels` is
None, perform transformation in all channels.
amplification_type : tuple or list of tuple
The amplification type of the specified channel(s). This should be
reported as a tuple, in which the first element indicates how many
decades the logarithmic amplifier covers, and the second indicates
the linear value that corresponds to a channel value of zero. If
the first element is zero, the amplification type is linear. This
is similar to the $PnE keyword from the FCS standard. If None, take
`amplification_type` from ``data.amplification_type(channel)``.
amplifier_gain : float or list of floats, optional
The linear amplifier gain of the specified channel(s). Only used if
``amplification_type[0]==0`` (linear amplifier). If None,
take `amplifier_gain` from ``data.amplifier_gain(channel)``. If
`data` does not contain ``amplifier_gain()``, use 1.0.
resolution : int, float, or list of int or float, optional
Maximum range, for each specified channel. Only needed if
``amplification_type[0]!=0`` (log amplifier). If None, take
`resolution` from ``len(data.domain(channel))``.
Returns
-------
FCSData or numpy array
NxD transformed flow cytometry data.
"""
# Default: all channels
if channels is None:
channels = range(data.shape[1])
if not (hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types)):
# If channels is not an iterable, convert it, along with resolution,
# amplification_type, and amplifier_gain.
channels = [channels]
amplification_type = [amplification_type]
amplifier_gain = [amplifier_gain]
resolution = [resolution]
else:
# If channels is an iterable, check that the other attributes are either
# None, or iterables of the same length.
if amplification_type is None:
# If None, propagate None for all channels
amplification_type = [None]*len(channels)
elif hasattr(amplification_type, '__iter__'):
# If it's a list, it should be the same length as channels
if len(amplification_type) != len(channels):
raise ValueError("channels and amplification_type should have "
"the same length")
else:
# If it's not a list or None, raise error
raise ValueError("channels and amplification_type should have the "
"same length")
if amplifier_gain is None:
# If None, propagate None for all channels
amplifier_gain = [None]*len(channels)
elif hasattr(amplifier_gain, '__iter__'):
# If it's a list, it should be the same length as channels
if len(amplifier_gain) != len(channels):
raise ValueError("channels and amplifier_gain should have "
"the same length")
else:
# If it's not a list or None, raise error
raise ValueError("channels and amplifier_gain should have the "
"same length")
if resolution is None:
# If None, propagate None for all channels
resolution = [None]*len(channels)
elif hasattr(resolution, '__iter__'):
# If it's a list, it should be the same length as channels
if len(resolution) != len(channels):
raise ValueError("channels and resolution should have "
"the same length")
else:
# If it's not a list or None, raise error
raise ValueError("channels and resolution should have the "
"same length")
# Convert channels to integers
if hasattr(data, '_name_to_index'):
channels = data._name_to_index(channels)
else:
channels = channels
# Copy data array
data_t = data.copy().astype(np.float64)
# Iterate over channels
for channel, r, at, ag in \
zip(channels, resolution, amplification_type, amplifier_gain):
# If amplification type is None, try to obtain from data
if at is None:
if hasattr(data, 'amplification_type'):
at = data.amplification_type(channel)
else:
raise ValueError('amplification_type should be specified')
# Define transformation, depending on at[0]
if at[0]==0:
# Linear amplifier
# If no amplifier gain has been specified, try to obtain from data,
# otherwise assume one
if ag is None:
if hasattr(data, 'amplifier_gain') and \
hasattr(data.amplifier_gain, '__call__'):
ag = data.amplifier_gain(channel)
# If the linear gain has not been specified, it should be
# assumed to be one.
if ag is None:
ag = 1.
else:
ag = 1.
tf = lambda x: x/ag
else:
# Log amplifier
# If no range has been specified, try to obtain from data.
if r is None:
if hasattr(data, 'resolution'):
r = data.resolution(channel)
else:
raise ValueError('range should be specified')
tf = lambda x: at[1] * 10**(at[0]/float(r) * x)
# Apply transformation to event list
data_t[:,channel] = tf(data_t[:,channel])
# Apply transformation to range
if hasattr(data_t, '_range') and data_t._range[channel] is not None:
data_t._range[channel] = [tf(data_t._range[channel][0]),
tf(data_t._range[channel][1])]
return data_t | [
"def",
"to_rfi",
"(",
"data",
",",
"channels",
"=",
"None",
",",
"amplification_type",
"=",
"None",
",",
"amplifier_gain",
"=",
"None",
",",
"resolution",
"=",
"None",
")",
":",
"# Default: all channels",
"if",
"channels",
"is",
"None",
":",
"channels",
"=",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
"if",
"not",
"(",
"hasattr",
"(",
"channels",
",",
"'__iter__'",
")",
"and",
"not",
"isinstance",
"(",
"channels",
",",
"six",
".",
"string_types",
")",
")",
":",
"# If channels is not an iterable, convert it, along with resolution,",
"# amplification_type, and amplifier_gain.",
"channels",
"=",
"[",
"channels",
"]",
"amplification_type",
"=",
"[",
"amplification_type",
"]",
"amplifier_gain",
"=",
"[",
"amplifier_gain",
"]",
"resolution",
"=",
"[",
"resolution",
"]",
"else",
":",
"# If channels is an iterable, check that the other attributes are either",
"# None, or iterables of the same length.",
"if",
"amplification_type",
"is",
"None",
":",
"# If None, propagate None for all channels",
"amplification_type",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"channels",
")",
"elif",
"hasattr",
"(",
"amplification_type",
",",
"'__iter__'",
")",
":",
"# If it's a list, it should be the same length as channels",
"if",
"len",
"(",
"amplification_type",
")",
"!=",
"len",
"(",
"channels",
")",
":",
"raise",
"ValueError",
"(",
"\"channels and amplification_type should have \"",
"\"the same length\"",
")",
"else",
":",
"# If it's not a list or None, raise error",
"raise",
"ValueError",
"(",
"\"channels and amplification_type should have the \"",
"\"same length\"",
")",
"if",
"amplifier_gain",
"is",
"None",
":",
"# If None, propagate None for all channels",
"amplifier_gain",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"channels",
")",
"elif",
"hasattr",
"(",
"amplifier_gain",
",",
"'__iter__'",
")",
":",
"# If it's a list, it should be the same length as channels",
"if",
"len",
"(",
"amplifier_gain",
")",
"!=",
"len",
"(",
"channels",
")",
":",
"raise",
"ValueError",
"(",
"\"channels and amplifier_gain should have \"",
"\"the same length\"",
")",
"else",
":",
"# If it's not a list or None, raise error",
"raise",
"ValueError",
"(",
"\"channels and amplifier_gain should have the \"",
"\"same length\"",
")",
"if",
"resolution",
"is",
"None",
":",
"# If None, propagate None for all channels",
"resolution",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"channels",
")",
"elif",
"hasattr",
"(",
"resolution",
",",
"'__iter__'",
")",
":",
"# If it's a list, it should be the same length as channels",
"if",
"len",
"(",
"resolution",
")",
"!=",
"len",
"(",
"channels",
")",
":",
"raise",
"ValueError",
"(",
"\"channels and resolution should have \"",
"\"the same length\"",
")",
"else",
":",
"# If it's not a list or None, raise error",
"raise",
"ValueError",
"(",
"\"channels and resolution should have the \"",
"\"same length\"",
")",
"# Convert channels to integers",
"if",
"hasattr",
"(",
"data",
",",
"'_name_to_index'",
")",
":",
"channels",
"=",
"data",
".",
"_name_to_index",
"(",
"channels",
")",
"else",
":",
"channels",
"=",
"channels",
"# Copy data array",
"data_t",
"=",
"data",
".",
"copy",
"(",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# Iterate over channels",
"for",
"channel",
",",
"r",
",",
"at",
",",
"ag",
"in",
"zip",
"(",
"channels",
",",
"resolution",
",",
"amplification_type",
",",
"amplifier_gain",
")",
":",
"# If amplification type is None, try to obtain from data",
"if",
"at",
"is",
"None",
":",
"if",
"hasattr",
"(",
"data",
",",
"'amplification_type'",
")",
":",
"at",
"=",
"data",
".",
"amplification_type",
"(",
"channel",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'amplification_type should be specified'",
")",
"# Define transformation, depending on at[0]",
"if",
"at",
"[",
"0",
"]",
"==",
"0",
":",
"# Linear amplifier",
"# If no amplifier gain has been specified, try to obtain from data,",
"# otherwise assume one",
"if",
"ag",
"is",
"None",
":",
"if",
"hasattr",
"(",
"data",
",",
"'amplifier_gain'",
")",
"and",
"hasattr",
"(",
"data",
".",
"amplifier_gain",
",",
"'__call__'",
")",
":",
"ag",
"=",
"data",
".",
"amplifier_gain",
"(",
"channel",
")",
"# If the linear gain has not been specified, it should be",
"# assumed to be one.",
"if",
"ag",
"is",
"None",
":",
"ag",
"=",
"1.",
"else",
":",
"ag",
"=",
"1.",
"tf",
"=",
"lambda",
"x",
":",
"x",
"/",
"ag",
"else",
":",
"# Log amplifier",
"# If no range has been specified, try to obtain from data.",
"if",
"r",
"is",
"None",
":",
"if",
"hasattr",
"(",
"data",
",",
"'resolution'",
")",
":",
"r",
"=",
"data",
".",
"resolution",
"(",
"channel",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'range should be specified'",
")",
"tf",
"=",
"lambda",
"x",
":",
"at",
"[",
"1",
"]",
"*",
"10",
"**",
"(",
"at",
"[",
"0",
"]",
"/",
"float",
"(",
"r",
")",
"*",
"x",
")",
"# Apply transformation to event list",
"data_t",
"[",
":",
",",
"channel",
"]",
"=",
"tf",
"(",
"data_t",
"[",
":",
",",
"channel",
"]",
")",
"# Apply transformation to range",
"if",
"hasattr",
"(",
"data_t",
",",
"'_range'",
")",
"and",
"data_t",
".",
"_range",
"[",
"channel",
"]",
"is",
"not",
"None",
":",
"data_t",
".",
"_range",
"[",
"channel",
"]",
"=",
"[",
"tf",
"(",
"data_t",
".",
"_range",
"[",
"channel",
"]",
"[",
"0",
"]",
")",
",",
"tf",
"(",
"data_t",
".",
"_range",
"[",
"channel",
"]",
"[",
"1",
"]",
")",
"]",
"return",
"data_t"
] | Transform flow cytometry data to Relative Fluorescence Units (RFI).
If ``amplification_type[0]`` is different from zero, data has been
taken using a log amplifier. Therefore, to transform to RFI, the
following operation is applied::
y = a[1]*10^(a[0] * (x/r))
Where ``x`` and ``y`` are the original and transformed data,
respectively; ``a`` is `amplification_type` argument, and ``r`` is
`resolution`. This will transform flow cytometry data taken with a log
amplifier and an ADC of range ``r`` to linear RFIs, such
that it covers ``a[0]`` decades of signal with a minimum value of
``a[1]``.
If ``amplification_type[0]==0``, however, a linear amplifier has been
used and the following operation is applied instead::
y = x/g
Where ``g`` is `amplifier_gain`. This will transform flow cytometry
data taken with a linear amplifier of gain ``g`` back to RFIs.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str, optional
Channels on which to perform the transformation. If `channels` is
None, perform transformation in all channels.
amplification_type : tuple or list of tuple
The amplification type of the specified channel(s). This should be
reported as a tuple, in which the first element indicates how many
decades the logarithmic amplifier covers, and the second indicates
the linear value that corresponds to a channel value of zero. If
the first element is zero, the amplification type is linear. This
is similar to the $PnE keyword from the FCS standard. If None, take
`amplification_type` from ``data.amplification_type(channel)``.
amplifier_gain : float or list of floats, optional
The linear amplifier gain of the specified channel(s). Only used if
``amplification_type[0]==0`` (linear amplifier). If None,
take `amplifier_gain` from ``data.amplifier_gain(channel)``. If
`data` does not contain ``amplifier_gain()``, use 1.0.
resolution : int, float, or list of int or float, optional
Maximum range, for each specified channel. Only needed if
``amplification_type[0]!=0`` (log amplifier). If None, take
`resolution` from ``len(data.domain(channel))``.
Returns
-------
FCSData or numpy array
NxD transformed flow cytometry data. | [
"Transform",
"flow",
"cytometry",
"data",
"to",
"Relative",
"Fluorescence",
"Units",
"(",
"RFI",
")",
"."
] | python | train |
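A quick numeric check of the two branches described in the `to_rfi` docstring above, using made-up amplifier settings (a 4-decade log amplifier with minimum 1, a 10-bit range, and a linear gain of 64; none of these values come from the row itself):

```python
# Log-amplifier branch: y = a[1] * 10 ** (a[0] * (x / r))
decades, minimum = 4.0, 1.0     # amplification_type a = (4, 1), assumed values
resolution = 1024.0             # ADC range r, assumed 10-bit
x = 512.0                       # raw channel value at mid-scale
print(minimum * 10 ** (decades * (x / resolution)))   # 100.0 (half of 4 decades)

# Linear-amplifier branch: y = x / g
gain = 64.0                     # assumed amplifier gain g
print(x / gain)                 # 8.0
```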
uogbuji/versa | tools/py/reader/__init__.py | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/reader/__init__.py#L6-L16 | def statement_prep(link):
'''
Prepare a statement into a triple ready for rdflib
'''
from rdflib import URIRef, Literal
from rdflib import BNode
s, p, o = link[:3]
if not isinstance(s, BNode): s = URIRef(s)
p = URIRef(p)
if not isinstance(o, BNode): o = URIRef(o) if isinstance(o, I) else Literal(o)
return s, p, o | [
"def",
"statement_prep",
"(",
"link",
")",
":",
"from",
"rdflib",
"import",
"URIRef",
",",
"Literal",
"from",
"rdflib",
"import",
"BNode",
"s",
",",
"p",
",",
"o",
"=",
"link",
"[",
":",
"3",
"]",
"if",
"not",
"isinstance",
"(",
"s",
",",
"BNode",
")",
":",
"s",
"=",
"URIRef",
"(",
"s",
")",
"p",
"=",
"URIRef",
"(",
"p",
")",
"if",
"not",
"isinstance",
"(",
"o",
",",
"BNode",
")",
":",
"o",
"=",
"URIRef",
"(",
"o",
")",
"if",
"isinstance",
"(",
"o",
",",
"I",
")",
"else",
"Literal",
"(",
"o",
")",
"return",
"s",
",",
"p",
",",
"o"
] | Prepare a statement into a triple ready for rdflib | [
"Prepare",
"a",
"statement",
"into",
"a",
"triple",
"ready",
"for",
"rdflib"
] | python | train |
mushkevych/scheduler | synergy/system/time_helper.py | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/time_helper.py#L78-L82 | def day_to_month(timeperiod):
""":param timeperiod: as string in YYYYMMDD00 format
:return string in YYYYMM0000 format"""
t = datetime.strptime(timeperiod, SYNERGY_DAILY_PATTERN)
return t.strftime(SYNERGY_MONTHLY_PATTERN) | [
"def",
"day_to_month",
"(",
"timeperiod",
")",
":",
"t",
"=",
"datetime",
".",
"strptime",
"(",
"timeperiod",
",",
"SYNERGY_DAILY_PATTERN",
")",
"return",
"t",
".",
"strftime",
"(",
"SYNERGY_MONTHLY_PATTERN",
")"
] | :param timeperiod: as string in YYYYMMDD00 format
:return string in YYYYMM0000 format | [
":",
"param",
"timeperiod",
":",
"as",
"string",
"in",
"YYYYMMDD00",
"format",
":",
"return",
"string",
"in",
"YYYYMM0000",
"format"
] | python | train |
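The same conversion can be sketched without the synergy constants; the two pattern strings below are assumptions matching the `YYYYMMDD00` / `YYYYMM0000` formats named in the docstring, not the project's actual `SYNERGY_*` values.

```python
from datetime import datetime

DAILY_PATTERN = '%Y%m%d00'     # assumed stand-in for SYNERGY_DAILY_PATTERN
MONTHLY_PATTERN = '%Y%m0000'   # assumed stand-in for SYNERGY_MONTHLY_PATTERN

def day_to_month(timeperiod: str) -> str:
    # Parse the daily period and re-render it at monthly granularity.
    return datetime.strptime(timeperiod, DAILY_PATTERN).strftime(MONTHLY_PATTERN)

print(day_to_month('2023031500'))   # '2023030000'
```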
hcpl/xkbgroup | xkbgroup/core.py | https://github.com/hcpl/xkbgroup/blob/fcf4709a3c8221e0cdf62c09e5cccda232b0104c/xkbgroup/core.py#L149-L197 | def open_display(self):
"""Establishes connection with X server and prepares objects
necessary to retrieve and send data.
"""
self.close_display() # Properly finish previous open_display()
XkbIgnoreExtension(False)
display_name = None
major = c_int(XkbMajorVersion)
minor = c_int(XkbMinorVersion)
reason = c_int()
self._display = XkbOpenDisplay(
display_name,
None, None, byref(major), byref(minor), byref(reason))
if not self._display:
if reason.value in OPEN_DISPLAY_ERRORS:
# Assume POSIX conformance
display_name = os.getenv("DISPLAY") or "default"
raise X11Error(OPEN_DISPLAY_ERRORS[reason.value].format(
libname="xkbgroup",
used_major=XkbMajorVersion,
used_minor=XkbMinorVersion,
found_major=major.value,
found_minor=minor.value,
display_name=display_name)
+ ".")
else:
raise X11Error("Unknown error {} from XkbOpenDisplay.".format(reason.value))
self._keyboard_description = XkbGetMap(self._display, 0, XkbUseCoreKbd)
if not self._keyboard_description:
self.close_display()
raise X11Error("Failed to get keyboard description.")
# Controls mask doesn't affect the availability of xkb->ctrls->num_groups anyway
# Just use a valid value, and xkb->ctrls->num_groups will be definitely set
status = XkbGetControls(self._display, XkbAllControlsMask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_CONTROLS_ERRORS[status] + ".")
names_mask = XkbSymbolsNameMask | XkbGroupNamesMask
status = XkbGetNames(self._display, names_mask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_NAMES_ERRORS[status] + ".") | [
"def",
"open_display",
"(",
"self",
")",
":",
"self",
".",
"close_display",
"(",
")",
"# Properly finish previous open_display()",
"XkbIgnoreExtension",
"(",
"False",
")",
"display_name",
"=",
"None",
"major",
"=",
"c_int",
"(",
"XkbMajorVersion",
")",
"minor",
"=",
"c_int",
"(",
"XkbMinorVersion",
")",
"reason",
"=",
"c_int",
"(",
")",
"self",
".",
"_display",
"=",
"XkbOpenDisplay",
"(",
"display_name",
",",
"None",
",",
"None",
",",
"byref",
"(",
"major",
")",
",",
"byref",
"(",
"minor",
")",
",",
"byref",
"(",
"reason",
")",
")",
"if",
"not",
"self",
".",
"_display",
":",
"if",
"reason",
".",
"value",
"in",
"OPEN_DISPLAY_ERRORS",
":",
"# Assume POSIX conformance",
"display_name",
"=",
"os",
".",
"getenv",
"(",
"\"DISPLAY\"",
")",
"or",
"\"default\"",
"raise",
"X11Error",
"(",
"OPEN_DISPLAY_ERRORS",
"[",
"reason",
".",
"value",
"]",
".",
"format",
"(",
"libname",
"=",
"\"xkbgroup\"",
",",
"used_major",
"=",
"XkbMajorVersion",
",",
"used_minor",
"=",
"XkbMinorVersion",
",",
"found_major",
"=",
"major",
".",
"value",
",",
"found_minor",
"=",
"minor",
".",
"value",
",",
"display_name",
"=",
"display_name",
")",
"+",
"\".\"",
")",
"else",
":",
"raise",
"X11Error",
"(",
"\"Unknown error {} from XkbOpenDisplay.\"",
".",
"format",
"(",
"reason",
".",
"value",
")",
")",
"self",
".",
"_keyboard_description",
"=",
"XkbGetMap",
"(",
"self",
".",
"_display",
",",
"0",
",",
"XkbUseCoreKbd",
")",
"if",
"not",
"self",
".",
"_keyboard_description",
":",
"self",
".",
"close_display",
"(",
")",
"raise",
"X11Error",
"(",
"\"Failed to get keyboard description.\"",
")",
"# Controls mask doesn't affect the availability of xkb->ctrls->num_groups anyway",
"# Just use a valid value, and xkb->ctrls->num_groups will be definitely set",
"status",
"=",
"XkbGetControls",
"(",
"self",
".",
"_display",
",",
"XkbAllControlsMask",
",",
"self",
".",
"_keyboard_description",
")",
"if",
"status",
"!=",
"Success",
":",
"self",
".",
"close_display",
"(",
")",
"raise",
"X11Error",
"(",
"GET_CONTROLS_ERRORS",
"[",
"status",
"]",
"+",
"\".\"",
")",
"names_mask",
"=",
"XkbSymbolsNameMask",
"|",
"XkbGroupNamesMask",
"status",
"=",
"XkbGetNames",
"(",
"self",
".",
"_display",
",",
"names_mask",
",",
"self",
".",
"_keyboard_description",
")",
"if",
"status",
"!=",
"Success",
":",
"self",
".",
"close_display",
"(",
")",
"raise",
"X11Error",
"(",
"GET_NAMES_ERRORS",
"[",
"status",
"]",
"+",
"\".\"",
")"
] | Establishes connection with X server and prepares objects
necessary to retrieve and send data. | [
"Establishes",
"connection",
"with",
"X",
"server",
"and",
"prepares",
"objects",
"necessary",
"to",
"retrieve",
"and",
"send",
"data",
"."
] | python | train |
biosignalsnotebooks/biosignalsnotebooks | biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/peakdelta.py | https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/peakdelta.py#L15-L81 | def peakdelta(v, delta, x=None):
"""
Returns two arrays
function [maxtab, mintab]=peakdelta(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = peakdelta(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = peakdelta(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab) | [
"def",
"peakdelta",
"(",
"v",
",",
"delta",
",",
"x",
"=",
"None",
")",
":",
"maxtab",
"=",
"[",
"]",
"mintab",
"=",
"[",
"]",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"arange",
"(",
"len",
"(",
"v",
")",
")",
"v",
"=",
"asarray",
"(",
"v",
")",
"if",
"len",
"(",
"v",
")",
"!=",
"len",
"(",
"x",
")",
":",
"sys",
".",
"exit",
"(",
"'Input vectors v and x must have same length'",
")",
"if",
"not",
"isscalar",
"(",
"delta",
")",
":",
"sys",
".",
"exit",
"(",
"'Input argument delta must be a scalar'",
")",
"if",
"delta",
"<=",
"0",
":",
"sys",
".",
"exit",
"(",
"'Input argument delta must be positive'",
")",
"mn",
",",
"mx",
"=",
"Inf",
",",
"-",
"Inf",
"mnpos",
",",
"mxpos",
"=",
"NaN",
",",
"NaN",
"lookformax",
"=",
"True",
"for",
"i",
"in",
"arange",
"(",
"len",
"(",
"v",
")",
")",
":",
"this",
"=",
"v",
"[",
"i",
"]",
"if",
"this",
">",
"mx",
":",
"mx",
"=",
"this",
"mxpos",
"=",
"x",
"[",
"i",
"]",
"if",
"this",
"<",
"mn",
":",
"mn",
"=",
"this",
"mnpos",
"=",
"x",
"[",
"i",
"]",
"if",
"lookformax",
":",
"if",
"this",
"<",
"mx",
"-",
"delta",
":",
"maxtab",
".",
"append",
"(",
"(",
"mxpos",
",",
"mx",
")",
")",
"mn",
"=",
"this",
"mnpos",
"=",
"x",
"[",
"i",
"]",
"lookformax",
"=",
"False",
"else",
":",
"if",
"this",
">",
"mn",
"+",
"delta",
":",
"mintab",
".",
"append",
"(",
"(",
"mnpos",
",",
"mn",
")",
")",
"mx",
"=",
"this",
"mxpos",
"=",
"x",
"[",
"i",
"]",
"lookformax",
"=",
"True",
"return",
"array",
"(",
"maxtab",
")",
",",
"array",
"(",
"mintab",
")"
] | Returns two arrays
function [maxtab, mintab]=peakdelta(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = peakdelta(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = peakdelta(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed. | [
"Returns",
"two",
"arrays"
] | python | train |
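A minimal usage sketch for the peakdelta routine shown above (illustrative only: the import path and the sample signal are assumptions, not part of the dataset):

import numpy as np
from novainstrumentation.peakdelta import peakdelta  # hypothetical import path

signal = np.array([0, 1, 3, 1, 0, 2, 5, 2, 0])
maxtab, mintab = peakdelta(signal, delta=1.0)
# maxtab -> array([[2., 3.], [6., 5.]])  (index, value) pairs of detected maxima
# mintab -> array([[4., 0.]])            (index, value) pair of the detected minimum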
minhhoit/yacms | yacms/project_template/fabfile.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/project_template/fabfile.py#L225-L259 | def upload_template_and_reload(name):
"""
Uploads a template only if it has changed, and if so, reload the
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command) | [
"def",
"upload_template_and_reload",
"(",
"name",
")",
":",
"template",
"=",
"get_templates",
"(",
")",
"[",
"name",
"]",
"local_path",
"=",
"template",
"[",
"\"local_path\"",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"local_path",
")",
":",
"project_root",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"local_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_root",
",",
"local_path",
")",
"remote_path",
"=",
"template",
"[",
"\"remote_path\"",
"]",
"reload_command",
"=",
"template",
".",
"get",
"(",
"\"reload_command\"",
")",
"owner",
"=",
"template",
".",
"get",
"(",
"\"owner\"",
")",
"mode",
"=",
"template",
".",
"get",
"(",
"\"mode\"",
")",
"remote_data",
"=",
"\"\"",
"if",
"exists",
"(",
"remote_path",
")",
":",
"with",
"hide",
"(",
"\"stdout\"",
")",
":",
"remote_data",
"=",
"sudo",
"(",
"\"cat %s\"",
"%",
"remote_path",
",",
"show",
"=",
"False",
")",
"with",
"open",
"(",
"local_path",
",",
"\"r\"",
")",
"as",
"f",
":",
"local_data",
"=",
"f",
".",
"read",
"(",
")",
"# Escape all non-string-formatting-placeholder occurrences of '%':",
"local_data",
"=",
"re",
".",
"sub",
"(",
"r\"%(?!\\(\\w+\\)s)\"",
",",
"\"%%\"",
",",
"local_data",
")",
"if",
"\"%(db_pass)s\"",
"in",
"local_data",
":",
"env",
".",
"db_pass",
"=",
"db_pass",
"(",
")",
"local_data",
"%=",
"env",
"clean",
"=",
"lambda",
"s",
":",
"s",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"\\r\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"if",
"clean",
"(",
"remote_data",
")",
"==",
"clean",
"(",
"local_data",
")",
":",
"return",
"upload_template",
"(",
"local_path",
",",
"remote_path",
",",
"env",
",",
"use_sudo",
"=",
"True",
",",
"backup",
"=",
"False",
")",
"if",
"owner",
":",
"sudo",
"(",
"\"chown %s %s\"",
"%",
"(",
"owner",
",",
"remote_path",
")",
")",
"if",
"mode",
":",
"sudo",
"(",
"\"chmod %s %s\"",
"%",
"(",
"mode",
",",
"remote_path",
")",
")",
"if",
"reload_command",
":",
"sudo",
"(",
"reload_command",
")"
] | Uploads a template only if it has changed, and if so, reload the
related service. | [
"Uploads",
"a",
"template",
"only",
"if",
"it",
"has",
"changed",
"and",
"if",
"so",
"reload",
"the",
"related",
"service",
"."
] | python | train |
DistrictDataLabs/yellowbrick | yellowbrick/utils/kneed.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/kneed.py#L155-L199 | def find_knee(self, ):
"""
Finds and returns the "knee" or "elbow" value, the normalized knee
value, and the x value where the knee is located.
"""
if not self.xmx_idx.size:
warning_message = \
'No "knee" or "elbow point" detected ' \
'This could be due to bad clustering, no '\
'actual clusters being formed etc.'
warnings.warn(warning_message,YellowbrickWarning)
return None, None, None
mxmx_iter = np.arange(self.xmx_idx[0], len(self.xsn))
xmx_idx_iter = np.append(self.xmx_idx, len(self.xsn))
knee_, norm_knee_, knee_x = 0.0, 0.0, None
for mxmx_i, mxmx in enumerate(xmx_idx_iter):
# stopping criteria for exhausting array
if mxmx_i == len(xmx_idx_iter) - 1:
break
# indices between maxima/minima
idxs = (mxmx_iter > xmx_idx_iter[mxmx_i]) * \
(mxmx_iter < xmx_idx_iter[mxmx_i + 1])
between_local_mx = mxmx_iter[np.where(idxs)]
for j in between_local_mx:
if j in self.xmn_idx:
# reached a minima, x indices are unique
# only need to check if j is a min
if self.yd[j + 1] > self.yd[j]:
self.Tmx[mxmx_i] = 0
knee_x = None # reset x where yd crossed Tmx
elif self.yd[j + 1] <= self.yd[j]:
warning_message="If this is a minima, " \
"how would you ever get here."
warnings.warn(warning_message, YellowbrickWarning)
if self.yd[j] < self.Tmx[mxmx_i] or self.Tmx[mxmx_i] < 0:
# declare a knee
if not knee_x:
knee_x = j
knee_ = self.x[self.xmx_idx[mxmx_i]]
norm_knee_ = self.xsn[self.xmx_idx[mxmx_i]]
return knee_, norm_knee_, knee_x | [
"def",
"find_knee",
"(",
"self",
",",
")",
":",
"if",
"not",
"self",
".",
"xmx_idx",
".",
"size",
":",
"warning_message",
"=",
"'No \"knee\" or \"elbow point\" detected '",
"'This could be due to bad clustering, no '",
"'actual clusters being formed etc.'",
"warnings",
".",
"warn",
"(",
"warning_message",
",",
"YellowbrickWarning",
")",
"return",
"None",
",",
"None",
",",
"None",
"mxmx_iter",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"xmx_idx",
"[",
"0",
"]",
",",
"len",
"(",
"self",
".",
"xsn",
")",
")",
"xmx_idx_iter",
"=",
"np",
".",
"append",
"(",
"self",
".",
"xmx_idx",
",",
"len",
"(",
"self",
".",
"xsn",
")",
")",
"knee_",
",",
"norm_knee_",
",",
"knee_x",
"=",
"0.0",
",",
"0.0",
",",
"None",
"for",
"mxmx_i",
",",
"mxmx",
"in",
"enumerate",
"(",
"xmx_idx_iter",
")",
":",
"# stopping criteria for exhasuting array",
"if",
"mxmx_i",
"==",
"len",
"(",
"xmx_idx_iter",
")",
"-",
"1",
":",
"break",
"# indices between maxima/minima",
"idxs",
"=",
"(",
"mxmx_iter",
">",
"xmx_idx_iter",
"[",
"mxmx_i",
"]",
")",
"*",
"(",
"mxmx_iter",
"<",
"xmx_idx_iter",
"[",
"mxmx_i",
"+",
"1",
"]",
")",
"between_local_mx",
"=",
"mxmx_iter",
"[",
"np",
".",
"where",
"(",
"idxs",
")",
"]",
"for",
"j",
"in",
"between_local_mx",
":",
"if",
"j",
"in",
"self",
".",
"xmn_idx",
":",
"# reached a minima, x indices are unique",
"# only need to check if j is a min",
"if",
"self",
".",
"yd",
"[",
"j",
"+",
"1",
"]",
">",
"self",
".",
"yd",
"[",
"j",
"]",
":",
"self",
".",
"Tmx",
"[",
"mxmx_i",
"]",
"=",
"0",
"knee_x",
"=",
"None",
"# reset x where yd crossed Tmx",
"elif",
"self",
".",
"yd",
"[",
"j",
"+",
"1",
"]",
"<=",
"self",
".",
"yd",
"[",
"j",
"]",
":",
"warning_message",
"=",
"\"If this is a minima, \"",
"\"how would you ever get here.\"",
"warnings",
".",
"warn",
"(",
"warning_message",
",",
"YellowbrickWarning",
")",
"if",
"self",
".",
"yd",
"[",
"j",
"]",
"<",
"self",
".",
"Tmx",
"[",
"mxmx_i",
"]",
"or",
"self",
".",
"Tmx",
"[",
"mxmx_i",
"]",
"<",
"0",
":",
"# declare a knee",
"if",
"not",
"knee_x",
":",
"knee_x",
"=",
"j",
"knee_",
"=",
"self",
".",
"x",
"[",
"self",
".",
"xmx_idx",
"[",
"mxmx_i",
"]",
"]",
"norm_knee_",
"=",
"self",
".",
"xsn",
"[",
"self",
".",
"xmx_idx",
"[",
"mxmx_i",
"]",
"]",
"return",
"knee_",
",",
"norm_knee_",
",",
"knee_x"
] | Finds and returns the "knee" or "elbow" value, the normalized knee
value, and the x value where the knee is located. | [
"Finds",
"and",
"returns",
"the",
"knee",
"or",
"elbow",
"value",
"the",
"normalized",
"knee",
"value",
"and",
"the",
"x",
"value",
"where",
"the",
"knee",
"is",
"located",
"."
] | python | train |
inveniosoftware-attic/invenio-utils | invenio_utils/html.py | https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/html.py#L681-L693 | def unescape(s, quote=False):
"""
The opposite of the cgi.escape function.
Replace escaped characters '&amp;', '&lt;' and '&gt;' with the corresponding
regular characters. If the optional flag quote is true, the escaped quotation
mark character ('&quot;') is also translated.
"""
s = s.replace('&lt;', '<')
s = s.replace('&gt;', '>')
if quote:
s = s.replace('&quot;', '"')
s = s.replace('&amp;', '&')
return s | [
"def",
"unescape",
"(",
"s",
",",
"quote",
"=",
"False",
")",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'<'",
",",
"'<'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'>'",
",",
"'>'",
")",
"if",
"quote",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'"'",
",",
"'\"'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'&'",
",",
"'&'",
")",
"return",
"s"
] | The opposite of the cgi.escape function.
Replace escaped characters '&amp;', '&lt;' and '&gt;' with the corresponding
regular characters. If the optional flag quote is true, the escaped quotation
mark character ('&quot;') is also translated. | [
"The",
"opposite",
"of",
"the",
"cgi",
".",
"escape",
"function",
".",
"Replace",
"escaped",
"characters",
"&",
";",
"<",
";",
"and",
">",
";",
"with",
"the",
"corresponding",
"regular",
"characters",
".",
"If",
"the",
"optional",
"flag",
"quote",
"is",
"true",
"the",
"escaped",
"quotation",
"mark",
"character",
"(",
""",
";",
")",
"is",
"also",
"translated",
"."
] | python | train |
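A quick interactive sketch of how the unescape helper above behaves (example strings are made-up):

>>> unescape('&lt;p&gt;Tom &amp; Jerry&lt;/p&gt;')
'<p>Tom & Jerry</p>'
>>> unescape('say &quot;hi&quot;')  # quotes are left alone unless quote=True
'say &quot;hi&quot;'
>>> unescape('say &quot;hi&quot;', quote=True)
'say "hi"'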
boolangery/py-lua-parser | luaparser/printers.py | https://github.com/boolangery/py-lua-parser/blob/578f2bf75f6f84c4b52c2affba56a4ec569d7ce7/luaparser/printers.py#L139-L147 | def raw(text):
"""Returns a raw string representation of text"""
new_string = ''
for char in text:
try:
new_string += escape_dict[char]
except KeyError:
new_string += char
return new_string | [
"def",
"raw",
"(",
"text",
")",
":",
"new_string",
"=",
"''",
"for",
"char",
"in",
"text",
":",
"try",
":",
"new_string",
"+=",
"escape_dict",
"[",
"char",
"]",
"except",
"KeyError",
":",
"new_string",
"+=",
"char",
"return",
"new_string"
] | Returns a raw string representation of text | [
"Returns",
"a",
"raw",
"string",
"representation",
"of",
"text"
] | python | train |
usc-isi-i2/etk | examples/html_tables/table_sample.py | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/examples/html_tables/table_sample.py#L17-L39 | def process_document(self, doc):
"""
Add your code for processing the document
"""
d = doc.select_segments("$.raw_content")[0]
tables = doc.extract(self.table_extractor, d)
for t in tables:
doc.store([t], t.tag, group_by_tags=False)
table_data_extractor = EntityTableDataExtraction()
table_data_extractor.add_glossary(etk.load_glossary("./resources/address_dict.txt"), "address")
table_data_extractor.add_glossary(etk.load_glossary("./resources/calibre_dict.txt"), "caliber")
table_data_extractor.add_glossary(etk.load_glossary("./resources/capacity_dict.txt"), "capacity")
table_data_extractor.add_glossary(etk.load_glossary("./resources/manufacturer_dict.txt"), "manufacturer")
table_data_extractor.add_glossary(etk.load_glossary("./resources/price_dict.txt"), "price")
tables = doc.select_segments("$.tables[*]")
for t in tables:
extractions = doc.extract(table_data_extractor, t)
doc.store(extractions, "table_data_extraction")
return list() | [
"def",
"process_document",
"(",
"self",
",",
"doc",
")",
":",
"d",
"=",
"doc",
".",
"select_segments",
"(",
"\"$.raw_content\"",
")",
"[",
"0",
"]",
"tables",
"=",
"doc",
".",
"extract",
"(",
"self",
".",
"table_extractor",
",",
"d",
")",
"for",
"t",
"in",
"tables",
":",
"doc",
".",
"store",
"(",
"[",
"t",
"]",
",",
"t",
".",
"tag",
",",
"group_by_tags",
"=",
"False",
")",
"table_data_extractor",
"=",
"EntityTableDataExtraction",
"(",
")",
"table_data_extractor",
".",
"add_glossary",
"(",
"etk",
".",
"load_glossary",
"(",
"\"./resources/address_dict.txt\"",
")",
",",
"\"address\"",
")",
"table_data_extractor",
".",
"add_glossary",
"(",
"etk",
".",
"load_glossary",
"(",
"\"./resources/calibre_dict.txt\"",
")",
",",
"\"caliber\"",
")",
"table_data_extractor",
".",
"add_glossary",
"(",
"etk",
".",
"load_glossary",
"(",
"\"./resources/capacity_dict.txt\"",
")",
",",
"\"capacity\"",
")",
"table_data_extractor",
".",
"add_glossary",
"(",
"etk",
".",
"load_glossary",
"(",
"\"./resources/manufacturer_dict.txt\"",
")",
",",
"\"manufacturer\"",
")",
"table_data_extractor",
".",
"add_glossary",
"(",
"etk",
".",
"load_glossary",
"(",
"\"./resources/price_dict.txt\"",
")",
",",
"\"price\"",
")",
"tables",
"=",
"doc",
".",
"select_segments",
"(",
"\"$.tables[*]\"",
")",
"for",
"t",
"in",
"tables",
":",
"extractions",
"=",
"doc",
".",
"extract",
"(",
"table_data_extractor",
",",
"t",
")",
"doc",
".",
"store",
"(",
"extractions",
",",
"\"table_data_extraction\"",
")",
"return",
"list",
"(",
")"
] | Add your code for processing the document | [
"Add",
"your",
"code",
"for",
"processing",
"the",
"document"
] | python | train |
dossier/dossier.label | dossier/label/relation_label.py | https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/relation_label.py#L227-L231 | def _value_from_label(self, label):
'''Convert a label into a kvl value.
'''
unser_val = (label.rel_strength.value, label.meta)
return cbor.dumps(unser_val) | [
"def",
"_value_from_label",
"(",
"self",
",",
"label",
")",
":",
"unser_val",
"=",
"(",
"label",
".",
"rel_strength",
".",
"value",
",",
"label",
".",
"meta",
")",
"return",
"cbor",
".",
"dumps",
"(",
"unser_val",
")"
] | Convert a label into a kvl value. | [
"Convert",
"a",
"label",
"into",
"a",
"kvl",
"value",
"."
] | python | train |
indico/indico-plugins | livesync/indico_livesync/simplify.py | https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/simplify.py#L114-L160 | def _process_cascaded_event_contents(records, additional_events=None):
"""
Flatten a series of records into its most basic elements (subcontribution level).
Yields results.
:param records: queue records to process
:param additional_events: events whose content will be included in addition to those
found in records
"""
changed_events = additional_events or set()
changed_contributions = set()
changed_subcontributions = set()
session_records = {rec.session_id for rec in records if rec.type == EntryType.session}
contribution_records = {rec.contrib_id for rec in records if rec.type == EntryType.contribution}
subcontribution_records = {rec.subcontrib_id for rec in records if rec.type == EntryType.subcontribution}
event_records = {rec.event_id for rec in records if rec.type == EntryType.event}
if event_records:
changed_events.update(Event.find(Event.id.in_(event_records)))
for event in changed_events:
yield event
# Sessions are added (explicitly changed only, since they don't need to be sent anywhere)
if session_records:
changed_contributions.update(Contribution
.find(Contribution.session_id.in_(session_records), ~Contribution.is_deleted))
# Contributions are added (implictly + explicitly changed)
changed_event_ids = {ev.id for ev in changed_events}
condition = Contribution.event_id.in_(changed_event_ids) & ~Contribution.is_deleted
if contribution_records:
condition = db.or_(condition, Contribution.id.in_(contribution_records))
contrib_query = Contribution.find(condition).options(joinedload('subcontributions'))
for contribution in contrib_query:
yield contribution
changed_subcontributions.update(contribution.subcontributions)
# Same for subcontributions
if subcontribution_records:
changed_subcontributions.update(SubContribution.find(SubContribution.id.in_(subcontribution_records)))
for subcontrib in changed_subcontributions:
yield subcontrib | [
"def",
"_process_cascaded_event_contents",
"(",
"records",
",",
"additional_events",
"=",
"None",
")",
":",
"changed_events",
"=",
"additional_events",
"or",
"set",
"(",
")",
"changed_contributions",
"=",
"set",
"(",
")",
"changed_subcontributions",
"=",
"set",
"(",
")",
"session_records",
"=",
"{",
"rec",
".",
"session_id",
"for",
"rec",
"in",
"records",
"if",
"rec",
".",
"type",
"==",
"EntryType",
".",
"session",
"}",
"contribution_records",
"=",
"{",
"rec",
".",
"contrib_id",
"for",
"rec",
"in",
"records",
"if",
"rec",
".",
"type",
"==",
"EntryType",
".",
"contribution",
"}",
"subcontribution_records",
"=",
"{",
"rec",
".",
"subcontrib_id",
"for",
"rec",
"in",
"records",
"if",
"rec",
".",
"type",
"==",
"EntryType",
".",
"subcontribution",
"}",
"event_records",
"=",
"{",
"rec",
".",
"event_id",
"for",
"rec",
"in",
"records",
"if",
"rec",
".",
"type",
"==",
"EntryType",
".",
"event",
"}",
"if",
"event_records",
":",
"changed_events",
".",
"update",
"(",
"Event",
".",
"find",
"(",
"Event",
".",
"id",
".",
"in_",
"(",
"event_records",
")",
")",
")",
"for",
"event",
"in",
"changed_events",
":",
"yield",
"event",
"# Sessions are added (explicitly changed only, since they don't need to be sent anywhere)",
"if",
"session_records",
":",
"changed_contributions",
".",
"update",
"(",
"Contribution",
".",
"find",
"(",
"Contribution",
".",
"session_id",
".",
"in_",
"(",
"session_records",
")",
",",
"~",
"Contribution",
".",
"is_deleted",
")",
")",
"# Contributions are added (implictly + explicitly changed)",
"changed_event_ids",
"=",
"{",
"ev",
".",
"id",
"for",
"ev",
"in",
"changed_events",
"}",
"condition",
"=",
"Contribution",
".",
"event_id",
".",
"in_",
"(",
"changed_event_ids",
")",
"&",
"~",
"Contribution",
".",
"is_deleted",
"if",
"contribution_records",
":",
"condition",
"=",
"db",
".",
"or_",
"(",
"condition",
",",
"Contribution",
".",
"id",
".",
"in_",
"(",
"contribution_records",
")",
")",
"contrib_query",
"=",
"Contribution",
".",
"find",
"(",
"condition",
")",
".",
"options",
"(",
"joinedload",
"(",
"'subcontributions'",
")",
")",
"for",
"contribution",
"in",
"contrib_query",
":",
"yield",
"contribution",
"changed_subcontributions",
".",
"update",
"(",
"contribution",
".",
"subcontributions",
")",
"# Same for subcontributions",
"if",
"subcontribution_records",
":",
"changed_subcontributions",
".",
"update",
"(",
"SubContribution",
".",
"find",
"(",
"SubContribution",
".",
"id",
".",
"in_",
"(",
"subcontribution_records",
")",
")",
")",
"for",
"subcontrib",
"in",
"changed_subcontributions",
":",
"yield",
"subcontrib"
] | Flatten a series of records into its most basic elements (subcontribution level).
Yields results.
:param records: queue records to process
:param additional_events: events whose content will be included in addition to those
found in records | [
"Flatten",
"a",
"series",
"of",
"records",
"into",
"its",
"most",
"basic",
"elements",
"(",
"subcontribution",
"level",
")",
"."
] | python | train |
openstack/monasca-common | monasca_common/kafka_lib/consumer/multiprocess.py | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/multiprocess.py#L40-L112 | def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
"""
A child process worker which consumes messages based on the
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
functionality breaks unless this function is kept outside of a class
"""
# Initial interval for retries in seconds.
interval = 1
while not events.exit.is_set():
try:
# Make the child processes open separate socket connections
client.reinit()
# We will start consumers without auto-commit. Auto-commit will be
# done by the master controller process.
consumer = SimpleConsumer(client, group, topic,
auto_commit=False,
auto_commit_every_n=None,
auto_commit_every_t=None,
**consumer_options)
# Ensure that the consumer provides the partition information
consumer.provide_partition_info()
while True:
# Wait till the controller indicates us to start consumption
events.start.wait()
# If we are asked to quit, do so
if events.exit.is_set():
break
# Consume messages and add them to the queue. If the controller
# indicates a specific number of messages, follow that advice
count = 0
message = consumer.get_message()
if message:
while True:
try:
queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
break
except queue.Full:
if events.exit.is_set():
break
count += 1
# We have reached the required size. The controller might have
# more than what he needs. Wait for a while.
# Without this logic, it is possible that we run into a big
# loop consuming all available messages before the controller
# can reset the 'start' event
if count == size.value:
events.pause.wait()
else:
# In case we did not receive any message, give up the CPU for
# a while before we try again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
consumer.stop()
except KafkaError as e:
# Retry with exponential backoff
log.error(
"Problem communicating with Kafka (%s), retrying in %d seconds..." % (e, interval))
time.sleep(interval)
interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS | [
"def",
"_mp_consume",
"(",
"client",
",",
"group",
",",
"topic",
",",
"queue",
",",
"size",
",",
"events",
",",
"*",
"*",
"consumer_options",
")",
":",
"# Initial interval for retries in seconds.",
"interval",
"=",
"1",
"while",
"not",
"events",
".",
"exit",
".",
"is_set",
"(",
")",
":",
"try",
":",
"# Make the child processes open separate socket connections",
"client",
".",
"reinit",
"(",
")",
"# We will start consumers without auto-commit. Auto-commit will be",
"# done by the master controller process.",
"consumer",
"=",
"SimpleConsumer",
"(",
"client",
",",
"group",
",",
"topic",
",",
"auto_commit",
"=",
"False",
",",
"auto_commit_every_n",
"=",
"None",
",",
"auto_commit_every_t",
"=",
"None",
",",
"*",
"*",
"consumer_options",
")",
"# Ensure that the consumer provides the partition information",
"consumer",
".",
"provide_partition_info",
"(",
")",
"while",
"True",
":",
"# Wait till the controller indicates us to start consumption",
"events",
".",
"start",
".",
"wait",
"(",
")",
"# If we are asked to quit, do so",
"if",
"events",
".",
"exit",
".",
"is_set",
"(",
")",
":",
"break",
"# Consume messages and add them to the queue. If the controller",
"# indicates a specific number of messages, follow that advice",
"count",
"=",
"0",
"message",
"=",
"consumer",
".",
"get_message",
"(",
")",
"if",
"message",
":",
"while",
"True",
":",
"try",
":",
"queue",
".",
"put",
"(",
"message",
",",
"timeout",
"=",
"FULL_QUEUE_WAIT_TIME_SECONDS",
")",
"break",
"except",
"queue",
".",
"Full",
":",
"if",
"events",
".",
"exit",
".",
"is_set",
"(",
")",
":",
"break",
"count",
"+=",
"1",
"# We have reached the required size. The controller might have",
"# more than what he needs. Wait for a while.",
"# Without this logic, it is possible that we run into a big",
"# loop consuming all available messages before the controller",
"# can reset the 'start' event",
"if",
"count",
"==",
"size",
".",
"value",
":",
"events",
".",
"pause",
".",
"wait",
"(",
")",
"else",
":",
"# In case we did not receive any message, give up the CPU for",
"# a while before we try again",
"time",
".",
"sleep",
"(",
"NO_MESSAGES_WAIT_TIME_SECONDS",
")",
"consumer",
".",
"stop",
"(",
")",
"except",
"KafkaError",
"as",
"e",
":",
"# Retry with exponential backoff",
"log",
".",
"error",
"(",
"\"Problem communicating with Kafka (%s), retrying in %d seconds...\"",
"%",
"(",
"e",
",",
"interval",
")",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"interval",
"=",
"interval",
"*",
"2",
"if",
"interval",
"*",
"2",
"<",
"MAX_BACKOFF_SECONDS",
"else",
"MAX_BACKOFF_SECONDS"
] | A child process worker which consumes messages based on the
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
functionality breaks unless this function is kept outside of a class | [
"A",
"child",
"process",
"worker",
"which",
"consumes",
"messages",
"based",
"on",
"the",
"notifications",
"given",
"by",
"the",
"controller",
"process"
] | python | train |
fvdsn/py-xml-escpos | xmlescpos/escpos.py | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L155-L162 | def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr]) | [
"def",
"set",
"(",
"self",
",",
"style",
"=",
"{",
"}",
")",
":",
"_style",
"=",
"{",
"}",
"for",
"attr",
"in",
"style",
":",
"if",
"attr",
"in",
"self",
".",
"cmds",
"and",
"not",
"style",
"[",
"attr",
"]",
"in",
"self",
".",
"cmds",
"[",
"attr",
"]",
":",
"print",
"'WARNING: ESC/POS PRINTING: ignoring invalid value: '",
"+",
"utfstr",
"(",
"style",
"[",
"attr",
"]",
")",
"+",
"' for style: '",
"+",
"utfstr",
"(",
"attr",
")",
"else",
":",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"[",
"attr",
"]",
"=",
"self",
".",
"enforce_type",
"(",
"attr",
",",
"style",
"[",
"attr",
"]",
")"
] | overrides style values at the current stack level | [
"overrides",
"style",
"values",
"at",
"the",
"current",
"stack",
"level"
] | python | train |
quantopian/zipline | zipline/assets/assets.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1416-L1456 | def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
if country_codes:
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
).execute(),
),
dtype='f8', # use doubles so we get NaNs
)
else:
buf = np.array([], dtype='f8')
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', 'f8'),
('start', 'f8'),
('end', 'f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', 'i8'),
('start', 'i8'),
('end', 'i8'),
]) | [
"def",
"_compute_asset_lifetimes",
"(",
"self",
",",
"country_codes",
")",
":",
"equities_cols",
"=",
"self",
".",
"equities",
".",
"c",
"if",
"country_codes",
":",
"buf",
"=",
"np",
".",
"array",
"(",
"tuple",
"(",
"sa",
".",
"select",
"(",
"(",
"equities_cols",
".",
"sid",
",",
"equities_cols",
".",
"start_date",
",",
"equities_cols",
".",
"end_date",
",",
")",
")",
".",
"where",
"(",
"(",
"self",
".",
"exchanges",
".",
"c",
".",
"exchange",
"==",
"equities_cols",
".",
"exchange",
")",
"&",
"(",
"self",
".",
"exchanges",
".",
"c",
".",
"country_code",
".",
"in_",
"(",
"country_codes",
")",
")",
")",
".",
"execute",
"(",
")",
",",
")",
",",
"dtype",
"=",
"'f8'",
",",
"# use doubles so we get NaNs",
")",
"else",
":",
"buf",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'f8'",
")",
"lifetimes",
"=",
"np",
".",
"recarray",
"(",
"buf",
"=",
"buf",
",",
"shape",
"=",
"(",
"len",
"(",
"buf",
")",
",",
")",
",",
"dtype",
"=",
"[",
"(",
"'sid'",
",",
"'f8'",
")",
",",
"(",
"'start'",
",",
"'f8'",
")",
",",
"(",
"'end'",
",",
"'f8'",
")",
"]",
",",
")",
"start",
"=",
"lifetimes",
".",
"start",
"end",
"=",
"lifetimes",
".",
"end",
"start",
"[",
"np",
".",
"isnan",
"(",
"start",
")",
"]",
"=",
"0",
"# convert missing starts to 0",
"end",
"[",
"np",
".",
"isnan",
"(",
"end",
")",
"]",
"=",
"np",
".",
"iinfo",
"(",
"int",
")",
".",
"max",
"# convert missing end to INTMAX",
"# Cast the results back down to int.",
"return",
"lifetimes",
".",
"astype",
"(",
"[",
"(",
"'sid'",
",",
"'i8'",
")",
",",
"(",
"'start'",
",",
"'i8'",
")",
",",
"(",
"'end'",
",",
"'i8'",
")",
",",
"]",
")"
] | Compute and cache a recarray of asset lifetimes. | [
"Compute",
"and",
"cache",
"a",
"recarray",
"of",
"asset",
"lifetimes",
"."
] | python | train |
spookylukey/django-paypal | paypal/pro/helpers.py | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/helpers.py#L335-L345 | def _check_and_update_params(self, required, params):
"""
Ensure all required parameters were passed to the API call and format
them correctly.
"""
for r in required:
if r not in params:
raise PayPalError("Missing required param: %s" % r)
# Upper case all the parameters for PayPal.
return (dict((k.upper(), v) for k, v in params.items())) | [
"def",
"_check_and_update_params",
"(",
"self",
",",
"required",
",",
"params",
")",
":",
"for",
"r",
"in",
"required",
":",
"if",
"r",
"not",
"in",
"params",
":",
"raise",
"PayPalError",
"(",
"\"Missing required param: %s\"",
"%",
"r",
")",
"# Upper case all the parameters for PayPal.",
"return",
"(",
"dict",
"(",
"(",
"k",
".",
"upper",
"(",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
")",
")"
] | Ensure all required parameters were passed to the API call and format
them correctly. | [
"Ensure",
"all",
"required",
"parameters",
"were",
"passed",
"to",
"the",
"API",
"call",
"and",
"format",
"them",
"correctly",
"."
] | python | train |
tompollard/tableone | tableone.py | https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L334-L339 | def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers) | [
"def",
"_outliers",
"(",
"self",
",",
"x",
")",
":",
"outliers",
"=",
"self",
".",
"_tukey",
"(",
"x",
",",
"threshold",
"=",
"1.5",
")",
"return",
"np",
".",
"size",
"(",
"outliers",
")"
] | Compute number of outliers | [
"Compute",
"number",
"of",
"outliers"
] | python | train |
tensorlayer/tensorlayer | tensorlayer/nlp.py | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L836-L870 | def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b"([.,!?\"':;)(])")):
"""Very basic tokenizer: split the sentence into a list of tokens.
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
_WORD_SPLIT : regular expression for word splitting.
Examples
--------
>>> see create_vocabulary
>>> from tensorflow.python.platform import gfile
>>> train_path = "wmt/giga-fren.release2"
>>> with gfile.GFile(train_path + ".en", mode="rb") as f:
>>> for line in f:
>>> tokens = tl.nlp.basic_tokenizer(line)
>>> tl.logging.info(tokens)
>>> exit()
[b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How',
b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home',
b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview',
b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|',
b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page']
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
words = []
sentence = tf.compat.as_bytes(sentence)
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w] | [
"def",
"basic_tokenizer",
"(",
"sentence",
",",
"_WORD_SPLIT",
"=",
"re",
".",
"compile",
"(",
"b\"([.,!?\\\"':;)(])\"",
")",
")",
":",
"words",
"=",
"[",
"]",
"sentence",
"=",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"sentence",
")",
"for",
"space_separated_fragment",
"in",
"sentence",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
":",
"words",
".",
"extend",
"(",
"re",
".",
"split",
"(",
"_WORD_SPLIT",
",",
"space_separated_fragment",
")",
")",
"return",
"[",
"w",
"for",
"w",
"in",
"words",
"if",
"w",
"]"
] | Very basic tokenizer: split the sentence into a list of tokens.
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
_WORD_SPLIT : regular expression for word splitting.
Examples
--------
>>> see create_vocabulary
>>> from tensorflow.python.platform import gfile
>>> train_path = "wmt/giga-fren.release2"
>>> with gfile.GFile(train_path + ".en", mode="rb") as f:
>>> for line in f:
>>> tokens = tl.nlp.basic_tokenizer(line)
>>> tl.logging.info(tokens)
>>> exit()
[b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How',
b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home',
b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview',
b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|',
b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page']
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py`` | [
"Very",
"basic",
"tokenizer",
":",
"split",
"the",
"sentence",
"into",
"a",
"list",
"of",
"tokens",
"."
] | python | valid |
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_firmware.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_firmware.py#L98-L108 | def row_is_filtered(self, row_subs, filters):
'''returns True if row should NOT be included according to filters'''
for filtername in filters:
filtervalue = filters[filtername]
if filtername in row_subs:
row_subs_value = row_subs[filtername]
if str(row_subs_value) != str(filtervalue):
return True
else:
print("fw: Unknown filter keyword (%s)" % (filtername,))
return False | [
"def",
"row_is_filtered",
"(",
"self",
",",
"row_subs",
",",
"filters",
")",
":",
"for",
"filtername",
"in",
"filters",
":",
"filtervalue",
"=",
"filters",
"[",
"filtername",
"]",
"if",
"filtername",
"in",
"row_subs",
":",
"row_subs_value",
"=",
"row_subs",
"[",
"filtername",
"]",
"if",
"str",
"(",
"row_subs_value",
")",
"!=",
"str",
"(",
"filtervalue",
")",
":",
"return",
"True",
"else",
":",
"print",
"(",
"\"fw: Unknown filter keyword (%s)\"",
"%",
"(",
"filtername",
",",
")",
")",
"return",
"False"
] | returns True if row should NOT be included according to filters | [
"returns",
"True",
"if",
"row",
"should",
"NOT",
"be",
"included",
"according",
"to",
"filters"
] | python | train |
jmoiron/speedparser | speedparser/speedparser.py | https://github.com/jmoiron/speedparser/blob/e7e8d79daf73b35c9259695ad1e379476e1dfc77/speedparser/speedparser.py#L94-L104 | def strip_outer_tag(text):
"""Strips the outer tag, if text starts with a tag. Not entity aware;
designed to quickly strip outer tags from lxml cleaner output. Only
checks for <p> and <div> outer tags."""
if not text or not isinstance(text, basestring):
return text
stripped = text.strip()
if (stripped.startswith('<p>') or stripped.startswith('<div>')) and \
(stripped.endswith('</p>') or stripped.endswith('</div>')):
return stripped[stripped.index('>')+1:stripped.rindex('<')]
return text | [
"def",
"strip_outer_tag",
"(",
"text",
")",
":",
"if",
"not",
"text",
"or",
"not",
"isinstance",
"(",
"text",
",",
"basestring",
")",
":",
"return",
"text",
"stripped",
"=",
"text",
".",
"strip",
"(",
")",
"if",
"(",
"stripped",
".",
"startswith",
"(",
"'<p>'",
")",
"or",
"stripped",
".",
"startswith",
"(",
"'<div>'",
")",
")",
"and",
"(",
"stripped",
".",
"endswith",
"(",
"'</p>'",
")",
"or",
"stripped",
".",
"endswith",
"(",
"'</div>'",
")",
")",
":",
"return",
"stripped",
"[",
"stripped",
".",
"index",
"(",
"'>'",
")",
"+",
"1",
":",
"stripped",
".",
"rindex",
"(",
"'<'",
")",
"]",
"return",
"text"
] | Strips the outer tag, if text starts with a tag. Not entity aware;
designed to quickly strip outer tags from lxml cleaner output. Only
checks for <p> and <div> outer tags. | [
"Strips",
"the",
"outer",
"tag",
"if",
"text",
"starts",
"with",
"a",
"tag",
".",
"Not",
"entity",
"aware",
";",
"designed",
"to",
"quickly",
"strip",
"outer",
"tags",
"from",
"lxml",
"cleaner",
"output",
".",
"Only",
"checks",
"for",
"<p",
">",
"and",
"<div",
">",
"outer",
"tags",
"."
] | python | train |
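A short illustrative sketch of the strip_outer_tag helper above (example strings are made-up):

>>> strip_outer_tag('<p>Hello, world</p>')
'Hello, world'
>>> strip_outer_tag('  <div><em>nested</em> stays</div>  ')
'<em>nested</em> stays'
>>> strip_outer_tag('<span>untouched</span>')  # only outer <p> and <div> are stripped
'<span>untouched</span>'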
mbr/tinyrpc | tinyrpc/transports/zmq.py | https://github.com/mbr/tinyrpc/blob/59ccf62452b3f37e8411ff0309a3a99857d05e19/tinyrpc/transports/zmq.py#L28-L42 | def create(cls, zmq_context, endpoint):
"""Create new server transport.
Instead of creating the socket yourself, you can call this function and
merely pass the :py:class:`zmq.core.context.Context` instance.
By passing a context imported from :py:mod:`zmq.green`, you can use
green (gevent) 0mq sockets as well.
:param zmq_context: A 0mq context.
:param endpoint: The endpoint clients will connect to.
"""
socket = zmq_context.socket(zmq.ROUTER)
socket.bind(endpoint)
return cls(socket) | [
"def",
"create",
"(",
"cls",
",",
"zmq_context",
",",
"endpoint",
")",
":",
"socket",
"=",
"zmq_context",
".",
"socket",
"(",
"zmq",
".",
"ROUTER",
")",
"socket",
".",
"bind",
"(",
"endpoint",
")",
"return",
"cls",
"(",
"socket",
")"
] | Create new server transport.
Instead of creating the socket yourself, you can call this function and
merely pass the :py:class:`zmq.core.context.Context` instance.
By passing a context imported from :py:mod:`zmq.green`, you can use
green (gevent) 0mq sockets as well.
:param zmq_context: A 0mq context.
:param endpoint: The endpoint clients will connect to. | [
"Create",
"new",
"server",
"transport",
"."
] | python | train |
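A hedged usage sketch for the classmethod above, assuming the enclosing class is tinyrpc's ZmqServerTransport (the endpoint value is just an example):

import zmq
from tinyrpc.transports.zmq import ZmqServerTransport  # assumed location of the class shown above

ctx = zmq.Context()
transport = ZmqServerTransport.create(ctx, 'tcp://127.0.0.1:5001')
# the returned transport wraps a ROUTER socket already bound to the endpoint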
dossier/dossier.fc | python/dossier/fc/dump.py | https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/dump.py#L132-L140 | def only_specific_multisets(ent, multisets_to_show):
'''
returns a pretty-printed string for specific features in a FeatureCollection
'''
out_str = []
for mset_name in multisets_to_show:
for key, count in ent[mset_name].items():
out_str.append( '%s - %d: %s' % (mset_name, count, key) )
return '\n'.join(out_str) | [
"def",
"only_specific_multisets",
"(",
"ent",
",",
"multisets_to_show",
")",
":",
"out_str",
"=",
"[",
"]",
"for",
"mset_name",
"in",
"multisets_to_show",
":",
"for",
"key",
",",
"count",
"in",
"ent",
"[",
"mset_name",
"]",
".",
"items",
"(",
")",
":",
"out_str",
".",
"append",
"(",
"'%s - %d: %s'",
"%",
"(",
"mset_name",
",",
"count",
",",
"key",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"out_str",
")"
] | returns a pretty-printed string for specific features in a FeatureCollection | [
"returns",
"a",
"pretty",
"-",
"printed",
"string",
"for",
"specific",
"features",
"in",
"a",
"FeatureCollection"
] | python | train |
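An illustrative call of only_specific_multisets, using a plain dict of counters in place of a real FeatureCollection (made-up data):

>>> ent = {'NAME': {'Alice': 2, 'Bob': 1}}
>>> print(only_specific_multisets(ent, ['NAME']))
NAME - 2: Alice
NAME - 1: Bob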
tanghaibao/jcvi | jcvi/utils/progressbar.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L562-L566 | def _update_widgets(self):
'Checks all widgets for the time sensitive bit'
self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
for w in self.widgets) | [
"def",
"_update_widgets",
"(",
"self",
")",
":",
"self",
".",
"_time_sensitive",
"=",
"any",
"(",
"getattr",
"(",
"w",
",",
"'TIME_SENSITIVE'",
",",
"False",
")",
"for",
"w",
"in",
"self",
".",
"widgets",
")"
] | Checks all widgets for the time sensitive bit | [
"Checks",
"all",
"widgets",
"for",
"the",
"time",
"sensitive",
"bit"
] | python | train |
PyGithub/PyGithub | github/Repository.py | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L2445-L2455 | def get_subscribers(self):
"""
:calls: `GET /repos/:owner/:repo/subscribers <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/subscribers",
None
) | [
"def",
"get_subscribers",
"(",
"self",
")",
":",
"return",
"github",
".",
"PaginatedList",
".",
"PaginatedList",
"(",
"github",
".",
"NamedUser",
".",
"NamedUser",
",",
"self",
".",
"_requester",
",",
"self",
".",
"url",
"+",
"\"/subscribers\"",
",",
"None",
")"
] | :calls: `GET /repos/:owner/:repo/subscribers <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` | [
":",
"calls",
":",
"GET",
"/",
"repos",
"/",
":",
"owner",
"/",
":",
"repo",
"/",
"subscribers",
"<http",
":",
"//",
"developer",
".",
"github",
".",
"com",
"/",
"v3",
"/",
"activity",
"/",
"watching",
">",
"_",
":",
"rtype",
":",
":",
"class",
":",
"github",
".",
"PaginatedList",
".",
"PaginatedList",
"of",
":",
"class",
":",
"github",
".",
"NamedUser",
".",
"NamedUser"
] | python | train |
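A brief usage sketch for the get_subscribers method above (token and repository name are placeholders):

from github import Github

gh = Github("<access-token>")                # placeholder credentials
repo = gh.get_repo("PyGithub/PyGithub")
for user in repo.get_subscribers():          # lazily paginated list of NamedUser objects
    print(user.login)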
fjwCode/cerium | cerium/service.py | https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/service.py#L78-L83 | def devices_l(self) -> Dict:
'''List connected devices (-l for long output).'''
output, _ = self._execute('devices', '-l')
devices = output.split()[4::6]
models = output.split()[7::6]
return dict(zip(devices, models)) | [
"def",
"devices_l",
"(",
"self",
")",
"->",
"Dict",
":",
"output",
",",
"_",
"=",
"self",
".",
"_execute",
"(",
"'devices'",
",",
"'-l'",
")",
"devices",
"=",
"output",
".",
"split",
"(",
")",
"[",
"4",
":",
":",
"6",
"]",
"models",
"=",
"output",
".",
"split",
"(",
")",
"[",
"7",
":",
":",
"6",
"]",
"return",
"dict",
"(",
"zip",
"(",
"devices",
",",
"models",
")",
")"
] | List connected devices (-l for long output). | [
"List",
"connected",
"devices",
"(",
"-",
"l",
"for",
"long",
"output",
")",
"."
] | python | train |
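A usage sketch for devices_l, assuming cerium's AndroidDriver is the public class that exposes this method (the sample output is invented):

from cerium import AndroidDriver  # assumed public entry point

driver = AndroidDriver()
print(driver.devices_l())
# e.g. {'emulator-5554': 'model:Android_SDK_built_for_x86'}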
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py#L339-L353 | def _get_vrfs(self):
"""Get the current VRFs configured in the device.
:return: A list of vrf names as string
"""
vrfs = []
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
vrfs_raw = parse.find_lines("^vrf definition")
for line in vrfs_raw:
# raw format ['ip vrf <vrf-name>',....]
vrf_name = line.strip().split(' ')[2]
vrfs.append(vrf_name)
LOG.info("VRFs:%s", vrfs)
return vrfs | [
"def",
"_get_vrfs",
"(",
"self",
")",
":",
"vrfs",
"=",
"[",
"]",
"ios_cfg",
"=",
"self",
".",
"_get_running_config",
"(",
")",
"parse",
"=",
"HTParser",
"(",
"ios_cfg",
")",
"vrfs_raw",
"=",
"parse",
".",
"find_lines",
"(",
"\"^vrf definition\"",
")",
"for",
"line",
"in",
"vrfs_raw",
":",
"# raw format ['ip vrf <vrf-name>',....]",
"vrf_name",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
")",
"[",
"2",
"]",
"vrfs",
".",
"append",
"(",
"vrf_name",
")",
"LOG",
".",
"info",
"(",
"\"VRFs:%s\"",
",",
"vrfs",
")",
"return",
"vrfs"
] | Get the current VRFs configured in the device.
:return: A list of vrf names as string | [
"Get",
"the",
"current",
"VRFs",
"configured",
"in",
"the",
"device",
"."
] | python | train |
geopy/geopy | geopy/geocoders/ignfrance.py | https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/ignfrance.py#L444-L527 | def _xml_to_json_places(tree, is_reverse=False):
"""
Transform the xml ElementTree due to XML webservice return to json
"""
select_multi = (
'GeocodedAddress'
if not is_reverse
else 'ReverseGeocodedLocation'
)
adresses = tree.findall('.//' + select_multi)
places = []
sel_pl = './/Address/Place[@type="{}"]'
for adr in adresses:
el = {}
el['pos'] = adr.find('./Point/pos')
el['street'] = adr.find('.//Address/StreetAddress/Street')
el['freeformaddress'] = adr.find('.//Address/freeFormAddress')
el['municipality'] = adr.find(sel_pl.format('Municipality'))
el['numero'] = adr.find(sel_pl.format('Numero'))
el['feuille'] = adr.find(sel_pl.format('Feuille'))
el['section'] = adr.find(sel_pl.format('Section'))
el['departement'] = adr.find(sel_pl.format('Departement'))
el['commune_absorbee'] = adr.find(sel_pl.format('CommuneAbsorbee'))
el['commune'] = adr.find(sel_pl.format('Commune'))
el['insee'] = adr.find(sel_pl.format('INSEE'))
el['qualite'] = adr.find(sel_pl.format('Qualite'))
el['territoire'] = adr.find(sel_pl.format('Territoire'))
el['id'] = adr.find(sel_pl.format('ID'))
el['id_tr'] = adr.find(sel_pl.format('ID_TR'))
el['bbox'] = adr.find(sel_pl.format('Bbox'))
el['nature'] = adr.find(sel_pl.format('Nature'))
el['postal_code'] = adr.find('.//Address/PostalCode')
el['extended_geocode_match_code'] = adr.find(
'.//ExtendedGeocodeMatchCode'
)
place = {}
def testContentAttrib(selector, key):
"""
Helper to select by attribute and if not attribute,
value set to empty string
"""
return selector.attrib.get(
key,
None
) if selector is not None else None
place['accuracy'] = testContentAttrib(
adr.find('.//GeocodeMatchCode'), 'accuracy')
place['match_type'] = testContentAttrib(
adr.find('.//GeocodeMatchCode'), 'matchType')
place['building'] = testContentAttrib(
adr.find('.//Address/StreetAddress/Building'), 'number')
place['search_centre_distance'] = testContentAttrib(
adr.find('.//SearchCentreDistance'), 'value')
for key, value in iteritems(el):
if value is not None:
place[key] = value.text
if value.text is None:
place[key] = None
else:
place[key] = None
# We check if lat lng is not empty and unpack accordingly
if place['pos']:
lat, lng = place['pos'].split(' ')
place['lat'] = lat.strip()
place['lng'] = lng.strip()
else:
place['lat'] = place['lng'] = None
# We removed the unused key
place.pop("pos", None)
places.append(place)
return places | [
"def",
"_xml_to_json_places",
"(",
"tree",
",",
"is_reverse",
"=",
"False",
")",
":",
"select_multi",
"=",
"(",
"'GeocodedAddress'",
"if",
"not",
"is_reverse",
"else",
"'ReverseGeocodedLocation'",
")",
"adresses",
"=",
"tree",
".",
"findall",
"(",
"'.//'",
"+",
"select_multi",
")",
"places",
"=",
"[",
"]",
"sel_pl",
"=",
"'.//Address/Place[@type=\"{}\"]'",
"for",
"adr",
"in",
"adresses",
":",
"el",
"=",
"{",
"}",
"el",
"[",
"'pos'",
"]",
"=",
"adr",
".",
"find",
"(",
"'./Point/pos'",
")",
"el",
"[",
"'street'",
"]",
"=",
"adr",
".",
"find",
"(",
"'.//Address/StreetAddress/Street'",
")",
"el",
"[",
"'freeformaddress'",
"]",
"=",
"adr",
".",
"find",
"(",
"'.//Address/freeFormAddress'",
")",
"el",
"[",
"'municipality'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Municipality'",
")",
")",
"el",
"[",
"'numero'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Numero'",
")",
")",
"el",
"[",
"'feuille'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Feuille'",
")",
")",
"el",
"[",
"'section'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Section'",
")",
")",
"el",
"[",
"'departement'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Departement'",
")",
")",
"el",
"[",
"'commune_absorbee'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'CommuneAbsorbee'",
")",
")",
"el",
"[",
"'commune'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Commune'",
")",
")",
"el",
"[",
"'insee'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'INSEE'",
")",
")",
"el",
"[",
"'qualite'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Qualite'",
")",
")",
"el",
"[",
"'territoire'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Territoire'",
")",
")",
"el",
"[",
"'id'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'ID'",
")",
")",
"el",
"[",
"'id_tr'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'ID_TR'",
")",
")",
"el",
"[",
"'bbox'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Bbox'",
")",
")",
"el",
"[",
"'nature'",
"]",
"=",
"adr",
".",
"find",
"(",
"sel_pl",
".",
"format",
"(",
"'Nature'",
")",
")",
"el",
"[",
"'postal_code'",
"]",
"=",
"adr",
".",
"find",
"(",
"'.//Address/PostalCode'",
")",
"el",
"[",
"'extended_geocode_match_code'",
"]",
"=",
"adr",
".",
"find",
"(",
"'.//ExtendedGeocodeMatchCode'",
")",
"place",
"=",
"{",
"}",
"def",
"testContentAttrib",
"(",
"selector",
",",
"key",
")",
":",
"\"\"\"\n Helper to select by attribute and if not attribute,\n value set to empty string\n \"\"\"",
"return",
"selector",
".",
"attrib",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"selector",
"is",
"not",
"None",
"else",
"None",
"place",
"[",
"'accuracy'",
"]",
"=",
"testContentAttrib",
"(",
"adr",
".",
"find",
"(",
"'.//GeocodeMatchCode'",
")",
",",
"'accuracy'",
")",
"place",
"[",
"'match_type'",
"]",
"=",
"testContentAttrib",
"(",
"adr",
".",
"find",
"(",
"'.//GeocodeMatchCode'",
")",
",",
"'matchType'",
")",
"place",
"[",
"'building'",
"]",
"=",
"testContentAttrib",
"(",
"adr",
".",
"find",
"(",
"'.//Address/StreetAddress/Building'",
")",
",",
"'number'",
")",
"place",
"[",
"'search_centre_distance'",
"]",
"=",
"testContentAttrib",
"(",
"adr",
".",
"find",
"(",
"'.//SearchCentreDistance'",
")",
",",
"'value'",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"el",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"place",
"[",
"key",
"]",
"=",
"value",
".",
"text",
"if",
"value",
".",
"text",
"is",
"None",
":",
"place",
"[",
"key",
"]",
"=",
"None",
"else",
":",
"place",
"[",
"key",
"]",
"=",
"None",
"# We check if lat lng is not empty and unpack accordingly",
"if",
"place",
"[",
"'pos'",
"]",
":",
"lat",
",",
"lng",
"=",
"place",
"[",
"'pos'",
"]",
".",
"split",
"(",
"' '",
")",
"place",
"[",
"'lat'",
"]",
"=",
"lat",
".",
"strip",
"(",
")",
"place",
"[",
"'lng'",
"]",
"=",
"lng",
".",
"strip",
"(",
")",
"else",
":",
"place",
"[",
"'lat'",
"]",
"=",
"place",
"[",
"'lng'",
"]",
"=",
"None",
"# We removed the unused key",
"place",
".",
"pop",
"(",
"\"pos\"",
",",
"None",
")",
"places",
".",
"append",
"(",
"place",
")",
"return",
"places"
] | Transform the xml ElementTree due to XML webservice return to json | [
"Transform",
"the",
"xml",
"ElementTree",
"due",
"to",
"XML",
"webservice",
"return",
"to",
"json"
] | python | train |
h2oai/h2o-3 | h2o-py/h2o/utils/typechecks.py | https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/typechecks.py#L181-L188 | def name(self, src=None):
"""Return string representing the name of this type."""
res = [_get_type_name(tt, src) for tt in self._types]
if len(res) == 2 and "None" in res:
res.remove("None")
return "?" + res[0]
else:
return " | ".join(res) | [
"def",
"name",
"(",
"self",
",",
"src",
"=",
"None",
")",
":",
"res",
"=",
"[",
"_get_type_name",
"(",
"tt",
",",
"src",
")",
"for",
"tt",
"in",
"self",
".",
"_types",
"]",
"if",
"len",
"(",
"res",
")",
"==",
"2",
"and",
"\"None\"",
"in",
"res",
":",
"res",
".",
"remove",
"(",
"\"None\"",
")",
"return",
"\"?\"",
"+",
"res",
"[",
"0",
"]",
"else",
":",
"return",
"\" | \"",
".",
"join",
"(",
"res",
")"
] | Return string representing the name of this type. | [
"Return",
"string",
"representing",
"the",
"name",
"of",
"this",
"type",
"."
] | python | test |
bitesofcode/projex | projex/envmanager.py | https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/envmanager.py#L39-L66 | def _setup():
"""
Sets up the global import environment variables by registering the
sub-folders for projex as import locations. When defining your
custom manager, you will want to overload this method to do any
sort of global initialization that you wish before continuing.
:warning This method is called by the _setup method, and should
not be called directly.
"""
projex_path = os.getenv('PROJEX_PATH')
if not projex_path:
return
base_path = os.path.dirname(__file__)
logger.debug('Loading PROJEX_PATH: %s' % projex_path)
# load the defaults from the install directory
# load the paths from the environment
paths = projex_path.split(os.path.pathsep)
paths += [
os.path.join(base_path, 'userplug'),
os.path.join(base_path, 'stdplug'),
os.path.join(base_path, 'lib'),
]
sys.path = paths + sys.path | [
"def",
"_setup",
"(",
")",
":",
"projex_path",
"=",
"os",
".",
"getenv",
"(",
"'PROJEX_PATH'",
")",
"if",
"not",
"projex_path",
":",
"return",
"base_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"logger",
".",
"debug",
"(",
"'Loading PROJEX_PATH: %s'",
"%",
"projex_path",
")",
"# load the defaults from the install directory",
"# load the paths from the environment",
"paths",
"=",
"projex_path",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
"paths",
"+=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"'userplug'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"'stdplug'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"'lib'",
")",
",",
"]",
"sys",
".",
"path",
"=",
"paths",
"+",
"sys",
".",
"path"
] | Sets up the global import environment variables by registering the
sub-folders for projex as import locations. When defining your
custom manager, you will want to overload this method to do any
sort of global initialization that you wish before continuing.
:warning This method is called by the _setup method, and should
not be called directly. | [
"Sets",
"up",
"the",
"global",
"import",
"environment",
"variables",
"by",
"registering",
"the",
"sub",
"-",
"folders",
"for",
"projex",
"as",
"import",
"locations",
".",
"When",
"defining",
"your",
"custom",
"manager",
"you",
"will",
"want",
"to",
"overload",
"this",
"method",
"to",
"do",
"any",
"sort",
"of",
"global",
"initialization",
"that",
"you",
"wish",
"before",
"continuing",
".",
":",
"warning",
"This",
"method",
"is",
"called",
"by",
"the",
"_setup",
"method",
"and",
"should",
"not",
"be",
"called",
"directly",
"."
] | python | train |
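A minimal sketch of the environment contract described in the record above; the two plugin directories are invented, not paths that projex ships.

import os

# _setup() does nothing unless PROJEX_PATH is set before it runs
os.environ['PROJEX_PATH'] = os.pathsep.join(['/opt/plugins', '/srv/shared/plugins'])

# once projex's _setup() executes, sys.path begins with those two entries,
# then the bundled userplug/, stdplug/ and lib/ folders, and only after
# that the interpreter's original search path.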
inveniosoftware/invenio-search-ui | examples/app.py | https://github.com/inveniosoftware/invenio-search-ui/blob/4b61737f938cbfdc1aad6602a73f3a24d53b3312/examples/app.py#L206-L233 | def records():
"""Load records."""
import pkg_resources
import uuid
from dojson.contrib.marc21 import marc21
from dojson.contrib.marc21.utils import create_record, split_blob
from invenio_pidstore import current_pidstore
from invenio_records.api import Record
# pkg resources the demodata
data_path = pkg_resources.resource_filename(
'invenio_records', 'data/marc21/bibliographic.xml'
)
with open(data_path) as source:
indexer = RecordIndexer()
with db.session.begin_nested():
for index, data in enumerate(split_blob(source.read()), start=1):
# create uuid
rec_uuid = uuid.uuid4()
# do translate
record = marc21.do(create_record(data))
# create PID
current_pidstore.minters['recid'](
rec_uuid, record
)
# create record
indexer.index(Record.create(record, id_=rec_uuid))
db.session.commit() | [
"def",
"records",
"(",
")",
":",
"import",
"pkg_resources",
"import",
"uuid",
"from",
"dojson",
".",
"contrib",
".",
"marc21",
"import",
"marc21",
"from",
"dojson",
".",
"contrib",
".",
"marc21",
".",
"utils",
"import",
"create_record",
",",
"split_blob",
"from",
"invenio_pidstore",
"import",
"current_pidstore",
"from",
"invenio_records",
".",
"api",
"import",
"Record",
"# pkg resources the demodata",
"data_path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"'invenio_records'",
",",
"'data/marc21/bibliographic.xml'",
")",
"with",
"open",
"(",
"data_path",
")",
"as",
"source",
":",
"indexer",
"=",
"RecordIndexer",
"(",
")",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"for",
"index",
",",
"data",
"in",
"enumerate",
"(",
"split_blob",
"(",
"source",
".",
"read",
"(",
")",
")",
",",
"start",
"=",
"1",
")",
":",
"# create uuid",
"rec_uuid",
"=",
"uuid",
".",
"uuid4",
"(",
")",
"# do translate",
"record",
"=",
"marc21",
".",
"do",
"(",
"create_record",
"(",
"data",
")",
")",
"# create PID",
"current_pidstore",
".",
"minters",
"[",
"'recid'",
"]",
"(",
"rec_uuid",
",",
"record",
")",
"# create record",
"indexer",
".",
"index",
"(",
"Record",
".",
"create",
"(",
"record",
",",
"id_",
"=",
"rec_uuid",
")",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] | Load records. | [
"Load",
"records",
"."
] | python | train |
Metatab/metapack | metapack/appurl.py | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L496-L507 | def search(self):
"""Search for a url by returning the value from the first callback that
returns a non-None value"""
for cb in SearchUrl.search_callbacks:
try:
v = cb(self)
if v is not None:
return v
except Exception as e:
raise | [
"def",
"search",
"(",
"self",
")",
":",
"for",
"cb",
"in",
"SearchUrl",
".",
"search_callbacks",
":",
"try",
":",
"v",
"=",
"cb",
"(",
"self",
")",
"if",
"v",
"is",
"not",
"None",
":",
"return",
"v",
"except",
"Exception",
"as",
"e",
":",
"raise"
] | Search for a url by returning the value from the first callback that
returns a non-None value | [
"Search",
"for",
"a",
"url",
"by",
"returning",
"the",
"value",
"from",
"the",
"first",
"callback",
"that",
"returns",
"a",
"non",
"-",
"None",
"value"
] | python | train |
LogicalDash/LiSE | ELiDE/ELiDE/card.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/card.py#L50-L67 | def get_pos_hint_x(poshints, sizehintx):
"""Return ``poshints['x']`` if available, or its computed equivalent
otherwise.
"""
if 'x' in poshints:
return poshints['x']
elif sizehintx is not None:
if 'center_x' in poshints:
return (
poshints['center_x'] -
sizehintx / 2
)
elif 'right' in poshints:
return (
poshints['right'] -
sizehintx
) | [
"def",
"get_pos_hint_x",
"(",
"poshints",
",",
"sizehintx",
")",
":",
"if",
"'x'",
"in",
"poshints",
":",
"return",
"poshints",
"[",
"'x'",
"]",
"elif",
"sizehintx",
"is",
"not",
"None",
":",
"if",
"'center_x'",
"in",
"poshints",
":",
"return",
"(",
"poshints",
"[",
"'center_x'",
"]",
"-",
"sizehintx",
"/",
"2",
")",
"elif",
"'right'",
"in",
"poshints",
":",
"return",
"(",
"poshints",
"[",
"'right'",
"]",
"-",
"sizehintx",
")"
] | Return ``poshints['x']`` if available, or its computed equivalent
otherwise. | [
"Return",
"poshints",
"[",
"x",
"]",
"if",
"available",
"or",
"its",
"computed",
"equivalent",
"otherwise",
"."
] | python | train |
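A self-contained restatement of the fall-back arithmetic in the record above, with invented hint values (the real helper lives in ELiDE/ELiDE/card.py).

def pos_hint_x(poshints, size_hint_x):
    # same precedence as the helper above: an explicit 'x' wins, otherwise the
    # value is derived from 'center_x' or 'right' and the width hint
    if 'x' in poshints:
        return poshints['x']
    if size_hint_x is not None:
        if 'center_x' in poshints:
            return poshints['center_x'] - size_hint_x / 2
        if 'right' in poshints:
            return poshints['right'] - size_hint_x

print(pos_hint_x({'x': 0.1}, 0.5))          # 0.1  (explicit hint wins)
print(pos_hint_x({'center_x': 0.5}, 0.25))  # 0.375
print(pos_hint_x({'right': 0.75}, 0.5))     # 0.25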
AtteqCom/zsl | src/zsl/application/containers/container.py | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/application/containers/container.py#L24-L35 | def modules(cls):
"""Collect all the public class attributes.
All class attributes should be a DI modules, this method collects them
and returns as a list.
:return: list of DI modules
:rtype: list[Union[Module, Callable]]
"""
members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))
modules = [module for name, module in members if not name.startswith('_')]
return modules | [
"def",
"modules",
"(",
"cls",
")",
":",
"members",
"=",
"inspect",
".",
"getmembers",
"(",
"cls",
",",
"lambda",
"a",
":",
"not",
"(",
"inspect",
".",
"isroutine",
"(",
"a",
")",
"and",
"a",
".",
"__name__",
"==",
"'modules'",
")",
")",
"modules",
"=",
"[",
"module",
"for",
"name",
",",
"module",
"in",
"members",
"if",
"not",
"name",
".",
"startswith",
"(",
"'_'",
")",
"]",
"return",
"modules"
] | Collect all the public class attributes.
All class attributes should be a DI modules, this method collects them
and returns as a list.
:return: list of DI modules
:rtype: list[Union[Module, Callable]] | [
"Collect",
"all",
"the",
"public",
"class",
"attributes",
"."
] | python | train |
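A hedged sketch of the attribute scan above on a hypothetical container; the two module classes are plain stand-ins for real DI modules.

import inspect

class DatabaseModule:     # stand-in for a DI module
    pass

class SessionModule:      # stand-in for a DI module
    pass

class MyContainer:
    database = DatabaseModule
    session = SessionModule
    _internal = object()  # skipped: leading underscore

members = inspect.getmembers(
    MyContainer,
    lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))
print([m for name, m in members if not name.startswith('_')])
# -> the two module classes, in attribute-name order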
f3at/feat | src/feat/database/tools.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/database/tools.py#L186-L191 | def tupletize_version(version_string):
'''
Given "1.2.3-6" returns a tuple of (1, 2, 3, 6).
This is used for sorting versions.
'''
return tuple(int(x) for x in re.findall(r'[0-9]+', version_string)) | [
"def",
"tupletize_version",
"(",
"version_string",
")",
":",
"return",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"re",
".",
"findall",
"(",
"r'[0-9]+'",
",",
"version_string",
")",
")"
] | Given "1.2.3-6" returns a tuple of (1, 2, 3, 6).
This is used for sorting versions. | [
"Given",
"1",
".",
"2",
".",
"3",
"-",
"6",
"returns",
"a",
"tuple",
"of",
"(",
"1",
"2",
"3",
"6",
")",
".",
"This",
"is",
"used",
"for",
"sorting",
"versions",
"."
] | python | train |
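The helper above exists so that version strings sort numerically rather than lexically; a self-contained sketch with invented version numbers.

import re

def tupletize_version(version_string):
    # "1.2.3-6" -> (1, 2, 3, 6), as documented above
    return tuple(int(x) for x in re.findall(r'[0-9]+', version_string))

versions = ["1.10.0", "1.2.3-6", "1.9.12"]
print(sorted(versions))                         # lexical: ['1.10.0', '1.2.3-6', '1.9.12']
print(sorted(versions, key=tupletize_version))  # numeric: ['1.2.3-6', '1.9.12', '1.10.0']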
wright-group/WrightTools | WrightTools/data/_data.py | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L1421-L1458 | def rename_channels(self, *, verbose=True, **kwargs):
"""Rename a set of channels.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
verbose : boolean (optional)
Toggle talkback. Default is True
"""
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.channel_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.channel_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.channel_names = names
# finish
if verbose:
print("{0} channel(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v)) | [
"def",
"rename_channels",
"(",
"self",
",",
"*",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# ensure that items will remain unique",
"changed",
"=",
"kwargs",
".",
"keys",
"(",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"v",
"not",
"in",
"changed",
"and",
"v",
"in",
"self",
".",
"keys",
"(",
")",
":",
"raise",
"wt_exceptions",
".",
"NameNotUniqueError",
"(",
"v",
")",
"# compile references to items that are changing",
"new",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"obj",
"=",
"self",
"[",
"k",
"]",
"index",
"=",
"self",
".",
"channel_names",
".",
"index",
"(",
"k",
")",
"# rename",
"new",
"[",
"v",
"]",
"=",
"obj",
",",
"index",
"Group",
".",
"_instances",
".",
"pop",
"(",
"obj",
".",
"fullpath",
",",
"None",
")",
"obj",
".",
"natural_name",
"=",
"str",
"(",
"v",
")",
"# remove old references",
"del",
"self",
"[",
"k",
"]",
"# apply new references",
"names",
"=",
"list",
"(",
"self",
".",
"channel_names",
")",
"for",
"v",
",",
"value",
"in",
"new",
".",
"items",
"(",
")",
":",
"obj",
",",
"index",
"=",
"value",
"self",
"[",
"v",
"]",
"=",
"obj",
"names",
"[",
"index",
"]",
"=",
"v",
"self",
".",
"channel_names",
"=",
"names",
"# finish",
"if",
"verbose",
":",
"print",
"(",
"\"{0} channel(s) renamed:\"",
".",
"format",
"(",
"len",
"(",
"kwargs",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"print",
"(",
"\" {0} --> {1}\"",
".",
"format",
"(",
"k",
",",
"v",
")",
")"
] | Rename a set of channels.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
verbose : boolean (optional)
Toggle talkback. Default is True | [
"Rename",
"a",
"set",
"of",
"channels",
"."
] | python | train |
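A hedged usage sketch of the method above; the file and channel names are invented, and wt.open is assumed to be the usual way to load a saved Data object.

import WrightTools as wt

data = wt.open('example.wt5')    # hypothetical file with 'signal' and 'ref' channels
data.rename_channels(signal='intensity', ref='reference')
# talkback with the default verbose=True looks like:
#   2 channel(s) renamed:
#     signal --> intensity
#     ref --> reference
print(data.channel_names)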
StackStorm/pybind | pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/__init__.py#L96-L117 | def _set_net(self, v, load=False):
"""
Setter method for net, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/net (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_net is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_net() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("net_cmd",net.net, yang_name="net", rest_name="net", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='net-cmd', extensions={u'tailf-common': {u'info': u'Define NSAP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IsisNet'}}), is_container='list', yang_name="net", rest_name="net", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define NSAP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IsisNet'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """net must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("net_cmd",net.net, yang_name="net", rest_name="net", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='net-cmd', extensions={u'tailf-common': {u'info': u'Define NSAP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IsisNet'}}), is_container='list', yang_name="net", rest_name="net", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define NSAP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IsisNet'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)""",
})
self.__net = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_net",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"net_cmd\"",
",",
"net",
".",
"net",
",",
"yang_name",
"=",
"\"net\"",
",",
"rest_name",
"=",
"\"net\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'net-cmd'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Define NSAP address'",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'cli-suppress-mode'",
":",
"None",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-suppress-key-abbreviation'",
":",
"None",
",",
"u'callpoint'",
":",
"u'IsisNet'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"net\"",
",",
"rest_name",
"=",
"\"net\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Define NSAP address'",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'cli-suppress-mode'",
":",
"None",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-suppress-key-abbreviation'",
":",
"None",
",",
"u'callpoint'",
":",
"u'IsisNet'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-isis'",
",",
"defining_module",
"=",
"'brocade-isis'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"net must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"net_cmd\",net.net, yang_name=\"net\", rest_name=\"net\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='net-cmd', extensions={u'tailf-common': {u'info': u'Define NSAP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IsisNet'}}), is_container='list', yang_name=\"net\", rest_name=\"net\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define NSAP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IsisNet'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__net",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for net, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/net (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_net is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_net() directly. | [
"Setter",
"method",
"for",
"net",
"mapped",
"from",
"YANG",
"variable",
"/",
"routing_system",
"/",
"router",
"/",
"isis",
"/",
"router_isis_cmds_holder",
"/",
"net",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_net",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_net",
"()",
"directly",
"."
] | python | train |
apache/spark | python/pyspark/context.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L456-L480 | def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices) | [
"def",
"range",
"(",
"self",
",",
"start",
",",
"end",
"=",
"None",
",",
"step",
"=",
"1",
",",
"numSlices",
"=",
"None",
")",
":",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"start",
"start",
"=",
"0",
"return",
"self",
".",
"parallelize",
"(",
"xrange",
"(",
"start",
",",
"end",
",",
"step",
")",
",",
"numSlices",
")"
] | Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5] | [
"Create",
"a",
"new",
"RDD",
"of",
"int",
"containing",
"elements",
"from",
"start",
"to",
"end",
"(",
"exclusive",
")",
"increased",
"by",
"step",
"every",
"element",
".",
"Can",
"be",
"called",
"the",
"same",
"way",
"as",
"python",
"s",
"built",
"-",
"in",
"range",
"()",
"function",
".",
"If",
"called",
"with",
"a",
"single",
"argument",
"the",
"argument",
"is",
"interpreted",
"as",
"end",
"and",
"start",
"is",
"set",
"to",
"0",
"."
] | python | train |
praekeltfoundation/seaworthy | docs/apigen.py | https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/docs/apigen.py#L176-L236 | def main(argv=sys.argv):
# type: (List[str]) -> int
"""Parse and check the command line arguments."""
parser = optparse.OptionParser(
usage="""\
usage: %prog [options] -o <output_path> <module_path> [exclude_pattern, ...]
Look recursively in <module_path> for Python modules and packages and create
one reST file with automodule directives per package in the <output_path>.
The <exclude_pattern>s can be file and/or directory patterns that will be
excluded from generation.
Note: By default this script will not overwrite already created files.""")
parser.add_option('-o', '--output-dir', action='store', dest='destdir',
help='Directory to place all output', default='api')
parser.add_option('-s', '--source-dir', action='store', dest='srcdir',
help='Documentation source directory', default=BASEDIR)
parser.add_option('-n', '--docname', action='store', dest='docname',
help='Index document name', default='api')
parser.add_option('-l', '--follow-links', action='store_true',
dest='followlinks', default=False,
help='Follow symbolic links. Powerful when combined '
'with collective.recipe.omelette.')
parser.add_option('-P', '--private', action='store_true',
dest='includeprivate',
help='Include "_private" modules')
parser.add_option('--implicit-namespaces', action='store_true',
dest='implicit_namespaces',
help='Interpret module paths according to PEP-0420 '
'implicit namespaces specification')
parser.add_option('--version', action='store_true', dest='show_version',
help='Show version information and exit')
parser.add_option('--clean', action='store_true', dest='cleanup',
help='Clean up generated files and exit')
group = parser.add_option_group('Extension options')
for ext in EXTENSIONS:
group.add_option('--ext-' + ext, action='store_true',
dest='ext_' + ext, default=False,
help='enable %s extension' % ext)
(opts, args) = parser.parse_args(argv[1:])
# Make this more explicitly the current directory.
if not opts.srcdir:
opts.srcdir = '.'
if opts.show_version:
print('Sphinx (sphinx-apidoc) %s' % __display_version__)
return 0
if opts.cleanup:
print("Removing generated API docs from '{}'...".format(opts.srcdir))
return cleanup_api_docs(opts)
if not args:
parser.error('A package path is required.')
opts.rootpath, opts.excludes = args[0], args[1:]
return generate_api_docs(opts) | [
"def",
"main",
"(",
"argv",
"=",
"sys",
".",
"argv",
")",
":",
"# type: (List[str]) -> int",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
"usage",
"=",
"\"\"\"\\\nusage: %prog [options] -o <output_path> <module_path> [exclude_pattern, ...]\n\nLook recursively in <module_path> for Python modules and packages and create\none reST file with automodule directives per package in the <output_path>.\n\nThe <exclude_pattern>s can be file and/or directory patterns that will be\nexcluded from generation.\n\nNote: By default this script will not overwrite already created files.\"\"\"",
")",
"parser",
".",
"add_option",
"(",
"'-o'",
",",
"'--output-dir'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'destdir'",
",",
"help",
"=",
"'Directory to place all output'",
",",
"default",
"=",
"'api'",
")",
"parser",
".",
"add_option",
"(",
"'-s'",
",",
"'--source-dir'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'srcdir'",
",",
"help",
"=",
"'Documentation source directory'",
",",
"default",
"=",
"BASEDIR",
")",
"parser",
".",
"add_option",
"(",
"'-n'",
",",
"'--docname'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'docname'",
",",
"help",
"=",
"'Index document name'",
",",
"default",
"=",
"'api'",
")",
"parser",
".",
"add_option",
"(",
"'-l'",
",",
"'--follow-links'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'followlinks'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Follow symbolic links. Powerful when combined '",
"'with collective.recipe.omelette.'",
")",
"parser",
".",
"add_option",
"(",
"'-P'",
",",
"'--private'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'includeprivate'",
",",
"help",
"=",
"'Include \"_private\" modules'",
")",
"parser",
".",
"add_option",
"(",
"'--implicit-namespaces'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'implicit_namespaces'",
",",
"help",
"=",
"'Interpret module paths according to PEP-0420 '",
"'implicit namespaces specification'",
")",
"parser",
".",
"add_option",
"(",
"'--version'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'show_version'",
",",
"help",
"=",
"'Show version information and exit'",
")",
"parser",
".",
"add_option",
"(",
"'--clean'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'cleanup'",
",",
"help",
"=",
"'Clean up generated files and exit'",
")",
"group",
"=",
"parser",
".",
"add_option_group",
"(",
"'Extension options'",
")",
"for",
"ext",
"in",
"EXTENSIONS",
":",
"group",
".",
"add_option",
"(",
"'--ext-'",
"+",
"ext",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'ext_'",
"+",
"ext",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'enable %s extension'",
"%",
"ext",
")",
"(",
"opts",
",",
"args",
")",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
"[",
"1",
":",
"]",
")",
"# Make this more explicitly the current directory.",
"if",
"not",
"opts",
".",
"srcdir",
":",
"opts",
".",
"srcdir",
"=",
"'.'",
"if",
"opts",
".",
"show_version",
":",
"print",
"(",
"'Sphinx (sphinx-apidoc) %s'",
"%",
"__display_version__",
")",
"return",
"0",
"if",
"opts",
".",
"cleanup",
":",
"print",
"(",
"\"Removing generated API docs from '{}'...\"",
".",
"format",
"(",
"opts",
".",
"srcdir",
")",
")",
"return",
"cleanup_api_docs",
"(",
"opts",
")",
"if",
"not",
"args",
":",
"parser",
".",
"error",
"(",
"'A package path is required.'",
")",
"opts",
".",
"rootpath",
",",
"opts",
".",
"excludes",
"=",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
":",
"]",
"return",
"generate_api_docs",
"(",
"opts",
")"
] | Parse and check the command line arguments. | [
"Parse",
"and",
"check",
"the",
"command",
"line",
"arguments",
"."
] | python | train |
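The optparse wiring above defines a small CLI; a hedged sketch of driving it programmatically, with the package name, exclude pattern and directory layout invented.

import sys
sys.path.insert(0, 'docs')     # assumption: apigen.py sits in docs/ next to conf.py
from apigen import main

# generate reST stubs for `seaworthy` into docs/api/, skipping the tests
main(['apigen.py', '-o', 'api', '-s', 'docs', 'seaworthy', 'seaworthy/tests/*'])
# remove everything a previous run generated
main(['apigen.py', '--clean'])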
summa-tx/riemann | riemann/tx/sprout.py | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L199-L217 | def copy(self, version=None, tx_ins=None, tx_outs=None, lock_time=None,
tx_joinsplits=None, joinsplit_pubkey=None, joinsplit_sig=None):
'''
SproutTx, ... -> Tx
Makes a copy. Allows over-writing specific pieces.
'''
return SproutTx(
version=version if version is not None else self.version,
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | [
"def",
"copy",
"(",
"self",
",",
"version",
"=",
"None",
",",
"tx_ins",
"=",
"None",
",",
"tx_outs",
"=",
"None",
",",
"lock_time",
"=",
"None",
",",
"tx_joinsplits",
"=",
"None",
",",
"joinsplit_pubkey",
"=",
"None",
",",
"joinsplit_sig",
"=",
"None",
")",
":",
"return",
"SproutTx",
"(",
"version",
"=",
"version",
"if",
"version",
"is",
"not",
"None",
"else",
"self",
".",
"version",
",",
"tx_ins",
"=",
"tx_ins",
"if",
"tx_ins",
"is",
"not",
"None",
"else",
"self",
".",
"tx_ins",
",",
"tx_outs",
"=",
"tx_outs",
"if",
"tx_outs",
"is",
"not",
"None",
"else",
"self",
".",
"tx_outs",
",",
"lock_time",
"=",
"(",
"lock_time",
"if",
"lock_time",
"is",
"not",
"None",
"else",
"self",
".",
"lock_time",
")",
",",
"tx_joinsplits",
"=",
"(",
"tx_joinsplits",
"if",
"tx_joinsplits",
"is",
"not",
"None",
"else",
"self",
".",
"tx_joinsplits",
")",
",",
"joinsplit_pubkey",
"=",
"(",
"joinsplit_pubkey",
"if",
"joinsplit_pubkey",
"is",
"not",
"None",
"else",
"self",
".",
"joinsplit_pubkey",
")",
",",
"joinsplit_sig",
"=",
"(",
"joinsplit_sig",
"if",
"joinsplit_sig",
"is",
"not",
"None",
"else",
"self",
".",
"joinsplit_sig",
")",
")"
] | SproutTx, ... -> Tx
Makes a copy. Allows over-writing specific pieces. | [
"SproutTx",
"...",
"-",
">",
"Tx"
] | python | train |
limix/bgen-reader-py | bgen_reader/_dosage.py | https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_dosage.py#L245-L350 | def allele_expectation(bgen, variant_idx):
r""" Allele expectation.
Compute the expectation of each allele from the genotype probabilities.
Parameters
----------
bgen : bgen_file
Bgen file handler.
variant_idx : int
Variant index.
Returns
-------
:class:`numpy.ndarray`
Samples-by-alleles matrix of allele expectations.
Note
----
This function supports unphased genotypes only.
Examples
--------
.. doctest::
>>> from bgen_reader import allele_expectation, example_files, read_bgen
>>>
>>> from texttable import Texttable
>>>
>>> # Download an example.
>>> example = example_files("example.32bits.bgen")
>>> filepath = example.filepath
>>>
>>> # Read the example.
>>> bgen = read_bgen(filepath, verbose=False)
>>>
>>> variants = bgen["variants"]
>>> samples = bgen["samples"]
>>> genotype = bgen["genotype"]
>>>
>>> genotype = bgen["genotype"]
>>> # This `compute` call will return a pandas data frame,
>>> variant = variants[variants["rsid"] == "RSID_6"].compute()
>>> # from which we retrieve the variant index.
>>> variant_idx = variant.index.item()
>>> print(variant)
id rsid chrom pos nalleles allele_ids vaddr
4 SNPID_6 RSID_6 01 6000 2 A,G 19377
>>> genotype = bgen["genotype"]
>>> # Samples is a pandas series, and we retrieve the
>>> # sample index from the sample name.
>>> sample_idx = samples[samples == "sample_005"].index.item()
>>>
>>> genotype = bgen["genotype"]
>>> # This `compute` call will return a dictionary from which
>>> # we can get the probability matrix the corresponding
>>> # variant.
>>> p = genotype[variant_idx].compute()["probs"][sample_idx]
>>>
>>> genotype = bgen["genotype"]
>>> # Allele expectation makes sense for unphased genotypes only,
>>> # which is the case here.
>>> e = allele_expectation(bgen, variant_idx)[sample_idx]
>>>
>>> genotype = bgen["genotype"]
>>> alleles = variant["allele_ids"].item().split(",")
>>>
>>> genotype = bgen["genotype"]
>>>
>>> # Print what we have got in a nice format.
>>> table = Texttable()
>>> table = table.add_rows(
... [
... ["", "AA", "AG", "GG", "E[.]"],
... ["p"] + list(p) + ["na"],
... ["#" + alleles[0], 2, 1, 0, e[0]],
... ["#" + alleles[1], 0, 1, 2, e[1]],
... ]
... )
>>> print(table.draw())
+----+-------+-------+-------+-------+
| | AA | AG | GG | E[.] |
+====+=======+=======+=======+=======+
| p | 0.012 | 0.987 | 0.001 | na |
+----+-------+-------+-------+-------+
| #A | 2 | 1 | 0 | 1.011 |
+----+-------+-------+-------+-------+
| #G | 0 | 1 | 2 | 0.989 |
+----+-------+-------+-------+-------+
>>>
>>> # Clean-up.
>>> example.close()
"""
geno = bgen["genotype"][variant_idx].compute()
if geno["phased"]:
raise ValueError("Allele expectation is define for unphased genotypes only.")
nalleles = bgen["variants"].loc[variant_idx, "nalleles"].compute().item()
genotypes = get_genotypes(geno["ploidy"], nalleles)
expec = []
for i in range(len(genotypes)):
count = asarray(genotypes_to_allele_counts(genotypes[i]), float)
n = count.shape[0]
expec.append((count.T * geno["probs"][i, :n]).sum(1))
return stack(expec, axis=0) | [
"def",
"allele_expectation",
"(",
"bgen",
",",
"variant_idx",
")",
":",
"geno",
"=",
"bgen",
"[",
"\"genotype\"",
"]",
"[",
"variant_idx",
"]",
".",
"compute",
"(",
")",
"if",
"geno",
"[",
"\"phased\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"Allele expectation is define for unphased genotypes only.\"",
")",
"nalleles",
"=",
"bgen",
"[",
"\"variants\"",
"]",
".",
"loc",
"[",
"variant_idx",
",",
"\"nalleles\"",
"]",
".",
"compute",
"(",
")",
".",
"item",
"(",
")",
"genotypes",
"=",
"get_genotypes",
"(",
"geno",
"[",
"\"ploidy\"",
"]",
",",
"nalleles",
")",
"expec",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"genotypes",
")",
")",
":",
"count",
"=",
"asarray",
"(",
"genotypes_to_allele_counts",
"(",
"genotypes",
"[",
"i",
"]",
")",
",",
"float",
")",
"n",
"=",
"count",
".",
"shape",
"[",
"0",
"]",
"expec",
".",
"append",
"(",
"(",
"count",
".",
"T",
"*",
"geno",
"[",
"\"probs\"",
"]",
"[",
"i",
",",
":",
"n",
"]",
")",
".",
"sum",
"(",
"1",
")",
")",
"return",
"stack",
"(",
"expec",
",",
"axis",
"=",
"0",
")"
] | r""" Allele expectation.
Compute the expectation of each allele from the genotype probabilities.
Parameters
----------
bgen : bgen_file
Bgen file handler.
variant_idx : int
Variant index.
Returns
-------
:class:`numpy.ndarray`
Samples-by-alleles matrix of allele expectations.
Note
----
This function supports unphased genotypes only.
Examples
--------
.. doctest::
>>> from bgen_reader import allele_expectation, example_files, read_bgen
>>>
>>> from texttable import Texttable
>>>
>>> # Download an example.
>>> example = example_files("example.32bits.bgen")
>>> filepath = example.filepath
>>>
>>> # Read the example.
>>> bgen = read_bgen(filepath, verbose=False)
>>>
>>> variants = bgen["variants"]
>>> samples = bgen["samples"]
>>> genotype = bgen["genotype"]
>>>
>>> genotype = bgen["genotype"]
>>> # This `compute` call will return a pandas data frame,
>>> variant = variants[variants["rsid"] == "RSID_6"].compute()
>>> # from which we retrieve the variant index.
>>> variant_idx = variant.index.item()
>>> print(variant)
id rsid chrom pos nalleles allele_ids vaddr
4 SNPID_6 RSID_6 01 6000 2 A,G 19377
>>> genotype = bgen["genotype"]
>>> # Samples is a pandas series, and we retrieve the
>>> # sample index from the sample name.
>>> sample_idx = samples[samples == "sample_005"].index.item()
>>>
>>> genotype = bgen["genotype"]
>>> # This `compute` call will return a dictionary from which
>>> # we can get the probability matrix the corresponding
>>> # variant.
>>> p = genotype[variant_idx].compute()["probs"][sample_idx]
>>>
>>> genotype = bgen["genotype"]
>>> # Allele expectation makes sense for unphased genotypes only,
>>> # which is the case here.
>>> e = allele_expectation(bgen, variant_idx)[sample_idx]
>>>
>>> genotype = bgen["genotype"]
>>> alleles = variant["allele_ids"].item().split(",")
>>>
>>> genotype = bgen["genotype"]
>>>
>>> # Print what we have got in a nice format.
>>> table = Texttable()
>>> table = table.add_rows(
... [
... ["", "AA", "AG", "GG", "E[.]"],
... ["p"] + list(p) + ["na"],
... ["#" + alleles[0], 2, 1, 0, e[0]],
... ["#" + alleles[1], 0, 1, 2, e[1]],
... ]
... )
>>> print(table.draw())
+----+-------+-------+-------+-------+
| | AA | AG | GG | E[.] |
+====+=======+=======+=======+=======+
| p | 0.012 | 0.987 | 0.001 | na |
+----+-------+-------+-------+-------+
| #A | 2 | 1 | 0 | 1.011 |
+----+-------+-------+-------+-------+
| #G | 0 | 1 | 2 | 0.989 |
+----+-------+-------+-------+-------+
>>>
>>> # Clean-up.
>>> example.close() | [
"r",
"Allele",
"expectation",
"."
] | python | valid |
OCR-D/core | ocrd/ocrd/workspace.py | https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/workspace.py#L95-L129 | def add_file(self, file_grp, content=None, **kwargs):
"""
Add an output file. Creates an :class:`OcrdFile` to pass around and adds that to the
OcrdMets OUTPUT section.
"""
log.debug(
'outputfile file_grp=%s local_filename=%s content=%s',
file_grp,
kwargs.get('local_filename'),
content is not None)
if content is not None and 'local_filename' not in kwargs:
raise Exception("'content' was set but no 'local_filename'")
oldpwd = os.getcwd()
try:
os.chdir(self.directory)
if 'local_filename' in kwargs:
local_filename_dir = kwargs['local_filename'].rsplit('/', 1)[0]
if not os.path.isdir(local_filename_dir):
os.makedirs(local_filename_dir)
if 'url' not in kwargs:
kwargs['url'] = kwargs['local_filename']
# print(kwargs)
ret = self.mets.add_file(file_grp, **kwargs)
if content is not None:
with open(kwargs['local_filename'], 'wb') as f:
if isinstance(content, str):
content = bytes(content, 'utf-8')
f.write(content)
finally:
os.chdir(oldpwd)
return ret | [
"def",
"add_file",
"(",
"self",
",",
"file_grp",
",",
"content",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"debug",
"(",
"'outputfile file_grp=%s local_filename=%s content=%s'",
",",
"file_grp",
",",
"kwargs",
".",
"get",
"(",
"'local_filename'",
")",
",",
"content",
"is",
"not",
"None",
")",
"if",
"content",
"is",
"not",
"None",
"and",
"'local_filename'",
"not",
"in",
"kwargs",
":",
"raise",
"Exception",
"(",
"\"'content' was set but no 'local_filename'\"",
")",
"oldpwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"try",
":",
"os",
".",
"chdir",
"(",
"self",
".",
"directory",
")",
"if",
"'local_filename'",
"in",
"kwargs",
":",
"local_filename_dir",
"=",
"kwargs",
"[",
"'local_filename'",
"]",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"0",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"local_filename_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"local_filename_dir",
")",
"if",
"'url'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'url'",
"]",
"=",
"kwargs",
"[",
"'local_filename'",
"]",
"# print(kwargs)",
"ret",
"=",
"self",
".",
"mets",
".",
"add_file",
"(",
"file_grp",
",",
"*",
"*",
"kwargs",
")",
"if",
"content",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"kwargs",
"[",
"'local_filename'",
"]",
",",
"'wb'",
")",
"as",
"f",
":",
"if",
"isinstance",
"(",
"content",
",",
"str",
")",
":",
"content",
"=",
"bytes",
"(",
"content",
",",
"'utf-8'",
")",
"f",
".",
"write",
"(",
"content",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"oldpwd",
")",
"return",
"ret"
] | Add an output file. Creates an :class:`OcrdFile` to pass around and adds that to the
OcrdMets OUTPUT section. | [
"Add",
"an",
"output",
"file",
".",
"Creates",
"an",
":",
"class",
":",
"OcrdFile",
"to",
"pass",
"around",
"and",
"adds",
"that",
"to",
"the",
"OcrdMets",
"OUTPUT",
"section",
"."
] | python | train |
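A hedged sketch of calling the method above; Resolver is the usual way to obtain a Workspace in OCR-D, and the fileGrp, ID and content are invented.

from ocrd import Resolver

workspace = Resolver().workspace_from_url('mets.xml')
workspace.add_file(
    'OCR-D-GT-TEXT',                          # fileGrp
    ID='OCR-D-GT-TEXT_0001',
    mimetype='text/plain',
    local_filename='OCR-D-GT-TEXT/0001.txt',  # required whenever content is passed
    content='recognized text for page 1',
)
workspace.save_mets()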
limix/limix-core | limix_core/covar/cov2kronSum.py | https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/covar/cov2kronSum.py#L264-L274 | def solve_t(self, Mt):
"""
Mt is dim_r x dim_c x d tensor
"""
if len(Mt.shape)==2: _Mt = Mt[:, :, sp.newaxis]
else: _Mt = Mt
LMt = vei_CoR_veX(_Mt, R=self.Lr(), C=self.Lc())
DLMt = self.D()[:, :, sp.newaxis] * LMt
RV = vei_CoR_veX(DLMt, R=self.Lr().T, C=self.Lc().T)
if len(Mt.shape)==2: RV = RV[:, :, 0]
return RV | [
"def",
"solve_t",
"(",
"self",
",",
"Mt",
")",
":",
"if",
"len",
"(",
"Mt",
".",
"shape",
")",
"==",
"2",
":",
"_Mt",
"=",
"Mt",
"[",
":",
",",
":",
",",
"sp",
".",
"newaxis",
"]",
"else",
":",
"_Mt",
"=",
"Mt",
"LMt",
"=",
"vei_CoR_veX",
"(",
"_Mt",
",",
"R",
"=",
"self",
".",
"Lr",
"(",
")",
",",
"C",
"=",
"self",
".",
"Lc",
"(",
")",
")",
"DLMt",
"=",
"self",
".",
"D",
"(",
")",
"[",
":",
",",
":",
",",
"sp",
".",
"newaxis",
"]",
"*",
"LMt",
"RV",
"=",
"vei_CoR_veX",
"(",
"DLMt",
",",
"R",
"=",
"self",
".",
"Lr",
"(",
")",
".",
"T",
",",
"C",
"=",
"self",
".",
"Lc",
"(",
")",
".",
"T",
")",
"if",
"len",
"(",
"Mt",
".",
"shape",
")",
"==",
"2",
":",
"RV",
"=",
"RV",
"[",
":",
",",
":",
",",
"0",
"]",
"return",
"RV"
] | Mt is dim_r x dim_c x d tensor | [
"Mt",
"is",
"dim_r",
"x",
"dim_c",
"x",
"d",
"tensor"
] | python | train |
nerdvegas/rez | src/rez/vendor/pygraph/classes/graph.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/classes/graph.py#L170-L182 | def del_edge(self, edge):
"""
Remove an edge from the graph.
@type edge: tuple
@param edge: Edge.
"""
u, v = edge
self.node_neighbors[u].remove(v)
self.del_edge_labeling((u, v))
if (u != v):
self.node_neighbors[v].remove(u)
self.del_edge_labeling((v, u)) | [
"def",
"del_edge",
"(",
"self",
",",
"edge",
")",
":",
"u",
",",
"v",
"=",
"edge",
"self",
".",
"node_neighbors",
"[",
"u",
"]",
".",
"remove",
"(",
"v",
")",
"self",
".",
"del_edge_labeling",
"(",
"(",
"u",
",",
"v",
")",
")",
"if",
"(",
"u",
"!=",
"v",
")",
":",
"self",
".",
"node_neighbors",
"[",
"v",
"]",
".",
"remove",
"(",
"u",
")",
"self",
".",
"del_edge_labeling",
"(",
"(",
"v",
",",
"u",
")",
")"
] | Remove an edge from the graph.
@type edge: tuple
@param edge: Edge. | [
"Remove",
"an",
"edge",
"from",
"the",
"graph",
"."
] | python | train |
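A small usage sketch of the vendored python-graph class documented above (inside rez it sits under rez.vendor); node names are invented.

from pygraph.classes.graph import graph

g = graph()
g.add_nodes(['a', 'b'])
g.add_edge(('a', 'b'))
g.del_edge(('a', 'b'))    # removes both directions and the edge labeling
print(g.neighbors('a'))   # []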
deepmipt/DeepPavlov | deeppavlov/core/common/registry.py | https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/common/registry.py#L43-L57 | def register(name: str = None) -> type:
"""
Register classes that could be initialized from JSON configuration file.
If name is not passed, the class name is converted to snake-case.
"""
def decorate(model_cls: type, reg_name: str = None) -> type:
model_name = reg_name or short_name(model_cls)
global _REGISTRY
cls_name = model_cls.__module__ + ':' + model_cls.__name__
if model_name in _REGISTRY and _REGISTRY[model_name] != cls_name:
logger.warning('Registry name "{}" has been already registered and will be overwritten.'.format(model_name))
_REGISTRY[model_name] = cls_name
return model_cls
return lambda model_cls_name: decorate(model_cls_name, name) | [
"def",
"register",
"(",
"name",
":",
"str",
"=",
"None",
")",
"->",
"type",
":",
"def",
"decorate",
"(",
"model_cls",
":",
"type",
",",
"reg_name",
":",
"str",
"=",
"None",
")",
"->",
"type",
":",
"model_name",
"=",
"reg_name",
"or",
"short_name",
"(",
"model_cls",
")",
"global",
"_REGISTRY",
"cls_name",
"=",
"model_cls",
".",
"__module__",
"+",
"':'",
"+",
"model_cls",
".",
"__name__",
"if",
"model_name",
"in",
"_REGISTRY",
"and",
"_REGISTRY",
"[",
"model_name",
"]",
"!=",
"cls_name",
":",
"logger",
".",
"warning",
"(",
"'Registry name \"{}\" has been already registered and will be overwritten.'",
".",
"format",
"(",
"model_name",
")",
")",
"_REGISTRY",
"[",
"model_name",
"]",
"=",
"cls_name",
"return",
"model_cls",
"return",
"lambda",
"model_cls_name",
":",
"decorate",
"(",
"model_cls_name",
",",
"name",
")"
] | Register classes that could be initialized from JSON configuration file.
If name is not passed, the class name is converted to snake-case. | [
"Register",
"classes",
"that",
"could",
"be",
"initialized",
"from",
"JSON",
"configuration",
"file",
".",
"If",
"name",
"is",
"not",
"passed",
"the",
"class",
"name",
"is",
"converted",
"to",
"snake",
"-",
"case",
"."
] | python | test |
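A hedged sketch of registering a component under an invented name; note the decorator has to be called — register() or register('name') — because the bare function returns the inner decorator.

from deeppavlov.core.common.registry import register

@register('my_lowercase_preprocessor')   # omit the argument to get the snake-cased class name
class MyLowercasePreprocessor:
    def __call__(self, batch):
        return [s.lower() for s in batch]

# the registered name can now be referenced from a JSON pipeline config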
globality-corp/microcosm-flask | microcosm_flask/forwarding.py | https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/forwarding.py#L15-L51 | def use_forwarded_port(graph):
"""
Inject the `X-Forwarded-Port` (if any) into the current URL adapter.
The URL adapter is used by `url_for` to build a URLs.
"""
# There must be a better way!
context = _request_ctx_stack.top
if _request_ctx_stack is None:
return None
# determine the configured overrides
forwarded_host = graph.config.port_forwarding.get("host")
forwarded_port = request.headers.get("X-Forwarded-Port")
if not forwarded_port and not forwarded_host:
return None
# determine the current server name
if ":" in context.url_adapter.server_name:
server_host, server_port = context.url_adapter.server_name.split(":", 1)
else:
server_host = context.url_adapter.server_name
server_port = 443 if context.url_adapter.url_scheme == "https" else 80
# choose a new server name
if forwarded_host:
server_name = forwarded_host
elif server_port:
server_name = "{}:{}".format(server_host, forwarded_port)
else:
server_name = "{}:{}".format(server_host, server_port)
context.url_adapter.server_name = server_name
return server_name | [
"def",
"use_forwarded_port",
"(",
"graph",
")",
":",
"# There must be a better way!",
"context",
"=",
"_request_ctx_stack",
".",
"top",
"if",
"_request_ctx_stack",
"is",
"None",
":",
"return",
"None",
"# determine the configured overrides",
"forwarded_host",
"=",
"graph",
".",
"config",
".",
"port_forwarding",
".",
"get",
"(",
"\"host\"",
")",
"forwarded_port",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"\"X-Forwarded-Port\"",
")",
"if",
"not",
"forwarded_port",
"and",
"not",
"forwarded_host",
":",
"return",
"None",
"# determine the current server name",
"if",
"\":\"",
"in",
"context",
".",
"url_adapter",
".",
"server_name",
":",
"server_host",
",",
"server_port",
"=",
"context",
".",
"url_adapter",
".",
"server_name",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"else",
":",
"server_host",
"=",
"context",
".",
"url_adapter",
".",
"server_name",
"server_port",
"=",
"443",
"if",
"context",
".",
"url_adapter",
".",
"url_scheme",
"==",
"\"https\"",
"else",
"80",
"# choose a new server name",
"if",
"forwarded_host",
":",
"server_name",
"=",
"forwarded_host",
"elif",
"server_port",
":",
"server_name",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"server_host",
",",
"forwarded_port",
")",
"else",
":",
"server_name",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"server_host",
",",
"server_port",
")",
"context",
".",
"url_adapter",
".",
"server_name",
"=",
"server_name",
"return",
"server_name"
] | Inject the `X-Forwarded-Port` (if any) into the current URL adapter.
The URL adapter is used by `url_for` to build a URLs. | [
"Inject",
"the",
"X",
"-",
"Forwarded",
"-",
"Port",
"(",
"if",
"any",
")",
"into",
"the",
"current",
"URL",
"adapter",
"."
] | python | train |
earlye/nephele | nephele/AwsProcessor.py | https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsProcessor.py#L178-L191 | def do_slash(self,args):
"""
Navigate back to the root level.
For example, if you are in `(aws)/stack:.../asg:.../`, executing `slash` will place you in `(aws)/`.
slash -h for more details
"""
parser = CommandArgumentParser("slash")
args = vars(parser.parse_args(args))
if None == self.parent:
print "You're at the root. Try 'quit' to quit"
else:
raise SlashException() | [
"def",
"do_slash",
"(",
"self",
",",
"args",
")",
":",
"parser",
"=",
"CommandArgumentParser",
"(",
"\"slash\"",
")",
"args",
"=",
"vars",
"(",
"parser",
".",
"parse_args",
"(",
"args",
")",
")",
"if",
"None",
"==",
"self",
".",
"parent",
":",
"print",
"\"You're at the root. Try 'quit' to quit\"",
"else",
":",
"raise",
"SlashException",
"(",
")"
] | Navigate back to the root level.
For example, if you are in `(aws)/stack:.../asg:.../`, executing `slash` will place you in `(aws)/`.
slash -h for more details | [
"Navigate",
"back",
"to",
"the",
"root",
"level",
"."
] | python | train |
jochym/Elastic | parcalc/parcalc.py | https://github.com/jochym/Elastic/blob/8daae37d0c48aab8dfb1de2839dab02314817f95/parcalc/parcalc.py#L502-L526 | def ParallelCalculate(cls,syslst,properties=['energy'],system_changes=all_changes):
'''
Run a series of calculations in parallel using (implicitely) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties.
'''
print('Launching:',end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
try :
s.calc.block=False
s.calc.calculate(atoms=s,properties=properties,system_changes=system_changes)
except CalcNotReadyError:
s.calc.block=True
print(n+1, end=' ')
sys.stdout.flush()
print()
print(' Done:', end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
s.calc.read_results()
print( n+1, end=' ')
sys.stdout.flush()
print()
return syslst | [
"def",
"ParallelCalculate",
"(",
"cls",
",",
"syslst",
",",
"properties",
"=",
"[",
"'energy'",
"]",
",",
"system_changes",
"=",
"all_changes",
")",
":",
"print",
"(",
"'Launching:'",
",",
"end",
"=",
"' '",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"for",
"n",
",",
"s",
"in",
"enumerate",
"(",
"syslst",
")",
":",
"try",
":",
"s",
".",
"calc",
".",
"block",
"=",
"False",
"s",
".",
"calc",
".",
"calculate",
"(",
"atoms",
"=",
"s",
",",
"properties",
"=",
"properties",
",",
"system_changes",
"=",
"system_changes",
")",
"except",
"CalcNotReadyError",
":",
"s",
".",
"calc",
".",
"block",
"=",
"True",
"print",
"(",
"n",
"+",
"1",
",",
"end",
"=",
"' '",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"print",
"(",
")",
"print",
"(",
"' Done:'",
",",
"end",
"=",
"' '",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"for",
"n",
",",
"s",
"in",
"enumerate",
"(",
"syslst",
")",
":",
"s",
".",
"calc",
".",
"read_results",
"(",
")",
"print",
"(",
"n",
"+",
"1",
",",
"end",
"=",
"' '",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"print",
"(",
")",
"return",
"syslst"
] | Run a series of calculations in parallel using (implicitely) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties. | [
"Run",
"a",
"series",
"of",
"calculations",
"in",
"parallel",
"using",
"(",
"implicitely",
")",
"some",
"remote",
"machine",
"/",
"cluster",
".",
"The",
"function",
"returns",
"the",
"list",
"of",
"systems",
"ready",
"for",
"the",
"extraction",
"of",
"calculated",
"properties",
"."
] | python | train |
RedHatInsights/insights-core | insights/core/plugins.py | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/plugins.py#L414-L425 | def adjust_for_length(self, key, r, kwargs):
"""
Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead.
"""
length = len(str(kwargs))
if length > settings.defaults["max_detail_length"]:
self._log_length_error(key, length)
r["max_detail_length_error"] = length
return r
return kwargs | [
"def",
"adjust_for_length",
"(",
"self",
",",
"key",
",",
"r",
",",
"kwargs",
")",
":",
"length",
"=",
"len",
"(",
"str",
"(",
"kwargs",
")",
")",
"if",
"length",
">",
"settings",
".",
"defaults",
"[",
"\"max_detail_length\"",
"]",
":",
"self",
".",
"_log_length_error",
"(",
"key",
",",
"length",
")",
"r",
"[",
"\"max_detail_length_error\"",
"]",
"=",
"length",
"return",
"r",
"return",
"kwargs"
] | Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead. | [
"Converts",
"the",
"response",
"to",
"a",
"string",
"and",
"compares",
"its",
"length",
"to",
"a",
"max",
"length",
"specified",
"in",
"settings",
".",
"If",
"the",
"response",
"is",
"too",
"long",
"an",
"error",
"is",
"logged",
"and",
"an",
"abbreviated",
"response",
"is",
"returned",
"instead",
"."
] | python | train |
KrishnaswamyLab/graphtools | graphtools/base.py | https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/base.py#L513-L535 | def diff_aff(self):
"""Symmetric diffusion affinity matrix
Return or calculate the symmetric diffusion affinity matrix
.. math:: A(x,y) = K(x,y) (d(x) d(y))^{-1/2}
where :math:`d` is the degrees (row sums of the kernel.)
Returns
-------
diff_aff : array-like, shape=[n_samples, n_samples]
symmetric diffusion affinity matrix defined as a
doubly-stochastic form of the kernel matrix
"""
row_degrees = np.array(self.kernel.sum(axis=1)).reshape(-1, 1)
col_degrees = np.array(self.kernel.sum(axis=0)).reshape(1, -1)
if sparse.issparse(self.kernel):
return self.kernel.multiply(1 / np.sqrt(row_degrees)).multiply(
1 / np.sqrt(col_degrees))
else:
return (self.kernel / np.sqrt(row_degrees)) / np.sqrt(col_degrees) | [
"def",
"diff_aff",
"(",
"self",
")",
":",
"row_degrees",
"=",
"np",
".",
"array",
"(",
"self",
".",
"kernel",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"col_degrees",
"=",
"np",
".",
"array",
"(",
"self",
".",
"kernel",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"if",
"sparse",
".",
"issparse",
"(",
"self",
".",
"kernel",
")",
":",
"return",
"self",
".",
"kernel",
".",
"multiply",
"(",
"1",
"/",
"np",
".",
"sqrt",
"(",
"row_degrees",
")",
")",
".",
"multiply",
"(",
"1",
"/",
"np",
".",
"sqrt",
"(",
"col_degrees",
")",
")",
"else",
":",
"return",
"(",
"self",
".",
"kernel",
"/",
"np",
".",
"sqrt",
"(",
"row_degrees",
")",
")",
"/",
"np",
".",
"sqrt",
"(",
"col_degrees",
")"
] | Symmetric diffusion affinity matrix
Return or calculate the symmetric diffusion affinity matrix
.. math:: A(x,y) = K(x,y) (d(x) d(y))^{-1/2}
where :math:`d` is the degrees (row sums of the kernel.)
Returns
-------
diff_aff : array-like, shape=[n_samples, n_samples]
symmetric diffusion affinity matrix defined as a
doubly-stochastic form of the kernel matrix | [
"Symmetric",
"diffusion",
"affinity",
"matrix"
] | python | train |
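A dense NumPy restatement of the normalisation above on an invented 3x3 kernel; it illustrates the formula only, not the sparse branch.

import numpy as np

K = np.array([[1.0, 0.5, 0.2],
              [0.5, 1.0, 0.3],
              [0.2, 0.3, 1.0]])        # toy symmetric kernel
row = K.sum(axis=1).reshape(-1, 1)     # d(x)
col = K.sum(axis=0).reshape(1, -1)     # d(y)
A = (K / np.sqrt(row)) / np.sqrt(col)  # A(x,y) = K(x,y) * (d(x) d(y)) ** -0.5
print(np.allclose(A, A.T))             # True: symmetric because K is symmetric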
budacom/trading-bots | trading_bots/contrib/clients.py | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L552-L554 | def place_market_order(self, side: Side, amount: Number) -> Order:
"""Place a market order."""
return self.place_order(side, OrderType.MARKET, amount) | [
"def",
"place_market_order",
"(",
"self",
",",
"side",
":",
"Side",
",",
"amount",
":",
"Number",
")",
"->",
"Order",
":",
"return",
"self",
".",
"place_order",
"(",
"side",
",",
"OrderType",
".",
"MARKET",
",",
"amount",
")"
] | Place a market order. | [
"Place",
"a",
"market",
"order",
"."
] | python | train |
delph-in/pydelphin | delphin/mrs/semi.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/semi.py#L322-L330 | def from_dict(cls, d):
"""Instantiate a SemI from a dictionary representation."""
read = lambda cls: (lambda pair: (pair[0], cls.from_dict(pair[1])))
return cls(
variables=map(read(Variable), d.get('variables', {}).items()),
properties=map(read(Property), d.get('properties', {}).items()),
roles=map(read(Role), d.get('roles', {}).items()),
predicates=map(read(Predicate), d.get('predicates', {}).items())
) | [
"def",
"from_dict",
"(",
"cls",
",",
"d",
")",
":",
"read",
"=",
"lambda",
"cls",
":",
"(",
"lambda",
"pair",
":",
"(",
"pair",
"[",
"0",
"]",
",",
"cls",
".",
"from_dict",
"(",
"pair",
"[",
"1",
"]",
")",
")",
")",
"return",
"cls",
"(",
"variables",
"=",
"map",
"(",
"read",
"(",
"Variable",
")",
",",
"d",
".",
"get",
"(",
"'variables'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
")",
",",
"properties",
"=",
"map",
"(",
"read",
"(",
"Property",
")",
",",
"d",
".",
"get",
"(",
"'properties'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
")",
",",
"roles",
"=",
"map",
"(",
"read",
"(",
"Role",
")",
",",
"d",
".",
"get",
"(",
"'roles'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
")",
",",
"predicates",
"=",
"map",
"(",
"read",
"(",
"Predicate",
")",
",",
"d",
".",
"get",
"(",
"'predicates'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
")",
")"
] | Instantiate a SemI from a dictionary representation. | [
"Instantiate",
"a",
"SemI",
"from",
"a",
"dictionary",
"representation",
"."
] | python | train |
watson-developer-cloud/python-sdk | ibm_watson/speech_to_text_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L3785-L3797 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'out_of_vocabulary_words'
) and self.out_of_vocabulary_words is not None:
_dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'error') and self.error is not None:
_dict['error'] = self.error
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'name'",
")",
"and",
"self",
".",
"name",
"is",
"not",
"None",
":",
"_dict",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"if",
"hasattr",
"(",
"self",
",",
"'out_of_vocabulary_words'",
")",
"and",
"self",
".",
"out_of_vocabulary_words",
"is",
"not",
"None",
":",
"_dict",
"[",
"'out_of_vocabulary_words'",
"]",
"=",
"self",
".",
"out_of_vocabulary_words",
"if",
"hasattr",
"(",
"self",
",",
"'status'",
")",
"and",
"self",
".",
"status",
"is",
"not",
"None",
":",
"_dict",
"[",
"'status'",
"]",
"=",
"self",
".",
"status",
"if",
"hasattr",
"(",
"self",
",",
"'error'",
")",
"and",
"self",
".",
"error",
"is",
"not",
"None",
":",
"_dict",
"[",
"'error'",
"]",
"=",
"self",
".",
"error",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
sparknetworks/pgpm | pgpm/lib/utils/db.py | https://github.com/sparknetworks/pgpm/blob/1a060df46a886095181f692ea870a73a32510a2e/pgpm/lib/utils/db.py#L179-L188 | def grant_default_usage_install_privileges(cls, cur, schema_name, roles):
"""
Sets search path
"""
cur.execute('ALTER DEFAULT PRIVILEGES IN SCHEMA {0} '
'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO {1};'
'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT EXECUTE ON FUNCTIONS TO {1};'
'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} '
'GRANT USAGE, SELECT ON SEQUENCES TO {1};'
.format(schema_name, roles)) | [
"def",
"grant_default_usage_install_privileges",
"(",
"cls",
",",
"cur",
",",
"schema_name",
",",
"roles",
")",
":",
"cur",
".",
"execute",
"(",
"'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} '",
"'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO {1};'",
"'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT EXECUTE ON FUNCTIONS TO {1};'",
"'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} '",
"'GRANT USAGE, SELECT ON SEQUENCES TO {1};'",
".",
"format",
"(",
"schema_name",
",",
"roles",
")",
")"
] | Grants default privileges in the schema to the given roles | [
"Sets",
"search",
"path"
] | python | train |
saltstack/salt | salt/states/boto_apigateway.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L798-L813 | def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format)) | [
"def",
"_validate_lambda_funcname_format",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"_lambda_funcname_format",
":",
"known_kwargs",
"=",
"dict",
"(",
"stage",
"=",
"''",
",",
"api",
"=",
"''",
",",
"resource",
"=",
"''",
",",
"method",
"=",
"''",
")",
"self",
".",
"_lambda_funcname_format",
".",
"format",
"(",
"*",
"*",
"known_kwargs",
")",
"return",
"True",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"'Invalid lambda_funcname_format {0}. Please review '",
"'documentation for known substitutable keys'",
".",
"format",
"(",
"self",
".",
"_lambda_funcname_format",
")",
")"
] | Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error | [
"Checks",
"if",
"the",
"lambda",
"function",
"name",
"format",
"contains",
"only",
"known",
"elements",
":",
"return",
":",
"True",
"on",
"success",
"ValueError",
"raised",
"on",
"error"
] | python | train |
ray-project/ray | python/ray/worker.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/worker.py#L1142-L1210 | def _initialize_serialization(driver_id, worker=global_worker):
"""Initialize the serialization library.
This defines a custom serializer for object IDs and also tells ray to
serialize several exception classes that we define for error handling.
"""
serialization_context = pyarrow.default_serialization_context()
# Tell the serialization context to use the cloudpickle version that we
# ship with Ray.
serialization_context.set_pickle(pickle.dumps, pickle.loads)
pyarrow.register_torch_serialization_handlers(serialization_context)
for id_type in ray._raylet._ID_TYPES:
serialization_context.register_type(
id_type,
"{}.{}".format(id_type.__module__, id_type.__name__),
pickle=True)
def actor_handle_serializer(obj):
return obj._serialization_helper(True)
def actor_handle_deserializer(serialized_obj):
new_handle = ray.actor.ActorHandle.__new__(ray.actor.ActorHandle)
new_handle._deserialization_helper(serialized_obj, True)
return new_handle
# We register this serializer on each worker instead of calling
# register_custom_serializer from the driver so that isinstance still
# works.
serialization_context.register_type(
ray.actor.ActorHandle,
"ray.ActorHandle",
pickle=False,
custom_serializer=actor_handle_serializer,
custom_deserializer=actor_handle_deserializer)
worker.serialization_context_map[driver_id] = serialization_context
# Register exception types.
for error_cls in RAY_EXCEPTION_TYPES:
register_custom_serializer(
error_cls,
use_dict=True,
local=True,
driver_id=driver_id,
class_id=error_cls.__module__ + ". " + error_cls.__name__,
)
# Tell Ray to serialize lambdas with pickle.
register_custom_serializer(
type(lambda: 0),
use_pickle=True,
local=True,
driver_id=driver_id,
class_id="lambda")
# Tell Ray to serialize types with pickle.
register_custom_serializer(
type(int),
use_pickle=True,
local=True,
driver_id=driver_id,
class_id="type")
# Tell Ray to serialize FunctionSignatures as dictionaries. This is
# used when passing around actor handles.
register_custom_serializer(
ray.signature.FunctionSignature,
use_dict=True,
local=True,
driver_id=driver_id,
class_id="ray.signature.FunctionSignature") | [
"def",
"_initialize_serialization",
"(",
"driver_id",
",",
"worker",
"=",
"global_worker",
")",
":",
"serialization_context",
"=",
"pyarrow",
".",
"default_serialization_context",
"(",
")",
"# Tell the serialization context to use the cloudpickle version that we",
"# ship with Ray.",
"serialization_context",
".",
"set_pickle",
"(",
"pickle",
".",
"dumps",
",",
"pickle",
".",
"loads",
")",
"pyarrow",
".",
"register_torch_serialization_handlers",
"(",
"serialization_context",
")",
"for",
"id_type",
"in",
"ray",
".",
"_raylet",
".",
"_ID_TYPES",
":",
"serialization_context",
".",
"register_type",
"(",
"id_type",
",",
"\"{}.{}\"",
".",
"format",
"(",
"id_type",
".",
"__module__",
",",
"id_type",
".",
"__name__",
")",
",",
"pickle",
"=",
"True",
")",
"def",
"actor_handle_serializer",
"(",
"obj",
")",
":",
"return",
"obj",
".",
"_serialization_helper",
"(",
"True",
")",
"def",
"actor_handle_deserializer",
"(",
"serialized_obj",
")",
":",
"new_handle",
"=",
"ray",
".",
"actor",
".",
"ActorHandle",
".",
"__new__",
"(",
"ray",
".",
"actor",
".",
"ActorHandle",
")",
"new_handle",
".",
"_deserialization_helper",
"(",
"serialized_obj",
",",
"True",
")",
"return",
"new_handle",
"# We register this serializer on each worker instead of calling",
"# register_custom_serializer from the driver so that isinstance still",
"# works.",
"serialization_context",
".",
"register_type",
"(",
"ray",
".",
"actor",
".",
"ActorHandle",
",",
"\"ray.ActorHandle\"",
",",
"pickle",
"=",
"False",
",",
"custom_serializer",
"=",
"actor_handle_serializer",
",",
"custom_deserializer",
"=",
"actor_handle_deserializer",
")",
"worker",
".",
"serialization_context_map",
"[",
"driver_id",
"]",
"=",
"serialization_context",
"# Register exception types.",
"for",
"error_cls",
"in",
"RAY_EXCEPTION_TYPES",
":",
"register_custom_serializer",
"(",
"error_cls",
",",
"use_dict",
"=",
"True",
",",
"local",
"=",
"True",
",",
"driver_id",
"=",
"driver_id",
",",
"class_id",
"=",
"error_cls",
".",
"__module__",
"+",
"\". \"",
"+",
"error_cls",
".",
"__name__",
",",
")",
"# Tell Ray to serialize lambdas with pickle.",
"register_custom_serializer",
"(",
"type",
"(",
"lambda",
":",
"0",
")",
",",
"use_pickle",
"=",
"True",
",",
"local",
"=",
"True",
",",
"driver_id",
"=",
"driver_id",
",",
"class_id",
"=",
"\"lambda\"",
")",
"# Tell Ray to serialize types with pickle.",
"register_custom_serializer",
"(",
"type",
"(",
"int",
")",
",",
"use_pickle",
"=",
"True",
",",
"local",
"=",
"True",
",",
"driver_id",
"=",
"driver_id",
",",
"class_id",
"=",
"\"type\"",
")",
"# Tell Ray to serialize FunctionSignatures as dictionaries. This is",
"# used when passing around actor handles.",
"register_custom_serializer",
"(",
"ray",
".",
"signature",
".",
"FunctionSignature",
",",
"use_dict",
"=",
"True",
",",
"local",
"=",
"True",
",",
"driver_id",
"=",
"driver_id",
",",
"class_id",
"=",
"\"ray.signature.FunctionSignature\"",
")"
] | Initialize the serialization library.
This defines a custom serializer for object IDs and also tells ray to
serialize several exception classes that we define for error handling. | [
"Initialize",
"the",
"serialization",
"library",
"."
] | python | train |
iotile/coretools | iotilecore/iotile/core/hw/transport/adapter/mixin_notifications.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/mixin_notifications.py#L225-L251 | def notify_event_nowait(self, conn_string, name, event):
"""Notify an event.
This will move the notification to the background event loop and
return immediately. It is useful for situations where you cannot
await notify_event but keep in mind that it prevents back-pressure
when you are notifying too fast so should be used sparingly.
Note that calling this method will push the notification to a
background task so it can be difficult to reason about when it will
precisely occur. For that reason, :meth:`notify_event` should be
preferred when possible since that method guarantees that all
callbacks will be called synchronously before it finishes.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified.
"""
if self._loop.stopping:
self._logger.debug("Ignoring notification %s from %s because loop is shutting down", name, conn_string)
return
self._loop.log_coroutine(self._notify_event_internal, conn_string, name, event) | [
"def",
"notify_event_nowait",
"(",
"self",
",",
"conn_string",
",",
"name",
",",
"event",
")",
":",
"if",
"self",
".",
"_loop",
".",
"stopping",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Ignoring notification %s from %s because loop is shutting down\"",
",",
"name",
",",
"conn_string",
")",
"return",
"self",
".",
"_loop",
".",
"log_coroutine",
"(",
"self",
".",
"_notify_event_internal",
",",
"conn_string",
",",
"name",
",",
"event",
")"
] | Notify an event.
This will move the notification to the background event loop and
return immediately. It is useful for situations where you cannot
await notify_event but keep in mind that it prevents back-pressure
when you are notifying too fast so should be used sparingly.
Note that calling this method will push the notification to a
background task so it can be difficult to reason about when it will
precisely occur. For that reason, :meth:`notify_event` should be
preferred when possible since that method guarantees that all
callbacks will be called synchronously before it finishes.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified. | [
"Notify",
"an",
"event",
"."
] | python | train |
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L18277-L18291 | def add_formats(self, formats):
"""Adds MIME / Content-type formats to the supported formats.
in formats of type str
Collection of formats to add.
"""
if not isinstance(formats, list):
raise TypeError("formats can only be an instance of type list")
for a in formats[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
self._call("addFormats",
in_p=[formats]) | [
"def",
"add_formats",
"(",
"self",
",",
"formats",
")",
":",
"if",
"not",
"isinstance",
"(",
"formats",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"formats can only be an instance of type list\"",
")",
"for",
"a",
"in",
"formats",
"[",
":",
"10",
"]",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"array can only contain objects of type basestring\"",
")",
"self",
".",
"_call",
"(",
"\"addFormats\"",
",",
"in_p",
"=",
"[",
"formats",
"]",
")"
] | Adds MIME / Content-type formats to the supported formats.
in formats of type str
Collection of formats to add. | [
"Adds",
"MIME",
"/",
"Content",
"-",
"type",
"formats",
"to",
"the",
"supported",
"formats",
"."
] | python | train |
geophysics-ubonn/crtomo_tools | lib/crtomo/tdManager.py | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/tdManager.py#L500-L532 | def _read_sensitivities(self, sens_dir):
"""import sensitivities from a directory
Note
----
* check that signs are correct in case CRMod switches potential
electrodes
"""
if self.assignments['sensitivities'] is not None:
print('Sensitivities already imported. Will not overwrite!')
return
else:
self.assignments['sensitivities'] = {}
sens_files = sorted(glob(sens_dir + os.sep + 'sens*.dat'))
for nr, filename in enumerate(sens_files):
with open(filename, 'r') as fid:
metadata = np.fromstring(
fid.readline().strip(), sep=' ', count=2
)
meta_re = metadata[0]
meta_im = metadata[1]
sens_data = np.loadtxt(fid)
cids = self.parman.add_data(
sens_data[:, 2:4],
[meta_re, meta_im],
)
# store cids for later retrieval
self.assignments['sensitivities'][nr] = cids | [
"def",
"_read_sensitivities",
"(",
"self",
",",
"sens_dir",
")",
":",
"if",
"self",
".",
"assignments",
"[",
"'sensitivities'",
"]",
"is",
"not",
"None",
":",
"print",
"(",
"'Sensitivities already imported. Will not overwrite!'",
")",
"return",
"else",
":",
"self",
".",
"assignments",
"[",
"'sensitivities'",
"]",
"=",
"{",
"}",
"sens_files",
"=",
"sorted",
"(",
"glob",
"(",
"sens_dir",
"+",
"os",
".",
"sep",
"+",
"'sens*.dat'",
")",
")",
"for",
"nr",
",",
"filename",
"in",
"enumerate",
"(",
"sens_files",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fid",
":",
"metadata",
"=",
"np",
".",
"fromstring",
"(",
"fid",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
",",
"sep",
"=",
"' '",
",",
"count",
"=",
"2",
")",
"meta_re",
"=",
"metadata",
"[",
"0",
"]",
"meta_im",
"=",
"metadata",
"[",
"1",
"]",
"sens_data",
"=",
"np",
".",
"loadtxt",
"(",
"fid",
")",
"cids",
"=",
"self",
".",
"parman",
".",
"add_data",
"(",
"sens_data",
"[",
":",
",",
"2",
":",
"4",
"]",
",",
"[",
"meta_re",
",",
"meta_im",
"]",
",",
")",
"# store cids for later retrieval",
"self",
".",
"assignments",
"[",
"'sensitivities'",
"]",
"[",
"nr",
"]",
"=",
"cids"
] | import sensitivities from a directory
Note
----
* check that signs are correct in case CRMod switches potential
electrodes | [
"import",
"sensitivities",
"from",
"a",
"directory"
] | python | train |
jazzband/django-ical | django_ical/utils.py | https://github.com/jazzband/django-ical/blob/7d616b9e319509b56c3ddab8cac18b0439f33b59/django_ical/utils.py#L91-L103 | def build_rrule_from_recurrences_rrule(rule):
"""
Build rrule dictionary for vRecur class from a django_recurrences rrule.
django_recurrences is a popular implementation for recurrences in django.
https://pypi.org/project/django-recurrence/
this is a shortcut to interface between recurrences and icalendar.
"""
from recurrence import serialize
line = serialize(rule)
if line.startswith('RRULE:'):
line = line[6:]
return build_rrule_from_text(line) | [
"def",
"build_rrule_from_recurrences_rrule",
"(",
"rule",
")",
":",
"from",
"recurrence",
"import",
"serialize",
"line",
"=",
"serialize",
"(",
"rule",
")",
"if",
"line",
".",
"startswith",
"(",
"'RRULE:'",
")",
":",
"line",
"=",
"line",
"[",
"6",
":",
"]",
"return",
"build_rrule_from_text",
"(",
"line",
")"
] | Build rrule dictionary for vRecur class from a django_recurrences rrule.
django_recurrences is a popular implementation for recurrences in django.
https://pypi.org/project/django-recurrence/
this is a shortcut to interface between recurrences and icalendar. | [
"Build",
"rrule",
"dictionary",
"for",
"vRecur",
"class",
"from",
"a",
"django_recurrences",
"rrule",
"."
] | python | train |
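A hedged usage sketch for the converter above; recurrence.Rule and recurrence.WEEKLY belong to django-recurrence and are assumed here, while the import path comes from the record's own module.

import recurrence                                        # django-recurrence
from django_ical.utils import build_rrule_from_recurrences_rrule

rule = recurrence.Rule(recurrence.WEEKLY)                # assumed django-recurrence API
vrecur_kwargs = build_rrule_from_recurrences_rrule(rule)
# vrecur_kwargs is the dictionary handed to icalendar's vRecur for the RRULE property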
saltstack/salt | salt/grains/core.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/core.py#L1071-L1125 | def _virtual_hv(osdata):
'''
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
'''
grains = {}
# Bail early if we're not running on Xen
try:
if 'xen' not in osdata['virtual']:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ('major', 'minor', 'extra'):
with salt.utils.files.fopen('/sys/hypervisor/version/{}'.format(fn), 'r') as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains['virtual_hv_version'] = '{}.{}{}'.format(version['major'], version['minor'], version['extra'])
grains['virtual_hv_version_info'] = [version['major'], version['minor'], version['extra']]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {0: 'writable_page_tables',
1: 'writable_descriptor_tables',
2: 'auto_translated_physmap',
3: 'supervisor_mode_kernel',
4: 'pae_pgdir_above_4gb',
5: 'mmu_pt_update_preserve_ad',
7: 'gnttab_map_avail_bits',
8: 'hvm_callback_vector',
9: 'hvm_safe_pvclock',
10: 'hvm_pirqs',
11: 'dom0',
12: 'grant_map_identity',
13: 'memory_op_vnode_supported',
14: 'ARM_SMCCC_supported'}
try:
with salt.utils.files.fopen('/sys/hypervisor/properties/features', 'r') as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains['virtual_hv_features'] = features
grains['virtual_hv_features_list'] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains | [
"def",
"_virtual_hv",
"(",
"osdata",
")",
":",
"grains",
"=",
"{",
"}",
"# Bail early if we're not running on Xen",
"try",
":",
"if",
"'xen'",
"not",
"in",
"osdata",
"[",
"'virtual'",
"]",
":",
"return",
"grains",
"except",
"KeyError",
":",
"return",
"grains",
"# Try to get the exact hypervisor version from sysfs",
"try",
":",
"version",
"=",
"{",
"}",
"for",
"fn",
"in",
"(",
"'major'",
",",
"'minor'",
",",
"'extra'",
")",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'/sys/hypervisor/version/{}'",
".",
"format",
"(",
"fn",
")",
",",
"'r'",
")",
"as",
"fhr",
":",
"version",
"[",
"fn",
"]",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fhr",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
")",
"grains",
"[",
"'virtual_hv_version'",
"]",
"=",
"'{}.{}{}'",
".",
"format",
"(",
"version",
"[",
"'major'",
"]",
",",
"version",
"[",
"'minor'",
"]",
",",
"version",
"[",
"'extra'",
"]",
")",
"grains",
"[",
"'virtual_hv_version_info'",
"]",
"=",
"[",
"version",
"[",
"'major'",
"]",
",",
"version",
"[",
"'minor'",
"]",
",",
"version",
"[",
"'extra'",
"]",
"]",
"except",
"(",
"IOError",
",",
"OSError",
",",
"KeyError",
")",
":",
"pass",
"# Try to read and decode the supported feature set of the hypervisor",
"# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py",
"# Table data from include/xen/interface/features.h",
"xen_feature_table",
"=",
"{",
"0",
":",
"'writable_page_tables'",
",",
"1",
":",
"'writable_descriptor_tables'",
",",
"2",
":",
"'auto_translated_physmap'",
",",
"3",
":",
"'supervisor_mode_kernel'",
",",
"4",
":",
"'pae_pgdir_above_4gb'",
",",
"5",
":",
"'mmu_pt_update_preserve_ad'",
",",
"7",
":",
"'gnttab_map_avail_bits'",
",",
"8",
":",
"'hvm_callback_vector'",
",",
"9",
":",
"'hvm_safe_pvclock'",
",",
"10",
":",
"'hvm_pirqs'",
",",
"11",
":",
"'dom0'",
",",
"12",
":",
"'grant_map_identity'",
",",
"13",
":",
"'memory_op_vnode_supported'",
",",
"14",
":",
"'ARM_SMCCC_supported'",
"}",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'/sys/hypervisor/properties/features'",
",",
"'r'",
")",
"as",
"fhr",
":",
"features",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fhr",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
")",
"enabled_features",
"=",
"[",
"]",
"for",
"bit",
",",
"feat",
"in",
"six",
".",
"iteritems",
"(",
"xen_feature_table",
")",
":",
"if",
"int",
"(",
"features",
",",
"16",
")",
"&",
"(",
"1",
"<<",
"bit",
")",
":",
"enabled_features",
".",
"append",
"(",
"feat",
")",
"grains",
"[",
"'virtual_hv_features'",
"]",
"=",
"features",
"grains",
"[",
"'virtual_hv_features_list'",
"]",
"=",
"enabled_features",
"except",
"(",
"IOError",
",",
"OSError",
",",
"KeyError",
")",
":",
"pass",
"return",
"grains"
] | Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen | [
"Returns",
"detailed",
"hypervisor",
"information",
"from",
"sysfs",
"Currently",
"this",
"seems",
"to",
"be",
"used",
"only",
"by",
"Xen"
] | python | train |
PyCQA/prospector | prospector/suppression.py | https://github.com/PyCQA/prospector/blob/7cfc6d587049a786f935a722d6851cd3b72d7972/prospector/suppression.py#L34-L51 | def get_noqa_suppressions(file_contents):
"""
Finds all pep8/flake8 suppression messages
:param file_contents:
A list of file lines
:return:
A pair - the first is whether to ignore the whole file, the
        second is a set of (1-indexed) line numbers to ignore.
"""
ignore_whole_file = False
ignore_lines = set()
for line_number, line in enumerate(file_contents):
if _FLAKE8_IGNORE_FILE.search(line):
ignore_whole_file = True
if _PEP8_IGNORE_LINE.search(line):
ignore_lines.add(line_number + 1)
return ignore_whole_file, ignore_lines | [
"def",
"get_noqa_suppressions",
"(",
"file_contents",
")",
":",
"ignore_whole_file",
"=",
"False",
"ignore_lines",
"=",
"set",
"(",
")",
"for",
"line_number",
",",
"line",
"in",
"enumerate",
"(",
"file_contents",
")",
":",
"if",
"_FLAKE8_IGNORE_FILE",
".",
"search",
"(",
"line",
")",
":",
"ignore_whole_file",
"=",
"True",
"if",
"_PEP8_IGNORE_LINE",
".",
"search",
"(",
"line",
")",
":",
"ignore_lines",
".",
"add",
"(",
"line_number",
"+",
"1",
")",
"return",
"ignore_whole_file",
",",
"ignore_lines"
] | Finds all pep8/flake8 suppression messages
:param file_contents:
A list of file lines
:return:
A pair - the first is whether to ignore the whole file, the
    second is a set of (1-indexed) line numbers to ignore. | [
"Finds",
"all",
"pep8",
"/",
"flake8",
"suppression",
"messages"
] | python | train |
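A small illustration of the helper above, assuming the module-level _PEP8_IGNORE_LINE and _FLAKE8_IGNORE_FILE regexes match the usual '# noqa' and 'flake8: noqa' comments.

lines = [
    "import os  # noqa",
    "x = 1",
]
ignore_whole_file, ignore_lines = get_noqa_suppressions(lines)
# ignore_whole_file -> False; ignore_lines -> {1}, i.e. the enumerate index plus one as in the code above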
apple/turicreate | src/unity/python/turicreate/_sys_util.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L466-L488 | def get_config_file():
"""
Returns the file name of the config file from which the environment
variables are written.
"""
import os
from os.path import abspath, expanduser, join, exists
__lib_name = get_library_name()
assert __lib_name in ["sframe", "turicreate"]
__default_config_path = join(expanduser("~"), ".%s" % __lib_name, "config")
if "TURI_CONFIG_FILE" in os.environ:
__default_config_path = abspath(expanduser(os.environ["TURI_CONFIG_FILE"]))
if not exists(__default_config_path):
print(("WARNING: Config file specified in environment variable "
"'TURI_CONFIG_FILE' as "
"'%s', but this path does not exist.") % __default_config_path)
return __default_config_path | [
"def",
"get_config_file",
"(",
")",
":",
"import",
"os",
"from",
"os",
".",
"path",
"import",
"abspath",
",",
"expanduser",
",",
"join",
",",
"exists",
"__lib_name",
"=",
"get_library_name",
"(",
")",
"assert",
"__lib_name",
"in",
"[",
"\"sframe\"",
",",
"\"turicreate\"",
"]",
"__default_config_path",
"=",
"join",
"(",
"expanduser",
"(",
"\"~\"",
")",
",",
"\".%s\"",
"%",
"__lib_name",
",",
"\"config\"",
")",
"if",
"\"TURI_CONFIG_FILE\"",
"in",
"os",
".",
"environ",
":",
"__default_config_path",
"=",
"abspath",
"(",
"expanduser",
"(",
"os",
".",
"environ",
"[",
"\"TURI_CONFIG_FILE\"",
"]",
")",
")",
"if",
"not",
"exists",
"(",
"__default_config_path",
")",
":",
"print",
"(",
"(",
"\"WARNING: Config file specified in environment variable \"",
"\"'TURI_CONFIG_FILE' as \"",
"\"'%s', but this path does not exist.\"",
")",
"%",
"__default_config_path",
")",
"return",
"__default_config_path"
] | Returns the file name of the config file from which the environment
variables are written. | [
"Returns",
"the",
"file",
"name",
"of",
"the",
"config",
"file",
"from",
"which",
"the",
"environment",
"variables",
"are",
"written",
"."
] | python | train |
ToucanToco/toucan-data-sdk | toucan_data_sdk/utils/postprocess/filter_by_date.py | https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L115-L152 | def parse_date(datestr: str, date_fmt: str) -> date:
"""parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
    OFFSET` syntax. When using this syntax, `OFFSET` should be understandable by
`pandas.Timedelta` (cf.
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html) and `w`, `week`
`month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped
with parenthesis.
Additionally, the following symbolic names are supported: `TODAY`,
`YESTERDAY`, `TOMORROW`.
Example usage:
    >>> parse_date('2018-01-01', '%Y-%m-%d')
    datetime.date(2018, 1, 1)
    >>> parse_date('(2018-01-01) + 1day', '%Y-%m-%d')
    datetime.date(2018, 1, 2)
    >>> parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')
    datetime.date(2018, 1, 15)
    Parameters:
    `datestr`: the date to parse, formatted as `date_fmt`
    `date_fmt`: expected date format
    Returns:
    The `date` object. If date could not be parsed, a ValueError will
    be raised.
"""
rgx = re.compile(r'\((?P<date>.*)\)(\s*(?P<sign>[+-])(?P<offset>.*))?$')
datestr = datestr.strip()
match = rgx.match(datestr)
# if regexp doesn't match, date must match the expected format
if match is None:
return _norm_date(datestr, date_fmt)
datestr = match.group('date').strip()
dateobj = _norm_date(datestr, date_fmt)
offset = match.group('offset')
if offset:
return add_offset(dateobj, offset, match.group('sign'))
return dateobj | [
"def",
"parse_date",
"(",
"datestr",
":",
"str",
",",
"date_fmt",
":",
"str",
")",
"->",
"date",
":",
"rgx",
"=",
"re",
".",
"compile",
"(",
"r'\\((?P<date>.*)\\)(\\s*(?P<sign>[+-])(?P<offset>.*))?$'",
")",
"datestr",
"=",
"datestr",
".",
"strip",
"(",
")",
"match",
"=",
"rgx",
".",
"match",
"(",
"datestr",
")",
"# if regexp doesn't match, date must match the expected format",
"if",
"match",
"is",
"None",
":",
"return",
"_norm_date",
"(",
"datestr",
",",
"date_fmt",
")",
"datestr",
"=",
"match",
".",
"group",
"(",
"'date'",
")",
".",
"strip",
"(",
")",
"dateobj",
"=",
"_norm_date",
"(",
"datestr",
",",
"date_fmt",
")",
"offset",
"=",
"match",
".",
"group",
"(",
"'offset'",
")",
"if",
"offset",
":",
"return",
"add_offset",
"(",
"dateobj",
",",
"offset",
",",
"match",
".",
"group",
"(",
"'sign'",
")",
")",
"return",
"dateobj"
] | parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
OFFSET` syntax. When using this syntax, `OFFSET` should be understandable by
`pandas.Timedelta` (cf.
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html) and `w`, `week`
`month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped
with parenthesis.
Additionally, the following symbolic names are supported: `TODAY`,
`YESTERDAY`, `TOMORROW`.
Example usage:
>>> parse_date('2018-01-01', '%Y-%m-%d')
datetime.date(2018, 1, 1)
>>> parse_date('(2018-01-01) + 1day', '%Y-%m-%d')
datetime.date(2018, 1, 2)
>>> parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')
datetime.date(2018, 1, 15)
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected date format
Returns:
The `date` object. If date could not be parsed, a ValueError will
be raised. | [
"parse",
"datestr",
"and",
"return",
"corresponding",
"date",
"object",
"."
] | python | test |
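A short usage sketch of parse_date, reusing the offset syntax from its docstring; the last call assumes _norm_date resolves the TODAY symbolic name.

parse_date('2018-01-01', '%Y-%m-%d')             # -> datetime.date(2018, 1, 1)
parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')  # -> datetime.date(2018, 1, 15)
parse_date('(TODAY) - 1day', '%Y-%m-%d')         # yesterday, assuming TODAY is handled by _norm_date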
tensorflow/tensor2tensor | tensor2tensor/models/revnet.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L125-L144 | def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):
"""Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope(scope):
x = conv(x, output_channels, 1, strides=stride, padding='SAME',
activation=None)
return x | [
"def",
"downsample_bottleneck",
"(",
"x",
",",
"output_channels",
",",
"dim",
"=",
"'2d'",
",",
"stride",
"=",
"1",
",",
"scope",
"=",
"'h'",
")",
":",
"conv",
"=",
"CONFIG",
"[",
"dim",
"]",
"[",
"'conv'",
"]",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
")",
":",
"x",
"=",
"conv",
"(",
"x",
",",
"output_channels",
",",
"1",
",",
"strides",
"=",
"stride",
",",
"padding",
"=",
"'SAME'",
",",
"activation",
"=",
"None",
")",
"return",
"x"
] | Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1. | [
"Downsamples",
"x",
"by",
"stride",
"using",
"a",
"1x1",
"convolution",
"filter",
"."
] | python | train |
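A TF1 graph-mode sketch of the layer above (tensor2tensor targets TF1); the shapes are made up for illustration.

import tensorflow as tf   # TF1-style graph mode assumed

x = tf.placeholder(tf.float32, [8, 32, 32, 64])   # [N, H, W, C]
y = downsample_bottleneck(x, output_channels=128, dim='2d', stride=2, scope='h1')
# y has shape [8, 16, 16, 128]: stride 2 halves H and W, the 1x1 conv sets the channel count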
developersociety/django-glitter-news | glitter_news/views.py | https://github.com/developersociety/django-glitter-news/blob/e3c7f9932b3225549c444048b4866263357de58e/glitter_news/views.py#L67-L77 | def get_allow_future(self):
"""
Only superusers and users with the permission can edit the post.
"""
qs = self.get_queryset()
post_edit_permission = '{}.edit_{}'.format(
qs.model._meta.app_label, qs.model._meta.model_name
)
if self.request.user.has_perm(post_edit_permission):
return True
return False | [
"def",
"get_allow_future",
"(",
"self",
")",
":",
"qs",
"=",
"self",
".",
"get_queryset",
"(",
")",
"post_edit_permission",
"=",
"'{}.edit_{}'",
".",
"format",
"(",
"qs",
".",
"model",
".",
"_meta",
".",
"app_label",
",",
"qs",
".",
"model",
".",
"_meta",
".",
"model_name",
")",
"if",
"self",
".",
"request",
".",
"user",
".",
"has_perm",
"(",
"post_edit_permission",
")",
":",
"return",
"True",
"return",
"False"
] | Only superusers and users with the permission can edit the post. | [
"Only",
"superusers",
"and",
"users",
"with",
"the",
"permission",
"can",
"edit",
"the",
"post",
"."
] | python | train |
royi1000/py-libhdate | hdate/zmanim.py | https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/zmanim.py#L149-L157 | def utc_minute_timezone(self, minutes_from_utc):
"""Return the local time for a given time UTC."""
from_zone = tz.gettz('UTC')
to_zone = self.location.timezone
utc = dt.datetime.combine(self.date, dt.time()) + \
dt.timedelta(minutes=minutes_from_utc)
utc = utc.replace(tzinfo=from_zone)
local = utc.astimezone(to_zone)
return local | [
"def",
"utc_minute_timezone",
"(",
"self",
",",
"minutes_from_utc",
")",
":",
"from_zone",
"=",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
"to_zone",
"=",
"self",
".",
"location",
".",
"timezone",
"utc",
"=",
"dt",
".",
"datetime",
".",
"combine",
"(",
"self",
".",
"date",
",",
"dt",
".",
"time",
"(",
")",
")",
"+",
"dt",
".",
"timedelta",
"(",
"minutes",
"=",
"minutes_from_utc",
")",
"utc",
"=",
"utc",
".",
"replace",
"(",
"tzinfo",
"=",
"from_zone",
")",
"local",
"=",
"utc",
".",
"astimezone",
"(",
"to_zone",
")",
"return",
"local"
] | Return the local time for a given time UTC. | [
"Return",
"the",
"local",
"time",
"for",
"a",
"given",
"time",
"UTC",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/workingdirectory/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/workingdirectory/plugin.py#L133-L139 | def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
self.main.console.shell.refresh.connect(self.refresh_plugin)
iconsize = 24
self.toolbar.setIconSize(QSize(iconsize, iconsize))
self.main.addToolBar(self.toolbar) | [
"def",
"register_plugin",
"(",
"self",
")",
":",
"self",
".",
"redirect_stdio",
".",
"connect",
"(",
"self",
".",
"main",
".",
"redirect_internalshell_stdio",
")",
"self",
".",
"main",
".",
"console",
".",
"shell",
".",
"refresh",
".",
"connect",
"(",
"self",
".",
"refresh_plugin",
")",
"iconsize",
"=",
"24",
"self",
".",
"toolbar",
".",
"setIconSize",
"(",
"QSize",
"(",
"iconsize",
",",
"iconsize",
")",
")",
"self",
".",
"main",
".",
"addToolBar",
"(",
"self",
".",
"toolbar",
")"
] | Register plugin in Spyder's main window | [
"Register",
"plugin",
"in",
"Spyder",
"s",
"main",
"window"
] | python | train |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L96-L143 | def spawn(self, ax, target_channels):
"""
'd1' can be shown on ('d1', 'd2') or ('d1')
'd1', 'd2' can be shown only on ('d1', 'd2') or on ('d2', 'd1')
Parameters
--------------
This means that the channels on which the vertex
is defined has to be a subset of the channels
channels : names of channels on which to spawn
the vertex
Returns
-------------
spawnedvertex if successful otherwise None
"""
source_channels = set(self.coordinates.keys())
is_spawnable = _check_spawnable(source_channels, target_channels)
if not is_spawnable:
return None
if len(target_channels) == 1:
verts = self.coordinates.get(target_channels[0], None), None
else:
verts = tuple([self.coordinates.get(ch, None) for ch in target_channels])
def _callback(event):
if event.type == Event.CHANGE:
svertex = event.info['caller']
ch = svertex.channels
coordinates = svertex.coordinates
new_coordinates = {k: v for k, v in zip(ch, coordinates)}
self.update_coordinates(new_coordinates)
elif event.type == Event.VERTEX_REMOVED:
svertex = event.info['caller']
self.spawn_list.remove(svertex)
else:
raise ValueError('Unrecognized event {}'.format(event))
spawned_vertex = SpawnableVertex(verts, ax, _callback)
spawned_vertex.channels = target_channels
if self.spawn_list is None:
self.spawn_list = []
self.spawn_list.append(spawned_vertex)
return spawned_vertex | [
"def",
"spawn",
"(",
"self",
",",
"ax",
",",
"target_channels",
")",
":",
"source_channels",
"=",
"set",
"(",
"self",
".",
"coordinates",
".",
"keys",
"(",
")",
")",
"is_spawnable",
"=",
"_check_spawnable",
"(",
"source_channels",
",",
"target_channels",
")",
"if",
"not",
"is_spawnable",
":",
"return",
"None",
"if",
"len",
"(",
"target_channels",
")",
"==",
"1",
":",
"verts",
"=",
"self",
".",
"coordinates",
".",
"get",
"(",
"target_channels",
"[",
"0",
"]",
",",
"None",
")",
",",
"None",
"else",
":",
"verts",
"=",
"tuple",
"(",
"[",
"self",
".",
"coordinates",
".",
"get",
"(",
"ch",
",",
"None",
")",
"for",
"ch",
"in",
"target_channels",
"]",
")",
"def",
"_callback",
"(",
"event",
")",
":",
"if",
"event",
".",
"type",
"==",
"Event",
".",
"CHANGE",
":",
"svertex",
"=",
"event",
".",
"info",
"[",
"'caller'",
"]",
"ch",
"=",
"svertex",
".",
"channels",
"coordinates",
"=",
"svertex",
".",
"coordinates",
"new_coordinates",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"ch",
",",
"coordinates",
")",
"}",
"self",
".",
"update_coordinates",
"(",
"new_coordinates",
")",
"elif",
"event",
".",
"type",
"==",
"Event",
".",
"VERTEX_REMOVED",
":",
"svertex",
"=",
"event",
".",
"info",
"[",
"'caller'",
"]",
"self",
".",
"spawn_list",
".",
"remove",
"(",
"svertex",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unrecognized event {}'",
".",
"format",
"(",
"event",
")",
")",
"spawned_vertex",
"=",
"SpawnableVertex",
"(",
"verts",
",",
"ax",
",",
"_callback",
")",
"spawned_vertex",
".",
"channels",
"=",
"target_channels",
"if",
"self",
".",
"spawn_list",
"is",
"None",
":",
"self",
".",
"spawn_list",
"=",
"[",
"]",
"self",
".",
"spawn_list",
".",
"append",
"(",
"spawned_vertex",
")",
"return",
"spawned_vertex"
] | 'd1' can be shown on ('d1', 'd2') or ('d1')
'd1', 'd2' can be shown only on ('d1', 'd2') or on ('d2', 'd1')
Parameters
--------------
This means that the channels on which the vertex
is defined has to be a subset of the channels
channels : names of channels on which to spawn
the vertex
Returns
-------------
spawnedvertex if successful otherwise None | [
"d1",
"can",
"be",
"shown",
"on",
"(",
"d1",
"d2",
")",
"or",
"(",
"d1",
")",
"d1",
"d2",
"can",
"be",
"shown",
"only",
"on",
"(",
"d1",
"d2",
")",
"or",
"on",
"(",
"d2",
"d1",
")"
] | python | train |
maxpumperla/elephas | elephas/java/adapter.py | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/java/adapter.py#L35-L52 | def retrieve_keras_weights(java_model):
"""For a previously imported Keras model, after training it with DL4J Spark,
we want to set the resulting weights back to the original Keras model.
:param java_model: DL4J model (MultiLayerNetwork or ComputationGraph
:return: list of numpy arrays in correct order for model.set_weights(...) of a corresponding Keras model
"""
weights = []
layers = java_model.getLayers()
for layer in layers:
params = layer.paramTable()
keys = params.keySet()
key_list = java_classes.ArrayList(keys)
for key in key_list:
weight = params.get(key)
np_weight = np.squeeze(to_numpy(weight))
weights.append(np_weight)
return weights | [
"def",
"retrieve_keras_weights",
"(",
"java_model",
")",
":",
"weights",
"=",
"[",
"]",
"layers",
"=",
"java_model",
".",
"getLayers",
"(",
")",
"for",
"layer",
"in",
"layers",
":",
"params",
"=",
"layer",
".",
"paramTable",
"(",
")",
"keys",
"=",
"params",
".",
"keySet",
"(",
")",
"key_list",
"=",
"java_classes",
".",
"ArrayList",
"(",
"keys",
")",
"for",
"key",
"in",
"key_list",
":",
"weight",
"=",
"params",
".",
"get",
"(",
"key",
")",
"np_weight",
"=",
"np",
".",
"squeeze",
"(",
"to_numpy",
"(",
"weight",
")",
")",
"weights",
".",
"append",
"(",
"np_weight",
")",
"return",
"weights"
] | For a previously imported Keras model, after training it with DL4J Spark,
we want to set the resulting weights back to the original Keras model.
:param java_model: DL4J model (MultiLayerNetwork or ComputationGraph
:return: list of numpy arrays in correct order for model.set_weights(...) of a corresponding Keras model | [
"For",
"a",
"previously",
"imported",
"Keras",
"model",
"after",
"training",
"it",
"with",
"DL4J",
"Spark",
"we",
"want",
"to",
"set",
"the",
"resulting",
"weights",
"back",
"to",
"the",
"original",
"Keras",
"model",
"."
] | python | train |
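A sketch of the round trip the docstring describes; java_model and keras_model are placeholders for the DL4J handle produced by elephas's Java bridge and the Keras model it was originally imported from.

weights = retrieve_keras_weights(java_model)   # java_model: trained MultiLayerNetwork/ComputationGraph
keras_model.set_weights(weights)               # weights come back in Keras layer order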
tbobm/etnawrapper | etnawrapper/etna.py | https://github.com/tbobm/etnawrapper/blob/0f1759646a30f658cf75fd521fd6e9cef5cd09c4/etnawrapper/etna.py#L217-L229 | def get_picture(self, login=None, **kwargs):
"""Get a user's picture.
:param str login: Login of the user to check
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_activities_url = PICTURE_URL.format(login=_login)
return self._request_api(url=_activities_url).content | [
"def",
"get_picture",
"(",
"self",
",",
"login",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_login",
"=",
"kwargs",
".",
"get",
"(",
"'login'",
",",
"login",
"or",
"self",
".",
"_login",
")",
"_activities_url",
"=",
"PICTURE_URL",
".",
"format",
"(",
"login",
"=",
"_login",
")",
"return",
"self",
".",
"_request_api",
"(",
"url",
"=",
"_activities_url",
")",
".",
"content"
] | Get a user's picture.
:param str login: Login of the user to check
:return: JSON | [
"Get",
"a",
"user",
"s",
"picture",
"."
] | python | train |
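A usage sketch; only get_picture is shown in the record, so the authenticated client object is an assumption.

raw = client.get_picture(login='some.login')   # `client` is an already-authenticated wrapper instance
with open('picture.jpg', 'wb') as fh:          # the method returns the raw response bytes
    fh.write(raw)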
axiom-data-science/pyaxiom | pyaxiom/utils.py | https://github.com/axiom-data-science/pyaxiom/blob/7ea7626695abf095df6a67f66e5b3e9ae91b16df/pyaxiom/utils.py#L48-L65 | def normalize_array(var):
"""
Returns a normalized data array from a NetCDF4 variable. This is mostly
used to normalize string types between py2 and py3. It has no effect on types
other than chars/strings
"""
if np.issubdtype(var.dtype, 'S1'):
if var.dtype == str:
# Python 2 on netCDF4 'string' variables needs this.
# Python 3 returns false for np.issubdtype(var.dtype, 'S1')
return var[:]
def decoder(x):
return str(x.decode('utf-8'))
vfunc = np.vectorize(decoder)
return vfunc(nc4.chartostring(var[:]))
else:
return var[:] | [
"def",
"normalize_array",
"(",
"var",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"var",
".",
"dtype",
",",
"'S1'",
")",
":",
"if",
"var",
".",
"dtype",
"==",
"str",
":",
"# Python 2 on netCDF4 'string' variables needs this.",
"# Python 3 returns false for np.issubdtype(var.dtype, 'S1')",
"return",
"var",
"[",
":",
"]",
"def",
"decoder",
"(",
"x",
")",
":",
"return",
"str",
"(",
"x",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"vfunc",
"=",
"np",
".",
"vectorize",
"(",
"decoder",
")",
"return",
"vfunc",
"(",
"nc4",
".",
"chartostring",
"(",
"var",
"[",
":",
"]",
")",
")",
"else",
":",
"return",
"var",
"[",
":",
"]"
] | Returns a normalized data array from a NetCDF4 variable. This is mostly
used to normalize string types between py2 and py3. It has no effect on types
other than chars/strings | [
"Returns",
"a",
"normalized",
"data",
"array",
"from",
"a",
"NetCDF4",
"variable",
".",
"This",
"is",
"mostly",
"used",
"to",
"normalize",
"string",
"types",
"between",
"py2",
"and",
"py3",
".",
"It",
"has",
"no",
"effect",
"on",
"types",
"other",
"than",
"chars",
"/",
"strings"
] | python | valid |
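A hedged sketch of the intended call pattern; the file name and variable names are placeholders.

import netCDF4 as nc4

with nc4.Dataset('data.nc') as ds:                          # placeholder file
    names = normalize_array(ds.variables['station_name'])   # char/string variable -> numpy array of str
    temps = normalize_array(ds.variables['temperature'])    # non-string variables pass through unchanged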
django-auth-ldap/django-auth-ldap | django_auth_ldap/backend.py | https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L208-L238 | def get_or_build_user(self, username, ldap_user):
"""
This must return a (User, built) 2-tuple for the given LDAP user.
username is the Django-friendly username of the user. ldap_user.dn is
the user's DN and ldap_user.attrs contains all of their LDAP
attributes.
The returned User object may be an unsaved model instance.
"""
model = self.get_user_model()
if self.settings.USER_QUERY_FIELD:
query_field = self.settings.USER_QUERY_FIELD
query_value = ldap_user.attrs[self.settings.USER_ATTR_MAP[query_field]][0]
lookup = query_field
else:
query_field = model.USERNAME_FIELD
query_value = username.lower()
lookup = "{}__iexact".format(query_field)
try:
user = model.objects.get(**{lookup: query_value})
except model.DoesNotExist:
user = model(**{query_field: query_value})
built = True
else:
built = False
return (user, built) | [
"def",
"get_or_build_user",
"(",
"self",
",",
"username",
",",
"ldap_user",
")",
":",
"model",
"=",
"self",
".",
"get_user_model",
"(",
")",
"if",
"self",
".",
"settings",
".",
"USER_QUERY_FIELD",
":",
"query_field",
"=",
"self",
".",
"settings",
".",
"USER_QUERY_FIELD",
"query_value",
"=",
"ldap_user",
".",
"attrs",
"[",
"self",
".",
"settings",
".",
"USER_ATTR_MAP",
"[",
"query_field",
"]",
"]",
"[",
"0",
"]",
"lookup",
"=",
"query_field",
"else",
":",
"query_field",
"=",
"model",
".",
"USERNAME_FIELD",
"query_value",
"=",
"username",
".",
"lower",
"(",
")",
"lookup",
"=",
"\"{}__iexact\"",
".",
"format",
"(",
"query_field",
")",
"try",
":",
"user",
"=",
"model",
".",
"objects",
".",
"get",
"(",
"*",
"*",
"{",
"lookup",
":",
"query_value",
"}",
")",
"except",
"model",
".",
"DoesNotExist",
":",
"user",
"=",
"model",
"(",
"*",
"*",
"{",
"query_field",
":",
"query_value",
"}",
")",
"built",
"=",
"True",
"else",
":",
"built",
"=",
"False",
"return",
"(",
"user",
",",
"built",
")"
] | This must return a (User, built) 2-tuple for the given LDAP user.
username is the Django-friendly username of the user. ldap_user.dn is
the user's DN and ldap_user.attrs contains all of their LDAP
attributes.
The returned User object may be an unsaved model instance. | [
"This",
"must",
"return",
"a",
"(",
"User",
"built",
")",
"2",
"-",
"tuple",
"for",
"the",
"given",
"LDAP",
"user",
"."
] | python | train |
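The usual way to hook into this method is to subclass the backend; a minimal sketch, where the extra step in the built branch is only an example and not taken from the record.

from django_auth_ldap.backend import LDAPBackend

class MyLDAPBackend(LDAPBackend):
    def get_or_build_user(self, username, ldap_user):
        user, built = super().get_or_build_user(username, ldap_user)
        if built:
            user.set_unusable_password()   # example extra initialisation for newly built users
        return user, built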
haifengat/hf_ctp_py_proxy | py_ctp/trade.py | https://github.com/haifengat/hf_ctp_py_proxy/blob/c2dc6dbde45aa6b097f75380474e91510d3f5d12/py_ctp/trade.py#L515-L533 | def ReqOrderAction(self, OrderID: str):
"""撤单
:param OrderID:
"""
of = self.orders[OrderID]
if not of:
return -1
else:
pOrderId = of.OrderID
return self.t.ReqOrderAction(
self.broker,
self.investor,
OrderRef=pOrderId.split('|')[2],
FrontID=int(pOrderId.split('|')[1]),
SessionID=int(pOrderId.split('|')[0]),
InstrumentID=of.InstrumentID,
ActionFlag=TThostFtdcActionFlagType.THOST_FTDC_AF_Delete) | [
"def",
"ReqOrderAction",
"(",
"self",
",",
"OrderID",
":",
"str",
")",
":",
"of",
"=",
"self",
".",
"orders",
"[",
"OrderID",
"]",
"if",
"not",
"of",
":",
"return",
"-",
"1",
"else",
":",
"pOrderId",
"=",
"of",
".",
"OrderID",
"return",
"self",
".",
"t",
".",
"ReqOrderAction",
"(",
"self",
".",
"broker",
",",
"self",
".",
"investor",
",",
"OrderRef",
"=",
"pOrderId",
".",
"split",
"(",
"'|'",
")",
"[",
"2",
"]",
",",
"FrontID",
"=",
"int",
"(",
"pOrderId",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
"]",
")",
",",
"SessionID",
"=",
"int",
"(",
"pOrderId",
".",
"split",
"(",
"'|'",
")",
"[",
"0",
"]",
")",
",",
"InstrumentID",
"=",
"of",
".",
"InstrumentID",
",",
"ActionFlag",
"=",
"TThostFtdcActionFlagType",
".",
"THOST_FTDC_AF_Delete",
")"
] | Cancel an order.
:param OrderID: | [
"撤单"
] | python | train |
nanoporetech/ont_fast5_api | ont_fast5_api/analysis_tools/basecall_2d.py | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/analysis_tools/basecall_2d.py#L17-L24 | def get_prior_alignment(self):
""" Return the prior alignment that was used for 2D basecalling.
:return: Alignment data table.
"""
data_group = '{}/HairpinAlign'.format(self.group_name)
data = self.handle.get_analysis_dataset(data_group, 'Alignment')
return data | [
"def",
"get_prior_alignment",
"(",
"self",
")",
":",
"data_group",
"=",
"'{}/HairpinAlign'",
".",
"format",
"(",
"self",
".",
"group_name",
")",
"data",
"=",
"self",
".",
"handle",
".",
"get_analysis_dataset",
"(",
"data_group",
",",
"'Alignment'",
")",
"return",
"data"
] | Return the prior alignment that was used for 2D basecalling.
:return: Alignment data table. | [
"Return",
"the",
"prior",
"alignment",
"that",
"was",
"used",
"for",
"2D",
"basecalling",
"."
] | python | train |
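A usage sketch; the Basecall2DTools wrapper class and its arguments are assumptions based on the module path, not taken from the record.

from ont_fast5_api.analysis_tools.basecall_2d import Basecall2DTools  # assumed class/module

with Basecall2DTools('read.fast5', group_name='Basecall_2D_000') as b2d:  # placeholder file/group
    alignment = b2d.get_prior_alignment()   # the hairpin alignment table used for 2D basecalling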
Hackerfleet/hfos | hfos/ui/clientmanager.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/clientmanager.py#L445-L454 | def _checkPermissions(self, user, event):
"""Checks if the user has in any role that allows to fire the event."""
for role in user.account.roles:
if role in event.roles:
self.log('Access granted', lvl=verbose)
return True
self.log('Access denied', lvl=verbose)
return False | [
"def",
"_checkPermissions",
"(",
"self",
",",
"user",
",",
"event",
")",
":",
"for",
"role",
"in",
"user",
".",
"account",
".",
"roles",
":",
"if",
"role",
"in",
"event",
".",
"roles",
":",
"self",
".",
"log",
"(",
"'Access granted'",
",",
"lvl",
"=",
"verbose",
")",
"return",
"True",
"self",
".",
"log",
"(",
"'Access denied'",
",",
"lvl",
"=",
"verbose",
")",
"return",
"False"
] | Checks if the user has in any role that allows to fire the event. | [
"Checks",
"if",
"the",
"user",
"has",
"in",
"any",
"role",
"that",
"allows",
"to",
"fire",
"the",
"event",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xnodewidget/xnodescene.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L781-L791 | def findNode( self, objectName ):
"""
Looks up the node based on the inputed node name.
:param objectName | <str>
"""
for item in self.items():
if ( isinstance(item, XNode) and
item.objectName() == objectName ):
return item
return None | [
"def",
"findNode",
"(",
"self",
",",
"objectName",
")",
":",
"for",
"item",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"(",
"isinstance",
"(",
"item",
",",
"XNode",
")",
"and",
"item",
".",
"objectName",
"(",
")",
"==",
"objectName",
")",
":",
"return",
"item",
"return",
"None"
] | Looks up the node based on the inputed node name.
:param objectName | <str> | [
"Looks",
"up",
"the",
"node",
"based",
"on",
"the",
"inputed",
"node",
"name",
".",
":",
"param",
"objectName",
"|",
"<str",
">"
] | python | train |
ecederstrand/exchangelib | exchangelib/items.py | https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/items.py#L426-L442 | def attach(self, attachments):
"""Add an attachment, or a list of attachments, to this item. If the item has already been saved, the
attachments will be created on the server immediately. If the item has not yet been saved, the attachments will
        be created on the server when the item is saved.
Adding attachments to an existing item will update the changekey of the item.
"""
if not is_iterable(attachments, generators_allowed=True):
attachments = [attachments]
for a in attachments:
if not a.parent_item:
a.parent_item = self
if self.id and not a.attachment_id:
# Already saved object. Attach the attachment server-side now
a.attach()
if a not in self.attachments:
self.attachments.append(a) | [
"def",
"attach",
"(",
"self",
",",
"attachments",
")",
":",
"if",
"not",
"is_iterable",
"(",
"attachments",
",",
"generators_allowed",
"=",
"True",
")",
":",
"attachments",
"=",
"[",
"attachments",
"]",
"for",
"a",
"in",
"attachments",
":",
"if",
"not",
"a",
".",
"parent_item",
":",
"a",
".",
"parent_item",
"=",
"self",
"if",
"self",
".",
"id",
"and",
"not",
"a",
".",
"attachment_id",
":",
"# Already saved object. Attach the attachment server-side now",
"a",
".",
"attach",
"(",
")",
"if",
"a",
"not",
"in",
"self",
".",
"attachments",
":",
"self",
".",
"attachments",
".",
"append",
"(",
"a",
")"
] | Add an attachment, or a list of attachments, to this item. If the item has already been saved, the
attachments will be created on the server immediately. If the item has not yet been saved, the attachments will
be created on the server when the item is saved.
Adding attachments to an existing item will update the changekey of the item. | [
"Add",
"an",
"attachment",
"or",
"a",
"list",
"of",
"attachments",
"to",
"this",
"item",
".",
"If",
"the",
"item",
"has",
"already",
"been",
"saved",
"the",
"attachments",
"will",
"be",
"created",
"on",
"the",
"server",
"immediately",
".",
"If",
"the",
"item",
"has",
"not",
"yet",
"been",
"saved",
"the",
"attachments",
"will",
"be",
"created",
"on",
"the",
"server",
"the",
"item",
"is",
"saved",
"."
] | python | train |
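Usage sketch; FileAttachment is part of exchangelib's public API, while msg stands for a Message/Item that has already been saved.

from exchangelib import FileAttachment

att = FileAttachment(name='report.txt', content=b'hello world')
msg.attach(att)             # msg.id exists, so the attachment is created server-side immediately
# msg.attach([att, other])  # a list is accepted too, per the is_iterable check above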
Esri/ArcREST | src/arcrest/geometryservice/geometryservice.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/geometryservice/geometryservice.py#L222-L247 | def autoComplete(self,
polygons=[],
polylines=[],
sr=None
):
"""
The autoComplete operation simplifies the process of
constructing new polygons that are adjacent to other polygons.
It constructs polygons that fill in the gaps between existing
polygons and a set of polylines.
Inputs:
polygons - array of Polygon objects.
polylines - list of Polyline objects.
sr - spatial reference of the input geometries WKID.
"""
url = self._url + "/autoComplete"
params = {"f":"json"}
if sr is not None:
params['sr'] = sr
params['polygons'] = self.__geomToStringArray(polygons)
params['polylines'] = self.__geomToStringArray(polylines)
return self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | [
"def",
"autoComplete",
"(",
"self",
",",
"polygons",
"=",
"[",
"]",
",",
"polylines",
"=",
"[",
"]",
",",
"sr",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/autoComplete\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"if",
"sr",
"is",
"not",
"None",
":",
"params",
"[",
"'sr'",
"]",
"=",
"sr",
"params",
"[",
"'polygons'",
"]",
"=",
"self",
".",
"__geomToStringArray",
"(",
"polygons",
")",
"params",
"[",
"'polylines'",
"]",
"=",
"self",
".",
"__geomToStringArray",
"(",
"polylines",
")",
"return",
"self",
".",
"_get",
"(",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | The autoComplete operation simplifies the process of
constructing new polygons that are adjacent to other polygons.
It constructs polygons that fill in the gaps between existing
polygons and a set of polylines.
Inputs:
polygons - array of Polygon objects.
polylines - list of Polyline objects.
sr - spatial reference of the input geometries WKID. | [
"The",
"autoComplete",
"operation",
"simplifies",
"the",
"process",
"of",
"constructing",
"new",
"polygons",
"that",
"are",
"adjacent",
"to",
"other",
"polygons",
".",
"It",
"constructs",
"polygons",
"that",
"fill",
"in",
"the",
"gaps",
"between",
"existing",
"polygons",
"and",
"a",
"set",
"of",
"polylines",
"."
] | python | train |
xtrementl/focus | focus/environment/__init__.py | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/__init__.py#L104-L123 | def _setup_directories(self):
""" Creates data directory structure.
* Raises a ``DirectorySetupFail`` exception if error occurs
while creating directories.
"""
dirs = [self._data_dir]
dirs += [os.path.join(self._data_dir, name) for name
in self.DATA_SUBDIRS]
for path in dirs:
if not os.path.isdir(path):
try:
os.makedirs(path) # recursive mkdir
os.chmod(path, 0755) # rwxr-xr-x
except OSError:
raise errors.DirectorySetupFail()
return True | [
"def",
"_setup_directories",
"(",
"self",
")",
":",
"dirs",
"=",
"[",
"self",
".",
"_data_dir",
"]",
"dirs",
"+=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_dir",
",",
"name",
")",
"for",
"name",
"in",
"self",
".",
"DATA_SUBDIRS",
"]",
"for",
"path",
"in",
"dirs",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"# recursive mkdir",
"os",
".",
"chmod",
"(",
"path",
",",
"0755",
")",
"# rwxr-xr-x",
"except",
"OSError",
":",
"raise",
"errors",
".",
"DirectorySetupFail",
"(",
")",
"return",
"True"
] | Creates data directory structure.
* Raises a ``DirectorySetupFail`` exception if error occurs
while creating directories. | [
"Creates",
"data",
"directory",
"structure",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/requirementslib/models/utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/utils.py#L428-L439 | def split_markers_from_line(line):
# type: (AnyStr) -> Tuple[AnyStr, Optional[AnyStr]]
"""Split markers from a dependency"""
if not any(line.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
marker_sep = ";"
else:
marker_sep = "; "
markers = None
if marker_sep in line:
line, markers = line.split(marker_sep, 1)
markers = markers.strip() if markers else None
return line, markers | [
"def",
"split_markers_from_line",
"(",
"line",
")",
":",
"# type: (AnyStr) -> Tuple[AnyStr, Optional[AnyStr]]",
"if",
"not",
"any",
"(",
"line",
".",
"startswith",
"(",
"uri_prefix",
")",
"for",
"uri_prefix",
"in",
"SCHEME_LIST",
")",
":",
"marker_sep",
"=",
"\";\"",
"else",
":",
"marker_sep",
"=",
"\"; \"",
"markers",
"=",
"None",
"if",
"marker_sep",
"in",
"line",
":",
"line",
",",
"markers",
"=",
"line",
".",
"split",
"(",
"marker_sep",
",",
"1",
")",
"markers",
"=",
"markers",
".",
"strip",
"(",
")",
"if",
"markers",
"else",
"None",
"return",
"line",
",",
"markers"
] | Split markers from a dependency | [
"Split",
"markers",
"from",
"a",
"dependency"
] | python | train |
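A small illustration of the splitting behaviour above.

split_markers_from_line('requests>=2.0 ; python_version < "3.7"')
# -> ('requests>=2.0 ', 'python_version < "3.7"')

split_markers_from_line('requests>=2.0')
# -> ('requests>=2.0', None) -- nothing to split when no marker separator is present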
firecat53/urlscan | urlscan/urlscan.py | https://github.com/firecat53/urlscan/blob/2d10807d01167873733da3b478c784f8fa21bbc0/urlscan/urlscan.py#L463-L477 | def decode_msg(msg, enc='utf-8'):
"""
Decodes a message fragment.
Args: msg - A Message object representing the fragment
enc - The encoding to use for decoding the message
"""
# We avoid the get_payload decoding machinery for raw
# content-transfer-encodings potentially containing non-ascii characters,
# such as 8bit or binary, as these are encoded using raw-unicode-escape which
# seems to prevent subsequent utf-8 decoding.
cte = str(msg.get('content-transfer-encoding', '')).lower()
decode = cte not in ("8bit", "7bit", "binary")
res = msg.get_payload(decode=decode)
return decode_bytes(res, enc) | [
"def",
"decode_msg",
"(",
"msg",
",",
"enc",
"=",
"'utf-8'",
")",
":",
"# We avoid the get_payload decoding machinery for raw",
"# content-transfer-encodings potentially containing non-ascii characters,",
"# such as 8bit or binary, as these are encoded using raw-unicode-escape which",
"# seems to prevent subsequent utf-8 decoding.",
"cte",
"=",
"str",
"(",
"msg",
".",
"get",
"(",
"'content-transfer-encoding'",
",",
"''",
")",
")",
".",
"lower",
"(",
")",
"decode",
"=",
"cte",
"not",
"in",
"(",
"\"8bit\"",
",",
"\"7bit\"",
",",
"\"binary\"",
")",
"res",
"=",
"msg",
".",
"get_payload",
"(",
"decode",
"=",
"decode",
")",
"return",
"decode_bytes",
"(",
"res",
",",
"enc",
")"
] | Decodes a message fragment.
Args: msg - A Message object representing the fragment
enc - The encoding to use for decoding the message | [
"Decodes",
"a",
"message",
"fragment",
"."
] | python | train |
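A hedged usage sketch for the urlscan entry above: decode_bytes is a sibling helper in urlscan that is not shown here, so a minimal stand-in is substituted, and the sample email fragment is invented for illustration.

from email import message_from_string

def decode_bytes(data, enc="utf-8"):
    # minimal stand-in for urlscan's decode_bytes helper (not shown above)
    return data.decode(enc, errors="replace") if isinstance(data, bytes) else data

def decode_msg(msg, enc="utf-8"):
    # same content-transfer-encoding check as the entry above
    cte = str(msg.get("content-transfer-encoding", "")).lower()
    decode = cte not in ("8bit", "7bit", "binary")
    return decode_bytes(msg.get_payload(decode=decode), enc)

msg = message_from_string(
    "Content-Type: text/plain\n"
    "Content-Transfer-Encoding: 8bit\n"
    "\n"
    "Visit https://example.com\n"
)
print(decode_msg(msg))  # -> "Visit https://example.com\n"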
apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/import_model.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/import_model.py#L62-L93 | def get_model_metadata(model_file):
"""
Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
'input_tensor_data' : list of tuples representing the shape of the input parameters
'output_tensor_data' : list of tuples representing the shape of the output of the model
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
model_proto = onnx.load_model(model_file)
metadata = graph.get_graph_metadata(model_proto.graph)
return metadata | [
"def",
"get_model_metadata",
"(",
"model_file",
")",
":",
"graph",
"=",
"GraphProto",
"(",
")",
"try",
":",
"import",
"onnx",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"model_proto",
"=",
"onnx",
".",
"load_model",
"(",
"model_file",
")",
"metadata",
"=",
"graph",
".",
"get_graph_metadata",
"(",
"model_proto",
".",
"graph",
")",
"return",
"metadata"
] | Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
'input_tensor_data' : list of tuples representing the shape of the input parameters
'output_tensor_data' : list of tuples representing the shape of the output of the model | [
"Returns",
"the",
"name",
"and",
"shape",
"information",
"of",
"input",
"and",
"output",
"tensors",
"of",
"the",
"given",
"ONNX",
"model",
"file",
"."
] | python | train |
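A hedged usage sketch for the MXNet entry above, assuming MXNet with ONNX support (onnx and protobuf installed) and that "model.onnx" is a placeholder path to an existing ONNX file; the printed shapes are illustrative only.

from mxnet.contrib import onnx as onnx_mxnet

metadata = onnx_mxnet.get_model_metadata("model.onnx")  # placeholder file name
print(metadata["input_tensor_data"])   # e.g. [('data', (1, 3, 224, 224))]
print(metadata["output_tensor_data"])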
zagaran/mongolia | mongolia/database_object.py | https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L380-L390 | def remove(self):
"""
Deletes the object from the database
WARNING: This cannot be undone. Be really careful when deleting
programmatically. It is recommended to backup your database before
applying specific deletes. If your application uses deletes regularly,
it is strongly recommended that you have a recurring backup system.
"""
self._collection.remove({ID_KEY: self[ID_KEY]})
dict.clear(self) | [
"def",
"remove",
"(",
"self",
")",
":",
"self",
".",
"_collection",
".",
"remove",
"(",
"{",
"ID_KEY",
":",
"self",
"[",
"ID_KEY",
"]",
"}",
")",
"dict",
".",
"clear",
"(",
"self",
")"
] | Deletes the object from the database
WARNING: This cannot be undone. Be really careful when deleting
programmatically. It is recommended to backup your database before
applying specific deletes. If your application uses deletes regularly,
it is strongly recommended that you have a recurring backup system. | [
"Deletes",
"the",
"object",
"from",
"the",
"database",
"WARNING",
":",
"This",
"cannot",
"be",
"undone",
".",
"Be",
"really",
"careful",
"when",
"deleting",
"programatically",
".",
"It",
"is",
"recommended",
"to",
"backup",
"your",
"database",
"before",
"applying",
"specific",
"deletes",
".",
"If",
"your",
"application",
"uses",
"deletes",
"regularly",
"it",
"is",
"strongly",
"recommended",
"that",
"you",
"have",
"a",
"recurring",
"backup",
"system",
"."
] | python | train |
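A hedged usage sketch for the mongolia entry above. The User subclass, its PATH value, and the lookup-by-query style are assumptions for illustration only; consult the mongolia documentation for the exact constructor behaviour. Only the .remove() call itself comes from the entry.

from mongolia import DatabaseObject

class User(DatabaseObject):
    PATH = "myapp.users"  # "database.collection" path, assumed for illustration

# assumed lookup style: load an existing document by a query dict
user = User({"email": "alice@example.com"})
user.remove()  # deletes the document from MongoDB; this cannot be undone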