Dataset columns:

repo: string (7 to 54 chars)
path: string (4 to 192 chars)
url: string (87 to 284 chars)
code: string (78 to 104k chars)
code_tokens: list
docstring: string (1 to 46.9k chars)
docstring_tokens: list
language: string (1 class)
partition: string (3 values)
mpdavis/python-jose | jose/jws.py | python | train
https://github.com/mpdavis/python-jose/blob/deea7600eeea47aeb1bf5053a96de51cf2b9c639/jose/jws.py#L19-L52

```python
def sign(payload, key, headers=None, algorithm=ALGORITHMS.HS256):
"""Signs a claims set and returns a JWS string.
Args:
payload (str): A string to sign
key (str or dict): The key to use for signing the claim set. Can be
individual JWK or JWK set.
headers (dict, optional): A set of headers that will be added to
the default headers. Any headers that are added as additional
headers will override the default headers.
        algorithm (str, optional): The algorithm to use for signing
            the claims. Defaults to HS256.
Returns:
str: The string representation of the header, claims, and signature.
Raises:
JWSError: If there is an error signing the token.
Examples:
>>> jws.sign({'a': 'b'}, 'secret', algorithm='HS256')
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
"""
if algorithm not in ALGORITHMS.SUPPORTED:
raise JWSError('Algorithm %s not supported.' % algorithm)
encoded_header = _encode_header(algorithm, additional_headers=headers)
encoded_payload = _encode_payload(payload)
signed_output = _sign_header_and_claims(encoded_header, encoded_payload, algorithm, key)
    return signed_output
```
"def",
"sign",
"(",
"payload",
",",
"key",
",",
"headers",
"=",
"None",
",",
"algorithm",
"=",
"ALGORITHMS",
".",
"HS256",
")",
":",
"if",
"algorithm",
"not",
"in",
"ALGORITHMS",
".",
"SUPPORTED",
":",
"raise",
"JWSError",
"(",
"'Algorithm %s not supported.'",
"%",
"algorithm",
")",
"encoded_header",
"=",
"_encode_header",
"(",
"algorithm",
",",
"additional_headers",
"=",
"headers",
")",
"encoded_payload",
"=",
"_encode_payload",
"(",
"payload",
")",
"signed_output",
"=",
"_sign_header_and_claims",
"(",
"encoded_header",
",",
"encoded_payload",
",",
"algorithm",
",",
"key",
")",
"return",
"signed_output"
]
| Signs a claims set and returns a JWS string.
Args:
payload (str): A string to sign
key (str or dict): The key to use for signing the claim set. Can be
individual JWK or JWK set.
headers (dict, optional): A set of headers that will be added to
the default headers. Any headers that are added as additional
headers will override the default headers.
algorithm (str, optional): The algorithm to use for signing the
the claims. Defaults to HS256.
Returns:
str: The string representation of the header, claims, and signature.
Raises:
JWSError: If there is an error signing the token.
Examples:
>>> jws.sign({'a': 'b'}, 'secret', algorithm='HS256')
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' | [
"Signs",
"a",
"claims",
"set",
"and",
"returns",
"a",
"JWS",
"string",
"."
]
| python | train |
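For reference, a minimal round-trip sketch; the verify call is taken from python-jose's public API rather than from this snippet, so treat it as an assumption:

```python
# Hedged sketch: sign a claims set, then verify it with the same shared secret.
from jose import jws

token = jws.sign({'a': 'b'}, 'secret', algorithm='HS256')
claims = jws.verify(token, 'secret', algorithms=['HS256'])  # assumed counterpart API
```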
pricingassistant/mrq | mrq/utils.py | python | train
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/utils.py#L94-L101

```python
def memoize_single_argument(f):
""" Memoization decorator for a function taking a single argument """
class memodict(dict):
def __missing__(self, key):
ret = self[key] = f(key)
return ret
    return memodict().__getitem__
```
"def",
"memoize_single_argument",
"(",
"f",
")",
":",
"class",
"memodict",
"(",
"dict",
")",
":",
"def",
"__missing__",
"(",
"self",
",",
"key",
")",
":",
"ret",
"=",
"self",
"[",
"key",
"]",
"=",
"f",
"(",
"key",
")",
"return",
"ret",
"return",
"memodict",
"(",
")",
".",
"__getitem__"
]
| Memoization decorator for a function taking a single argument | [
"Memoization",
"decorator",
"for",
"a",
"function",
"taking",
"a",
"single",
"argument"
]
| python | train |
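A short usage sketch; the decorated function is made up for illustration:

```python
@memoize_single_argument
def square(x):
    # computed once per distinct argument, then served from the memodict cache
    return x * x

assert square(4) == 16  # first call computes and stores
assert square(4) == 16  # second call is a plain dict lookup
```

Because the decorator returns `memodict().__getitem__`, repeated calls with the same argument never re-enter the original function.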
cloud9ers/gurumate | environment/lib/python2.7/site-packages/psutil/_pslinux.py | python | test
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/_pslinux.py#L551-L609

```python
def get_memory_maps(self):
"""Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
f = None
try:
f = open("/proc/%s/smaps" % self.pid)
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if len(fields) >= 5:
yield (current_block.pop(), data)
current_block.append(line)
else:
data[fields[0]] = int(fields[1]) * 1024
yield (current_block.pop(), data)
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
yield (addr, perms, path,
data['Rss:'],
data['Size:'],
data.get('Pss:', 0),
                           data['Shared_Clean:'], data['Shared_Dirty:'],  # snapshot read 'Shared_Clean:' twice; 'Shared_Dirty:' is almost certainly intended
data['Private_Clean:'], data['Private_Dirty:'],
data['Referenced:'],
data['Anonymous:'],
data['Swap:'])
f.close()
except EnvironmentError:
# XXX - Can't use wrap_exceptions decorator as we're
# returning a generator; this probably needs some
# refactoring in order to avoid this code duplication.
if f is not None:
f.close()
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
except:
if f is not None:
f.close()
            raise
```
"def",
"get_memory_maps",
"(",
"self",
")",
":",
"f",
"=",
"None",
"try",
":",
"f",
"=",
"open",
"(",
"\"/proc/%s/smaps\"",
"%",
"self",
".",
"pid",
")",
"first_line",
"=",
"f",
".",
"readline",
"(",
")",
"current_block",
"=",
"[",
"first_line",
"]",
"def",
"get_blocks",
"(",
")",
":",
"data",
"=",
"{",
"}",
"for",
"line",
"in",
"f",
":",
"fields",
"=",
"line",
".",
"split",
"(",
"None",
",",
"5",
")",
"if",
"len",
"(",
"fields",
")",
">=",
"5",
":",
"yield",
"(",
"current_block",
".",
"pop",
"(",
")",
",",
"data",
")",
"current_block",
".",
"append",
"(",
"line",
")",
"else",
":",
"data",
"[",
"fields",
"[",
"0",
"]",
"]",
"=",
"int",
"(",
"fields",
"[",
"1",
"]",
")",
"*",
"1024",
"yield",
"(",
"current_block",
".",
"pop",
"(",
")",
",",
"data",
")",
"if",
"first_line",
":",
"# smaps file can be empty",
"for",
"header",
",",
"data",
"in",
"get_blocks",
"(",
")",
":",
"hfields",
"=",
"header",
".",
"split",
"(",
"None",
",",
"5",
")",
"try",
":",
"addr",
",",
"perms",
",",
"offset",
",",
"dev",
",",
"inode",
",",
"path",
"=",
"hfields",
"except",
"ValueError",
":",
"addr",
",",
"perms",
",",
"offset",
",",
"dev",
",",
"inode",
",",
"path",
"=",
"hfields",
"+",
"[",
"''",
"]",
"if",
"not",
"path",
":",
"path",
"=",
"'[anon]'",
"else",
":",
"path",
"=",
"path",
".",
"strip",
"(",
")",
"yield",
"(",
"addr",
",",
"perms",
",",
"path",
",",
"data",
"[",
"'Rss:'",
"]",
",",
"data",
"[",
"'Size:'",
"]",
",",
"data",
".",
"get",
"(",
"'Pss:'",
",",
"0",
")",
",",
"data",
"[",
"'Shared_Clean:'",
"]",
",",
"data",
"[",
"'Shared_Clean:'",
"]",
",",
"data",
"[",
"'Private_Clean:'",
"]",
",",
"data",
"[",
"'Private_Dirty:'",
"]",
",",
"data",
"[",
"'Referenced:'",
"]",
",",
"data",
"[",
"'Anonymous:'",
"]",
",",
"data",
"[",
"'Swap:'",
"]",
")",
"f",
".",
"close",
"(",
")",
"except",
"EnvironmentError",
":",
"# XXX - Can't use wrap_exceptions decorator as we're",
"# returning a generator; this probably needs some",
"# refactoring in order to avoid this code duplication.",
"if",
"f",
"is",
"not",
"None",
":",
"f",
".",
"close",
"(",
")",
"err",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"if",
"err",
".",
"errno",
"in",
"(",
"errno",
".",
"ENOENT",
",",
"errno",
".",
"ESRCH",
")",
":",
"raise",
"NoSuchProcess",
"(",
"self",
".",
"pid",
",",
"self",
".",
"_process_name",
")",
"if",
"err",
".",
"errno",
"in",
"(",
"errno",
".",
"EPERM",
",",
"errno",
".",
"EACCES",
")",
":",
"raise",
"AccessDenied",
"(",
"self",
".",
"pid",
",",
"self",
".",
"_process_name",
")",
"raise",
"except",
":",
"if",
"f",
"is",
"not",
"None",
":",
"f",
".",
"close",
"(",
")",
"raise"
]
| Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo | [
"Return",
"process",
"s",
"mapped",
"memory",
"regions",
"as",
"a",
"list",
"of",
"nameduples",
".",
"Fields",
"are",
"explained",
"in",
"man",
"proc",
";",
"here",
"is",
"an",
"updated",
"(",
"Apr",
"2012",
")",
"version",
":",
"http",
":",
"//",
"goo",
".",
"gl",
"/",
"fmebo"
]
| python | test |
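A consumption sketch for the generator above; `proc` stands for an instance of the surrounding Process class and is assumed, not shown in the snippet:

```python
# Hedged sketch: each yielded item is a 13-field tuple, in the order built above.
for (addr, perms, path, rss, size, pss,
     shared_clean, shared_dirty, private_clean, private_dirty,
     referenced, anonymous, swap) in proc.get_memory_maps():
    print(addr, perms, path, rss)
```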
dpkp/kafka-python | kafka/protocol/parser.py | python | train
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/protocol/parser.py#L47-L72

```python
def send_request(self, request, correlation_id=None):
"""Encode and queue a kafka api request for sending.
Arguments:
request (object): An un-encoded kafka request.
correlation_id (int, optional): Optionally specify an ID to
correlate requests with responses. If not provided, an ID will
be generated automatically.
Returns:
correlation_id
"""
log.debug('Sending request %s', request)
if correlation_id is None:
correlation_id = self._next_correlation_id()
header = RequestHeader(request,
correlation_id=correlation_id,
client_id=self._client_id)
message = b''.join([header.encode(), request.encode()])
size = Int32.encode(len(message))
data = size + message
self.bytes_to_send.append(data)
if request.expect_response():
ifr = (correlation_id, request)
self.in_flight_requests.append(ifr)
        return correlation_id
```
"def",
"send_request",
"(",
"self",
",",
"request",
",",
"correlation_id",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"'Sending request %s'",
",",
"request",
")",
"if",
"correlation_id",
"is",
"None",
":",
"correlation_id",
"=",
"self",
".",
"_next_correlation_id",
"(",
")",
"header",
"=",
"RequestHeader",
"(",
"request",
",",
"correlation_id",
"=",
"correlation_id",
",",
"client_id",
"=",
"self",
".",
"_client_id",
")",
"message",
"=",
"b''",
".",
"join",
"(",
"[",
"header",
".",
"encode",
"(",
")",
",",
"request",
".",
"encode",
"(",
")",
"]",
")",
"size",
"=",
"Int32",
".",
"encode",
"(",
"len",
"(",
"message",
")",
")",
"data",
"=",
"size",
"+",
"message",
"self",
".",
"bytes_to_send",
".",
"append",
"(",
"data",
")",
"if",
"request",
".",
"expect_response",
"(",
")",
":",
"ifr",
"=",
"(",
"correlation_id",
",",
"request",
")",
"self",
".",
"in_flight_requests",
".",
"append",
"(",
"ifr",
")",
"return",
"correlation_id"
]
| Encode and queue a kafka api request for sending.
Arguments:
request (object): An un-encoded kafka request.
correlation_id (int, optional): Optionally specify an ID to
correlate requests with responses. If not provided, an ID will
be generated automatically.
Returns:
correlation_id | [
"Encode",
"and",
"queue",
"a",
"kafka",
"api",
"request",
"for",
"sending",
"."
]
| python | train |
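The bytes appended to `bytes_to_send` follow Kafka's size-prefixed framing. A standalone sketch of that layout; the explicit `struct` call is an assumption standing in for the snippet's `Int32.encode` (Kafka's wire format is big-endian):

```python
import struct

def frame(header_bytes: bytes, request_bytes: bytes) -> bytes:
    # [4-byte big-endian length][RequestHeader bytes][request bytes]
    body = header_bytes + request_bytes
    return struct.pack('>i', len(body)) + body
```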
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | python | train
https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L99-L116

```python
def event_state_counties(self):
"""DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
        Return an event type and its state(s) and counties (consolidated)"""
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean)) | [
"def",
"event_state_counties",
"(",
"self",
")",
":",
"# FIXME: most of this logic should be moved to the alert instance and refactored",
"counties",
"=",
"''",
"state",
"=",
"''",
"for",
"alert",
"in",
"self",
".",
"_alerts",
":",
"locations",
"=",
"[",
"]",
"states",
"=",
"[",
"]",
"for",
"samecode",
"in",
"alert",
".",
"samecodes",
":",
"county",
",",
"state",
"=",
"self",
".",
"geo",
".",
"lookup_county_state",
"(",
"samecode",
")",
"locations",
".",
"append",
"(",
"(",
"county",
",",
"state",
")",
")",
"if",
"state",
"not",
"in",
"states",
":",
"states",
".",
"append",
"(",
"state",
")",
"for",
"state",
"in",
"states",
":",
"counties",
"=",
"[",
"x",
"for",
"x",
",",
"y",
"in",
"locations",
"if",
"y",
"==",
"state",
"]",
"counties_clean",
"=",
"str",
"(",
"counties",
")",
".",
"strip",
"(",
"\"[']\"",
")",
"print",
"(",
"\"{0}: {1} - {2}\"",
".",
"format",
"(",
"alert",
".",
"event",
",",
"state",
",",
"counties_clean",
")",
")"
]
| DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated) | [
"DEPRECATED",
":",
"this",
"will",
"be",
"moved",
"elsewhere",
"or",
"dropped",
"in",
"the",
"near",
"future",
"stop",
"using",
"it",
".",
"Return",
"an",
"event",
"type",
"and",
"it",
"s",
"state",
"(",
"s",
")",
"and",
"counties",
"(",
"consolidated",
")"
]
| python | train |
datasift/datasift-python | datasift/historics.py | python | train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L104-L117

```python
def delete(self, historics_id):
""" Delete one specified playback query. If the query is currently running, stop it.
status_code is set to 204 on success
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsdelete
:param historics_id: playback id of the query to delete
:type historics_id: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
        return self.request.post('delete', data=dict(id=historics_id))
```
"def",
"delete",
"(",
"self",
",",
"historics_id",
")",
":",
"return",
"self",
".",
"request",
".",
"post",
"(",
"'delete'",
",",
"data",
"=",
"dict",
"(",
"id",
"=",
"historics_id",
")",
")"
]
| Delete one specified playback query. If the query is currently running, stop it.
status_code is set to 204 on success
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsdelete
:param historics_id: playback id of the query to delete
:type historics_id: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` | [
"Delete",
"one",
"specified",
"playback",
"query",
".",
"If",
"the",
"query",
"is",
"currently",
"running",
"stop",
"it",
"."
]
| python | train |
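A short call sketch; the client constructor is assumed from the datasift-python README and the playback id is made up:

```python
import datasift

client = datasift.Client('your_username', 'your_api_key')  # assumed constructor
response = client.historics.delete('2490313575797283a2f1')  # hypothetical playback id
assert response.status_code == 204  # per the docstring, 204 on success
```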
OLC-Bioinformatics/sipprverse | cgecore/utility.py | python | train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L342-L358

```python
def load_json(json_object):
''' Load json from file or file name '''
content = None
if isinstance(json_object, str) and os.path.exists(json_object):
with open_(json_object) as f:
try:
content = json.load(f)
except Exception as e:
debug.log("Warning: Content of '%s' file is not json."%f.name)
elif hasattr(json_object, 'read'):
try:
content = json.load(json_object)
except Exception as e:
debug.log("Warning: Content of '%s' file is not json."%json_object.name)
else:
debug.log("%s\nWarning: Object type invalid!"%json_object)
    return content
```
"def",
"load_json",
"(",
"json_object",
")",
":",
"content",
"=",
"None",
"if",
"isinstance",
"(",
"json_object",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"json_object",
")",
":",
"with",
"open_",
"(",
"json_object",
")",
"as",
"f",
":",
"try",
":",
"content",
"=",
"json",
".",
"load",
"(",
"f",
")",
"except",
"Exception",
"as",
"e",
":",
"debug",
".",
"log",
"(",
"\"Warning: Content of '%s' file is not json.\"",
"%",
"f",
".",
"name",
")",
"elif",
"hasattr",
"(",
"json_object",
",",
"'read'",
")",
":",
"try",
":",
"content",
"=",
"json",
".",
"load",
"(",
"json_object",
")",
"except",
"Exception",
"as",
"e",
":",
"debug",
".",
"log",
"(",
"\"Warning: Content of '%s' file is not json.\"",
"%",
"json_object",
".",
"name",
")",
"else",
":",
"debug",
".",
"log",
"(",
"\"%s\\nWarning: Object type invalid!\"",
"%",
"json_object",
")",
"return",
"content"
]
| Load json from file or file name | [
"Load",
"json",
"from",
"file",
"or",
"file",
"name"
]
| python | train |
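Usage sketch; the file name is made up, and `open_`/`debug` are module helpers assumed to be in scope:

```python
# From a path on disk:
content = load_json('results/report.json')   # hypothetical path

# Or from anything with a .read() method:
with open('results/report.json') as handle:  # hypothetical path
    content = load_json(handle)
```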
apache/incubator-heron | third_party/python/cpplint/cpplint.py | python | valid
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1534-L1542

```python
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
  return len(lines)
```
"def",
"FindNextMultiLineCommentStart",
"(",
"lines",
",",
"lineix",
")",
":",
"while",
"lineix",
"<",
"len",
"(",
"lines",
")",
":",
"if",
"lines",
"[",
"lineix",
"]",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'/*'",
")",
":",
"# Only return this marker if the comment goes beyond this line",
"if",
"lines",
"[",
"lineix",
"]",
".",
"strip",
"(",
")",
".",
"find",
"(",
"'*/'",
",",
"2",
")",
"<",
"0",
":",
"return",
"lineix",
"lineix",
"+=",
"1",
"return",
"len",
"(",
"lines",
")"
]
| Find the beginning marker for a multiline comment. | [
"Find",
"the",
"beginning",
"marker",
"for",
"a",
"multiline",
"comment",
"."
]
| python | valid |
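A worked example of the scan; the input lines are made up:

```python
lines = ['int x;', '/* a note', '   still open */', 'int y;']
FindNextMultiLineCommentStart(lines, 0)  # -> 1: '/*' opens here and does not close on the same line
FindNextMultiLineCommentStart(lines, 2)  # -> 4 == len(lines): no further opener
```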
waqasbhatti/astrobase | astrobase/lcfit/sinusoidal.py | python | valid
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcfit/sinusoidal.py#L63-L111

```python
def _fourier_func(fourierparams, phase, mags):
'''This returns a summed Fourier cosine series.
Parameters
----------
fourierparams : list
        This MUST be a flat list of the following form::

        [amplitude_1, amplitude_2, ..., amplitude_X,
         phase_1, phase_2, ..., phase_X]

        where X is the Fourier order: the first half of the list holds the
        amplitudes and the second half holds the phases.
phase,mags : np.array
The input phase and magnitude areas to use as the basis for the cosine
series. The phases are used directly to generate the values of the
function, while the mags array is used to generate the zeroth order
amplitude coefficient.
Returns
-------
np.array
The Fourier cosine series function evaluated over `phase`.
'''
# figure out the order from the length of the Fourier param list
order = int(len(fourierparams)/2)
# get the amplitude and phase coefficients
f_amp = fourierparams[:order]
f_pha = fourierparams[order:]
# calculate all the individual terms of the series
f_orders = [f_amp[x]*npcos(2.0*pi_value*x*phase + f_pha[x])
for x in range(order)]
# this is the zeroth order coefficient - a constant equal to median mag
total_f = npmedian(mags)
# sum the series
for fo in f_orders:
total_f += fo
    return total_f
```
"def",
"_fourier_func",
"(",
"fourierparams",
",",
"phase",
",",
"mags",
")",
":",
"# figure out the order from the length of the Fourier param list",
"order",
"=",
"int",
"(",
"len",
"(",
"fourierparams",
")",
"/",
"2",
")",
"# get the amplitude and phase coefficients",
"f_amp",
"=",
"fourierparams",
"[",
":",
"order",
"]",
"f_pha",
"=",
"fourierparams",
"[",
"order",
":",
"]",
"# calculate all the individual terms of the series",
"f_orders",
"=",
"[",
"f_amp",
"[",
"x",
"]",
"*",
"npcos",
"(",
"2.0",
"*",
"pi_value",
"*",
"x",
"*",
"phase",
"+",
"f_pha",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"order",
")",
"]",
"# this is the zeroth order coefficient - a constant equal to median mag",
"total_f",
"=",
"npmedian",
"(",
"mags",
")",
"# sum the series",
"for",
"fo",
"in",
"f_orders",
":",
"total_f",
"+=",
"fo",
"return",
"total_f"
]
| This returns a summed Fourier cosine series.
Parameters
----------
fourierparams : list
This MUST be a list of the following form like so::
[period,
epoch,
[amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],
[phase_1, phase_2, phase_3, ..., phase_X]]
where X is the Fourier order.
phase,mags : np.array
The input phase and magnitude areas to use as the basis for the cosine
series. The phases are used directly to generate the values of the
function, while the mags array is used to generate the zeroth order
amplitude coefficient.
Returns
-------
np.array
The Fourier cosine series function evaluated over `phase`. | [
"This",
"returns",
"a",
"summed",
"Fourier",
"cosine",
"series",
"."
]
| python | valid |
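In equation form, the series evaluated above is

```latex
f(\phi) = \operatorname{median}(m) + \sum_{k=0}^{X-1} A_k \cos\left(2\pi k \phi + \varphi_k\right)
```

where the k = 0 term of the sum is itself a constant, A_0 cos(phi_0), added on top of the median-magnitude zeroth-order offset.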
mikekatz04/BOWIE | snr_calculator_folder/gwsnrcalc/utils/csnr.py | python | train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/csnr.py#L21-L102

```python
def csnr(freqs, hc, hn, fmrg, fpeak, prefactor=1.0):
"""Calculate the SNR of a frequency domain waveform.
SNRCalculation is a function that takes waveforms (frequencies and hcs)
and a noise curve, and returns SNRs for all binary phases and the whole waveform.
Arguments:
freqs (1D or 2D array of floats): Frequencies corresponding to the waveforms.
Shape is (num binaries, num_points) if 2D.
Shape is (num_points,) if 1D for one binary.
hc (1D or 2D array of floats): Characteristic strain of the waveforms.
Shape is (num binaries, num_points) if 2D.
Shape is (num_points,) if 1D for one binary.
fmrg: (scalar float or 1D array of floats): Merger frequency of each binary separating
inspiral from merger phase. (0.014/M) Shape is (num binaries,)
if more than one binary.
fpeak: (scalar float or 1D array of floats): Peak frequency of each binary separating
merger from ringdown phase. (0.014/M) Shape is (num binaries,)
if more than one binary.
hn: (1D or 2D array of floats): Characteristic strain of the noise.
Shape is (num binaries, num_points) if 2D.
Shape is (num_points,) if 1D for one binary.
prefactor (float, optional): Factor to multiply snr (not snr^2) integral values by.
Default is 1.0.
Returns:
(dict): Dictionary with SNRs from each phase.
"""
cfd = os.path.dirname(os.path.abspath(__file__))
if 'phenomd.cpython-35m-darwin.so' in os.listdir(cfd):
exec_call = cfd + '/phenomd.cpython-35m-darwin.so'
else:
exec_call = cfd + '/phenomd/phenomd.so'
c_obj = ctypes.CDLL(exec_call)
# check dimensionality
remove_axis = False
try:
len(fmrg)
except TypeError:
remove_axis = True
freqs, hc = np.array([freqs]), np.array([hc])
hn, fmrg, fpeak = np.array([hn]), np.array([fmrg]), np.array([fpeak])
    # this implementation in ctypes works with 1D arrays
freqs_in = freqs.flatten()
hc_in = hc.flatten()
hn_in = hn.flatten()
num_binaries, length_of_signal = hc.shape
    # prepare output arrays
snr_cast = ctypes.c_double*num_binaries
snr_all = snr_cast()
snr_ins = snr_cast()
snr_mrg = snr_cast()
snr_rd = snr_cast()
# find SNR values
c_obj.SNR_function(ctypes.byref(snr_all), ctypes.byref(snr_ins),
ctypes.byref(snr_mrg), ctypes.byref(snr_rd),
freqs_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
hc_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
hn_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
fmrg.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
fpeak.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.c_int(length_of_signal), ctypes.c_int(num_binaries))
# make into numpy arrays
snr_all, snr_ins, = np.ctypeslib.as_array(snr_all), np.ctypeslib.as_array(snr_ins)
snr_mrg, snr_rd = np.ctypeslib.as_array(snr_mrg), np.ctypeslib.as_array(snr_rd)
# remove axis if one binary
if remove_axis:
snr_all, snr_ins, snr_mrg, snr_rd = snr_all[0], snr_ins[0], snr_mrg[0], snr_rd[0]
# prepare output by multiplying by prefactor
return ({'all': snr_all*prefactor, 'ins': snr_ins*prefactor,
             'mrg': snr_mrg*prefactor, 'rd': snr_rd*prefactor})
```
"def",
"csnr",
"(",
"freqs",
",",
"hc",
",",
"hn",
",",
"fmrg",
",",
"fpeak",
",",
"prefactor",
"=",
"1.0",
")",
":",
"cfd",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"if",
"'phenomd.cpython-35m-darwin.so'",
"in",
"os",
".",
"listdir",
"(",
"cfd",
")",
":",
"exec_call",
"=",
"cfd",
"+",
"'/phenomd.cpython-35m-darwin.so'",
"else",
":",
"exec_call",
"=",
"cfd",
"+",
"'/phenomd/phenomd.so'",
"c_obj",
"=",
"ctypes",
".",
"CDLL",
"(",
"exec_call",
")",
"# check dimensionality",
"remove_axis",
"=",
"False",
"try",
":",
"len",
"(",
"fmrg",
")",
"except",
"TypeError",
":",
"remove_axis",
"=",
"True",
"freqs",
",",
"hc",
"=",
"np",
".",
"array",
"(",
"[",
"freqs",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"hc",
"]",
")",
"hn",
",",
"fmrg",
",",
"fpeak",
"=",
"np",
".",
"array",
"(",
"[",
"hn",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"fmrg",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"fpeak",
"]",
")",
"# this implimentation in ctypes works with 1D arrays",
"freqs_in",
"=",
"freqs",
".",
"flatten",
"(",
")",
"hc_in",
"=",
"hc",
".",
"flatten",
"(",
")",
"hn_in",
"=",
"hn",
".",
"flatten",
"(",
")",
"num_binaries",
",",
"length_of_signal",
"=",
"hc",
".",
"shape",
"# prepare outout arrays",
"snr_cast",
"=",
"ctypes",
".",
"c_double",
"*",
"num_binaries",
"snr_all",
"=",
"snr_cast",
"(",
")",
"snr_ins",
"=",
"snr_cast",
"(",
")",
"snr_mrg",
"=",
"snr_cast",
"(",
")",
"snr_rd",
"=",
"snr_cast",
"(",
")",
"# find SNR values",
"c_obj",
".",
"SNR_function",
"(",
"ctypes",
".",
"byref",
"(",
"snr_all",
")",
",",
"ctypes",
".",
"byref",
"(",
"snr_ins",
")",
",",
"ctypes",
".",
"byref",
"(",
"snr_mrg",
")",
",",
"ctypes",
".",
"byref",
"(",
"snr_rd",
")",
",",
"freqs_in",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_double",
")",
")",
",",
"hc_in",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_double",
")",
")",
",",
"hn_in",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_double",
")",
")",
",",
"fmrg",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_double",
")",
")",
",",
"fpeak",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_double",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"length_of_signal",
")",
",",
"ctypes",
".",
"c_int",
"(",
"num_binaries",
")",
")",
"# make into numpy arrays",
"snr_all",
",",
"snr_ins",
",",
"=",
"np",
".",
"ctypeslib",
".",
"as_array",
"(",
"snr_all",
")",
",",
"np",
".",
"ctypeslib",
".",
"as_array",
"(",
"snr_ins",
")",
"snr_mrg",
",",
"snr_rd",
"=",
"np",
".",
"ctypeslib",
".",
"as_array",
"(",
"snr_mrg",
")",
",",
"np",
".",
"ctypeslib",
".",
"as_array",
"(",
"snr_rd",
")",
"# remove axis if one binary",
"if",
"remove_axis",
":",
"snr_all",
",",
"snr_ins",
",",
"snr_mrg",
",",
"snr_rd",
"=",
"snr_all",
"[",
"0",
"]",
",",
"snr_ins",
"[",
"0",
"]",
",",
"snr_mrg",
"[",
"0",
"]",
",",
"snr_rd",
"[",
"0",
"]",
"# prepare output by multiplying by prefactor",
"return",
"(",
"{",
"'all'",
":",
"snr_all",
"*",
"prefactor",
",",
"'ins'",
":",
"snr_ins",
"*",
"prefactor",
",",
"'mrg'",
":",
"snr_mrg",
"*",
"prefactor",
",",
"'rd'",
":",
"snr_rd",
"*",
"prefactor",
"}",
")"
]
| Calculate the SNR of a frequency domain waveform.
SNRCalculation is a function that takes waveforms (frequencies and hcs)
and a noise curve, and returns SNRs for all binary phases and the whole waveform.
Arguments:
freqs (1D or 2D array of floats): Frequencies corresponding to the waveforms.
Shape is (num binaries, num_points) if 2D.
Shape is (num_points,) if 1D for one binary.
hc (1D or 2D array of floats): Characteristic strain of the waveforms.
Shape is (num binaries, num_points) if 2D.
Shape is (num_points,) if 1D for one binary.
fmrg: (scalar float or 1D array of floats): Merger frequency of each binary separating
inspiral from merger phase. (0.014/M) Shape is (num binaries,)
if more than one binary.
fpeak: (scalar float or 1D array of floats): Peak frequency of each binary separating
merger from ringdown phase. (0.014/M) Shape is (num binaries,)
if more than one binary.
hn: (1D or 2D array of floats): Characteristic strain of the noise.
Shape is (num binaries, num_points) if 2D.
Shape is (num_points,) if 1D for one binary.
prefactor (float, optional): Factor to multiply snr (not snr^2) integral values by.
Default is 1.0.
Returns:
(dict): Dictionary with SNRs from each phase. | [
"Calculate",
"the",
"SNR",
"of",
"a",
"frequency",
"domain",
"waveform",
"."
]
| python | train |
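A minimal single-binary call sketch; all numbers are made up, and the compiled phenomd shared library is assumed to be present next to the module:

```python
import numpy as np

freqs = np.logspace(-5, -1, 1000)        # Hz
hc = 1e-20 * np.ones_like(freqs)         # waveform characteristic strain
hn = 1e-21 * np.ones_like(freqs)         # noise characteristic strain

snrs = csnr(freqs, hc, hn, fmrg=1e-3, fpeak=5e-3)
# snrs -> {'all': ..., 'ins': ..., 'mrg': ..., 'rd': ...}, each a scalar here
# because scalar fmrg/fpeak trigger the remove_axis single-binary path
```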
hydpy-dev/hydpy | hydpy/core/masktools.py | python | train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/masktools.py#L37-L43

```python
def array2mask(cls, array=None, **kwargs):
"""Create a new mask object based on the given |numpy.ndarray|
and return it."""
kwargs['dtype'] = bool
if array is None:
return numpy.ndarray.__new__(cls, 0, **kwargs)
        return numpy.asarray(array, **kwargs).view(cls)
```
"def",
"array2mask",
"(",
"cls",
",",
"array",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'dtype'",
"]",
"=",
"bool",
"if",
"array",
"is",
"None",
":",
"return",
"numpy",
".",
"ndarray",
".",
"__new__",
"(",
"cls",
",",
"0",
",",
"*",
"*",
"kwargs",
")",
"return",
"numpy",
".",
"asarray",
"(",
"array",
",",
"*",
"*",
"kwargs",
")",
".",
"view",
"(",
"cls",
")"
]
| Create a new mask object based on the given |numpy.ndarray|
and return it. | [
"Create",
"a",
"new",
"mask",
"object",
"based",
"on",
"the",
"given",
"|numpy",
".",
"ndarray|",
"and",
"return",
"it",
"."
]
| python | train |
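A self-contained sketch of the mechanism; the subclass here is hypothetical (in hydpy this function lives on the mask base class as a classmethod):

```python
import numpy

class MyMask(numpy.ndarray):
    array2mask = classmethod(array2mask)  # reuse the function above

mask = MyMask.array2mask([1, 0, 1])
# -> MyMask([ True, False,  True]); dtype is forced to bool
empty = MyMask.array2mask()
# -> a zero-length boolean MyMask, from the array=None branch
```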
ThreatConnect-Inc/tcex | tcex/tcex_redis.py | python | train
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_redis.py#L88-L98

```python
def hset(self, key, value):
"""Create key/value pair in Redis.
Args:
key (string): The key to create in Redis.
value (any): The value to store in Redis.
Returns:
(string): The response from Redis.
"""
        return self.r.hset(self.hash, key, value)
```
"def",
"hset",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"return",
"self",
".",
"r",
".",
"hset",
"(",
"self",
".",
"hash",
",",
"key",
",",
"value",
")"
]
| Create key/value pair in Redis.
Args:
key (string): The key to create in Redis.
value (any): The value to store in Redis.
Returns:
(string): The response from Redis. | [
"Create",
"key",
"/",
"value",
"pair",
"in",
"Redis",
"."
]
| python | train |
DerwenAI/pytextrank | pytextrank/pytextrank.py | python | valid
https://github.com/DerwenAI/pytextrank/blob/181ea41375d29922eb96768cf6550e57a77a0c95/pytextrank/pytextrank.py#L496-L527

```python
def collect_phrases (sent, ranks, spacy_nlp):
"""
iterator for collecting the noun phrases
"""
tail = 0
last_idx = sent[0].idx - 1
phrase = []
while tail < len(sent):
w = sent[tail]
if (w.word_id > 0) and (w.root in ranks) and ((w.idx - last_idx) == 1):
# keep collecting...
rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root], ids=w.word_id, pos=w.pos.lower(), count=1)
phrase.append(rl)
else:
# just hit a phrase boundary
for text, p in enumerate_chunks(phrase, spacy_nlp):
if p:
id_list = [rl.ids for rl in p]
rank_list = [rl.rank for rl in p]
np_rl = RankedLexeme(text=text, rank=rank_list, ids=id_list, pos="np", count=1)
if DEBUG:
print(np_rl)
yield np_rl
phrase = []
last_idx = w.idx
        tail += 1
```
"def",
"collect_phrases",
"(",
"sent",
",",
"ranks",
",",
"spacy_nlp",
")",
":",
"tail",
"=",
"0",
"last_idx",
"=",
"sent",
"[",
"0",
"]",
".",
"idx",
"-",
"1",
"phrase",
"=",
"[",
"]",
"while",
"tail",
"<",
"len",
"(",
"sent",
")",
":",
"w",
"=",
"sent",
"[",
"tail",
"]",
"if",
"(",
"w",
".",
"word_id",
">",
"0",
")",
"and",
"(",
"w",
".",
"root",
"in",
"ranks",
")",
"and",
"(",
"(",
"w",
".",
"idx",
"-",
"last_idx",
")",
"==",
"1",
")",
":",
"# keep collecting...",
"rl",
"=",
"RankedLexeme",
"(",
"text",
"=",
"w",
".",
"raw",
".",
"lower",
"(",
")",
",",
"rank",
"=",
"ranks",
"[",
"w",
".",
"root",
"]",
",",
"ids",
"=",
"w",
".",
"word_id",
",",
"pos",
"=",
"w",
".",
"pos",
".",
"lower",
"(",
")",
",",
"count",
"=",
"1",
")",
"phrase",
".",
"append",
"(",
"rl",
")",
"else",
":",
"# just hit a phrase boundary",
"for",
"text",
",",
"p",
"in",
"enumerate_chunks",
"(",
"phrase",
",",
"spacy_nlp",
")",
":",
"if",
"p",
":",
"id_list",
"=",
"[",
"rl",
".",
"ids",
"for",
"rl",
"in",
"p",
"]",
"rank_list",
"=",
"[",
"rl",
".",
"rank",
"for",
"rl",
"in",
"p",
"]",
"np_rl",
"=",
"RankedLexeme",
"(",
"text",
"=",
"text",
",",
"rank",
"=",
"rank_list",
",",
"ids",
"=",
"id_list",
",",
"pos",
"=",
"\"np\"",
",",
"count",
"=",
"1",
")",
"if",
"DEBUG",
":",
"print",
"(",
"np_rl",
")",
"yield",
"np_rl",
"phrase",
"=",
"[",
"]",
"last_idx",
"=",
"w",
".",
"idx",
"tail",
"+=",
"1"
]
| iterator for collecting the noun phrases | [
"iterator",
"for",
"collecting",
"the",
"noun",
"phrases"
]
| python | valid |
cosven/feeluown-core | fuocore/player.py | python | train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/player.py#L188-L223

```python
def _get_good_song(self, base=0, random_=False, direction=1):
"""从播放列表中获取一首可以播放的歌曲
:param base: base index
:param random: random strategy or not
:param direction: forward if > 0 else backword
>>> pl = Playlist([1, 2, 3])
>>> pl._get_good_song()
1
>>> pl._get_good_song(base=1)
2
>>> pl._bad_songs = [2]
>>> pl._get_good_song(base=1, direction=-1)
1
>>> pl._get_good_song(base=1)
3
>>> pl._bad_songs = [1, 2, 3]
>>> pl._get_good_song()
"""
if not self._songs or len(self._songs) <= len(self._bad_songs):
logger.debug('No good song in playlist.')
return None
good_songs = []
if direction > 0:
song_list = self._songs[base:] + self._songs[0:base]
else:
song_list = self._songs[base::-1] + self._songs[:base:-1]
for song in song_list:
if song not in self._bad_songs:
good_songs.append(song)
if random_:
return random.choice(good_songs)
else:
            return good_songs[0]
```
"def",
"_get_good_song",
"(",
"self",
",",
"base",
"=",
"0",
",",
"random_",
"=",
"False",
",",
"direction",
"=",
"1",
")",
":",
"if",
"not",
"self",
".",
"_songs",
"or",
"len",
"(",
"self",
".",
"_songs",
")",
"<=",
"len",
"(",
"self",
".",
"_bad_songs",
")",
":",
"logger",
".",
"debug",
"(",
"'No good song in playlist.'",
")",
"return",
"None",
"good_songs",
"=",
"[",
"]",
"if",
"direction",
">",
"0",
":",
"song_list",
"=",
"self",
".",
"_songs",
"[",
"base",
":",
"]",
"+",
"self",
".",
"_songs",
"[",
"0",
":",
"base",
"]",
"else",
":",
"song_list",
"=",
"self",
".",
"_songs",
"[",
"base",
":",
":",
"-",
"1",
"]",
"+",
"self",
".",
"_songs",
"[",
":",
"base",
":",
"-",
"1",
"]",
"for",
"song",
"in",
"song_list",
":",
"if",
"song",
"not",
"in",
"self",
".",
"_bad_songs",
":",
"good_songs",
".",
"append",
"(",
"song",
")",
"if",
"random_",
":",
"return",
"random",
".",
"choice",
"(",
"good_songs",
")",
"else",
":",
"return",
"good_songs",
"[",
"0",
"]"
]
| 从播放列表中获取一首可以播放的歌曲
:param base: base index
:param random: random strategy or not
:param direction: forward if > 0 else backword
>>> pl = Playlist([1, 2, 3])
>>> pl._get_good_song()
1
>>> pl._get_good_song(base=1)
2
>>> pl._bad_songs = [2]
>>> pl._get_good_song(base=1, direction=-1)
1
>>> pl._get_good_song(base=1)
3
>>> pl._bad_songs = [1, 2, 3]
>>> pl._get_good_song() | [
"从播放列表中获取一首可以播放的歌曲"
]
| python | train |
jaysonsantos/python-binary-memcached | bmemcached/protocol.py | python | train
https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/protocol.py#L181-L199

```python
def _read_socket(self, size):
"""
Reads data from socket.
:param size: Size in bytes to be read.
:return: Data from socket
"""
value = b''
while len(value) < size:
data = self.connection.recv(size - len(value))
if not data:
break
value += data
# If we got less data than we requested, the server disconnected.
if len(value) < size:
raise socket.error()
return value | [
"def",
"_read_socket",
"(",
"self",
",",
"size",
")",
":",
"value",
"=",
"b''",
"while",
"len",
"(",
"value",
")",
"<",
"size",
":",
"data",
"=",
"self",
".",
"connection",
".",
"recv",
"(",
"size",
"-",
"len",
"(",
"value",
")",
")",
"if",
"not",
"data",
":",
"break",
"value",
"+=",
"data",
"# If we got less data than we requested, the server disconnected.",
"if",
"len",
"(",
"value",
")",
"<",
"size",
":",
"raise",
"socket",
".",
"error",
"(",
")",
"return",
"value"
]
| Reads data from socket.
:param size: Size in bytes to be read.
:return: Data from socket | [
"Reads",
"data",
"from",
"socket",
"."
]
| python | train |
ceph/ceph-deploy | ceph_deploy/osd.py | python | train
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L177-L233

```python
def create_osd(
conn,
cluster,
data,
journal,
zap,
fs_type,
dmcrypt,
dmcrypt_dir,
storetype,
block_wal,
block_db,
**kw):
"""
Run on osd node, creates an OSD from a data disk.
"""
ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
args = [
ceph_volume_executable,
'--cluster', cluster,
'lvm',
'create',
'--%s' % storetype,
'--data', data
]
if zap:
LOG.warning('zapping is no longer supported when preparing')
if dmcrypt:
args.append('--dmcrypt')
# TODO: re-enable dmcrypt support once ceph-volume grows it
LOG.warning('dmcrypt is currently not supported')
if storetype == 'bluestore':
if block_wal:
args.append('--block.wal')
args.append(block_wal)
if block_db:
args.append('--block.db')
args.append(block_db)
elif storetype == 'filestore':
if not journal:
raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
args.append('--journal')
args.append(journal)
if kw.get('debug'):
remoto.process.run(
conn,
args,
extend_env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
conn,
args
        )
```
"def",
"create_osd",
"(",
"conn",
",",
"cluster",
",",
"data",
",",
"journal",
",",
"zap",
",",
"fs_type",
",",
"dmcrypt",
",",
"dmcrypt_dir",
",",
"storetype",
",",
"block_wal",
",",
"block_db",
",",
"*",
"*",
"kw",
")",
":",
"ceph_volume_executable",
"=",
"system",
".",
"executable_path",
"(",
"conn",
",",
"'ceph-volume'",
")",
"args",
"=",
"[",
"ceph_volume_executable",
",",
"'--cluster'",
",",
"cluster",
",",
"'lvm'",
",",
"'create'",
",",
"'--%s'",
"%",
"storetype",
",",
"'--data'",
",",
"data",
"]",
"if",
"zap",
":",
"LOG",
".",
"warning",
"(",
"'zapping is no longer supported when preparing'",
")",
"if",
"dmcrypt",
":",
"args",
".",
"append",
"(",
"'--dmcrypt'",
")",
"# TODO: re-enable dmcrypt support once ceph-volume grows it",
"LOG",
".",
"warning",
"(",
"'dmcrypt is currently not supported'",
")",
"if",
"storetype",
"==",
"'bluestore'",
":",
"if",
"block_wal",
":",
"args",
".",
"append",
"(",
"'--block.wal'",
")",
"args",
".",
"append",
"(",
"block_wal",
")",
"if",
"block_db",
":",
"args",
".",
"append",
"(",
"'--block.db'",
")",
"args",
".",
"append",
"(",
"block_db",
")",
"elif",
"storetype",
"==",
"'filestore'",
":",
"if",
"not",
"journal",
":",
"raise",
"RuntimeError",
"(",
"'A journal lv or GPT partition must be specified when using filestore'",
")",
"args",
".",
"append",
"(",
"'--journal'",
")",
"args",
".",
"append",
"(",
"journal",
")",
"if",
"kw",
".",
"get",
"(",
"'debug'",
")",
":",
"remoto",
".",
"process",
".",
"run",
"(",
"conn",
",",
"args",
",",
"extend_env",
"=",
"{",
"'CEPH_VOLUME_DEBUG'",
":",
"'1'",
"}",
")",
"else",
":",
"remoto",
".",
"process",
".",
"run",
"(",
"conn",
",",
"args",
")"
]
| Run on osd node, creates an OSD from a data disk. | [
"Run",
"on",
"osd",
"node",
"creates",
"an",
"OSD",
"from",
"a",
"data",
"disk",
"."
]
| python | train |
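For orientation, the argument list assembled above corresponds to a ceph-volume invocation like the following sketch (volume names are made up; with filestore, --journal replaces the --block.* flags):

    ceph-volume --cluster ceph lvm create --bluestore --data data_vg/data_lv --block.wal wal_vg/wal_lv --block.db db_vg/db_lv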
KelSolaar/Umbra | umbra/components/factory/script_editor/script_editor.py | python | train
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L3764-L3781

```python
def loop_through_editors(self, backward=False):
"""
Loops through the editor tabs.
:param backward: Looping backward.
:type backward: bool
:return: Method success.
:rtype: bool
"""
step = not backward and 1 or -1
idx = self.Script_Editor_tabWidget.currentIndex() + step
if idx < 0:
idx = self.Script_Editor_tabWidget.count() - 1
elif idx > self.Script_Editor_tabWidget.count() - 1:
idx = 0
self.Script_Editor_tabWidget.setCurrentIndex(idx)
        return True
```
"def",
"loop_through_editors",
"(",
"self",
",",
"backward",
"=",
"False",
")",
":",
"step",
"=",
"not",
"backward",
"and",
"1",
"or",
"-",
"1",
"idx",
"=",
"self",
".",
"Script_Editor_tabWidget",
".",
"currentIndex",
"(",
")",
"+",
"step",
"if",
"idx",
"<",
"0",
":",
"idx",
"=",
"self",
".",
"Script_Editor_tabWidget",
".",
"count",
"(",
")",
"-",
"1",
"elif",
"idx",
">",
"self",
".",
"Script_Editor_tabWidget",
".",
"count",
"(",
")",
"-",
"1",
":",
"idx",
"=",
"0",
"self",
".",
"Script_Editor_tabWidget",
".",
"setCurrentIndex",
"(",
"idx",
")",
"return",
"True"
]
| Loops through the editor tabs.
:param backward: Looping backward.
:type backward: bool
:return: Method success.
:rtype: bool | [
"Loops",
"through",
"the",
"editor",
"tabs",
"."
]
| python | train |
pypa/pipenv | pipenv/vendor/toml/decoder.py | python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/toml/decoder.py#L143-L461

```python
def loads(s, _dict=dict, decoder=None):
"""Parses string as toml
Args:
s: String to be parsed
_dict: (optional) Specifies the class of the returned toml dictionary
Returns:
Parsed toml file represented as a dictionary
Raises:
TypeError: When a non-string is passed
TomlDecodeError: Error while decoding toml
"""
implicitgroups = []
if decoder is None:
decoder = TomlDecoder(_dict)
retval = decoder.get_empty_table()
currentlevel = retval
if not isinstance(s, basestring):
raise TypeError("Expecting something like a string")
if not isinstance(s, unicode):
s = s.decode('utf8')
original = s
sl = list(s)
openarr = 0
openstring = False
openstrchar = ""
multilinestr = False
arrayoftables = False
beginline = True
keygroup = False
dottedkey = False
keyname = 0
for i, item in enumerate(sl):
if item == '\r' and sl[i + 1] == '\n':
sl[i] = ' '
continue
if keyname:
if item == '\n':
raise TomlDecodeError("Key name found without value."
" Reached end of line.", original, i)
if openstring:
if item == openstrchar:
keyname = 2
openstring = False
openstrchar = ""
continue
elif keyname == 1:
if item.isspace():
keyname = 2
continue
elif item == '.':
dottedkey = True
continue
elif item.isalnum() or item == '_' or item == '-':
continue
elif (dottedkey and sl[i - 1] == '.' and
(item == '"' or item == "'")):
openstring = True
openstrchar = item
continue
elif keyname == 2:
if item.isspace():
if dottedkey:
nextitem = sl[i + 1]
if not nextitem.isspace() and nextitem != '.':
keyname = 1
continue
if item == '.':
dottedkey = True
nextitem = sl[i + 1]
if not nextitem.isspace() and nextitem != '.':
keyname = 1
continue
if item == '=':
keyname = 0
dottedkey = False
else:
raise TomlDecodeError("Found invalid character in key name: '" +
item + "'. Try quoting the key name.",
original, i)
if item == "'" and openstrchar != '"':
k = 1
try:
while sl[i - k] == "'":
k += 1
if k == 3:
break
except IndexError:
pass
if k == 3:
multilinestr = not multilinestr
openstring = multilinestr
else:
openstring = not openstring
if openstring:
openstrchar = "'"
else:
openstrchar = ""
if item == '"' and openstrchar != "'":
oddbackslash = False
k = 1
tripquote = False
try:
while sl[i - k] == '"':
k += 1
if k == 3:
tripquote = True
break
if k == 1 or (k == 3 and tripquote):
while sl[i - k] == '\\':
oddbackslash = not oddbackslash
k += 1
except IndexError:
pass
if not oddbackslash:
if tripquote:
multilinestr = not multilinestr
openstring = multilinestr
else:
openstring = not openstring
if openstring:
openstrchar = '"'
else:
openstrchar = ""
if item == '#' and (not openstring and not keygroup and
not arrayoftables):
j = i
try:
while sl[j] != '\n':
sl[j] = ' '
j += 1
except IndexError:
break
if item == '[' and (not openstring and not keygroup and
not arrayoftables):
if beginline:
if len(sl) > i + 1 and sl[i + 1] == '[':
arrayoftables = True
else:
keygroup = True
else:
openarr += 1
if item == ']' and not openstring:
if keygroup:
keygroup = False
elif arrayoftables:
if sl[i - 1] == ']':
arrayoftables = False
else:
openarr -= 1
if item == '\n':
if openstring or multilinestr:
if not multilinestr:
raise TomlDecodeError("Unbalanced quotes", original, i)
if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
sl[i - 2] == sl[i - 1])):
sl[i] = sl[i - 1]
if sl[i - 3] == sl[i - 1]:
sl[i - 3] = ' '
elif openarr:
sl[i] = ' '
else:
beginline = True
elif beginline and sl[i] != ' ' and sl[i] != '\t':
beginline = False
if not keygroup and not arrayoftables:
if sl[i] == '=':
raise TomlDecodeError("Found empty keyname. ", original, i)
keyname = 1
s = ''.join(sl)
s = s.split('\n')
multikey = None
multilinestr = ""
multibackslash = False
pos = 0
for idx, line in enumerate(s):
if idx > 0:
pos += len(s[idx - 1]) + 1
if not multilinestr or multibackslash or '\n' not in multilinestr:
line = line.strip()
if line == "" and (not multikey or multibackslash):
continue
if multikey:
if multibackslash:
multilinestr += line
else:
multilinestr += line
multibackslash = False
if len(line) > 2 and (line[-1] == multilinestr[0] and
line[-2] == multilinestr[0] and
line[-3] == multilinestr[0]):
try:
value, vtype = decoder.load_value(multilinestr)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
currentlevel[multikey] = value
multikey = None
multilinestr = ""
else:
k = len(multilinestr) - 1
while k > -1 and multilinestr[k] == '\\':
multibackslash = not multibackslash
k -= 1
if multibackslash:
multilinestr = multilinestr[:-1]
else:
multilinestr += "\n"
continue
if line[0] == '[':
arrayoftables = False
if len(line) == 1:
raise TomlDecodeError("Opening key group bracket on line by "
"itself.", original, pos)
if line[1] == '[':
arrayoftables = True
line = line[2:]
splitstr = ']]'
else:
line = line[1:]
splitstr = ']'
i = 1
quotesplits = decoder._get_split_on_quotes(line)
quoted = False
for quotesplit in quotesplits:
if not quoted and splitstr in quotesplit:
break
i += quotesplit.count(splitstr)
quoted = not quoted
line = line.split(splitstr, i)
if len(line) < i + 1 or line[-1].strip() != "":
raise TomlDecodeError("Key group not on a line by itself.",
original, pos)
groups = splitstr.join(line[:-1]).split('.')
i = 0
while i < len(groups):
groups[i] = groups[i].strip()
if len(groups[i]) > 0 and (groups[i][0] == '"' or
groups[i][0] == "'"):
groupstr = groups[i]
j = i + 1
while not groupstr[0] == groupstr[-1]:
j += 1
if j > len(groups) + 2:
raise TomlDecodeError("Invalid group name '" +
groupstr + "' Something " +
"went wrong.", original, pos)
groupstr = '.'.join(groups[i:j]).strip()
groups[i] = groupstr[1:-1]
groups[i + 1:j] = []
else:
if not _groupname_re.match(groups[i]):
raise TomlDecodeError("Invalid group name '" +
groups[i] + "'. Try quoting it.",
original, pos)
i += 1
currentlevel = retval
for i in _range(len(groups)):
group = groups[i]
if group == "":
raise TomlDecodeError("Can't have a keygroup with an empty "
"name", original, pos)
try:
currentlevel[group]
if i == len(groups) - 1:
if group in implicitgroups:
implicitgroups.remove(group)
if arrayoftables:
raise TomlDecodeError("An implicitly defined "
"table can't be an array",
original, pos)
elif arrayoftables:
currentlevel[group].append(decoder.get_empty_table()
)
else:
raise TomlDecodeError("What? " + group +
" already exists?" +
str(currentlevel),
original, pos)
except TypeError:
currentlevel = currentlevel[-1]
if group not in currentlevel:
currentlevel[group] = decoder.get_empty_table()
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [decoder.get_empty_table()]
except KeyError:
if i != len(groups) - 1:
implicitgroups.append(group)
currentlevel[group] = decoder.get_empty_table()
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [decoder.get_empty_table()]
currentlevel = currentlevel[group]
if arrayoftables:
try:
currentlevel = currentlevel[-1]
except KeyError:
pass
elif line[0] == "{":
if line[-1] != "}":
raise TomlDecodeError("Line breaks are not allowed in inline"
"objects", original, pos)
try:
decoder.load_inline_object(line, currentlevel, multikey,
multibackslash)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
elif "=" in line:
try:
ret = decoder.load_line(line, currentlevel, multikey,
multibackslash)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
if ret is not None:
multikey, multilinestr, multibackslash = ret
    return retval
```
"def",
"loads",
"(",
"s",
",",
"_dict",
"=",
"dict",
",",
"decoder",
"=",
"None",
")",
":",
"implicitgroups",
"=",
"[",
"]",
"if",
"decoder",
"is",
"None",
":",
"decoder",
"=",
"TomlDecoder",
"(",
"_dict",
")",
"retval",
"=",
"decoder",
".",
"get_empty_table",
"(",
")",
"currentlevel",
"=",
"retval",
"if",
"not",
"isinstance",
"(",
"s",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"Expecting something like a string\"",
")",
"if",
"not",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"'utf8'",
")",
"original",
"=",
"s",
"sl",
"=",
"list",
"(",
"s",
")",
"openarr",
"=",
"0",
"openstring",
"=",
"False",
"openstrchar",
"=",
"\"\"",
"multilinestr",
"=",
"False",
"arrayoftables",
"=",
"False",
"beginline",
"=",
"True",
"keygroup",
"=",
"False",
"dottedkey",
"=",
"False",
"keyname",
"=",
"0",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"sl",
")",
":",
"if",
"item",
"==",
"'\\r'",
"and",
"sl",
"[",
"i",
"+",
"1",
"]",
"==",
"'\\n'",
":",
"sl",
"[",
"i",
"]",
"=",
"' '",
"continue",
"if",
"keyname",
":",
"if",
"item",
"==",
"'\\n'",
":",
"raise",
"TomlDecodeError",
"(",
"\"Key name found without value.\"",
"\" Reached end of line.\"",
",",
"original",
",",
"i",
")",
"if",
"openstring",
":",
"if",
"item",
"==",
"openstrchar",
":",
"keyname",
"=",
"2",
"openstring",
"=",
"False",
"openstrchar",
"=",
"\"\"",
"continue",
"elif",
"keyname",
"==",
"1",
":",
"if",
"item",
".",
"isspace",
"(",
")",
":",
"keyname",
"=",
"2",
"continue",
"elif",
"item",
"==",
"'.'",
":",
"dottedkey",
"=",
"True",
"continue",
"elif",
"item",
".",
"isalnum",
"(",
")",
"or",
"item",
"==",
"'_'",
"or",
"item",
"==",
"'-'",
":",
"continue",
"elif",
"(",
"dottedkey",
"and",
"sl",
"[",
"i",
"-",
"1",
"]",
"==",
"'.'",
"and",
"(",
"item",
"==",
"'\"'",
"or",
"item",
"==",
"\"'\"",
")",
")",
":",
"openstring",
"=",
"True",
"openstrchar",
"=",
"item",
"continue",
"elif",
"keyname",
"==",
"2",
":",
"if",
"item",
".",
"isspace",
"(",
")",
":",
"if",
"dottedkey",
":",
"nextitem",
"=",
"sl",
"[",
"i",
"+",
"1",
"]",
"if",
"not",
"nextitem",
".",
"isspace",
"(",
")",
"and",
"nextitem",
"!=",
"'.'",
":",
"keyname",
"=",
"1",
"continue",
"if",
"item",
"==",
"'.'",
":",
"dottedkey",
"=",
"True",
"nextitem",
"=",
"sl",
"[",
"i",
"+",
"1",
"]",
"if",
"not",
"nextitem",
".",
"isspace",
"(",
")",
"and",
"nextitem",
"!=",
"'.'",
":",
"keyname",
"=",
"1",
"continue",
"if",
"item",
"==",
"'='",
":",
"keyname",
"=",
"0",
"dottedkey",
"=",
"False",
"else",
":",
"raise",
"TomlDecodeError",
"(",
"\"Found invalid character in key name: '\"",
"+",
"item",
"+",
"\"'. Try quoting the key name.\"",
",",
"original",
",",
"i",
")",
"if",
"item",
"==",
"\"'\"",
"and",
"openstrchar",
"!=",
"'\"'",
":",
"k",
"=",
"1",
"try",
":",
"while",
"sl",
"[",
"i",
"-",
"k",
"]",
"==",
"\"'\"",
":",
"k",
"+=",
"1",
"if",
"k",
"==",
"3",
":",
"break",
"except",
"IndexError",
":",
"pass",
"if",
"k",
"==",
"3",
":",
"multilinestr",
"=",
"not",
"multilinestr",
"openstring",
"=",
"multilinestr",
"else",
":",
"openstring",
"=",
"not",
"openstring",
"if",
"openstring",
":",
"openstrchar",
"=",
"\"'\"",
"else",
":",
"openstrchar",
"=",
"\"\"",
"if",
"item",
"==",
"'\"'",
"and",
"openstrchar",
"!=",
"\"'\"",
":",
"oddbackslash",
"=",
"False",
"k",
"=",
"1",
"tripquote",
"=",
"False",
"try",
":",
"while",
"sl",
"[",
"i",
"-",
"k",
"]",
"==",
"'\"'",
":",
"k",
"+=",
"1",
"if",
"k",
"==",
"3",
":",
"tripquote",
"=",
"True",
"break",
"if",
"k",
"==",
"1",
"or",
"(",
"k",
"==",
"3",
"and",
"tripquote",
")",
":",
"while",
"sl",
"[",
"i",
"-",
"k",
"]",
"==",
"'\\\\'",
":",
"oddbackslash",
"=",
"not",
"oddbackslash",
"k",
"+=",
"1",
"except",
"IndexError",
":",
"pass",
"if",
"not",
"oddbackslash",
":",
"if",
"tripquote",
":",
"multilinestr",
"=",
"not",
"multilinestr",
"openstring",
"=",
"multilinestr",
"else",
":",
"openstring",
"=",
"not",
"openstring",
"if",
"openstring",
":",
"openstrchar",
"=",
"'\"'",
"else",
":",
"openstrchar",
"=",
"\"\"",
"if",
"item",
"==",
"'#'",
"and",
"(",
"not",
"openstring",
"and",
"not",
"keygroup",
"and",
"not",
"arrayoftables",
")",
":",
"j",
"=",
"i",
"try",
":",
"while",
"sl",
"[",
"j",
"]",
"!=",
"'\\n'",
":",
"sl",
"[",
"j",
"]",
"=",
"' '",
"j",
"+=",
"1",
"except",
"IndexError",
":",
"break",
"if",
"item",
"==",
"'['",
"and",
"(",
"not",
"openstring",
"and",
"not",
"keygroup",
"and",
"not",
"arrayoftables",
")",
":",
"if",
"beginline",
":",
"if",
"len",
"(",
"sl",
")",
">",
"i",
"+",
"1",
"and",
"sl",
"[",
"i",
"+",
"1",
"]",
"==",
"'['",
":",
"arrayoftables",
"=",
"True",
"else",
":",
"keygroup",
"=",
"True",
"else",
":",
"openarr",
"+=",
"1",
"if",
"item",
"==",
"']'",
"and",
"not",
"openstring",
":",
"if",
"keygroup",
":",
"keygroup",
"=",
"False",
"elif",
"arrayoftables",
":",
"if",
"sl",
"[",
"i",
"-",
"1",
"]",
"==",
"']'",
":",
"arrayoftables",
"=",
"False",
"else",
":",
"openarr",
"-=",
"1",
"if",
"item",
"==",
"'\\n'",
":",
"if",
"openstring",
"or",
"multilinestr",
":",
"if",
"not",
"multilinestr",
":",
"raise",
"TomlDecodeError",
"(",
"\"Unbalanced quotes\"",
",",
"original",
",",
"i",
")",
"if",
"(",
"(",
"sl",
"[",
"i",
"-",
"1",
"]",
"==",
"\"'\"",
"or",
"sl",
"[",
"i",
"-",
"1",
"]",
"==",
"'\"'",
")",
"and",
"(",
"sl",
"[",
"i",
"-",
"2",
"]",
"==",
"sl",
"[",
"i",
"-",
"1",
"]",
")",
")",
":",
"sl",
"[",
"i",
"]",
"=",
"sl",
"[",
"i",
"-",
"1",
"]",
"if",
"sl",
"[",
"i",
"-",
"3",
"]",
"==",
"sl",
"[",
"i",
"-",
"1",
"]",
":",
"sl",
"[",
"i",
"-",
"3",
"]",
"=",
"' '",
"elif",
"openarr",
":",
"sl",
"[",
"i",
"]",
"=",
"' '",
"else",
":",
"beginline",
"=",
"True",
"elif",
"beginline",
"and",
"sl",
"[",
"i",
"]",
"!=",
"' '",
"and",
"sl",
"[",
"i",
"]",
"!=",
"'\\t'",
":",
"beginline",
"=",
"False",
"if",
"not",
"keygroup",
"and",
"not",
"arrayoftables",
":",
"if",
"sl",
"[",
"i",
"]",
"==",
"'='",
":",
"raise",
"TomlDecodeError",
"(",
"\"Found empty keyname. \"",
",",
"original",
",",
"i",
")",
"keyname",
"=",
"1",
"s",
"=",
"''",
".",
"join",
"(",
"sl",
")",
"s",
"=",
"s",
".",
"split",
"(",
"'\\n'",
")",
"multikey",
"=",
"None",
"multilinestr",
"=",
"\"\"",
"multibackslash",
"=",
"False",
"pos",
"=",
"0",
"for",
"idx",
",",
"line",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"idx",
">",
"0",
":",
"pos",
"+=",
"len",
"(",
"s",
"[",
"idx",
"-",
"1",
"]",
")",
"+",
"1",
"if",
"not",
"multilinestr",
"or",
"multibackslash",
"or",
"'\\n'",
"not",
"in",
"multilinestr",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"==",
"\"\"",
"and",
"(",
"not",
"multikey",
"or",
"multibackslash",
")",
":",
"continue",
"if",
"multikey",
":",
"if",
"multibackslash",
":",
"multilinestr",
"+=",
"line",
"else",
":",
"multilinestr",
"+=",
"line",
"multibackslash",
"=",
"False",
"if",
"len",
"(",
"line",
")",
">",
"2",
"and",
"(",
"line",
"[",
"-",
"1",
"]",
"==",
"multilinestr",
"[",
"0",
"]",
"and",
"line",
"[",
"-",
"2",
"]",
"==",
"multilinestr",
"[",
"0",
"]",
"and",
"line",
"[",
"-",
"3",
"]",
"==",
"multilinestr",
"[",
"0",
"]",
")",
":",
"try",
":",
"value",
",",
"vtype",
"=",
"decoder",
".",
"load_value",
"(",
"multilinestr",
")",
"except",
"ValueError",
"as",
"err",
":",
"raise",
"TomlDecodeError",
"(",
"str",
"(",
"err",
")",
",",
"original",
",",
"pos",
")",
"currentlevel",
"[",
"multikey",
"]",
"=",
"value",
"multikey",
"=",
"None",
"multilinestr",
"=",
"\"\"",
"else",
":",
"k",
"=",
"len",
"(",
"multilinestr",
")",
"-",
"1",
"while",
"k",
">",
"-",
"1",
"and",
"multilinestr",
"[",
"k",
"]",
"==",
"'\\\\'",
":",
"multibackslash",
"=",
"not",
"multibackslash",
"k",
"-=",
"1",
"if",
"multibackslash",
":",
"multilinestr",
"=",
"multilinestr",
"[",
":",
"-",
"1",
"]",
"else",
":",
"multilinestr",
"+=",
"\"\\n\"",
"continue",
"if",
"line",
"[",
"0",
"]",
"==",
"'['",
":",
"arrayoftables",
"=",
"False",
"if",
"len",
"(",
"line",
")",
"==",
"1",
":",
"raise",
"TomlDecodeError",
"(",
"\"Opening key group bracket on line by \"",
"\"itself.\"",
",",
"original",
",",
"pos",
")",
"if",
"line",
"[",
"1",
"]",
"==",
"'['",
":",
"arrayoftables",
"=",
"True",
"line",
"=",
"line",
"[",
"2",
":",
"]",
"splitstr",
"=",
"']]'",
"else",
":",
"line",
"=",
"line",
"[",
"1",
":",
"]",
"splitstr",
"=",
"']'",
"i",
"=",
"1",
"quotesplits",
"=",
"decoder",
".",
"_get_split_on_quotes",
"(",
"line",
")",
"quoted",
"=",
"False",
"for",
"quotesplit",
"in",
"quotesplits",
":",
"if",
"not",
"quoted",
"and",
"splitstr",
"in",
"quotesplit",
":",
"break",
"i",
"+=",
"quotesplit",
".",
"count",
"(",
"splitstr",
")",
"quoted",
"=",
"not",
"quoted",
"line",
"=",
"line",
".",
"split",
"(",
"splitstr",
",",
"i",
")",
"if",
"len",
"(",
"line",
")",
"<",
"i",
"+",
"1",
"or",
"line",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"!=",
"\"\"",
":",
"raise",
"TomlDecodeError",
"(",
"\"Key group not on a line by itself.\"",
",",
"original",
",",
"pos",
")",
"groups",
"=",
"splitstr",
".",
"join",
"(",
"line",
"[",
":",
"-",
"1",
"]",
")",
".",
"split",
"(",
"'.'",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"groups",
")",
":",
"groups",
"[",
"i",
"]",
"=",
"groups",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"groups",
"[",
"i",
"]",
")",
">",
"0",
"and",
"(",
"groups",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"'\"'",
"or",
"groups",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"\"'\"",
")",
":",
"groupstr",
"=",
"groups",
"[",
"i",
"]",
"j",
"=",
"i",
"+",
"1",
"while",
"not",
"groupstr",
"[",
"0",
"]",
"==",
"groupstr",
"[",
"-",
"1",
"]",
":",
"j",
"+=",
"1",
"if",
"j",
">",
"len",
"(",
"groups",
")",
"+",
"2",
":",
"raise",
"TomlDecodeError",
"(",
"\"Invalid group name '\"",
"+",
"groupstr",
"+",
"\"' Something \"",
"+",
"\"went wrong.\"",
",",
"original",
",",
"pos",
")",
"groupstr",
"=",
"'.'",
".",
"join",
"(",
"groups",
"[",
"i",
":",
"j",
"]",
")",
".",
"strip",
"(",
")",
"groups",
"[",
"i",
"]",
"=",
"groupstr",
"[",
"1",
":",
"-",
"1",
"]",
"groups",
"[",
"i",
"+",
"1",
":",
"j",
"]",
"=",
"[",
"]",
"else",
":",
"if",
"not",
"_groupname_re",
".",
"match",
"(",
"groups",
"[",
"i",
"]",
")",
":",
"raise",
"TomlDecodeError",
"(",
"\"Invalid group name '\"",
"+",
"groups",
"[",
"i",
"]",
"+",
"\"'. Try quoting it.\"",
",",
"original",
",",
"pos",
")",
"i",
"+=",
"1",
"currentlevel",
"=",
"retval",
"for",
"i",
"in",
"_range",
"(",
"len",
"(",
"groups",
")",
")",
":",
"group",
"=",
"groups",
"[",
"i",
"]",
"if",
"group",
"==",
"\"\"",
":",
"raise",
"TomlDecodeError",
"(",
"\"Can't have a keygroup with an empty \"",
"\"name\"",
",",
"original",
",",
"pos",
")",
"try",
":",
"currentlevel",
"[",
"group",
"]",
"if",
"i",
"==",
"len",
"(",
"groups",
")",
"-",
"1",
":",
"if",
"group",
"in",
"implicitgroups",
":",
"implicitgroups",
".",
"remove",
"(",
"group",
")",
"if",
"arrayoftables",
":",
"raise",
"TomlDecodeError",
"(",
"\"An implicitly defined \"",
"\"table can't be an array\"",
",",
"original",
",",
"pos",
")",
"elif",
"arrayoftables",
":",
"currentlevel",
"[",
"group",
"]",
".",
"append",
"(",
"decoder",
".",
"get_empty_table",
"(",
")",
")",
"else",
":",
"raise",
"TomlDecodeError",
"(",
"\"What? \"",
"+",
"group",
"+",
"\" already exists?\"",
"+",
"str",
"(",
"currentlevel",
")",
",",
"original",
",",
"pos",
")",
"except",
"TypeError",
":",
"currentlevel",
"=",
"currentlevel",
"[",
"-",
"1",
"]",
"if",
"group",
"not",
"in",
"currentlevel",
":",
"currentlevel",
"[",
"group",
"]",
"=",
"decoder",
".",
"get_empty_table",
"(",
")",
"if",
"i",
"==",
"len",
"(",
"groups",
")",
"-",
"1",
"and",
"arrayoftables",
":",
"currentlevel",
"[",
"group",
"]",
"=",
"[",
"decoder",
".",
"get_empty_table",
"(",
")",
"]",
"except",
"KeyError",
":",
"if",
"i",
"!=",
"len",
"(",
"groups",
")",
"-",
"1",
":",
"implicitgroups",
".",
"append",
"(",
"group",
")",
"currentlevel",
"[",
"group",
"]",
"=",
"decoder",
".",
"get_empty_table",
"(",
")",
"if",
"i",
"==",
"len",
"(",
"groups",
")",
"-",
"1",
"and",
"arrayoftables",
":",
"currentlevel",
"[",
"group",
"]",
"=",
"[",
"decoder",
".",
"get_empty_table",
"(",
")",
"]",
"currentlevel",
"=",
"currentlevel",
"[",
"group",
"]",
"if",
"arrayoftables",
":",
"try",
":",
"currentlevel",
"=",
"currentlevel",
"[",
"-",
"1",
"]",
"except",
"KeyError",
":",
"pass",
"elif",
"line",
"[",
"0",
"]",
"==",
"\"{\"",
":",
"if",
"line",
"[",
"-",
"1",
"]",
"!=",
"\"}\"",
":",
"raise",
"TomlDecodeError",
"(",
"\"Line breaks are not allowed in inline\"",
"\"objects\"",
",",
"original",
",",
"pos",
")",
"try",
":",
"decoder",
".",
"load_inline_object",
"(",
"line",
",",
"currentlevel",
",",
"multikey",
",",
"multibackslash",
")",
"except",
"ValueError",
"as",
"err",
":",
"raise",
"TomlDecodeError",
"(",
"str",
"(",
"err",
")",
",",
"original",
",",
"pos",
")",
"elif",
"\"=\"",
"in",
"line",
":",
"try",
":",
"ret",
"=",
"decoder",
".",
"load_line",
"(",
"line",
",",
"currentlevel",
",",
"multikey",
",",
"multibackslash",
")",
"except",
"ValueError",
"as",
"err",
":",
"raise",
"TomlDecodeError",
"(",
"str",
"(",
"err",
")",
",",
"original",
",",
"pos",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"multikey",
",",
"multilinestr",
",",
"multibackslash",
"=",
"ret",
"return",
"retval"
]
| Parses string as toml
Args:
s: String to be parsed
_dict: (optional) Specifies the class of the returned toml dictionary
Returns:
Parsed toml file represented as a dictionary
Raises:
TypeError: When a non-string is passed
TomlDecodeError: Error while decoding toml | [
"Parses",
"string",
"as",
"toml"
]
| python | train |
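A minimal usage sketch for the TOML parsing routine in the record above. It assumes the public `toml.loads` entry point that wraps this loader; the TOML snippet itself is made up.

import toml

# Hypothetical document; any valid TOML works here.
doc = '''
[server]
host = "127.0.0.1"
ports = [8001, 8002]
'''

config = toml.loads(doc)          # returns a plain dict by default (_dict=dict)
print(config["server"]["host"])   # -> 127.0.0.1
print(config["server"]["ports"])  # -> [8001, 8002]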
jonathansick/paperweight | paperweight/texutils.py | https://github.com/jonathansick/paperweight/blob/803535b939a56d375967cefecd5fdca81323041e/paperweight/texutils.py#L68-L86 | def inline_bbl(root_tex, bbl_tex):
"""Inline a compiled bibliography (.bbl) in place of a bibliography
environment.
Parameters
----------
root_tex : unicode
Text to process.
bbl_tex : unicode
Text of bibliography file.
Returns
-------
txt : unicode
Text with bibliography included.
"""
bbl_tex = bbl_tex.replace(u'\\', u'\\\\')
result = bib_pattern.sub(bbl_tex, root_tex)
return result | [
"def",
"inline_bbl",
"(",
"root_tex",
",",
"bbl_tex",
")",
":",
"bbl_tex",
"=",
"bbl_tex",
".",
"replace",
"(",
"u'\\\\'",
",",
"u'\\\\\\\\'",
")",
"result",
"=",
"bib_pattern",
".",
"sub",
"(",
"bbl_tex",
",",
"root_tex",
")",
"return",
"result"
]
| Inline a compiled bibliography (.bbl) in place of a bibliography
environment.
Parameters
----------
root_tex : unicode
Text to process.
bbl_tex : unicode
Text of bibliography file.
Returns
-------
txt : unicode
Text with bibliography included. | [
"Inline",
"a",
"compiled",
"bibliography",
"(",
".",
"bbl",
")",
"in",
"place",
"of",
"a",
"bibliography",
"environment",
"."
]
| python | train |
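A short sketch of `inline_bbl` in use; the file names are illustrative assumptions, and the import path follows the record's module path.

from paperweight.texutils import inline_bbl

# Read the root document and its compiled bibliography.
with open("paper.tex") as f:
    root_tex = f.read()
with open("paper.bbl") as f:
    bbl_tex = f.read()

# Replace the bibliography environment with the compiled .bbl contents.
merged = inline_bbl(root_tex, bbl_tex)

with open("paper_inlined.tex", "w") as f:
    f.write(merged)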
nuagenetworks/bambou | bambou/nurest_push_center.py | https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_push_center.py#L173-L210 | def _did_receive_event(self, connection):
""" Receive an event from connection """
if not self._is_running:
return
if connection.has_timeouted:
return
response = connection.response
data = None
if response.status_code != 200:
pushcenter_logger.error("[NURESTPushCenter]: Connection failure [%s] %s" % (response.status_code, response.errors))
else:
data = response.data
if len(self._delegate_methods) > 0:
for m in self._delegate_methods:
try:
m(data)
except Exception as exc:
pushcenter_logger.error("[NURESTPushCenter] Delegate method %s failed:\n%s" % (m, exc))
elif data:
events = data['events']
self.nb_events_received += len(events)
self.nb_push_received += 1
pushcenter_logger.info("[NURESTPushCenter] Received Push #%s (total=%s, latest=%s)\n%s" % (self.nb_push_received, self.nb_events_received, len(events), json.dumps(events, indent=4)))
self._last_events.extend(events)
if self._is_running:
uuid = None
if data and 'uuid' in data:
uuid = data['uuid']
self._listen(uuid) | [
"def",
"_did_receive_event",
"(",
"self",
",",
"connection",
")",
":",
"if",
"not",
"self",
".",
"_is_running",
":",
"return",
"if",
"connection",
".",
"has_timeouted",
":",
"return",
"response",
"=",
"connection",
".",
"response",
"data",
"=",
"None",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"pushcenter_logger",
".",
"error",
"(",
"\"[NURESTPushCenter]: Connection failure [%s] %s\"",
"%",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"errors",
")",
")",
"else",
":",
"data",
"=",
"response",
".",
"data",
"if",
"len",
"(",
"self",
".",
"_delegate_methods",
")",
">",
"0",
":",
"for",
"m",
"in",
"self",
".",
"_delegate_methods",
":",
"try",
":",
"m",
"(",
"data",
")",
"except",
"Exception",
"as",
"exc",
":",
"pushcenter_logger",
".",
"error",
"(",
"\"[NURESTPushCenter] Delegate method %s failed:\\n%s\"",
"%",
"(",
"m",
",",
"exc",
")",
")",
"elif",
"data",
":",
"events",
"=",
"data",
"[",
"'events'",
"]",
"self",
".",
"nb_events_received",
"+=",
"len",
"(",
"events",
")",
"self",
".",
"nb_push_received",
"+=",
"1",
"pushcenter_logger",
".",
"info",
"(",
"\"[NURESTPushCenter] Received Push #%s (total=%s, latest=%s)\\n%s\"",
"%",
"(",
"self",
".",
"nb_push_received",
",",
"self",
".",
"nb_events_received",
",",
"len",
"(",
"events",
")",
",",
"json",
".",
"dumps",
"(",
"events",
",",
"indent",
"=",
"4",
")",
")",
")",
"self",
".",
"_last_events",
".",
"extend",
"(",
"events",
")",
"if",
"self",
".",
"_is_running",
":",
"uuid",
"=",
"None",
"if",
"data",
"and",
"'uuid'",
"in",
"data",
":",
"uuid",
"=",
"data",
"[",
"'uuid'",
"]",
"self",
".",
"_listen",
"(",
"uuid",
")"
]
| Receive an event from connection | [
"Receive",
"an",
"event",
"from",
"connection"
]
| python | train |
zhelev/python-afsapi | afsapi/__init__.py | https://github.com/zhelev/python-afsapi/blob/bb1990cf1460ae42f2dde75f2291625ddac2c0e4/afsapi/__init__.py#L262-L270 | def set_mode(self, value):
"""Set the currently active mode on the device (DAB, FM, Spotify)."""
mode = -1
modes = yield from self.get_modes()
for temp_mode in modes:
if temp_mode['label'] == value:
mode = temp_mode['band']
return (yield from self.handle_set(self.API.get('mode'), mode)) | [
"def",
"set_mode",
"(",
"self",
",",
"value",
")",
":",
"mode",
"=",
"-",
"1",
"modes",
"=",
"yield",
"from",
"self",
".",
"get_modes",
"(",
")",
"for",
"temp_mode",
"in",
"modes",
":",
"if",
"temp_mode",
"[",
"'label'",
"]",
"==",
"value",
":",
"mode",
"=",
"temp_mode",
"[",
"'band'",
"]",
"return",
"(",
"yield",
"from",
"self",
".",
"handle_set",
"(",
"self",
".",
"API",
".",
"get",
"(",
"'mode'",
")",
",",
"mode",
")",
")"
]
| Set the currently active mode on the device (DAB, FM, Spotify). | [
"Set",
"the",
"currently",
"active",
"mode",
"on",
"the",
"device",
"(",
"DAB",
"FM",
"Spotify",
")",
"."
]
| python | valid |
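A hedged usage sketch for `set_mode`, following the coroutine style the library's methods use; the device URL, PIN, and mode label are assumptions.

import asyncio
from afsapi import AFSAPI

# Device URL and PIN are made up; adjust for your radio.
fs = AFSAPI('http://192.168.1.25:80/device', 1234)

loop = asyncio.get_event_loop()
# The label must match one of the entries returned by get_modes().
loop.run_until_complete(fs.set_mode('DAB'))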
insilichem/ommprotocol | ommprotocol/utils.py | https://github.com/insilichem/ommprotocol/blob/7283fddba7203e5ac3542fdab41fc1279d3b444e/ommprotocol/utils.py#L59-L65 | def assertinstance(obj, types):
"""
Make sure object `obj` is of type `types`. Else, raise TypeError.
"""
if isinstance(obj, types):
return obj
raise TypeError('{} must be instance of {}'.format(obj, types)) | [
"def",
"assertinstance",
"(",
"obj",
",",
"types",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"types",
")",
":",
"return",
"obj",
"raise",
"TypeError",
"(",
"'{} must be instance of {}'",
".",
"format",
"(",
"obj",
",",
"types",
")",
")"
]
| Make sure object `obj` is of type `types`. Else, raise TypeError. | [
"Make",
"sure",
"object",
"obj",
"is",
"of",
"type",
"types",
".",
"Else",
"raise",
"TypeError",
"."
]
| python | train |
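A minimal sketch of `assertinstance`; the import path is taken from the record above.

from ommprotocol.utils import assertinstance

n = assertinstance(42, int)             # returns 42 unchanged
pt = assertinstance((1.0, 2.0), tuple)  # any type usable with isinstance() works
assertinstance("42", int)               # raises TypeError: 42 must be instance of <class 'int'>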
ltworf/typedload | typedload/__init__.py | https://github.com/ltworf/typedload/blob/7fd130612963bfcec3242698463ef863ca4af927/typedload/__init__.py#L157-L168 | def dump(value: Any, **kwargs) -> Any:
"""
Quick function to dump a data structure into
something that is compatible with json or
other programs and languages.
It is useful to avoid creating the Dumper object,
in case only the default parameters are used.
"""
from . import datadumper
dumper = datadumper.Dumper(**kwargs)
return dumper.dump(value) | [
"def",
"dump",
"(",
"value",
":",
"Any",
",",
"*",
"*",
"kwargs",
")",
"->",
"Any",
":",
"from",
".",
"import",
"datadumper",
"dumper",
"=",
"datadumper",
".",
"Dumper",
"(",
"*",
"*",
"kwargs",
")",
"return",
"dumper",
".",
"dump",
"(",
"value",
")"
]
| Quick function to dump a data structure into
something that is compatible with json or
other programs and languages.
It is useful to avoid creating the Dumper object,
in case only the default parameters are used. | [
"Quick",
"function",
"to",
"dump",
"a",
"data",
"structure",
"into",
"something",
"that",
"is",
"compatible",
"with",
"json",
"or",
"other",
"programs",
"and",
"languages",
"."
]
| python | train |
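A quick sketch of `dump`; the NamedTuple here is an illustrative assumption.

from typing import NamedTuple
import typedload

class Point(NamedTuple):
    x: int
    y: int

print(typedload.dump([Point(1, 2), Point(3, 4)]))
# -> [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]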
google/grr | grr/client/grr_response_client/client_actions/windows/windows.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/windows/windows.py#L71-L96 | def EnumerateInterfacesFromClient(args):
"""Enumerate all MAC addresses of all NICs.
Args:
args: Unused.
Yields:
`rdf_client_network.Interface` instances.
"""
del args # Unused.
pythoncom.CoInitialize()
for interface in (wmi.WMI().Win32_NetworkAdapterConfiguration() or []):
addresses = []
for ip_address in interface.IPAddress or []:
addresses.append(
rdf_client_network.NetworkAddress(human_readable_address=ip_address))
response = rdf_client_network.Interface(ifname=interface.Description)
if interface.MACAddress:
response.mac_address = binascii.unhexlify(
interface.MACAddress.replace(":", ""))
if addresses:
response.addresses = addresses
yield response | [
"def",
"EnumerateInterfacesFromClient",
"(",
"args",
")",
":",
"del",
"args",
"# Unused.",
"pythoncom",
".",
"CoInitialize",
"(",
")",
"for",
"interface",
"in",
"(",
"wmi",
".",
"WMI",
"(",
")",
".",
"Win32_NetworkAdapterConfiguration",
"(",
")",
"or",
"[",
"]",
")",
":",
"addresses",
"=",
"[",
"]",
"for",
"ip_address",
"in",
"interface",
".",
"IPAddress",
"or",
"[",
"]",
":",
"addresses",
".",
"append",
"(",
"rdf_client_network",
".",
"NetworkAddress",
"(",
"human_readable_address",
"=",
"ip_address",
")",
")",
"response",
"=",
"rdf_client_network",
".",
"Interface",
"(",
"ifname",
"=",
"interface",
".",
"Description",
")",
"if",
"interface",
".",
"MACAddress",
":",
"response",
".",
"mac_address",
"=",
"binascii",
".",
"unhexlify",
"(",
"interface",
".",
"MACAddress",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
")",
"if",
"addresses",
":",
"response",
".",
"addresses",
"=",
"addresses",
"yield",
"response"
]
| Enumerate all MAC addresses of all NICs.
Args:
args: Unused.
Yields:
`rdf_client_network.Interface` instances. | [
"Enumerate",
"all",
"MAC",
"addresses",
"of",
"all",
"NICs",
"."
]
| python | train |
CivicSpleen/ambry | ambry/bundle/files.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/files.py#L458-L466 | def record_to_fh(self, f):
"""Write the record, in filesystem format, to a file handle or file object"""
fr = self.record
if fr.contents:
yaml.safe_dump(fr.unpacked_contents, f, default_flow_style=False, encoding='utf-8')
fr.source_hash = self.fs_hash
fr.modified = self.fs_modtime | [
"def",
"record_to_fh",
"(",
"self",
",",
"f",
")",
":",
"fr",
"=",
"self",
".",
"record",
"if",
"fr",
".",
"contents",
":",
"yaml",
".",
"safe_dump",
"(",
"fr",
".",
"unpacked_contents",
",",
"f",
",",
"default_flow_style",
"=",
"False",
",",
"encoding",
"=",
"'utf-8'",
")",
"fr",
".",
"source_hash",
"=",
"self",
".",
"fs_hash",
"fr",
".",
"modified",
"=",
"self",
".",
"fs_modtime"
]
| Write the record, in filesystem format, to a file handle or file object | [
"Write",
"the",
"record",
"in",
"filesystem",
"format",
"to",
"a",
"file",
"handle",
"or",
"file",
"object"
]
| python | train |
mwgielen/jackal | jackal/utils.py | https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/utils.py#L85-L189 | def draw_interface(objects, callback, callback_text):
"""
Draws an ncurses interface. Based on the given object list, every object should have a "string" key, this is what's displayed on the screen, callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
"""
screen = curses.initscr()
height, width = screen.getmaxyx()
curses.noecho()
curses.cbreak()
curses.start_color()
screen.keypad( 1 )
curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_CYAN)
highlightText = curses.color_pair( 1 )
normalText = curses.A_NORMAL
screen.border( 0 )
curses.curs_set( 0 )
max_row = height - 15 # max number of rows
box = curses.newwin( max_row + 2, int(width - 2), 1, 1 )
box.box()
fmt = PartialFormatter()
row_num = len( objects )
pages = int( ceil( row_num / max_row ) )
position = 1
page = 1
for i in range( 1, max_row + 1 ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if (i == position):
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
while x != 27:
if x == curses.KEY_DOWN:
if page == 1:
if position < i:
position = position + 1
else:
if pages > 1:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
elif page == pages:
if position < row_num:
position = position + 1
else:
if position < max_row + ( max_row * ( page - 1 ) ):
position = position + 1
else:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
if x == curses.KEY_UP:
if page == 1:
if position > 1:
position = position - 1
else:
if position > ( 1 + ( max_row * ( page - 1 ) ) ):
position = position - 1
else:
page = page - 1
position = max_row + ( max_row * ( page - 1 ) )
screen.erase()
if x == ord( "\n" ) and row_num != 0:
screen.erase()
screen.border( 0 )
service = objects[position -1]
text = fmt.format(callback_text, **service)
screen.addstr( max_row + 4, 3, text)
text = callback(service)
count = 0
for line in text:
screen.addstr( max_row + 5 + count, 3, line)
count += 1
box.erase()
screen.border( 0 )
box.border( 0 )
for i in range( 1 + ( max_row * ( page - 1 ) ), max_row + 1 + ( max_row * ( page - 1 ) ) ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if ( i + ( max_row * ( page - 1 ) ) == position + ( max_row * ( page - 1 ) ) ):
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
curses.endwin()
exit() | [
"def",
"draw_interface",
"(",
"objects",
",",
"callback",
",",
"callback_text",
")",
":",
"screen",
"=",
"curses",
".",
"initscr",
"(",
")",
"height",
",",
"width",
"=",
"screen",
".",
"getmaxyx",
"(",
")",
"curses",
".",
"noecho",
"(",
")",
"curses",
".",
"cbreak",
"(",
")",
"curses",
".",
"start_color",
"(",
")",
"screen",
".",
"keypad",
"(",
"1",
")",
"curses",
".",
"init_pair",
"(",
"1",
",",
"curses",
".",
"COLOR_BLACK",
",",
"curses",
".",
"COLOR_CYAN",
")",
"highlightText",
"=",
"curses",
".",
"color_pair",
"(",
"1",
")",
"normalText",
"=",
"curses",
".",
"A_NORMAL",
"screen",
".",
"border",
"(",
"0",
")",
"curses",
".",
"curs_set",
"(",
"0",
")",
"max_row",
"=",
"height",
"-",
"15",
"# max number of rows",
"box",
"=",
"curses",
".",
"newwin",
"(",
"max_row",
"+",
"2",
",",
"int",
"(",
"width",
"-",
"2",
")",
",",
"1",
",",
"1",
")",
"box",
".",
"box",
"(",
")",
"fmt",
"=",
"PartialFormatter",
"(",
")",
"row_num",
"=",
"len",
"(",
"objects",
")",
"pages",
"=",
"int",
"(",
"ceil",
"(",
"row_num",
"/",
"max_row",
")",
")",
"position",
"=",
"1",
"page",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"max_row",
"+",
"1",
")",
":",
"if",
"row_num",
"==",
"0",
":",
"box",
".",
"addstr",
"(",
"1",
",",
"1",
",",
"\"There aren't strings\"",
",",
"highlightText",
")",
"else",
":",
"if",
"(",
"i",
"==",
"position",
")",
":",
"box",
".",
"addstr",
"(",
"i",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"highlightText",
")",
"else",
":",
"box",
".",
"addstr",
"(",
"i",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"normalText",
")",
"if",
"i",
"==",
"row_num",
":",
"break",
"screen",
".",
"refresh",
"(",
")",
"box",
".",
"refresh",
"(",
")",
"x",
"=",
"screen",
".",
"getch",
"(",
")",
"while",
"x",
"!=",
"27",
":",
"if",
"x",
"==",
"curses",
".",
"KEY_DOWN",
":",
"if",
"page",
"==",
"1",
":",
"if",
"position",
"<",
"i",
":",
"position",
"=",
"position",
"+",
"1",
"else",
":",
"if",
"pages",
">",
"1",
":",
"page",
"=",
"page",
"+",
"1",
"position",
"=",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"elif",
"page",
"==",
"pages",
":",
"if",
"position",
"<",
"row_num",
":",
"position",
"=",
"position",
"+",
"1",
"else",
":",
"if",
"position",
"<",
"max_row",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
":",
"position",
"=",
"position",
"+",
"1",
"else",
":",
"page",
"=",
"page",
"+",
"1",
"position",
"=",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"if",
"x",
"==",
"curses",
".",
"KEY_UP",
":",
"if",
"page",
"==",
"1",
":",
"if",
"position",
">",
"1",
":",
"position",
"=",
"position",
"-",
"1",
"else",
":",
"if",
"position",
">",
"(",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
")",
":",
"position",
"=",
"position",
"-",
"1",
"else",
":",
"page",
"=",
"page",
"-",
"1",
"position",
"=",
"max_row",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"screen",
".",
"erase",
"(",
")",
"if",
"x",
"==",
"ord",
"(",
"\"\\n\"",
")",
"and",
"row_num",
"!=",
"0",
":",
"screen",
".",
"erase",
"(",
")",
"screen",
".",
"border",
"(",
"0",
")",
"service",
"=",
"objects",
"[",
"position",
"-",
"1",
"]",
"text",
"=",
"fmt",
".",
"format",
"(",
"callback_text",
",",
"*",
"*",
"service",
")",
"screen",
".",
"addstr",
"(",
"max_row",
"+",
"4",
",",
"3",
",",
"text",
")",
"text",
"=",
"callback",
"(",
"service",
")",
"count",
"=",
"0",
"for",
"line",
"in",
"text",
":",
"screen",
".",
"addstr",
"(",
"max_row",
"+",
"5",
"+",
"count",
",",
"3",
",",
"line",
")",
"count",
"+=",
"1",
"box",
".",
"erase",
"(",
")",
"screen",
".",
"border",
"(",
"0",
")",
"box",
".",
"border",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
",",
"max_row",
"+",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
")",
":",
"if",
"row_num",
"==",
"0",
":",
"box",
".",
"addstr",
"(",
"1",
",",
"1",
",",
"\"There aren't strings\"",
",",
"highlightText",
")",
"else",
":",
"if",
"(",
"i",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"==",
"position",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
")",
":",
"box",
".",
"addstr",
"(",
"i",
"-",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"highlightText",
")",
"else",
":",
"box",
".",
"addstr",
"(",
"i",
"-",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"normalText",
")",
"if",
"i",
"==",
"row_num",
":",
"break",
"screen",
".",
"refresh",
"(",
")",
"box",
".",
"refresh",
"(",
")",
"x",
"=",
"screen",
".",
"getch",
"(",
")",
"curses",
".",
"endwin",
"(",
")",
"exit",
"(",
")"
]
| Draws an ncurses interface. Based on the given object list, every object should have a "string" key, this is what's displayed on the screen, callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868 | [
"Draws",
"a",
"ncurses",
"interface",
".",
"Based",
"on",
"the",
"given",
"object",
"list",
"every",
"object",
"should",
"have",
"a",
"string",
"key",
"this",
"is",
"whats",
"displayed",
"on",
"the",
"screen",
"callback",
"is",
"called",
"with",
"the",
"selected",
"object",
".",
"Rest",
"of",
"the",
"code",
"is",
"modified",
"from",
":",
"https",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"30834868"
]
| python | valid |
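A sketch invocation of `draw_interface`; the object list, callback, and template string are made up. Each object needs the "string" key, and the callback must return an iterable of lines.

from jackal.utils import draw_interface

objects = [{'string': 'host-{}'.format(i), 'port': 22} for i in range(30)]

def on_select(service):
    return ['selected {}'.format(service['string'])]

# Enter selects an entry, Esc quits (the function calls exit()).
draw_interface(objects, on_select, 'port: {port}')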
ankitmathur3193/song-cli | song/commands/MusicWebsiteParser/MrJattParser.py | https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/MusicWebsiteParser/MrJattParser.py#L29-L45 | def list_of_all_href(self,html):
'''
It will return all hyperlinks found in the mr-jatt page for download
'''
soup=BeautifulSoup(html)
links=[]
a_list=soup.findAll('a','touch')
for x in xrange(len(a_list)-1):
link = a_list[x].get('href')
name = a_list[x]
name = str(name)
name=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',name)
name=re.sub(r'^[0-9]+\.','',name)
links.append([link,name])
#quit()
return links | [
"def",
"list_of_all_href",
"(",
"self",
",",
"html",
")",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"html",
")",
"links",
"=",
"[",
"]",
"a_list",
"=",
"soup",
".",
"findAll",
"(",
"'a'",
",",
"'touch'",
")",
"for",
"x",
"in",
"xrange",
"(",
"len",
"(",
"a_list",
")",
"-",
"1",
")",
":",
"link",
"=",
"a_list",
"[",
"x",
"]",
".",
"get",
"(",
"'href'",
")",
"name",
"=",
"a_list",
"[",
"x",
"]",
"name",
"=",
"str",
"(",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"r'<a.*/>|<span.*\">|</span>|</a>|<a.*html\">|<font.*\">|</font>'",
",",
"''",
",",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"r'^[0-9]+\\.'",
",",
"''",
",",
"name",
")",
"links",
".",
"append",
"(",
"[",
"link",
",",
"name",
"]",
")",
"#quit()",
"return",
"links"
]
| It will return all hyperlinks found in the mr-jatt page for download | [
"It",
"will",
"return",
"all",
"hyper",
"links",
"found",
"in",
"the",
"mr",
"-",
"jatt",
"page",
"for",
"download"
]
| python | test |
Kozea/cairocffi | cairocffi/context.py | https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1370-L1380 | def mask(self, pattern):
"""A drawing operator that paints the current source
using the alpha channel of :obj:`pattern` as a mask.
(Opaque areas of :obj:`pattern` are painted with the source,
transparent areas are not painted.)
:param pattern: A :class:`Pattern` object.
"""
cairo.cairo_mask(self._pointer, pattern._pointer)
self._check_status() | [
"def",
"mask",
"(",
"self",
",",
"pattern",
")",
":",
"cairo",
".",
"cairo_mask",
"(",
"self",
".",
"_pointer",
",",
"pattern",
".",
"_pointer",
")",
"self",
".",
"_check_status",
"(",
")"
]
| A drawing operator that paints the current source
using the alpha channel of :obj:`pattern` as a mask.
(Opaque areas of :obj:`pattern` are painted with the source,
transparent areas are not painted.)
:param pattern: A :class:`Pattern` object. | [
"A",
"drawing",
"operator",
"that",
"paints",
"the",
"current",
"source",
"using",
"the",
"alpha",
"channel",
"of",
":",
"obj",
":",
"pattern",
"as",
"a",
"mask",
".",
"(",
"Opaque",
"areas",
"of",
":",
"obj",
":",
"pattern",
"are",
"painted",
"with",
"the",
"source",
"transparent",
"areas",
"are",
"not",
"painted",
".",
")"
]
| python | train |
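A hedged sketch of `mask` with cairocffi; the surface size and gradient stops are illustrative choices.

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.set_source_rgb(1, 0, 0)                # opaque red source
grad = cairo.LinearGradient(0, 0, 100, 0)
grad.add_color_stop_rgba(0, 0, 0, 0, 1)    # alpha 1: source painted
grad.add_color_stop_rgba(1, 0, 0, 0, 0)    # alpha 0: nothing painted
ctx.mask(grad)                             # red fades out left to right
surface.write_to_png('mask_demo.png')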
google/grr | grr/server/grr_response_server/databases/mysql_signed_binaries.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_signed_binaries.py#L85-L96 | def DeleteSignedBinaryReferences(self,
binary_id,
cursor=None):
"""Deletes blob references for the given signed binary from the DB."""
cursor.execute(
"""
DELETE FROM signed_binary_references
WHERE binary_type = %s AND binary_path_hash = %s
""", [
binary_id.binary_type.SerializeToDataStore(),
mysql_utils.Hash(binary_id.path)
]) | [
"def",
"DeleteSignedBinaryReferences",
"(",
"self",
",",
"binary_id",
",",
"cursor",
"=",
"None",
")",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\n DELETE FROM signed_binary_references\n WHERE binary_type = %s AND binary_path_hash = %s\n \"\"\"",
",",
"[",
"binary_id",
".",
"binary_type",
".",
"SerializeToDataStore",
"(",
")",
",",
"mysql_utils",
".",
"Hash",
"(",
"binary_id",
".",
"path",
")",
"]",
")"
]
| Deletes blob references for the given signed binary from the DB. | [
"Deletes",
"blob",
"references",
"for",
"the",
"given",
"signed",
"binary",
"from",
"the",
"DB",
"."
]
| python | train |
mitsei/dlkit | dlkit/records/osid/base_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L861-L867 | def clear_decimal_value(self):
"""stub"""
if (self.get_decimal_value_metadata().is_read_only() or
self.get_decimal_value_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['decimalValue'] = \
self.get_decimal_value_metadata().get_default_decimal_values()[0] | [
"def",
"clear_decimal_value",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"get_decimal_value_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
"or",
"self",
".",
"get_decimal_value_metadata",
"(",
")",
".",
"is_required",
"(",
")",
")",
":",
"raise",
"NoAccess",
"(",
")",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'decimalValue'",
"]",
"=",
"self",
".",
"get_decimal_value_metadata",
"(",
")",
".",
"get_default_decimal_values",
"(",
")",
"[",
"0",
"]"
]
| stub | [
"stub"
]
| python | train |
trailofbits/manticore | manticore/platforms/evm.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L1545-L1551 | def MSTORE(self, address, value):
"""Save word to memory"""
if istainted(self.pc):
for taint in get_taints(self.pc):
value = taint_with(value, taint)
self._allocate(address, 32)
self._store(address, value, 32) | [
"def",
"MSTORE",
"(",
"self",
",",
"address",
",",
"value",
")",
":",
"if",
"istainted",
"(",
"self",
".",
"pc",
")",
":",
"for",
"taint",
"in",
"get_taints",
"(",
"self",
".",
"pc",
")",
":",
"value",
"=",
"taint_with",
"(",
"value",
",",
"taint",
")",
"self",
".",
"_allocate",
"(",
"address",
",",
"32",
")",
"self",
".",
"_store",
"(",
"address",
",",
"value",
",",
"32",
")"
]
| Save word to memory | [
"Save",
"word",
"to",
"memory"
]
| python | valid |
pmacosta/pcsv | pcsv/dsort.py | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/dsort.py#L37-L93 | def dsort(fname, order, has_header=True, frow=0, ofname=None):
r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer or float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) | [
"def",
"dsort",
"(",
"fname",
",",
"order",
",",
"has_header",
"=",
"True",
",",
"frow",
"=",
"0",
",",
"ofname",
"=",
"None",
")",
":",
"ofname",
"=",
"fname",
"if",
"ofname",
"is",
"None",
"else",
"ofname",
"obj",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname",
",",
"has_header",
"=",
"has_header",
",",
"frow",
"=",
"frow",
")",
"obj",
".",
"dsort",
"(",
"order",
")",
"obj",
".",
"write",
"(",
"fname",
"=",
"ofname",
",",
"header",
"=",
"has_header",
",",
"append",
"=",
"False",
")"
]
| r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer or float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | [
"r",
"Sort",
"file",
"data",
"."
]
| python | train |
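A usage sketch for `dsort`; the file and column names are assumptions, and the order spec follows the :ref:`CsvColFilter` style referenced in the docstring above.

import pcsv.dsort

# Sort data.csv by "Year" descending, then "Name" ascending; write the
# result to sorted.csv so the input file is left untouched.
pcsv.dsort.dsort(
    fname="data.csv",
    order=[{"Year": "D"}, "Name"],
    has_header=True,
    ofname="sorted.csv",
)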
kylejusticemagnuson/pyti | pyti/average_true_range_percent.py | https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/average_true_range_percent.py#L9-L18 | def average_true_range_percent(close_data, period):
"""
Average True Range Percent.
Formula:
ATRP = (ATR / CLOSE) * 100
"""
catch_errors.check_for_period_error(close_data, period)
atrp = (atr(close_data, period) / np.array(close_data)) * 100
return atrp | [
"def",
"average_true_range_percent",
"(",
"close_data",
",",
"period",
")",
":",
"catch_errors",
".",
"check_for_period_error",
"(",
"close_data",
",",
"period",
")",
"atrp",
"=",
"(",
"atr",
"(",
"close_data",
",",
"period",
")",
"/",
"np",
".",
"array",
"(",
"close_data",
")",
")",
"*",
"100",
"return",
"atrp"
]
| Average True Range Percent.
Formula:
ATRP = (ATR / CLOSE) * 100 | [
"Average",
"True",
"Range",
"Percent",
"."
]
| python | train |
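A worked example of the ATRP formula above; the closing-price series is made up.

from pyti.average_true_range_percent import average_true_range_percent

close = [10.0, 10.5, 10.2, 10.8, 11.0, 10.9, 11.3, 11.1, 11.6, 11.4]
# ATRP = (ATR / CLOSE) * 100, evaluated point-wise over the series.
print(average_true_range_percent(close, 5))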
mitsei/dlkit | dlkit/services/repository.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1483-L1491 | def use_federated_repository_view(self):
"""Pass through to provider AssetLookupSession.use_federated_repository_view"""
self._repository_view = FEDERATED
# self._get_provider_session('asset_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_repository_view()
except AttributeError:
pass | [
"def",
"use_federated_repository_view",
"(",
"self",
")",
":",
"self",
".",
"_repository_view",
"=",
"FEDERATED",
"# self._get_provider_session('asset_lookup_session') # To make sure the session is tracked",
"for",
"session",
"in",
"self",
".",
"_get_provider_sessions",
"(",
")",
":",
"try",
":",
"session",
".",
"use_federated_repository_view",
"(",
")",
"except",
"AttributeError",
":",
"pass"
]
| Pass through to provider AssetLookupSession.use_federated_repository_view | [
"Pass",
"through",
"to",
"provider",
"AssetLookupSession",
".",
"use_federated_repository_view"
]
| python | train |
zsimic/runez | src/runez/click.py | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L49-L53 | def debug(*args, **attrs):
"""Show debugging information."""
attrs.setdefault("is_flag", True)
attrs.setdefault("default", None)
return option(debug, *args, **attrs) | [
"def",
"debug",
"(",
"*",
"args",
",",
"*",
"*",
"attrs",
")",
":",
"attrs",
".",
"setdefault",
"(",
"\"is_flag\"",
",",
"True",
")",
"attrs",
".",
"setdefault",
"(",
"\"default\"",
",",
"None",
")",
"return",
"option",
"(",
"debug",
",",
"*",
"args",
",",
"*",
"*",
"attrs",
")"
]
| Show debugging information. | [
"Show",
"debugging",
"information",
"."
]
| python | train |
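A sketch of wiring the flag into a click command; the command body is illustrative.

import click
import runez.click

@click.command()
@runez.click.debug()
def main(debug):
    # `debug` arrives as True/False/None (None = flag not given).
    click.echo("debug: {}".format(debug))

if __name__ == "__main__":
    main()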
mitsei/dlkit | dlkit/json_/repository/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L697-L722 | def get_asset_contents_for_asset(self, asset_id):
"""Gets an ``AssetList`` from the given Asset.
In plenary mode, the returned list contains all known asset contents or
an error results. Otherwise, the returned list may contain only
those asset contents that are accessible through this session.
:param asset_id: an asset ``Id``
:type asset_id: ``osid.id.Id``
:return: the returned ``AssetContent list``
:rtype: ``osid.repository.AssetContentList``
:raise: ``NullArgument`` -- ``asset_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('repository',
collection='Asset',
runtime=self._runtime)
result = collection.find_one(
dict({'_id': ObjectId(self._get_id(asset_id, 'repository').get_identifier())},
**self._view_filter()))
asset_content_maps = [ac for ac in result['assetContents']]
return objects.AssetContentList(asset_content_maps, runtime=self._runtime, proxy=self._proxy) | [
"def",
"get_asset_contents_for_asset",
"(",
"self",
",",
"asset_id",
")",
":",
"collection",
"=",
"JSONClientValidated",
"(",
"'repository'",
",",
"collection",
"=",
"'Asset'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"result",
"=",
"collection",
".",
"find_one",
"(",
"dict",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"self",
".",
"_get_id",
"(",
"asset_id",
",",
"'repository'",
")",
".",
"get_identifier",
"(",
")",
")",
"}",
",",
"*",
"*",
"self",
".",
"_view_filter",
"(",
")",
")",
")",
"asset_content_maps",
"=",
"[",
"ac",
"for",
"ac",
"in",
"result",
"[",
"'assetContents'",
"]",
"]",
"return",
"objects",
".",
"AssetContentList",
"(",
"asset_content_maps",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")"
]
| Gets an ``AssetList`` from the given Asset.
In plenary mode, the returned list contains all known asset contents or
an error results. Otherwise, the returned list may contain only
those asset contents that are accessible through this session.
:param asset_id: an asset ``Id``
:type asset_id: ``osid.id.Id``
:return: the returned ``AssetContent list``
:rtype: ``osid.repository.AssetContentList``
:raise: ``NullArgument`` -- ``asset_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"an",
"AssetList",
"from",
"the",
"given",
"Asset",
"."
]
| python | train |
openego/ding0 | ding0/grid/lv_grid/build_grid.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/lv_grid/build_grid.py#L601-L752 | def build_lv_graph_residential(lvgd, selected_string_df):
"""Builds nxGraph based on the LV grid model
Parameters
----------
lvgd : LVGridDistrictDing0
Low-voltage grid district object
selected_string_df: :pandas:`pandas.DataFrame<dataframe>`
Table of strings of the selected grid model
Notes
-----
To understand what is happening in this method a few data table columns
are explained here
* `count house branch`: number of houses connected to a string
* `distance house branch`: distance on a string between two house branches
* `string length`: total length of a string
* `length house branch A|B`: cable from string to connection point of a house
A|B in general bring some variation into the typified model grid and
refer to different lengths of house branches and to different cable types
and cable widths, respectively.
"""
houses_connected = (
selected_string_df['occurence'] * selected_string_df[
'count house branch']).sum()
average_load = lvgd.peak_load_residential / \
houses_connected
average_consumption = lvgd.sector_consumption_residential / \
houses_connected
hh_branch = 0
# iterate over each type of branch
for i, row in selected_string_df.iterrows():
# get overall count of branches to set unique branch_no
branch_count_sum = len(list(
lvgd.lv_grid._graph.neighbors(lvgd.lv_grid.station())))
# iterate over it's occurences
for branch_no in range(1, int(row['occurence']) + 1):
hh_branch += 1
# iterate over house branches
for house_branch in range(1, row['count house branch'] + 1):
if house_branch % 2 == 0:
variant = 'B'
else:
variant = 'A'
# cable distributor to divert from main branch
lv_cable_dist = LVCableDistributorDing0(
grid=lvgd.lv_grid,
string_id=i,
branch_no=branch_no + branch_count_sum,
load_no=house_branch)
# add lv_cable_dist to graph
lvgd.lv_grid.add_cable_dist(lv_cable_dist)
# cable distributor within building (to connect load+geno)
lv_cable_dist_building = LVCableDistributorDing0(
grid=lvgd.lv_grid,
string_id=i,
branch_no=branch_no + branch_count_sum,
load_no=house_branch,
in_building=True)
# add lv_cable_dist_building to graph
lvgd.lv_grid.add_cable_dist(lv_cable_dist_building)
lv_load = LVLoadDing0(grid=lvgd.lv_grid,
string_id=i,
branch_no=branch_no + branch_count_sum,
load_no=house_branch,
peak_load=average_load,
consumption={
'residential': average_consumption})
# add lv_load to graph
lvgd.lv_grid.add_load(lv_load)
cable_name = row['cable type'] + \
' 4x1x{}'.format(row['cable width'])
cable_type = lvgd.lv_grid.network.static_data[
'LV_cables'].loc[cable_name]
# connect current lv_cable_dist to station
if house_branch == 1:
# edge connect first house branch in branch with the station
lvgd.lv_grid._graph.add_edge(
lvgd.lv_grid.station(),
lv_cable_dist,
branch=BranchDing0(
length=row['distance house branch'],
kind='cable',
type=cable_type,
id_db='branch_{sector}{branch}_{load}'.format(
branch=hh_branch,
load=house_branch,
sector='HH')
))
# connect current lv_cable_dist to last one
else:
lvgd.lv_grid._graph.add_edge(
lvgd.lv_grid._cable_distributors[-4],
lv_cable_dist,
branch=BranchDing0(
length=row['distance house branch'],
kind='cable',
type=lvgd.lv_grid.network.static_data[
'LV_cables'].loc[cable_name],
id_db='branch_{sector}{branch}_{load}'.format(
branch=hh_branch,
load=house_branch,
sector='HH')))
# connect house to cable distributor
house_cable_name = row['cable type {}'.format(variant)] + \
' 4x1x{}'.format(
row['cable width {}'.format(variant)])
lvgd.lv_grid._graph.add_edge(
lv_cable_dist,
lv_cable_dist_building,
branch=BranchDing0(
length=row['length house branch {}'.format(
variant)],
kind='cable',
type=lvgd.lv_grid.network.static_data['LV_cables']. \
loc[house_cable_name],
id_db='branch_{sector}{branch}_{load}'.format(
branch=hh_branch,
load=house_branch,
sector='HH'))
)
lvgd.lv_grid._graph.add_edge(
lv_cable_dist_building,
lv_load,
branch=BranchDing0(
length=1,
kind='cable',
type=lvgd.lv_grid.network.static_data['LV_cables']. \
loc[house_cable_name],
id_db='branch_{sector}{branch}_{load}'.format(
branch=hh_branch,
load=house_branch,
sector='HH'))
) | [
"def",
"build_lv_graph_residential",
"(",
"lvgd",
",",
"selected_string_df",
")",
":",
"houses_connected",
"=",
"(",
"selected_string_df",
"[",
"'occurence'",
"]",
"*",
"selected_string_df",
"[",
"'count house branch'",
"]",
")",
".",
"sum",
"(",
")",
"average_load",
"=",
"lvgd",
".",
"peak_load_residential",
"/",
"houses_connected",
"average_consumption",
"=",
"lvgd",
".",
"sector_consumption_residential",
"/",
"houses_connected",
"hh_branch",
"=",
"0",
"# iterate over each type of branch",
"for",
"i",
",",
"row",
"in",
"selected_string_df",
".",
"iterrows",
"(",
")",
":",
"# get overall count of branches to set unique branch_no",
"branch_count_sum",
"=",
"len",
"(",
"list",
"(",
"lvgd",
".",
"lv_grid",
".",
"_graph",
".",
"neighbors",
"(",
"lvgd",
".",
"lv_grid",
".",
"station",
"(",
")",
")",
")",
")",
"# iterate over it's occurences",
"for",
"branch_no",
"in",
"range",
"(",
"1",
",",
"int",
"(",
"row",
"[",
"'occurence'",
"]",
")",
"+",
"1",
")",
":",
"hh_branch",
"+=",
"1",
"# iterate over house branches",
"for",
"house_branch",
"in",
"range",
"(",
"1",
",",
"row",
"[",
"'count house branch'",
"]",
"+",
"1",
")",
":",
"if",
"house_branch",
"%",
"2",
"==",
"0",
":",
"variant",
"=",
"'B'",
"else",
":",
"variant",
"=",
"'A'",
"# cable distributor to divert from main branch",
"lv_cable_dist",
"=",
"LVCableDistributorDing0",
"(",
"grid",
"=",
"lvgd",
".",
"lv_grid",
",",
"string_id",
"=",
"i",
",",
"branch_no",
"=",
"branch_no",
"+",
"branch_count_sum",
",",
"load_no",
"=",
"house_branch",
")",
"# add lv_cable_dist to graph",
"lvgd",
".",
"lv_grid",
".",
"add_cable_dist",
"(",
"lv_cable_dist",
")",
"# cable distributor within building (to connect load+geno)",
"lv_cable_dist_building",
"=",
"LVCableDistributorDing0",
"(",
"grid",
"=",
"lvgd",
".",
"lv_grid",
",",
"string_id",
"=",
"i",
",",
"branch_no",
"=",
"branch_no",
"+",
"branch_count_sum",
",",
"load_no",
"=",
"house_branch",
",",
"in_building",
"=",
"True",
")",
"# add lv_cable_dist_building to graph",
"lvgd",
".",
"lv_grid",
".",
"add_cable_dist",
"(",
"lv_cable_dist_building",
")",
"lv_load",
"=",
"LVLoadDing0",
"(",
"grid",
"=",
"lvgd",
".",
"lv_grid",
",",
"string_id",
"=",
"i",
",",
"branch_no",
"=",
"branch_no",
"+",
"branch_count_sum",
",",
"load_no",
"=",
"house_branch",
",",
"peak_load",
"=",
"average_load",
",",
"consumption",
"=",
"{",
"'residential'",
":",
"average_consumption",
"}",
")",
"# add lv_load to graph",
"lvgd",
".",
"lv_grid",
".",
"add_load",
"(",
"lv_load",
")",
"cable_name",
"=",
"row",
"[",
"'cable type'",
"]",
"+",
"' 4x1x{}'",
".",
"format",
"(",
"row",
"[",
"'cable width'",
"]",
")",
"cable_type",
"=",
"lvgd",
".",
"lv_grid",
".",
"network",
".",
"static_data",
"[",
"'LV_cables'",
"]",
".",
"loc",
"[",
"cable_name",
"]",
"# connect current lv_cable_dist to station",
"if",
"house_branch",
"==",
"1",
":",
"# edge connect first house branch in branch with the station",
"lvgd",
".",
"lv_grid",
".",
"_graph",
".",
"add_edge",
"(",
"lvgd",
".",
"lv_grid",
".",
"station",
"(",
")",
",",
"lv_cable_dist",
",",
"branch",
"=",
"BranchDing0",
"(",
"length",
"=",
"row",
"[",
"'distance house branch'",
"]",
",",
"kind",
"=",
"'cable'",
",",
"type",
"=",
"cable_type",
",",
"id_db",
"=",
"'branch_{sector}{branch}_{load}'",
".",
"format",
"(",
"branch",
"=",
"hh_branch",
",",
"load",
"=",
"house_branch",
",",
"sector",
"=",
"'HH'",
")",
")",
")",
"# connect current lv_cable_dist to last one",
"else",
":",
"lvgd",
".",
"lv_grid",
".",
"_graph",
".",
"add_edge",
"(",
"lvgd",
".",
"lv_grid",
".",
"_cable_distributors",
"[",
"-",
"4",
"]",
",",
"lv_cable_dist",
",",
"branch",
"=",
"BranchDing0",
"(",
"length",
"=",
"row",
"[",
"'distance house branch'",
"]",
",",
"kind",
"=",
"'cable'",
",",
"type",
"=",
"lvgd",
".",
"lv_grid",
".",
"network",
".",
"static_data",
"[",
"'LV_cables'",
"]",
".",
"loc",
"[",
"cable_name",
"]",
",",
"id_db",
"=",
"'branch_{sector}{branch}_{load}'",
".",
"format",
"(",
"branch",
"=",
"hh_branch",
",",
"load",
"=",
"house_branch",
",",
"sector",
"=",
"'HH'",
")",
")",
")",
"# connect house to cable distributor",
"house_cable_name",
"=",
"row",
"[",
"'cable type {}'",
".",
"format",
"(",
"variant",
")",
"]",
"+",
"' 4x1x{}'",
".",
"format",
"(",
"row",
"[",
"'cable width {}'",
".",
"format",
"(",
"variant",
")",
"]",
")",
"lvgd",
".",
"lv_grid",
".",
"_graph",
".",
"add_edge",
"(",
"lv_cable_dist",
",",
"lv_cable_dist_building",
",",
"branch",
"=",
"BranchDing0",
"(",
"length",
"=",
"row",
"[",
"'length house branch {}'",
".",
"format",
"(",
"variant",
")",
"]",
",",
"kind",
"=",
"'cable'",
",",
"type",
"=",
"lvgd",
".",
"lv_grid",
".",
"network",
".",
"static_data",
"[",
"'LV_cables'",
"]",
".",
"loc",
"[",
"house_cable_name",
"]",
",",
"id_db",
"=",
"'branch_{sector}{branch}_{load}'",
".",
"format",
"(",
"branch",
"=",
"hh_branch",
",",
"load",
"=",
"house_branch",
",",
"sector",
"=",
"'HH'",
")",
")",
")",
"lvgd",
".",
"lv_grid",
".",
"_graph",
".",
"add_edge",
"(",
"lv_cable_dist_building",
",",
"lv_load",
",",
"branch",
"=",
"BranchDing0",
"(",
"length",
"=",
"1",
",",
"kind",
"=",
"'cable'",
",",
"type",
"=",
"lvgd",
".",
"lv_grid",
".",
"network",
".",
"static_data",
"[",
"'LV_cables'",
"]",
".",
"loc",
"[",
"house_cable_name",
"]",
",",
"id_db",
"=",
"'branch_{sector}{branch}_{load}'",
".",
"format",
"(",
"branch",
"=",
"hh_branch",
",",
"load",
"=",
"house_branch",
",",
"sector",
"=",
"'HH'",
")",
")",
")"
]
| Builds nxGraph based on the LV grid model
Parameters
----------
lvgd : LVGridDistrictDing0
Low-voltage grid district object
selected_string_df: :pandas:`pandas.DataFrame<dataframe>`
Table of strings of the selected grid model
Notes
-----
To understand what is happening in this method a few data table columns
are explained here
* `count house branch`: number of houses connected to a string
* `distance house branch`: distance on a string between two house branches
* `string length`: total length of a string
* `length house branch A|B`: cable from string to connection point of a house
A|B in general bring some variation into the typified model grid and
refer to different lengths of house branches and to different cable types
and cable widths, respectively. | [
"Builds",
"nxGraph",
"based",
"on",
"the",
"LV",
"grid",
"model"
]
| python | train |
trailofbits/manticore | manticore/native/cpu/x86.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L5781-L5790 | def VEXTRACTF128(cpu, dest, src, offset):
"""Extract Packed Floating-Point Values
Extracts 128 bits of packed floating-point values from the source
operand (second operand) at a 128-bit offset from imm8[0] into the
destination operand (first operand). The destination may be either an
XMM register or a 128-bit memory location.
"""
offset = offset.read()
dest.write(Operators.EXTRACT(src.read(), offset * 128, (offset + 1) * 128)) | [
"def",
"VEXTRACTF128",
"(",
"cpu",
",",
"dest",
",",
"src",
",",
"offset",
")",
":",
"offset",
"=",
"offset",
".",
"read",
"(",
")",
"dest",
".",
"write",
"(",
"Operators",
".",
"EXTRACT",
"(",
"src",
".",
"read",
"(",
")",
",",
"offset",
"*",
"128",
",",
"(",
"offset",
"+",
"1",
")",
"*",
"128",
")",
")"
]
| Extract Packed Floating-Point Values
Extracts 128 bits of packed floating-point values from the source
operand (second operand) at a 128-bit offset from imm8[0] into the
destination operand (first operand). The destination may be either an
XMM register or a 128-bit memory location. | [
"Extract",
"Packed",
"Floating",
"-",
"Point",
"Values"
]
| python | valid |
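A plain-Python sketch of the EXTRACT semantics used above, pulling the selected 128-bit lane out of a 256-bit value:

def extract_lane(value_256, offset):
    # offset is imm8[0]: 0 selects bits 0..127, 1 selects bits 128..255.
    low = offset * 128
    return (value_256 >> low) & ((1 << 128) - 1)

v = (0xAAAA << 128) | 0xBBBB
assert extract_lane(v, 0) == 0xBBBB
assert extract_lane(v, 1) == 0xAAAA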
riga/scinum | scinum.py | https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L431-L437 | def set_uncertainty(self, name, value):
"""
Sets the uncertainty *value* for an uncertainty *name*. *value* should have one of the
formats as described in :py:meth:`uncertainties`.
"""
uncertainties = self.__class__.uncertainties.fparse(self, {name: value})
self._uncertainties.update(uncertainties) | [
"def",
"set_uncertainty",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"uncertainties",
"=",
"self",
".",
"__class__",
".",
"uncertainties",
".",
"fparse",
"(",
"self",
",",
"{",
"name",
":",
"value",
"}",
")",
"self",
".",
"_uncertainties",
".",
"update",
"(",
"uncertainties",
")"
]
| Sets the uncertainty *value* for an uncertainty *name*. *value* should have one of the
formats as described in :py:meth:`uncertainties`. | [
"Sets",
"the",
"uncertainty",
"*",
"value",
"*",
"for",
"an",
"uncertainty",
"*",
"name",
"*",
".",
"*",
"value",
"*",
"should",
"have",
"one",
"of",
"the",
"formats",
"as",
"described",
"in",
":",
"py",
":",
"meth",
":",
"uncertainties",
"."
]
| python | train |
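A usage sketch for `set_uncertainty`; the numbers are illustrative.

from scinum import Number

n = Number(17.32, {"stat": 0.5})       # start with one named uncertainty
n.set_uncertainty("syst", 0.3)         # add or overwrite another by name
n.set_uncertainty("lumi", (0.2, 0.4))  # asymmetric (up, down) values work too
print(n.str())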
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L606-L628 | def project_delete_event(self, proj_info):
"""Process project delete event."""
LOG.debug("Processing project_delete_event...")
proj_id = proj_info.get('resource_info')
proj_name = self.get_project_name(proj_id)
if proj_name:
try:
self.dcnm_client.delete_project(proj_name,
self.cfg.dcnm.
default_partition_name)
except dexc.DfaClientRequestFailed:
# Failed to delete project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to create project %s on DCNM.",
proj_name)
self.update_project_info_cache(proj_id, name=proj_name,
opcode='delete',
result=constants.DELETE_FAIL)
else:
self.update_project_info_cache(proj_id, opcode='delete')
LOG.debug('Deleted project:%s', proj_name)
self.project_delete_notif(proj_id, proj_name) | [
"def",
"project_delete_event",
"(",
"self",
",",
"proj_info",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Processing project_delete_event...\"",
")",
"proj_id",
"=",
"proj_info",
".",
"get",
"(",
"'resource_info'",
")",
"proj_name",
"=",
"self",
".",
"get_project_name",
"(",
"proj_id",
")",
"if",
"proj_name",
":",
"try",
":",
"self",
".",
"dcnm_client",
".",
"delete_project",
"(",
"proj_name",
",",
"self",
".",
"cfg",
".",
"dcnm",
".",
"default_partition_name",
")",
"except",
"dexc",
".",
"DfaClientRequestFailed",
":",
"# Failed to delete project in DCNM.",
"# Save the info and mark it as failure and retry it later.",
"LOG",
".",
"error",
"(",
"\"Failed to create project %s on DCNM.\"",
",",
"proj_name",
")",
"self",
".",
"update_project_info_cache",
"(",
"proj_id",
",",
"name",
"=",
"proj_name",
",",
"opcode",
"=",
"'delete'",
",",
"result",
"=",
"constants",
".",
"DELETE_FAIL",
")",
"else",
":",
"self",
".",
"update_project_info_cache",
"(",
"proj_id",
",",
"opcode",
"=",
"'delete'",
")",
"LOG",
".",
"debug",
"(",
"'Deleted project:%s'",
",",
"proj_name",
")",
"self",
".",
"project_delete_notif",
"(",
"proj_id",
",",
"proj_name",
")"
]
| Process project delete event. | [
"Process",
"project",
"delete",
"event",
"."
]
| python | train |
aleju/imgaug | imgaug/augmentables/kps.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L105-L131 | def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
xy_proj = project_coords([(self.x, self.y)], from_shape, to_shape)
return self.deepcopy(x=xy_proj[0][0], y=xy_proj[0][1]) | [
"def",
"project",
"(",
"self",
",",
"from_shape",
",",
"to_shape",
")",
":",
"xy_proj",
"=",
"project_coords",
"(",
"[",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
")",
"]",
",",
"from_shape",
",",
"to_shape",
")",
"return",
"self",
".",
"deepcopy",
"(",
"x",
"=",
"xy_proj",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"y",
"=",
"xy_proj",
"[",
"0",
"]",
"[",
"1",
"]",
")"
]
| Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates. | [
"Project",
"the",
"keypoint",
"onto",
"a",
"new",
"position",
"on",
"a",
"new",
"image",
"."
]
| python | valid |
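A check that reproduces the docstring's numbers: a keypoint at (10, 20) on a 100x100 image projects to (20, 40) on a 200x200 image.

from imgaug.augmentables.kps import Keypoint

kp = Keypoint(x=10, y=20)
kp2 = kp.project(from_shape=(100, 100), to_shape=(200, 200))
print(kp2.x, kp2.y)  # -> 20.0 40.0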
ynop/audiomate | audiomate/corpus/io/common_voice.py | https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/io/common_voice.py#L39-L48 | def get_subset_ids(path):
""" Return a list with ids of all available subsets (based on existing csv-files). """
all = []
for path in glob.glob(os.path.join(path, '*.tsv')):
file_name = os.path.split(path)[1]
basename = os.path.splitext(file_name)[0]
all.append(basename)
return all | [
"def",
"get_subset_ids",
"(",
"path",
")",
":",
"all",
"=",
"[",
"]",
"for",
"path",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'*.tsv'",
")",
")",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"1",
"]",
"basename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"[",
"0",
"]",
"all",
".",
"append",
"(",
"basename",
")",
"return",
"all"
]
| Return a list with ids of all available subsets (based on existing tsv-files). | [
"Return",
"a",
"list",
"with",
"ids",
"of",
"all",
"available",
"subsets",
"(",
"based",
"on",
"existing",
"csv",
"-",
"files",
")",
"."
]
| python | train |
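As a small aside to the record above, the basename extraction it relies on works like this (the path below is invented):

import os

file_name = os.path.split('/data/corpus/dev.tsv')[1]   # 'dev.tsv'
basename = os.path.splitext(file_name)[0]              # 'dev'
print(basename)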
CI-WATER/gsshapy | gsshapy/orm/tim.py | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L60-L82 | def _read(self, directory, filename, session, path, name, extension, spatial=None, spatialReferenceID=None, replaceParamFile=None):
"""
Generic Time Series Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
timeSeries = []
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
record = {'time': sline[0],
'values': []}
for idx in range(1, len(sline)):
record['values'].append(sline[idx])
timeSeries.append(record)
self._createTimeSeriesObjects(timeSeries, filename) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
"=",
"None",
",",
"spatialReferenceID",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"# Assign file extension attribute to file object",
"self",
".",
"fileExtension",
"=",
"extension",
"timeSeries",
"=",
"[",
"]",
"# Open file and parse into a data structure",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"record",
"=",
"{",
"'time'",
":",
"sline",
"[",
"0",
"]",
",",
"'values'",
":",
"[",
"]",
"}",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"sline",
")",
")",
":",
"record",
"[",
"'values'",
"]",
".",
"append",
"(",
"sline",
"[",
"idx",
"]",
")",
"timeSeries",
".",
"append",
"(",
"record",
")",
"self",
".",
"_createTimeSeriesObjects",
"(",
"timeSeries",
",",
"filename",
")"
]
| Generic Time Series Read from File Method | [
"Generic",
"Time",
"Series",
"Read",
"from",
"File",
"Method"
]
| python | train |
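The whitespace parsing in the record above, rerun here on two invented time-series lines to show the structure it builds (the slice is equivalent to the original index loop):

lines = ['0.0 1.2 3.4', '1.0 1.3 3.5']   # stand-in for the file contents
timeSeries = []
for line in lines:
    sline = line.strip().split()
    timeSeries.append({'time': sline[0], 'values': sline[1:]})
print(timeSeries[0])  # {'time': '0.0', 'values': ['1.2', '3.4']}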
fracpete/python-weka-wrapper3 | python/weka/core/classes.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L1471-L1485 | def find(self, name):
"""
Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
"""
result = None
for t in self.array:
if str(t) == name:
result = Tag(t.jobject)
break
return result | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"result",
"=",
"None",
"for",
"t",
"in",
"self",
".",
"array",
":",
"if",
"str",
"(",
"t",
")",
"==",
"name",
":",
"result",
"=",
"Tag",
"(",
"t",
".",
"jobject",
")",
"break",
"return",
"result"
]
| Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag | [
"Returns",
"the",
"Tag",
"that",
"matches",
"the",
"name",
"."
]
| python | train |
tamasgal/km3pipe | km3pipe/utils/streamds.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L83-L87 | def available_streams():
"""Show a short list of available streams."""
sds = kp.db.StreamDS()
print("Available streams: ")
print(', '.join(sorted(sds.streams))) | [
"def",
"available_streams",
"(",
")",
":",
"sds",
"=",
"kp",
".",
"db",
".",
"StreamDS",
"(",
")",
"print",
"(",
"\"Available streams: \"",
")",
"print",
"(",
"', '",
".",
"join",
"(",
"sorted",
"(",
"sds",
".",
"streams",
")",
")",
")"
]
| Show a short list of available streams. | [
"Show",
"a",
"short",
"list",
"of",
"available",
"streams",
"."
]
| python | train |
diux-dev/ncluster | ncluster/aws_backend.py | https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L813-L892 | def make_job(
name: str = '',
run_name: str = '',
num_tasks: int = 1,
install_script: str = '',
instance_type: str = '',
image_name: str = '',
create_resources=True,
**kwargs) -> Job:
"""
Args:
create_resources: if True, will create resources if necessary
name: see backend.make_task
run_name: see backend.make_task
num_tasks: number of tasks to launch
install_script: see make_task
instance_type: see make_task
image_name: see make_task
Returns:
"""
assert num_tasks > 0, f"Can't create job with {num_tasks} tasks"
assert name.count(
'.') <= 1, "Job name has too many .'s (see ncluster design: Run/Job/Task hierarchy for convention)"
# dummy tasks for logging
tasks = [backend.Task(f"{i}.{name}") for i in range(num_tasks)]
_set_aws_environment(tasks[0])
if create_resources:
_maybe_create_resources(tasks[0])
name = ncluster_globals.auto_assign_job_name_if_needed(name)
run_name = ncluster_globals.auto_assign_run_name_if_needed(run_name)
_run = ncluster_globals.create_run_if_needed(run_name, make_run)
job = Job(name=name, tasks=tasks, run_name=run_name, **kwargs)
exceptions = []
# make tasks in parallel
def make_task_fn(i: int):
try:
tasks[i] = make_task(f"{i}.{name}", run_name=run_name,
install_script=install_script,
instance_type=instance_type, image_name=image_name,
logging_task=tasks[i],
create_resources=False,
# handle resources in job already
**kwargs)
except Exception as e:
exceptions.append(e)
util.log("Creating threads")
threads = [threading.Thread(name=f'make_task_{i}',
target=make_task_fn, args=[i])
for i in range(num_tasks)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print("Exception are ", exceptions)
if exceptions:
raise exceptions[0]
job.tasks = tasks
# double check that all instances are in the same placement_group group
# this can happen if some instances from previous smaller run are getting reused
placement_dict = {task.instance.placement_group: task.name for task in
job.tasks}
# TODO: make placement_group group name derived from run, to make it deterministic
# on individual instance restarts
if len(placement_dict) > 1:
util.log("Job tasks are spread over multiple placement_group groups")
pprint.pprint(placement_dict)
raise RuntimeError(
f"Got instance spread over multiple placement_group groups: {placement_dict}. Must terminate all instances in run {run_name} and try again.")
return job | [
"def",
"make_job",
"(",
"name",
":",
"str",
"=",
"''",
",",
"run_name",
":",
"str",
"=",
"''",
",",
"num_tasks",
":",
"int",
"=",
"1",
",",
"install_script",
":",
"str",
"=",
"''",
",",
"instance_type",
":",
"str",
"=",
"''",
",",
"image_name",
":",
"str",
"=",
"''",
",",
"create_resources",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"->",
"Job",
":",
"assert",
"num_tasks",
">",
"0",
",",
"f\"Can't create job with {num_tasks} tasks\"",
"assert",
"name",
".",
"count",
"(",
"'.'",
")",
"<=",
"1",
",",
"\"Job name has too many .'s (see ncluster design: Run/Job/Task hierarchy for convention)\"",
"# dummy tasks for logging",
"tasks",
"=",
"[",
"backend",
".",
"Task",
"(",
"f\"{i}.{name}\"",
")",
"for",
"i",
"in",
"range",
"(",
"num_tasks",
")",
"]",
"_set_aws_environment",
"(",
"tasks",
"[",
"0",
"]",
")",
"if",
"create_resources",
":",
"_maybe_create_resources",
"(",
"tasks",
"[",
"0",
"]",
")",
"name",
"=",
"ncluster_globals",
".",
"auto_assign_job_name_if_needed",
"(",
"name",
")",
"run_name",
"=",
"ncluster_globals",
".",
"auto_assign_run_name_if_needed",
"(",
"run_name",
")",
"_run",
"=",
"ncluster_globals",
".",
"create_run_if_needed",
"(",
"run_name",
",",
"make_run",
")",
"job",
"=",
"Job",
"(",
"name",
"=",
"name",
",",
"tasks",
"=",
"tasks",
",",
"run_name",
"=",
"run_name",
",",
"*",
"*",
"kwargs",
")",
"exceptions",
"=",
"[",
"]",
"# make tasks in parallel",
"def",
"make_task_fn",
"(",
"i",
":",
"int",
")",
":",
"try",
":",
"tasks",
"[",
"i",
"]",
"=",
"make_task",
"(",
"f\"{i}.{name}\"",
",",
"run_name",
"=",
"run_name",
",",
"install_script",
"=",
"install_script",
",",
"instance_type",
"=",
"instance_type",
",",
"image_name",
"=",
"image_name",
",",
"logging_task",
"=",
"tasks",
"[",
"i",
"]",
",",
"create_resources",
"=",
"False",
",",
"# handle resources in job already",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"exceptions",
".",
"append",
"(",
"e",
")",
"util",
".",
"log",
"(",
"\"Creating threads\"",
")",
"threads",
"=",
"[",
"threading",
".",
"Thread",
"(",
"name",
"=",
"f'make_task_{i}'",
",",
"target",
"=",
"make_task_fn",
",",
"args",
"=",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"num_tasks",
")",
"]",
"for",
"thread",
"in",
"threads",
":",
"thread",
".",
"start",
"(",
")",
"for",
"thread",
"in",
"threads",
":",
"thread",
".",
"join",
"(",
")",
"print",
"(",
"\"Exception are \"",
",",
"exceptions",
")",
"if",
"exceptions",
":",
"raise",
"exceptions",
"[",
"0",
"]",
"job",
".",
"tasks",
"=",
"tasks",
"# double check that all instances are in the same placement_group group",
"# this can happen if some instances from previous smaller run are getting reused",
"placement_dict",
"=",
"{",
"task",
".",
"instance",
".",
"placement_group",
":",
"task",
".",
"name",
"for",
"task",
"in",
"job",
".",
"tasks",
"}",
"# TODO: make placement_group group name derived from run, to make it deterministic",
"# on individual instance restarts",
"if",
"len",
"(",
"placement_dict",
")",
">",
"1",
":",
"util",
".",
"log",
"(",
"\"Job tasks are spread over multiple placement_group groups\"",
")",
"pprint",
".",
"pprint",
"(",
"placement_dict",
")",
"raise",
"RuntimeError",
"(",
"f\"Got instance spread over multiple placement_group groups: {placement_dict}. Must terminate all instances in run {run_name} and try again.\"",
")",
"return",
"job"
]
| Args:
create_resources: if True, will create resources if necessary
name: see backend.make_task
run_name: see backend.make_task
num_tasks: number of tasks to launch
install_script: see make_task
instance_type: see make_task
image_name: see make_task
Returns: | [
"Args",
":",
"create_resources",
":",
"if",
"True",
"will",
"create",
"resources",
"if",
"necessary",
"name",
":",
"see",
"backend",
".",
"make_task",
"run_name",
":",
"see",
"backend",
".",
"make_task",
"num_tasks",
":",
"number",
"of",
"tasks",
"to",
"launch",
"install_script",
":",
"see",
"make_task",
"instance_type",
":",
"see",
"make_task",
"image_name",
":",
"see",
"make_task"
]
| python | train |
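The record above fans task creation out over threads and funnels failures into a shared list; the same pattern, reduced to a self-contained sketch (the worker body is a stand-in for make_task):

import threading

results, exceptions = [None] * 3, []

def worker(i):
    try:
        results[i] = i * i  # stand-in for make_task(f"{i}.{name}", ...)
    except Exception as e:
        exceptions.append(e)

threads = [threading.Thread(target=worker, args=[i]) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
if exceptions:
    raise exceptions[0]  # surface the first failure, as the record does
print(results)  # [0, 1, 4]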
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10796-L10808 | def ekf_status_report_encode(self, flags, velocity_variance, pos_horiz_variance, pos_vert_variance, compass_variance, terrain_alt_variance):
'''
EKF Status message including flags and variances
flags : Flags (uint16_t)
velocity_variance : Velocity variance (float)
pos_horiz_variance : Horizontal Position variance (float)
pos_vert_variance : Vertical Position variance (float)
compass_variance : Compass variance (float)
terrain_alt_variance : Terrain Altitude variance (float)
'''
return MAVLink_ekf_status_report_message(flags, velocity_variance, pos_horiz_variance, pos_vert_variance, compass_variance, terrain_alt_variance) | [
"def",
"ekf_status_report_encode",
"(",
"self",
",",
"flags",
",",
"velocity_variance",
",",
"pos_horiz_variance",
",",
"pos_vert_variance",
",",
"compass_variance",
",",
"terrain_alt_variance",
")",
":",
"return",
"MAVLink_ekf_status_report_message",
"(",
"flags",
",",
"velocity_variance",
",",
"pos_horiz_variance",
",",
"pos_vert_variance",
",",
"compass_variance",
",",
"terrain_alt_variance",
")"
]
| EKF Status message including flags and variances
flags : Flags (uint16_t)
velocity_variance : Velocity variance (float)
pos_horiz_variance : Horizontal Position variance (float)
pos_vert_variance : Vertical Position variance (float)
compass_variance : Compass variance (float)
terrain_alt_variance : Terrain Altitude variance (float) | [
"EKF",
"Status",
"message",
"including",
"flags",
"and",
"variances"
]
| python | train |
artefactual-labs/mets-reader-writer | metsrw/metadata.py | https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/metadata.py#L540-L568 | def parse(cls, root):
"""
Create a new MDWrap by parsing root.
:param root: Element or ElementTree to be parsed into a MDWrap.
:raises exceptions.ParseError: If mdWrap does not contain MDTYPE
:raises exceptions.ParseError: If xmlData contains no children
"""
if root.tag != utils.lxmlns("mets") + "mdWrap":
raise exceptions.ParseError(
"MDWrap can only parse mdWrap elements with METS namespace."
)
mdtype = root.get("MDTYPE")
if not mdtype:
raise exceptions.ParseError("mdWrap must have a MDTYPE")
othermdtype = root.get("OTHERMDTYPE")
document = root.xpath("mets:xmlData/*", namespaces=utils.NAMESPACES)
if len(document) == 0:
raise exceptions.ParseError(
"All mdWrap/xmlData elements must have at least one child; this"
" one has none"
)
elif len(document) == 1:
document = document[0]
# Create a copy, so that the element is not moved by duplicate references.
document = copy.deepcopy(document)
return cls(document, mdtype, othermdtype) | [
"def",
"parse",
"(",
"cls",
",",
"root",
")",
":",
"if",
"root",
".",
"tag",
"!=",
"utils",
".",
"lxmlns",
"(",
"\"mets\"",
")",
"+",
"\"mdWrap\"",
":",
"raise",
"exceptions",
".",
"ParseError",
"(",
"\"MDWrap can only parse mdWrap elements with METS namespace.\"",
")",
"mdtype",
"=",
"root",
".",
"get",
"(",
"\"MDTYPE\"",
")",
"if",
"not",
"mdtype",
":",
"raise",
"exceptions",
".",
"ParseError",
"(",
"\"mdWrap must have a MDTYPE\"",
")",
"othermdtype",
"=",
"root",
".",
"get",
"(",
"\"OTHERMDTYPE\"",
")",
"document",
"=",
"root",
".",
"xpath",
"(",
"\"mets:xmlData/*\"",
",",
"namespaces",
"=",
"utils",
".",
"NAMESPACES",
")",
"if",
"len",
"(",
"document",
")",
"==",
"0",
":",
"raise",
"exceptions",
".",
"ParseError",
"(",
"\"All mdWrap/xmlData elements must have at least one child; this\"",
"\" one has none\"",
")",
"elif",
"len",
"(",
"document",
")",
"==",
"1",
":",
"document",
"=",
"document",
"[",
"0",
"]",
"# Create a copy, so that the element is not moved by duplicate references.",
"document",
"=",
"copy",
".",
"deepcopy",
"(",
"document",
")",
"return",
"cls",
"(",
"document",
",",
"mdtype",
",",
"othermdtype",
")"
]
| Create a new MDWrap by parsing root.
:param root: Element or ElementTree to be parsed into a MDWrap.
:raises exceptions.ParseError: If mdWrap does not contain MDTYPE
:raises exceptions.ParseError: If xmlData contains no children | [
"Create",
"a",
"new",
"MDWrap",
"by",
"parsing",
"root",
"."
]
| python | train |
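A hedged, self-contained illustration of the namespace-qualified parsing in the record above; the METS namespace URI is the real one, but the document literal is invented:

from lxml import etree

NS = {'mets': 'http://www.loc.gov/METS/'}
root = etree.fromstring(
    b'<mets:mdWrap MDTYPE="DC" xmlns:mets="http://www.loc.gov/METS/">'
    b'<mets:xmlData><title>example</title></mets:xmlData></mets:mdWrap>')
children = root.xpath('mets:xmlData/*', namespaces=NS)
print(root.get('MDTYPE'), len(children))  # DC 1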
pantsbuild/pants | src/python/pants/backend/jvm/targets/jarable.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/targets/jarable.py#L32-L43 | def get_artifact_info(self):
"""Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
describing the jar for this target and a bool indicating if this target is exportable.
"""
exported = bool(self.provides)
org = self.provides.org if exported else 'internal'
name = self.provides.name if exported else self.identifier
# TODO(John Sirois): This should return something less than a JarDependency encapsulating just
# the org and name. Perhaps a JarFamily?
return JarDependency(org=org, name=name, rev=None), exported | [
"def",
"get_artifact_info",
"(",
"self",
")",
":",
"exported",
"=",
"bool",
"(",
"self",
".",
"provides",
")",
"org",
"=",
"self",
".",
"provides",
".",
"org",
"if",
"exported",
"else",
"'internal'",
"name",
"=",
"self",
".",
"provides",
".",
"name",
"if",
"exported",
"else",
"self",
".",
"identifier",
"# TODO(John Sirois): This should return something less than a JarDependency encapsulating just",
"# the org and name. Perhaps a JarFamily?",
"return",
"JarDependency",
"(",
"org",
"=",
"org",
",",
"name",
"=",
"name",
",",
"rev",
"=",
"None",
")",
",",
"exported"
]
| Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
describing the jar for this target and a bool indicating if this target is exportable. | [
"Returns",
"a",
"tuple",
"composed",
"of",
"a",
":",
"class",
":",
"pants",
".",
"java",
".",
"jar",
".",
"JarDependency",
"describing",
"the",
"jar",
"for",
"this",
"target",
"and",
"a",
"bool",
"indicating",
"if",
"this",
"target",
"is",
"exportable",
"."
]
| python | train |
rytilahti/python-eq3bt | eq3bt/connection.py | https://github.com/rytilahti/python-eq3bt/blob/595459d9885920cf13b7059a1edd2cf38cede1f0/eq3bt/connection.py#L53-L57 | def handleNotification(self, handle, data):
"""Handle Callback from a Bluetooth (GATT) request."""
_LOGGER.debug("Got notification from %s: %s", handle, codecs.encode(data, 'hex'))
if handle in self._callbacks:
self._callbacks[handle](data) | [
"def",
"handleNotification",
"(",
"self",
",",
"handle",
",",
"data",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Got notification from %s: %s\"",
",",
"handle",
",",
"codecs",
".",
"encode",
"(",
"data",
",",
"'hex'",
")",
")",
"if",
"handle",
"in",
"self",
".",
"_callbacks",
":",
"self",
".",
"_callbacks",
"[",
"handle",
"]",
"(",
"data",
")"
]
| Handle Callback from a Bluetooth (GATT) request. | [
"Handle",
"Callback",
"from",
"a",
"Bluetooth",
"(",
"GATT",
")",
"request",
"."
]
| python | train |
mdickinson/bigfloat | bigfloat/core.py | https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L99-L113 | def _mpfr_get_str2(base, ndigits, op, rounding_mode):
"""
Variant of mpfr_get_str, for internal use: simply splits off the '-'
sign from the digit string, and returns a triple
(sign, digits, exp)
Also converts the byte-string produced by mpfr_get_str to Unicode.
"""
digits, exp = mpfr.mpfr_get_str(base, ndigits, op, rounding_mode)
negative = digits.startswith('-')
if negative:
digits = digits[1:]
return negative, digits, exp | [
"def",
"_mpfr_get_str2",
"(",
"base",
",",
"ndigits",
",",
"op",
",",
"rounding_mode",
")",
":",
"digits",
",",
"exp",
"=",
"mpfr",
".",
"mpfr_get_str",
"(",
"base",
",",
"ndigits",
",",
"op",
",",
"rounding_mode",
")",
"negative",
"=",
"digits",
".",
"startswith",
"(",
"'-'",
")",
"if",
"negative",
":",
"digits",
"=",
"digits",
"[",
"1",
":",
"]",
"return",
"negative",
",",
"digits",
",",
"exp"
]
| Variant of mpfr_get_str, for internal use: simply splits off the '-'
sign from the digit string, and returns a triple
(sign, digits, exp)
Also converts the byte-string produced by mpfr_get_str to Unicode. | [
"Variant",
"of",
"mpfr_get_str",
"for",
"internal",
"use",
":",
"simply",
"splits",
"off",
"the",
"-",
"sign",
"from",
"the",
"digit",
"string",
"and",
"returns",
"a",
"triple"
]
| python | train |
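The sign-splitting step from the record above, isolated with a made-up digit string in place of real mpfr_get_str output:

digits, exp = '-31415', 1  # hypothetical output for -3.1415
negative = digits.startswith('-')
if negative:
    digits = digits[1:]
print(negative, digits, exp)  # True 31415 1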
python-odin/odinweb | odinweb/containers.py | https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/containers.py#L298-L345 | def dispatch_operation(self, operation, request, path_args):
# type: (Operation, BaseHttpRequest, Dict[str, Any]) -> Tuple[Any, Optional[HTTPStatus], Optional[dict]]
"""
Dispatch and handle exceptions from operation.
"""
try:
# path_args is passed by ref so changes can be made.
for middleware in self.middleware.pre_dispatch:
middleware(request, path_args)
resource = operation(request, path_args)
for middleware in self.middleware.post_dispatch:
resource = middleware(request, resource)
except ImmediateHttpResponse as e:
# An exception used to return a response immediately, skipping any
# further processing.
return e.resource, e.status, e.headers
except ValidationError as e:
# A validation error was raised by a resource.
if hasattr(e, 'message_dict'):
resource = Error.from_status(HTTPStatus.BAD_REQUEST, 0, "Failed validation", meta=e.message_dict)
else:
resource = Error.from_status(HTTPStatus.BAD_REQUEST, 0, str(e))
return resource, resource.status, None
except NotImplementedError:
resource = Error.from_status(HTTPStatus.NOT_IMPLEMENTED, 0, "The method has not been implemented")
return resource, resource.status, None
except Exception as e:
if self.debug_enabled:
# If debug is enabled then fallback to the framework's default
# error processing, this often provides convenience features
# to aid in the debugging process.
raise
resource = None
# Fallback to the default handler
if resource is None:
resource = self.handle_500(request, e)
return resource, resource.status, None
else:
return resource, None, None | [
"def",
"dispatch_operation",
"(",
"self",
",",
"operation",
",",
"request",
",",
"path_args",
")",
":",
"# type: (Operation, BaseHttpRequest, Dict[str, Any]) -> Tuple[Any, Optional[HTTPStatus], Optional[dict]]",
"try",
":",
"# path_args is passed by ref so changes can be made.",
"for",
"middleware",
"in",
"self",
".",
"middleware",
".",
"pre_dispatch",
":",
"middleware",
"(",
"request",
",",
"path_args",
")",
"resource",
"=",
"operation",
"(",
"request",
",",
"path_args",
")",
"for",
"middleware",
"in",
"self",
".",
"middleware",
".",
"post_dispatch",
":",
"resource",
"=",
"middleware",
"(",
"request",
",",
"resource",
")",
"except",
"ImmediateHttpResponse",
"as",
"e",
":",
"# An exception used to return a response immediately, skipping any",
"# further processing.",
"return",
"e",
".",
"resource",
",",
"e",
".",
"status",
",",
"e",
".",
"headers",
"except",
"ValidationError",
"as",
"e",
":",
"# A validation error was raised by a resource.",
"if",
"hasattr",
"(",
"e",
",",
"'message_dict'",
")",
":",
"resource",
"=",
"Error",
".",
"from_status",
"(",
"HTTPStatus",
".",
"BAD_REQUEST",
",",
"0",
",",
"\"Failed validation\"",
",",
"meta",
"=",
"e",
".",
"message_dict",
")",
"else",
":",
"resource",
"=",
"Error",
".",
"from_status",
"(",
"HTTPStatus",
".",
"BAD_REQUEST",
",",
"0",
",",
"str",
"(",
"e",
")",
")",
"return",
"resource",
",",
"resource",
".",
"status",
",",
"None",
"except",
"NotImplementedError",
":",
"resource",
"=",
"Error",
".",
"from_status",
"(",
"HTTPStatus",
".",
"NOT_IMPLEMENTED",
",",
"0",
",",
"\"The method has not been implemented\"",
")",
"return",
"resource",
",",
"resource",
".",
"status",
",",
"None",
"except",
"Exception",
"as",
"e",
":",
"if",
"self",
".",
"debug_enabled",
":",
"# If debug is enabled then fallback to the frameworks default",
"# error processing, this often provides convenience features",
"# to aid in the debugging process.",
"raise",
"resource",
"=",
"None",
"# Fallback to the default handler",
"if",
"resource",
"is",
"None",
":",
"resource",
"=",
"self",
".",
"handle_500",
"(",
"request",
",",
"e",
")",
"return",
"resource",
",",
"resource",
".",
"status",
",",
"None",
"else",
":",
"return",
"resource",
",",
"None",
",",
"None"
]
| Dispatch and handle exceptions from operation. | [
"Dispatch",
"and",
"handle",
"exceptions",
"from",
"operation",
"."
]
| python | train |
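A toy version of the pre/post middleware pipeline the record above implements; the operation and middleware here are invented lambdas, and the exception handling is omitted:

def dispatch(operation, request, path_args, pre=(), post=()):
    for middleware in pre:
        middleware(request, path_args)       # may mutate path_args
    resource = operation(request, path_args)
    for middleware in post:
        resource = middleware(request, resource)
    return resource

result = dispatch(lambda req, args: args['id'], None, {'id': 7},
                  post=[lambda req, r: r * 10])
print(result)  # 70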
canonical-ols/acceptable | acceptable/_service.py | https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_service.py#L234-L266 | def api(self,
url,
name,
introduced_at=None,
undocumented=False,
deprecated_at=None,
title=None,
**options):
"""Add an API to the service.
:param url: This is the url that the API should be registered at.
:param name: This is the name of the api, and will be registered with
flask apps under.
Other keyword arguments may be used, and they will be passed to the
flask application when initialised. Of particular interest is the
'methods' keyword argument, which can be used to specify the HTTP
method the URL will be added for.
"""
location = get_callsite_location()
api = AcceptableAPI(
self,
name,
url,
introduced_at,
options,
undocumented=undocumented,
deprecated_at=deprecated_at,
title=title,
location=location,
)
self.metadata.register_api(self.name, self.group, api)
return api | [
"def",
"api",
"(",
"self",
",",
"url",
",",
"name",
",",
"introduced_at",
"=",
"None",
",",
"undocumented",
"=",
"False",
",",
"deprecated_at",
"=",
"None",
",",
"title",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"location",
"=",
"get_callsite_location",
"(",
")",
"api",
"=",
"AcceptableAPI",
"(",
"self",
",",
"name",
",",
"url",
",",
"introduced_at",
",",
"options",
",",
"undocumented",
"=",
"undocumented",
",",
"deprecated_at",
"=",
"deprecated_at",
",",
"title",
"=",
"title",
",",
"location",
"=",
"location",
",",
")",
"self",
".",
"metadata",
".",
"register_api",
"(",
"self",
".",
"name",
",",
"self",
".",
"group",
",",
"api",
")",
"return",
"api"
]
| Add an API to the service.
:param url: This is the url that the API should be registered at.
:param name: This is the name of the api, and will be registered with
flask apps under.
Other keyword arguments may be used, and they will be passed to the
flask application when initialised. Of particular interest is the
'methods' keyword argument, which can be used to specify the HTTP
method the URL will be added for. | [
"Add",
"an",
"API",
"to",
"the",
"service",
"."
]
| python | train |
xav/Grapefruit | grapefruit.py | https://github.com/xav/Grapefruit/blob/b3d88375be727a3a1ec5839fbc462e0e8e0836e4/grapefruit.py#L1123-L1147 | def from_rgb(r, g, b, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed RGB values.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_rgb(1.0, 0.5, 0.0)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_rgb(1.0, 0.5, 0.0, 0.5)
Color(1.0, 0.5, 0.0, 0.5)
"""
return Color((r, g, b), 'rgb', alpha, wref) | [
"def",
"from_rgb",
"(",
"r",
",",
"g",
",",
"b",
",",
"alpha",
"=",
"1.0",
",",
"wref",
"=",
"_DEFAULT_WREF",
")",
":",
"return",
"Color",
"(",
"(",
"r",
",",
"g",
",",
"b",
")",
",",
"'rgb'",
",",
"alpha",
",",
"wref",
")"
]
| Create a new instance based on the specified RGB values.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_rgb(1.0, 0.5, 0.0)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_rgb(1.0, 0.5, 0.0, 0.5)
Color(1.0, 0.5, 0.0, 0.5) | [
"Create",
"a",
"new",
"instance",
"based",
"on",
"the",
"specifed",
"RGB",
"values",
"."
]
| python | train |
brechtm/rinohtype | src/rinoh/backend/pdf/xobject/purepng.py | https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L1734-L1759 | def do_filter(self, filter_type, line):
"""
Applying filter, caring about prev line, interlacing etc.
`filter_type` may be integer to apply basic filter or
adaptive strategy with dict
(`name` is required field, others may tune strategy)
"""
# Recall that filtering algorithms are applied to bytes,
# not to pixels, regardless of the bit depth or colour type
# of the image.
line = bytearray(line)
if isinstance(filter_type, int):
res = bytearray(line)
self._filter_scanline(filter_type, line, res)
res.insert(0, filter_type) # Add filter type as the first byte
else:
res = self.adaptive_filter(filter_type, line)
self.prev = line
if self.restarts:
self.restarts[0] -= 1
if self.restarts[0] == 0:
del self.restarts[0]
self.prev = None
return res | [
"def",
"do_filter",
"(",
"self",
",",
"filter_type",
",",
"line",
")",
":",
"# Recall that filtering algorithms are applied to bytes,",
"# not to pixels, regardless of the bit depth or colour type",
"# of the image.",
"line",
"=",
"bytearray",
"(",
"line",
")",
"if",
"isinstance",
"(",
"filter_type",
",",
"int",
")",
":",
"res",
"=",
"bytearray",
"(",
"line",
")",
"self",
".",
"_filter_scanline",
"(",
"filter_type",
",",
"line",
",",
"res",
")",
"res",
".",
"insert",
"(",
"0",
",",
"filter_type",
")",
"# Add filter type as the first byte",
"else",
":",
"res",
"=",
"self",
".",
"adaptive_filter",
"(",
"filter_type",
",",
"line",
")",
"self",
".",
"prev",
"=",
"line",
"if",
"self",
".",
"restarts",
":",
"self",
".",
"restarts",
"[",
"0",
"]",
"-=",
"1",
"if",
"self",
".",
"restarts",
"[",
"0",
"]",
"==",
"0",
":",
"del",
"self",
".",
"restarts",
"[",
"0",
"]",
"self",
".",
"prev",
"=",
"None",
"return",
"res"
]
| Applying filter, caring about prev line, interlacing etc.
`filter_type` may be integer to apply basic filter or
adaptive strategy with dict
(`name` is required field, others may tune strategy) | [
"Applying",
"filter",
"caring",
"about",
"prev",
"line",
"interlacing",
"etc",
"."
]
| python | train |
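The "filter-type byte first" convention from the record above, shown for the trivial type-0 (None) filter on an invented scanline:

line = bytearray(b'\x01\x02\x03')  # made-up raw scanline bytes
res = bytearray(line)              # type 0: bytes pass through unchanged
res.insert(0, 0)                   # prepend the filter-type byte
print(bytes(res))                  # b'\x00\x01\x02\x03'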
alex-kostirin/pyatomac | atomac/Clipboard.py | https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/Clipboard.py#L99-L130 | def copy(cls, data):
"""Set the clipboard data ('Copy').
Parameters: data to set (string)
Optional: datatype if it's not a string
Returns: True / False on successful copy. Any exception raised (like
the NSPasteboardCommunicationError) should be caught
by the caller.
"""
pp = pprint.PrettyPrinter()
copy_data = 'Data to copy (put in pasteboard): %s'
logging.debug(copy_data % pp.pformat(data))
# Clear the pasteboard first:
cleared = cls.clearAll()
if not cleared:
logging.warning('Clipboard could not clear properly')
return False
# Prepare to write the data
# If we just use writeObjects the sequence to write to the clipboard is
# a) Call clearContents()
# b) Call writeObjects() with a list of objects to write to the
# clipboard
if not isinstance(data, types.ListType):
data = [data]
pb = AppKit.NSPasteboard.generalPasteboard()
pb_set_ok = pb.writeObjects_(data)
return bool(pb_set_ok) | [
"def",
"copy",
"(",
"cls",
",",
"data",
")",
":",
"pp",
"=",
"pprint",
".",
"PrettyPrinter",
"(",
")",
"copy_data",
"=",
"'Data to copy (put in pasteboard): %s'",
"logging",
".",
"debug",
"(",
"copy_data",
"%",
"pp",
".",
"pformat",
"(",
"data",
")",
")",
"# Clear the pasteboard first:",
"cleared",
"=",
"cls",
".",
"clearAll",
"(",
")",
"if",
"not",
"cleared",
":",
"logging",
".",
"warning",
"(",
"'Clipboard could not clear properly'",
")",
"return",
"False",
"# Prepare to write the data",
"# If we just use writeObjects the sequence to write to the clipboard is",
"# a) Call clearContents()",
"# b) Call writeObjects() with a list of objects to write to the",
"# clipboard",
"if",
"not",
"isinstance",
"(",
"data",
",",
"types",
".",
"ListType",
")",
":",
"data",
"=",
"[",
"data",
"]",
"pb",
"=",
"AppKit",
".",
"NSPasteboard",
".",
"generalPasteboard",
"(",
")",
"pb_set_ok",
"=",
"pb",
".",
"writeObjects_",
"(",
"data",
")",
"return",
"bool",
"(",
"pb_set_ok",
")"
]
| Set the clipboard data ('Copy').
Parameters: data to set (string)
Optional: datatype if it's not a string
Returns: True / False on successful copy. Any exception raised (like
the NSPasteboardCommunicationError) should be caught
by the caller. | [
"Set",
"the",
"clipboard",
"data",
"(",
"Copy",
")",
"."
]
| python | valid |
CivicSpleen/ambry | ambry/valuetype/geo.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/valuetype/geo.py#L139-L150 | def subclass(cls, vt_code, vt_args):
"""Return a dynamic subclass that has the extra parameters built in"""
from geoid import get_class
import geoid.census
parser = get_class(geoid.census, vt_args.strip('/')).parse
cls = type(vt_code.replace('/', '_'), (cls,), {'vt_code': vt_code, 'parser': parser})
globals()[cls.__name__] = cls
assert cls.parser
return cls | [
"def",
"subclass",
"(",
"cls",
",",
"vt_code",
",",
"vt_args",
")",
":",
"from",
"geoid",
"import",
"get_class",
"import",
"geoid",
".",
"census",
"parser",
"=",
"get_class",
"(",
"geoid",
".",
"census",
",",
"vt_args",
".",
"strip",
"(",
"'/'",
")",
")",
".",
"parse",
"cls",
"=",
"type",
"(",
"vt_code",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")",
",",
"(",
"cls",
",",
")",
",",
"{",
"'vt_code'",
":",
"vt_code",
",",
"'parser'",
":",
"parser",
"}",
")",
"globals",
"(",
")",
"[",
"cls",
".",
"__name__",
"]",
"=",
"cls",
"assert",
"cls",
".",
"parser",
"return",
"cls"
]
| Return a dynamic subclass that has the extra parameters built in | [
"Return",
"a",
"dynamic",
"subclass",
"that",
"has",
"the",
"extra",
"parameters",
"built",
"in"
]
| python | train |
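The dynamic-class trick in the record above, reduced to its core: type() builds the subclass at runtime and globals() registers it under its own name (the class and code names below are hypothetical):

class GeoVT:
    pass

cls = type('census_tract', (GeoVT,), {'vt_code': 'census/tract'})
globals()[cls.__name__] = cls
print(census_tract.vt_code)  # census/tract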
SoftwareDefinedBuildings/XBOS | apps/occupancy/OccupancyThanos.py | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/occupancy/OccupancyThanos.py#L48-L88 | def find_similar_days(training_data, now, observation_length, k, method=hamming_distance):
min_time = training_data.index[0] + timedelta(minutes=observation_length)
# Find moments in our dataset that have the same hour/minute and is_weekend() == weekend.
selector = ((training_data.index.minute == now.minute) &
(training_data.index.hour == now.hour) &
(training_data.index > min_time))
"""
if now.weekday() < 5:
selector = (
(training_data.index.minute == now.minute) &
(training_data.index.hour == now.hour) &
(training_data.index > min_time) &
(training_data.index.weekday < 5)
)
else:
selector = (
(training_data.index.minute == now.minute) &
(training_data.index.hour == now.hour) &
(training_data.index > min_time) &
(training_data.index.weekday >= 5)
)
"""
similar_moments = training_data[selector][:-1]
obs_td = timedelta(minutes=observation_length)
similar_moments['Similarity'] = [
method(
training_data[(training_data.index >= now - obs_td) &
(training_data.index <= now)].get_values(),
training_data[(training_data.index >= i - obs_td) &
(training_data.index <= i)].get_values()
) for i in similar_moments.index
]
indexes = (similar_moments.sort_values('Similarity', ascending=True)
.head(k).index)
return indexes | [
"def",
"find_similar_days",
"(",
"training_data",
",",
"now",
",",
"observation_length",
",",
"k",
",",
"method",
"=",
"hamming_distance",
")",
":",
"min_time",
"=",
"training_data",
".",
"index",
"[",
"0",
"]",
"+",
"timedelta",
"(",
"minutes",
"=",
"observation_length",
")",
"# Find moments in our dataset that have the same hour/minute and is_weekend() == weekend.",
"selector",
"=",
"(",
"(",
"training_data",
".",
"index",
".",
"minute",
"==",
"now",
".",
"minute",
")",
"&",
"(",
"training_data",
".",
"index",
".",
"hour",
"==",
"now",
".",
"hour",
")",
"&",
"(",
"training_data",
".",
"index",
">",
"min_time",
")",
")",
"similar_moments",
"=",
"training_data",
"[",
"selector",
"]",
"[",
":",
"-",
"1",
"]",
"obs_td",
"=",
"timedelta",
"(",
"minutes",
"=",
"observation_length",
")",
"similar_moments",
"[",
"'Similarity'",
"]",
"=",
"[",
"method",
"(",
"training_data",
"[",
"(",
"training_data",
".",
"index",
">=",
"now",
"-",
"obs_td",
")",
"&",
"(",
"training_data",
".",
"index",
"<=",
"now",
")",
"]",
".",
"get_values",
"(",
")",
",",
"training_data",
"[",
"(",
"training_data",
".",
"index",
">=",
"i",
"-",
"obs_td",
")",
"&",
"(",
"training_data",
".",
"index",
"<=",
"i",
")",
"]",
".",
"get_values",
"(",
")",
")",
"for",
"i",
"in",
"similar_moments",
".",
"index",
"]",
"indexes",
"=",
"(",
"similar_moments",
".",
"sort_values",
"(",
"'Similarity'",
",",
"ascending",
"=",
"True",
")",
".",
"head",
"(",
"k",
")",
".",
"index",
")",
"return",
"indexes"
]
| if now.weekday() < 5:
selector = (
(training_data.index.minute == now.minute) &
(training_data.index.hour == now.hour) &
(training_data.index > min_time) &
(training_data.index.weekday < 5)
)
else:
selector = (
(training_data.index.minute == now.minute) &
(training_data.index.hour == now.hour) &
(training_data.index > min_time) &
(training_data.index.weekday >= 5)
) | [
"if",
"now",
".",
"weekday",
"()",
"<",
"5",
":",
"selector",
"=",
"(",
"(",
"training_data",
".",
"index",
".",
"minute",
"==",
"now",
".",
"minute",
")",
"&",
"(",
"training_data",
".",
"index",
".",
"hour",
"==",
"now",
".",
"hour",
")",
"&",
"(",
"training_data",
".",
"index",
">",
"min_time",
")",
"&",
"(",
"training_data",
".",
"index",
".",
"weekday",
"<",
"5",
")",
")",
"else",
":",
"selector",
"=",
"(",
"(",
"training_data",
".",
"index",
".",
"minute",
"==",
"now",
".",
"minute",
")",
"&",
"(",
"training_data",
".",
"index",
".",
"hour",
"==",
"now",
".",
"hour",
")",
"&",
"(",
"training_data",
".",
"index",
">",
"min_time",
")",
"&",
"(",
"training_data",
".",
"index",
".",
"weekday",
">",
"=",
"5",
")",
")"
]
| python | train |
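The "rank by similarity, keep the k best" step from the record above, without the pandas machinery (timestamps and distances invented; lower means more similar):

similarity = {'2016-01-01 10:00': 0.4,
              '2016-01-02 10:00': 0.1,
              '2016-01-03 10:00': 0.3}
k = 2
indexes = sorted(similarity, key=similarity.get)[:k]  # ascending, head(k)
print(indexes)  # ['2016-01-02 10:00', '2016-01-03 10:00']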
pantsbuild/pants | contrib/scrooge/src/python/pants/contrib/scrooge/tasks/thrift_util.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/scrooge/src/python/pants/contrib/scrooge/tasks/thrift_util.py#L46-L57 | def find_root_thrifts(basedirs, sources, log=None):
"""Finds the root thrift files in the graph formed by sources and their recursive includes.
:basedirs: A set of thrift source file base directories to look for includes in.
:sources: Seed thrift files to examine.
:log: An optional logger.
"""
root_sources = set(sources)
for source in sources:
root_sources.difference_update(find_includes(basedirs, source, log=log))
return root_sources | [
"def",
"find_root_thrifts",
"(",
"basedirs",
",",
"sources",
",",
"log",
"=",
"None",
")",
":",
"root_sources",
"=",
"set",
"(",
"sources",
")",
"for",
"source",
"in",
"sources",
":",
"root_sources",
".",
"difference_update",
"(",
"find_includes",
"(",
"basedirs",
",",
"source",
",",
"log",
"=",
"log",
")",
")",
"return",
"root_sources"
]
| Finds the root thrift files in the graph formed by sources and their recursive includes.
:basedirs: A set of thrift source file base directories to look for includes in.
:sources: Seed thrift files to examine.
:log: An optional logger. | [
"Finds",
"the",
"root",
"thrift",
"files",
"in",
"the",
"graph",
"formed",
"by",
"sources",
"and",
"their",
"recursive",
"includes",
"."
]
| python | train |
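The root-finding in the record above as plain set arithmetic; the include map is invented and, unlike the real find_includes, lists only direct includes:

sources = {'a.thrift', 'b.thrift', 'c.thrift'}
includes = {'a.thrift': {'b.thrift'},
            'b.thrift': {'c.thrift'},
            'c.thrift': set()}
roots = set(sources)
for source in sources:
    roots.difference_update(includes[source])
print(sorted(roots))  # ['a.thrift']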
astropy/astropy-healpix | astropy_healpix/bench.py | https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/bench.py#L210-L214 | def main(fast=False):
"""Run all benchmarks and print report to the console."""
print('Running benchmarks...\n')
results = bench_run(fast=fast)
bench_report(results) | [
"def",
"main",
"(",
"fast",
"=",
"False",
")",
":",
"print",
"(",
"'Running benchmarks...\\n'",
")",
"results",
"=",
"bench_run",
"(",
"fast",
"=",
"fast",
")",
"bench_report",
"(",
"results",
")"
]
| Run all benchmarks and print report to the console. | [
"Run",
"all",
"benchmarks",
"and",
"print",
"report",
"to",
"the",
"console",
"."
]
| python | train |
its-rigs/Trolly | trolly/trelloobject.py | https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/trelloobject.py#L130-L134 | def create_checklist_item(self, card_id, checklist_id, checklistitem_json, **kwargs):
'''
Create a ChecklistItem object from JSON object
'''
return self.client.create_checklist_item(card_id, checklist_id, checklistitem_json, **kwargs) | [
"def",
"create_checklist_item",
"(",
"self",
",",
"card_id",
",",
"checklist_id",
",",
"checklistitem_json",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"client",
".",
"create_checklist_item",
"(",
"card_id",
",",
"checklist_id",
",",
"checklistitem_json",
",",
"*",
"*",
"kwargs",
")"
]
| Create a ChecklistItem object from JSON object | [
"Create",
"a",
"ChecklistItem",
"object",
"from",
"JSON",
"object"
]
| python | test |
manns/pyspread | pyspread/src/lib/_grid_cairo_renderer.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L1151-L1159 | def _get_bottom_line_coordinates(self):
"""Returns start and stop coordinates of bottom line"""
rect_x, rect_y, rect_width, rect_height = self.rect
start_point = rect_x, rect_y + rect_height
end_point = rect_x + rect_width, rect_y + rect_height
return start_point, end_point | [
"def",
"_get_bottom_line_coordinates",
"(",
"self",
")",
":",
"rect_x",
",",
"rect_y",
",",
"rect_width",
",",
"rect_height",
"=",
"self",
".",
"rect",
"start_point",
"=",
"rect_x",
",",
"rect_y",
"+",
"rect_height",
"end_point",
"=",
"rect_x",
"+",
"rect_width",
",",
"rect_y",
"+",
"rect_height",
"return",
"start_point",
",",
"end_point"
]
| Returns start and stop coordinates of bottom line | [
"Returns",
"start",
"and",
"stop",
"coordinates",
"of",
"bottom",
"line"
]
| python | train |
wummel/patool | patoolib/programs/tar.py | https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/tar.py#L27-L32 | def list_tar (archive, compression, cmd, verbosity, interactive):
"""List a TAR archive."""
cmdlist = [cmd, '--list']
add_tar_opts(cmdlist, compression, verbosity)
cmdlist.extend(["--file", archive])
return cmdlist | [
"def",
"list_tar",
"(",
"archive",
",",
"compression",
",",
"cmd",
",",
"verbosity",
",",
"interactive",
")",
":",
"cmdlist",
"=",
"[",
"cmd",
",",
"'--list'",
"]",
"add_tar_opts",
"(",
"cmdlist",
",",
"compression",
",",
"verbosity",
")",
"cmdlist",
".",
"extend",
"(",
"[",
"\"--file\"",
",",
"archive",
"]",
")",
"return",
"cmdlist"
]
| List a TAR archive. | [
"List",
"a",
"TAR",
"archive",
"."
]
| python | train |
python-bonobo/bonobo | bonobo/execution/contexts/node.py | https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/execution/contexts/node.py#L325-L356 | def _cast(self, _input, _output):
"""
Transforms a pair of input/output into the real slim shoutput.
:param _input: Bag
:param _output: mixed
:return: Bag
"""
if isenvelope(_output):
_output, _flags, _options = _output.unfold()
else:
_flags, _options = [], {}
if len(_flags):
# TODO: parse flags to check constraints are respected (like not modified alone, etc.)
if F_NOT_MODIFIED in _flags:
return _input
if F_INHERIT in _flags:
if self._output_type is None:
self._output_type = concat_types(
self._input_type, self._input_length, self._output_type, len(_output)
)
_output = _input + ensure_tuple(_output)
if not self._output_type:
if issubclass(type(_output), tuple):
self._output_type = type(_output)
return ensure_tuple(_output, cls=self._output_type) | [
"def",
"_cast",
"(",
"self",
",",
"_input",
",",
"_output",
")",
":",
"if",
"isenvelope",
"(",
"_output",
")",
":",
"_output",
",",
"_flags",
",",
"_options",
"=",
"_output",
".",
"unfold",
"(",
")",
"else",
":",
"_flags",
",",
"_options",
"=",
"[",
"]",
",",
"{",
"}",
"if",
"len",
"(",
"_flags",
")",
":",
"# TODO: parse flags to check constraints are respected (like not modified alone, etc.)",
"if",
"F_NOT_MODIFIED",
"in",
"_flags",
":",
"return",
"_input",
"if",
"F_INHERIT",
"in",
"_flags",
":",
"if",
"self",
".",
"_output_type",
"is",
"None",
":",
"self",
".",
"_output_type",
"=",
"concat_types",
"(",
"self",
".",
"_input_type",
",",
"self",
".",
"_input_length",
",",
"self",
".",
"_output_type",
",",
"len",
"(",
"_output",
")",
")",
"_output",
"=",
"_input",
"+",
"ensure_tuple",
"(",
"_output",
")",
"if",
"not",
"self",
".",
"_output_type",
":",
"if",
"issubclass",
"(",
"type",
"(",
"_output",
")",
",",
"tuple",
")",
":",
"self",
".",
"_output_type",
"=",
"type",
"(",
"_output",
")",
"return",
"ensure_tuple",
"(",
"_output",
",",
"cls",
"=",
"self",
".",
"_output_type",
")"
]
| Transforms a pair of input/output into the real slim shoutput.
:param _input: Bag
:param _output: mixed
:return: Bag | [
"Transforms",
"a",
"pair",
"of",
"input",
"/",
"output",
"into",
"the",
"real",
"slim",
"shoutput",
"."
]
| python | train |
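What the F_INHERIT branch in the record above boils down to: the output row is the input row with the freshly produced fields appended (values invented):

_input = ('id-1', 'alice')           # upstream row
_output = ('alice@example.com',)     # fields produced by this node
print(_input + _output)              # ('id-1', 'alice', 'alice@example.com')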
saltstack/salt | salt/modules/mac_power.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L238-L263 | def set_harddisk_sleep(minutes):
'''
Set the amount of idle time until the harddisk sleeps. Pass "Never" or "Off"
to never sleep.
:param minutes: Can be an integer between 1 and 180 or "Never" or "Off"
:ptype: int, str
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.set_harddisk_sleep 120
salt '*' power.set_harddisk_sleep off
'''
value = _validate_sleep(minutes)
cmd = 'systemsetup -setharddisksleep {0}'.format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_harddisk_sleep,
) | [
"def",
"set_harddisk_sleep",
"(",
"minutes",
")",
":",
"value",
"=",
"_validate_sleep",
"(",
"minutes",
")",
"cmd",
"=",
"'systemsetup -setharddisksleep {0}'",
".",
"format",
"(",
"value",
")",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"execute_return_success",
"(",
"cmd",
")",
"return",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"confirm_updated",
"(",
"str",
"(",
"value",
")",
",",
"get_harddisk_sleep",
",",
")"
]
| Set the amount of idle time until the harddisk sleeps. Pass "Never" or "Off"
to never sleep.
:param minutes: Can be an integer between 1 and 180 or "Never" or "Off"
:ptype: int, str
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.set_harddisk_sleep 120
salt '*' power.set_harddisk_sleep off | [
"Set",
"the",
"amount",
"of",
"idle",
"time",
"until",
"the",
"harddisk",
"sleeps",
".",
"Pass",
"Never",
"of",
"Off",
"to",
"never",
"sleep",
"."
]
| python | train |
batiste/django-page-cms | pages/views.py | https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/views.py#L149-L165 | def choose_language(self, lang, request):
"""Deal with the multiple corner case of choosing the language."""
# Can be an empty string or None
if not lang:
lang = get_language_from_request(request)
# Raise a 404 if the language is not in the list
if lang not in [key for (key, value) in settings.PAGE_LANGUAGES]:
raise Http404
# We're going to serve CMS pages in language lang;
# make django gettext use that language too
if lang and translation.check_for_language(lang):
translation.activate(lang)
return lang | [
"def",
"choose_language",
"(",
"self",
",",
"lang",
",",
"request",
")",
":",
"# Can be an empty string or None",
"if",
"not",
"lang",
":",
"lang",
"=",
"get_language_from_request",
"(",
"request",
")",
"# Raise a 404 if the language is not in not in the list",
"if",
"lang",
"not",
"in",
"[",
"key",
"for",
"(",
"key",
",",
"value",
")",
"in",
"settings",
".",
"PAGE_LANGUAGES",
"]",
":",
"raise",
"Http404",
"# We're going to serve CMS pages in language lang;",
"# make django gettext use that language too",
"if",
"lang",
"and",
"translation",
".",
"check_for_language",
"(",
"lang",
")",
":",
"translation",
".",
"activate",
"(",
"lang",
")",
"return",
"lang"
]
| Deal with the multiple corner case of choosing the language. | [
"Deal",
"with",
"the",
"multiple",
"corner",
"case",
"of",
"choosing",
"the",
"language",
"."
]
| python | train |
alphagov/performanceplatform-collector | performanceplatform/collector/arguments.py | https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/arguments.py#L6-L63 | def parse_args(name="", args=None):
"""Parse command line argument for a collector
Returns an argparse.Namespace with 'config' and 'query' options"""
def _load_json_file(path):
with open(path) as f:
json_data = json.load(f)
json_data['path_to_json_file'] = path
return json_data
parser = argparse.ArgumentParser(description="%s collector for sending"
" data to the performance"
" platform" % name)
parser.add_argument('-c', '--credentials', dest='credentials',
type=_load_json_file,
help='JSON file containing credentials '
'for the collector',
required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-l', '--collector', dest='collector_slug',
type=str,
help='Collector slug to query the API for the '
'collector config')
group.add_argument('-q', '--query', dest='query',
type=_load_json_file,
help='JSON file containing details '
'about the query to make '
'against the source API '
'and the target data-set')
parser.add_argument('-t', '--token', dest='token',
type=_load_json_file,
help='JSON file containing token '
'for the collector',
required=True)
parser.add_argument('-b', '--performanceplatform',
dest='performanceplatform',
type=_load_json_file,
help='JSON file containing the Performance Platform '
'config for the collector',
required=True)
parser.add_argument('-s', '--start', dest='start_at',
type=parse_date,
help='Date to start collection from')
parser.add_argument('-e', '--end', dest='end_at',
type=parse_date,
help='Date to end collection')
parser.add_argument('--console-logging', dest='console_logging',
action='store_true',
help='Output logging to the console rather than file')
parser.add_argument('--dry-run', dest='dry_run',
action='store_true',
help='Instead of pushing to the Performance Platform '
'the collector will print out what would have '
'been pushed')
parser.set_defaults(console_logging=False, dry_run=False)
args = parser.parse_args(args)
return args | [
"def",
"parse_args",
"(",
"name",
"=",
"\"\"",
",",
"args",
"=",
"None",
")",
":",
"def",
"_load_json_file",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"json_data",
"=",
"json",
".",
"load",
"(",
"f",
")",
"json_data",
"[",
"'path_to_json_file'",
"]",
"=",
"path",
"return",
"json_data",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"%s collector for sending\"",
"\" data to the performance\"",
"\" platform\"",
"%",
"name",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--credentials'",
",",
"dest",
"=",
"'credentials'",
",",
"type",
"=",
"_load_json_file",
",",
"help",
"=",
"'JSON file containing credentials '",
"'for the collector'",
",",
"required",
"=",
"True",
")",
"group",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"group",
".",
"add_argument",
"(",
"'-l'",
",",
"'--collector'",
",",
"dest",
"=",
"'collector_slug'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Collector slug to query the API for the '",
"'collector config'",
")",
"group",
".",
"add_argument",
"(",
"'-q'",
",",
"'--query'",
",",
"dest",
"=",
"'query'",
",",
"type",
"=",
"_load_json_file",
",",
"help",
"=",
"'JSON file containing details '",
"'about the query to make '",
"'against the source API '",
"'and the target data-set'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--token'",
",",
"dest",
"=",
"'token'",
",",
"type",
"=",
"_load_json_file",
",",
"help",
"=",
"'JSON file containing token '",
"'for the collector'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"'--performanceplatform'",
",",
"dest",
"=",
"'performanceplatform'",
",",
"type",
"=",
"_load_json_file",
",",
"help",
"=",
"'JSON file containing the Performance Platform '",
"'config for the collector'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--start'",
",",
"dest",
"=",
"'start_at'",
",",
"type",
"=",
"parse_date",
",",
"help",
"=",
"'Date to start collection from'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--end'",
",",
"dest",
"=",
"'end_at'",
",",
"type",
"=",
"parse_date",
",",
"help",
"=",
"'Date to end collection'",
")",
"parser",
".",
"add_argument",
"(",
"'--console-logging'",
",",
"dest",
"=",
"'console_logging'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Output logging to the console rather than file'",
")",
"parser",
".",
"add_argument",
"(",
"'--dry-run'",
",",
"dest",
"=",
"'dry_run'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Instead of pushing to the Performance Platform '",
"'the collector will print out what would have '",
"'been pushed'",
")",
"parser",
".",
"set_defaults",
"(",
"console_logging",
"=",
"False",
",",
"dry_run",
"=",
"False",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"return",
"args"
]
| Parse command line argument for a collector
Returns an argparse.Namespace with 'config' and 'query' options | [
"Parse",
"command",
"line",
"argument",
"for",
"a",
"collector"
]
| python | train |
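A minimal standalone demo of the mutually-exclusive-group pattern used in the record above (flag names copied from it, everything else stripped down):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-l', '--collector', dest='collector_slug')
group.add_argument('-q', '--query')
args = parser.parse_args(['-l', 'ga-realtime'])
print(args.collector_slug)  # ga-realtime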
base4sistemas/satcfe | satcfe/resposta/consultarnumerosessao.py | https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/consultarnumerosessao.py#L65-L81 | def analisar(retorno):
"""Constrói uma :class:`RespostaSAT` ou especialização dependendo da
função SAT encontrada na sessão consultada.
:param unicode retorno: Retorno da função ``ConsultarNumeroSessao``.
"""
if '|' not in retorno:
raise ErroRespostaSATInvalida('Resposta nao possui pipes '
'separando os campos: {!r}'.format(retorno))
resposta = _RespostaParcial(*(retorno.split('|')[:2]))
for faixa, construtor in _RESPOSTAS_POSSIVEIS:
if int(resposta.EEEEE) in xrange(faixa, faixa+1000):
return construtor(retorno)
return RespostaConsultarNumeroSessao._pos_analise(retorno) | [
"def",
"analisar",
"(",
"retorno",
")",
":",
"if",
"'|'",
"not",
"in",
"retorno",
":",
"raise",
"ErroRespostaSATInvalida",
"(",
"'Resposta nao possui pipes '",
"'separando os campos: {!r}'",
".",
"format",
"(",
"retorno",
")",
")",
"resposta",
"=",
"_RespostaParcial",
"(",
"*",
"(",
"retorno",
".",
"split",
"(",
"'|'",
")",
"[",
":",
"2",
"]",
")",
")",
"for",
"faixa",
",",
"construtor",
"in",
"_RESPOSTAS_POSSIVEIS",
":",
"if",
"int",
"(",
"resposta",
".",
"EEEEE",
")",
"in",
"xrange",
"(",
"faixa",
",",
"faixa",
"+",
"1000",
")",
":",
"return",
"construtor",
"(",
"retorno",
")",
"return",
"RespostaConsultarNumeroSessao",
".",
"_pos_analise",
"(",
"retorno",
")"
]
| Builds a :class:`RespostaSAT` or a specialization depending on the
SAT function found in the queried session.
:param unicode retorno: Return value of the ``ConsultarNumeroSessao`` function. | [
"Constrói",
"uma",
":",
"class",
":",
"RespostaSAT",
"ou",
"especialização",
"dependendo",
"da",
"função",
"SAT",
"encontrada",
"na",
"sessão",
"consultada",
"."
]
| python | train |
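The range-based dispatch in the record above, reduced to a toy: the constructor is chosen by which thousand-block the status code EEEEE falls into (codes and names below are invented):

RESPOSTAS = [(4000, 'RespostaConsulta'), (6000, 'RespostaVenda')]
eeeee = 6001
for faixa, construtor in RESPOSTAS:
    if eeeee in range(faixa, faixa + 1000):
        print(construtor)  # RespostaVenda
        break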
dw/mitogen | mitogen/parent.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L1340-L1351 | def get_python_argv(self):
"""
Return the initial argument vector elements necessary to invoke Python,
by returning a 1-element list containing :attr:`python_path` if it is a
string, or simply returning it if it is already a list.
This allows emulation of existing tools where the Python invocation may
be set to e.g. `['/usr/bin/env', 'python']`.
"""
if isinstance(self.python_path, list):
return self.python_path
return [self.python_path] | [
"def",
"get_python_argv",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"python_path",
",",
"list",
")",
":",
"return",
"self",
".",
"python_path",
"return",
"[",
"self",
".",
"python_path",
"]"
]
| Return the initial argument vector elements necessary to invoke Python,
by returning a 1-element list containing :attr:`python_path` if it is a
string, or simply returning it if it is already a list.
This allows emulation of existing tools where the Python invocation may
be set to e.g. `['/usr/bin/env', 'python']`. | [
"Return",
"the",
"initial",
"argument",
"vector",
"elements",
"necessary",
"to",
"invoke",
"Python",
"by",
"returning",
"a",
"1",
"-",
"element",
"list",
"containing",
":",
"attr",
":",
"python_path",
"if",
"it",
"is",
"a",
"string",
"or",
"simply",
"returning",
"it",
"if",
"it",
"is",
"already",
"a",
"list",
"."
]
| python | train |
openid/python-openid | openid/store/filestore.py | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/store/filestore.py#L182-L204 | def getAssociationFilename(self, server_url, handle):
"""Create a unique filename for a given server url and
handle. This implementation does not assume anything about the
format of the handle. The filename that is returned will
contain the domain name from the server URL for ease of human
inspection of the data directory.
(str, str) -> str
"""
if server_url.find('://') == -1:
raise ValueError('Bad server URL: %r' % server_url)
proto, rest = server_url.split('://', 1)
domain = _filenameEscape(rest.split('/', 1)[0])
url_hash = _safe64(server_url)
if handle:
handle_hash = _safe64(handle)
else:
handle_hash = ''
filename = '%s-%s-%s-%s' % (proto, domain, url_hash, handle_hash)
return os.path.join(self.association_dir, filename) | [
"def",
"getAssociationFilename",
"(",
"self",
",",
"server_url",
",",
"handle",
")",
":",
"if",
"server_url",
".",
"find",
"(",
"'://'",
")",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"'Bad server URL: %r'",
"%",
"server_url",
")",
"proto",
",",
"rest",
"=",
"server_url",
".",
"split",
"(",
"'://'",
",",
"1",
")",
"domain",
"=",
"_filenameEscape",
"(",
"rest",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"[",
"0",
"]",
")",
"url_hash",
"=",
"_safe64",
"(",
"server_url",
")",
"if",
"handle",
":",
"handle_hash",
"=",
"_safe64",
"(",
"handle",
")",
"else",
":",
"handle_hash",
"=",
"''",
"filename",
"=",
"'%s-%s-%s-%s'",
"%",
"(",
"proto",
",",
"domain",
",",
"url_hash",
",",
"handle_hash",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"association_dir",
",",
"filename",
")"
]
| Create a unique filename for a given server url and
handle. This implementation does not assume anything about the
format of the handle. The filename that is returned will
contain the domain name from the server URL for ease of human
inspection of the data directory.
(str, str) -> str | [
"Create",
"a",
"unique",
"filename",
"for",
"a",
"given",
"server",
"url",
"and",
"handle",
".",
"This",
"implementation",
"does",
"not",
"assume",
"anything",
"about",
"the",
"format",
"of",
"the",
"handle",
".",
"The",
"filename",
"that",
"is",
"returned",
"will",
"contain",
"the",
"domain",
"name",
"from",
"the",
"server",
"URL",
"for",
"ease",
"of",
"human",
"inspection",
"of",
"the",
"data",
"directory",
"."
]
| python | train |
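To see the filename scheme concretely, a self-contained sketch; the helpers here are simplified illustrations, not python-openid's exact _safe64 and _filenameEscape implementations:

import base64
import hashlib
import os

def _safe64(s):
    # Illustrative: URL-safe base64 of a SHA-1 digest, padding stripped.
    digest = hashlib.sha1(s.encode('utf-8')).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

def _filenameEscape(s):
    # Illustrative: keep only filename-safe characters.
    return ''.join(c if c.isalnum() or c in '.-' else '_' for c in s)

server_url = 'https://example.com/openid/endpoint'
proto, rest = server_url.split('://', 1)
domain = _filenameEscape(rest.split('/', 1)[0])
filename = '%s-%s-%s-%s' % (proto, domain, _safe64(server_url), _safe64('assoc-handle'))
print(os.path.join('associations', filename))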
PmagPy/PmagPy | programs/demag_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L844-L1085 | def create_menu(self):
"""
Create the MenuBar for the GUI. The current structure is:
File : Change Working Directory, Import Interpretations from LSQ
file, Import interpretations from a redo file, Save interpretations
to a redo file, Save MagIC tables, Save Plots
Edit : New Interpretation, Delete Interpretation, Next
Interpretation, Previous Interpretation, Next Specimen, Previous
Specimen, Flag Measurement Data, Coordinate Systems
Analysis : Acceptance Criteria, Sample Orientation, Flag
Interpretations
Tools : Interpretation Editor, VGP Viewer
Help : Usage and Tips, PmagPy Cookbook, Open Docs, Github Page, Open
Debugger
self.menubar = wx.MenuBar()
# -----------------
# File Menu
# -----------------
menu_file = wx.Menu()
m_change_WD = menu_file.Append(-1,
"Change Working Directory\tCtrl-W", "")
self.Bind(wx.EVT_MENU, self.on_menu_change_working_directory, m_change_WD)
m_import_meas_file = menu_file.Append(-1,
"Change measurements file", "")
self.Bind(wx.EVT_MENU, self.on_menu_import_meas_file, m_import_meas_file)
m_import_LSQ = menu_file.Append(-1,
"&Import Interpretations from LSQ file\tCtrl-L", "")
self.Bind(wx.EVT_MENU, self.on_menu_read_from_LSQ, m_import_LSQ)
m_previous_interpretation = menu_file.Append(
-1, "&Import interpretations from a redo file\tCtrl-R", "")
self.Bind(wx.EVT_MENU, self.on_menu_previous_interpretation,
m_previous_interpretation)
m_save_interpretation = menu_file.Append(
-1, "&Save interpretations to a redo file\tCtrl-S", "")
self.Bind(wx.EVT_MENU, self.on_menu_save_interpretation,
m_save_interpretation)
m_make_MagIC_results_tables = menu_file.Append(
-1, "&Save MagIC tables\tCtrl-Shift-S", "")
self.Bind(wx.EVT_MENU, self.on_menu_make_MagIC_results_tables,
m_make_MagIC_results_tables)
submenu_save_plots = wx.Menu()
m_save_zij_plot = submenu_save_plots.Append(
-1, "&Save Zijderveld plot", "")
self.Bind(wx.EVT_MENU, self.on_save_Zij_plot, m_save_zij_plot, "Zij")
m_save_eq_plot = submenu_save_plots.Append(
-1, "&Save specimen equal area plot", "")
self.Bind(wx.EVT_MENU, self.on_save_Eq_plot,
m_save_eq_plot, "specimen-Eq")
m_save_M_t_plot = submenu_save_plots.Append(-1, "&Save M-t plot", "")
self.Bind(wx.EVT_MENU, self.on_save_M_t_plot, m_save_M_t_plot, "M_t")
m_save_high_level = submenu_save_plots.Append(
-1, "&Save high level plot", "")
self.Bind(wx.EVT_MENU, self.on_save_high_level,
m_save_high_level, "Eq")
m_save_all_plots = submenu_save_plots.Append(-1, "&Save all plots", "")
self.Bind(wx.EVT_MENU, self.on_save_all_figures, m_save_all_plots)
m_new_sub_plots = menu_file.AppendSubMenu(submenu_save_plots, "&Save plot")
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_menu_exit, m_exit)
# -----------------
# Edit Menu
# -----------------
menu_edit = wx.Menu()
m_new = menu_edit.Append(-1, "&New interpretation\tCtrl-N", "")
self.Bind(wx.EVT_MENU, self.on_btn_add_fit, m_new)
m_delete = menu_edit.Append(-1, "&Delete interpretation\tCtrl-D", "")
self.Bind(wx.EVT_MENU, self.on_btn_delete_fit, m_delete)
m_next_interp = menu_edit.Append(-1,
"&Next interpretation\tCtrl-Up", "")
self.Bind(wx.EVT_MENU, self.on_menu_next_interp, m_next_interp)
m_previous_interp = menu_edit.Append(-1,
"&Previous interpretation\tCtrl-Down", "")
self.Bind(wx.EVT_MENU, self.on_menu_prev_interp, m_previous_interp)
m_next_specimen = menu_edit.Append(-1,
"&Next Specimen\tCtrl-Right", "")
self.Bind(wx.EVT_MENU, self.on_next_button, m_next_specimen)
m_previous_specimen = menu_edit.Append(-1,
"&Previous Specimen\tCtrl-Left", "")
self.Bind(wx.EVT_MENU, self.on_prev_button, m_previous_specimen)
menu_flag_meas = wx.Menu()
m_good = menu_flag_meas.Append(-1, "&Good Measurement\tCtrl-Alt-G", "")
self.Bind(wx.EVT_MENU, self.on_menu_flag_meas_good, m_good)
m_bad = menu_flag_meas.Append(-1, "&Bad Measurement\tCtrl-Alt-B", "")
self.Bind(wx.EVT_MENU, self.on_menu_flag_meas_bad, m_bad)
m_flag_meas = menu_edit.AppendSubMenu(menu_flag_meas, "&Flag Measurement Data")
menu_coordinates = wx.Menu()
m_speci = menu_coordinates.Append(-1,
"&Specimen Coordinates\tCtrl-P", "")
self.Bind(wx.EVT_MENU, self.on_menu_change_speci_coord, m_speci)
if "geographic" in self.coordinate_list:
m_geo = menu_coordinates.Append(-1,
"&Geographic Coordinates\tCtrl-G", "")
self.Bind(wx.EVT_MENU, self.on_menu_change_geo_coord, m_geo)
if "tilt-corrected" in self.coordinate_list:
m_tilt = menu_coordinates.Append(-1,
"&Tilt-Corrected Coordinates\tCtrl-T", "")
self.Bind(wx.EVT_MENU, self.on_menu_change_tilt_coord, m_tilt)
m_coords = menu_edit.AppendSubMenu(menu_coordinates, "&Coordinate Systems")
# -----------------
# Analysis Menu
# -----------------
menu_Analysis = wx.Menu()
submenu_criteria = wx.Menu()
m_change_criteria_file = submenu_criteria.Append(
-1, "&Change acceptance criteria", "")
self.Bind(wx.EVT_MENU, self.on_menu_change_criteria,
m_change_criteria_file)
m_import_criteria_file = submenu_criteria.Append(
-1, "&Import criteria file", "")
self.Bind(wx.EVT_MENU, self.on_menu_criteria_file,
m_import_criteria_file)
m_new_sub = menu_Analysis.AppendSubMenu(submenu_criteria, "Acceptance criteria")
menu_flag_fit = wx.Menu()
m_good_fit = menu_flag_fit.Append(-1,
"&Good Interpretation\tCtrl-Shift-G", "")
self.Bind(wx.EVT_MENU, self.on_menu_flag_fit_good, m_good_fit)
m_bad_fit = menu_flag_fit.Append(-1,
"&Bad Interpretation\tCtrl-Shift-B", "")
self.Bind(wx.EVT_MENU, self.on_menu_flag_fit_bad, m_bad_fit)
m_flag_fit = menu_Analysis.AppendSubMenu(menu_flag_fit, "&Flag Interpretations")
submenu_sample_check = wx.Menu()
m_check_orient = submenu_sample_check.Append(
-1, "&Check Sample Orientations\tCtrl-O", "")
self.Bind(wx.EVT_MENU, self.on_menu_check_orient, m_check_orient)
m_mark_samp_bad = submenu_sample_check.Append(
-1, "&Mark Sample Bad\tCtrl-.", "")
self.Bind(wx.EVT_MENU, self.on_menu_mark_samp_bad, m_mark_samp_bad)
m_mark_samp_good = submenu_sample_check.Append(
-1, "&Mark Sample Good\tCtrl-,", "")
self.Bind(wx.EVT_MENU, self.on_menu_mark_samp_good, m_mark_samp_good)
m_submenu = menu_Analysis.AppendSubMenu(submenu_sample_check, "Sample Orientation")
submenu_toggle_mean_display = wx.Menu()
lines = ["m_%s_toggle_mean = submenu_toggle_mean_display.AppendCheckItem(-1, '&%s', ''); self.Bind(wx.EVT_MENU, self.on_menu_toggle_mean, m_%s_toggle_mean)" % (
f, f, f) for f in self.all_fits_list]
for line in lines:
exec(line)
menu_Analysis.AppendSubMenu(submenu_toggle_mean_display, "Toggle Mean Display")
# -----------------
# Tools Menu
# -----------------
menu_Tools = wx.Menu()
# m_auto_interpret = menu_Tools.Append(-1, "&Auto interpret (alpha version)\tCtrl-A", "")
# self.Bind(wx.EVT_MENU, self.autointerpret, m_auto_interpret)
m_edit_interpretations = menu_Tools.Append(
-1, "&Interpretation editor\tCtrl-E", "")
self.Bind(wx.EVT_MENU, self.on_menu_edit_interpretations,
m_edit_interpretations)
m_view_VGP = menu_Tools.Append(-1, "&View VGPs\tCtrl-Shift-V", "")
self.Bind(wx.EVT_MENU, self.on_menu_view_vgps, m_view_VGP)
# -----------------
# Help Menu
# -----------------
menu_Help = wx.Menu()
m_help = menu_Help.Append(-1, "&Usage and Tips\tCtrl-H", "")
self.Bind(wx.EVT_MENU, self.on_menu_help, m_help)
m_cookbook = menu_Help.Append(-1, "&PmagPy Cookbook\tCtrl-Shift-W", "")
self.Bind(wx.EVT_MENU, self.on_menu_cookbook, m_cookbook)
m_docs = menu_Help.Append(-1, "&Open Docs\tCtrl-Shift-H", "")
self.Bind(wx.EVT_MENU, self.on_menu_docs, m_docs)
m_git = menu_Help.Append(-1, "&Github Page\tCtrl-Shift-G", "")
self.Bind(wx.EVT_MENU, self.on_menu_git, m_git)
m_debug = menu_Help.Append(-1, "&Open Debugger\tCtrl-Shift-D", "")
self.Bind(wx.EVT_MENU, self.on_menu_debug, m_debug)
# -----------------
#self.menubar.Append(menu_preferences, "& Preferences")
self.menubar.Append(menu_file, "&File")
self.menubar.Append(menu_edit, "&Edit")
self.menubar.Append(menu_Analysis, "&Analysis")
self.menubar.Append(menu_Tools, "&Tools")
self.menubar.Append(menu_Help, "&Help")
#self.menubar.Append(menu_Plot, "&Plot")
#self.menubar.Append(menu_results_table, "&Table")
#self.menubar.Append(menu_MagIC, "&MagIC")
self.SetMenuBar(self.menubar) | [
"def",
"create_menu",
"(",
"self",
")",
":",
"self",
".",
"menubar",
"=",
"wx",
".",
"MenuBar",
"(",
")",
"# -----------------",
"# File Menu",
"# -----------------",
"menu_file",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_change_WD",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"Change Working Directory\\tCtrl-W\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_change_working_directory",
",",
"m_change_WD",
")",
"m_import_meas_file",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"Change measurements file\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_import_meas_file",
",",
"m_import_meas_file",
")",
"m_import_LSQ",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Import Interpretations from LSQ file\\tCtrl-L\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_read_from_LSQ",
",",
"m_import_LSQ",
")",
"m_previous_interpretation",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Import interpretations from a redo file\\tCtrl-R\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_previous_interpretation",
",",
"m_previous_interpretation",
")",
"m_save_interpretation",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save interpretations to a redo file\\tCtrl-S\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_save_interpretation",
",",
"m_save_interpretation",
")",
"m_make_MagIC_results_tables",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save MagIC tables\\tCtrl-Shift-S\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_make_MagIC_results_tables",
",",
"m_make_MagIC_results_tables",
")",
"submenu_save_plots",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_save_zij_plot",
"=",
"submenu_save_plots",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save Zijderveld plot\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_save_Zij_plot",
",",
"m_save_zij_plot",
",",
"\"Zij\"",
")",
"m_save_eq_plot",
"=",
"submenu_save_plots",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save specimen equal area plot\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_save_Eq_plot",
",",
"m_save_eq_plot",
",",
"\"specimen-Eq\"",
")",
"m_save_M_t_plot",
"=",
"submenu_save_plots",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save M-t plot\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_save_M_t_plot",
",",
"m_save_M_t_plot",
",",
"\"M_t\"",
")",
"m_save_high_level",
"=",
"submenu_save_plots",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save high level plot\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_save_high_level",
",",
"m_save_high_level",
",",
"\"Eq\"",
")",
"m_save_all_plots",
"=",
"submenu_save_plots",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Save all plots\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_save_all_figures",
",",
"m_save_all_plots",
")",
"m_new_sub_plots",
"=",
"menu_file",
".",
"AppendSubMenu",
"(",
"submenu_save_plots",
",",
"\"&Save plot\"",
")",
"menu_file",
".",
"AppendSeparator",
"(",
")",
"m_exit",
"=",
"menu_file",
".",
"Append",
"(",
"-",
"1",
",",
"\"E&xit\\tCtrl-Q\"",
",",
"\"Exit\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_exit",
",",
"m_exit",
")",
"# -----------------",
"# Edit Menu",
"# -----------------",
"menu_edit",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_new",
"=",
"menu_edit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&New interpretation\\tCtrl-N\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_btn_add_fit",
",",
"m_new",
")",
"m_delete",
"=",
"menu_edit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Delete interpretation\\tCtrl-D\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_btn_delete_fit",
",",
"m_delete",
")",
"m_next_interp",
"=",
"menu_edit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Next interpretation\\tCtrl-Up\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_next_interp",
",",
"m_next_interp",
")",
"m_previous_interp",
"=",
"menu_edit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Previous interpretation\\tCtrl-Down\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_prev_interp",
",",
"m_previous_interp",
")",
"m_next_specimen",
"=",
"menu_edit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Next Specimen\\tCtrl-Right\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_next_button",
",",
"m_next_specimen",
")",
"m_previous_specimen",
"=",
"menu_edit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Previous Specimen\\tCtrl-Left\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_prev_button",
",",
"m_previous_specimen",
")",
"menu_flag_meas",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_good",
"=",
"menu_flag_meas",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Good Measurement\\tCtrl-Alt-G\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_flag_meas_good",
",",
"m_good",
")",
"m_bad",
"=",
"menu_flag_meas",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Bad Measurement\\tCtrl-Alt-B\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_flag_meas_bad",
",",
"m_bad",
")",
"m_flag_meas",
"=",
"menu_edit",
".",
"AppendSubMenu",
"(",
"menu_flag_meas",
",",
"\"&Flag Measurement Data\"",
")",
"menu_coordinates",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_speci",
"=",
"menu_coordinates",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Specimen Coordinates\\tCtrl-P\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_change_speci_coord",
",",
"m_speci",
")",
"if",
"\"geographic\"",
"in",
"self",
".",
"coordinate_list",
":",
"m_geo",
"=",
"menu_coordinates",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Geographic Coordinates\\tCtrl-G\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_change_geo_coord",
",",
"m_geo",
")",
"if",
"\"tilt-corrected\"",
"in",
"self",
".",
"coordinate_list",
":",
"m_tilt",
"=",
"menu_coordinates",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Tilt-Corrected Coordinates\\tCtrl-T\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_change_tilt_coord",
",",
"m_tilt",
")",
"m_coords",
"=",
"menu_edit",
".",
"AppendSubMenu",
"(",
"menu_coordinates",
",",
"\"&Coordinate Systems\"",
")",
"# -----------------",
"# Analysis Menu",
"# -----------------",
"menu_Analysis",
"=",
"wx",
".",
"Menu",
"(",
")",
"submenu_criteria",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_change_criteria_file",
"=",
"submenu_criteria",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Change acceptance criteria\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_change_criteria",
",",
"m_change_criteria_file",
")",
"m_import_criteria_file",
"=",
"submenu_criteria",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Import criteria file\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_criteria_file",
",",
"m_import_criteria_file",
")",
"m_new_sub",
"=",
"menu_Analysis",
".",
"AppendSubMenu",
"(",
"submenu_criteria",
",",
"\"Acceptance criteria\"",
")",
"menu_flag_fit",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_good_fit",
"=",
"menu_flag_fit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Good Interpretation\\tCtrl-Shift-G\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_flag_fit_good",
",",
"m_good_fit",
")",
"m_bad_fit",
"=",
"menu_flag_fit",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Bad Interpretation\\tCtrl-Shift-B\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_flag_fit_bad",
",",
"m_bad_fit",
")",
"m_flag_fit",
"=",
"menu_Analysis",
".",
"AppendSubMenu",
"(",
"menu_flag_fit",
",",
"\"&Flag Interpretations\"",
")",
"submenu_sample_check",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_check_orient",
"=",
"submenu_sample_check",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Check Sample Orientations\\tCtrl-O\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_check_orient",
",",
"m_check_orient",
")",
"m_mark_samp_bad",
"=",
"submenu_sample_check",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Mark Sample Bad\\tCtrl-.\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_mark_samp_bad",
",",
"m_mark_samp_bad",
")",
"m_mark_samp_good",
"=",
"submenu_sample_check",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Mark Sample Good\\tCtrl-,\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_mark_samp_good",
",",
"m_mark_samp_good",
")",
"m_submenu",
"=",
"menu_Analysis",
".",
"AppendSubMenu",
"(",
"submenu_sample_check",
",",
"\"Sample Orientation\"",
")",
"submenu_toggle_mean_display",
"=",
"wx",
".",
"Menu",
"(",
")",
"lines",
"=",
"[",
"\"m_%s_toggle_mean = submenu_toggle_mean_display.AppendCheckItem(-1, '&%s', ''); self.Bind(wx.EVT_MENU, self.on_menu_toggle_mean, m_%s_toggle_mean)\"",
"%",
"(",
"f",
",",
"f",
")",
"for",
"f",
"in",
"self",
".",
"all_fits_list",
"]",
"for",
"line",
"in",
"lines",
":",
"exec",
"(",
"line",
")",
"menu_Analysis",
".",
"AppendSubMenu",
"(",
"submenu_toggle_mean_display",
",",
"\"Toggle Mean Display\"",
")",
"# -----------------",
"# Tools Menu",
"# -----------------",
"menu_Tools",
"=",
"wx",
".",
"Menu",
"(",
")",
"# m_auto_interpret = menu_Tools.Append(-1, \"&Auto interpret (alpha version)\\tCtrl-A\", \"\")",
"# self.Bind(wx.EVT_MENU, self.autointerpret, m_auto_interpret)",
"m_edit_interpretations",
"=",
"menu_Tools",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Interpretation editor\\tCtrl-E\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_edit_interpretations",
",",
"m_edit_interpretations",
")",
"m_view_VGP",
"=",
"menu_Tools",
".",
"Append",
"(",
"-",
"1",
",",
"\"&View VGPs\\tCtrl-Shift-V\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_view_vgps",
",",
"m_view_VGP",
")",
"# -----------------",
"# Help Menu",
"# -----------------",
"menu_Help",
"=",
"wx",
".",
"Menu",
"(",
")",
"m_help",
"=",
"menu_Help",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Usage and Tips\\tCtrl-H\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_help",
",",
"m_help",
")",
"m_cookbook",
"=",
"menu_Help",
".",
"Append",
"(",
"-",
"1",
",",
"\"&PmagPy Cookbook\\tCtrl-Shift-W\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_cookbook",
",",
"m_cookbook",
")",
"m_docs",
"=",
"menu_Help",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Open Docs\\tCtrl-Shift-H\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_docs",
",",
"m_docs",
")",
"m_git",
"=",
"menu_Help",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Github Page\\tCtrl-Shift-G\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_git",
",",
"m_git",
")",
"m_debug",
"=",
"menu_Help",
".",
"Append",
"(",
"-",
"1",
",",
"\"&Open Debugger\\tCtrl-Shift-D\"",
",",
"\"\"",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_MENU",
",",
"self",
".",
"on_menu_debug",
",",
"m_debug",
")",
"# -----------------",
"#self.menubar.Append(menu_preferences, \"& Preferences\")",
"self",
".",
"menubar",
".",
"Append",
"(",
"menu_file",
",",
"\"&File\"",
")",
"self",
".",
"menubar",
".",
"Append",
"(",
"menu_edit",
",",
"\"&Edit\"",
")",
"self",
".",
"menubar",
".",
"Append",
"(",
"menu_Analysis",
",",
"\"&Analysis\"",
")",
"self",
".",
"menubar",
".",
"Append",
"(",
"menu_Tools",
",",
"\"&Tools\"",
")",
"self",
".",
"menubar",
".",
"Append",
"(",
"menu_Help",
",",
"\"&Help\"",
")",
"#self.menubar.Append(menu_Plot, \"&Plot\")",
"#self.menubar.Append(menu_results_table, \"&Table\")",
"#self.menubar.Append(menu_MagIC, \"&MagIC\")",
"self",
".",
"SetMenuBar",
"(",
"self",
".",
"menubar",
")"
]
| Create the MenuBar for the GUI. The current structure is:
File : Change Working Directory, Import Interpretations from LSQ
file, Import interpretations from a redo file, Save interpretations
to a redo file, Save MagIC tables, Save Plots
Edit : New Interpretation, Delete Interpretation, Next
Interpretation, Previous Interpretation, Next Specimen, Previous
Specimen, Flag Measurement Data, Coordinate Systems
Analysis : Acceptance Criteria, Sample Orientation, Flag
Interpretations
Tools : Interpretation Editor, VGP Viewer
Help : Usage and Tips, PmagPy Cookbook, Open Docs, Github Page, Open
Debugger | [
"Create",
"the",
"MenuBar",
"for",
"the",
"GUI",
"current",
"structure",
"is",
":",
"File",
":",
"Change",
"Working",
"Directory",
"Import",
"Interpretations",
"from",
"LSQ",
"file",
"Import",
"interpretations",
"from",
"a",
"redo",
"file",
"Save",
"interpretations",
"to",
"a",
"redo",
"file",
"Save",
"MagIC",
"tables",
"Save",
"Plots",
"Edit",
":",
"New",
"Interpretation",
"Delete",
"Interpretation",
"Next",
"Interpretation",
"Previous",
"Interpretation",
"Next",
"Specimen",
"Previous",
"Speciemen",
"Flag",
"Measurement",
"Data",
"Coordinate",
"Systems",
"Analysis",
":",
"Acceptance",
"Criteria",
"Sample",
"Orientation",
"Flag",
"Interpretaions",
"Tools",
":",
"Interpretation",
"Editor",
"VGP",
"Viewer",
"Help",
":",
"Usage",
"and",
"Tips",
"PmagPy",
"Cookbook",
"Open",
"Docs",
"Github",
"Page",
"Open",
"Debugger"
]
| python | train |
blockcypher/blockcypher-python | blockcypher/api.py | https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L1904-L1927 | def get_metadata(address=None, tx_hash=None, block_hash=None, api_key=None, private=True, coin_symbol='btc'):
'''
Get metadata using blockcypher's API.
This is data on blockcypher's servers and not embedded into the bitcoin (or other) blockchain.
'''
assert is_valid_coin_symbol(coin_symbol), coin_symbol
assert api_key or not private, 'Cannot see private metadata without an API key'
kwarg = get_valid_metadata_identifier(
coin_symbol=coin_symbol,
address=address,
tx_hash=tx_hash,
block_hash=block_hash,
)
url = make_url(coin_symbol, meta=True, **kwarg)
params = {'token': api_key} if api_key else {'private': 'true'}
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
response_dict = get_valid_json(r)
return response_dict | [
"def",
"get_metadata",
"(",
"address",
"=",
"None",
",",
"tx_hash",
"=",
"None",
",",
"block_hash",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"private",
"=",
"True",
",",
"coin_symbol",
"=",
"'btc'",
")",
":",
"assert",
"is_valid_coin_symbol",
"(",
"coin_symbol",
")",
",",
"coin_symbol",
"assert",
"api_key",
"or",
"not",
"private",
",",
"'Cannot see private metadata without an API key'",
"kwarg",
"=",
"get_valid_metadata_identifier",
"(",
"coin_symbol",
"=",
"coin_symbol",
",",
"address",
"=",
"address",
",",
"tx_hash",
"=",
"tx_hash",
",",
"block_hash",
"=",
"block_hash",
",",
")",
"url",
"=",
"make_url",
"(",
"coin_symbol",
",",
"meta",
"=",
"True",
",",
"*",
"*",
"kwarg",
")",
"params",
"=",
"{",
"'token'",
":",
"api_key",
"}",
"if",
"api_key",
"else",
"{",
"'private'",
":",
"'true'",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"True",
",",
"timeout",
"=",
"TIMEOUT_IN_SECONDS",
")",
"response_dict",
"=",
"get_valid_json",
"(",
"r",
")",
"return",
"response_dict"
]
| Get metadata using blockcypher's API.
This is data on blockcypher's servers and not embedded into the bitcoin (or other) blockchain. | [
"Get",
"metadata",
"using",
"blockcypher",
"s",
"API",
"."
]
| python | train |
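The token/private parameter selection is a small idiom worth isolating; a runnable sketch of just that line:

def metadata_params(api_key=None):
    # Mirrors the record: authenticate with a token when available,
    # otherwise explicitly request the private-metadata behavior.
    return {'token': api_key} if api_key else {'private': 'true'}

assert metadata_params('my-token') == {'token': 'my-token'}
assert metadata_params() == {'private': 'true'}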
timknip/pyswf | swf/stream.py | https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L459-L468 | def readtag_header(self):
""" Read a tag header """
pos = self.tell()
tag_type_and_length = self.readUI16()
tag_length = tag_type_and_length & 0x003f
if tag_length == 0x3f:
# The SWF10 spec says that this is a signed int.
# Shouldn't it be an unsigned int?
tag_length = self.readSI32();
return SWFRecordHeader(tag_type_and_length >> 6, tag_length, self.tell() - pos) | [
"def",
"readtag_header",
"(",
"self",
")",
":",
"pos",
"=",
"self",
".",
"tell",
"(",
")",
"tag_type_and_length",
"=",
"self",
".",
"readUI16",
"(",
")",
"tag_length",
"=",
"tag_type_and_length",
"&",
"0x003f",
"if",
"tag_length",
"==",
"0x3f",
":",
"# The SWF10 spec sez that this is a signed int.",
"# Shouldn't it be an unsigned int?",
"tag_length",
"=",
"self",
".",
"readSI32",
"(",
")",
"return",
"SWFRecordHeader",
"(",
"tag_type_and_length",
">>",
"6",
",",
"tag_length",
",",
"self",
".",
"tell",
"(",
")",
"-",
"pos",
")"
]
| Read a tag header | [
"Read",
"a",
"tag",
"header"
]
| python | train |
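The header packs the tag type into the upper 10 bits of a little-endian UI16 and the length into the lower 6; a length of 0x3f signals a following 32-bit extended length. A standalone sketch of that decoding using the struct module:

import io
import struct

def read_tag_header(stream):
    # UI16 is little-endian in SWF; type in bits 6..15, length in bits 0..5.
    (tag_type_and_length,) = struct.unpack('<H', stream.read(2))
    tag_type = tag_type_and_length >> 6
    tag_length = tag_type_and_length & 0x3F
    if tag_length == 0x3F:
        # Extended form: a signed 32-bit length follows the UI16.
        (tag_length,) = struct.unpack('<i', stream.read(4))
    return tag_type, tag_length

# Tag type 9 (SetBackgroundColor) with a 3-byte body:
packed = struct.pack('<H', (9 << 6) | 3) + b'\xff\xff\xff'
print(read_tag_header(io.BytesIO(packed)))  # (9, 3)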
benoitkugler/abstractDataLibrary | pyDLib/Core/controller.py | https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/controller.py#L322-L339 | def loggin(self, user_id, mdp, autolog):
"""Check mdp and return True it's ok"""
r = sql.abstractRequetesSQL.check_mdp_user(user_id, mdp)
if r():
# update auto-log params
self.autolog[user_id] = autolog and mdp or False
self.modules = self.users[user_id]["modules"] # load modules list
dic = {"autolog": self.autolog, "modules": self.modules}
s = json.dumps(dic, indent=4, ensure_ascii=False)
b = security.protege_data(s, True)
with open("local/init", "wb") as f:
f.write(b)
self.mode_online = True # authorization to execute background tasks
return True
else:
logging.debug("Bad password !") | [
"def",
"loggin",
"(",
"self",
",",
"user_id",
",",
"mdp",
",",
"autolog",
")",
":",
"r",
"=",
"sql",
".",
"abstractRequetesSQL",
".",
"check_mdp_user",
"(",
"user_id",
",",
"mdp",
")",
"if",
"r",
"(",
")",
":",
"# update auto-log params",
"self",
".",
"autolog",
"[",
"user_id",
"]",
"=",
"autolog",
"and",
"mdp",
"or",
"False",
"self",
".",
"modules",
"=",
"self",
".",
"users",
"[",
"user_id",
"]",
"[",
"\"modules\"",
"]",
"# load modules list",
"dic",
"=",
"{",
"\"autolog\"",
":",
"self",
".",
"autolog",
",",
"\"modules\"",
":",
"self",
".",
"modules",
"}",
"s",
"=",
"json",
".",
"dumps",
"(",
"dic",
",",
"indent",
"=",
"4",
",",
"ensure_ascii",
"=",
"False",
")",
"b",
"=",
"security",
".",
"protege_data",
"(",
"s",
",",
"True",
")",
"with",
"open",
"(",
"\"local/init\"",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"b",
")",
"self",
".",
"mode_online",
"=",
"True",
"# authorization to execute bakground tasks",
"return",
"True",
"else",
":",
"logging",
".",
"debug",
"(",
"\"Bad password !\"",
")"
]
| Check mdp and return True if it's ok | [
"Check",
"mdp",
"and",
"return",
"True",
"it",
"s",
"ok"
]
| python | train |
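The persistence step reduces to serialize-then-protect-then-write; a sketch with the encryption stubbed out, since security.protege_data is project-specific:

import json

def protect(data):
    # Stub for security.protege_data: the real version encrypts the string.
    return data.encode('utf-8')

dic = {'autolog': {'user1': 'secret', 'user2': False}, 'modules': ['billing']}
blob = protect(json.dumps(dic, indent=4, ensure_ascii=False))
print(type(blob), len(blob))  # bytes ready to be written to "local/init"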
tjcsl/ion | intranet/apps/files/views.py | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/files/views.py#L102-L122 | def get_authinfo(request):
"""Get authentication info from the encrypted message."""
if (("files_iv" not in request.session) or ("files_text" not in request.session) or ("files_key" not in request.COOKIES)):
return False
"""
Decrypt the password given the SERVER-side IV, SERVER-side
ciphertext, and CLIENT-side key.
See note above on why this is done.
"""
iv = base64.b64decode(request.session["files_iv"])
text = base64.b64decode(request.session["files_text"])
key = base64.b64decode(request.COOKIES["files_key"])
obj = AES.new(key, AES.MODE_CFB, iv)
password = obj.decrypt(text)
username = request.session["filecenter_username"] if "filecenter_username" in request.session else request.user.username
return {"username": username, "password": password} | [
"def",
"get_authinfo",
"(",
"request",
")",
":",
"if",
"(",
"(",
"\"files_iv\"",
"not",
"in",
"request",
".",
"session",
")",
"or",
"(",
"\"files_text\"",
"not",
"in",
"request",
".",
"session",
")",
"or",
"(",
"\"files_key\"",
"not",
"in",
"request",
".",
"COOKIES",
")",
")",
":",
"return",
"False",
"\"\"\"\n Decrypt the password given the SERVER-side IV, SERVER-side\n ciphertext, and CLIENT-side key.\n\n See note above on why this is done.\n \"\"\"",
"iv",
"=",
"base64",
".",
"b64decode",
"(",
"request",
".",
"session",
"[",
"\"files_iv\"",
"]",
")",
"text",
"=",
"base64",
".",
"b64decode",
"(",
"request",
".",
"session",
"[",
"\"files_text\"",
"]",
")",
"key",
"=",
"base64",
".",
"b64decode",
"(",
"request",
".",
"COOKIES",
"[",
"\"files_key\"",
"]",
")",
"obj",
"=",
"AES",
".",
"new",
"(",
"key",
",",
"AES",
".",
"MODE_CFB",
",",
"iv",
")",
"password",
"=",
"obj",
".",
"decrypt",
"(",
"text",
")",
"username",
"=",
"request",
".",
"session",
"[",
"\"filecenter_username\"",
"]",
"if",
"\"filecenter_username\"",
"in",
"request",
".",
"session",
"else",
"request",
".",
"user",
".",
"username",
"return",
"{",
"\"username\"",
":",
"username",
",",
"\"password\"",
":",
"password",
"}"
]
| Get authentication info from the encrypted message. | [
"Get",
"authentication",
"info",
"from",
"the",
"encrypted",
"message",
"."
]
| python | train |
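A round-trip sketch of the split-secret scheme, assuming the PyCryptodome AES API (pip install pycryptodome); the key and IV here are freshly generated placeholders, not real session values:

import base64
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)   # client-side cookie half
iv = get_random_bytes(16)    # server-side session half
ciphertext = AES.new(key, AES.MODE_CFB, iv).encrypt(b'hunter2')

# Each piece travels base64-encoded, as in the record above.
stored = {k: base64.b64encode(v) for k, v in
          {'files_key': key, 'files_iv': iv, 'files_text': ciphertext}.items()}

password = AES.new(base64.b64decode(stored['files_key']),
                   AES.MODE_CFB,
                   base64.b64decode(stored['files_iv'])
                   ).decrypt(base64.b64decode(stored['files_text']))
print(password)  # b'hunter2'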
Alveo/pyalveo | pyalveo/pyalveo.py | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L947-L959 | def get_items(self, collection_uri):
"""Return all items in this collection.
:param collection_uri: The URI that references the collection
:type collection_uri: String
:rtype: List
:returns: a list of the URIs of the items in this collection
"""
cname = os.path.split(collection_uri)[1]
return self.search_metadata("collection_name:%s" % cname) | [
"def",
"get_items",
"(",
"self",
",",
"collection_uri",
")",
":",
"cname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"collection_uri",
")",
"[",
"1",
"]",
"return",
"self",
".",
"search_metadata",
"(",
"\"collection_name:%s\"",
"%",
"cname",
")"
]
| Return all items in this collection.
:param collection_uri: The URI that references the collection
:type collection_uri: String
:rtype: List
:returns: a list of the URIs of the items in this collection | [
"Return",
"all",
"items",
"in",
"this",
"collection",
"."
]
| python | train |
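The collection name is simply the last path segment of the URI; the query-building step in isolation (the URI below is a hypothetical example):

import os

collection_uri = 'https://app.alveo.edu.au/catalog/cooee'
cname = os.path.split(collection_uri)[1]
print('collection_name:%s' % cname)  # collection_name:cooee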
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L114-L132 | def transform(self, X, y=None):
"""Generate the random hidden layer's activations given X as input.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data to transform
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array of shape [n_samples, n_components]
"""
if (self.components_ is None):
raise ValueError('No components initialized')
return self._compute_hidden_activations(X) | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"(",
"self",
".",
"components_",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'No components initialized'",
")",
"return",
"self",
".",
"_compute_hidden_activations",
"(",
"X",
")"
]
| Generate the random hidden layer's activations given X as input.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data to transform
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array of shape [n_samples, n_components] | [
"Generate",
"the",
"random",
"hidden",
"layer",
"s",
"activations",
"given",
"X",
"as",
"input",
"."
]
| python | train |
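A hedged usage sketch; the constructor arguments are assumed from the surrounding module and may differ by version:

import numpy as np
from sklearn_extensions.extreme_learning_machines.random_layer import RandomLayer

X = np.random.RandomState(0).randn(10, 4)
layer = RandomLayer(n_hidden=8, random_state=0)
H = layer.fit(X).transform(X)  # fit() initializes components_ first
print(H.shape)                 # expected: (10, 8)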
wummel/linkchecker | third_party/dnspython/dns/query.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/query.py#L48-L73 | def _poll_for(fd, readable, writable, error, timeout):
"""
@param fd: File descriptor (int).
@param readable: Whether to wait for readability (bool).
@param writable: Whether to wait for writability (bool).
@param timeout: Deadline timeout (expiration time, in seconds (float)).
@return True on success, False on timeout
"""
event_mask = 0
if readable:
event_mask |= select.POLLIN
if writable:
event_mask |= select.POLLOUT
if error:
event_mask |= select.POLLERR
pollable = select.poll()
pollable.register(fd, event_mask)
if timeout:
event_list = pollable.poll(long(timeout * 1000))
else:
event_list = pollable.poll()
return bool(event_list) | [
"def",
"_poll_for",
"(",
"fd",
",",
"readable",
",",
"writable",
",",
"error",
",",
"timeout",
")",
":",
"event_mask",
"=",
"0",
"if",
"readable",
":",
"event_mask",
"|=",
"select",
".",
"POLLIN",
"if",
"writable",
":",
"event_mask",
"|=",
"select",
".",
"POLLOUT",
"if",
"error",
":",
"event_mask",
"|=",
"select",
".",
"POLLERR",
"pollable",
"=",
"select",
".",
"poll",
"(",
")",
"pollable",
".",
"register",
"(",
"fd",
",",
"event_mask",
")",
"if",
"timeout",
":",
"event_list",
"=",
"pollable",
".",
"poll",
"(",
"long",
"(",
"timeout",
"*",
"1000",
")",
")",
"else",
":",
"event_list",
"=",
"pollable",
".",
"poll",
"(",
")",
"return",
"bool",
"(",
"event_list",
")"
]
| @param fd: File descriptor (int).
@param readable: Whether to wait for readability (bool).
@param writable: Whether to wait for writability (bool).
@param timeout: Deadline timeout (expiration time, in seconds (float)).
@return True on success, False on timeout | [
"@param",
"fd",
":",
"File",
"descriptor",
"(",
"int",
")",
".",
"@param",
"readable",
":",
"Whether",
"to",
"wait",
"for",
"readability",
"(",
"bool",
")",
".",
"@param",
"writable",
":",
"Whether",
"to",
"wait",
"for",
"writability",
"(",
"bool",
")",
".",
"@param",
"expiration",
":",
"Deadline",
"timeout",
"(",
"expiration",
"time",
"in",
"seconds",
"(",
"float",
"))",
"."
]
| python | train |
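A runnable illustration of the same poll pattern on a socket pair (select.poll is POSIX-only):

import select
import socket

r, w = socket.socketpair()
w.send(b'ping')

poller = select.poll()
poller.register(r.fileno(), select.POLLIN)
events = poller.poll(1000)  # milliseconds, hence the * 1000 in the record
print(bool(events))         # True: the read end is readable
r.close()
w.close()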
sorgerlab/indra | indra/tools/executable_subnetwork.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/executable_subnetwork.py#L47-L70 | def _filter_statements(statements, agents):
"""Return INDRA Statements which have Agents in the given list.
Only statements are returned in which all appearing Agents are in the
agents list.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to filter.
agents : list[str]
A list of agent names that need to appear in filtered statements.
Returns
-------
filtered_statements : list[indra.statements.Statement]
The list of filtered INDRA Statements.
"""
filtered_statements = []
for s in statements:
if all([a is not None for a in s.agent_list()]) and \
all([a.name in agents for a in s.agent_list()]):
filtered_statements.append(s)
return filtered_statements | [
"def",
"_filter_statements",
"(",
"statements",
",",
"agents",
")",
":",
"filtered_statements",
"=",
"[",
"]",
"for",
"s",
"in",
"stmts",
":",
"if",
"all",
"(",
"[",
"a",
"is",
"not",
"None",
"for",
"a",
"in",
"s",
".",
"agent_list",
"(",
")",
"]",
")",
"and",
"all",
"(",
"[",
"a",
".",
"name",
"in",
"agents",
"for",
"a",
"in",
"s",
".",
"agent_list",
"(",
")",
"]",
")",
":",
"filtered_statements",
".",
"append",
"(",
"s",
")",
"return",
"filtered_statements"
]
| Return INDRA Statements which have Agents in the given list.
Only statements are returned in which all appearing Agents are in the
agents list.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to filter.
agents : list[str]
A list of agent names that need to appear in filtered statements.
Returns
-------
filtered_statements : list[indra.statements.Statement]
The list of filtered INDRA Statements. | [
"Return",
"INDRA",
"Statements",
"which",
"have",
"Agents",
"in",
"the",
"given",
"list",
"."
]
| python | train |
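The filter reads cleanly as a comprehension over minimal stand-in classes; the Agent and Statement types here are illustrative, not INDRA's real ones:

from collections import namedtuple

Agent = namedtuple('Agent', 'name')

class Statement(object):
    # Minimal stand-in for INDRA's Statement, for illustration only.
    def __init__(self, *agents):
        self._agents = list(agents)
    def agent_list(self):
        return self._agents

statements = [Statement(Agent('MAP2K1'), Agent('MAPK1')),
              Statement(Agent('MAP2K1'), Agent('JUN'))]
agents = ['MAP2K1', 'MAPK1']
filtered = [s for s in statements
            if all(a is not None and a.name in agents for a in s.agent_list())]
print(len(filtered))  # 1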
idlesign/uwsgiconf | uwsgiconf/options/networking.py | https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/networking.py#L56-L81 | def set_socket_params(
self, send_timeout=None, keep_alive=None, no_defer_accept=None,
buffer_send=None, buffer_receive=None):
"""Sets common socket params.
:param int send_timeout: Send (write) timeout in seconds.
:param bool keep_alive: Enable TCP KEEPALIVEs.
:param bool no_defer_accept: Disable deferred ``accept()`` on sockets
by default (where available) uWSGI will defer the accept() of requests until some data
is sent by the client (this is a security/performance measure).
If you want to disable this feature for some reason, specify this option.
:param int buffer_send: Set SO_SNDBUF (bytes).
:param int buffer_receive: Set SO_RCVBUF (bytes).
"""
self._set('so-send-timeout', send_timeout)
self._set('so-keepalive', keep_alive, cast=bool)
self._set('no-defer-accept', no_defer_accept, cast=bool)
self._set('socket-sndbuf', buffer_send)
self._set('socket-rcvbuf', buffer_receive)
return self._section | [
"def",
"set_socket_params",
"(",
"self",
",",
"send_timeout",
"=",
"None",
",",
"keep_alive",
"=",
"None",
",",
"no_defer_accept",
"=",
"None",
",",
"buffer_send",
"=",
"None",
",",
"buffer_receive",
"=",
"None",
")",
":",
"self",
".",
"_set",
"(",
"'so-send-timeout'",
",",
"send_timeout",
")",
"self",
".",
"_set",
"(",
"'so-keepalive'",
",",
"keep_alive",
",",
"cast",
"=",
"bool",
")",
"self",
".",
"_set",
"(",
"'no-defer-accept'",
",",
"no_defer_accept",
",",
"cast",
"=",
"bool",
")",
"self",
".",
"_set",
"(",
"'socket-sndbuf'",
",",
"buffer_send",
")",
"self",
".",
"_set",
"(",
"'socket-rcvbuf'",
",",
"buffer_receive",
")",
"return",
"self",
".",
"_section"
]
| Sets common socket params.
:param int send_timeout: Send (write) timeout in seconds.
:param bool keep_alive: Enable TCP KEEPALIVEs.
:param bool no_defer_accept: Disable deferred ``accept()`` on sockets
by default (where available) uWSGI will defer the accept() of requests until some data
is sent by the client (this is a security/performance measure).
If you want to disable this feature for some reason, specify this option.
:param int buffer_send: Set SO_SNDBUF (bytes).
:param int buffer_receive: Set SO_RCVBUF (bytes). | [
"Sets",
"common",
"socket",
"params",
"."
]
| python | train |
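A hedged usage sketch; the import path and the networking group attribute are assumed from the uwsgiconf package layout and may differ by version:

from uwsgiconf.config import Section  # assumed import path

section = Section()
section.networking.set_socket_params(
    send_timeout=30,
    keep_alive=True,
    buffer_send=65536,
)
print(section)  # renders so-send-timeout, so-keepalive, socket-sndbuf options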
project-rig/rig | rig/machine_control/machine_controller.py | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1590-L1646 | def wait_for_cores_to_reach_state(self, state, count, app_id,
poll_interval=0.1, timeout=None):
"""Block until the specified number of cores reach the specified state.
This is a simple utility-wrapper around the
:py:meth:`.count_cores_in_state` method which polls the machine until
(at least) the supplied number of cores has reached the specified
state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). As a result, in uncommon-but-possible circumstances,
this function may never exit. Users should treat this function with
caution. Future versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState`
The state to wait for cores to enter. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string.
count : int
The (minimum) number of cores that must reach the specified state before this
method terminates.
poll_interval : float
Number of seconds between state counting requests sent to the
machine.
timeout : float or None
Maximum number of seconds which may elapse before giving up. If
None, keep trying forever.
Returns
-------
int
The number of cores in the given state (which will be less than the
number required if the method timed out).
"""
if timeout is not None:
timeout_time = time.time() + timeout
while True:
cur_count = self.count_cores_in_state(state, app_id)
if cur_count >= count:
break
# Stop if timeout elapsed
if timeout is not None and time.time() > timeout_time:
break
# Pause before retrying
time.sleep(poll_interval)
return cur_count | [
"def",
"wait_for_cores_to_reach_state",
"(",
"self",
",",
"state",
",",
"count",
",",
"app_id",
",",
"poll_interval",
"=",
"0.1",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"timeout_time",
"=",
"time",
".",
"time",
"(",
")",
"+",
"timeout",
"while",
"True",
":",
"cur_count",
"=",
"self",
".",
"count_cores_in_state",
"(",
"state",
",",
"app_id",
")",
"if",
"cur_count",
">=",
"count",
":",
"break",
"# Stop if timeout elapsed",
"if",
"timeout",
"is",
"not",
"None",
"and",
"time",
".",
"time",
"(",
")",
">",
"timeout_time",
":",
"break",
"# Pause before retrying",
"time",
".",
"sleep",
"(",
"poll_interval",
")",
"return",
"cur_count"
]
| Block until the specified number of cores reach the specified state.
This is a simple utility-wrapper around the
:py:meth:`.count_cores_in_state` method which polls the machine until
(at least) the supplied number of cores has reached the specified
state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). As a result, in uncommon-but-possible circumstances,
this function may never exit. Users should treat this function with
caution. Future versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState`
The state to wait for cores to enter. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string.
count : int
The (minimum) number of cores that must reach the specified state before this
method terminates.
poll_interval : float
Number of seconds between state counting requests sent to the
machine.
timeout : float or None
Maximum number of seconds which may elapse before giving up. If
None, keep trying forever.
Returns
-------
int
The number of cores in the given state (which will be less than the
number required if the method timed out). | [
"Block",
"until",
"the",
"specified",
"number",
"of",
"cores",
"reach",
"the",
"specified",
"state",
"."
]
| python | train |
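The control flow generalizes to any poll-until-condition loop; a standalone sketch of the same shape:

import time

def wait_until(predicate, poll_interval=0.1, timeout=None):
    # Same poll-until-done-or-deadline structure as the method above.
    deadline = None if timeout is None else time.time() + timeout
    while True:
        result = predicate()
        if result:
            break
        if deadline is not None and time.time() > deadline:
            break
        time.sleep(poll_interval)
    return result

print(wait_until(lambda: True))               # immediate success
print(wait_until(lambda: False, 0.01, 0.05))  # gives up after ~50 ms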
mlenzen/collections-extended | collections_extended/setlists.py | https://github.com/mlenzen/collections-extended/blob/ee9e86f6bbef442dbebcb3a5970642c5c969e2cf/collections_extended/setlists.py#L506-L515 | def symmetric_difference_update(self, other):
"""Update self to include only the symmetric difference with other."""
other = setlist(other)
indices_to_delete = set()
for i, item in enumerate(self):
if item in other:
indices_to_delete.add(i)
for item in other:
self.add(item)
self._delete_values_by_index(indices_to_delete) | [
"def",
"symmetric_difference_update",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"setlist",
"(",
"other",
")",
"indices_to_delete",
"=",
"set",
"(",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"item",
"in",
"other",
":",
"indices_to_delete",
".",
"add",
"(",
"i",
")",
"for",
"item",
"in",
"other",
":",
"self",
".",
"add",
"(",
"item",
")",
"self",
".",
"_delete_values_by_index",
"(",
"indices_to_delete",
")"
]
| Update self to include only the symmetric difference with other. | [
"Update",
"self",
"to",
"include",
"only",
"the",
"symmetric",
"difference",
"with",
"other",
"."
]
| python | train |
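The semantics match the built-in set method, with setlist additionally preserving insertion order; the plain-set behavior for reference:

s = {'a', 'b', 'c', 'd'}
s.symmetric_difference_update({'c', 'd', 'e', 'f'})
print(sorted(s))  # ['a', 'b', 'e', 'f'] -- common items dropped, new ones added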
aws/sagemaker-python-sdk | src/sagemaker/predictor.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/predictor.py#L131-L146 | def delete_model(self):
"""Deletes the Amazon SageMaker models backing this predictor.
"""
request_failed = False
failed_models = []
for model_name in self._model_names:
try:
self.sagemaker_session.delete_model(model_name)
except Exception: # pylint: disable=broad-except
request_failed = True
failed_models.append(model_name)
if request_failed:
raise Exception('One or more models cannot be deleted, please retry. \n'
'Failed models: {}'.format(', '.join(failed_models))) | [
"def",
"delete_model",
"(",
"self",
")",
":",
"request_failed",
"=",
"False",
"failed_models",
"=",
"[",
"]",
"for",
"model_name",
"in",
"self",
".",
"_model_names",
":",
"try",
":",
"self",
".",
"sagemaker_session",
".",
"delete_model",
"(",
"model_name",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"request_failed",
"=",
"True",
"failed_models",
".",
"append",
"(",
"model_name",
")",
"if",
"request_failed",
":",
"raise",
"Exception",
"(",
"'One or more models cannot be deleted, please retry. \\n'",
"'Failed models: {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"failed_models",
")",
")",
")"
]
| Deletes the Amazon SageMaker models backing this predictor. | [
"Deletes",
"the",
"Amazon",
"SageMaker",
"models",
"backing",
"this",
"predictor",
"."
]
| python | train |
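The try-all-then-report error handling is reusable on its own; a generic sketch with a hypothetical flaky backend:

def delete_all(names, delete_one):
    # Same aggregate-then-raise shape: attempt every deletion, then
    # report all failures at once instead of stopping at the first.
    failed = []
    for name in names:
        try:
            delete_one(name)
        except Exception:
            failed.append(name)
    if failed:
        raise Exception('One or more models cannot be deleted, please retry. \n'
                        'Failed models: {}'.format(', '.join(failed)))

def flaky_delete(name):
    if name == 'model-b':
        raise RuntimeError('backend error')

try:
    delete_all(['model-a', 'model-b'], flaky_delete)
except Exception as exc:
    print(exc)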
SeattleTestbed/seash | seash_helper.py | https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/seash_helper.py#L79-L109 | def local_updatetime(port):
"""
<Purpose>
Callback for time_interface.r2py to update the time that is used
internally for nodemanager communications.
<Arguments>
port:
The port to update on. This is not used however. It is only
specified to adhere to the function signature expected by
time_interface.r2py.
<Side Effects>
If we reach this function, then it means that other time server updates
failed. We will notify the user of the failure, and set time.r2py to
use the local clock.
<Exceptions>
None
<Returns>
None
"""
print 'Time update failed, could not connect to any time servers...'
print 'Your network connection may be down.'
print "Falling back to using your computer's local clock."
print
# time.time() gives us the # of seconds since 1970, whereas the NTP
# services gives us the # of seconds since 1900.
time.time_settime(pythontime.time() + time.time_seconds_from_1900_to_1970) | [
"def",
"local_updatetime",
"(",
"port",
")",
":",
"print",
"'Time update failed, could not connect to any time servers...'",
"print",
"'Your network connection may be down.'",
"print",
"\"Falling back to using your computer's local clock.\"",
"print",
"# time.time() gives us the # of seconds since 1970, whereas the NTP",
"# services gives us the # of seconds since 1900.",
"time",
".",
"time_settime",
"(",
"pythontime",
".",
"time",
"(",
")",
"+",
"time",
".",
"time_seconds_from_1900_to_1970",
")"
]
| <Purpose>
Callback for time_interface.r2py to update the time that is used
internally for nodemanager communications.
<Arguments>
port:
The port to update on. This is not used however. It is only
specified to adhere to the function signature expected by
time_interface.r2py.
<Side Effects>
If we reach this function, then it means that other time server updates
failed. We will notify the user of the failure, and set time.r2py to
use the local clock.
<Exceptions>
None
<Returns>
None | [
"<Purpose",
">",
"Callback",
"for",
"time_interface",
".",
"r2py",
"to",
"update",
"the",
"time",
"that",
"is",
"used",
"internally",
"for",
"nodemanager",
"communications",
"."
]
| python | train |
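The 1900-to-1970 offset the fallback relies on is easy to verify:

import datetime

delta = datetime.datetime(1970, 1, 1) - datetime.datetime(1900, 1, 1)
print(int(delta.total_seconds()))  # 2208988800 -- the NTP-to-Unix epoch offset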
inveniosoftware-attic/invenio-utils | invenio_utils/html.py | https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/html.py#L498-L658 | def get_html_text_editor(
name,
id=None,
content='',
textual_content=None,
width='300px',
height='200px',
enabled=True,
file_upload_url=None,
toolbar_set="Basic",
custom_configurations_path='/js/ckeditor/invenio-ckeditor-config.js',
ln=None):
"""
Returns a wysiwyg editor (CKEditor) to embed in html pages.
Fall back to a simple textarea when the library is not installed,
or when the user's browser is not compatible with the editor, or
when 'enable' is False, or when javascript is not enabled.
NOTE that the output also contains a hidden field named
'editor_type' that contains the kind of editor used, 'textarea' or
'ckeditor'.
Based on 'editor_type' you might want to take different actions,
like replace CRLF with <br/> when editor_type equals to
'textarea', but not when editor_type equals to 'ckeditor'.
@param name: *str* the name attribute of the returned editor
@param id: *str* the id attribute of the returned editor (when
applicable)
@param content: *str* the default content of the editor.
@param textual_content: *str* a content formatted for the case where the
wysiwyg editor is not available for user. When not
specified, use value of 'content'
@param width: *str* width of the editor in an html compatible unit:
Eg: '400px', '50%'.
@param height: *str* height of the editor in an html compatible unit:
Eg: '400px', '50%'.
@param enabled: *bool* if the wysiwyg editor is returned (True) or if a
simple textarea is returned (False)
@param file_upload_url: *str* the URL used to upload new files via the
editor upload panel. You have to implement the
handler for your own use. The URL handler will get
form variables 'File' as POST for the uploaded file,
and 'Type' as GET for the type of file ('file',
'image', 'flash', 'media')
When value is not given, the file upload is disabled.
@param toolbar_set: *str* the name of the toolbar layout to
use. CKeditor comes by default with 'Basic' and
'Default'. To define other sets, customize the
config file in
/opt/cds-invenio/var/www/ckeditor/invenio-ckconfig.js
@param custom_configurations_path: *str* value for the CKeditor config
variable 'CustomConfigurationsPath',
which allows to specify the path of a
file that contains a custom configuration
for the editor. The path is relative to
/opt/invenio/var/www/
@return: the HTML markup of the editor
"""
ln = default_ln(ln)
if textual_content is None:
textual_content = content
editor = ''
if enabled and ckeditor_available:
# Prepare upload path settings
file_upload_script = ''
if file_upload_url is not None:
file_upload_script = ''',
filebrowserLinkUploadUrl: '%(file_upload_url)s',
filebrowserImageUploadUrl: '%(file_upload_url)s?type=Image',
filebrowserFlashUploadUrl: '%(file_upload_url)s?type=Flash'
''' % {'file_upload_url': file_upload_url}
# Prepare code to instantiate an editor
editor += '''
<script type="text/javascript" language="javascript">//<![CDATA[
/* Load the script only once, or else multiple instance of the editor on the same page will not work */
var INVENIO_CKEDITOR_ALREADY_LOADED
if (INVENIO_CKEDITOR_ALREADY_LOADED != 1) {
document.write('<script type="text/javascript" src="%(CFG_SITE_URL)s/vendors/ckeditor/ckeditor.js"><\/script>');
INVENIO_CKEDITOR_ALREADY_LOADED = 1;
}
//]]></script>
<input type="hidden" name="editor_type" id="%(id)seditortype" value="textarea" />
<textarea rows="100" cols="80" id="%(id)s" name="%(name)s" style="width:%(width)s;height:%(height)s">%(textual_content)s</textarea>
<textarea rows="100" cols="80" id="%(id)shtmlvalue" name="%(name)shtmlvalue" style="display:none;width:%(width)s;height:%(height)s">%(html_content)s</textarea>
<script type="text/javascript">//<![CDATA[
var CKEDITOR_BASEPATH = '/ckeditor/';
CKEDITOR.replace( '%(name)s',
{customConfig: '%(custom_configurations_path)s',
toolbar: '%(toolbar)s',
width: '%(width)s',
height:'%(height)s',
language: '%(ln)s'
%(file_upload_script)s
});
CKEDITOR.on('instanceReady',
function( evt )
{
/* If CKeditor was correctly loaded, display the nice HTML representation */
var oEditor = evt.editor;
editor_id = oEditor.id
editor_name = oEditor.name
var html_editor = document.getElementById(editor_name + 'htmlvalue');
oEditor.setData(html_editor.value);
var editor_type_field = document.getElementById(editor_name + 'editortype');
editor_type_field.value = 'ckeditor';
var writer = oEditor.dataProcessor.writer;
writer.indentationChars = ''; /*Do not indent source code with tabs*/
oEditor.resetDirty();
/* Workaround: http://dev.ckeditor.com/ticket/3674 */
evt.editor.on( 'contentDom', function( ev )
{
ev.removeListener();
evt.editor.resetDirty();
} );
/* End workaround */
})
//]]></script>
''' % \
{'textual_content': cgi.escape(textual_content),
'html_content': content,
'width': width,
'height': height,
'name': name,
'id': id or name,
'custom_configurations_path': custom_configurations_path,
'toolbar': toolbar_set,
'file_upload_script': file_upload_script,
'CFG_SITE_URL': cfg['CFG_SITE_URL'],
'ln': ln}
else:
# CKeditor is not installed
textarea = '<textarea rows="100" cols="80" %(id)s name="%(name)s" style="width:%(width)s;height:%(height)s">%(content)s</textarea>' \
% {'content': cgi.escape(textual_content),
'width': width,
'height': height,
'name': name,
'id': id and ('id="%s"' % id) or ''}
editor += textarea
editor += '<input type="hidden" name="editor_type" value="textarea" />'
return editor | [
"def",
"get_html_text_editor",
"(",
"name",
",",
"id",
"=",
"None",
",",
"content",
"=",
"''",
",",
"textual_content",
"=",
"None",
",",
"width",
"=",
"'300px'",
",",
"height",
"=",
"'200px'",
",",
"enabled",
"=",
"True",
",",
"file_upload_url",
"=",
"None",
",",
"toolbar_set",
"=",
"\"Basic\"",
",",
"custom_configurations_path",
"=",
"'/js/ckeditor/invenio-ckeditor-config.js'",
",",
"ln",
"=",
"None",
")",
":",
"ln",
"=",
"default_ln",
"(",
"ln",
")",
"if",
"textual_content",
"is",
"None",
":",
"textual_content",
"=",
"content",
"editor",
"=",
"''",
"if",
"enabled",
"and",
"ckeditor_available",
":",
"# Prepare upload path settings",
"file_upload_script",
"=",
"''",
"if",
"file_upload_url",
"is",
"not",
"None",
":",
"file_upload_script",
"=",
"''',\n filebrowserLinkUploadUrl: '%(file_upload_url)s',\n filebrowserImageUploadUrl: '%(file_upload_url)s?type=Image',\n filebrowserFlashUploadUrl: '%(file_upload_url)s?type=Flash'\n '''",
"%",
"{",
"'file_upload_url'",
":",
"file_upload_url",
"}",
"# Prepare code to instantiate an editor",
"editor",
"+=",
"'''\n <script type=\"text/javascript\" language=\"javascript\">//<![CDATA[\n /* Load the script only once, or else multiple instance of the editor on the same page will not work */\n var INVENIO_CKEDITOR_ALREADY_LOADED\n if (INVENIO_CKEDITOR_ALREADY_LOADED != 1) {\n document.write('<script type=\"text/javascript\" src=\"%(CFG_SITE_URL)s/vendors/ckeditor/ckeditor.js\"><\\/script>');\n INVENIO_CKEDITOR_ALREADY_LOADED = 1;\n }\n //]]></script>\n <input type=\"hidden\" name=\"editor_type\" id=\"%(id)seditortype\" value=\"textarea\" />\n <textarea rows=\"100\" cols=\"80\" id=\"%(id)s\" name=\"%(name)s\" style=\"width:%(width)s;height:%(height)s\">%(textual_content)s</textarea>\n <textarea rows=\"100\" cols=\"80\" id=\"%(id)shtmlvalue\" name=\"%(name)shtmlvalue\" style=\"display:none;width:%(width)s;height:%(height)s\">%(html_content)s</textarea>\n <script type=\"text/javascript\">//<![CDATA[\n var CKEDITOR_BASEPATH = '/ckeditor/';\n\n CKEDITOR.replace( '%(name)s',\n {customConfig: '%(custom_configurations_path)s',\n toolbar: '%(toolbar)s',\n width: '%(width)s',\n height:'%(height)s',\n language: '%(ln)s'\n %(file_upload_script)s\n });\n\n CKEDITOR.on('instanceReady',\n function( evt )\n {\n /* If CKeditor was correctly loaded, display the nice HTML representation */\n var oEditor = evt.editor;\n editor_id = oEditor.id\n editor_name = oEditor.name\n var html_editor = document.getElementById(editor_name + 'htmlvalue');\n oEditor.setData(html_editor.value);\n var editor_type_field = document.getElementById(editor_name + 'editortype');\n editor_type_field.value = 'ckeditor';\n var writer = oEditor.dataProcessor.writer;\n writer.indentationChars = ''; /*Do not indent source code with tabs*/\n oEditor.resetDirty();\n /* Workaround: http://dev.ckeditor.com/ticket/3674 */\n evt.editor.on( 'contentDom', function( ev )\n {\n ev.removeListener();\n evt.editor.resetDirty();\n } );\n /* End workaround */\n })\n\n //]]></script>\n '''",
"%",
"{",
"'textual_content'",
":",
"cgi",
".",
"escape",
"(",
"textual_content",
")",
",",
"'html_content'",
":",
"content",
",",
"'width'",
":",
"width",
",",
"'height'",
":",
"height",
",",
"'name'",
":",
"name",
",",
"'id'",
":",
"id",
"or",
"name",
",",
"'custom_configurations_path'",
":",
"custom_configurations_path",
",",
"'toolbar'",
":",
"toolbar_set",
",",
"'file_upload_script'",
":",
"file_upload_script",
",",
"'CFG_SITE_URL'",
":",
"cfg",
"[",
"'CFG_SITE_URL'",
"]",
",",
"'ln'",
":",
"ln",
"}",
"else",
":",
"# CKedior is not installed",
"textarea",
"=",
"'<textarea rows=\"100\" cols=\"80\" %(id)s name=\"%(name)s\" style=\"width:%(width)s;height:%(height)s\">%(content)s</textarea>'",
"%",
"{",
"'content'",
":",
"cgi",
".",
"escape",
"(",
"textual_content",
")",
",",
"'width'",
":",
"width",
",",
"'height'",
":",
"height",
",",
"'name'",
":",
"name",
",",
"'id'",
":",
"id",
"and",
"(",
"'id=\"%s\"'",
"%",
"id",
")",
"or",
"''",
"}",
"editor",
"+=",
"textarea",
"editor",
"+=",
"'<input type=\"hidden\" name=\"editor_type\" value=\"textarea\" />'",
"return",
"editor"
]
| Returns a wysiwyg editor (CKEditor) to embed in html pages.
Fall back to a simple textarea when the library is not installed,
or when the user's browser is not compatible with the editor, or
when 'enabled' is False, or when javascript is not enabled.
NOTE that the output also contains a hidden field named
'editor_type' that contains the kind of editor used, 'textarea' or
'ckeditor'.
Based on 'editor_type' you might want to take different actions,
like replacing CRLF with <br/> when editor_type equals
'textarea', but not when editor_type equals 'ckeditor'.
@param name: *str* the name attribute of the returned editor
@param id: *str* the id attribute of the returned editor (when
applicable)
@param content: *str* the default content of the editor.
@param textual_content: *str* a content formatted for the case where the
wysiwyg editor is not available for user. When not
specified, use value of 'content'
@param width: *str* width of the editor in an html compatible unit:
Eg: '400px', '50%'.
@param height: *str* height of the editor in an html compatible unit:
Eg: '400px', '50%'.
@param enabled: *bool* if the wysiwyg editor is returned (True) or if a
simple textarea is returned (False)
@param file_upload_url: *str* the URL used to upload new files via the
editor upload panel. You have to implement the
handler for your own use. The URL handler will get
form variables 'File' as POST for the uploaded file,
and 'Type' as GET for the type of file ('file',
'image', 'flash', 'media')
When value is not given, the file upload is disabled.
@param toolbar_set: *str* the name of the toolbar layout to
use. CKeditor comes by default with 'Basic' and
'Default'. To define other sets, customize the
config file in
/opt/cds-invenio/var/www/ckeditor/invenio-ckconfig.js
@param custom_configurations_path: *str* value for the CKeditor config
variable 'CustomConfigurationsPath',
which allows to specify the path of a
file that contains a custom configuration
for the editor. The path is relative to
/opt/invenio/var/www/
@return: the HTML markup of the editor | [
"Returns",
"a",
"wysiwyg",
"editor",
"(",
"CKEditor",
")",
"to",
"embed",
"in",
"html",
"pages",
"."
]
| python | train |
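A minimal, self-contained sketch of the fallback branch above, using the same py2-era cgi.escape the row's code relies on (deprecated in modern Python; html.escape is the current equivalent):

import cgi

# Rebuild the plain-textarea fallback that get_html_text_editor emits
# when CKEditor is unavailable: an escaped <textarea> plus a hidden
# 'editor_type' marker for the submit handler.
name, id_, width, height = 'body', 'msg', '100%', '300px'
textual_content = 'Hello <world>'

textarea = (
    '<textarea rows="100" cols="80" %(id)s name="%(name)s" '
    'style="width:%(width)s;height:%(height)s">%(content)s</textarea>'
    % {'content': cgi.escape(textual_content),
       'width': width, 'height': height, 'name': name,
       'id': 'id="%s"' % id_})
editor = textarea + '<input type="hidden" name="editor_type" value="textarea" />'
print(editor)  # cgi.escape turns <world> into &lt;world&gt;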
cpenv/cpenv | cpenv/resolver.py | https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/resolver.py#L207-L223 | def redirect_resolver(resolver, path):
'''Resolves environment from .cpenv file...recursively walks up the tree
in attempt to find a .cpenv file'''
if not os.path.exists(path):
raise ResolveError
if os.path.isfile(path):
path = os.path.dirname(path)
for root, _, _ in walk_up(path):
if is_redirecting(root):
env_paths = redirect_to_env_paths(unipath(root, '.cpenv'))
r = Resolver(*env_paths)
return r.resolve()
raise ResolveError | [
"def",
"redirect_resolver",
"(",
"resolver",
",",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ResolveError",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"for",
"root",
",",
"_",
",",
"_",
"in",
"walk_up",
"(",
"path",
")",
":",
"if",
"is_redirecting",
"(",
"root",
")",
":",
"env_paths",
"=",
"redirect_to_env_paths",
"(",
"unipath",
"(",
"root",
",",
"'.cpenv'",
")",
")",
"r",
"=",
"Resolver",
"(",
"*",
"env_paths",
")",
"return",
"r",
".",
"resolve",
"(",
")",
"raise",
"ResolveError"
]
| Resolves environment from .cpenv file...recursively walks up the tree
in attempt to find a .cpenv file | [
"Resolves",
"environment",
"from",
".",
"cpenv",
"file",
"...",
"recursively",
"walks",
"up",
"the",
"tree",
"in",
"attempt",
"to",
"find",
"a",
".",
"cpenv",
"file"
]
| python | valid |
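The walk_up and is_redirecting helpers are not shown in this row; below is a self-contained sketch of the upward .cpenv search they imply (helper names and behavior are assumptions, simplified to yield bare paths):

import os

def walk_up(start):
    # Yield directories from start up to the filesystem root.
    path = os.path.abspath(start)
    while True:
        yield path
        parent = os.path.dirname(path)
        if parent == path:  # reached the root
            break
        path = parent

def find_redirect(start):
    # Return the first .cpenv file found while walking upward, else None --
    # a simplified stand-in for is_redirecting() + redirect_to_env_paths().
    for root in walk_up(start):
        candidate = os.path.join(root, '.cpenv')
        if os.path.isfile(candidate):
            return candidate
    return None

print(find_redirect('.'))  # None unless some ancestor holds a .cpenv file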
tjcsl/ion | intranet/apps/announcements/views.py | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/announcements/views.py#L387-L405 | def hide_announcement_view(request):
""" Hide an announcement for the logged-in user.
announcements_hidden in the user model is the related_name for
"users_hidden" in the announcement model.
"""
if request.method == "POST":
announcement_id = request.POST.get("announcement_id")
if announcement_id:
announcement = Announcement.objects.get(id=announcement_id)
try:
announcement.user_map.users_hidden.add(request.user)
announcement.user_map.save()
except IntegrityError:
logger.warning("Duplicate value when hiding announcement {} for {}.".format(announcement_id, request.user.username))
return http.HttpResponse("Hidden")
raise http.Http404
else:
return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED") | [
"def",
"hide_announcement_view",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"announcement_id",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"announcement_id\"",
")",
"if",
"announcement_id",
":",
"announcement",
"=",
"Announcement",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"announcement_id",
")",
"try",
":",
"announcement",
".",
"user_map",
".",
"users_hidden",
".",
"add",
"(",
"request",
".",
"user",
")",
"announcement",
".",
"user_map",
".",
"save",
"(",
")",
"except",
"IntegrityError",
":",
"logger",
".",
"warning",
"(",
"\"Duplicate value when hiding announcement {} for {}.\"",
".",
"format",
"(",
"announcement_id",
",",
"request",
".",
"user",
".",
"username",
")",
")",
"return",
"http",
".",
"HttpResponse",
"(",
"\"Hidden\"",
")",
"raise",
"http",
".",
"Http404",
"else",
":",
"return",
"http",
".",
"HttpResponseNotAllowed",
"(",
"[",
"\"POST\"",
"]",
",",
"\"HTTP 405: METHOD NOT ALLOWED\"",
")"
]
| Hide an announcement for the logged-in user.
announcements_hidden in the user model is the related_name for
"users_hidden" in the announcement model. | [
"Hide",
"an",
"announcement",
"for",
"the",
"logged",
"-",
"in",
"user",
"."
]
| python | train |
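A hypothetical client-side call against this view; the URL path and session cookie are illustrative assumptions, not taken from the row:

import requests

resp = requests.post(
    'https://intranet.example.edu/announcements/hide',  # assumed route
    data={'announcement_id': 42},
    cookies={'sessionid': '...'},  # the view requires an authenticated user
)
if resp.status_code == 200 and resp.text == 'Hidden':
    print('announcement hidden for this user')
elif resp.status_code == 405:
    print('the view only accepts POST')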
hosford42/xcs | xcs/scenarios.py | https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/scenarios.py#L898-L915 | def get_classifications(self):
"""Return the classifications made by the algorithm for this
scenario.
Usage:
model.run(scenario, learn=False)
classifications = scenario.get_classifications()
Arguments: None
Return:
An indexable sequence containing the classifications made by
the model for each situation, in the same order as the original
situations themselves appear.
"""
if bitstrings.using_numpy():
return numpy.array(self.classifications)
else:
return self.classifications | [
"def",
"get_classifications",
"(",
"self",
")",
":",
"if",
"bitstrings",
".",
"using_numpy",
"(",
")",
":",
"return",
"numpy",
".",
"array",
"(",
"self",
".",
"classifications",
")",
"else",
":",
"return",
"self",
".",
"classifications"
]
| Return the classifications made by the algorithm for this
scenario.
Usage:
model.run(scenario, learn=False)
classifications = scenario.get_classifications()
Arguments: None
Return:
An indexable sequence containing the classifications made by
the model for each situation, in the same order as the original
situations themselves appear. | [
"Return",
"the",
"classifications",
"made",
"by",
"the",
"algorithm",
"for",
"this",
"scenario",
"."
]
| python | train |
ttinies/sc2gameLobby | sc2gameLobby/gameConfig.py | https://github.com/ttinies/sc2gameLobby/blob/5352d51d53ddeb4858e92e682da89c4434123e52/sc2gameLobby/gameConfig.py#L489-L504 | def requestCreateDetails(self):
"""add configuration to the SC2 protocol create request"""
createReq = sc_pb.RequestCreateGame( # used to advance to Status.initGame state, when hosting
realtime = self.realtime,
disable_fog = self.fogDisabled,
random_seed = int(time.time()), # a game is created using the current second timestamp as the seed
local_map = sc_pb.LocalMap(map_path=self.mapLocalPath,
map_data=self.mapData))
for player in self.players:
reqPlayer = createReq.player_setup.add() # add new player; get link to settings
playerObj = PlayerPreGame(player)
if playerObj.isComputer:
reqPlayer.difficulty = playerObj.difficulty.gameValue()
reqPlayer.type = c.types.PlayerControls(playerObj.control).gameValue()
reqPlayer.race = playerObj.selectedRace.gameValue()
return createReq # SC2APIProtocol.RequestCreateGame | [
"def",
"requestCreateDetails",
"(",
"self",
")",
":",
"createReq",
"=",
"sc_pb",
".",
"RequestCreateGame",
"(",
"# used to advance to Status.initGame state, when hosting",
"realtime",
"=",
"self",
".",
"realtime",
",",
"disable_fog",
"=",
"self",
".",
"fogDisabled",
",",
"random_seed",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
",",
"# a game is created using the current second timestamp as the seed",
"local_map",
"=",
"sc_pb",
".",
"LocalMap",
"(",
"map_path",
"=",
"self",
".",
"mapLocalPath",
",",
"map_data",
"=",
"self",
".",
"mapData",
")",
")",
"for",
"player",
"in",
"self",
".",
"players",
":",
"reqPlayer",
"=",
"createReq",
".",
"player_setup",
".",
"add",
"(",
")",
"# add new player; get link to settings",
"playerObj",
"=",
"PlayerPreGame",
"(",
"player",
")",
"if",
"playerObj",
".",
"isComputer",
":",
"reqPlayer",
".",
"difficulty",
"=",
"playerObj",
".",
"difficulty",
".",
"gameValue",
"(",
")",
"reqPlayer",
".",
"type",
"=",
"c",
".",
"types",
".",
"PlayerControls",
"(",
"playerObj",
".",
"control",
")",
".",
"gameValue",
"(",
")",
"reqPlayer",
".",
"race",
"=",
"playerObj",
".",
"selectedRace",
".",
"gameValue",
"(",
")",
"return",
"createReq",
"# SC2APIProtocol.RequestCreateGame"
]
| add configuration to the SC2 protocol create request | [
"add",
"configuration",
"to",
"the",
"SC2",
"protocol",
"create",
"request"
]
| python | train |
pandas-dev/pandas | pandas/core/generic.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5932-L5977 | def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(datetime=True, numeric=False,
timedelta=True, coerce=False,
copy=True)).__finalize__(self) | [
"def",
"infer_objects",
"(",
"self",
")",
":",
"# numeric=False necessary to only soft convert;",
"# python objects will still be converted to",
"# native numpy numeric types",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"_data",
".",
"convert",
"(",
"datetime",
"=",
"True",
",",
"numeric",
"=",
"False",
",",
"timedelta",
"=",
"True",
",",
"coerce",
"=",
"False",
",",
"copy",
"=",
"True",
")",
")",
".",
"__finalize__",
"(",
"self",
")"
]
| Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object | [
"Attempt",
"to",
"infer",
"better",
"dtypes",
"for",
"object",
"columns",
"."
]
| python | train |
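infer_objects() works on Series as well as DataFrames; a small runnable example of the soft-conversion behavior:

import pandas as pd

s = pd.Series([1, 2, 3], dtype=object)
print(s.dtype)                  # object
print(s.infer_objects().dtype)  # int64

mixed = pd.Series(['a', 1], dtype=object)
print(mixed.infer_objects().dtype)  # object -- unconvertible, left as-is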
materialsproject/pymatgen | pymatgen/symmetry/analyzer.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1370-L1416 | def symmetrize_molecule(self):
"""Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
are rotated, mirrored... unto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
symmetrized molecule
Args:
None
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule instance.
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``.
"""
eq = self.get_equivalent_atoms()
eq_sets, ops = eq['eq_sets'], eq['sym_ops']
coords = self.centered_mol.cart_coords.copy()
for i, eq_indices in eq_sets.items():
for j in eq_indices:
coords[j] = np.dot(ops[j][i], coords[j])
coords[i] = np.mean(coords[list(eq_indices)], axis=0)
for j in eq_indices:
if j == i:
continue
coords[j] = np.dot(ops[i][j], coords[i])
molecule = Molecule(species=self.centered_mol.species_and_occu,
coords=coords)
return {'sym_mol': molecule,
'eq_sets': eq_sets,
'sym_ops': ops} | [
"def",
"symmetrize_molecule",
"(",
"self",
")",
":",
"eq",
"=",
"self",
".",
"get_equivalent_atoms",
"(",
")",
"eq_sets",
",",
"ops",
"=",
"eq",
"[",
"'eq_sets'",
"]",
",",
"eq",
"[",
"'sym_ops'",
"]",
"coords",
"=",
"self",
".",
"centered_mol",
".",
"cart_coords",
".",
"copy",
"(",
")",
"for",
"i",
",",
"eq_indices",
"in",
"eq_sets",
".",
"items",
"(",
")",
":",
"for",
"j",
"in",
"eq_indices",
":",
"coords",
"[",
"j",
"]",
"=",
"np",
".",
"dot",
"(",
"ops",
"[",
"j",
"]",
"[",
"i",
"]",
",",
"coords",
"[",
"j",
"]",
")",
"coords",
"[",
"i",
"]",
"=",
"np",
".",
"mean",
"(",
"coords",
"[",
"list",
"(",
"eq_indices",
")",
"]",
",",
"axis",
"=",
"0",
")",
"for",
"j",
"in",
"eq_indices",
":",
"if",
"j",
"==",
"i",
":",
"continue",
"coords",
"[",
"j",
"]",
"=",
"np",
".",
"dot",
"(",
"ops",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"coords",
"[",
"i",
"]",
")",
"coords",
"[",
"j",
"]",
"=",
"np",
".",
"dot",
"(",
"ops",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"coords",
"[",
"i",
"]",
")",
"molecule",
"=",
"Molecule",
"(",
"species",
"=",
"self",
".",
"centered_mol",
".",
"species_and_occu",
",",
"coords",
"=",
"coords",
")",
"return",
"{",
"'sym_mol'",
":",
"molecule",
",",
"'eq_sets'",
":",
"eq_sets",
",",
"'sym_ops'",
":",
"ops",
"}"
]
| Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
are rotated, mirrored... unto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
symmetrized molecule
Args:
None
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule instance.
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``. | [
"Returns",
"a",
"symmetrized",
"molecule"
]
| python | train |
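A usage sketch for the method above; the import path matches pymatgen releases of this era, and the water geometry is illustrative, not from the row:

from pymatgen import Molecule
from pymatgen.symmetry.analyzer import PointGroupAnalyzer

# Symmetrize a slightly distorted water molecule.
mol = Molecule(['O', 'H', 'H'],
               [[0.000, 0.000, 0.00],
                [0.757, 0.586, 0.00],
                [-0.757, 0.586, 0.01]])  # small distortion on one H
result = PointGroupAnalyzer(mol).symmetrize_molecule()
sym_mol = result['sym_mol']  # averaged, symmetry-consistent geometry
eq_sets = result['eq_sets']  # maps a representative atom index to its equivalents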
DLR-RM/RAFCON | share/examples/plugins/templates/core_template_observer.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/share/examples/plugins/templates/core_template_observer.py#L47-L55 | def register_states_of_state_machine(self, state_machine):
""" This functions registers all states of state machine.
:param state_machine: the state machine to register all states of
:return:
"""
root = state_machine.root_state
root.add_observer(self, "state_execution_status",
notify_after_function=self.on_state_execution_status_changed_after)
self.recursively_register_child_states(root) | [
"def",
"register_states_of_state_machine",
"(",
"self",
",",
"state_machine",
")",
":",
"root",
"=",
"state_machine",
".",
"root_state",
"root",
".",
"add_observer",
"(",
"self",
",",
"\"state_execution_status\"",
",",
"notify_after_function",
"=",
"self",
".",
"on_state_execution_status_changed_after",
")",
"self",
".",
"recursively_register_child_states",
"(",
"root",
")"
]
| This function registers all states of the state machine.
:param state_machine: the state machine to register all states of
:return: | [
"This",
"functions",
"registers",
"all",
"states",
"of",
"state",
"machine",
".",
":",
"param",
"state_machine",
":",
"the",
"state",
"machine",
"to",
"register",
"all",
"states",
"of",
":",
"return",
":"
]
| python | train |
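A self-contained sketch of the recursive registration pattern this plugin uses; the State class below is a stand-in for illustration, not RAFCON's:

class State(object):
    def __init__(self, name, children=()):
        self.name = name
        self.states = {c.name: c for c in children}
        self.observers = []

    def add_observer(self, observer, prop, notify_after_function=None):
        self.observers.append((observer, prop, notify_after_function))

def recursively_register(observer, state, callback):
    # Register on this state, then descend into every child state.
    state.add_observer(observer, 'state_execution_status',
                       notify_after_function=callback)
    for child in state.states.values():
        recursively_register(observer, child, callback)

root = State('root', [State('a', [State('a1')]), State('b')])
recursively_register(object(), root, callback=None)
print(len(root.states['a'].states['a1'].observers))  # 1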
saltstack/salt | salt/utils/vmware.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L125-L179 | def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
'''
Shell out and call the specified esxcli command, parse the result
and return something sane.
:param host: ESXi or vCenter host to connect to
:param user: User to connect as, usually root
:param pwd: Password to connect with
:param port: TCP port
:param cmd: esxcli command and arguments
:param esxi_host: If `host` is a vCenter host, then esxi_host is the
ESXi machine on which to execute this command
:param credstore: Optional path to the credential store file
:return: Dictionary
'''
esx_cmd = salt.utils.path.which('esxcli')
if not esx_cmd:
log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
return False
# Set default port and protocol if none are provided.
if port is None:
port = 443
if protocol is None:
protocol = 'https'
if credstore:
esx_cmd += ' --credstore \'{0}\''.format(credstore)
if not esxi_host:
# Then we are connecting directly to an ESXi server,
# 'host' points at that server, and esxi_host is a reference to the
# ESXi instance we are manipulating
esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
'--protocol={3} --portnumber={4} {5}'.format(host,
user,
pwd,
protocol,
port,
cmd)
else:
esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
'--protocol={4} --portnumber={5} {6}'.format(host,
esxi_host,
user,
pwd,
protocol,
port,
cmd)
ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
return ret | [
"def",
"esxcli",
"(",
"host",
",",
"user",
",",
"pwd",
",",
"cmd",
",",
"protocol",
"=",
"None",
",",
"port",
"=",
"None",
",",
"esxi_host",
"=",
"None",
",",
"credstore",
"=",
"None",
")",
":",
"esx_cmd",
"=",
"salt",
".",
"utils",
".",
"path",
".",
"which",
"(",
"'esxcli'",
")",
"if",
"not",
"esx_cmd",
":",
"log",
".",
"error",
"(",
"'Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.'",
")",
"return",
"False",
"# Set default port and protocol if none are provided.",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"443",
"if",
"protocol",
"is",
"None",
":",
"protocol",
"=",
"'https'",
"if",
"credstore",
":",
"esx_cmd",
"+=",
"' --credstore \\'{0}\\''",
".",
"format",
"(",
"credstore",
")",
"if",
"not",
"esxi_host",
":",
"# Then we are connecting directly to an ESXi server,",
"# 'host' points at that server, and esxi_host is a reference to the",
"# ESXi instance we are manipulating",
"esx_cmd",
"+=",
"' -s {0} -u {1} -p \\'{2}\\' '",
"'--protocol={3} --portnumber={4} {5}'",
".",
"format",
"(",
"host",
",",
"user",
",",
"pwd",
",",
"protocol",
",",
"port",
",",
"cmd",
")",
"else",
":",
"esx_cmd",
"+=",
"' -s {0} -h {1} -u {2} -p \\'{3}\\' '",
"'--protocol={4} --portnumber={5} {6}'",
".",
"format",
"(",
"host",
",",
"esxi_host",
",",
"user",
",",
"pwd",
",",
"protocol",
",",
"port",
",",
"cmd",
")",
"ret",
"=",
"salt",
".",
"modules",
".",
"cmdmod",
".",
"run_all",
"(",
"esx_cmd",
",",
"output_loglevel",
"=",
"'quiet'",
")",
"return",
"ret"
]
| Shell out and call the specified esxcli command, parse the result
and return something sane.
:param host: ESXi or vCenter host to connect to
:param user: User to connect as, usually root
:param pwd: Password to connect with
:param port: TCP port
:param cmd: esxcli command and arguments
:param esxi_host: If `host` is a vCenter host, then esxi_host is the
ESXi machine on which to execute this command
:param credstore: Optional path to the credential store file
:return: Dictionary | [
"Shell",
"out",
"and",
"call",
"the",
"specified",
"esxcli",
"commmand",
"parse",
"the",
"result",
"and",
"return",
"something",
"sane",
"."
]
| python | train |
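A self-contained sketch of the command string the direct-to-ESXi branch assembles (simplified: the real function resolves the esxcli binary with salt.utils.path.which and also supports the vCenter -h form and a credstore):

def build_esxcli_cmd(host, user, pwd, cmd, protocol='https', port=443):
    return ("esxcli -s {0} -u {1} -p '{2}' "
            "--protocol={3} --portnumber={4} {5}").format(
                host, user, pwd, protocol, port, cmd)

print(build_esxcli_cmd('esxi01', 'root', 'hunter2', 'system version get'))
# esxcli -s esxi01 -u root -p 'hunter2' --protocol=https --portnumber=443 system version get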
joyent/python-manta | manta/client.py | https://github.com/joyent/python-manta/blob/f68ef142bdbac058c981e3b28e18d77612f5b7c6/manta/client.py#L483-L496 | def add_job_inputs(self, job_id, keys):
"""AddJobInputs
https://apidocs.joyent.com/manta/api.html#AddJobInputs
"""
log.debug("AddJobInputs %r", job_id)
path = "/%s/jobs/%s/live/in" % (self.account, job_id)
body = '\r\n'.join(keys) + '\r\n'
headers = {
"Content-Type": "text/plain",
"Content-Length": str(len(body))
}
res, content = self._request(path, "POST", body=body, headers=headers)
if res["status"] != '204':
raise errors.MantaAPIError(res, content) | [
"def",
"add_job_inputs",
"(",
"self",
",",
"job_id",
",",
"keys",
")",
":",
"log",
".",
"debug",
"(",
"\"AddJobInputs %r\"",
",",
"job_id",
")",
"path",
"=",
"\"/%s/jobs/%s/live/in\"",
"%",
"(",
"self",
".",
"account",
",",
"job_id",
")",
"body",
"=",
"'\\r\\n'",
".",
"join",
"(",
"keys",
")",
"+",
"'\\r\\n'",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"text/plain\"",
",",
"\"Content-Length\"",
":",
"str",
"(",
"len",
"(",
"body",
")",
")",
"}",
"res",
",",
"content",
"=",
"self",
".",
"_request",
"(",
"path",
",",
"\"POST\"",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"headers",
")",
"if",
"res",
"[",
"\"status\"",
"]",
"!=",
"'204'",
":",
"raise",
"errors",
".",
"MantaAPIError",
"(",
"res",
",",
"content",
")"
]
| AddJobInputs
https://apidocs.joyent.com/manta/api.html#AddJobInputs | [
"AddJobInputs",
"https",
":",
"//",
"apidocs",
".",
"joyent",
".",
"com",
"/",
"manta",
"/",
"api",
".",
"html#AddJobInputs"
]
| python | train |
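The wire format AddJobInputs sends is easy to verify standalone: one Manta key per line, CRLF-terminated, with an explicit Content-Length.

keys = ['/account/stor/in1.txt', '/account/stor/in2.txt']
body = '\r\n'.join(keys) + '\r\n'
headers = {
    'Content-Type': 'text/plain',
    'Content-Length': str(len(body)),
}
print(repr(body))  # '/account/stor/in1.txt\r\n/account/stor/in2.txt\r\n'
print(headers['Content-Length'])  # 46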
LordGaav/python-chaos | chaos/amqp/rpc.py | https://github.com/LordGaav/python-chaos/blob/52cd29a6fd15693ee1e53786b93bcb23fbf84ddd/chaos/amqp/rpc.py#L304-L320 | def reply(self, original_headers, message, properties=None):
"""
Reply to a RPC request. This function will use the default exchange, to directly contact the reply_to queue.
Parameters
----------
original_headers: dict
The headers of the originating message that caused this reply.
message: string
Message to reply with
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be
set by specifying PERSISTENT_MESSAGE .
"""
rpc_reply(self.channel, original_headers, message, properties) | [
"def",
"reply",
"(",
"self",
",",
"original_headers",
",",
"message",
",",
"properties",
"=",
"None",
")",
":",
"rpc_reply",
"(",
"self",
".",
"channel",
",",
"original_headers",
",",
"message",
",",
"properties",
")"
]
| Reply to a RPC request. This function will use the default exchange, to directly contact the reply_to queue.
Parameters
----------
original_headers: dict
The headers of the originating message that caused this reply.
message: string
Message to reply with
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be
set by specifying PERSISTENT_MESSAGE . | [
"Reply",
"to",
"a",
"RPC",
"request",
".",
"This",
"function",
"will",
"use",
"the",
"default",
"exchange",
"to",
"directly",
"contact",
"the",
"reply_to",
"queue",
"."
]
| python | train |
madzak/python-json-logger | src/pythonjsonlogger/jsonlogger.py | https://github.com/madzak/python-json-logger/blob/687cc52260876fd2189cbb7c5856e3fbaff65279/src/pythonjsonlogger/jsonlogger.py#L136-L144 | def parse(self):
"""
Parses format string looking for substitutions
This method is responsible for returning a list of fields (as strings)
to include in all log messages.
"""
standard_formatters = re.compile(r'\((.+?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt) | [
"def",
"parse",
"(",
"self",
")",
":",
"standard_formatters",
"=",
"re",
".",
"compile",
"(",
"r'\\((.+?)\\)'",
",",
"re",
".",
"IGNORECASE",
")",
"return",
"standard_formatters",
".",
"findall",
"(",
"self",
".",
"_fmt",
")"
]
| Parses format string looking for substitutions
This method is responsible for returning a list of fields (as strings)
to include in all log messages. | [
"Parses",
"format",
"string",
"looking",
"for",
"substitutions"
]
| python | train |
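The regex in parse() can be exercised standalone; it simply harvests the %(name)s placeholders from a logging format string:

import re

fmt = '%(asctime)s %(levelname)s %(name)s %(message)s'
standard_formatters = re.compile(r'\((.+?)\)', re.IGNORECASE)
print(standard_formatters.findall(fmt))
# ['asctime', 'levelname', 'name', 'message']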
OSSOS/MOP | src/ossos/core/ossos/storage.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L1366-L1383 | def get_property(node_uri, property_name, ossos_base=True):
"""
Retrieves the value associated with a property on a node in VOSpace.
@param node_uri:
@param property_name:
@param ossos_base:
@return:
"""
# Must use force or we could have a cached copy of the node from before
# properties of interest were set/updated.
node = client.get_node(node_uri, force=True)
property_uri = tag_uri(property_name) if ossos_base else property_name
if property_uri not in node.props:
return None
return node.props[property_uri] | [
"def",
"get_property",
"(",
"node_uri",
",",
"property_name",
",",
"ossos_base",
"=",
"True",
")",
":",
"# Must use force or we could have a cached copy of the node from before",
"# properties of interest were set/updated.",
"node",
"=",
"client",
".",
"get_node",
"(",
"node_uri",
",",
"force",
"=",
"True",
")",
"property_uri",
"=",
"tag_uri",
"(",
"property_name",
")",
"if",
"ossos_base",
"else",
"property_name",
"if",
"property_uri",
"not",
"in",
"node",
".",
"props",
":",
"return",
"None",
"return",
"node",
".",
"props",
"[",
"property_uri",
"]"
]
| Retrieves the value associated with a property on a node in VOSpace.
@param node_uri:
@param property_name:
@param ossos_base:
@return: | [
"Retrieves",
"the",
"value",
"associated",
"with",
"a",
"property",
"on",
"a",
"node",
"in",
"VOSpace",
"."
]
| python | train |
python-gitlab/python-gitlab | gitlab/mixins.py | https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/mixins.py#L602-L619 | def render(self, link_url, image_url, **kwargs):
"""Preview link_url and image_url after interpolation.
Args:
link_url (str): URL of the badge link
image_url (str): URL of the badge image
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabRenderError: If the rendering failed
Returns:
dict: The rendering properties
"""
path = '%s/render' % self.path
data = {'link_url': link_url, 'image_url': image_url}
return self.gitlab.http_get(path, data, **kwargs) | [
"def",
"render",
"(",
"self",
",",
"link_url",
",",
"image_url",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"'%s/render'",
"%",
"self",
".",
"path",
"data",
"=",
"{",
"'link_url'",
":",
"link_url",
",",
"'image_url'",
":",
"image_url",
"}",
"return",
"self",
".",
"gitlab",
".",
"http_get",
"(",
"path",
",",
"data",
",",
"*",
"*",
"kwargs",
")"
]
| Preview link_url and image_url after interpolation.
Args:
link_url (str): URL of the badge link
image_url (str): URL of the badge image
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabRenderError: If the rendering failed
Returns:
dict: The rendering properties | [
"Preview",
"link_url",
"and",
"image_url",
"after",
"interpolation",
"."
]
| python | train |
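A hedged usage sketch: badge managers mix in this render() method, so a project's badges collection can preview the interpolated URLs. The attribute path and response keys are assumptions against the python-gitlab version in use:

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='...')
project = gl.projects.get('group/project')
preview = project.badges.render(
    link_url='http://example.com/ci_status/%{project_path}',
    image_url='https://example.com/badge/%{default_branch}.svg',
)
print(preview['rendered_link_url'])  # URL with %{...} placeholders expanded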
danilobellini/audiolazy | audiolazy/lazy_misc.py | https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_misc.py#L70-L125 | def blocks(seq, size=None, hop=None, padval=0.):
"""
General iterable blockenizer.
Generator that gets ``size`` elements from ``seq``, and outputs them in a
collections.deque (mutable circular queue) sequence container. Next output
starts ``hop`` elements after the first element in last output block. Last
block may be appended with ``padval``, if needed to get the desired size.
The ``seq`` can have hybrid / heterogeneous data; it just needs to be an
iterable. You can use other type content as padval (e.g. None) to help
segregate the padding at the end, if desired.
Note
----
When hop is less than size, changing the returned contents will keep the
new changed value in the next yielded container.
"""
# Initialization
res = deque(maxlen=size) # Circular queue
idx = 0
last_idx = size - 1
if hop is None:
hop = size
reinit_idx = size - hop
# Yields each block, keeping last values when needed
if hop <= size:
for el in seq:
res.append(el)
if idx == last_idx:
yield res
idx = reinit_idx
else:
idx += 1
# Yields each block and skips (loses) data due to hop > size
else:
for el in seq:
if idx < 0: # Skips data
idx += 1
else:
res.append(el)
if idx == last_idx:
yield res
#res = dtype()
idx = size-hop
else:
idx += 1
# Padding to finish
if idx > max(size-hop, 0):
for _ in xrange(idx,size):
res.append(padval)
yield res | [
"def",
"blocks",
"(",
"seq",
",",
"size",
"=",
"None",
",",
"hop",
"=",
"None",
",",
"padval",
"=",
"0.",
")",
":",
"# Initialization",
"res",
"=",
"deque",
"(",
"maxlen",
"=",
"size",
")",
"# Circular queue",
"idx",
"=",
"0",
"last_idx",
"=",
"size",
"-",
"1",
"if",
"hop",
"is",
"None",
":",
"hop",
"=",
"size",
"reinit_idx",
"=",
"size",
"-",
"hop",
"# Yields each block, keeping last values when needed",
"if",
"hop",
"<=",
"size",
":",
"for",
"el",
"in",
"seq",
":",
"res",
".",
"append",
"(",
"el",
")",
"if",
"idx",
"==",
"last_idx",
":",
"yield",
"res",
"idx",
"=",
"reinit_idx",
"else",
":",
"idx",
"+=",
"1",
"# Yields each block and skips (loses) data due to hop > size",
"else",
":",
"for",
"el",
"in",
"seq",
":",
"if",
"idx",
"<",
"0",
":",
"# Skips data",
"idx",
"+=",
"1",
"else",
":",
"res",
".",
"append",
"(",
"el",
")",
"if",
"idx",
"==",
"last_idx",
":",
"yield",
"res",
"#res = dtype()",
"idx",
"=",
"size",
"-",
"hop",
"else",
":",
"idx",
"+=",
"1",
"# Padding to finish",
"if",
"idx",
">",
"max",
"(",
"size",
"-",
"hop",
",",
"0",
")",
":",
"for",
"_",
"in",
"xrange",
"(",
"idx",
",",
"size",
")",
":",
"res",
".",
"append",
"(",
"padval",
")",
"yield",
"res"
]
| General iterable blockenizer.
Generator that gets ``size`` elements from ``seq``, and outputs them in a
collections.deque (mutable circular queue) sequence container. Next output
starts ``hop`` elements after the first element in last output block. Last
block may be appended with ``padval``, if needed to get the desired size.
The ``seq`` can have hybrid / hetherogeneous data, it just need to be an
iterable. You can use other type content as padval (e.g. None) to help
segregate the padding at the end, if desired.
Note
----
When hop is less than size, changing the returned contents will keep the
new changed value in the next yielded container. | [
"General",
"iterable",
"blockenizer",
"."
]
| python | train |
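A runnable check of the blockenizer's hop and padding behavior, assuming the package re-exports blocks at top level; each yielded deque is reused, so copy it before collecting:

from audiolazy import blocks

print([list(b) for b in blocks(range(7), size=3, hop=2)])
# [[0, 1, 2], [2, 3, 4], [4, 5, 6]]

print([list(b) for b in blocks(range(8), size=3, hop=2)])
# [[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 0.0]]  -- tail padded with padval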
saltstack/salt | salt/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L677-L711 | def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, six.string_types):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high | [
"def",
"apply_exclude",
"(",
"self",
",",
"high",
")",
":",
"if",
"'__exclude__'",
"not",
"in",
"high",
":",
"return",
"high",
"ex_sls",
"=",
"set",
"(",
")",
"ex_id",
"=",
"set",
"(",
")",
"exclude",
"=",
"high",
".",
"pop",
"(",
"'__exclude__'",
")",
"for",
"exc",
"in",
"exclude",
":",
"if",
"isinstance",
"(",
"exc",
",",
"six",
".",
"string_types",
")",
":",
"# The exclude statement is a string, assume it is an sls",
"ex_sls",
".",
"add",
"(",
"exc",
")",
"if",
"isinstance",
"(",
"exc",
",",
"dict",
")",
":",
"# Explicitly declared exclude",
"if",
"len",
"(",
"exc",
")",
"!=",
"1",
":",
"continue",
"key",
"=",
"next",
"(",
"six",
".",
"iterkeys",
"(",
"exc",
")",
")",
"if",
"key",
"==",
"'sls'",
":",
"ex_sls",
".",
"add",
"(",
"exc",
"[",
"'sls'",
"]",
")",
"elif",
"key",
"==",
"'id'",
":",
"ex_id",
".",
"add",
"(",
"exc",
"[",
"'id'",
"]",
")",
"# Now the excludes have been simplified, use them",
"if",
"ex_sls",
":",
"# There are sls excludes, find the associtaed ids",
"for",
"name",
",",
"body",
"in",
"six",
".",
"iteritems",
"(",
"high",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"'__'",
")",
":",
"continue",
"if",
"body",
".",
"get",
"(",
"'__sls__'",
",",
"''",
")",
"in",
"ex_sls",
":",
"ex_id",
".",
"add",
"(",
"name",
")",
"for",
"id_",
"in",
"ex_id",
":",
"if",
"id_",
"in",
"high",
":",
"high",
".",
"pop",
"(",
"id_",
")",
"return",
"high"
]
| Read in the __exclude__ list and remove all excluded objects from the
high data | [
"Read",
"in",
"the",
"__exclude__",
"list",
"and",
"remove",
"all",
"excluded",
"objects",
"from",
"the",
"high",
"data"
]
| python | train |
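A self-contained sketch of the exclude semantics above: sls excludes are expanded to ids via each body's __sls__ marker, then both id sets are popped from the high data.

high = {
    '__exclude__': [{'sls': 'web'}, {'id': 'old_pkg'}],
    'nginx': {'__sls__': 'web', 'pkg': ['installed']},
    'vim': {'__sls__': 'editors', 'pkg': ['installed']},
    'old_pkg': {'__sls__': 'legacy', 'pkg': ['removed']},
}

exclude = high.pop('__exclude__')
ex_sls = {e['sls'] for e in exclude if 'sls' in e}
ex_id = {e['id'] for e in exclude if 'id' in e}
ex_id |= {name for name, body in high.items()
          if not name.startswith('__') and body.get('__sls__') in ex_sls}
for id_ in ex_id:
    high.pop(id_, None)

print(sorted(high))  # ['vim'] -- nginx (sls 'web') and old_pkg are excluded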