repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses 1) | partition (stringclasses 3) |
---|---|---|---|---|---|---|---|---|
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9706-L9717 | def mission_ack_send(self, target_system, target_component, type, force_mavlink1=False):
'''
Ack message during MISSION handling. The type field states if this
message is a positive ack (type=0) or if an error
happened (type=non-zero).
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
type : See MAV_MISSION_RESULT enum (uint8_t)
'''
return self.send(self.mission_ack_encode(target_system, target_component, type), force_mavlink1=force_mavlink1) | [
"def",
"mission_ack_send",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"type",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"mission_ack_encode",
"(",
"target_system",
",",
"target_component",
",",
"type",
")",
",",
"force_mavlink1",
"=",
"force_mavlink1",
")"
] | Ack message during MISSION handling. The type field states if this
message is a positive ack (type=0) or if an error
happened (type=non-zero).
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
type : See MAV_MISSION_RESULT enum (uint8_t) | [
"Ack",
"message",
"during",
"MISSION",
"handling",
".",
"The",
"type",
"field",
"states",
"if",
"this",
"message",
"is",
"a",
"positive",
"ack",
"(",
"type",
"=",
"0",
")",
"or",
"if",
"an",
"error",
"happened",
"(",
"type",
"=",
"non",
"-",
"zero",
")",
"."
] | python | train |
twilio/twilio-python | twilio/rest/flex_api/v1/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/flex_api/v1/__init__.py#L38-L44 | def configuration(self):
"""
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationList
"""
if self._configuration is None:
self._configuration = ConfigurationList(self)
return self._configuration | [
"def",
"configuration",
"(",
"self",
")",
":",
"if",
"self",
".",
"_configuration",
"is",
"None",
":",
"self",
".",
"_configuration",
"=",
"ConfigurationList",
"(",
"self",
")",
"return",
"self",
".",
"_configuration"
] | :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationList | [
":",
"rtype",
":",
"twilio",
".",
"rest",
".",
"flex_api",
".",
"v1",
".",
"configuration",
".",
"ConfigurationList"
] | python | train |
nickw444/flask-ldap3-login | flask_ldap3_login/__init__.py | https://github.com/nickw444/flask-ldap3-login/blob/3cf0faff52d0e04d4813119a2ba36d706e6fb31f/flask_ldap3_login/__init__.py#L205-L219 | def teardown(self, exception):
"""
Cleanup after a request. Close any open connections.
"""
ctx = stack.top
if ctx is not None:
if hasattr(ctx, 'ldap3_manager_connections'):
for connection in ctx.ldap3_manager_connections:
self.destroy_connection(connection)
if hasattr(ctx, 'ldap3_manager_main_connection'):
log.debug(
"Unbinding a connection used within the request context.")
ctx.ldap3_manager_main_connection.unbind()
ctx.ldap3_manager_main_connection = None | [
"def",
"teardown",
"(",
"self",
",",
"exception",
")",
":",
"ctx",
"=",
"stack",
".",
"top",
"if",
"ctx",
"is",
"not",
"None",
":",
"if",
"hasattr",
"(",
"ctx",
",",
"'ldap3_manager_connections'",
")",
":",
"for",
"connection",
"in",
"ctx",
".",
"ldap3_manager_connections",
":",
"self",
".",
"destroy_connection",
"(",
"connection",
")",
"if",
"hasattr",
"(",
"ctx",
",",
"'ldap3_manager_main_connection'",
")",
":",
"log",
".",
"debug",
"(",
"\"Unbinding a connection used within the request context.\"",
")",
"ctx",
".",
"ldap3_manager_main_connection",
".",
"unbind",
"(",
")",
"ctx",
".",
"ldap3_manager_main_connection",
"=",
"None"
] | Cleanup after a request. Close any open connections. | [
"Cleanup",
"after",
"a",
"request",
".",
"Close",
"any",
"open",
"connections",
"."
] | python | test |
jborean93/requests-credssp | requests_credssp/credssp.py | https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L175-L221 | def _build_pub_key_auth(self, context, nonce, auth_token, public_key):
"""
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3
https://msdn.microsoft.com/en-us/library/cc226791.aspx
This step sends the final SPNEGO token to the server if required and
computes the value for the pubKeyAuth field for the protocol version
negotiated.
The format of the pubKeyAuth field depends on the version that the
server supports.
For version 2 to 4:
The pubKeyAuth field is just wrapped using the authenticated context
For versions 5 to 6:
The pubKeyAuth is a sha256 hash of the server's public key plus a nonce
and a magic string value. This hash is wrapped using the authenticated
context and the nonce is added to the TSRequest alongside the nonce
used in the hash calcs.
:param context: The authenticated context
:param nonce: If versions 5+, the nonce to use in the hash
:param auth_token: If NTLM, this is the last msg (authenticate msg) to
send in the same request
:param public_key: The server's public key
:return: The TSRequest as a byte string to send to the server
"""
ts_request = TSRequest()
if auth_token is not None:
nego_token = NegoToken()
nego_token['negoToken'] = auth_token
ts_request['negoTokens'].append(nego_token)
if nonce is not None:
ts_request['clientNonce'] = nonce
hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + \
nonce + public_key
pub_value = hashlib.sha256(hash_input).digest()
else:
pub_value = public_key
enc_public_key = context.wrap(pub_value)
ts_request['pubKeyAuth'] = enc_public_key
return encoder.encode(ts_request) | [
"def",
"_build_pub_key_auth",
"(",
"self",
",",
"context",
",",
"nonce",
",",
"auth_token",
",",
"public_key",
")",
":",
"ts_request",
"=",
"TSRequest",
"(",
")",
"if",
"auth_token",
"is",
"not",
"None",
":",
"nego_token",
"=",
"NegoToken",
"(",
")",
"nego_token",
"[",
"'negoToken'",
"]",
"=",
"auth_token",
"ts_request",
"[",
"'negoTokens'",
"]",
".",
"append",
"(",
"nego_token",
")",
"if",
"nonce",
"is",
"not",
"None",
":",
"ts_request",
"[",
"'clientNonce'",
"]",
"=",
"nonce",
"hash_input",
"=",
"b\"CredSSP Client-To-Server Binding Hash\\x00\"",
"+",
"nonce",
"+",
"public_key",
"pub_value",
"=",
"hashlib",
".",
"sha256",
"(",
"hash_input",
")",
".",
"digest",
"(",
")",
"else",
":",
"pub_value",
"=",
"public_key",
"enc_public_key",
"=",
"context",
".",
"wrap",
"(",
"pub_value",
")",
"ts_request",
"[",
"'pubKeyAuth'",
"]",
"=",
"enc_public_key",
"return",
"encoder",
".",
"encode",
"(",
"ts_request",
")"
] | [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3
https://msdn.microsoft.com/en-us/library/cc226791.aspx
This step sends the final SPNEGO token to the server if required and
computes the value for the pubKeyAuth field for the protocol version
negotiated.
The format of the pubKeyAuth field depends on the version that the
server supports.
For version 2 to 4:
The pubKeyAuth field is just wrapped using the authenticated context
For versions 5 to 6:
The pubKeyAuth is a sha256 hash of the server's public key plus a nonce
and a magic string value. This hash is wrapped using the authenticated
context and the nonce is added to the TSRequest alongside the nonce
used in the hash calcs.
:param context: The authenticated context
:param nonce: If versions 5+, the nonce to use in the hash
:param auth_token: If NTLM, this is the last msg (authenticate msg) to
send in the same request
:param public_key: The server's public key
:return: The TSRequest as a byte string to send to the server | [
"[",
"MS",
"-",
"CSSP",
"]",
"3",
".",
"1",
".",
"5",
"Processing",
"Events",
"and",
"Sequencing",
"Rules",
"-",
"Step",
"3",
"https",
":",
"//",
"msdn",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"library",
"/",
"cc226791",
".",
"aspx"
] | python | train |
clintval/sample-sheet | sample_sheet/__init__.py | https://github.com/clintval/sample-sheet/blob/116ac6f26f6e61b57716c90f6e887d3d457756f3/sample_sheet/__init__.py#L865-L908 | def write(self, handle: TextIO, blank_lines: int = 1) -> None:
"""Write this :class:`SampleSheet` to a file-like object.
Args:
handle: Object to wrap by csv.writer.
blank_lines: Number of blank lines to write between sections.
"""
if not isinstance(blank_lines, int) or blank_lines <= 0:
raise ValueError('Number of blank lines must be a positive int.')
writer = csv.writer(handle)
csv_width: int = max([len(self.all_sample_keys), 2])
section_order = ['Header', 'Reads'] + self._sections + ['Settings']
def pad_iterable(
iterable: Iterable, size: int = csv_width, padding: str = ''
) -> List[str]:
return list(islice(chain(iterable, repeat(padding)), size))
def write_blank_lines(
writer: Any, n: int = blank_lines, width: int = csv_width
) -> None:
for i in range(n):
writer.writerow(pad_iterable([], width))
for title in section_order:
writer.writerow(pad_iterable([f'[{title}]'], csv_width))
section = getattr(self, title)
if title == 'Reads':
for read in self.Reads:
writer.writerow(pad_iterable([read], csv_width))
else:
for key, value in section.items():
writer.writerow(pad_iterable([key, value], csv_width))
write_blank_lines(writer)
writer.writerow(pad_iterable(['[Data]'], csv_width))
writer.writerow(pad_iterable(self.all_sample_keys, csv_width))
for sample in self.samples:
line = [getattr(sample, key) for key in self.all_sample_keys]
writer.writerow(pad_iterable(line, csv_width)) | [
"def",
"write",
"(",
"self",
",",
"handle",
":",
"TextIO",
",",
"blank_lines",
":",
"int",
"=",
"1",
")",
"->",
"None",
":",
"if",
"not",
"isinstance",
"(",
"blank_lines",
",",
"int",
")",
"or",
"blank_lines",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Number of blank lines must be a positive int.'",
")",
"writer",
"=",
"csv",
".",
"writer",
"(",
"handle",
")",
"csv_width",
":",
"int",
"=",
"max",
"(",
"[",
"len",
"(",
"self",
".",
"all_sample_keys",
")",
",",
"2",
"]",
")",
"section_order",
"=",
"[",
"'Header'",
",",
"'Reads'",
"]",
"+",
"self",
".",
"_sections",
"+",
"[",
"'Settings'",
"]",
"def",
"pad_iterable",
"(",
"iterable",
":",
"Iterable",
",",
"size",
":",
"int",
"=",
"csv_width",
",",
"padding",
":",
"str",
"=",
"''",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"list",
"(",
"islice",
"(",
"chain",
"(",
"iterable",
",",
"repeat",
"(",
"padding",
")",
")",
",",
"size",
")",
")",
"def",
"write_blank_lines",
"(",
"writer",
":",
"Any",
",",
"n",
":",
"int",
"=",
"blank_lines",
",",
"width",
":",
"int",
"=",
"csv_width",
")",
"->",
"None",
":",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"[",
"]",
",",
"width",
")",
")",
"for",
"title",
"in",
"section_order",
":",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"[",
"f'[{title}]'",
"]",
",",
"csv_width",
")",
")",
"section",
"=",
"getattr",
"(",
"self",
",",
"title",
")",
"if",
"title",
"==",
"'Reads'",
":",
"for",
"read",
"in",
"self",
".",
"Reads",
":",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"[",
"read",
"]",
",",
"csv_width",
")",
")",
"else",
":",
"for",
"key",
",",
"value",
"in",
"section",
".",
"items",
"(",
")",
":",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"[",
"key",
",",
"value",
"]",
",",
"csv_width",
")",
")",
"write_blank_lines",
"(",
"writer",
")",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"[",
"'[Data]'",
"]",
",",
"csv_width",
")",
")",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"self",
".",
"all_sample_keys",
",",
"csv_width",
")",
")",
"for",
"sample",
"in",
"self",
".",
"samples",
":",
"line",
"=",
"[",
"getattr",
"(",
"sample",
",",
"key",
")",
"for",
"key",
"in",
"self",
".",
"all_sample_keys",
"]",
"writer",
".",
"writerow",
"(",
"pad_iterable",
"(",
"line",
",",
"csv_width",
")",
")"
] | Write this :class:`SampleSheet` to a file-like object.
Args:
handle: Object to wrap by csv.writer.
blank_lines: Number of blank lines to write between sections. | [
"Write",
"this",
":",
"class",
":",
"SampleSheet",
"to",
"a",
"file",
"-",
"like",
"object",
"."
] | python | train |
mikekatz04/BOWIE | bowie/plotutils/makeprocess.py | https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/makeprocess.py#L252-L280 | def create_plots(self):
"""Creates plots according to each plotting class.
"""
for i, axis in enumerate(self.ax):
# plot everything. First check general dict for parameters related to plots.
trans_plot_class_call = globals()[self.plot_types[i]]
trans_plot_class = trans_plot_class_call(self.fig, axis,
self.value_classes[i].x_arr_list,
self.value_classes[i].y_arr_list,
self.value_classes[i].z_arr_list,
colorbar=(
self.colorbar_classes[self.plot_types[i]]),
**{**self.general,
**self.figure,
**self.plot_info[str(i)],
**self.plot_info[str(i)]['limits'],
**self.plot_info[str(i)]['label'],
**self.plot_info[str(i)]['extra'],
**self.plot_info[str(i)]['legend']})
# create the plot
trans_plot_class.make_plot()
# setup the plot
trans_plot_class.setup_plot()
# print("Axis", i, "Complete")
return | [
"def",
"create_plots",
"(",
"self",
")",
":",
"for",
"i",
",",
"axis",
"in",
"enumerate",
"(",
"self",
".",
"ax",
")",
":",
"# plot everything. First check general dict for parameters related to plots.",
"trans_plot_class_call",
"=",
"globals",
"(",
")",
"[",
"self",
".",
"plot_types",
"[",
"i",
"]",
"]",
"trans_plot_class",
"=",
"trans_plot_class_call",
"(",
"self",
".",
"fig",
",",
"axis",
",",
"self",
".",
"value_classes",
"[",
"i",
"]",
".",
"x_arr_list",
",",
"self",
".",
"value_classes",
"[",
"i",
"]",
".",
"y_arr_list",
",",
"self",
".",
"value_classes",
"[",
"i",
"]",
".",
"z_arr_list",
",",
"colorbar",
"=",
"(",
"self",
".",
"colorbar_classes",
"[",
"self",
".",
"plot_types",
"[",
"i",
"]",
"]",
")",
",",
"*",
"*",
"{",
"*",
"*",
"self",
".",
"general",
",",
"*",
"*",
"self",
".",
"figure",
",",
"*",
"*",
"self",
".",
"plot_info",
"[",
"str",
"(",
"i",
")",
"]",
",",
"*",
"*",
"self",
".",
"plot_info",
"[",
"str",
"(",
"i",
")",
"]",
"[",
"'limits'",
"]",
",",
"*",
"*",
"self",
".",
"plot_info",
"[",
"str",
"(",
"i",
")",
"]",
"[",
"'label'",
"]",
",",
"*",
"*",
"self",
".",
"plot_info",
"[",
"str",
"(",
"i",
")",
"]",
"[",
"'extra'",
"]",
",",
"*",
"*",
"self",
".",
"plot_info",
"[",
"str",
"(",
"i",
")",
"]",
"[",
"'legend'",
"]",
"}",
")",
"# create the plot",
"trans_plot_class",
".",
"make_plot",
"(",
")",
"# setup the plot",
"trans_plot_class",
".",
"setup_plot",
"(",
")",
"# print(\"Axis\", i, \"Complete\")",
"return"
] | Creates plots according to each plotting class. | [
"Creates",
"plots",
"according",
"to",
"each",
"plotting",
"class",
"."
] | python | train |
google/transitfeed | merge.py | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1597-L1642 | def _FindLargestIdPostfixNumber(self, schedule):
"""Finds the largest integer used as the ending of an id in the schedule.
Args:
schedule: The schedule to check.
Returns:
The maximum integer used as an ending for an id.
"""
postfix_number_re = re.compile('(\d+)$')
def ExtractPostfixNumber(entity_id):
"""Try to extract an integer from the end of entity_id.
If entity_id is None or if there is no integer ending the id, zero is
returned.
Args:
entity_id: An id string or None.
Returns:
An integer ending the entity_id or zero.
"""
if entity_id is None:
return 0
match = postfix_number_re.search(entity_id)
if match is not None:
return int(match.group(1))
else:
return 0
id_data_sets = {'agency_id': schedule.GetAgencyList(),
'stop_id': schedule.GetStopList(),
'route_id': schedule.GetRouteList(),
'trip_id': schedule.GetTripList(),
'service_id': schedule.GetServicePeriodList(),
'fare_id': schedule.GetFareAttributeList(),
'shape_id': schedule.GetShapeList()}
max_postfix_number = 0
for id_name, entity_list in id_data_sets.items():
for entity in entity_list:
entity_id = getattr(entity, id_name)
postfix_number = ExtractPostfixNumber(entity_id)
max_postfix_number = max(max_postfix_number, postfix_number)
return max_postfix_number | [
"def",
"_FindLargestIdPostfixNumber",
"(",
"self",
",",
"schedule",
")",
":",
"postfix_number_re",
"=",
"re",
".",
"compile",
"(",
"'(\\d+)$'",
")",
"def",
"ExtractPostfixNumber",
"(",
"entity_id",
")",
":",
"\"\"\"Try to extract an integer from the end of entity_id.\n\n If entity_id is None or if there is no integer ending the id, zero is\n returned.\n\n Args:\n entity_id: An id string or None.\n\n Returns:\n An integer ending the entity_id or zero.\n \"\"\"",
"if",
"entity_id",
"is",
"None",
":",
"return",
"0",
"match",
"=",
"postfix_number_re",
".",
"search",
"(",
"entity_id",
")",
"if",
"match",
"is",
"not",
"None",
":",
"return",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"else",
":",
"return",
"0",
"id_data_sets",
"=",
"{",
"'agency_id'",
":",
"schedule",
".",
"GetAgencyList",
"(",
")",
",",
"'stop_id'",
":",
"schedule",
".",
"GetStopList",
"(",
")",
",",
"'route_id'",
":",
"schedule",
".",
"GetRouteList",
"(",
")",
",",
"'trip_id'",
":",
"schedule",
".",
"GetTripList",
"(",
")",
",",
"'service_id'",
":",
"schedule",
".",
"GetServicePeriodList",
"(",
")",
",",
"'fare_id'",
":",
"schedule",
".",
"GetFareAttributeList",
"(",
")",
",",
"'shape_id'",
":",
"schedule",
".",
"GetShapeList",
"(",
")",
"}",
"max_postfix_number",
"=",
"0",
"for",
"id_name",
",",
"entity_list",
"in",
"id_data_sets",
".",
"items",
"(",
")",
":",
"for",
"entity",
"in",
"entity_list",
":",
"entity_id",
"=",
"getattr",
"(",
"entity",
",",
"id_name",
")",
"postfix_number",
"=",
"ExtractPostfixNumber",
"(",
"entity_id",
")",
"max_postfix_number",
"=",
"max",
"(",
"max_postfix_number",
",",
"postfix_number",
")",
"return",
"max_postfix_number"
] | Finds the largest integer used as the ending of an id in the schedule.
Args:
schedule: The schedule to check.
Returns:
The maximum integer used as an ending for an id. | [
"Finds",
"the",
"largest",
"integer",
"used",
"as",
"the",
"ending",
"of",
"an",
"id",
"in",
"the",
"schedule",
"."
] | python | train |
jvarho/pylibscrypt | pylibscrypt/pylibsodium.py | https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibsodium.py#L98-L138 | def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
if _scrypt_ll:
out = ctypes.create_string_buffer(olen)
if _scrypt_ll(password, len(password), salt, len(salt),
N, r, p, out, olen):
raise ValueError
return out.raw
if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):
return scr_mod.scrypt(password, salt, N, r, p, olen)
s = next(i for i in range(1, 64) if 2**i == N)
t = next(i for i in range(0, 30) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
if s > 53 or t + s > 58:
raise ValueError
out = ctypes.create_string_buffer(olen)
if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
raise ValueError
return out.raw | [
"def",
"scrypt",
"(",
"password",
",",
"salt",
",",
"N",
"=",
"SCRYPT_N",
",",
"r",
"=",
"SCRYPT_r",
",",
"p",
"=",
"SCRYPT_p",
",",
"olen",
"=",
"64",
")",
":",
"check_args",
"(",
"password",
",",
"salt",
",",
"N",
",",
"r",
",",
"p",
",",
"olen",
")",
"if",
"_scrypt_ll",
":",
"out",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"olen",
")",
"if",
"_scrypt_ll",
"(",
"password",
",",
"len",
"(",
"password",
")",
",",
"salt",
",",
"len",
"(",
"salt",
")",
",",
"N",
",",
"r",
",",
"p",
",",
"out",
",",
"olen",
")",
":",
"raise",
"ValueError",
"return",
"out",
".",
"raw",
"if",
"len",
"(",
"salt",
")",
"!=",
"_scrypt_salt",
"or",
"r",
"!=",
"8",
"or",
"(",
"p",
"&",
"(",
"p",
"-",
"1",
")",
")",
"or",
"(",
"N",
"*",
"p",
"<=",
"512",
")",
":",
"return",
"scr_mod",
".",
"scrypt",
"(",
"password",
",",
"salt",
",",
"N",
",",
"r",
",",
"p",
",",
"olen",
")",
"s",
"=",
"next",
"(",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"64",
")",
"if",
"2",
"**",
"i",
"==",
"N",
")",
"t",
"=",
"next",
"(",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"30",
")",
"if",
"2",
"**",
"i",
"==",
"p",
")",
"m",
"=",
"2",
"**",
"(",
"10",
"+",
"s",
")",
"o",
"=",
"2",
"**",
"(",
"5",
"+",
"t",
"+",
"s",
")",
"if",
"s",
">",
"53",
"or",
"t",
"+",
"s",
">",
"58",
":",
"raise",
"ValueError",
"out",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"olen",
")",
"if",
"_scrypt",
"(",
"out",
",",
"olen",
",",
"password",
",",
"len",
"(",
"password",
")",
",",
"salt",
",",
"o",
",",
"m",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"return",
"out",
".",
"raw"
] | Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful. | [
"Returns",
"a",
"key",
"derived",
"using",
"the",
"scrypt",
"key",
"-",
"derivarion",
"function"
] | python | train |
annoviko/pyclustering | pyclustering/cluster/__init__.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/__init__.py#L424-L476 | def append_cluster(self, cluster, data=None, canvas=0, marker='.', markersize=None, color=None):
"""!
@brief Appends cluster to canvas for drawing.
@param[in] cluster (list): cluster that may consist of indexes of objects from the data or object itself.
@param[in] data (list): If defines that each element of cluster is considered as a index of object from the data.
@param[in] canvas (uint): Number of canvas that should be used for displaying cluster.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas.
"""
if len(cluster) == 0:
return
if canvas > self.__number_canvases or canvas < 0:
raise ValueError("Canvas index '%d' is out of range [0; %d]." % self.__number_canvases or canvas)
if color is None:
index_color = len(self.__canvas_clusters[canvas]) % len(color_list.TITLES)
color = color_list.TITLES[index_color]
added_canvas_descriptor = canvas_cluster_descr(cluster, data, marker, markersize, color)
self.__canvas_clusters[canvas].append( added_canvas_descriptor )
if data is None:
dimension = len(cluster[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError("Only clusters with the same dimension of objects can be displayed on canvas.")
else:
dimension = len(data[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError("Only clusters with the same dimension of objects can be displayed on canvas.")
if (dimension < 1) or (dimension > 3):
raise ValueError("Only objects with size dimension 1 (1D plot), 2 (2D plot) or 3 (3D plot) "
"can be displayed. For multi-dimensional data use 'cluster_visualizer_multidim'.")
if markersize is None:
if (dimension == 1) or (dimension == 2):
added_canvas_descriptor.markersize = self.__default_2d_marker_size
elif dimension == 3:
added_canvas_descriptor.markersize = self.__default_3d_marker_size
return len(self.__canvas_clusters[canvas]) - 1 | [
"def",
"append_cluster",
"(",
"self",
",",
"cluster",
",",
"data",
"=",
"None",
",",
"canvas",
"=",
"0",
",",
"marker",
"=",
"'.'",
",",
"markersize",
"=",
"None",
",",
"color",
"=",
"None",
")",
":",
"if",
"len",
"(",
"cluster",
")",
"==",
"0",
":",
"return",
"if",
"canvas",
">",
"self",
".",
"__number_canvases",
"or",
"canvas",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Canvas index '%d' is out of range [0; %d].\"",
"%",
"self",
".",
"__number_canvases",
"or",
"canvas",
")",
"if",
"color",
"is",
"None",
":",
"index_color",
"=",
"len",
"(",
"self",
".",
"__canvas_clusters",
"[",
"canvas",
"]",
")",
"%",
"len",
"(",
"color_list",
".",
"TITLES",
")",
"color",
"=",
"color_list",
".",
"TITLES",
"[",
"index_color",
"]",
"added_canvas_descriptor",
"=",
"canvas_cluster_descr",
"(",
"cluster",
",",
"data",
",",
"marker",
",",
"markersize",
",",
"color",
")",
"self",
".",
"__canvas_clusters",
"[",
"canvas",
"]",
".",
"append",
"(",
"added_canvas_descriptor",
")",
"if",
"data",
"is",
"None",
":",
"dimension",
"=",
"len",
"(",
"cluster",
"[",
"0",
"]",
")",
"if",
"self",
".",
"__canvas_dimensions",
"[",
"canvas",
"]",
"is",
"None",
":",
"self",
".",
"__canvas_dimensions",
"[",
"canvas",
"]",
"=",
"dimension",
"elif",
"self",
".",
"__canvas_dimensions",
"[",
"canvas",
"]",
"!=",
"dimension",
":",
"raise",
"ValueError",
"(",
"\"Only clusters with the same dimension of objects can be displayed on canvas.\"",
")",
"else",
":",
"dimension",
"=",
"len",
"(",
"data",
"[",
"0",
"]",
")",
"if",
"self",
".",
"__canvas_dimensions",
"[",
"canvas",
"]",
"is",
"None",
":",
"self",
".",
"__canvas_dimensions",
"[",
"canvas",
"]",
"=",
"dimension",
"elif",
"self",
".",
"__canvas_dimensions",
"[",
"canvas",
"]",
"!=",
"dimension",
":",
"raise",
"ValueError",
"(",
"\"Only clusters with the same dimension of objects can be displayed on canvas.\"",
")",
"if",
"(",
"dimension",
"<",
"1",
")",
"or",
"(",
"dimension",
">",
"3",
")",
":",
"raise",
"ValueError",
"(",
"\"Only objects with size dimension 1 (1D plot), 2 (2D plot) or 3 (3D plot) \"",
"\"can be displayed. For multi-dimensional data use 'cluster_visualizer_multidim'.\"",
")",
"if",
"markersize",
"is",
"None",
":",
"if",
"(",
"dimension",
"==",
"1",
")",
"or",
"(",
"dimension",
"==",
"2",
")",
":",
"added_canvas_descriptor",
".",
"markersize",
"=",
"self",
".",
"__default_2d_marker_size",
"elif",
"dimension",
"==",
"3",
":",
"added_canvas_descriptor",
".",
"markersize",
"=",
"self",
".",
"__default_3d_marker_size",
"return",
"len",
"(",
"self",
".",
"__canvas_clusters",
"[",
"canvas",
"]",
")",
"-",
"1"
] | !
@brief Appends cluster to canvas for drawing.
@param[in] cluster (list): cluster that may consist of indexes of objects from the data or object itself.
@param[in] data (list): If defines that each element of cluster is considered as a index of object from the data.
@param[in] canvas (uint): Number of canvas that should be used for displaying cluster.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas. | [
"!"
] | python | valid |
codelv/enaml-native | src/enamlnative/android/app.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/app.py#L119-L151 | def request_permissions(self, permissions):
""" Return a future that resolves with the results
of the permission requests
"""
f = self.create_future()
#: Old versions of android did permissions at install time
if self.api_level < 23:
f.set_result({p: True for p in permissions})
return f
w = self.widget
request_code = self._permission_code
self._permission_code += 1 #: So next call has a unique code
#: On first request, setup our listener, and request the permission
if request_code == 0:
w.setPermissionResultListener(w.getId())
w.onRequestPermissionsResult.connect(self._on_permission_result)
def on_results(code, perms, results):
#: Check permissions
f.set_result({p: r == Activity.PERMISSION_GRANTED
for (p, r) in zip(perms, results)})
#: Save a reference
self._permission_requests[request_code] = on_results
#: Send out the request
self.widget.requestPermissions(permissions, request_code)
return f | [
"def",
"request_permissions",
"(",
"self",
",",
"permissions",
")",
":",
"f",
"=",
"self",
".",
"create_future",
"(",
")",
"#: Old versions of android did permissions at install time",
"if",
"self",
".",
"api_level",
"<",
"23",
":",
"f",
".",
"set_result",
"(",
"{",
"p",
":",
"True",
"for",
"p",
"in",
"permissions",
"}",
")",
"return",
"f",
"w",
"=",
"self",
".",
"widget",
"request_code",
"=",
"self",
".",
"_permission_code",
"self",
".",
"_permission_code",
"+=",
"1",
"#: So next call has a unique code",
"#: On first request, setup our listener, and request the permission",
"if",
"request_code",
"==",
"0",
":",
"w",
".",
"setPermissionResultListener",
"(",
"w",
".",
"getId",
"(",
")",
")",
"w",
".",
"onRequestPermissionsResult",
".",
"connect",
"(",
"self",
".",
"_on_permission_result",
")",
"def",
"on_results",
"(",
"code",
",",
"perms",
",",
"results",
")",
":",
"#: Check permissions",
"f",
".",
"set_result",
"(",
"{",
"p",
":",
"r",
"==",
"Activity",
".",
"PERMISSION_GRANTED",
"for",
"(",
"p",
",",
"r",
")",
"in",
"zip",
"(",
"perms",
",",
"results",
")",
"}",
")",
"#: Save a reference",
"self",
".",
"_permission_requests",
"[",
"request_code",
"]",
"=",
"on_results",
"#: Send out the request",
"self",
".",
"widget",
".",
"requestPermissions",
"(",
"permissions",
",",
"request_code",
")",
"return",
"f"
] | Return a future that resolves with the results
of the permission requests | [
"Return",
"a",
"future",
"that",
"resolves",
"with",
"the",
"results",
"of",
"the",
"permission",
"requests"
] | python | train |
wbond/asn1crypto | asn1crypto/x509.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L1083-L1119 | def human_friendly(self):
"""
:return:
A human-friendly unicode string containing the parts of the name
"""
if self._human_friendly is None:
data = OrderedDict()
last_field = None
for rdn in self.chosen:
for type_val in rdn:
field_name = type_val['type'].human_friendly
last_field = field_name
if field_name in data:
data[field_name] = [data[field_name]]
data[field_name].append(type_val['value'])
else:
data[field_name] = type_val['value']
to_join = []
keys = data.keys()
if last_field == 'Country':
keys = reversed(list(keys))
for key in keys:
value = data[key]
native_value = self._recursive_humanize(value)
to_join.append('%s: %s' % (key, native_value))
has_comma = False
for element in to_join:
if element.find(',') != -1:
has_comma = True
break
separator = ', ' if not has_comma else '; '
self._human_friendly = separator.join(to_join[::-1])
return self._human_friendly | [
"def",
"human_friendly",
"(",
"self",
")",
":",
"if",
"self",
".",
"_human_friendly",
"is",
"None",
":",
"data",
"=",
"OrderedDict",
"(",
")",
"last_field",
"=",
"None",
"for",
"rdn",
"in",
"self",
".",
"chosen",
":",
"for",
"type_val",
"in",
"rdn",
":",
"field_name",
"=",
"type_val",
"[",
"'type'",
"]",
".",
"human_friendly",
"last_field",
"=",
"field_name",
"if",
"field_name",
"in",
"data",
":",
"data",
"[",
"field_name",
"]",
"=",
"[",
"data",
"[",
"field_name",
"]",
"]",
"data",
"[",
"field_name",
"]",
".",
"append",
"(",
"type_val",
"[",
"'value'",
"]",
")",
"else",
":",
"data",
"[",
"field_name",
"]",
"=",
"type_val",
"[",
"'value'",
"]",
"to_join",
"=",
"[",
"]",
"keys",
"=",
"data",
".",
"keys",
"(",
")",
"if",
"last_field",
"==",
"'Country'",
":",
"keys",
"=",
"reversed",
"(",
"list",
"(",
"keys",
")",
")",
"for",
"key",
"in",
"keys",
":",
"value",
"=",
"data",
"[",
"key",
"]",
"native_value",
"=",
"self",
".",
"_recursive_humanize",
"(",
"value",
")",
"to_join",
".",
"append",
"(",
"'%s: %s'",
"%",
"(",
"key",
",",
"native_value",
")",
")",
"has_comma",
"=",
"False",
"for",
"element",
"in",
"to_join",
":",
"if",
"element",
".",
"find",
"(",
"','",
")",
"!=",
"-",
"1",
":",
"has_comma",
"=",
"True",
"break",
"separator",
"=",
"', '",
"if",
"not",
"has_comma",
"else",
"'; '",
"self",
".",
"_human_friendly",
"=",
"separator",
".",
"join",
"(",
"to_join",
"[",
":",
":",
"-",
"1",
"]",
")",
"return",
"self",
".",
"_human_friendly"
] | :return:
A human-friendly unicode string containing the parts of the name | [
":",
"return",
":",
"A",
"human",
"-",
"friendly",
"unicode",
"string",
"containing",
"the",
"parts",
"of",
"the",
"name"
] | python | train |
HazyResearch/metal | metal/multitask/mt_classifier.py | https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/multitask/mt_classifier.py#L213-L218 | def _to_torch(Z, dtype=None):
"""Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor"""
if isinstance(Z, list):
return [Classifier._to_torch(z, dtype=dtype) for z in Z]
else:
return Classifier._to_torch(Z) | [
"def",
"_to_torch",
"(",
"Z",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"Z",
",",
"list",
")",
":",
"return",
"[",
"Classifier",
".",
"_to_torch",
"(",
"z",
",",
"dtype",
"=",
"dtype",
")",
"for",
"z",
"in",
"Z",
"]",
"else",
":",
"return",
"Classifier",
".",
"_to_torch",
"(",
"Z",
")"
] | Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor | [
"Converts",
"a",
"None",
"list",
"np",
".",
"ndarray",
"or",
"torch",
".",
"Tensor",
"to",
"torch",
".",
"Tensor"
] | python | train |
tensorflow/probability | experimental/mcmc/elliptical_slice_sampler.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/mcmc/elliptical_slice_sampler.py#L405-L417 | def _prepare_args(log_likelihood_fn, state,
log_likelihood=None, description='log_likelihood'):
"""Processes input args to meet list-like assumptions."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
state_parts = [tf.convert_to_tensor(s, name='current_state')
for s in state_parts]
log_likelihood = _maybe_call_fn(
log_likelihood_fn,
state_parts,
log_likelihood,
description)
return [state_parts, log_likelihood] | [
"def",
"_prepare_args",
"(",
"log_likelihood_fn",
",",
"state",
",",
"log_likelihood",
"=",
"None",
",",
"description",
"=",
"'log_likelihood'",
")",
":",
"state_parts",
"=",
"list",
"(",
"state",
")",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"state",
")",
"else",
"[",
"state",
"]",
"state_parts",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"s",
",",
"name",
"=",
"'current_state'",
")",
"for",
"s",
"in",
"state_parts",
"]",
"log_likelihood",
"=",
"_maybe_call_fn",
"(",
"log_likelihood_fn",
",",
"state_parts",
",",
"log_likelihood",
",",
"description",
")",
"return",
"[",
"state_parts",
",",
"log_likelihood",
"]"
] | Processes input args to meet list-like assumptions. | [
"Processes",
"input",
"args",
"to",
"meet",
"list",
"-",
"like",
"assumptions",
"."
] | python | test |
quantumlib/Cirq | cirq/ops/common_gates.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/ops/common_gates.py#L1138-L1141 | def Ry(rads: Union[float, sympy.Basic]) -> YPowGate:
"""Returns a gate with the matrix e^{-i Y rads / 2}."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return YPowGate(exponent=rads / pi, global_shift=-0.5) | [
"def",
"Ry",
"(",
"rads",
":",
"Union",
"[",
"float",
",",
"sympy",
".",
"Basic",
"]",
")",
"->",
"YPowGate",
":",
"pi",
"=",
"sympy",
".",
"pi",
"if",
"protocols",
".",
"is_parameterized",
"(",
"rads",
")",
"else",
"np",
".",
"pi",
"return",
"YPowGate",
"(",
"exponent",
"=",
"rads",
"/",
"pi",
",",
"global_shift",
"=",
"-",
"0.5",
")"
] | Returns a gate with the matrix e^{-i Y rads / 2}. | [
"Returns",
"a",
"gate",
"with",
"the",
"matrix",
"e^",
"{",
"-",
"i",
"Y",
"rads",
"/",
"2",
"}",
"."
] | python | train |
sentinel-hub/eo-learn | features/eolearn/features/radiometric_normalization.py | https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/features/eolearn/features/radiometric_normalization.py#L104-L124 | def _geoville_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel. """
# no_obs = bn.allnan(arr_tmp["data"], axis=0)
data_tmp = np.array(data, copy=True)
valid_obs = np.sum(np.isfinite(data_tmp), axis=0)
# replace NaN with maximum
max_val = np.nanmax(data_tmp) + 1
data_tmp[np.isnan(data_tmp)] = max_val
# sort - former NaNs will move to the end
ind_tmp = np.argsort(data_tmp, kind="mergesort", axis=0)
# desired position as well as floor and ceiling of it
k_arr = (valid_obs - 1) * (percentile / 100.0)
k_arr = np.where(k_arr < 0, 0, k_arr)
f_arr = np.floor(k_arr + 0.5)
f_arr = f_arr.astype(np.int)
# get floor value of reference band and index band
ind = f_arr.astype("int16")
y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]
y_val, x_val = np.ogrid[0:y_val, 0:x_val]
idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])
return idx | [
"def",
"_geoville_index_by_percentile",
"(",
"self",
",",
"data",
",",
"percentile",
")",
":",
"# no_obs = bn.allnan(arr_tmp[\"data\"], axis=0)",
"data_tmp",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"True",
")",
"valid_obs",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"isfinite",
"(",
"data_tmp",
")",
",",
"axis",
"=",
"0",
")",
"# replace NaN with maximum",
"max_val",
"=",
"np",
".",
"nanmax",
"(",
"data_tmp",
")",
"+",
"1",
"data_tmp",
"[",
"np",
".",
"isnan",
"(",
"data_tmp",
")",
"]",
"=",
"max_val",
"# sort - former NaNs will move to the end",
"ind_tmp",
"=",
"np",
".",
"argsort",
"(",
"data_tmp",
",",
"kind",
"=",
"\"mergesort\"",
",",
"axis",
"=",
"0",
")",
"# desired position as well as floor and ceiling of it",
"k_arr",
"=",
"(",
"valid_obs",
"-",
"1",
")",
"*",
"(",
"percentile",
"/",
"100.0",
")",
"k_arr",
"=",
"np",
".",
"where",
"(",
"k_arr",
"<",
"0",
",",
"0",
",",
"k_arr",
")",
"f_arr",
"=",
"np",
".",
"floor",
"(",
"k_arr",
"+",
"0.5",
")",
"f_arr",
"=",
"f_arr",
".",
"astype",
"(",
"np",
".",
"int",
")",
"# get floor value of reference band and index band",
"ind",
"=",
"f_arr",
".",
"astype",
"(",
"\"int16\"",
")",
"y_val",
",",
"x_val",
"=",
"ind_tmp",
".",
"shape",
"[",
"1",
"]",
",",
"ind_tmp",
".",
"shape",
"[",
"2",
"]",
"y_val",
",",
"x_val",
"=",
"np",
".",
"ogrid",
"[",
"0",
":",
"y_val",
",",
"0",
":",
"x_val",
"]",
"idx",
"=",
"np",
".",
"where",
"(",
"valid_obs",
"==",
"0",
",",
"self",
".",
"max_index",
",",
"ind_tmp",
"[",
"ind",
",",
"y_val",
",",
"x_val",
"]",
")",
"return",
"idx"
] | Calculate percentile of numpy stack and return the index of the chosen pixel. | [
"Calculate",
"percentile",
"of",
"numpy",
"stack",
"and",
"return",
"the",
"index",
"of",
"the",
"chosen",
"pixel",
"."
] | python | train |
kgiusti/pyngus | pyngus/link.py | https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/pyngus/link.py#L878-L884 | def _ep_need_close(self):
"""Peer has closed its end of the session."""
LOG.debug("Session %s close requested - closing...",
self._name)
links = self._links.copy() # may modify _links
for link in links:
link._session_closed() | [
"def",
"_ep_need_close",
"(",
"self",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Session %s close requested - closing...\"",
",",
"self",
".",
"_name",
")",
"links",
"=",
"self",
".",
"_links",
".",
"copy",
"(",
")",
"# may modify _links",
"for",
"link",
"in",
"links",
":",
"link",
".",
"_session_closed",
"(",
")"
] | Peer has closed its end of the session. | [
"Peer",
"has",
"closed",
"its",
"end",
"of",
"the",
"session",
"."
] | python | test |
UDST/urbansim | urbansim/models/dcm.py | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1439-L1457 | def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(SegmentedMNLDiscreteChoiceModel, self).apply_fit_filters(
choosers, alternatives) | [
"def",
"apply_fit_filters",
"(",
"self",
",",
"choosers",
",",
"alternatives",
")",
":",
"return",
"super",
"(",
"SegmentedMNLDiscreteChoiceModel",
",",
"self",
")",
".",
"apply_fit_filters",
"(",
"choosers",
",",
"alternatives",
")"
] | Filter `choosers` and `alternatives` for fitting.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame | [
"Filter",
"choosers",
"and",
"alternatives",
"for",
"fitting",
"."
] | python | train |
basho/riak-python-client | riak/mapreduce.py | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L77-L85 | def add_object(self, obj):
"""
Adds a RiakObject to the inputs.
:param obj: the object to add
:type obj: RiakObject
:rtype: :class:`RiakMapReduce`
"""
return self.add_bucket_key_data(obj._bucket._name, obj._key, None) | [
"def",
"add_object",
"(",
"self",
",",
"obj",
")",
":",
"return",
"self",
".",
"add_bucket_key_data",
"(",
"obj",
".",
"_bucket",
".",
"_name",
",",
"obj",
".",
"_key",
",",
"None",
")"
] | Adds a RiakObject to the inputs.
:param obj: the object to add
:type obj: RiakObject
:rtype: :class:`RiakMapReduce` | [
"Adds",
"a",
"RiakObject",
"to",
"the",
"inputs",
"."
] | python | train |
sebp/scikit-survival | sksurv/meta/ensemble_selection.py | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/meta/ensemble_selection.py#L277-L297 | def fit(self, X, y=None, **fit_params):
"""Fit ensemble of models
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data.
y : array-like, optional
Target data if base estimators are supervised.
Returns
-------
self
"""
self._check_params()
cv = check_cv(self.cv, X)
self._fit(X, y, cv, **fit_params)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"*",
"*",
"fit_params",
")",
":",
"self",
".",
"_check_params",
"(",
")",
"cv",
"=",
"check_cv",
"(",
"self",
".",
"cv",
",",
"X",
")",
"self",
".",
"_fit",
"(",
"X",
",",
"y",
",",
"cv",
",",
"*",
"*",
"fit_params",
")",
"return",
"self"
] | Fit ensemble of models
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data.
y : array-like, optional
Target data if base estimators are supervised.
Returns
-------
self | [
"Fit",
"ensemble",
"of",
"models"
] | python | train |
Jaymon/endpoints | endpoints/http.py | https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L971-L976 | def path(self):
"""path part of a url (eg, http://host.com/path?query=string)"""
self._path = ''
path_args = self.path_args
path = "/{}".format("/".join(path_args))
return path | [
"def",
"path",
"(",
"self",
")",
":",
"self",
".",
"_path",
"=",
"''",
"path_args",
"=",
"self",
".",
"path_args",
"path",
"=",
"\"/{}\"",
".",
"format",
"(",
"\"/\"",
".",
"join",
"(",
"path_args",
")",
")",
"return",
"path"
] | path part of a url (eg, http://host.com/path?query=string) | [
"path",
"part",
"of",
"a",
"url",
"(",
"eg",
"http",
":",
"//",
"host",
".",
"com",
"/",
"path?query",
"=",
"string",
")"
] | python | train |
rsinger86/django-lifecycle | django_lifecycle/__init__.py | https://github.com/rsinger86/django-lifecycle/blob/2196908ef0e242e52aab5bfaa3d337930700c106/django_lifecycle/__init__.py#L228-L245 | def _run_hooked_methods(self, hook: str):
"""
Iterate through decorated methods to find those that should be
triggered by the current hook. If conditions exist, check them before
running otherwise go ahead and run.
"""
for method in self._potentially_hooked_methods:
for callback_specs in method._hooked:
if callback_specs['hook'] != hook:
continue
when = callback_specs.get('when')
if when:
if self._check_callback_conditions(callback_specs):
method()
else:
method() | [
"def",
"_run_hooked_methods",
"(",
"self",
",",
"hook",
":",
"str",
")",
":",
"for",
"method",
"in",
"self",
".",
"_potentially_hooked_methods",
":",
"for",
"callback_specs",
"in",
"method",
".",
"_hooked",
":",
"if",
"callback_specs",
"[",
"'hook'",
"]",
"!=",
"hook",
":",
"continue",
"when",
"=",
"callback_specs",
".",
"get",
"(",
"'when'",
")",
"if",
"when",
":",
"if",
"self",
".",
"_check_callback_conditions",
"(",
"callback_specs",
")",
":",
"method",
"(",
")",
"else",
":",
"method",
"(",
")"
] | Iterate through decorated methods to find those that should be
triggered by the current hook. If conditions exist, check them before
running otherwise go ahead and run. | [
"Iterate",
"through",
"decorated",
"methods",
"to",
"find",
"those",
"that",
"should",
"be",
"triggered",
"by",
"the",
"current",
"hook",
".",
"If",
"conditions",
"exist",
"check",
"them",
"before",
"running",
"otherwise",
"go",
"ahead",
"and",
"run",
"."
] | python | train |
merll/docker-fabric | dockerfabric/actions.py | https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/actions.py#L16-L25 | def perform(action_name, container, **kwargs):
"""
Performs an action on the given container map and configuration.
:param action_name: Name of the action (e.g. ``update``).
:param container: Container configuration name.
:param kwargs: Keyword arguments for the action implementation.
"""
cf = container_fabric()
cf.call(action_name, container, **kwargs) | [
"def",
"perform",
"(",
"action_name",
",",
"container",
",",
"*",
"*",
"kwargs",
")",
":",
"cf",
"=",
"container_fabric",
"(",
")",
"cf",
".",
"call",
"(",
"action_name",
",",
"container",
",",
"*",
"*",
"kwargs",
")"
] | Performs an action on the given container map and configuration.
:param action_name: Name of the action (e.g. ``update``).
:param container: Container configuration name.
:param kwargs: Keyword arguments for the action implementation. | [
"Performs",
"an",
"action",
"on",
"the",
"given",
"container",
"map",
"and",
"configuration",
"."
] | python | train |
blockcypher/blockcypher-python | blockcypher/api.py | https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L887-L902 | def delete_forwarding_address(payment_id, coin_symbol='btc', api_key=None):
'''
Delete a forwarding address on a specific blockchain, using its
payment id
'''
assert payment_id, 'payment_id required'
assert is_valid_coin_symbol(coin_symbol)
assert api_key, 'api_key required'
params = {'token': api_key}
url = make_url(**dict(payments=payment_id))
r = requests.delete(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r, allow_204=True) | [
"def",
"delete_forwarding_address",
"(",
"payment_id",
",",
"coin_symbol",
"=",
"'btc'",
",",
"api_key",
"=",
"None",
")",
":",
"assert",
"payment_id",
",",
"'payment_id required'",
"assert",
"is_valid_coin_symbol",
"(",
"coin_symbol",
")",
"assert",
"api_key",
",",
"'api_key required'",
"params",
"=",
"{",
"'token'",
":",
"api_key",
"}",
"url",
"=",
"make_url",
"(",
"*",
"*",
"dict",
"(",
"payments",
"=",
"payment_id",
")",
")",
"r",
"=",
"requests",
".",
"delete",
"(",
"url",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"True",
",",
"timeout",
"=",
"TIMEOUT_IN_SECONDS",
")",
"return",
"get_valid_json",
"(",
"r",
",",
"allow_204",
"=",
"True",
")"
] | Delete a forwarding address on a specific blockchain, using its
payment id | [
"Delete",
"a",
"forwarding",
"address",
"on",
"a",
"specific",
"blockchain",
"using",
"its",
"payment",
"id"
] | python | train |
Ouranosinc/xclim | xclim/indices.py | https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/indices.py#L1051-L1083 | def ice_days(tasmax, freq='YS'):
r"""Number of ice/freezing days
Number of days where daily maximum temperatures are below 0℃.
Parameters
----------
tasmax : xarrray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Number of ice/freezing days.
Notes
-----
Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TX_{ij} < 0℃
"""
tu = units.parse_units(tasmax.attrs['units'].replace('-', '**-'))
fu = 'degC'
frz = 0
if fu != tu:
frz = units.convert(frz, fu, tu)
f = (tasmax < frz) * 1
return f.resample(time=freq).sum(dim='time') | [
"def",
"ice_days",
"(",
"tasmax",
",",
"freq",
"=",
"'YS'",
")",
":",
"tu",
"=",
"units",
".",
"parse_units",
"(",
"tasmax",
".",
"attrs",
"[",
"'units'",
"]",
".",
"replace",
"(",
"'-'",
",",
"'**-'",
")",
")",
"fu",
"=",
"'degC'",
"frz",
"=",
"0",
"if",
"fu",
"!=",
"tu",
":",
"frz",
"=",
"units",
".",
"convert",
"(",
"frz",
",",
"fu",
",",
"tu",
")",
"f",
"=",
"(",
"tasmax",
"<",
"frz",
")",
"*",
"1",
"return",
"f",
".",
"resample",
"(",
"time",
"=",
"freq",
")",
".",
"sum",
"(",
"dim",
"=",
"'time'",
")"
] | r"""Number of ice/freezing days
Number of days where daily maximum temperatures are below 0℃.
Parameters
----------
tasmax : xarrray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Number of ice/freezing days.
Notes
-----
Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TX_{ij} < 0℃ | [
"r",
"Number",
"of",
"ice",
"/",
"freezing",
"days"
] | python | train |
h2oai/h2o-3 | h2o-bindings/bin/bindings.py | https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/bindings.py#L298-L303 | def endpoint_groups():
"""Return endpoints, grouped by the class which handles them."""
groups = defaultdict(list)
for e in endpoints():
groups[e["class_name"]].append(e)
return groups | [
"def",
"endpoint_groups",
"(",
")",
":",
"groups",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"e",
"in",
"endpoints",
"(",
")",
":",
"groups",
"[",
"e",
"[",
"\"class_name\"",
"]",
"]",
".",
"append",
"(",
"e",
")",
"return",
"groups"
] | Return endpoints, grouped by the class which handles them. | [
"Return",
"endpoints",
"grouped",
"by",
"the",
"class",
"which",
"handles",
"them",
"."
] | python | test |
click-contrib/sphinx-click | sphinx_click/ext.py | https://github.com/click-contrib/sphinx-click/blob/ec76d15697ec80e51486a6e3daa0aec60b04870f/sphinx_click/ext.py#L144-L156 | def _format_envvar(param):
"""Format the envvars of a `click.Option` or `click.Argument`."""
yield '.. envvar:: {}'.format(param.envvar)
yield ' :noindex:'
yield ''
if isinstance(param, click.Argument):
param_ref = param.human_readable_name
else:
# if a user has defined an opt with multiple "aliases", always use the
# first. For example, if '--foo' or '-f' are possible, use '--foo'.
param_ref = param.opts[0]
yield _indent('Provide a default for :option:`{}`'.format(param_ref)) | [
"def",
"_format_envvar",
"(",
"param",
")",
":",
"yield",
"'.. envvar:: {}'",
".",
"format",
"(",
"param",
".",
"envvar",
")",
"yield",
"' :noindex:'",
"yield",
"''",
"if",
"isinstance",
"(",
"param",
",",
"click",
".",
"Argument",
")",
":",
"param_ref",
"=",
"param",
".",
"human_readable_name",
"else",
":",
"# if a user has defined an opt with multiple \"aliases\", always use the",
"# first. For example, if '--foo' or '-f' are possible, use '--foo'.",
"param_ref",
"=",
"param",
".",
"opts",
"[",
"0",
"]",
"yield",
"_indent",
"(",
"'Provide a default for :option:`{}`'",
".",
"format",
"(",
"param_ref",
")",
")"
] | Format the envvars of a `click.Option` or `click.Argument`. | [
"Format",
"the",
"envvars",
"of",
"a",
"click",
".",
"Option",
"or",
"click",
".",
"Argument",
"."
] | python | train |
closeio/tasktiger | tasktiger/worker.py | https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L177-L220 | def _wait_for_new_tasks(self, timeout=0, batch_timeout=0):
"""
Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method
"""
new_queue_found = False
start_time = batch_exit = time.time()
while True:
# Check to see if batch_exit has been updated
if batch_exit > start_time:
pubsub_sleep = batch_exit - time.time()
else:
pubsub_sleep = start_time + timeout - time.time()
message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or
self._did_work
else pubsub_sleep)
# Pull remaining messages off of channel
while message:
if message['type'] == 'message':
new_queue_found, batch_exit = self._process_queue_message(
message['data'], new_queue_found, batch_exit,
start_time, timeout, batch_timeout
)
message = self._pubsub.get_message()
if self._did_work:
break # Exit immediately if we did work during the last
# execution loop because there might be more work to do
elif time.time() >= batch_exit and new_queue_found:
break # After finding a new queue we can wait until the
# batch timeout expires
elif time.time() - start_time > timeout:
break | [
"def",
"_wait_for_new_tasks",
"(",
"self",
",",
"timeout",
"=",
"0",
",",
"batch_timeout",
"=",
"0",
")",
":",
"new_queue_found",
"=",
"False",
"start_time",
"=",
"batch_exit",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"# Check to see if batch_exit has been updated",
"if",
"batch_exit",
">",
"start_time",
":",
"pubsub_sleep",
"=",
"batch_exit",
"-",
"time",
".",
"time",
"(",
")",
"else",
":",
"pubsub_sleep",
"=",
"start_time",
"+",
"timeout",
"-",
"time",
".",
"time",
"(",
")",
"message",
"=",
"self",
".",
"_pubsub",
".",
"get_message",
"(",
"timeout",
"=",
"0",
"if",
"pubsub_sleep",
"<",
"0",
"or",
"self",
".",
"_did_work",
"else",
"pubsub_sleep",
")",
"# Pull remaining messages off of channel",
"while",
"message",
":",
"if",
"message",
"[",
"'type'",
"]",
"==",
"'message'",
":",
"new_queue_found",
",",
"batch_exit",
"=",
"self",
".",
"_process_queue_message",
"(",
"message",
"[",
"'data'",
"]",
",",
"new_queue_found",
",",
"batch_exit",
",",
"start_time",
",",
"timeout",
",",
"batch_timeout",
")",
"message",
"=",
"self",
".",
"_pubsub",
".",
"get_message",
"(",
")",
"if",
"self",
".",
"_did_work",
":",
"break",
"# Exit immediately if we did work during the last",
"# execution loop because there might be more work to do",
"elif",
"time",
".",
"time",
"(",
")",
">=",
"batch_exit",
"and",
"new_queue_found",
":",
"break",
"# After finding a new queue we can wait until the",
"# batch timeout expires",
"elif",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
">",
"timeout",
":",
"break"
] | Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method | [
"Check",
"activity",
"channel",
"and",
"wait",
"as",
"necessary",
"."
] | python | train |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L280-L292 | def get_formatted_relative_path(self, path):
"""
Formates path to not start with a leading './' or '.\' if enables in
the config
:param str path: the path to format
:return str: the [formatted] path
"""
if self.format_relative_path and \
(path.startswith('./') or path.startswith('.\\')):
return path[2:]
else:
return path | [
"def",
"get_formatted_relative_path",
"(",
"self",
",",
"path",
")",
":",
"if",
"self",
".",
"format_relative_path",
"and",
"(",
"path",
".",
"startswith",
"(",
"'./'",
")",
"or",
"path",
".",
"startswith",
"(",
"'.\\\\'",
")",
")",
":",
"return",
"path",
"[",
"2",
":",
"]",
"else",
":",
"return",
"path"
] | Formates path to not start with a leading './' or '.\' if enables in
the config
:param str path: the path to format
:return str: the [formatted] path | [
"Formates",
"path",
"to",
"not",
"start",
"with",
"a",
"leading",
".",
"/",
"or",
".",
"\\",
"if",
"enables",
"in",
"the",
"config"
] | python | train |
fhs/pyhdf | pyhdf/V.py | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L778-L798 | def find(self, name):
"""Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfind(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"refnum",
"=",
"_C",
".",
"Vfind",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"name",
")",
"if",
"not",
"refnum",
":",
"raise",
"HDF4Error",
"(",
"\"vgroup not found\"",
")",
"return",
"refnum"
] | Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind | [
"Find",
"a",
"vgroup",
"given",
"its",
"name",
"returning",
"its",
"reference",
"number",
"if",
"found",
"."
] | python | train |
sentinel-hub/sentinelhub-py | sentinelhub/geopedia.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geopedia.py#L388-L398 | def _fetch_features(self):
""" Retrieves a new page of features from Geopedia
"""
if self.next_page_url is None:
return
response = get_json(self.next_page_url, post_values=self.query, headers=self.gpd_session.session_headers)
self.features.extend(response['features'])
self.next_page_url = response['pagination']['next']
self.layer_size = response['pagination']['total'] | [
"def",
"_fetch_features",
"(",
"self",
")",
":",
"if",
"self",
".",
"next_page_url",
"is",
"None",
":",
"return",
"response",
"=",
"get_json",
"(",
"self",
".",
"next_page_url",
",",
"post_values",
"=",
"self",
".",
"query",
",",
"headers",
"=",
"self",
".",
"gpd_session",
".",
"session_headers",
")",
"self",
".",
"features",
".",
"extend",
"(",
"response",
"[",
"'features'",
"]",
")",
"self",
".",
"next_page_url",
"=",
"response",
"[",
"'pagination'",
"]",
"[",
"'next'",
"]",
"self",
".",
"layer_size",
"=",
"response",
"[",
"'pagination'",
"]",
"[",
"'total'",
"]"
] | Retrieves a new page of features from Geopedia | [
"Retrieves",
"a",
"new",
"page",
"of",
"features",
"from",
"Geopedia"
] | python | train |
numenta/nupic | src/nupic/data/aggregator.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/aggregator.py#L720-L834 | def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
"""Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] |
[weeks days hours minutes seconds milliseconds microseconds]
NOTE: years and months are mutually-exclusive with the other units.
See getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
inputFilename: filename of the input dataset within examples/prediction/data
outputFilename: name for the output file. If not given, a name will be
generated based on the input filename and the aggregation params
retval: Name of the generated output file. This will be the same as the input
file name if no aggregation needed to be performed
If the input file contained a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for all will be to pick the first: lambda x: x[0]
Returns: the path of the aggregated data file if aggregation was performed
(in the same directory as the given input file); if aggregation did not
need to be performed, then the given inputFile argument value is returned.
"""
# Create the input stream
inputFullPath = resource_filename("nupic.datafiles", inputFilename)
inputObj = FileRecordStream(inputFullPath)
# Instantiate the aggregator
aggregator = Aggregator(aggregationInfo=aggregationInfo,
inputFields=inputObj.getFields())
# Is it a null aggregation? If so, just return the input file unmodified
if aggregator.isNullAggregation():
return inputFullPath
# ------------------------------------------------------------------------
# If we were not given an output filename, create one based on the
# aggregation settings
if outputFilename is None:
outputFilename = 'agg_%s' % \
os.path.splitext(os.path.basename(inputFullPath))[0]
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if aggregationInfo.get(k, 0) > 0:
outputFilename += '_%s_%d' % (k, aggregationInfo[k])
outputFilename += '.csv'
outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)
# ------------------------------------------------------------------------
# If some other process already started creating this file, simply
# wait for it to finish and return without doing anything
lockFilePath = outputFilename + '.please_wait'
if os.path.isfile(outputFilename) or \
os.path.isfile(lockFilePath):
while os.path.isfile(lockFilePath):
print 'Waiting for %s to be fully written by another process' % \
lockFilePath
time.sleep(1)
return outputFilename
# Create the lock file
lockFD = open(lockFilePath, 'w')
# -------------------------------------------------------------------------
# Create the output stream
outputObj = FileRecordStream(streamID=outputFilename, write=True,
fields=inputObj.getFields())
# -------------------------------------------------------------------------
# Write all aggregated records to the output
while True:
inRecord = inputObj.getNextRecord()
(aggRecord, aggBookmark) = aggregator.next(inRecord, None)
if aggRecord is None and inRecord is None:
break
if aggRecord is not None:
outputObj.appendRecord(aggRecord)
return outputFilename | [
"def",
"generateDataset",
"(",
"aggregationInfo",
",",
"inputFilename",
",",
"outputFilename",
"=",
"None",
")",
":",
"# Create the input stream",
"inputFullPath",
"=",
"resource_filename",
"(",
"\"nupic.datafiles\"",
",",
"inputFilename",
")",
"inputObj",
"=",
"FileRecordStream",
"(",
"inputFullPath",
")",
"# Instantiate the aggregator",
"aggregator",
"=",
"Aggregator",
"(",
"aggregationInfo",
"=",
"aggregationInfo",
",",
"inputFields",
"=",
"inputObj",
".",
"getFields",
"(",
")",
")",
"# Is it a null aggregation? If so, just return the input file unmodified",
"if",
"aggregator",
".",
"isNullAggregation",
"(",
")",
":",
"return",
"inputFullPath",
"# ------------------------------------------------------------------------",
"# If we were not given an output filename, create one based on the",
"# aggregation settings",
"if",
"outputFilename",
"is",
"None",
":",
"outputFilename",
"=",
"'agg_%s'",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"inputFullPath",
")",
")",
"[",
"0",
"]",
"timePeriods",
"=",
"'years months weeks days '",
"'hours minutes seconds milliseconds microseconds'",
"for",
"k",
"in",
"timePeriods",
".",
"split",
"(",
")",
":",
"if",
"aggregationInfo",
".",
"get",
"(",
"k",
",",
"0",
")",
">",
"0",
":",
"outputFilename",
"+=",
"'_%s_%d'",
"%",
"(",
"k",
",",
"aggregationInfo",
"[",
"k",
"]",
")",
"outputFilename",
"+=",
"'.csv'",
"outputFilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"inputFullPath",
")",
",",
"outputFilename",
")",
"# ------------------------------------------------------------------------",
"# If some other process already started creating this file, simply",
"# wait for it to finish and return without doing anything",
"lockFilePath",
"=",
"outputFilename",
"+",
"'.please_wait'",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"outputFilename",
")",
"or",
"os",
".",
"path",
".",
"isfile",
"(",
"lockFilePath",
")",
":",
"while",
"os",
".",
"path",
".",
"isfile",
"(",
"lockFilePath",
")",
":",
"print",
"'Waiting for %s to be fully written by another process'",
"%",
"lockFilePath",
"time",
".",
"sleep",
"(",
"1",
")",
"return",
"outputFilename",
"# Create the lock file",
"lockFD",
"=",
"open",
"(",
"lockFilePath",
",",
"'w'",
")",
"# -------------------------------------------------------------------------",
"# Create the output stream",
"outputObj",
"=",
"FileRecordStream",
"(",
"streamID",
"=",
"outputFilename",
",",
"write",
"=",
"True",
",",
"fields",
"=",
"inputObj",
".",
"getFields",
"(",
")",
")",
"# -------------------------------------------------------------------------",
"# Write all aggregated records to the output",
"while",
"True",
":",
"inRecord",
"=",
"inputObj",
".",
"getNextRecord",
"(",
")",
"(",
"aggRecord",
",",
"aggBookmark",
")",
"=",
"aggregator",
".",
"next",
"(",
"inRecord",
",",
"None",
")",
"if",
"aggRecord",
"is",
"None",
"and",
"inRecord",
"is",
"None",
":",
"break",
"if",
"aggRecord",
"is",
"not",
"None",
":",
"outputObj",
".",
"appendRecord",
"(",
"aggRecord",
")",
"return",
"outputFilename"
] | Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] |
[weeks days hours minutes seconds milliseconds microseconds]
NOTE: years and months are mutually-exclusive with the other units.
See getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
inputFilename: filename of the input dataset within examples/prediction/data
outputFilename: name for the output file. If not given, a name will be
generated based on the input filename and the aggregation params
retval: Name of the generated output file. This will be the same as the input
file name if no aggregation needed to be performed
If the input file contained a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for all will be to pick the first: lambda x: x[0]
Returns: the path of the aggregated data file if aggregation was performed
(in the same directory as the given input file); if aggregation did not
need to be performed, then the given inputFile argument value is returned. | [
"Generate",
"a",
"dataset",
"of",
"aggregated",
"values"
] | python | valid |
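The `generateDataset` record above walks through how time-based aggregation is configured, so a small invocation sketch may help. Everything below except the parameter shapes is an assumption: the field names, the choice of 'sum'/'first' as aggregation functions, and the input filename are illustrative only, and the docstring does not state whether aggregation functions are passed as names or callables (string names are assumed here).

from nupic.data.aggregator import generateDataset

# Illustrative aggregation spec: field names and functions are assumptions.
aggregation_info = {
    'fields': [('consumption', 'sum'),   # aggregate this field with sum
               ('timestamp', 'first')],  # keep the first timestamp per bucket
    'hours': 1,                          # aggregation period: 1 hour 30 minutes
    'minutes': 30,
}

# Returns the path of the aggregated CSV; if the period is empty or zero
# (a "null" aggregation) the original input path is returned unchanged.
out_path = generateDataset(aggregation_info, inputFilename='gymdata.csv')
print(out_path)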
apache/incubator-mxnet | example/gluon/lipnet/utils/align.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/align.py#L54-L60 | def sentence(self, padding=75):
"""
Get sentence
"""
vec = word_to_vector(self.sentence_str)
vec += [-1] * (padding - self.sentence_length)
return np.array(vec, dtype=np.int32) | [
"def",
"sentence",
"(",
"self",
",",
"padding",
"=",
"75",
")",
":",
"vec",
"=",
"word_to_vector",
"(",
"self",
".",
"sentence_str",
")",
"vec",
"+=",
"[",
"-",
"1",
"]",
"*",
"(",
"padding",
"-",
"self",
".",
"sentence_length",
")",
"return",
"np",
".",
"array",
"(",
"vec",
",",
"dtype",
"=",
"np",
".",
"int32",
")"
] | Get sentence | [
"Get",
"sentence"
] | python | train |
phaethon/kamene | kamene/contrib/gsm_um.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2390-L2399 | def detachRequest(GmmCause_presence=0):
"""DETACH REQUEST Section 9.4.5"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x5) # 00000101
c = DetachTypeAndForceToStandby()
packet = a / b / c
if GmmCause_presence is 1:
e = GmmCause(ieiGC=0x25)
packet = packet / e
return packet | [
"def",
"detachRequest",
"(",
"GmmCause_presence",
"=",
"0",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x5",
")",
"# 00000101",
"c",
"=",
"DetachTypeAndForceToStandby",
"(",
")",
"packet",
"=",
"a",
"/",
"b",
"/",
"c",
"if",
"GmmCause_presence",
"is",
"1",
":",
"e",
"=",
"GmmCause",
"(",
"ieiGC",
"=",
"0x25",
")",
"packet",
"=",
"packet",
"/",
"e",
"return",
"packet"
] | DETACH REQUEST Section 9.4.5 | [
"DETACH",
"REQUEST",
"Section",
"9",
".",
"4",
".",
"5"
] | python | train |
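The `detachRequest` record above shows that the GMM cause information element is only appended when `GmmCause_presence` is 1, which a short usage sketch makes concrete. The import path is inferred from the record's file path and is an assumption.

from kamene.contrib.gsm_um import detachRequest

plain = detachRequest()                          # mandatory IEs only: TpPd, MessageType,
                                                 # DetachTypeAndForceToStandby
with_cause = detachRequest(GmmCause_presence=1)  # also appends the GmmCause IE (iei 0x25)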
lablup/backend.ai-client-py | src/ai/backend/client/kernel.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/kernel.py#L168-L200 | async def complete(self, code: str, opts: dict = None) -> Iterable[str]:
'''
Gets the auto-completion candidates from the given code string,
as if a user has pressed the tab key just after the code in
IDEs.
Depending on the language of the compute session, this feature
may not be supported. Unsupported sessions returns an empty list.
:param code: An (incomplete) code text.
:param opts: Additional information about the current cursor position,
such as row, col, line and the remainder text.
:returns: An ordered list of strings.
'''
opts = {} if opts is None else opts
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key
rqst = Request(self.session,
'POST', '/kernel/{}/complete'.format(self.kernel_id),
params=params)
rqst.set_json({
'code': code,
'options': {
'row': int(opts.get('row', 0)),
'col': int(opts.get('col', 0)),
'line': opts.get('line', ''),
'post': opts.get('post', ''),
},
})
async with rqst.fetch() as resp:
return await resp.json() | [
"async",
"def",
"complete",
"(",
"self",
",",
"code",
":",
"str",
",",
"opts",
":",
"dict",
"=",
"None",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"opts",
"=",
"{",
"}",
"if",
"opts",
"is",
"None",
"else",
"opts",
"params",
"=",
"{",
"}",
"if",
"self",
".",
"owner_access_key",
":",
"params",
"[",
"'owner_access_key'",
"]",
"=",
"self",
".",
"owner_access_key",
"rqst",
"=",
"Request",
"(",
"self",
".",
"session",
",",
"'POST'",
",",
"'/kernel/{}/complete'",
".",
"format",
"(",
"self",
".",
"kernel_id",
")",
",",
"params",
"=",
"params",
")",
"rqst",
".",
"set_json",
"(",
"{",
"'code'",
":",
"code",
",",
"'options'",
":",
"{",
"'row'",
":",
"int",
"(",
"opts",
".",
"get",
"(",
"'row'",
",",
"0",
")",
")",
",",
"'col'",
":",
"int",
"(",
"opts",
".",
"get",
"(",
"'col'",
",",
"0",
")",
")",
",",
"'line'",
":",
"opts",
".",
"get",
"(",
"'line'",
",",
"''",
")",
",",
"'post'",
":",
"opts",
".",
"get",
"(",
"'post'",
",",
"''",
")",
",",
"}",
",",
"}",
")",
"async",
"with",
"rqst",
".",
"fetch",
"(",
")",
"as",
"resp",
":",
"return",
"await",
"resp",
".",
"json",
"(",
")"
] | Gets the auto-completion candidates from the given code string,
as if a user has pressed the tab key just after the code in
IDEs.
Depending on the language of the compute session, this feature
may not be supported. Unsupported sessions returns an empty list.
:param code: An (incomplete) code text.
:param opts: Additional information about the current cursor position,
such as row, col, line and the remainder text.
:returns: An ordered list of strings. | [
"Gets",
"the",
"auto",
"-",
"completion",
"candidates",
"from",
"the",
"given",
"code",
"string",
"as",
"if",
"a",
"user",
"has",
"pressed",
"the",
"tab",
"key",
"just",
"after",
"the",
"code",
"in",
"IDEs",
"."
] | python | train |
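The `complete` record above documents an async auto-completion call, so a hedged usage sketch follows. How the kernel/session object is obtained is not part of the record and is assumed here; only the `complete(code, opts)` call shape and the cursor-position keys come from the docstring.

import asyncio

async def show_candidates(kernel):
    # 'kernel' is assumed to be an already-created compute-session handle.
    candidates = await kernel.complete(
        'import o',                        # incomplete code text
        opts={'row': 0, 'col': 8,          # cursor position
              'line': 'import o',          # current line under the cursor
              'post': ''},                 # remainder text after the cursor
    )
    # Languages without completion support simply yield an empty list.
    for candidate in candidates:
        print(candidate)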
StackStorm/pybind | pybind/slxos/v17r_2_00/routing_system/interface/ve/ipv6/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/routing_system/interface/ve/ipv6/__init__.py#L559-L580 | def _set_vrrpv3e(self, v, load=False):
"""
Setter method for vrrpv3e, mapped from YANG variable /routing_system/interface/ve/ipv6/vrrpv3e (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrrpv3e is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrrpv3e() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrid",vrrpv3e.vrrpv3e, yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrid', extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}), is_container='list', yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vrrpv3e must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrid",vrrpv3e.vrrpv3e, yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrid', extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}), is_container='list', yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='list', is_config=True)""",
})
self.__vrrpv3e = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_vrrpv3e",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"vrid\"",
",",
"vrrpv3e",
".",
"vrrpv3e",
",",
"yang_name",
"=",
"\"vrrpv3e\"",
",",
"rest_name",
"=",
"\"vrrp-extended-group\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'vrid'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Start VRRPE configuration'",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'alt-name'",
":",
"u'vrrp-extended-group'",
",",
"u'sort-priority'",
":",
"u'143'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-no-match-completion'",
":",
"None",
",",
"u'cli-full-command'",
":",
"None",
",",
"u'callpoint'",
":",
"u'vrrpv3eSessionVlan'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"vrrpv3e\"",
",",
"rest_name",
"=",
"\"vrrp-extended-group\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Start VRRPE configuration'",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'alt-name'",
":",
"u'vrrp-extended-group'",
",",
"u'sort-priority'",
":",
"u'143'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-no-match-completion'",
":",
"None",
",",
"u'cli-full-command'",
":",
"None",
",",
"u'callpoint'",
":",
"u'vrrpv3eSessionVlan'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-vrrpv3'",
",",
"defining_module",
"=",
"'brocade-vrrpv3'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"vrrpv3e must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"vrid\",vrrpv3e.vrrpv3e, yang_name=\"vrrpv3e\", rest_name=\"vrrp-extended-group\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrid', extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}), is_container='list', yang_name=\"vrrpv3e\", rest_name=\"vrrp-extended-group\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__vrrpv3e",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for vrrpv3e, mapped from YANG variable /routing_system/interface/ve/ipv6/vrrpv3e (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrrpv3e is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrrpv3e() directly. | [
"Setter",
"method",
"for",
"vrrpv3e",
"mapped",
"from",
"YANG",
"variable",
"/",
"routing_system",
"/",
"interface",
"/",
"ve",
"/",
"ipv6",
"/",
"vrrpv3e",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_vrrpv3e",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_vrrpv3e",
"()",
"directly",
"."
] | python | train |
cons3rt/pycons3rt | pycons3rt/bash.py | https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/bash.py#L677-L719 | def get_ip(interface=0):
"""This method return the IP address
:param interface: (int) Interface number (e.g. 0 for eth0)
:return: (str) IP address or None
"""
log = logging.getLogger(mod_logger + '.get_ip')
log.info('Getting the IP address for this system...')
ip_address = None
try:
log.info('Attempting to get IP address by hostname...')
ip_address = socket.gethostbyname(socket.gethostname())
except socket.error:
log.info('Unable to get IP address for this system using hostname, '
'using a bash command...')
command = 'ip addr show eth%s | grep inet | grep -v inet6 | ' \
'awk \'{ print $2 }\' | cut -d/ -f1 ' \
'>> /root/ip' % interface
try:
log.info('Running command: %s', command)
subprocess.check_call(command, shell=True)
except(OSError, subprocess.CalledProcessError):
_, ex, trace = sys.exc_info()
msg = 'Unable to get the IP address of this system\n{e}'.format(
e=str(ex))
log.error(msg)
raise CommandError, msg, trace
else:
ip_file = '/root/ip'
log.info('Command executed successfully, pulling IP address from '
'file: %s', ip_file)
if os.path.isfile(ip_file):
with open(ip_file, 'r') as f:
for line in f:
ip_address = line.strip()
log.info('Found IP address from file: %s', ip_address)
else:
msg = 'File not found: {f}'.format(f=ip_file)
log.error(msg)
raise CommandError(msg)
log.info('Returning IP address: %s', ip_address)
return ip_address | [
"def",
"get_ip",
"(",
"interface",
"=",
"0",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"mod_logger",
"+",
"'.get_ip'",
")",
"log",
".",
"info",
"(",
"'Getting the IP address for this system...'",
")",
"ip_address",
"=",
"None",
"try",
":",
"log",
".",
"info",
"(",
"'Attempting to get IP address by hostname...'",
")",
"ip_address",
"=",
"socket",
".",
"gethostbyname",
"(",
"socket",
".",
"gethostname",
"(",
")",
")",
"except",
"socket",
".",
"error",
":",
"log",
".",
"info",
"(",
"'Unable to get IP address for this system using hostname, '",
"'using a bash command...'",
")",
"command",
"=",
"'ip addr show eth%s | grep inet | grep -v inet6 | '",
"'awk \\'{ print $2 }\\' | cut -d/ -f1 '",
"'>> /root/ip'",
"%",
"interface",
"try",
":",
"log",
".",
"info",
"(",
"'Running command: %s'",
",",
"command",
")",
"subprocess",
".",
"check_call",
"(",
"command",
",",
"shell",
"=",
"True",
")",
"except",
"(",
"OSError",
",",
"subprocess",
".",
"CalledProcessError",
")",
":",
"_",
",",
"ex",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"'Unable to get the IP address of this system\\n{e}'",
".",
"format",
"(",
"e",
"=",
"str",
"(",
"ex",
")",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"CommandError",
",",
"msg",
",",
"trace",
"else",
":",
"ip_file",
"=",
"'/root/ip'",
"log",
".",
"info",
"(",
"'Command executed successfully, pulling IP address from '",
"'file: %s'",
",",
"ip_file",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"ip_file",
")",
":",
"with",
"open",
"(",
"ip_file",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"ip_address",
"=",
"line",
".",
"strip",
"(",
")",
"log",
".",
"info",
"(",
"'Found IP address from file: %s'",
",",
"ip_address",
")",
"else",
":",
"msg",
"=",
"'File not found: {f}'",
".",
"format",
"(",
"f",
"=",
"ip_file",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"CommandError",
"(",
"msg",
")",
"log",
".",
"info",
"(",
"'Returning IP address: %s'",
",",
"ip_address",
")",
"return",
"ip_address"
] | This method return the IP address
:param interface: (int) Interface number (e.g. 0 for eth0)
:return: (str) IP address or None | [
"This",
"method",
"return",
"the",
"IP",
"address"
] | python | train |
buguroo/pyknow | pyknow/matchers/rete/abstract.py | https://github.com/buguroo/pyknow/blob/48818336f2e9a126f1964f2d8dc22d37ff800fe8/pyknow/matchers/rete/abstract.py#L57-L61 | def activate_left(self, token):
"""Make a copy of the received token and call `_activate_left`."""
watchers.MATCHER.debug(
"Node <%s> activated left with token %r", self, token)
return self._activate_left(token.copy()) | [
"def",
"activate_left",
"(",
"self",
",",
"token",
")",
":",
"watchers",
".",
"MATCHER",
".",
"debug",
"(",
"\"Node <%s> activated left with token %r\"",
",",
"self",
",",
"token",
")",
"return",
"self",
".",
"_activate_left",
"(",
"token",
".",
"copy",
"(",
")",
")"
] | Make a copy of the received token and call `_activate_left`. | [
"Make",
"a",
"copy",
"of",
"the",
"received",
"token",
"and",
"call",
"_activate_left",
"."
] | python | train |
praekelt/jmbo-poll | poll/models.py | https://github.com/praekelt/jmbo-poll/blob/322cd398372139e9db74a37cb2ce8ab1c2ef17fd/poll/models.py#L30-L54 | def can_vote_on_poll(self, request):
"""Based on jmbo.models.can_vote."""
# can't vote if liking is closed
if self.votes_closed:
return False, 'closed'
# can't vote if liking is disabled
if not self.votes_enabled:
return False, 'disabled'
# anonymous users can't vote if anonymous votes are disabled
if not request.user.is_authenticated() and not \
self.anonymous_votes:
return False, 'auth_required'
# return false if existing votes are found
votes = Vote.objects.filter(
object_id__in=[o.id for o in self.polloption_set.all()],
token=request.secretballot_token
)
if votes.exists():
return False, 'voted'
else:
return True, 'can_vote' | [
"def",
"can_vote_on_poll",
"(",
"self",
",",
"request",
")",
":",
"# can't vote if liking is closed",
"if",
"self",
".",
"votes_closed",
":",
"return",
"False",
",",
"'closed'",
"# can't vote if liking is disabled",
"if",
"not",
"self",
".",
"votes_enabled",
":",
"return",
"False",
",",
"'disabled'",
"# anonymous users can't vote if anonymous votes are disabled",
"if",
"not",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
"and",
"not",
"self",
".",
"anonymous_votes",
":",
"return",
"False",
",",
"'auth_required'",
"# return false if existing votes are found",
"votes",
"=",
"Vote",
".",
"objects",
".",
"filter",
"(",
"object_id__in",
"=",
"[",
"o",
".",
"id",
"for",
"o",
"in",
"self",
".",
"polloption_set",
".",
"all",
"(",
")",
"]",
",",
"token",
"=",
"request",
".",
"secretballot_token",
")",
"if",
"votes",
".",
"exists",
"(",
")",
":",
"return",
"False",
",",
"'voted'",
"else",
":",
"return",
"True",
",",
"'can_vote'"
] | Based on jmbo.models.can_vote. | [
"Based",
"on",
"jmbo",
".",
"models",
".",
"can_vote",
"."
] | python | train |
rigetti/quantumflow | quantumflow/forest/__init__.py | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/forest/__init__.py#L160-L168 | def pyquil_to_image(program: pyquil.Program) -> PIL.Image: # pragma: no cover
"""Returns an image of a pyquil circuit.
See circuit_to_latex() for more details.
"""
circ = pyquil_to_circuit(program)
latex = circuit_to_latex(circ)
img = render_latex(latex)
return img | [
"def",
"pyquil_to_image",
"(",
"program",
":",
"pyquil",
".",
"Program",
")",
"->",
"PIL",
".",
"Image",
":",
"# pragma: no cover",
"circ",
"=",
"pyquil_to_circuit",
"(",
"program",
")",
"latex",
"=",
"circuit_to_latex",
"(",
"circ",
")",
"img",
"=",
"render_latex",
"(",
"latex",
")",
"return",
"img"
] | Returns an image of a pyquil circuit.
See circuit_to_latex() for more details. | [
"Returns",
"an",
"image",
"of",
"a",
"pyquil",
"circuit",
"."
] | python | train |
SpockBotMC/SpockBot | spockbot/mcp/yggdrasil.py | https://github.com/SpockBotMC/SpockBot/blob/f89911551f18357720034fbaa52837a0d09f66ea/spockbot/mcp/yggdrasil.py#L50-L76 | def authenticate(self):
"""
Generate an access token using an username and password. Any existing
client token is invalidated if not provided.
Returns:
dict: Response or error dict
"""
endpoint = '/authenticate'
payload = {
'agent': {
'name': 'Minecraft',
'version': self.ygg_version,
},
'username': self.username,
'password': self.password,
'clientToken': self.client_token,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
return False
self.access_token = rep['accessToken']
self.client_token = rep['clientToken']
self.available_profiles = rep['availableProfiles']
self.selected_profile = rep['selectedProfile']
return True | [
"def",
"authenticate",
"(",
"self",
")",
":",
"endpoint",
"=",
"'/authenticate'",
"payload",
"=",
"{",
"'agent'",
":",
"{",
"'name'",
":",
"'Minecraft'",
",",
"'version'",
":",
"self",
".",
"ygg_version",
",",
"}",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'password'",
":",
"self",
".",
"password",
",",
"'clientToken'",
":",
"self",
".",
"client_token",
",",
"}",
"rep",
"=",
"self",
".",
"_ygg_req",
"(",
"endpoint",
",",
"payload",
")",
"if",
"not",
"rep",
"or",
"'error'",
"in",
"rep",
":",
"return",
"False",
"self",
".",
"access_token",
"=",
"rep",
"[",
"'accessToken'",
"]",
"self",
".",
"client_token",
"=",
"rep",
"[",
"'clientToken'",
"]",
"self",
".",
"available_profiles",
"=",
"rep",
"[",
"'availableProfiles'",
"]",
"self",
".",
"selected_profile",
"=",
"rep",
"[",
"'selectedProfile'",
"]",
"return",
"True"
] | Generate an access token using an username and password. Any existing
client token is invalidated if not provided.
Returns:
dict: Response or error dict | [
"Generate",
"an",
"access",
"token",
"using",
"an",
"username",
"and",
"password",
".",
"Any",
"existing",
"client",
"token",
"is",
"invalidated",
"if",
"not",
"provided",
"."
] | python | train |
JoeVirtual/KonFoo | konfoo/core.py | https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1274-L1313 | def view_fields(self, *attributes, **options):
""" Returns a list with the selected field *attribute* or a list with the
dictionaries of the selected field *attributes* for each :class:`Field`
*nested* in the `Sequence`.
The *attributes* of each :class:`Field` for containers *nested* in the
`Sequence` are viewed as well (chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple fieldnames: sequence of dictionary keys for the selected
field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if ``True`` all :class:`Pointer` fields nested in the
`Sequence` views their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
items = list()
for index, item in enumerate(self):
if is_container(item):
# Container
items.append(item.view_fields(*attributes, **options))
elif is_pointer(item) and get_nested(options):
# Pointer
items.append(item.view_fields(*attributes, **options))
elif is_field(item):
# Field
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
fieldnames = options.get('fieldnames', attributes)
items.append(dict(zip(fieldnames, field_getter(item))))
else:
items.append(field_getter(item))
else:
raise MemberTypeError(self, item, index)
return items | [
"def",
"view_fields",
"(",
"self",
",",
"*",
"attributes",
",",
"*",
"*",
"options",
")",
":",
"items",
"=",
"list",
"(",
")",
"for",
"index",
",",
"item",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"is_container",
"(",
"item",
")",
":",
"# Container",
"items",
".",
"append",
"(",
"item",
".",
"view_fields",
"(",
"*",
"attributes",
",",
"*",
"*",
"options",
")",
")",
"elif",
"is_pointer",
"(",
"item",
")",
"and",
"get_nested",
"(",
"options",
")",
":",
"# Pointer",
"items",
".",
"append",
"(",
"item",
".",
"view_fields",
"(",
"*",
"attributes",
",",
"*",
"*",
"options",
")",
")",
"elif",
"is_field",
"(",
"item",
")",
":",
"# Field",
"if",
"attributes",
":",
"field_getter",
"=",
"attrgetter",
"(",
"*",
"attributes",
")",
"else",
":",
"field_getter",
"=",
"attrgetter",
"(",
"'value'",
")",
"if",
"len",
"(",
"attributes",
")",
">",
"1",
":",
"fieldnames",
"=",
"options",
".",
"get",
"(",
"'fieldnames'",
",",
"attributes",
")",
"items",
".",
"append",
"(",
"dict",
"(",
"zip",
"(",
"fieldnames",
",",
"field_getter",
"(",
"item",
")",
")",
")",
")",
"else",
":",
"items",
".",
"append",
"(",
"field_getter",
"(",
"item",
")",
")",
"else",
":",
"raise",
"MemberTypeError",
"(",
"self",
",",
"item",
",",
"index",
")",
"return",
"items"
] | Returns a list with the selected field *attribute* or a list with the
dictionaries of the selected field *attributes* for each :class:`Field`
*nested* in the `Sequence`.
The *attributes* of each :class:`Field` for containers *nested* in the
`Sequence` are viewed as well (chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple fieldnames: sequence of dictionary keys for the selected
field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if ``True`` all :class:`Pointer` fields nested in the
`Sequence` views their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call). | [
"Returns",
"a",
"list",
"with",
"the",
"selected",
"field",
"*",
"attribute",
"*",
"or",
"a",
"list",
"with",
"the",
"dictionaries",
"of",
"the",
"selected",
"field",
"*",
"attributes",
"*",
"for",
"each",
":",
"class",
":",
"Field",
"*",
"nested",
"*",
"in",
"the",
"Sequence",
"."
] | python | train |
openstack/proliantutils | proliantutils/redfish/resources/system/smart_storage_config.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/smart_storage_config.py#L119-L136 | def delete_raid(self):
"""Clears the RAID configuration from the system.
"""
if not self.logical_drives:
msg = ('No logical drives found on the controller '
'%(controller)s' % {'controller': str(self.controller_id)})
LOG.debug(msg)
raise exception.IloLogicalDriveNotFoundError(msg)
lds = [{
'Actions': [{"Action": "LogicalDriveDelete"}],
'VolumeUniqueIdentifier':
logical_drive.volume_unique_identifier}
for logical_drive in self.logical_drives]
data = {'LogicalDrives': lds, 'DataGuard': 'Permissive'}
self._conn.put(self.settings_uri, data=data) | [
"def",
"delete_raid",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"logical_drives",
":",
"msg",
"=",
"(",
"'No logical drives found on the controller '",
"'%(controller)s'",
"%",
"{",
"'controller'",
":",
"str",
"(",
"self",
".",
"controller_id",
")",
"}",
")",
"LOG",
".",
"debug",
"(",
"msg",
")",
"raise",
"exception",
".",
"IloLogicalDriveNotFoundError",
"(",
"msg",
")",
"lds",
"=",
"[",
"{",
"'Actions'",
":",
"[",
"{",
"\"Action\"",
":",
"\"LogicalDriveDelete\"",
"}",
"]",
",",
"'VolumeUniqueIdentifier'",
":",
"logical_drive",
".",
"volume_unique_identifier",
"}",
"for",
"logical_drive",
"in",
"self",
".",
"logical_drives",
"]",
"data",
"=",
"{",
"'LogicalDrives'",
":",
"lds",
",",
"'DataGuard'",
":",
"'Permissive'",
"}",
"self",
".",
"_conn",
".",
"put",
"(",
"self",
".",
"settings_uri",
",",
"data",
"=",
"data",
")"
] | Clears the RAID configuration from the system. | [
"Clears",
"the",
"RAID",
"configuration",
"from",
"the",
"system",
"."
] | python | train |
chrisrink10/basilisp | src/basilisp/lang/runtime.py | https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L920-L931 | def deref(o, timeout_s=None, timeout_val=None):
"""Dereference a Deref object and return its contents.
If o is an object implementing IBlockingDeref and timeout_s and
timeout_val are supplied, deref will wait at most timeout_s seconds,
returning timeout_val if timeout_s seconds elapse and o has not
returned."""
if isinstance(o, IDeref):
return o.deref()
elif isinstance(o, IBlockingDeref):
return o.deref(timeout_s, timeout_val)
raise TypeError(f"Object of type {type(o)} cannot be dereferenced") | [
"def",
"deref",
"(",
"o",
",",
"timeout_s",
"=",
"None",
",",
"timeout_val",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"IDeref",
")",
":",
"return",
"o",
".",
"deref",
"(",
")",
"elif",
"isinstance",
"(",
"o",
",",
"IBlockingDeref",
")",
":",
"return",
"o",
".",
"deref",
"(",
"timeout_s",
",",
"timeout_val",
")",
"raise",
"TypeError",
"(",
"f\"Object of type {type(o)} cannot be dereferenced\"",
")"
] | Dereference a Deref object and return its contents.
If o is an object implementing IBlockingDeref and timeout_s and
timeout_val are supplied, deref will wait at most timeout_s seconds,
returning timeout_val if timeout_s seconds elapse and o has not
returned. | [
"Dereference",
"a",
"Deref",
"object",
"and",
"return",
"its",
"contents",
"."
] | python | test |
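The `deref` record above dispatches on `IDeref` versus `IBlockingDeref`, which the sketch below illustrates with a stub container. The `Box` class and the import locations of `IDeref`/`deref` are assumptions for the example; only the dispatch behaviour comes from the record.

from basilisp.lang.runtime import deref, IDeref  # import locations assumed

class Box(IDeref):
    """A minimal IDeref implementation used only for illustration."""
    def __init__(self, value):
        self._value = value

    def deref(self):
        return self._value

print(deref(Box(42)))   # -> 42
# For IBlockingDeref objects, deref(o, timeout_s=1.0, timeout_val=None)
# waits at most one second before falling back to timeout_val.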
callowayproject/Transmogrify | transmogrify/autodetect/__init__.py | https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/autodetect/__init__.py#L136-L164 | def face_and_energy_detector(image_path, detect_faces=True):
"""
Finds faces and energy in an image
"""
source = Image.open(image_path)
work_width = 800
if source.mode != 'RGB' or source.bits != 8:
source24 = source.convert('RGB')
else:
source24 = source.copy()
grayscaleRMY = source24.convert('L', (0.5, 0.419, 0.081, 0))
w = min(grayscaleRMY.size[0], work_width)
h = w * grayscaleRMY.size[1] / grayscaleRMY.size[0]
b = grayscaleRMY.resize((w, h), Image.BICUBIC)
# b.save('step2.jpg')
if detect_faces:
info = do_face_detection(image_path)
if info:
return CropInfo(gravity=info)
b = b.filter(ImageFilter.GaussianBlur(7))
# b.save('step3.jpg')
sobelXfilter = ImageFilter.Kernel((3, 3), (1, 0, -1, 2, 0, -2, 1, 0, -1), -.5)
sobelYfilter = ImageFilter.Kernel((3, 3), (1, 2, 1, 0, 0, 0, -1, -2, -1), -.5)
b = ImageChops.lighter(b.filter(sobelXfilter), b.filter(sobelYfilter))
b = b.filter(ImageFilter.FIND_EDGES)
# b.save('step4.jpg')
ec = energy_center(b)
return CropInfo(gravity=ec) | [
"def",
"face_and_energy_detector",
"(",
"image_path",
",",
"detect_faces",
"=",
"True",
")",
":",
"source",
"=",
"Image",
".",
"open",
"(",
"image_path",
")",
"work_width",
"=",
"800",
"if",
"source",
".",
"mode",
"!=",
"'RGB'",
"or",
"source",
".",
"bits",
"!=",
"8",
":",
"source24",
"=",
"source",
".",
"convert",
"(",
"'RGB'",
")",
"else",
":",
"source24",
"=",
"source",
".",
"copy",
"(",
")",
"grayscaleRMY",
"=",
"source24",
".",
"convert",
"(",
"'L'",
",",
"(",
"0.5",
",",
"0.419",
",",
"0.081",
",",
"0",
")",
")",
"w",
"=",
"min",
"(",
"grayscaleRMY",
".",
"size",
"[",
"0",
"]",
",",
"work_width",
")",
"h",
"=",
"w",
"*",
"grayscaleRMY",
".",
"size",
"[",
"1",
"]",
"/",
"grayscaleRMY",
".",
"size",
"[",
"0",
"]",
"b",
"=",
"grayscaleRMY",
".",
"resize",
"(",
"(",
"w",
",",
"h",
")",
",",
"Image",
".",
"BICUBIC",
")",
"# b.save('step2.jpg')",
"if",
"detect_faces",
":",
"info",
"=",
"do_face_detection",
"(",
"image_path",
")",
"if",
"info",
":",
"return",
"CropInfo",
"(",
"gravity",
"=",
"info",
")",
"b",
"=",
"b",
".",
"filter",
"(",
"ImageFilter",
".",
"GaussianBlur",
"(",
"7",
")",
")",
"# b.save('step3.jpg')",
"sobelXfilter",
"=",
"ImageFilter",
".",
"Kernel",
"(",
"(",
"3",
",",
"3",
")",
",",
"(",
"1",
",",
"0",
",",
"-",
"1",
",",
"2",
",",
"0",
",",
"-",
"2",
",",
"1",
",",
"0",
",",
"-",
"1",
")",
",",
"-",
".5",
")",
"sobelYfilter",
"=",
"ImageFilter",
".",
"Kernel",
"(",
"(",
"3",
",",
"3",
")",
",",
"(",
"1",
",",
"2",
",",
"1",
",",
"0",
",",
"0",
",",
"0",
",",
"-",
"1",
",",
"-",
"2",
",",
"-",
"1",
")",
",",
"-",
".5",
")",
"b",
"=",
"ImageChops",
".",
"lighter",
"(",
"b",
".",
"filter",
"(",
"sobelXfilter",
")",
",",
"b",
".",
"filter",
"(",
"sobelYfilter",
")",
")",
"b",
"=",
"b",
".",
"filter",
"(",
"ImageFilter",
".",
"FIND_EDGES",
")",
"# b.save('step4.jpg')",
"ec",
"=",
"energy_center",
"(",
"b",
")",
"return",
"CropInfo",
"(",
"gravity",
"=",
"ec",
")"
] | Finds faces and energy in an image | [
"Finds",
"faces",
"and",
"energy",
"in",
"an",
"image"
] | python | train |
suryakencana007/baka_model | baka_model/model/helper.py | https://github.com/suryakencana007/baka_model/blob/915c2da9920e973302f5764ae63799acd5ecf0b7/baka_model/model/helper.py#L42-L54 | def guid(*args):
"""
Generates a universally unique ID.
Any arguments only create more randomness.
"""
t = float(time.time() * 1000)
r = float(random.random()*10000000000000)
a = random.random() * 10000000000000
data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
data = hashlib.md5(data.encode()).hexdigest()[:10]
return data | [
"def",
"guid",
"(",
"*",
"args",
")",
":",
"t",
"=",
"float",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"r",
"=",
"float",
"(",
"random",
".",
"random",
"(",
")",
"*",
"10000000000000",
")",
"a",
"=",
"random",
".",
"random",
"(",
")",
"*",
"10000000000000",
"data",
"=",
"str",
"(",
"t",
")",
"+",
"' '",
"+",
"str",
"(",
"r",
")",
"+",
"' '",
"+",
"str",
"(",
"a",
")",
"+",
"' '",
"+",
"str",
"(",
"args",
")",
"data",
"=",
"hashlib",
".",
"md5",
"(",
"data",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"10",
"]",
"return",
"data"
] | Generates a universally unique ID.
Any arguments only create more randomness. | [
"Generates",
"a",
"universally",
"unique",
"ID",
".",
"Any",
"arguments",
"only",
"create",
"more",
"randomness",
"."
] | python | valid |
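The `guid` record above produces a 10-character md5-prefix identifier from the current time, two random numbers and any extra arguments; a quick usage sketch follows. The import path is inferred from the record's file path, and the printed value is illustrative since the result changes on every call.

from baka_model.model.helper import guid

token = guid()                 # e.g. '3f2c1a9b0d' (varies per call)
scoped = guid('order', 42)     # extra arguments only feed more entropy in
print(len(token))              # always 10 hex characters
# Note: this is a pseudo-unique ID, not a cryptographically strong one.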
Knio/pynmea2 | pynmea2/nmea_utils.py | https://github.com/Knio/pynmea2/blob/c4fc66c6a13dd85ad862b15c516245af6e571456/pynmea2/nmea_utils.py#L29-L39 | def dm_to_sd(dm):
'''
Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm
format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed
decimal (python float) format
'''
# '12319.943281'
if not dm or dm == '0':
return 0.
d, m = re.match(r'^(\d+)(\d\d\.\d+)$', dm).groups()
return float(d) + float(m) / 60 | [
"def",
"dm_to_sd",
"(",
"dm",
")",
":",
"# '12319.943281'\r",
"if",
"not",
"dm",
"or",
"dm",
"==",
"'0'",
":",
"return",
"0.",
"d",
",",
"m",
"=",
"re",
".",
"match",
"(",
"r'^(\\d+)(\\d\\d\\.\\d+)$'",
",",
"dm",
")",
".",
"groups",
"(",
")",
"return",
"float",
"(",
"d",
")",
"+",
"float",
"(",
"m",
")",
"/",
"60"
] | Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm
format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed
decimal (python float) format | [
"Converts",
"a",
"geographic",
"co",
"-",
"ordinate",
"given",
"in",
"degrees",
"/",
"minutes",
"dddmm",
".",
"mmmm",
"format",
"(",
"eg",
"12319",
".",
"943281",
"=",
"123",
"degrees",
"19",
".",
"943281",
"minutes",
")",
"to",
"a",
"signed",
"decimal",
"(",
"python",
"float",
")",
"format"
] | python | train |
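The `dm_to_sd` record above includes its own worked value ("12319.943281" = 123 degrees, 19.943281 minutes), which can be checked directly. The import path is inferred from the record's file path.

from pynmea2.nmea_utils import dm_to_sd

sd = dm_to_sd('12319.943281')
print(sd)                                   # 123.33238801666666...
assert abs(sd - (123 + 19.943281 / 60)) < 1e-9
assert dm_to_sd('') == 0.0                  # empty or '0' input maps to 0.0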
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/interactive.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L1955-L1965 | def do_dd(self, arg):
"""
[~thread] dd <register> - show memory contents as dwords
[~thread] dd <register-register> - show memory contents as dwords
[~thread] dd <register> <size> - show memory contents as dwords
[~process] dd <address> - show memory contents as dwords
[~process] dd <address-address> - show memory contents as dwords
[~process] dd <address> <size> - show memory contents as dwords
"""
self.print_memory_display(arg, HexDump.hexblock_dword)
self.last_display_command = self.do_dd | [
"def",
"do_dd",
"(",
"self",
",",
"arg",
")",
":",
"self",
".",
"print_memory_display",
"(",
"arg",
",",
"HexDump",
".",
"hexblock_dword",
")",
"self",
".",
"last_display_command",
"=",
"self",
".",
"do_dd"
] | [~thread] dd <register> - show memory contents as dwords
[~thread] dd <register-register> - show memory contents as dwords
[~thread] dd <register> <size> - show memory contents as dwords
[~process] dd <address> - show memory contents as dwords
[~process] dd <address-address> - show memory contents as dwords
[~process] dd <address> <size> - show memory contents as dwords | [
"[",
"~thread",
"]",
"dd",
"<register",
">",
"-",
"show",
"memory",
"contents",
"as",
"dwords",
"[",
"~thread",
"]",
"dd",
"<register",
"-",
"register",
">",
"-",
"show",
"memory",
"contents",
"as",
"dwords",
"[",
"~thread",
"]",
"dd",
"<register",
">",
"<size",
">",
"-",
"show",
"memory",
"contents",
"as",
"dwords",
"[",
"~process",
"]",
"dd",
"<address",
">",
"-",
"show",
"memory",
"contents",
"as",
"dwords",
"[",
"~process",
"]",
"dd",
"<address",
"-",
"address",
">",
"-",
"show",
"memory",
"contents",
"as",
"dwords",
"[",
"~process",
"]",
"dd",
"<address",
">",
"<size",
">",
"-",
"show",
"memory",
"contents",
"as",
"dwords"
] | python | train |
pkgw/pwkit | pwkit/fk10.py | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/fk10.py#L173-L229 | def do_figure9_calc_lowlevel(shlib_path, set_unused=True):
"""Reproduce the calculation used to produce Figure 9 of the Fleischman &
Kuznetsov (2010) paper, using our low-level interfaces.
Input parameters, etc., come from the file ``Flare071231a.pro`` that is
distributed with the paper’s Supplementary Data archive.
Invoke with something like::
from pwkit import fk10
arr = fk10.do_figure9_calc('path/to/libGS_Std_HomSrc_CEH.so.64')
"""
fk10func = FK10Invoker(shlib_path)
in_vals = make_in_vals_array()
in_vals[IN_VAL_AREA] = 1.33e18
in_vals[IN_VAL_DEPTH] = 6e8
in_vals[IN_VAL_T0] = 2.1e7
# EPSILON (these markers are to aid counting indices)
# KAPPA
in_vals[IN_VAL_INTEG_METH] = 16
in_vals[IN_VAL_EMIN] = 0.016
in_vals[IN_VAL_EMAX] = 4.0
# EBREAK
in_vals[IN_VAL_DELTA1] = 3.7
# DELTA2
in_vals[IN_VAL_N0] = 3e9
in_vals[IN_VAL_NB] = 5e9 / 3
in_vals[IN_VAL_B] = 48
in_vals[IN_VAL_THETA] = 50
in_vals[IN_VAL_FREQ0] = 5e8
in_vals[IN_VAL_LOGDFREQ] = 0.02
in_vals[IN_VAL_EDIST] = EDIST_PLW
in_vals[IN_VAL_NFREQ] = 100
in_vals[IN_VAL_PADIST] = PADIST_GLC
in_vals[IN_VAL_LCBDY] = 90
# BEAMDIR
in_vals[IN_VAL_DELTAMU] = 0.4
# A4
# (slot 24 unused)
in_vals[IN_VAL_FCCR] = 12
in_vals[IN_VAL_FWHCR] = in_vals[IN_VAL_FCCR]
in_vals[IN_VAL_RENORMFLAG] = 1
in_vals[IN_VAL_QFLAG] = 2
if set_unused:
# Sanity-checking: these parameters shouldn't affect the calculated
# result.
in_vals[IN_VAL_EPSILON] = 0.05
in_vals[IN_VAL_KAPPA] = 4.0
in_vals[IN_VAL_EBREAK] = 1.0
in_vals[IN_VAL_DELTA2] = 6.0
in_vals[IN_VAL_BEAMDIR] = 90
in_vals[IN_VAL_A4] = 1
return fk10func(in_vals) | [
"def",
"do_figure9_calc_lowlevel",
"(",
"shlib_path",
",",
"set_unused",
"=",
"True",
")",
":",
"fk10func",
"=",
"FK10Invoker",
"(",
"shlib_path",
")",
"in_vals",
"=",
"make_in_vals_array",
"(",
")",
"in_vals",
"[",
"IN_VAL_AREA",
"]",
"=",
"1.33e18",
"in_vals",
"[",
"IN_VAL_DEPTH",
"]",
"=",
"6e8",
"in_vals",
"[",
"IN_VAL_T0",
"]",
"=",
"2.1e7",
"# EPSILON (these markers are to aid counting indices)",
"# KAPPA",
"in_vals",
"[",
"IN_VAL_INTEG_METH",
"]",
"=",
"16",
"in_vals",
"[",
"IN_VAL_EMIN",
"]",
"=",
"0.016",
"in_vals",
"[",
"IN_VAL_EMAX",
"]",
"=",
"4.0",
"# EBREAK",
"in_vals",
"[",
"IN_VAL_DELTA1",
"]",
"=",
"3.7",
"# DELTA2",
"in_vals",
"[",
"IN_VAL_N0",
"]",
"=",
"3e9",
"in_vals",
"[",
"IN_VAL_NB",
"]",
"=",
"5e9",
"/",
"3",
"in_vals",
"[",
"IN_VAL_B",
"]",
"=",
"48",
"in_vals",
"[",
"IN_VAL_THETA",
"]",
"=",
"50",
"in_vals",
"[",
"IN_VAL_FREQ0",
"]",
"=",
"5e8",
"in_vals",
"[",
"IN_VAL_LOGDFREQ",
"]",
"=",
"0.02",
"in_vals",
"[",
"IN_VAL_EDIST",
"]",
"=",
"EDIST_PLW",
"in_vals",
"[",
"IN_VAL_NFREQ",
"]",
"=",
"100",
"in_vals",
"[",
"IN_VAL_PADIST",
"]",
"=",
"PADIST_GLC",
"in_vals",
"[",
"IN_VAL_LCBDY",
"]",
"=",
"90",
"# BEAMDIR",
"in_vals",
"[",
"IN_VAL_DELTAMU",
"]",
"=",
"0.4",
"# A4",
"# (slot 24 unused)",
"in_vals",
"[",
"IN_VAL_FCCR",
"]",
"=",
"12",
"in_vals",
"[",
"IN_VAL_FWHCR",
"]",
"=",
"in_vals",
"[",
"IN_VAL_FCCR",
"]",
"in_vals",
"[",
"IN_VAL_RENORMFLAG",
"]",
"=",
"1",
"in_vals",
"[",
"IN_VAL_QFLAG",
"]",
"=",
"2",
"if",
"set_unused",
":",
"# Sanity-checking: these parameters shouldn't affect the calculated",
"# result.",
"in_vals",
"[",
"IN_VAL_EPSILON",
"]",
"=",
"0.05",
"in_vals",
"[",
"IN_VAL_KAPPA",
"]",
"=",
"4.0",
"in_vals",
"[",
"IN_VAL_EBREAK",
"]",
"=",
"1.0",
"in_vals",
"[",
"IN_VAL_DELTA2",
"]",
"=",
"6.0",
"in_vals",
"[",
"IN_VAL_BEAMDIR",
"]",
"=",
"90",
"in_vals",
"[",
"IN_VAL_A4",
"]",
"=",
"1",
"return",
"fk10func",
"(",
"in_vals",
")"
] | Reproduce the calculation used to produce Figure 9 of the Fleischman &
Kuznetsov (2010) paper, using our low-level interfaces.
Input parameters, etc., come from the file ``Flare071231a.pro`` that is
distributed with the paper’s Supplementary Data archive.
Invoke with something like::
from pwkit import fk10
arr = fk10.do_figure9_calc('path/to/libGS_Std_HomSrc_CEH.so.64') | [
"Reproduce",
"the",
"calculation",
"used",
"to",
"produce",
"Figure",
"9",
"of",
"the",
"Fleischman",
"&",
"Kuznetsov",
"(",
"2010",
")",
"paper",
"using",
"our",
"low",
"-",
"level",
"interfaces",
"."
] | python | train |
VingtCinq/python-mailchimp | mailchimp3/entities/listwebhooks.py | https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/listwebhooks.py#L97-L108 | def delete(self, list_id, webhook_id):
"""
Delete a specific webhook in a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param webhook_id: The unique id for the webhook.
:type webhook_id: :py:class:`str`
"""
self.list_id = list_id
self.webhook_id = webhook_id
return self._mc_client._delete(url=self._build_path(list_id, 'webhooks', webhook_id)) | [
"def",
"delete",
"(",
"self",
",",
"list_id",
",",
"webhook_id",
")",
":",
"self",
".",
"list_id",
"=",
"list_id",
"self",
".",
"webhook_id",
"=",
"webhook_id",
"return",
"self",
".",
"_mc_client",
".",
"_delete",
"(",
"url",
"=",
"self",
".",
"_build_path",
"(",
"list_id",
",",
"'webhooks'",
",",
"webhook_id",
")",
")"
] | Delete a specific webhook in a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param webhook_id: The unique id for the webhook.
:type webhook_id: :py:class:`str` | [
"Delete",
"a",
"specific",
"webhook",
"in",
"a",
"list",
"."
] | python | valid |
royi1000/py-libhdate | hdate/converters.py | https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/converters.py#L125-L149 | def jdn_to_gdate(jdn):
"""
Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year
"""
# pylint: disable=invalid-name
# The algorithm is a verbatim copy from Peter Meyer's article
# No explanation in the article is given for the variables
# Hence the exceptions for pylint and for flake8 (E741)
l = jdn + 68569 # noqa: E741
n = (4 * l) // 146097
l = l - (146097 * n + 3) // 4 # noqa: E741
i = (4000 * (l + 1)) // 1461001 # that's 1,461,001
l = l - (1461 * i) // 4 + 31 # noqa: E741
j = (80 * l) // 2447
day = l - (2447 * j) // 80
l = j // 11 # noqa: E741
month = j + 2 - (12 * l)
year = 100 * (n - 49) + i + l # that's a lower-case L
return datetime.date(year, month, day) | [
"def",
"jdn_to_gdate",
"(",
"jdn",
")",
":",
"# pylint: disable=invalid-name",
"# The algorithm is a verbatim copy from Peter Meyer's article",
"# No explanation in the article is given for the variables",
"# Hence the exceptions for pylint and for flake8 (E741)",
"l",
"=",
"jdn",
"+",
"68569",
"# noqa: E741",
"n",
"=",
"(",
"4",
"*",
"l",
")",
"//",
"146097",
"l",
"=",
"l",
"-",
"(",
"146097",
"*",
"n",
"+",
"3",
")",
"//",
"4",
"# noqa: E741",
"i",
"=",
"(",
"4000",
"*",
"(",
"l",
"+",
"1",
")",
")",
"//",
"1461001",
"# that's 1,461,001",
"l",
"=",
"l",
"-",
"(",
"1461",
"*",
"i",
")",
"//",
"4",
"+",
"31",
"# noqa: E741",
"j",
"=",
"(",
"80",
"*",
"l",
")",
"//",
"2447",
"day",
"=",
"l",
"-",
"(",
"2447",
"*",
"j",
")",
"//",
"80",
"l",
"=",
"j",
"//",
"11",
"# noqa: E741",
"month",
"=",
"j",
"+",
"2",
"-",
"(",
"12",
"*",
"l",
")",
"year",
"=",
"100",
"*",
"(",
"n",
"-",
"49",
")",
"+",
"i",
"+",
"l",
"# that's a lower-case L",
"return",
"datetime",
".",
"date",
"(",
"year",
",",
"month",
",",
"day",
")"
] | Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year | [
"Convert",
"from",
"the",
"Julian",
"day",
"to",
"the",
"Gregorian",
"day",
"."
] | python | train |
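The `jdn_to_gdate` record above implements Peter Meyer's Julian-to-Gregorian algorithm; tracing it with a well-known anchor value confirms the arithmetic. Julian Day Number 2451545 corresponds to 1 January 2000, and the import path below is inferred from the record's file path.

import datetime
from hdate.converters import jdn_to_gdate

# Intermediate values along the way: l=2520114, n=68, i=99, j=11
# -> day 1, month 1, year 2000.
assert jdn_to_gdate(2451545) == datetime.date(2000, 1, 1)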
ioos/compliance-checker | compliance_checker/ioos.py | https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/ioos.py#L288-L309 | def check_platform_variable_attributes(self, ds):
'''
Platform variables must contain the following attributes:
ioos_code
long_name
short_name
type
:param netCDF4.Dataset ds: An open netCDF dataset
'''
results = []
platform_name = getattr(ds, 'platform', '')
# There can be multiple platforms defined here (space separated)
for platform in platform_name.split(' '):
if platform in ds.variables:
results += [
self._has_var_attr(ds, platform, 'long_name', 'Platform Long Name'),
self._has_var_attr(ds, platform, 'short_name', 'Platform Short Name'),
self._has_var_attr(ds, platform, 'ioos_code', 'Platform IOOS Code'),
self._has_var_attr(ds, platform, 'type', 'Platform Type')
]
return results | [
"def",
"check_platform_variable_attributes",
"(",
"self",
",",
"ds",
")",
":",
"results",
"=",
"[",
"]",
"platform_name",
"=",
"getattr",
"(",
"ds",
",",
"'platform'",
",",
"''",
")",
"# There can be multiple platforms defined here (space separated)",
"for",
"platform",
"in",
"platform_name",
".",
"split",
"(",
"' '",
")",
":",
"if",
"platform",
"in",
"ds",
".",
"variables",
":",
"results",
"+=",
"[",
"self",
".",
"_has_var_attr",
"(",
"ds",
",",
"platform",
",",
"'long_name'",
",",
"'Platform Long Name'",
")",
",",
"self",
".",
"_has_var_attr",
"(",
"ds",
",",
"platform",
",",
"'short_name'",
",",
"'Platform Short Name'",
")",
",",
"self",
".",
"_has_var_attr",
"(",
"ds",
",",
"platform",
",",
"'ioos_code'",
",",
"'Platform IOOS Code'",
")",
",",
"self",
".",
"_has_var_attr",
"(",
"ds",
",",
"platform",
",",
"'type'",
",",
"'Platform Type'",
")",
"]",
"return",
"results"
] | Platform variables must contain the following attributes:
ioos_code
long_name
short_name
type
:param netCDF4.Dataset ds: An open netCDF dataset | [
"Platform",
"variables",
"must",
"contain",
"the",
"following",
"attributes",
":",
"ioos_code",
"long_name",
"short_name",
"type"
] | python | train |
ARMmbed/icetea | icetea_lib/LogManager.py | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/LogManager.py#L699-L733 | def filter(self, record):
"""
Filter record
:param record: Record to filter
:return:
"""
def modify(value):
"""
Modify logged record, truncating it to max length and logging remaining length
:param value: Record to modify
:return:
"""
if isinstance(value, six.string_types):
if len(value) < ContextFilter.MAXIMUM_LENGTH:
return value
try:
return "{}...[{} more bytes]".format(
value[:ContextFilter.REVEAL_LENGTH],
len(value) - ContextFilter.REVEAL_LENGTH)
except UnicodeError:
return "{}...[{} more bytes]".format(
repr(value[:ContextFilter.REVEAL_LENGTH]),
len(value) - ContextFilter.REVEAL_LENGTH)
elif isinstance(value, six.binary_type):
return "{}...[{} more bytes]".format(
repr(value[:ContextFilter.REVEAL_LENGTH]),
len(value) - ContextFilter.REVEAL_LENGTH)
else:
return value
record.msg = traverse_json_obj(record.msg, callback=modify)
return True | [
"def",
"filter",
"(",
"self",
",",
"record",
")",
":",
"def",
"modify",
"(",
"value",
")",
":",
"\"\"\"\n Modify logged record, truncating it to max length and logging remaining length\n :param value: Record to modify\n :return:\n \"\"\"",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"if",
"len",
"(",
"value",
")",
"<",
"ContextFilter",
".",
"MAXIMUM_LENGTH",
":",
"return",
"value",
"try",
":",
"return",
"\"{}...[{} more bytes]\"",
".",
"format",
"(",
"value",
"[",
":",
"ContextFilter",
".",
"REVEAL_LENGTH",
"]",
",",
"len",
"(",
"value",
")",
"-",
"ContextFilter",
".",
"REVEAL_LENGTH",
")",
"except",
"UnicodeError",
":",
"return",
"\"{}...[{} more bytes]\"",
".",
"format",
"(",
"repr",
"(",
"value",
"[",
":",
"ContextFilter",
".",
"REVEAL_LENGTH",
"]",
")",
",",
"len",
"(",
"value",
")",
"-",
"ContextFilter",
".",
"REVEAL_LENGTH",
")",
"elif",
"isinstance",
"(",
"value",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"\"{}...[{} more bytes]\"",
".",
"format",
"(",
"repr",
"(",
"value",
"[",
":",
"ContextFilter",
".",
"REVEAL_LENGTH",
"]",
")",
",",
"len",
"(",
"value",
")",
"-",
"ContextFilter",
".",
"REVEAL_LENGTH",
")",
"else",
":",
"return",
"value",
"record",
".",
"msg",
"=",
"traverse_json_obj",
"(",
"record",
".",
"msg",
",",
"callback",
"=",
"modify",
")",
"return",
"True"
] | Filter record
:param record: Record to filter
:return: | [
"Filter",
"record",
":",
"param",
"record",
":",
"Record",
"to",
"filter",
":",
"return",
":"
] | python | train |
dswah/pyGAM | pygam/terms.py | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L333-L379 | def build_constraints(self, coef, constraint_lam, constraint_l2):
"""
builds the GAM block-diagonal constraint matrix in quadratic form
out of constraint matrices specified for each feature.
behaves like a penalty, but with a very large lambda value, ie 1e6.
Parameters
---------
coef : array-like containing the coefficients of a term
constraint_lam : float,
penalty to impose on the constraint.
typically this is a very large number.
constraint_l2 : float,
loading to improve the numerical conditioning of the constraint
matrix.
typically this is a very small number.
Returns
-------
C : sparse CSC matrix containing the model constraints in quadratic form
"""
if self.isintercept:
return np.array([[0.]])
Cs = []
for constraint in self.constraints:
if constraint is None:
constraint = 'none'
if constraint in CONSTRAINTS:
constraint = CONSTRAINTS[constraint]
C = constraint(self.n_coefs, coef) * constraint_lam
Cs.append(C)
Cs = np.sum(Cs)
# improve condition
if Cs.nnz > 0:
Cs += sp.sparse.diags(constraint_l2 * np.ones(Cs.shape[0]))
return Cs | [
"def",
"build_constraints",
"(",
"self",
",",
"coef",
",",
"constraint_lam",
",",
"constraint_l2",
")",
":",
"if",
"self",
".",
"isintercept",
":",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"0.",
"]",
"]",
")",
"Cs",
"=",
"[",
"]",
"for",
"constraint",
"in",
"self",
".",
"constraints",
":",
"if",
"constraint",
"is",
"None",
":",
"constraint",
"=",
"'none'",
"if",
"constraint",
"in",
"CONSTRAINTS",
":",
"constraint",
"=",
"CONSTRAINTS",
"[",
"constraint",
"]",
"C",
"=",
"constraint",
"(",
"self",
".",
"n_coefs",
",",
"coef",
")",
"*",
"constraint_lam",
"Cs",
".",
"append",
"(",
"C",
")",
"Cs",
"=",
"np",
".",
"sum",
"(",
"Cs",
")",
"# improve condition",
"if",
"Cs",
".",
"nnz",
">",
"0",
":",
"Cs",
"+=",
"sp",
".",
"sparse",
".",
"diags",
"(",
"constraint_l2",
"*",
"np",
".",
"ones",
"(",
"Cs",
".",
"shape",
"[",
"0",
"]",
")",
")",
"return",
"Cs"
] | builds the GAM block-diagonal constraint matrix in quadratic form
out of constraint matrices specified for each feature.
behaves like a penalty, but with a very large lambda value, ie 1e6.
Parameters
---------
coef : array-like containing the coefficients of a term
constraint_lam : float,
penalty to impose on the constraint.
typically this is a very large number.
constraint_l2 : float,
loading to improve the numerical conditioning of the constraint
matrix.
typically this is a very small number.
Returns
-------
C : sparse CSC matrix containing the model constraints in quadratic form | [
"builds",
"the",
"GAM",
"block",
"-",
"diagonal",
"constraint",
"matrix",
"in",
"quadratic",
"form",
"out",
"of",
"constraint",
"matrices",
"specified",
"for",
"each",
"feature",
"."
] | python | train |
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L1090-L1110 | def update_affinity_group(self, affinity_group_name, label,
description=None):
'''
Updates the label and/or the description for an affinity group for the
specified subscription.
affinity_group_name:
The name of the affinity group.
label:
A name for the affinity group. The name can be up to 100 characters
in length.
description:
A description for the affinity group. The description can be up to
1024 characters in length.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
_validate_not_none('label', label)
return self._perform_put(
'/' + self.subscription_id + '/affinitygroups/' +
_str(affinity_group_name),
_XmlSerializer.update_affinity_group_to_xml(label, description)) | [
"def",
"update_affinity_group",
"(",
"self",
",",
"affinity_group_name",
",",
"label",
",",
"description",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'affinity_group_name'",
",",
"affinity_group_name",
")",
"_validate_not_none",
"(",
"'label'",
",",
"label",
")",
"return",
"self",
".",
"_perform_put",
"(",
"'/'",
"+",
"self",
".",
"subscription_id",
"+",
"'/affinitygroups/'",
"+",
"_str",
"(",
"affinity_group_name",
")",
",",
"_XmlSerializer",
".",
"update_affinity_group_to_xml",
"(",
"label",
",",
"description",
")",
")"
] | Updates the label and/or the description for an affinity group for the
specified subscription.
affinity_group_name:
The name of the affinity group.
label:
A name for the affinity group. The name can be up to 100 characters
in length.
description:
A description for the affinity group. The description can be up to
1024 characters in length. | [
"Updates",
"the",
"label",
"and",
"/",
"or",
"the",
"description",
"for",
"an",
"affinity",
"group",
"for",
"the",
"specified",
"subscription",
"."
] | python | test |
apache/incubator-mxnet | python/mxnet/contrib/text/embedding.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/text/embedding.py#L63-L87 | def create(embedding_name, **kwargs):
"""Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `pretrained_file_name`, use
`mxnet.contrib.text.embedding.get_pretrained_file_names()`.
Parameters
----------
embedding_name : str
The token embedding name (case-insensitive).
Returns
-------
An instance of `mxnet.contrib.text.glossary._TokenEmbedding`:
A token embedding instance that loads embedding vectors from an externally hosted
pre-trained token embedding file.
"""
create_text_embedding = registry.get_create_func(_TokenEmbedding, 'token embedding')
return create_text_embedding(embedding_name, **kwargs) | [
"def",
"create",
"(",
"embedding_name",
",",
"*",
"*",
"kwargs",
")",
":",
"create_text_embedding",
"=",
"registry",
".",
"get_create_func",
"(",
"_TokenEmbedding",
",",
"'token embedding'",
")",
"return",
"create_text_embedding",
"(",
"embedding_name",
",",
"*",
"*",
"kwargs",
")"
] | Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `pretrained_file_name`, use
`mxnet.contrib.text.embedding.get_pretrained_file_names()`.
Parameters
----------
embedding_name : str
The token embedding name (case-insensitive).
Returns
-------
An instance of `mxnet.contrib.text.glossary._TokenEmbedding`:
A token embedding instance that loads embedding vectors from an externally hosted
pre-trained token embedding file. | [
"Creates",
"an",
"instance",
"of",
"token",
"embedding",
"."
] | python | train |
bwohlberg/sporco | sporco/dictlrn/prlcnscdl.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/prlcnscdl.py#L588-L592 | def getcoef(self):
"""Get final coefficient map array."""
global mp_Z_Y
return np.swapaxes(mp_Z_Y, 0, self.xstep.cri.axisK+1)[0] | [
"def",
"getcoef",
"(",
"self",
")",
":",
"global",
"mp_Z_Y",
"return",
"np",
".",
"swapaxes",
"(",
"mp_Z_Y",
",",
"0",
",",
"self",
".",
"xstep",
".",
"cri",
".",
"axisK",
"+",
"1",
")",
"[",
"0",
"]"
] | Get final coefficient map array. | [
"Get",
"final",
"coefficient",
"map",
"array",
"."
] | python | train |
HPAC/matchpy | matchpy/matching/many_to_one.py | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/many_to_one.py#L434-L443 | def match(self, subject: Expression) -> Iterator[Tuple[Expression, Substitution]]:
"""Match the subject against all the matcher's patterns.
Args:
subject: The subject to match.
Yields:
For every match, a tuple of the matching pattern and the match substitution.
"""
return _MatchIter(self, subject) | [
"def",
"match",
"(",
"self",
",",
"subject",
":",
"Expression",
")",
"->",
"Iterator",
"[",
"Tuple",
"[",
"Expression",
",",
"Substitution",
"]",
"]",
":",
"return",
"_MatchIter",
"(",
"self",
",",
"subject",
")"
] | Match the subject against all the matcher's patterns.
Args:
subject: The subject to match.
Yields:
For every match, a tuple of the matching pattern and the match substitution. | [
"Match",
"the",
"subject",
"against",
"all",
"the",
"matcher",
"s",
"patterns",
"."
] | python | train |
pixelogik/NearPy | nearpy/storage/storage_redis.py | https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/storage/storage_redis.py#L205-L211 | def load_hash_configuration(self, hash_name):
"""
Loads and returns hash configuration
"""
conf = self.redis_object.get(hash_name+'_conf')
return pickle.loads(conf) if conf is not None else None | [
"def",
"load_hash_configuration",
"(",
"self",
",",
"hash_name",
")",
":",
"conf",
"=",
"self",
".",
"redis_object",
".",
"get",
"(",
"hash_name",
"+",
"'_conf'",
")",
"return",
"pickle",
".",
"loads",
"(",
"conf",
")",
"if",
"conf",
"is",
"not",
"None",
"else",
"None"
] | Loads and returns hash configuration | [
"Loads",
"and",
"returns",
"hash",
"configuration"
] | python | train |
welbornprod/colr | colr/controls.py | https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/controls.py#L109-L125 | def erase_line(method=EraseMethod.ALL, file=sys.stdout):
""" Erase a line, or part of a line. See `method` argument below.
Cursor position does not change.
Esc[<method>K
Arguments:
method : One of these possible values:
EraseMethod.END or 0:
Clear from cursor to the end of the line.
EraseMethod.START or 1:
Clear from cursor to the start of the line.
EraseMethod.ALL or 2:
Clear the entire line.
Default: EraseMethod.ALL (2)
"""
erase.line(method).write(file=file) | [
"def",
"erase_line",
"(",
"method",
"=",
"EraseMethod",
".",
"ALL",
",",
"file",
"=",
"sys",
".",
"stdout",
")",
":",
"erase",
".",
"line",
"(",
"method",
")",
".",
"write",
"(",
"file",
"=",
"file",
")"
] | Erase a line, or part of a line. See `method` argument below.
Cursor position does not change.
Esc[<method>K
Arguments:
method : One of these possible values:
EraseMethod.END or 0:
Clear from cursor to the end of the line.
EraseMethod.START or 1:
Clear from cursor to the start of the line.
EraseMethod.ALL or 2:
Clear the entire line.
Default: EraseMethod.ALL (2) | [
"Erase",
"a",
"line",
"or",
"part",
"of",
"a",
"line",
".",
"See",
"method",
"argument",
"below",
".",
"Cursor",
"position",
"does",
"not",
"change",
"."
] | python | train |
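For context on the escape sequence the docstring above cites (Esc[<method>K): the same effect can be produced by writing the raw ANSI sequence directly, without the colr helper. A small sketch, assuming an ANSI-capable terminal:

    import sys
    import time

    sys.stdout.write("working...")
    sys.stdout.flush()
    time.sleep(1)
    # "\r" returns the cursor to column 0; "\x1b[2K" is Esc[2K, method 2: clear the entire line
    sys.stdout.write("\r\x1b[2K")
    sys.stdout.write("done\n")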
coghost/izen | izen/prettify.py | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/prettify.py#L93-L102 | def log_random_sleep(self, minimum=3.0, scale=1.0, hints=None):
"""wrap random sleep.
- log it for debug purpose only
"""
hints = '{} slept'.format(hints) if hints else 'slept'
st = time.time()
helper.random_sleep(minimum, scale)
log.debug('{} {} {}s'.format(
self.symbols.get('sleep', ''), hints, self.color_log(time.time() - st))) | [
"def",
"log_random_sleep",
"(",
"self",
",",
"minimum",
"=",
"3.0",
",",
"scale",
"=",
"1.0",
",",
"hints",
"=",
"None",
")",
":",
"hints",
"=",
"'{} slept'",
".",
"format",
"(",
"hints",
")",
"if",
"hints",
"else",
"'slept'",
"st",
"=",
"time",
".",
"time",
"(",
")",
"helper",
".",
"random_sleep",
"(",
"minimum",
",",
"scale",
")",
"log",
".",
"debug",
"(",
"'{} {} {}s'",
".",
"format",
"(",
"self",
".",
"symbols",
".",
"get",
"(",
"'sleep'",
",",
"''",
")",
",",
"hints",
",",
"self",
".",
"color_log",
"(",
"time",
".",
"time",
"(",
")",
"-",
"st",
")",
")",
")"
] | wrap random sleep.
- log it for debugging purposes only | [
"wrap",
"random",
"sleep",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/util/config.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/config.py#L321-L340 | def save_config(**kwargs):
"""Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file.
"""
if kwargs == {}:
kwargs = config._config
current_config = _load_config()
current_config.update(**kwargs)
# write to disk
fname = _get_config_fname()
if fname is None:
raise RuntimeError('config filename could not be determined')
if not op.isdir(op.dirname(fname)):
os.mkdir(op.dirname(fname))
with open(fname, 'w') as fid:
json.dump(current_config, fid, sort_keys=True, indent=0) | [
"def",
"save_config",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
"==",
"{",
"}",
":",
"kwargs",
"=",
"config",
".",
"_config",
"current_config",
"=",
"_load_config",
"(",
")",
"current_config",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"# write to disk",
"fname",
"=",
"_get_config_fname",
"(",
")",
"if",
"fname",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'config filename could not be determined'",
")",
"if",
"not",
"op",
".",
"isdir",
"(",
"op",
".",
"dirname",
"(",
"fname",
")",
")",
":",
"os",
".",
"mkdir",
"(",
"op",
".",
"dirname",
"(",
"fname",
")",
")",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"fid",
":",
"json",
".",
"dump",
"(",
"current_config",
",",
"fid",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"0",
")"
] | Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file. | [
"Save",
"configuration",
"keys",
"to",
"vispy",
"config",
"file"
] | python | train |
pudo/jsongraph | jsongraph/graph.py | https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/graph.py#L104-L111 | def get_uri(self, alias):
""" Get the URI for a given alias. A registered URI will return itself,
otherwise a ``GraphException`` is raised. """
if alias in self.aliases.keys():
return self.aliases[alias]
if alias in self.aliases.values():
return alias
raise GraphException('No such schema: %r' % alias) | [
"def",
"get_uri",
"(",
"self",
",",
"alias",
")",
":",
"if",
"alias",
"in",
"self",
".",
"aliases",
".",
"keys",
"(",
")",
":",
"return",
"self",
".",
"aliases",
"[",
"alias",
"]",
"if",
"alias",
"in",
"self",
".",
"aliases",
".",
"values",
"(",
")",
":",
"return",
"alias",
"raise",
"GraphException",
"(",
"'No such schema: %r'",
"%",
"alias",
")"
] | Get the URI for a given alias. A registered URI will return itself,
otherwise a ``GraphException`` is raised. | [
"Get",
"the",
"URI",
"for",
"a",
"given",
"alias",
".",
"A",
"registered",
"URI",
"will",
"return",
"itself",
"otherwise",
"None",
"is",
"returned",
"."
] | python | train |
saltstack/salt | salt/utils/openstack/nova.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L1192-L1202 | def virtual_interface_create(self, name, net_name):
'''
Add an interface to a slice
'''
nt_ks = self.compute_conn
serverid = self._server_uuid_from_name(name)
networkid = self.network_show(net_name).get('id', None)
if networkid is None:
return {net_name: False}
nets = nt_ks.virtual_interfaces.create(networkid, serverid)
return nets | [
"def",
"virtual_interface_create",
"(",
"self",
",",
"name",
",",
"net_name",
")",
":",
"nt_ks",
"=",
"self",
".",
"compute_conn",
"serverid",
"=",
"self",
".",
"_server_uuid_from_name",
"(",
"name",
")",
"networkid",
"=",
"self",
".",
"network_show",
"(",
"net_name",
")",
".",
"get",
"(",
"'id'",
",",
"None",
")",
"if",
"networkid",
"is",
"None",
":",
"return",
"{",
"net_name",
":",
"False",
"}",
"nets",
"=",
"nt_ks",
".",
"virtual_interfaces",
".",
"create",
"(",
"networkid",
",",
"serverid",
")",
"return",
"nets"
] | Add an interface to a slice | [
"Add",
"an",
"interfaces",
"to",
"a",
"slice"
] | python | train |
pmacosta/peng | docs/support/ptypes.py | https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/docs/support/ptypes.py#L248-L273 | def touchstone_options(obj):
r"""
Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None
"""
if (not isinstance(obj, dict)) or (
isinstance(obj, dict)
and (sorted(obj.keys()) != sorted(["units", "ptype", "pformat", "z0"]))
):
raise ValueError(pexdoc.pcontracts.get_exdesc())
if not (
(obj["units"].lower() in ["ghz", "mhz", "khz", "hz"])
and (obj["ptype"].lower() in ["s", "y", "z", "h", "g"])
and (obj["pformat"].lower() in ["db", "ma", "ri"])
and isinstance(obj["z0"], float)
and (obj["z0"] >= 0)
):
raise ValueError(pexdoc.pcontracts.get_exdesc()) | [
"def",
"touchstone_options",
"(",
"obj",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"obj",
",",
"dict",
")",
")",
"or",
"(",
"isinstance",
"(",
"obj",
",",
"dict",
")",
"and",
"(",
"sorted",
"(",
"obj",
".",
"keys",
"(",
")",
")",
"!=",
"sorted",
"(",
"[",
"\"units\"",
",",
"\"ptype\"",
",",
"\"pformat\"",
",",
"\"z0\"",
"]",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"pexdoc",
".",
"pcontracts",
".",
"get_exdesc",
"(",
")",
")",
"if",
"not",
"(",
"(",
"obj",
"[",
"\"units\"",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"\"ghz\"",
",",
"\"mhz\"",
",",
"\"khz\"",
",",
"\"hz\"",
"]",
")",
"and",
"(",
"obj",
"[",
"\"ptype\"",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"\"s\"",
",",
"\"y\"",
",",
"\"z\"",
",",
"\"h\"",
",",
"\"g\"",
"]",
")",
"and",
"(",
"obj",
"[",
"\"pformat\"",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"\"db\"",
",",
"\"ma\"",
",",
"\"ri\"",
"]",
")",
"and",
"isinstance",
"(",
"obj",
"[",
"\"z0\"",
"]",
",",
"float",
")",
"and",
"(",
"obj",
"[",
"\"z0\"",
"]",
">=",
"0",
")",
")",
":",
"raise",
"ValueError",
"(",
"pexdoc",
".",
"pcontracts",
".",
"get_exdesc",
"(",
")",
")"
] | r"""
Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None | [
"r",
"Validate",
"if",
"an",
"object",
"is",
"an",
":",
"ref",
":",
"TouchstoneOptions",
"pseudo",
"-",
"type",
"object",
"."
] | python | test |
crunchyroll/ef-open | efopen/ef_aws_resolver.py | https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_aws_resolver.py#L686-L694 | def kms_decrypt_value(self, lookup):
"""
Args:
lookup: the encrypted value to be decrypted by KMS; base64 encoded
Returns:
The decrypted lookup value
"""
decrypted_lookup = ef_utils.kms_decrypt(EFAwsResolver.__CLIENTS["kms"], lookup)
return decrypted_lookup | [
"def",
"kms_decrypt_value",
"(",
"self",
",",
"lookup",
")",
":",
"decrypted_lookup",
"=",
"ef_utils",
".",
"kms_decrypt",
"(",
"EFAwsResolver",
".",
"__CLIENTS",
"[",
"\"kms\"",
"]",
",",
"lookup",
")",
"return",
"decrypted_lookup"
] | Args:
lookup: the encrypted value to be decrypted by KMS; base64 encoded
Returns:
The decrypted lookup value | [
"Args",
":",
"lookup",
":",
"the",
"encrypted",
"value",
"to",
"be",
"decrypted",
"by",
"KMS",
";",
"base64",
"encoded",
"Returns",
":",
"The",
"decrypted",
"lookup",
"value"
] | python | train |
Yubico/python-yubico | yubico/yubikey_neo_usb_hid.py | https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_neo_usb_hid.py#L345-L350 | def to_frame(self, slot=SLOT.SCAN_MAP):
"""
Return the current configuration as a YubiKeyFrame object.
"""
payload = self.scanmap.ljust(64, b'\0')
return yubikey_frame.YubiKeyFrame(command=slot, payload=payload) | [
"def",
"to_frame",
"(",
"self",
",",
"slot",
"=",
"SLOT",
".",
"SCAN_MAP",
")",
":",
"payload",
"=",
"self",
".",
"scanmap",
".",
"ljust",
"(",
"64",
",",
"b'\\0'",
")",
"return",
"yubikey_frame",
".",
"YubiKeyFrame",
"(",
"command",
"=",
"slot",
",",
"payload",
"=",
"payload",
")"
] | Return the current configuration as a YubiKeyFrame object. | [
"Return",
"the",
"current",
"configuration",
"as",
"a",
"YubiKeyFrame",
"object",
"."
] | python | train |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L1349-L1367 | def uniform_index(index,length):
'''
uniform_index(0,3)
uniform_index(-1,3)
uniform_index(-4,3)
uniform_index(-3,3)
uniform_index(5,3)
'''
if(index<0):
rl = length+index
if(rl<0):
index = 0
else:
index = rl
elif(index>=length):
index = length
else:
index = index
return(index) | [
"def",
"uniform_index",
"(",
"index",
",",
"length",
")",
":",
"if",
"(",
"index",
"<",
"0",
")",
":",
"rl",
"=",
"length",
"+",
"index",
"if",
"(",
"rl",
"<",
"0",
")",
":",
"index",
"=",
"0",
"else",
":",
"index",
"=",
"rl",
"elif",
"(",
"index",
">=",
"length",
")",
":",
"index",
"=",
"length",
"else",
":",
"index",
"=",
"index",
"return",
"(",
"index",
")"
] | uniform_index(0,3)
uniform_index(-1,3)
uniform_index(-4,3)
uniform_index(-3,3)
uniform_index(5,3) | [
"uniform_index",
"(",
"0",
"3",
")",
"uniform_index",
"(",
"-",
"1",
"3",
")",
"uniform_index",
"(",
"-",
"4",
"3",
")",
"uniform_index",
"(",
"-",
"3",
"3",
")",
"uniform_index",
"(",
"5",
"3",
")"
] | python | valid |
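The docstring above only lists the example calls; for clarity, the values they produce under the clamping logic shown are (checked against the code in the record):

    uniform_index(0, 3)    # -> 0
    uniform_index(-1, 3)   # -> 2   (length + index)
    uniform_index(-4, 3)   # -> 0   (negative result clamped to 0)
    uniform_index(-3, 3)   # -> 0
    uniform_index(5, 3)    # -> 3   (clamped to length)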
rsgalloway/grit | grit/server/cherrypy/__init__.py | https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/cherrypy/__init__.py#L747-L768 | def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n") | [
"def",
"respond",
"(",
"self",
")",
":",
"mrbs",
"=",
"self",
".",
"server",
".",
"max_request_body_size",
"if",
"self",
".",
"chunked_read",
":",
"self",
".",
"rfile",
"=",
"ChunkedRFile",
"(",
"self",
".",
"conn",
".",
"rfile",
",",
"mrbs",
")",
"else",
":",
"cl",
"=",
"int",
"(",
"self",
".",
"inheaders",
".",
"get",
"(",
"\"Content-Length\"",
",",
"0",
")",
")",
"if",
"mrbs",
"and",
"mrbs",
"<",
"cl",
":",
"if",
"not",
"self",
".",
"sent_headers",
":",
"self",
".",
"simple_response",
"(",
"\"413 Request Entity Too Large\"",
",",
"\"The entity sent with the request exceeds the maximum \"",
"\"allowed bytes.\"",
")",
"return",
"self",
".",
"rfile",
"=",
"KnownLengthRFile",
"(",
"self",
".",
"conn",
".",
"rfile",
",",
"cl",
")",
"self",
".",
"server",
".",
"gateway",
"(",
"self",
")",
".",
"respond",
"(",
")",
"if",
"(",
"self",
".",
"ready",
"and",
"not",
"self",
".",
"sent_headers",
")",
":",
"self",
".",
"sent_headers",
"=",
"True",
"self",
".",
"send_headers",
"(",
")",
"if",
"self",
".",
"chunked_write",
":",
"self",
".",
"conn",
".",
"wfile",
".",
"sendall",
"(",
"\"0\\r\\n\\r\\n\"",
")"
] | Call the gateway and write its iterable output. | [
"Call",
"the",
"gateway",
"and",
"write",
"its",
"iterable",
"output",
"."
] | python | train |
drdoctr/doctr | doctr/travis.py | https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L383-L400 | def copy_to_tmp(source):
"""
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
"""
tmp_dir = tempfile.mkdtemp()
# Use pathlib because os.path.basename is different depending on whether
# the path ends in a /
p = pathlib.Path(source)
dirname = p.name or 'temp'
new_dir = os.path.join(tmp_dir, dirname)
if os.path.isdir(source):
shutil.copytree(source, new_dir)
else:
shutil.copy2(source, new_dir)
return new_dir | [
"def",
"copy_to_tmp",
"(",
"source",
")",
":",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"# Use pathlib because os.path.basename is different depending on whether",
"# the path ends in a /",
"p",
"=",
"pathlib",
".",
"Path",
"(",
"source",
")",
"dirname",
"=",
"p",
".",
"name",
"or",
"'temp'",
"new_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"dirname",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"source",
")",
":",
"shutil",
".",
"copytree",
"(",
"source",
",",
"new_dir",
")",
"else",
":",
"shutil",
".",
"copy2",
"(",
"source",
",",
"new_dir",
")",
"return",
"new_dir"
] | Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file. | [
"Copies",
"source",
"to",
"a",
"temporary",
"directory",
"and",
"returns",
"the",
"copied",
"location",
"."
] | python | train |
LonamiWebs/Telethon | telethon/tl/custom/message.py | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/message.py#L206-L253 | def _finish_init(self, client, entities, input_chat):
"""
Finishes the initialization of this message by setting
the client that sent the message and making use of the
known entities.
"""
self._client = client
self._sender = entities.get(self._sender_id)
if self._sender:
try:
self._input_sender = utils.get_input_peer(self._sender)
except TypeError:
self._input_sender = None
self._chat = entities.get(self.chat_id)
self._input_chat = input_chat
if not self._input_chat and self._chat:
try:
self._input_chat = utils.get_input_peer(self._chat)
except TypeError:
self._input_chat = None
self._via_bot = entities.get(self.via_bot_id)
if self._via_bot:
try:
self._via_input_bot = utils.get_input_peer(self._via_bot)
except TypeError:
self._via_input_bot = None
if self.fwd_from:
self._forward = Forward(self._client, self.fwd_from, entities)
if self.action:
if isinstance(self.action, (types.MessageActionChatAddUser,
types.MessageActionChatCreate)):
self._action_entities = [entities.get(i)
for i in self.action.users]
elif isinstance(self.action, types.MessageActionChatDeleteUser):
self._action_entities = [entities.get(self.action.user_id)]
elif isinstance(self.action, types.MessageActionChatJoinedByLink):
self._action_entities = [entities.get(self.action.inviter_id)]
elif isinstance(self.action, types.MessageActionChatMigrateTo):
self._action_entities = [entities.get(utils.get_peer_id(
types.PeerChannel(self.action.channel_id)))]
elif isinstance(
self.action, types.MessageActionChannelMigrateFrom):
self._action_entities = [entities.get(utils.get_peer_id(
types.PeerChat(self.action.chat_id)))] | [
"def",
"_finish_init",
"(",
"self",
",",
"client",
",",
"entities",
",",
"input_chat",
")",
":",
"self",
".",
"_client",
"=",
"client",
"self",
".",
"_sender",
"=",
"entities",
".",
"get",
"(",
"self",
".",
"_sender_id",
")",
"if",
"self",
".",
"_sender",
":",
"try",
":",
"self",
".",
"_input_sender",
"=",
"utils",
".",
"get_input_peer",
"(",
"self",
".",
"_sender",
")",
"except",
"TypeError",
":",
"self",
".",
"_input_sender",
"=",
"None",
"self",
".",
"_chat",
"=",
"entities",
".",
"get",
"(",
"self",
".",
"chat_id",
")",
"self",
".",
"_input_chat",
"=",
"input_chat",
"if",
"not",
"self",
".",
"_input_chat",
"and",
"self",
".",
"_chat",
":",
"try",
":",
"self",
".",
"_input_chat",
"=",
"utils",
".",
"get_input_peer",
"(",
"self",
".",
"_chat",
")",
"except",
"TypeError",
":",
"self",
".",
"_input_chat",
"=",
"None",
"self",
".",
"_via_bot",
"=",
"entities",
".",
"get",
"(",
"self",
".",
"via_bot_id",
")",
"if",
"self",
".",
"_via_bot",
":",
"try",
":",
"self",
".",
"_via_input_bot",
"=",
"utils",
".",
"get_input_peer",
"(",
"self",
".",
"_via_bot",
")",
"except",
"TypeError",
":",
"self",
".",
"_via_input_bot",
"=",
"None",
"if",
"self",
".",
"fwd_from",
":",
"self",
".",
"_forward",
"=",
"Forward",
"(",
"self",
".",
"_client",
",",
"self",
".",
"fwd_from",
",",
"entities",
")",
"if",
"self",
".",
"action",
":",
"if",
"isinstance",
"(",
"self",
".",
"action",
",",
"(",
"types",
".",
"MessageActionChatAddUser",
",",
"types",
".",
"MessageActionChatCreate",
")",
")",
":",
"self",
".",
"_action_entities",
"=",
"[",
"entities",
".",
"get",
"(",
"i",
")",
"for",
"i",
"in",
"self",
".",
"action",
".",
"users",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"action",
",",
"types",
".",
"MessageActionChatDeleteUser",
")",
":",
"self",
".",
"_action_entities",
"=",
"[",
"entities",
".",
"get",
"(",
"self",
".",
"action",
".",
"user_id",
")",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"action",
",",
"types",
".",
"MessageActionChatJoinedByLink",
")",
":",
"self",
".",
"_action_entities",
"=",
"[",
"entities",
".",
"get",
"(",
"self",
".",
"action",
".",
"inviter_id",
")",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"action",
",",
"types",
".",
"MessageActionChatMigrateTo",
")",
":",
"self",
".",
"_action_entities",
"=",
"[",
"entities",
".",
"get",
"(",
"utils",
".",
"get_peer_id",
"(",
"types",
".",
"PeerChannel",
"(",
"self",
".",
"action",
".",
"channel_id",
")",
")",
")",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"action",
",",
"types",
".",
"MessageActionChannelMigrateFrom",
")",
":",
"self",
".",
"_action_entities",
"=",
"[",
"entities",
".",
"get",
"(",
"utils",
".",
"get_peer_id",
"(",
"types",
".",
"PeerChat",
"(",
"self",
".",
"action",
".",
"chat_id",
")",
")",
")",
"]"
] | Finishes the initialization of this message by setting
the client that sent the message and making use of the
known entities. | [
"Finishes",
"the",
"initialization",
"of",
"this",
"message",
"by",
"setting",
"the",
"client",
"that",
"sent",
"the",
"message",
"and",
"making",
"use",
"of",
"the",
"known",
"entities",
"."
] | python | train |
evhub/coconut | coconut/command/command.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/command.py#L288-L303 | def compile_path(self, path, write=True, package=None, *args, **kwargs):
"""Compile a path and returns paths to compiled files."""
path = fixpath(path)
if not isinstance(write, bool):
write = fixpath(write)
if os.path.isfile(path):
if package is None:
package = False
destpath = self.compile_file(path, write, package, *args, **kwargs)
return [destpath] if destpath is not None else []
elif os.path.isdir(path):
if package is None:
package = True
return self.compile_folder(path, write, package, *args, **kwargs)
else:
raise CoconutException("could not find source path", path) | [
"def",
"compile_path",
"(",
"self",
",",
"path",
",",
"write",
"=",
"True",
",",
"package",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"fixpath",
"(",
"path",
")",
"if",
"not",
"isinstance",
"(",
"write",
",",
"bool",
")",
":",
"write",
"=",
"fixpath",
"(",
"write",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"if",
"package",
"is",
"None",
":",
"package",
"=",
"False",
"destpath",
"=",
"self",
".",
"compile_file",
"(",
"path",
",",
"write",
",",
"package",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"[",
"destpath",
"]",
"if",
"destpath",
"is",
"not",
"None",
"else",
"[",
"]",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"if",
"package",
"is",
"None",
":",
"package",
"=",
"True",
"return",
"self",
".",
"compile_folder",
"(",
"path",
",",
"write",
",",
"package",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"CoconutException",
"(",
"\"could not find source path\"",
",",
"path",
")"
] | Compile a path and return paths to compiled files. | [
"Compile",
"a",
"path",
"and",
"returns",
"paths",
"to",
"compiled",
"files",
"."
] | python | train |
tensorflow/cleverhans | cleverhans/devtools/list_files.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/devtools/list_files.py#L41-L71 | def _list_files(path, suffix=""):
"""
Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
"""
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for one_list in lists:
for elem in one_list:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return [] | [
"def",
"_list_files",
"(",
"path",
",",
"suffix",
"=",
"\"\"",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"incomplete",
"=",
"os",
".",
"listdir",
"(",
"path",
")",
"complete",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"entry",
")",
"for",
"entry",
"in",
"incomplete",
"]",
"lists",
"=",
"[",
"_list_files",
"(",
"subpath",
",",
"suffix",
")",
"for",
"subpath",
"in",
"complete",
"]",
"flattened",
"=",
"[",
"]",
"for",
"one_list",
"in",
"lists",
":",
"for",
"elem",
"in",
"one_list",
":",
"flattened",
".",
"append",
"(",
"elem",
")",
"return",
"flattened",
"else",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
",",
"\"couldn't find file '%s'\"",
"%",
"path",
"if",
"path",
".",
"endswith",
"(",
"suffix",
")",
":",
"return",
"[",
"path",
"]",
"return",
"[",
"]"
] | Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself) | [
"Returns",
"a",
"list",
"of",
"all",
"files",
"ending",
"in",
"suffix",
"contained",
"within",
"path",
"."
] | python | train |
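A short usage sketch of the recursive listing above. The directory layout is hypothetical and only illustrates the suffix filter and the flattening of nested results; ordering follows os.listdir and is not guaranteed.

    # Given a tree like:
    #   pkg/a.py
    #   pkg/sub/b.py
    #   pkg/sub/data.txt
    py_files = _list_files("pkg", suffix=".py")
    # -> ["pkg/a.py", "pkg/sub/b.py"] (paths joined with os.path.join)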
pvlib/pvlib-python | pvlib/modelchain.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/modelchain.py#L478-L494 | def singlediode(self):
"""Deprecated"""
(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) = (
self.system.calcparams_desoto(self.effective_irradiance,
self.temps['temp_cell']))
self.desoto = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth)
self.dc = self.system.singlediode(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth)
self.dc = self.system.scale_voltage_current_power(self.dc).fillna(0)
return self | [
"def",
"singlediode",
"(",
"self",
")",
":",
"(",
"photocurrent",
",",
"saturation_current",
",",
"resistance_series",
",",
"resistance_shunt",
",",
"nNsVth",
")",
"=",
"(",
"self",
".",
"system",
".",
"calcparams_desoto",
"(",
"self",
".",
"effective_irradiance",
",",
"self",
".",
"temps",
"[",
"'temp_cell'",
"]",
")",
")",
"self",
".",
"desoto",
"=",
"(",
"photocurrent",
",",
"saturation_current",
",",
"resistance_series",
",",
"resistance_shunt",
",",
"nNsVth",
")",
"self",
".",
"dc",
"=",
"self",
".",
"system",
".",
"singlediode",
"(",
"photocurrent",
",",
"saturation_current",
",",
"resistance_series",
",",
"resistance_shunt",
",",
"nNsVth",
")",
"self",
".",
"dc",
"=",
"self",
".",
"system",
".",
"scale_voltage_current_power",
"(",
"self",
".",
"dc",
")",
".",
"fillna",
"(",
"0",
")",
"return",
"self"
] | Deprecated | [
"Deprecated"
] | python | train |
biocore/burrito-fillings | bfillings/blat.py | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blat.py#L185-L282 | def _input_as_list(self, data):
'''Takes the positional arguments as input in a list.
The list input here should be [query_file_path, database_file_path,
output_file_path]'''
query, database, output = data
if (not isabs(database)) \
or (not isabs(query)) \
or (not isabs(output)):
raise ApplicationError("Only absolute paths allowed.\n%s" %
', '.join(data))
self._database = FilePath(database)
self._query = FilePath(query)
self._output = ResultPath(output, IsWritten=True)
# check parameters that can only take a particular set of values
# check combination of databse and query type
if self.Parameters['-t'].isOn() and self.Parameters['-q'].isOn() and \
(self.Parameters['-t'].Value, self.Parameters['-q'].Value) not in \
self._valid_combinations:
error_message = "Invalid combination of database and query " + \
"types ('%s', '%s').\n" % \
(self.Parameters['-t'].Value,
self.Parameters['-q'].Value)
error_message += "Must be one of: %s\n" % \
repr(self._valid_combinations)
raise ApplicationError(error_message)
# check database type
if self.Parameters['-t'].isOn() and \
self.Parameters['-t'].Value not in self._database_types:
error_message = "Invalid database type %s\n" % \
self.Parameters['-t'].Value
error_message += "Allowed values: %s\n" % \
', '.join(self._database_types)
raise ApplicationError(error_message)
# check query type
if self.Parameters['-q'].isOn() and \
self.Parameters['-q'].Value not in self._query_types:
error_message = "Invalid query type %s\n" % \
self.Parameters['-q'].Value
error_message += "Allowed values: %s\n" % \
', '.join(self._query_types)
raise ApplicationError(error_message)
# check mask type
if self.Parameters['-mask'].isOn() and \
self.Parameters['-mask'].Value not in self._mask_types:
error_message = "Invalid mask type %s\n" % \
self.Parameters['-mask']
error_message += "Allowed Values: %s\n" % \
', '.join(self._mask_types)
raise ApplicationError(error_message)
# check qmask type
if self.Parameters['-qMask'].isOn() and \
self.Parameters['-qMask'].Value not in self._mask_types:
error_message = "Invalid qMask type %s\n" % \
self.Parameters['-qMask'].Value
error_message += "Allowed values: %s\n" % \
', '.join(self._mask_types)
raise ApplicationError(error_message)
# check repeat type
if self.Parameters['-repeats'].isOn() and \
self.Parameters['-repeats'].Value not in self._mask_types:
error_message = "Invalid repeat type %s\n" % \
self.Parameters['-repeats'].Value
error_message += "Allowed values: %s\n" % \
', '.join(self._mask_types)
raise ApplicationError(error_message)
# check output format
if self.Parameters['-out'].isOn() and \
self.Parameters['-out'].Value not in self._out_types:
error_message = "Invalid output type %s\n" % \
self.Parameters['-out']
error_message += "Allowed values: %s\n" % \
', '.join(self._out_types)
raise ApplicationError(error_message)
return '' | [
"def",
"_input_as_list",
"(",
"self",
",",
"data",
")",
":",
"query",
",",
"database",
",",
"output",
"=",
"data",
"if",
"(",
"not",
"isabs",
"(",
"database",
")",
")",
"or",
"(",
"not",
"isabs",
"(",
"query",
")",
")",
"or",
"(",
"not",
"isabs",
"(",
"output",
")",
")",
":",
"raise",
"ApplicationError",
"(",
"\"Only absolute paths allowed.\\n%s\"",
"%",
"', '",
".",
"join",
"(",
"data",
")",
")",
"self",
".",
"_database",
"=",
"FilePath",
"(",
"database",
")",
"self",
".",
"_query",
"=",
"FilePath",
"(",
"query",
")",
"self",
".",
"_output",
"=",
"ResultPath",
"(",
"output",
",",
"IsWritten",
"=",
"True",
")",
"# check parameters that can only take a particular set of values",
"# check combination of databse and query type",
"if",
"self",
".",
"Parameters",
"[",
"'-t'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-q'",
"]",
".",
"isOn",
"(",
")",
"and",
"(",
"self",
".",
"Parameters",
"[",
"'-t'",
"]",
".",
"Value",
",",
"self",
".",
"Parameters",
"[",
"'-q'",
"]",
".",
"Value",
")",
"not",
"in",
"self",
".",
"_valid_combinations",
":",
"error_message",
"=",
"\"Invalid combination of database and query \"",
"+",
"\"types ('%s', '%s').\\n\"",
"%",
"(",
"self",
".",
"Paramters",
"[",
"'-t'",
"]",
".",
"Value",
",",
"self",
".",
"Parameters",
"[",
"'-q'",
"]",
".",
"Value",
")",
"error_message",
"+=",
"\"Must be one of: %s\\n\"",
"%",
"repr",
"(",
"self",
".",
"_valid_combinations",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"# check database type",
"if",
"self",
".",
"Parameters",
"[",
"'-t'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-t'",
"]",
".",
"Value",
"not",
"in",
"self",
".",
"_database_types",
":",
"error_message",
"=",
"\"Invalid database type %s\\n\"",
"%",
"self",
".",
"Parameters",
"[",
"'-t'",
"]",
".",
"Value",
"error_message",
"+=",
"\"Allowed values: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_database_types",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"# check query type",
"if",
"self",
".",
"Parameters",
"[",
"'-q'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-q'",
"]",
".",
"Value",
"not",
"in",
"self",
".",
"_query_types",
":",
"error_message",
"=",
"\"Invalid query type %s\\n\"",
"%",
"self",
".",
"Parameters",
"[",
"'-q'",
"]",
".",
"Value",
"error_message",
"+=",
"\"Allowed values: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_query_types",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"# check mask type",
"if",
"self",
".",
"Parameters",
"[",
"'-mask'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-mask'",
"]",
".",
"Value",
"not",
"in",
"self",
".",
"_mask_types",
":",
"error_message",
"=",
"\"Invalid mask type %s\\n\"",
"%",
"self",
".",
"Parameters",
"[",
"'-mask'",
"]",
"error_message",
"+=",
"\"Allowed Values: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_mask_types",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"# check qmask type",
"if",
"self",
".",
"Parameters",
"[",
"'-qMask'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-qMask'",
"]",
".",
"Value",
"not",
"in",
"self",
".",
"_mask_types",
":",
"error_message",
"=",
"\"Invalid qMask type %s\\n\"",
"%",
"self",
".",
"Parameters",
"[",
"'-qMask'",
"]",
".",
"Value",
"error_message",
"+=",
"\"Allowed values: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_mask_types",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"# check repeat type",
"if",
"self",
".",
"Parameters",
"[",
"'-repeats'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-repeats'",
"]",
".",
"Value",
"not",
"in",
"self",
".",
"_mask_types",
":",
"error_message",
"=",
"\"Invalid repeat type %s\\n\"",
"%",
"self",
".",
"Parameters",
"[",
"'-repeat'",
"]",
".",
"Value",
"error_message",
"+=",
"\"Allowed values: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_mask_types",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"# check output format",
"if",
"self",
".",
"Parameters",
"[",
"'-out'",
"]",
".",
"isOn",
"(",
")",
"and",
"self",
".",
"Parameters",
"[",
"'-out'",
"]",
".",
"Value",
"not",
"in",
"self",
".",
"_out_types",
":",
"error_message",
"=",
"\"Invalid output type %s\\n\"",
"%",
"self",
".",
"Parameters",
"[",
"'-out'",
"]",
"error_message",
"+=",
"\"Allowed values: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_out_types",
")",
"raise",
"ApplicationError",
"(",
"error_message",
")",
"return",
"''"
] | Takes the positional arguments as input in a list.
The list input here should be [query_file_path, database_file_path,
output_file_path] | [
"Takes",
"the",
"positional",
"arguments",
"as",
"input",
"in",
"a",
"list",
"."
] | python | train |
yyuu/botornado | boto/s3/bucket.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L360-L397 | def get_all_versions(self, headers=None, **params):
"""
A lower-level, version-aware method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
return self._get_all([('Version', self.key_class),
('CommonPrefixes', Prefix),
('DeleteMarker', DeleteMarker)],
'versions', headers, **params) | [
"def",
"get_all_versions",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"_get_all",
"(",
"[",
"(",
"'Version'",
",",
"self",
".",
"key_class",
")",
",",
"(",
"'CommonPrefixes'",
",",
"Prefix",
")",
",",
"(",
"'DeleteMarker'",
",",
"DeleteMarker",
")",
"]",
",",
"'versions'",
",",
"headers",
",",
"*",
"*",
"params",
")"
] | A lower-level, version-aware method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested | [
"A",
"lower",
"-",
"level",
"version",
"-",
"aware",
"method",
"for",
"listing",
"contents",
"of",
"a",
"bucket",
".",
"This",
"closely",
"models",
"the",
"actual",
"S3",
"API",
"and",
"requires",
"you",
"to",
"manually",
"handle",
"the",
"paging",
"of",
"results",
".",
"For",
"a",
"higher",
"-",
"level",
"method",
"that",
"handles",
"the",
"details",
"of",
"paging",
"for",
"you",
"you",
"can",
"use",
"the",
"list",
"method",
".",
":",
"type",
"max_keys",
":",
"int",
":",
"param",
"max_keys",
":",
"The",
"maximum",
"number",
"of",
"keys",
"to",
"retrieve",
":",
"type",
"prefix",
":",
"string",
":",
"param",
"prefix",
":",
"The",
"prefix",
"of",
"the",
"keys",
"you",
"want",
"to",
"retrieve",
":",
"type",
"key_marker",
":",
"string",
":",
"param",
"key_marker",
":",
"The",
"marker",
"of",
"where",
"you",
"are",
"in",
"the",
"result",
"set",
"with",
"respect",
"to",
"keys",
".",
":",
"type",
"version_id_marker",
":",
"string",
":",
"param",
"version_id_marker",
":",
"The",
"marker",
"of",
"where",
"you",
"are",
"in",
"the",
"result",
"set",
"with",
"respect",
"to",
"version",
"-",
"id",
"s",
".",
":",
"type",
"delimiter",
":",
"string",
":",
"param",
"delimiter",
":",
"If",
"this",
"optional",
"Unicode",
"string",
"parameter",
"is",
"included",
"with",
"your",
"request",
"then",
"keys",
"that",
"contain",
"the",
"same",
"string",
"between",
"the",
"prefix",
"and",
"the",
"first",
"occurrence",
"of",
"the",
"delimiter",
"will",
"be",
"rolled",
"up",
"into",
"a",
"single",
"result",
"element",
"in",
"the",
"CommonPrefixes",
"collection",
".",
"These",
"rolled",
"-",
"up",
"keys",
"are",
"not",
"returned",
"elsewhere",
"in",
"the",
"response",
"."
] | python | train |
vaexio/vaex | packages/vaex-core/vaex/dataframe.py | https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4970-L5078 | def join(self, other, on=None, left_on=None, right_on=None, lsuffix='', rsuffix='', how='left', inplace=False):
"""Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on
If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit
row index).
Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may
change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running
:func:`DataFrame.extract` first.
Example:
>>> a = np.array(['a', 'b', 'c'])
>>> x = np.arange(1,4)
>>> ds1 = vaex.from_arrays(a=a, x=x)
>>> b = np.array(['a', 'b', 'd'])
>>> y = x**2
>>> ds2 = vaex.from_arrays(b=b, y=y)
>>> ds1.join(ds2, left_on='a', right_on='b')
:param other: Other DataFrame to join with (the right side)
:param on: default key for the left table (self)
:param left_on: key for the left table (self), overrides on
:param right_on: default key for the right table (other), overrides on
:param lsuffix: suffix to add to the left column names in case of a name collision
:param rsuffix: similar for the right
:param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values)
'right' is similar with self and other swapped.
:param inplace: {inplace}
:return:
"""
ds = self if inplace else self.copy()
if how == 'left':
left = ds
right = other
elif how == 'right':
left = other
right = ds
lsuffix, rsuffix = rsuffix, lsuffix
left_on, right_on = right_on, left_on
else:
raise ValueError('join type not supported: {}, only left and right'.format(how))
for name in right:
if name in left and name + rsuffix == name + lsuffix:
raise ValueError('column name collision: {} exists in both column, and no proper suffix given'
.format(name))
right = right.extract() # get rid of filters and active_range
assert left.length_unfiltered() == left.length_original()
N = left.length_unfiltered()
N_other = len(right)
left_on = left_on or on
right_on = right_on or on
if left_on is None and right_on is None:
for name in right:
right_name = name
if name in left:
left.rename_column(name, name + lsuffix)
right_name = name + rsuffix
if name in right.virtual_columns:
left.add_virtual_column(right_name, right.virtual_columns[name])
else:
left.add_column(right_name, right.columns[name])
else:
left_values = left.evaluate(left_on, filtered=False)
right_values = right.evaluate(right_on)
# maps from the left_values to row #
if np.ma.isMaskedArray(left_values):
mask = ~left_values.mask
left_values = left_values.data
index_left = dict(zip(left_values[mask], np.arange(N)[mask]))
else:
index_left = dict(zip(left_values, np.arange(N)))
# idem for right
if np.ma.isMaskedArray(right_values):
mask = ~right_values.mask
right_values = right_values.data
index_other = dict(zip(right_values[mask], np.arange(N_other)[mask]))
else:
index_other = dict(zip(right_values, np.arange(N_other)))
# we do a left join, find all rows of the right DataFrame
# that has an entry on the left
# for each row in the right
# find which row it needs to go to in the right
# from_indices = np.zeros(N_other, dtype=np.int64) # row # of right
# to_indices = np.zeros(N_other, dtype=np.int64) # goes to row # on the left
# keep a boolean mask of which rows are found
left_mask = np.ones(N, dtype=np.bool)
# and which row they point to in the right
left_row_to_right = np.zeros(N, dtype=np.int64) - 1
for i in range(N_other):
left_row = index_left.get(right_values[i])
if left_row is not None:
left_mask[left_row] = False # unmask, it exists
left_row_to_right[left_row] = i
lookup = np.ma.array(left_row_to_right, mask=left_mask)
for name in right:
right_name = name
if name in left:
left.rename_column(name, name + lsuffix)
right_name = name + rsuffix
if name in right.virtual_columns:
left.add_virtual_column(right_name, right.virtual_columns[name])
else:
left.add_column(right_name, ColumnIndexed(right, lookup, name))
return left | [
"def",
"join",
"(",
"self",
",",
"other",
",",
"on",
"=",
"None",
",",
"left_on",
"=",
"None",
",",
"right_on",
"=",
"None",
",",
"lsuffix",
"=",
"''",
",",
"rsuffix",
"=",
"''",
",",
"how",
"=",
"'left'",
",",
"inplace",
"=",
"False",
")",
":",
"ds",
"=",
"self",
"if",
"inplace",
"else",
"self",
".",
"copy",
"(",
")",
"if",
"how",
"==",
"'left'",
":",
"left",
"=",
"ds",
"right",
"=",
"other",
"elif",
"how",
"==",
"'right'",
":",
"left",
"=",
"other",
"right",
"=",
"ds",
"lsuffix",
",",
"rsuffix",
"=",
"rsuffix",
",",
"lsuffix",
"left_on",
",",
"right_on",
"=",
"right_on",
",",
"left_on",
"else",
":",
"raise",
"ValueError",
"(",
"'join type not supported: {}, only left and right'",
".",
"format",
"(",
"how",
")",
")",
"for",
"name",
"in",
"right",
":",
"if",
"name",
"in",
"left",
"and",
"name",
"+",
"rsuffix",
"==",
"name",
"+",
"lsuffix",
":",
"raise",
"ValueError",
"(",
"'column name collision: {} exists in both column, and no proper suffix given'",
".",
"format",
"(",
"name",
")",
")",
"right",
"=",
"right",
".",
"extract",
"(",
")",
"# get rid of filters and active_range",
"assert",
"left",
".",
"length_unfiltered",
"(",
")",
"==",
"left",
".",
"length_original",
"(",
")",
"N",
"=",
"left",
".",
"length_unfiltered",
"(",
")",
"N_other",
"=",
"len",
"(",
"right",
")",
"left_on",
"=",
"left_on",
"or",
"on",
"right_on",
"=",
"right_on",
"or",
"on",
"if",
"left_on",
"is",
"None",
"and",
"right_on",
"is",
"None",
":",
"for",
"name",
"in",
"right",
":",
"right_name",
"=",
"name",
"if",
"name",
"in",
"left",
":",
"left",
".",
"rename_column",
"(",
"name",
",",
"name",
"+",
"lsuffix",
")",
"right_name",
"=",
"name",
"+",
"rsuffix",
"if",
"name",
"in",
"right",
".",
"virtual_columns",
":",
"left",
".",
"add_virtual_column",
"(",
"right_name",
",",
"right",
".",
"virtual_columns",
"[",
"name",
"]",
")",
"else",
":",
"left",
".",
"add_column",
"(",
"right_name",
",",
"right",
".",
"columns",
"[",
"name",
"]",
")",
"else",
":",
"left_values",
"=",
"left",
".",
"evaluate",
"(",
"left_on",
",",
"filtered",
"=",
"False",
")",
"right_values",
"=",
"right",
".",
"evaluate",
"(",
"right_on",
")",
"# maps from the left_values to row #",
"if",
"np",
".",
"ma",
".",
"isMaskedArray",
"(",
"left_values",
")",
":",
"mask",
"=",
"~",
"left_values",
".",
"mask",
"left_values",
"=",
"left_values",
".",
"data",
"index_left",
"=",
"dict",
"(",
"zip",
"(",
"left_values",
"[",
"mask",
"]",
",",
"np",
".",
"arange",
"(",
"N",
")",
"[",
"mask",
"]",
")",
")",
"else",
":",
"index_left",
"=",
"dict",
"(",
"zip",
"(",
"left_values",
",",
"np",
".",
"arange",
"(",
"N",
")",
")",
")",
"# idem for right",
"if",
"np",
".",
"ma",
".",
"isMaskedArray",
"(",
"right_values",
")",
":",
"mask",
"=",
"~",
"right_values",
".",
"mask",
"right_values",
"=",
"right_values",
".",
"data",
"index_other",
"=",
"dict",
"(",
"zip",
"(",
"right_values",
"[",
"mask",
"]",
",",
"np",
".",
"arange",
"(",
"N_other",
")",
"[",
"mask",
"]",
")",
")",
"else",
":",
"index_other",
"=",
"dict",
"(",
"zip",
"(",
"right_values",
",",
"np",
".",
"arange",
"(",
"N_other",
")",
")",
")",
"# we do a left join, find all rows of the right DataFrame",
"# that has an entry on the left",
"# for each row in the right",
"# find which row it needs to go to in the right",
"# from_indices = np.zeros(N_other, dtype=np.int64) # row # of right",
"# to_indices = np.zeros(N_other, dtype=np.int64) # goes to row # on the left",
"# keep a boolean mask of which rows are found",
"left_mask",
"=",
"np",
".",
"ones",
"(",
"N",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"# and which row they point to in the right",
"left_row_to_right",
"=",
"np",
".",
"zeros",
"(",
"N",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"N_other",
")",
":",
"left_row",
"=",
"index_left",
".",
"get",
"(",
"right_values",
"[",
"i",
"]",
")",
"if",
"left_row",
"is",
"not",
"None",
":",
"left_mask",
"[",
"left_row",
"]",
"=",
"False",
"# unmask, it exists",
"left_row_to_right",
"[",
"left_row",
"]",
"=",
"i",
"lookup",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"left_row_to_right",
",",
"mask",
"=",
"left_mask",
")",
"for",
"name",
"in",
"right",
":",
"right_name",
"=",
"name",
"if",
"name",
"in",
"left",
":",
"left",
".",
"rename_column",
"(",
"name",
",",
"name",
"+",
"lsuffix",
")",
"right_name",
"=",
"name",
"+",
"rsuffix",
"if",
"name",
"in",
"right",
".",
"virtual_columns",
":",
"left",
".",
"add_virtual_column",
"(",
"right_name",
",",
"right",
".",
"virtual_columns",
"[",
"name",
"]",
")",
"else",
":",
"left",
".",
"add_column",
"(",
"right_name",
",",
"ColumnIndexed",
"(",
"right",
",",
"lookup",
",",
"name",
")",
")",
"return",
"left"
] | Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on
If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit
row index).
Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may
change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running
:func:`DataFrame.extract` first.
Example:
>>> a = np.array(['a', 'b', 'c'])
>>> x = np.arange(1,4)
>>> ds1 = vaex.from_arrays(a=a, x=x)
>>> b = np.array(['a', 'b', 'd'])
>>> y = x**2
>>> ds2 = vaex.from_arrays(b=b, y=y)
>>> ds1.join(ds2, left_on='a', right_on='b')
:param other: Other DataFrame to join with (the right side)
:param on: default key for the left table (self)
:param left_on: key for the left table (self), overrides on
:param right_on: key for the right table (other), overrides on
:param lsuffix: suffix to add to the left column names in case of a name collision
:param rsuffix: similar for the right
:param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values)
'right' is similar with self and other swapped.
:param inplace: {inplace}
:return: | [
"Return",
"a",
"DataFrame",
"joined",
"with",
"other",
"DataFrames",
"matched",
"by",
"columns",
"/",
"expression",
"on",
"/",
"left_on",
"/",
"right_on"
] | python | test |
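A minimal numpy-only sketch of the hash-lookup left join performed above; the arrays a/x/b/y mirror the docstring example, and the masked lookup array plays the role of the snippet's ColumnIndexed row map.

.. code-block:: python

    import numpy as np

    # Left table: key column 'a', values 'x'; right table: key column 'b', values 'y'
    a = np.array(['a', 'b', 'c'])
    x = np.arange(1, 4)
    b = np.array(['a', 'b', 'd'])
    y = x ** 2

    # Map each left key to its row number, as the snippet's index_left does
    index_left = {key: i for i, key in enumerate(a)}

    left_mask = np.ones(len(a), dtype=bool)                 # True = no match found yet
    left_row_to_right = np.full(len(a), -1, dtype=np.int64)
    for j, key in enumerate(b):
        i = index_left.get(key)
        if i is not None:
            left_mask[i] = False                            # unmask: a match exists
            left_row_to_right[i] = j

    # Rows without a right-hand match stay masked (missing values after the join)
    y_joined = np.ma.array(y[left_row_to_right], mask=left_mask)
    print(y_joined)                                         # [1 4 --]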
gijzelaerr/python-snap7 | snap7/server.py | https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/server.py#L72-L81 | def register_area(self, area_code, index, userdata):
"""Shares a memory area with the server. That memory block will be
visible to the clients.
"""
size = ctypes.sizeof(userdata)
logger.info("registering area %s, index %s, size %s" % (area_code,
index, size))
size = ctypes.sizeof(userdata)
return self.library.Srv_RegisterArea(self.pointer, area_code, index,
ctypes.byref(userdata), size) | [
"def",
"register_area",
"(",
"self",
",",
"area_code",
",",
"index",
",",
"userdata",
")",
":",
"size",
"=",
"ctypes",
".",
"sizeof",
"(",
"userdata",
")",
"logger",
".",
"info",
"(",
"\"registering area %s, index %s, size %s\"",
"%",
"(",
"area_code",
",",
"index",
",",
"size",
")",
")",
"size",
"=",
"ctypes",
".",
"sizeof",
"(",
"userdata",
")",
"return",
"self",
".",
"library",
".",
"Srv_RegisterArea",
"(",
"self",
".",
"pointer",
",",
"area_code",
",",
"index",
",",
"ctypes",
".",
"byref",
"(",
"userdata",
")",
",",
"size",
")"
] | Shares a memory area with the server. That memory block will be
visible to the clients. | [
"Shares",
"a",
"memory",
"area",
"with",
"the",
"server",
".",
"That",
"memory",
"block",
"will",
"be",
"visible",
"by",
"the",
"clients",
"."
] | python | train |
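A hedged sketch of calling register_area from user code; it assumes python-snap7's Server class and the srvAreaDB constant (the module path of that constant differs between library versions), and the 100-byte buffer size is arbitrary. The userdata argument is a plain ctypes object, as the snippet's sizeof call implies.

.. code-block:: python

    import ctypes
    import snap7

    server = snap7.server.Server()
    db1_data = (ctypes.c_uint8 * 100)()   # raw ctypes buffer shared as DB index 1
    # area_code, index, userdata -- matches the signature shown above
    server.register_area(snap7.types.srvAreaDB, 1, db1_data)
    server.start()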
saltstack/salt | salt/modules/lxc.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2632-L2674 | def state(name, path=None):
'''
Returns the state of a container.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.state name
'''
# Don't use _ensure_exists() here, it will mess with _change_state()
cachekey = 'lxc.state.{0}{1}'.format(name, path)
try:
return __context__[cachekey]
except KeyError:
if not exists(name, path=path):
__context__[cachekey] = None
else:
cmd = 'lxc-info'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
cmd += ' -n {0}'.format(name)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
_clear_context()
raise CommandExecutionError(
'Unable to get state of container \'{0}\''.format(name)
)
c_infos = ret['stdout'].splitlines()
c_state = None
for c_info in c_infos:
stat = c_info.split(':')
if stat[0].lower() == 'state':
c_state = stat[1].strip().lower()
break
__context__[cachekey] = c_state
return __context__[cachekey] | [
"def",
"state",
"(",
"name",
",",
"path",
"=",
"None",
")",
":",
"# Don't use _ensure_exists() here, it will mess with _change_state()",
"cachekey",
"=",
"'lxc.state.{0}{1}'",
".",
"format",
"(",
"name",
",",
"path",
")",
"try",
":",
"return",
"__context__",
"[",
"cachekey",
"]",
"except",
"KeyError",
":",
"if",
"not",
"exists",
"(",
"name",
",",
"path",
"=",
"path",
")",
":",
"__context__",
"[",
"cachekey",
"]",
"=",
"None",
"else",
":",
"cmd",
"=",
"'lxc-info'",
"if",
"path",
":",
"cmd",
"+=",
"' -P {0}'",
".",
"format",
"(",
"pipes",
".",
"quote",
"(",
"path",
")",
")",
"cmd",
"+=",
"' -n {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"if",
"ret",
"[",
"'retcode'",
"]",
"!=",
"0",
":",
"_clear_context",
"(",
")",
"raise",
"CommandExecutionError",
"(",
"'Unable to get state of container \\'{0}\\''",
".",
"format",
"(",
"name",
")",
")",
"c_infos",
"=",
"ret",
"[",
"'stdout'",
"]",
".",
"splitlines",
"(",
")",
"c_state",
"=",
"None",
"for",
"c_info",
"in",
"c_infos",
":",
"stat",
"=",
"c_info",
".",
"split",
"(",
"':'",
")",
"if",
"stat",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"==",
"'state'",
":",
"c_state",
"=",
"stat",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"break",
"__context__",
"[",
"cachekey",
"]",
"=",
"c_state",
"return",
"__context__",
"[",
"cachekey",
"]"
] | Returns the state of a container.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.state name | [
"Returns",
"the",
"state",
"of",
"a",
"container",
"."
] | python | train |
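A rough standalone sketch of the lxc-info parsing step the module above performs, without the salt loader or the context cache; it assumes an lxc-info binary on PATH and a "State:" line in its output, as the snippet does.

.. code-block:: python

    import subprocess

    def lxc_state(name, path=None):
        # Same command line as the module: lxc-info [-P path] -n name
        cmd = ['lxc-info']
        if path:
            cmd += ['-P', path]
        cmd += ['-n', name]
        proc = subprocess.run(cmd, capture_output=True, text=True)
        if proc.returncode != 0:
            raise RuntimeError("Unable to get state of container '{0}'".format(name))
        # Pick the "State: RUNNING" line and normalise it to e.g. 'running'
        for line in proc.stdout.splitlines():
            key, _, value = line.partition(':')
            if key.strip().lower() == 'state':
                return value.strip().lower()
        return None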
kshlm/gant | gant/utils/ssh.py | https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/ssh.py#L10-L20 | def launch_shell(username, hostname, password, port=22):
"""
Launches an ssh shell
"""
if not username or not hostname or not password:
return False
with tempfile.NamedTemporaryFile() as tmpFile:
os.system(sshCmdLine.format(password, tmpFile.name, username, hostname,
port))
return True | [
"def",
"launch_shell",
"(",
"username",
",",
"hostname",
",",
"password",
",",
"port",
"=",
"22",
")",
":",
"if",
"not",
"username",
"or",
"not",
"hostname",
"or",
"not",
"password",
":",
"return",
"False",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"tmpFile",
":",
"os",
".",
"system",
"(",
"sshCmdLine",
".",
"format",
"(",
"password",
",",
"tmpFile",
".",
"name",
",",
"username",
",",
"hostname",
",",
"port",
")",
")",
"return",
"True"
] | Launches an ssh shell | [
"Launches",
"an",
"ssh",
"shell"
] | python | train |
ladybug-tools/ladybug | ladybug/designday.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L457-L493 | def from_ashrae_dict_cooling(cls, ashrae_dict, location,
use_010=False, pressure=None, tau=None):
"""Create a heating design day object from a ASHRAE HOF dictionary.
Args:
ashrae_dict: A dictionary with 32 keys that match those in the
cooling_keys property of this object. Each key should
correspond to a value.
location: Location object for the design day
use_010: Boolean to denote what type of design day to create
(whether it is a 1.0% or a 0.4% design day). Default is
False for a 0.4% annual cooling design day
pressure: Atmospheric pressure in Pa that should be used in the
creation of the humidity condition. Default is 101325 Pa
for pressure at sea level.
tau: Optional tuple containing two values, which will set the
sky condition to be a revised ASHRAE clear sky (Tau model).
The first item of the tuple should be the tau beam value
and the second is for the tau diffuse value. Default is
None, which will default to the original ASHRAE Clear Sky.
"""
db_key = 'DB004' if use_010 is False else 'DB010'
wb_key = 'WB_DB004' if use_010 is False else 'WB_DB010'
perc_str = '0.4' if use_010 is False else '1.0'
pressure = pressure if pressure is not None else 101325
db_cond = DryBulbCondition(float(ashrae_dict[db_key]), float(ashrae_dict['DBR']))
hu_cond = HumidityCondition('Wetbulb', float(ashrae_dict[wb_key]), pressure)
ws_cond = WindCondition(float(ashrae_dict['WS_DB004']),
float(ashrae_dict['WD_DB004']))
month_num = int(ashrae_dict['Month'])
if tau is not None:
sky_cond = RevisedClearSkyCondition(month_num, 21, tau[0], tau[1])
else:
sky_cond = OriginalClearSkyCondition(month_num, 21)
name = '{}% Cooling Design Day for {}'.format(perc_str, location.city)
return cls(name, 'SummerDesignDay', location,
db_cond, hu_cond, ws_cond, sky_cond) | [
"def",
"from_ashrae_dict_cooling",
"(",
"cls",
",",
"ashrae_dict",
",",
"location",
",",
"use_010",
"=",
"False",
",",
"pressure",
"=",
"None",
",",
"tau",
"=",
"None",
")",
":",
"db_key",
"=",
"'DB004'",
"if",
"use_010",
"is",
"False",
"else",
"'DB010'",
"wb_key",
"=",
"'WB_DB004'",
"if",
"use_010",
"is",
"False",
"else",
"'WB_DB010'",
"perc_str",
"=",
"'0.4'",
"if",
"use_010",
"is",
"False",
"else",
"'1.0'",
"pressure",
"=",
"pressure",
"if",
"pressure",
"is",
"not",
"None",
"else",
"101325",
"db_cond",
"=",
"DryBulbCondition",
"(",
"float",
"(",
"ashrae_dict",
"[",
"db_key",
"]",
")",
",",
"float",
"(",
"ashrae_dict",
"[",
"'DBR'",
"]",
")",
")",
"hu_cond",
"=",
"HumidityCondition",
"(",
"'Wetbulb'",
",",
"float",
"(",
"ashrae_dict",
"[",
"wb_key",
"]",
")",
",",
"pressure",
")",
"ws_cond",
"=",
"WindCondition",
"(",
"float",
"(",
"ashrae_dict",
"[",
"'WS_DB004'",
"]",
")",
",",
"float",
"(",
"ashrae_dict",
"[",
"'WD_DB004'",
"]",
")",
")",
"month_num",
"=",
"int",
"(",
"ashrae_dict",
"[",
"'Month'",
"]",
")",
"if",
"tau",
"is",
"not",
"None",
":",
"sky_cond",
"=",
"RevisedClearSkyCondition",
"(",
"month_num",
",",
"21",
",",
"tau",
"[",
"0",
"]",
",",
"tau",
"[",
"1",
"]",
")",
"else",
":",
"sky_cond",
"=",
"OriginalClearSkyCondition",
"(",
"month_num",
",",
"21",
")",
"name",
"=",
"'{}% Cooling Design Day for {}'",
".",
"format",
"(",
"perc_str",
",",
"location",
".",
"city",
")",
"return",
"cls",
"(",
"name",
",",
"'SummerDesignDay'",
",",
"location",
",",
"db_cond",
",",
"hu_cond",
",",
"ws_cond",
",",
"sky_cond",
")"
] | Create a cooling design day object from an ASHRAE HOF dictionary.
Args:
ashrae_dict: A dictionary with 32 keys that match those in the
cooling_keys property of this object. Each key should
correspond to a value.
location: Location object for the design day
use_010: Boolean to denote what type of design day to create
(whether it is a 1.0% or a 0.4% design day). Default is
False for a 0.4% annual cooling design day
pressure: Atmospheric pressure in Pa that should be used in the
creation of the humidity condition. Default is 101325 Pa
for pressure at sea level.
tau: Optional tuple containing two values, which will set the
sky condition to be a revised ASHRAE clear sky (Tau model).
The first item of the tuple should be the tau beam value
and the second is for the tau diffuse value. Default is
None, which will default to the original ASHRAE Clear Sky. | [
"Create",
"a",
"heating",
"design",
"day",
"object",
"from",
"a",
"ASHRAE",
"HOF",
"dictionary",
"."
] | python | train |
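An illustrative call to the constructor above, assuming the enclosing class is ladybug's DesignDay; only the dictionary keys the snippet actually reads are shown (a real ASHRAE HOF row carries 32), the numeric values are invented, and location is assumed to be an existing ladybug Location object.

.. code-block:: python

    ashrae_entry = {
        'Month': '7',         # design month
        'DB004': '33.0',      # 0.4% dry-bulb temperature
        'DBR': '9.8',         # dry-bulb range
        'WB_DB004': '23.0',   # coincident wet-bulb at the 0.4% dry-bulb
        'WS_DB004': '4.1',    # coincident wind speed
        'WD_DB004': '250',    # coincident wind direction
    }

    design_day = DesignDay.from_ashrae_dict_cooling(
        ashrae_entry, location, use_010=False, pressure=101325, tau=(0.45, 2.0))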
pavlin-policar/openTSNE | openTSNE/pynndescent/utils.py | https://github.com/pavlin-policar/openTSNE/blob/28513a0d669f2f20e7b971c0c6373dc375f72771/openTSNE/pynndescent/utils.py#L107-L134 | def make_heap(n_points, size):
"""Constructor for the numba enabled heap objects. The heaps are used
for approximate nearest neighbor search, maintaining a list of potential
neighbors sorted by their distance. We also flag if potential neighbors
are newly added to the list or not. Internally this is stored as
a single ndarray; the first axis determines whether we are looking at the
array of candidate indices, the array of distances, or the flag array for
whether elements are new or not. Each of these arrays are of shape
(``n_points``, ``size``)
Parameters
----------
n_points: int
The number of data points to track in the heap.
size: int
The number of items to keep on the heap for each data point.
Returns
-------
heap: An ndarray suitable for passing to other numba enabled heap functions.
"""
result = np.zeros((3, n_points, size))
result[0] = -1
result[1] = np.infty
result[2] = 0
return result | [
"def",
"make_heap",
"(",
"n_points",
",",
"size",
")",
":",
"result",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"n_points",
",",
"size",
")",
")",
"result",
"[",
"0",
"]",
"=",
"-",
"1",
"result",
"[",
"1",
"]",
"=",
"np",
".",
"infty",
"result",
"[",
"2",
"]",
"=",
"0",
"return",
"result"
] | Constructor for the numba enabled heap objects. The heaps are used
for approximate nearest neighbor search, maintaining a list of potential
neighbors sorted by their distance. We also flag if potential neighbors
are newly added to the list or not. Internally this is stored as
a single ndarray; the first axis determines whether we are looking at the
array of candidate indices, the array of distances, or the flag array for
whether elements are new or not. Each of these arrays are of shape
(``n_points``, ``size``)
Parameters
----------
n_points: int
The number of data points to track in the heap.
size: int
The number of items to keep on the heap for each data point.
Returns
-------
heap: An ndarray suitable for passing to other numba enabled heap functions. | [
"Constructor",
"for",
"the",
"numba",
"enabled",
"heap",
"objects",
".",
"The",
"heaps",
"are",
"used",
"for",
"approximate",
"nearest",
"neighbor",
"search",
"maintaining",
"a",
"list",
"of",
"potential",
"neighbors",
"sorted",
"by",
"their",
"distance",
".",
"We",
"also",
"flag",
"if",
"potential",
"neighbors",
"are",
"newly",
"added",
"to",
"the",
"list",
"or",
"not",
".",
"Internally",
"this",
"is",
"stored",
"as",
"a",
"single",
"ndarray",
";",
"the",
"first",
"axis",
"determines",
"whether",
"we",
"are",
"looking",
"at",
"the",
"array",
"of",
"candidate",
"indices",
"the",
"array",
"of",
"distances",
"or",
"the",
"flag",
"array",
"for",
"whether",
"elements",
"are",
"new",
"or",
"not",
".",
"Each",
"of",
"these",
"arrays",
"are",
"of",
"shape",
"(",
"n_points",
"size",
")"
] | python | train |
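A self-contained demonstration of the heap layout described above: axis 0 selects candidate indices / distances / new-item flags, and the fill values match the constructor.

.. code-block:: python

    import numpy as np

    def make_heap(n_points, size):
        # [0] candidate indices, [1] distances, [2] "is new" flags
        heap = np.zeros((3, n_points, size))
        heap[0] = -1        # no neighbour stored yet
        heap[1] = np.inf    # unknown neighbours are infinitely far away
        heap[2] = 0         # nothing flagged as newly added
        return heap

    heap = make_heap(n_points=4, size=3)
    print(heap.shape)       # (3, 4, 3)
    print(heap[0, 0])       # [-1. -1. -1.]  candidate indices for point 0
    print(heap[1, 0])       # [inf inf inf]  distances for point 0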
da4089/simplefix | simplefix/message.py | https://github.com/da4089/simplefix/blob/10f7f165a99a03467110bee69cc7c083c3531c68/simplefix/message.py#L475-L489 | def append_data(self, len_tag, val_tag, data, header=False):
"""Append raw data, possibly including a embedded SOH.
:param len_tag: Tag number for length field.
:param val_tag: Tag number for value field.
:param data: Raw data byte string.
:param header: Append to header if True; default to body.
Appends two pairs: a length pair, followed by a data pair,
containing the raw data supplied. Example fields that should
use this method include: 95/96, 212/213, 354/355, etc."""
self.append_pair(len_tag, len(data), header=header)
self.append_pair(val_tag, data, header=header)
return | [
"def",
"append_data",
"(",
"self",
",",
"len_tag",
",",
"val_tag",
",",
"data",
",",
"header",
"=",
"False",
")",
":",
"self",
".",
"append_pair",
"(",
"len_tag",
",",
"len",
"(",
"data",
")",
",",
"header",
"=",
"header",
")",
"self",
".",
"append_pair",
"(",
"val_tag",
",",
"data",
",",
"header",
"=",
"header",
")",
"return"
] | Append raw data, possibly including an embedded SOH.
:param len_tag: Tag number for length field.
:param val_tag: Tag number for value field.
:param data: Raw data byte string.
:param header: Append to header if True; default to body.
Appends two pairs: a length pair, followed by a data pair,
containing the raw data supplied. Example fields that should
use this method include: 95/96, 212/213, 354/355, etc. | [
"Append",
"raw",
"data",
"possibly",
"including",
"a",
"embedded",
"SOH",
"."
] | python | train |
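A short usage sketch of the method above; the 212/213 pair (XmlDataLen/XmlData) is taken from the docstring's examples, and the other field values are placeholders.

.. code-block:: python

    import simplefix

    msg = simplefix.FixMessage()
    msg.append_pair(8, "FIX.4.2")                        # BeginString
    msg.append_pair(35, "B")                             # MsgType (placeholder)
    # One call appends both the length tag (212) and the value tag (213),
    # so the payload may safely contain SOH bytes.
    msg.append_data(212, 213, b"<news>hello\x01world</news>")
    wire_bytes = msg.encode()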
kytos/kytos-utils | kytos/cli/commands/napps/api.py | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/cli/commands/napps/api.py#L198-L210 | def _print_napps(cls, napp_list):
"""Format the NApp list to be printed."""
mgr = NAppsManager()
enabled = mgr.get_enabled()
installed = mgr.get_installed()
napps = []
for napp, desc in sorted(napp_list):
status = 'i' if napp in installed else '-'
status += 'e' if napp in enabled else '-'
status = '[{}]'.format(status)
name = '{}/{}'.format(*napp)
napps.append((status, name, desc))
cls.print_napps(napps) | [
"def",
"_print_napps",
"(",
"cls",
",",
"napp_list",
")",
":",
"mgr",
"=",
"NAppsManager",
"(",
")",
"enabled",
"=",
"mgr",
".",
"get_enabled",
"(",
")",
"installed",
"=",
"mgr",
".",
"get_installed",
"(",
")",
"napps",
"=",
"[",
"]",
"for",
"napp",
",",
"desc",
"in",
"sorted",
"(",
"napp_list",
")",
":",
"status",
"=",
"'i'",
"if",
"napp",
"in",
"installed",
"else",
"'-'",
"status",
"+=",
"'e'",
"if",
"napp",
"in",
"enabled",
"else",
"'-'",
"status",
"=",
"'[{}]'",
".",
"format",
"(",
"status",
")",
"name",
"=",
"'{}/{}'",
".",
"format",
"(",
"*",
"napp",
")",
"napps",
".",
"append",
"(",
"(",
"status",
",",
"name",
",",
"desc",
")",
")",
"cls",
".",
"print_napps",
"(",
"napps",
")"
] | Format the NApp list to be printed. | [
"Format",
"the",
"NApp",
"list",
"to",
"be",
"printed",
"."
] | python | train |
saltstack/salt | salt/states/zone.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zone.py#L1141-L1182 | def installed(name, nodataset=False, brand_opts=None):
'''
Ensure zone is installed
name : string
name of the zone
nodataset : boolean
do not create a ZFS file system
brand_opts : boolean
brand specific options to pass
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
zones = __salt__['zoneadm.list'](installed=True, configured=True)
if name in zones:
if zones[name]['state'] == 'configured':
if __opts__['test']:
res_install = {'status': True}
else:
res_install = __salt__['zoneadm.install'](name, nodataset, brand_opts)
ret['result'] = res_install['status']
if ret['result']:
ret['changes'][name] = 'installed'
ret['comment'] = 'The zone {0} was installed.'.format(name)
else:
ret['comment'] = []
ret['comment'].append('Failed to install zone {0}!'.format(name))
if 'message' in res_install:
ret['comment'].append(res_install['message'])
ret['comment'] = "\n".join(ret['comment'])
else:
ret['result'] = True
ret['comment'] = 'zone {0} already installed.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'zone {0} is not configured!'.format(name)
return ret | [
"def",
"installed",
"(",
"name",
",",
"nodataset",
"=",
"False",
",",
"brand_opts",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"zones",
"=",
"__salt__",
"[",
"'zoneadm.list'",
"]",
"(",
"installed",
"=",
"True",
",",
"configured",
"=",
"True",
")",
"if",
"name",
"in",
"zones",
":",
"if",
"zones",
"[",
"name",
"]",
"[",
"'state'",
"]",
"==",
"'configured'",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"res_install",
"=",
"{",
"'status'",
":",
"True",
"}",
"else",
":",
"res_install",
"=",
"__salt__",
"[",
"'zoneadm.install'",
"]",
"(",
"name",
",",
"nodataset",
",",
"brand_opts",
")",
"ret",
"[",
"'result'",
"]",
"=",
"res_install",
"[",
"'status'",
"]",
"if",
"ret",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'installed'",
"ret",
"[",
"'comment'",
"]",
"=",
"'The zone {0} was installed.'",
".",
"format",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"[",
"]",
"ret",
"[",
"'comment'",
"]",
".",
"append",
"(",
"'Failed to install zone {0}!'",
".",
"format",
"(",
"name",
")",
")",
"if",
"'message'",
"in",
"res_install",
":",
"ret",
"[",
"'comment'",
"]",
".",
"append",
"(",
"res_install",
"[",
"'message'",
"]",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"\"\\n\"",
".",
"join",
"(",
"ret",
"[",
"'comment'",
"]",
")",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'zone {0} already installed.'",
".",
"format",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'zone {0} is not configured!'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] | Ensure zone is installed
name : string
name of the zone
nodataset : boolean
do not create a ZFS file system
brand_opts : boolean
brand specific options to pass | [
"Ensure",
"zone",
"is",
"installed"
] | python | train |
jonathf/chaospy | chaospy/quad/stieltjes.py | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/quad/stieltjes.py#L98-L129 | def _stieltjes_analytical(dist, order, normed):
"""Stieltjes' method with analytical recurrence coefficients."""
dimensions = len(dist)
mom_order = numpy.arange(order+1).repeat(dimensions)
mom_order = mom_order.reshape(order+1, dimensions).T
coeff1, coeff2 = dist.ttr(mom_order)
coeff2[:, 0] = 1.
poly = chaospy.poly.collection.core.variable(dimensions)
if normed:
orth = [
poly**0*numpy.ones(dimensions),
(poly-coeff1[:, 0])/numpy.sqrt(coeff2[:, 1]),
]
for order_ in range(1, order):
orth.append(
(orth[-1]*(poly-coeff1[:, order_])
-orth[-2]*numpy.sqrt(coeff2[:, order_]))
/numpy.sqrt(coeff2[:, order_+1])
)
norms = numpy.ones(coeff2.shape)
else:
orth = [poly-poly, poly**0*numpy.ones(dimensions)]
for order_ in range(order):
orth.append(
orth[-1]*(poly-coeff1[:, order_])
- orth[-2]*coeff2[:, order_]
)
orth = orth[1:]
norms = numpy.cumprod(coeff2, 1)
return orth, norms, coeff1, coeff2 | [
"def",
"_stieltjes_analytical",
"(",
"dist",
",",
"order",
",",
"normed",
")",
":",
"dimensions",
"=",
"len",
"(",
"dist",
")",
"mom_order",
"=",
"numpy",
".",
"arange",
"(",
"order",
"+",
"1",
")",
".",
"repeat",
"(",
"dimensions",
")",
"mom_order",
"=",
"mom_order",
".",
"reshape",
"(",
"order",
"+",
"1",
",",
"dimensions",
")",
".",
"T",
"coeff1",
",",
"coeff2",
"=",
"dist",
".",
"ttr",
"(",
"mom_order",
")",
"coeff2",
"[",
":",
",",
"0",
"]",
"=",
"1.",
"poly",
"=",
"chaospy",
".",
"poly",
".",
"collection",
".",
"core",
".",
"variable",
"(",
"dimensions",
")",
"if",
"normed",
":",
"orth",
"=",
"[",
"poly",
"**",
"0",
"*",
"numpy",
".",
"ones",
"(",
"dimensions",
")",
",",
"(",
"poly",
"-",
"coeff1",
"[",
":",
",",
"0",
"]",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"coeff2",
"[",
":",
",",
"1",
"]",
")",
",",
"]",
"for",
"order_",
"in",
"range",
"(",
"1",
",",
"order",
")",
":",
"orth",
".",
"append",
"(",
"(",
"orth",
"[",
"-",
"1",
"]",
"*",
"(",
"poly",
"-",
"coeff1",
"[",
":",
",",
"order_",
"]",
")",
"-",
"orth",
"[",
"-",
"2",
"]",
"*",
"numpy",
".",
"sqrt",
"(",
"coeff2",
"[",
":",
",",
"order_",
"]",
")",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"coeff2",
"[",
":",
",",
"order_",
"+",
"1",
"]",
")",
")",
"norms",
"=",
"numpy",
".",
"ones",
"(",
"coeff2",
".",
"shape",
")",
"else",
":",
"orth",
"=",
"[",
"poly",
"-",
"poly",
",",
"poly",
"**",
"0",
"*",
"numpy",
".",
"ones",
"(",
"dimensions",
")",
"]",
"for",
"order_",
"in",
"range",
"(",
"order",
")",
":",
"orth",
".",
"append",
"(",
"orth",
"[",
"-",
"1",
"]",
"*",
"(",
"poly",
"-",
"coeff1",
"[",
":",
",",
"order_",
"]",
")",
"-",
"orth",
"[",
"-",
"2",
"]",
"*",
"coeff2",
"[",
":",
",",
"order_",
"]",
")",
"orth",
"=",
"orth",
"[",
"1",
":",
"]",
"norms",
"=",
"numpy",
".",
"cumprod",
"(",
"coeff2",
",",
"1",
")",
"return",
"orth",
",",
"norms",
",",
"coeff1",
",",
"coeff2"
] | Stieltjes' method with analytical recurrence coefficients. | [
"Stieltjes",
"method",
"with",
"analytical",
"recurrence",
"coefficients",
"."
] | python | train |
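The three-term recurrence the snippet above evaluates, written out; A_n and B_n are the coefficients returned by dist.ttr (with B_0 forced to 1), and the normalised branch additionally divides by the square root of the next coefficient.

.. code-block:: latex

    % Monic (unnormalised) branch, with P_{-1}(x) = 0 and P_0(x) = 1:
    \[
    P_{n+1}(x) = (x - A_n)\,P_n(x) - B_n\,P_{n-1}(x),
    \qquad \|P_n\|^2 = \prod_{k=0}^{n} B_k .
    \]
    % Normalised branch:
    \[
    \hat{P}_{n+1}(x) =
      \frac{(x - A_n)\,\hat{P}_n(x) - \sqrt{B_n}\,\hat{P}_{n-1}(x)}{\sqrt{B_{n+1}}} .
    \]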
Naresh1318/crystal | crystal/Crystal.py | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/Crystal.py#L17-L25 | def get_valid_time_stamp():
"""
Get a valid time stamp without illegal characters.
Adds time_ to make the time stamp a valid table name in sql.
:return: String, extracted timestamp
"""
time_stamp = str(datetime.datetime.now())
time_stamp = "time_" + time_stamp.replace("-", "_").replace(":", "_").replace(" ", "_").replace(".", "_")
return time_stamp | [
"def",
"get_valid_time_stamp",
"(",
")",
":",
"time_stamp",
"=",
"str",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
"time_stamp",
"=",
"\"time_\"",
"+",
"time_stamp",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
".",
"replace",
"(",
"\":\"",
",",
"\"_\"",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
".",
"replace",
"(",
"\".\"",
",",
"\"_\"",
")",
"return",
"time_stamp"
] | Get a valid time stamp without illegal characters.
Adds time_ to make the time stamp a valid table name in sql.
:return: String, extracted timestamp | [
"Get",
"a",
"valid",
"time",
"stamp",
"without",
"illegal",
"characters",
".",
"Adds",
"time_",
"to",
"make",
"the",
"time",
"stamp",
"a",
"valid",
"table",
"name",
"in",
"sql",
".",
":",
"return",
":",
"String",
"extracted",
"timestamp"
] | python | train |
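A quick illustration of the sanitising step above; the exact digits depend on when it runs, so the printed value is only an example shape.

.. code-block:: python

    import datetime

    time_stamp = str(datetime.datetime.now())
    table_name = "time_" + (time_stamp.replace("-", "_")
                                      .replace(":", "_")
                                      .replace(" ", "_")
                                      .replace(".", "_"))
    print(table_name)   # e.g. time_2024_01_01_12_30_05_123456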
readbeyond/aeneas | aeneas/executejob.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/executejob.py#L220-L296 | def write_output_container(self, output_directory_path):
"""
Write the output container for this job.
Return the path to output container,
which is the concatenation of ``output_directory_path``
and of the output container file or directory name.
:param string output_directory_path: the path to a directory where
the output container must be created
:rtype: string
:raises: :class:`~aeneas.executejob.ExecuteJobOutputError`: if there is a problem while writing the output container
"""
self.log(u"Writing output container for this job")
if self.job is None:
self.log_exc(u"The job object is None", None, True, ExecuteJobOutputError)
if len(self.job) == 0:
self.log_exc(u"The job has no tasks", None, True, ExecuteJobOutputError)
self.log([u"Number of tasks: '%d'", len(self.job)])
# create temporary directory where the sync map files
# will be created
# this temporary directory will be compressed into
# the output container
self.tmp_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH])
self.log([u"Created temporary directory '%s'", self.tmp_directory])
for task in self.job.tasks:
custom_id = task.configuration["custom_id"]
# check if the task has sync map and sync map file path
if task.sync_map_file_path is None:
self.log_exc(u"Task '%s' has sync_map_file_path not set" % (custom_id), None, True, ExecuteJobOutputError)
if task.sync_map is None:
self.log_exc(u"Task '%s' has sync_map not set" % (custom_id), None, True, ExecuteJobOutputError)
try:
# output sync map
self.log([u"Outputting sync map for task '%s'...", custom_id])
task.output_sync_map_file(self.tmp_directory)
self.log([u"Outputting sync map for task '%s'... done", custom_id])
except Exception as exc:
self.log_exc(u"Error while outputting sync map for task '%s'" % (custom_id), None, True, ExecuteJobOutputError)
# get output container info
output_container_format = self.job.configuration["o_container_format"]
self.log([u"Output container format: '%s'", output_container_format])
output_file_name = self.job.configuration["o_name"]
if ((output_container_format != ContainerFormat.UNPACKED) and
(not output_file_name.endswith(output_container_format))):
self.log(u"Adding extension to output_file_name")
output_file_name += "." + output_container_format
self.log([u"Output file name: '%s'", output_file_name])
output_file_path = gf.norm_join(
output_directory_path,
output_file_name
)
self.log([u"Output file path: '%s'", output_file_path])
try:
self.log(u"Compressing...")
container = Container(
output_file_path,
output_container_format,
logger=self.logger
)
container.compress(self.tmp_directory)
self.log(u"Compressing... done")
self.log([u"Created output file: '%s'", output_file_path])
self.log(u"Writing output container for this job: succeeded")
self.clean(False)
return output_file_path
except Exception as exc:
self.clean(False)
self.log_exc(u"Error while compressing", exc, True, ExecuteJobOutputError)
return None | [
"def",
"write_output_container",
"(",
"self",
",",
"output_directory_path",
")",
":",
"self",
".",
"log",
"(",
"u\"Writing output container for this job\"",
")",
"if",
"self",
".",
"job",
"is",
"None",
":",
"self",
".",
"log_exc",
"(",
"u\"The job object is None\"",
",",
"None",
",",
"True",
",",
"ExecuteJobOutputError",
")",
"if",
"len",
"(",
"self",
".",
"job",
")",
"==",
"0",
":",
"self",
".",
"log_exc",
"(",
"u\"The job has no tasks\"",
",",
"None",
",",
"True",
",",
"ExecuteJobOutputError",
")",
"self",
".",
"log",
"(",
"[",
"u\"Number of tasks: '%d'\"",
",",
"len",
"(",
"self",
".",
"job",
")",
"]",
")",
"# create temporary directory where the sync map files",
"# will be created",
"# this temporary directory will be compressed into",
"# the output container",
"self",
".",
"tmp_directory",
"=",
"gf",
".",
"tmp_directory",
"(",
"root",
"=",
"self",
".",
"rconf",
"[",
"RuntimeConfiguration",
".",
"TMP_PATH",
"]",
")",
"self",
".",
"log",
"(",
"[",
"u\"Created temporary directory '%s'\"",
",",
"self",
".",
"tmp_directory",
"]",
")",
"for",
"task",
"in",
"self",
".",
"job",
".",
"tasks",
":",
"custom_id",
"=",
"task",
".",
"configuration",
"[",
"\"custom_id\"",
"]",
"# check if the task has sync map and sync map file path",
"if",
"task",
".",
"sync_map_file_path",
"is",
"None",
":",
"self",
".",
"log_exc",
"(",
"u\"Task '%s' has sync_map_file_path not set\"",
"%",
"(",
"custom_id",
")",
",",
"None",
",",
"True",
",",
"ExecuteJobOutputError",
")",
"if",
"task",
".",
"sync_map",
"is",
"None",
":",
"self",
".",
"log_exc",
"(",
"u\"Task '%s' has sync_map not set\"",
"%",
"(",
"custom_id",
")",
",",
"None",
",",
"True",
",",
"ExecuteJobOutputError",
")",
"try",
":",
"# output sync map",
"self",
".",
"log",
"(",
"[",
"u\"Outputting sync map for task '%s'...\"",
",",
"custom_id",
"]",
")",
"task",
".",
"output_sync_map_file",
"(",
"self",
".",
"tmp_directory",
")",
"self",
".",
"log",
"(",
"[",
"u\"Outputting sync map for task '%s'... done\"",
",",
"custom_id",
"]",
")",
"except",
"Exception",
"as",
"exc",
":",
"self",
".",
"log_exc",
"(",
"u\"Error while outputting sync map for task '%s'\"",
"%",
"(",
"custom_id",
")",
",",
"None",
",",
"True",
",",
"ExecuteJobOutputError",
")",
"# get output container info",
"output_container_format",
"=",
"self",
".",
"job",
".",
"configuration",
"[",
"\"o_container_format\"",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Output container format: '%s'\"",
",",
"output_container_format",
"]",
")",
"output_file_name",
"=",
"self",
".",
"job",
".",
"configuration",
"[",
"\"o_name\"",
"]",
"if",
"(",
"(",
"output_container_format",
"!=",
"ContainerFormat",
".",
"UNPACKED",
")",
"and",
"(",
"not",
"output_file_name",
".",
"endswith",
"(",
"output_container_format",
")",
")",
")",
":",
"self",
".",
"log",
"(",
"u\"Adding extension to output_file_name\"",
")",
"output_file_name",
"+=",
"\".\"",
"+",
"output_container_format",
"self",
".",
"log",
"(",
"[",
"u\"Output file name: '%s'\"",
",",
"output_file_name",
"]",
")",
"output_file_path",
"=",
"gf",
".",
"norm_join",
"(",
"output_directory_path",
",",
"output_file_name",
")",
"self",
".",
"log",
"(",
"[",
"u\"Output file path: '%s'\"",
",",
"output_file_path",
"]",
")",
"try",
":",
"self",
".",
"log",
"(",
"u\"Compressing...\"",
")",
"container",
"=",
"Container",
"(",
"output_file_path",
",",
"output_container_format",
",",
"logger",
"=",
"self",
".",
"logger",
")",
"container",
".",
"compress",
"(",
"self",
".",
"tmp_directory",
")",
"self",
".",
"log",
"(",
"u\"Compressing... done\"",
")",
"self",
".",
"log",
"(",
"[",
"u\"Created output file: '%s'\"",
",",
"output_file_path",
"]",
")",
"self",
".",
"log",
"(",
"u\"Writing output container for this job: succeeded\"",
")",
"self",
".",
"clean",
"(",
"False",
")",
"return",
"output_file_path",
"except",
"Exception",
"as",
"exc",
":",
"self",
".",
"clean",
"(",
"False",
")",
"self",
".",
"log_exc",
"(",
"u\"Error while compressing\"",
",",
"exc",
",",
"True",
",",
"ExecuteJobOutputError",
")",
"return",
"None"
] | Write the output container for this job.
Return the path to output container,
which is the concatenation of ``output_directory_path``
and of the output container file or directory name.
:param string output_directory_path: the path to a directory where
the output container must be created
:rtype: string
:raises: :class:`~aeneas.executejob.ExecuteJobOutputError`: if there is a problem while writing the output container | [
"Write",
"the",
"output",
"container",
"for",
"this",
"job",
"."
] | python | train |
quantopian/zipline | zipline/utils/cli.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cli.py#L7-L36 | def maybe_show_progress(it, show_progress, **kwargs):
"""Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
...
"""
if show_progress:
return click.progressbar(it, **kwargs)
# context manager that just return `it` when we enter it
return CallbackManager(lambda it=it: it) | [
"def",
"maybe_show_progress",
"(",
"it",
",",
"show_progress",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"show_progress",
":",
"return",
"click",
".",
"progressbar",
"(",
"it",
",",
"*",
"*",
"kwargs",
")",
"# context manager that just return `it` when we enter it",
"return",
"CallbackManager",
"(",
"lambda",
"it",
"=",
"it",
":",
"it",
")"
] | Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
... | [
"Optionally",
"show",
"a",
"progress",
"bar",
"for",
"the",
"given",
"iterator",
"."
] | python | train |
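A usage sketch of the helper above; extra keyword arguments are forwarded to click.progressbar, so the label below is one such pass-through, and the no-progress branch simply hands back the original iterable through zipline's own CallbackManager.

.. code-block:: python

    items = range(100)
    with maybe_show_progress(items, show_progress=True, label="ingesting bars") as it:
        for item in it:
            pass   # per-item work goes here; with show_progress=False, `it` is just `items`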
frmdstryr/enamlx | enamlx/qt/qt_graphics_view.py | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_graphics_view.py#L638-L643 | def mousePressEvent(self, event):
""" Handle the mouse press event for a drag operation.
"""
self.declaration.mouse_press_event(event)
super(QtGraphicsView, self).mousePressEvent(event) | [
"def",
"mousePressEvent",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"declaration",
".",
"mouse_press_event",
"(",
"event",
")",
"super",
"(",
"QtGraphicsView",
",",
"self",
")",
".",
"mousePressEvent",
"(",
"event",
")"
] | Handle the mouse press event for a drag operation. | [
"Handle",
"the",
"mouse",
"press",
"event",
"for",
"a",
"drag",
"operation",
"."
] | python | train |
materials-data-facility/toolbox | mdf_toolbox/sub_helpers.py | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/sub_helpers.py#L23-L97 | def _aggregate(self, scroll_field, scroll_size=SEARCH_LIMIT):
"""Perform an advanced query, and return *all* matching results.
Will automatically perform multiple queries in order to retrieve all results.
Note: All ``aggregate`` queries run in advanced mode.
Arguments:
scroll_field (str): The field on which to scroll. This should be a field
that counts/indexes the entries.
scroll_size (int): Maximum number of records returned per query. Must be
between one and the ``SEARCH_LIMIT`` (inclusive).
**Default:** ``SEARCH_LIMIT``.
Returns:
list of dict: All matching entries.
"""
# Make sure scroll_field is valid
if not scroll_field:
raise AttributeError("scroll_field is required.")
# Make sure the query is set
if not self.initialized:
raise AttributeError('No query has been set.')
# Warn the user if we are changing the setting of advanced
if not self._SearchHelper__query["advanced"]:
warnings.warn('This query will be run in advanced mode.', RuntimeWarning)
self._SearchHelper__query["advanced"] = True
# Inform the user if they set an invalid value for the query size
if scroll_size <= 0:
raise AttributeError('Scroll size must greater than zero')
# Get the total number of records
total = self.search(limit=0, info=True, reset_query=False)[1]["total_query_matches"]
# If aggregate is unnecessary, use Search automatically instead
if total <= SEARCH_LIMIT:
return self.search(limit=SEARCH_LIMIT, reset_query=False)
# Scroll until all results are found
output = []
scroll_pos = 0
while len(output) < total:
# Scroll until the width is small enough to get all records
# `scroll_id`s are unique to each dataset. If multiple datasets
# match a certain query, the total number of matching records
# may exceed the maximum that search will return - even if the
# scroll width is much smaller than that maximum
scroll_width = scroll_size
while True:
query = "({q}) AND ({field}:>={start} AND {field}:<{end})".format(
q=self._SearchHelper__query["q"], field=scroll_field, start=scroll_pos,
end=scroll_pos+scroll_width)
results, info = self.search(q=query, advanced=True, info=True)
# Check to make sure that all the matching records were returned
if info["total_query_matches"] <= len(results):
break
# If not, reduce the scroll width
# new_width is proportional with the proportion of results returned
new_width = scroll_width * (len(results) // info["total_query_matches"])
# scroll_width should never be 0, and should only be 1 in rare circumstances
scroll_width = new_width if new_width > 1 else max(scroll_width//2, 1)
# Append the results to the output
output.extend(results)
scroll_pos += scroll_width
return output | [
"def",
"_aggregate",
"(",
"self",
",",
"scroll_field",
",",
"scroll_size",
"=",
"SEARCH_LIMIT",
")",
":",
"# Make sure scroll_field is valid",
"if",
"not",
"scroll_field",
":",
"raise",
"AttributeError",
"(",
"\"scroll_field is required.\"",
")",
"# Make sure the query is set",
"if",
"not",
"self",
".",
"initialized",
":",
"raise",
"AttributeError",
"(",
"'No query has been set.'",
")",
"# Warn the user if we are changing the setting of advanced",
"if",
"not",
"self",
".",
"_SearchHelper__query",
"[",
"\"advanced\"",
"]",
":",
"warnings",
".",
"warn",
"(",
"'This query will be run in advanced mode.'",
",",
"RuntimeWarning",
")",
"self",
".",
"_SearchHelper__query",
"[",
"\"advanced\"",
"]",
"=",
"True",
"# Inform the user if they set an invalid value for the query size",
"if",
"scroll_size",
"<=",
"0",
":",
"raise",
"AttributeError",
"(",
"'Scroll size must greater than zero'",
")",
"# Get the total number of records",
"total",
"=",
"self",
".",
"search",
"(",
"limit",
"=",
"0",
",",
"info",
"=",
"True",
",",
"reset_query",
"=",
"False",
")",
"[",
"1",
"]",
"[",
"\"total_query_matches\"",
"]",
"# If aggregate is unnecessary, use Search automatically instead",
"if",
"total",
"<=",
"SEARCH_LIMIT",
":",
"return",
"self",
".",
"search",
"(",
"limit",
"=",
"SEARCH_LIMIT",
",",
"reset_query",
"=",
"False",
")",
"# Scroll until all results are found",
"output",
"=",
"[",
"]",
"scroll_pos",
"=",
"0",
"while",
"len",
"(",
"output",
")",
"<",
"total",
":",
"# Scroll until the width is small enough to get all records",
"# `scroll_id`s are unique to each dataset. If multiple datasets",
"# match a certain query, the total number of matching records",
"# may exceed the maximum that search will return - even if the",
"# scroll width is much smaller than that maximum",
"scroll_width",
"=",
"scroll_size",
"while",
"True",
":",
"query",
"=",
"\"({q}) AND ({field}:>={start} AND {field}:<{end})\"",
".",
"format",
"(",
"q",
"=",
"self",
".",
"_SearchHelper__query",
"[",
"\"q\"",
"]",
",",
"field",
"=",
"scroll_field",
",",
"start",
"=",
"scroll_pos",
",",
"end",
"=",
"scroll_pos",
"+",
"scroll_width",
")",
"results",
",",
"info",
"=",
"self",
".",
"search",
"(",
"q",
"=",
"query",
",",
"advanced",
"=",
"True",
",",
"info",
"=",
"True",
")",
"# Check to make sure that all the matching records were returned",
"if",
"info",
"[",
"\"total_query_matches\"",
"]",
"<=",
"len",
"(",
"results",
")",
":",
"break",
"# If not, reduce the scroll width",
"# new_width is proportional with the proportion of results returned",
"new_width",
"=",
"scroll_width",
"*",
"(",
"len",
"(",
"results",
")",
"//",
"info",
"[",
"\"total_query_matches\"",
"]",
")",
"# scroll_width should never be 0, and should only be 1 in rare circumstances",
"scroll_width",
"=",
"new_width",
"if",
"new_width",
">",
"1",
"else",
"max",
"(",
"scroll_width",
"//",
"2",
",",
"1",
")",
"# Append the results to the output",
"output",
".",
"extend",
"(",
"results",
")",
"scroll_pos",
"+=",
"scroll_width",
"return",
"output"
] | Perform an advanced query, and return *all* matching results.
Will automatically perform multiple queries in order to retrieve all results.
Note: All ``aggregate`` queries run in advanced mode.
Arguments:
scroll_field (str): The field on which to scroll. This should be a field
that counts/indexes the entries.
scroll_size (int): Maximum number of records returned per query. Must be
between one and the ``SEARCH_LIMIT`` (inclusive).
**Default:** ``SEARCH_LIMIT``.
Returns:
list of dict: All matching entries. | [
"Perform",
"an",
"advanced",
"query",
"and",
"return",
"*",
"all",
"*",
"matching",
"results",
".",
"Will",
"automatically",
"perform",
"multiple",
"queries",
"in",
"order",
"to",
"retrieve",
"all",
"results",
"."
] | python | train |
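A generic sketch of the shrinking-window scroll the method above implements, decoupled from Globus Search; run_query is a stand-in callable that returns the records in a half-open window plus the number that matched inside it.

.. code-block:: python

    def scroll_all(run_query, total_matches, scroll_size):
        """run_query(start, end) -> (records, n_matching) for the window [start, end)."""
        output, pos = [], 0
        while len(output) < total_matches:
            width = scroll_size
            while True:
                records, n_matching = run_query(pos, pos + width)
                if n_matching <= len(records):     # the whole window fit into one response
                    break
                width = max(width // 2, 1)         # window too wide: shrink and retry
            output.extend(records)
            pos += width
        return output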
DataBiosphere/toil | src/toil/jobStores/fileJobStore.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/fileJobStore.py#L605-L626 | def _getUniqueName(self, fileName, jobStoreID=None, sourceFunctionName="x"):
"""
Create unique file name within a jobStore directory or tmp directory.
:param fileName: A file name, which can be a full path as only the
basename will be used.
:param jobStoreID: If given, the path returned will be in the jobStore directory.
Otherwise, the tmp directory will be used.
:param sourceFunctionName: This name is the name of the function that
generated this file. Defaults to x if that name was not a normal
name. Used for tracking files.
:return: The full path with a unique file name.
"""
fd, absPath = self._getTempFile(jobStoreID)
os.close(fd)
os.unlink(absPath)
# remove the .tmp extension and add the file name
(noExt,ext) = os.path.splitext(absPath)
uniquePath = noExt + '-' + sourceFunctionName + '-' + os.path.basename(fileName)
if os.path.exists(absPath):
return absPath # give up, just return temp name to avoid conflicts
return uniquePath | [
"def",
"_getUniqueName",
"(",
"self",
",",
"fileName",
",",
"jobStoreID",
"=",
"None",
",",
"sourceFunctionName",
"=",
"\"x\"",
")",
":",
"fd",
",",
"absPath",
"=",
"self",
".",
"_getTempFile",
"(",
"jobStoreID",
")",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"unlink",
"(",
"absPath",
")",
"# remove the .tmp extension and add the file name",
"(",
"noExt",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"absPath",
")",
"uniquePath",
"=",
"noExt",
"+",
"'-'",
"+",
"sourceFunctionName",
"+",
"'-'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"fileName",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"absPath",
")",
":",
"return",
"absPath",
"# give up, just return temp name to avoid conflicts",
"return",
"uniquePath"
] | Create unique file name within a jobStore directory or tmp directory.
:param fileName: A file name, which can be a full path as only the
basename will be used.
:param jobStoreID: If given, the path returned will be in the jobStore directory.
Otherwise, the tmp directory will be used.
:param sourceFunctionName: This name is the name of the function that
generated this file. Defaults to x if that name was not a normal
name. Used for tracking files.
:return: The full path with a unique file name. | [
"Create",
"unique",
"file",
"name",
"within",
"a",
"jobStore",
"directory",
"or",
"tmp",
"directory",
"."
] | python | train |
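A standalone approximation of the naming scheme above using tempfile directly; real callers go through the job store's own _getTempFile, so the directory and the example arguments here are only illustrative.

.. code-block:: python

    import os
    import tempfile

    def unique_name(file_name, source_function_name="x", directory=None):
        # Reserve a unique temporary path, then reuse its stem for the final name
        fd, abs_path = tempfile.mkstemp(suffix=".tmp", dir=directory)
        os.close(fd)
        os.unlink(abs_path)
        no_ext, _ext = os.path.splitext(abs_path)
        unique_path = no_ext + '-' + source_function_name + '-' + os.path.basename(file_name)
        # Fall back to the reserved temp name on the (unlikely) collision, as above
        return abs_path if os.path.exists(abs_path) else unique_path

    print(unique_name("/data/input/reads.fastq", "mapReads"))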
Alignak-monitoring/alignak | alignak/daemon.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemon.py#L1010-L1177 | def do_main_loop(self):
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
"""Main loop for an Alignak daemon
:return: None
"""
# Increased on each loop turn
if self.loop_count is None:
self.loop_count = 0
# Daemon start timestamp
if self.start_time is None:
self.start_time = time.time()
# For the pause duration
logger.info("pause duration: %.2f", self.pause_duration)
# For the maximum expected loop duration
self.maximum_loop_duration = 1.1 * self.maximum_loop_duration
logger.info("maximum expected loop duration: %.2f", self.maximum_loop_duration)
# Treatments before starting the main loop...
self.do_before_loop()
elapsed_time = 0
logger.info("starting main loop: %.2f", self.start_time)
while not self.interrupted:
loop_start_ts = time.time()
# Maybe someone said we will stop...
if self.will_stop and not self.type == 'arbiter':
logger.debug("death-wait mode... waiting for death")
_, _ = self.make_a_pause(1.0)
continue
# Increment loop count
self.loop_count += 1
if self.log_loop:
logger.debug("--- %d", self.loop_count)
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.info("I got a new configuration...")
# Manage the new configuration
self.setup_new_conf()
# Trying to restore our related daemons lost connections
for satellite in list(self.get_links_of_type(s_type='').values()):
# Not for configuration disabled satellites
if not satellite.active:
continue
if not satellite.alive and not satellite.passive:
logger.info("Trying to restore connection for %s/%s...",
satellite.type, satellite.name)
if self.daemon_connection_init(satellite):
logger.info("Connection restored")
# Each loop turn, execute the daemon specific treatment...
# only if the daemon has a configuration to manage
if self.have_conf:
_ts = time.time()
self.do_loop_turn()
statsmgr.timer('loop-turn', time.time() - _ts)
else:
logger.info("+++ loop %d, I do not have a configuration", self.loop_count)
if self.daemon_monitoring and (self.loop_count % self.daemon_monitoring_period == 1):
perfdatas = []
my_process = psutil.Process()
with my_process.oneshot():
perfdatas.append("num_threads=%d" % my_process.num_threads())
statsmgr.counter("system.num_threads", my_process.num_threads())
# perfdatas.append("num_ctx_switches=%d" % my_process.num_ctx_switches())
perfdatas.append("num_fds=%d" % my_process.num_fds())
statsmgr.counter("system.num_fds", my_process.num_fds())
# perfdatas.append("num_handles=%d" % my_process.num_handles())
perfdatas.append("create_time=%d" % my_process.create_time())
perfdatas.append("cpu_num=%d" % my_process.cpu_num())
statsmgr.counter("system.cpu_num", my_process.cpu_num())
perfdatas.append("cpu_usable=%d" % len(my_process.cpu_affinity()))
statsmgr.counter("system.cpu_usable", len(my_process.cpu_affinity()))
perfdatas.append("cpu_percent=%.2f%%" % my_process.cpu_percent())
statsmgr.counter("system.cpu_percent", my_process.cpu_percent())
cpu_times_percent = my_process.cpu_times()
for key in cpu_times_percent._fields:
perfdatas.append("cpu_%s_time=%.2fs" % (key,
getattr(cpu_times_percent, key)))
statsmgr.counter("system.cpu_%s_time" % key,
getattr(cpu_times_percent, key))
memory = my_process.memory_full_info()
for key in memory._fields:
perfdatas.append("mem_%s=%db" % (key, getattr(memory, key)))
statsmgr.counter("system.mem_%s" % key, getattr(memory, key))
logger.debug("Daemon %s (%s), pid=%s, ppid=%s, status=%s, cpu/memory|%s",
self.name, my_process.name(), my_process.pid, my_process.ppid(),
my_process.status(), " ".join(perfdatas))
if self.activity_log_period and (self.loop_count % self.activity_log_period == 1):
logger.info("Daemon %s is living: loop #%s ;)", self.name, self.loop_count)
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.warning("The arbiter pushed a new configuration... ")
# Loop end
loop_end_ts = time.time()
loop_duration = loop_end_ts - loop_start_ts
pause = self.maximum_loop_duration - loop_duration
if loop_duration > self.maximum_loop_duration:
logger.info("The %s %s loop exceeded the maximum expected loop duration (%.2f). "
"The last loop needed %.2f seconds to execute. "
"You should try to reduce the load on this %s.",
self.type, self.name, self.maximum_loop_duration,
loop_duration, self.type)
# Make a very very short pause ...
pause = 0.01
# Pause the daemon execution to avoid too much load on the system
logger.debug("Before pause: timeout: %s", pause)
work, time_changed = self.make_a_pause(pause)
logger.debug("After pause: %.2f / %.2f, sleep time: %.2f",
work, time_changed, self.sleep_time)
if work > self.pause_duration:
logger.warning("Too much work during the pause (%.2f out of %.2f)! "
"The daemon should rest for a while... but one need to change "
"its code for this. Please log an issue in the project repository!",
work, self.pause_duration)
# self.pause_duration += 0.1
statsmgr.timer('sleep-time', self.sleep_time)
self.sleep_time = 0.0
# And now, the whole average time spent
elapsed_time = loop_end_ts - self.start_time
if self.log_loop:
logger.debug("Elapsed time, current loop: %.2f, from start: %.2f (%d loops)",
loop_duration, elapsed_time, self.loop_count)
statsmgr.gauge('loop-count', self.loop_count)
statsmgr.timer('run-duration', elapsed_time)
# Maybe someone said we will stop...
if self.will_stop:
if self.type == 'arbiter':
self.will_stop = False
else:
logger.info("The arbiter said we will stop soon - go to death-wait mode")
# Maybe someone asked us to die, if so, do it :)
if self.interrupted:
logger.info("Someone asked us to stop now")
continue
# If someone asked us a configuration reloading
if self.need_config_reload and self.type == 'arbiter':
logger.warning("Someone requested a configuration reload")
logger.info("Exiting daemon main loop")
return
# If someone asked us to dump memory, do it
if self.need_dump_environment:
logger.debug('Dumping memory')
self.dump_environment()
self.need_dump_environment = False
logger.info("stopped main loop: %.2f", time.time()) | [
"def",
"do_main_loop",
"(",
"self",
")",
":",
"# pylint: disable=too-many-branches, too-many-statements, too-many-locals",
"# Increased on each loop turn",
"if",
"self",
".",
"loop_count",
"is",
"None",
":",
"self",
".",
"loop_count",
"=",
"0",
"# Daemon start timestamp",
"if",
"self",
".",
"start_time",
"is",
"None",
":",
"self",
".",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"# For the pause duration",
"logger",
".",
"info",
"(",
"\"pause duration: %.2f\"",
",",
"self",
".",
"pause_duration",
")",
"# For the maximum expected loop duration",
"self",
".",
"maximum_loop_duration",
"=",
"1.1",
"*",
"self",
".",
"maximum_loop_duration",
"logger",
".",
"info",
"(",
"\"maximum expected loop duration: %.2f\"",
",",
"self",
".",
"maximum_loop_duration",
")",
"# Treatments before starting the main loop...",
"self",
".",
"do_before_loop",
"(",
")",
"elapsed_time",
"=",
"0",
"logger",
".",
"info",
"(",
"\"starting main loop: %.2f\"",
",",
"self",
".",
"start_time",
")",
"while",
"not",
"self",
".",
"interrupted",
":",
"loop_start_ts",
"=",
"time",
".",
"time",
"(",
")",
"# Maybe someone said we will stop...",
"if",
"self",
".",
"will_stop",
"and",
"not",
"self",
".",
"type",
"==",
"'arbiter'",
":",
"logger",
".",
"debug",
"(",
"\"death-wait mode... waiting for death\"",
")",
"_",
",",
"_",
"=",
"self",
".",
"make_a_pause",
"(",
"1.0",
")",
"continue",
"# Increment loop count",
"self",
".",
"loop_count",
"+=",
"1",
"if",
"self",
".",
"log_loop",
":",
"logger",
".",
"debug",
"(",
"\"--- %d\"",
",",
"self",
".",
"loop_count",
")",
"# Maybe the arbiter pushed a new configuration...",
"if",
"self",
".",
"watch_for_new_conf",
"(",
"timeout",
"=",
"0.05",
")",
":",
"logger",
".",
"info",
"(",
"\"I got a new configuration...\"",
")",
"# Manage the new configuration",
"self",
".",
"setup_new_conf",
"(",
")",
"# Trying to restore our related daemons lost connections",
"for",
"satellite",
"in",
"list",
"(",
"self",
".",
"get_links_of_type",
"(",
"s_type",
"=",
"''",
")",
".",
"values",
"(",
")",
")",
":",
"# Not for configuration disabled satellites",
"if",
"not",
"satellite",
".",
"active",
":",
"continue",
"if",
"not",
"satellite",
".",
"alive",
"and",
"not",
"satellite",
".",
"passive",
":",
"logger",
".",
"info",
"(",
"\"Trying to restore connection for %s/%s...\"",
",",
"satellite",
".",
"type",
",",
"satellite",
".",
"name",
")",
"if",
"self",
".",
"daemon_connection_init",
"(",
"satellite",
")",
":",
"logger",
".",
"info",
"(",
"\"Connection restored\"",
")",
"# Each loop turn, execute the daemon specific treatment...",
"# only if the daemon has a configuration to manage",
"if",
"self",
".",
"have_conf",
":",
"_ts",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"do_loop_turn",
"(",
")",
"statsmgr",
".",
"timer",
"(",
"'loop-turn'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_ts",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"+++ loop %d, I do not have a configuration\"",
",",
"self",
".",
"loop_count",
")",
"if",
"self",
".",
"daemon_monitoring",
"and",
"(",
"self",
".",
"loop_count",
"%",
"self",
".",
"daemon_monitoring_period",
"==",
"1",
")",
":",
"perfdatas",
"=",
"[",
"]",
"my_process",
"=",
"psutil",
".",
"Process",
"(",
")",
"with",
"my_process",
".",
"oneshot",
"(",
")",
":",
"perfdatas",
".",
"append",
"(",
"\"num_threads=%d\"",
"%",
"my_process",
".",
"num_threads",
"(",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.num_threads\"",
",",
"my_process",
".",
"num_threads",
"(",
")",
")",
"# perfdatas.append(\"num_ctx_switches=%d\" % my_process.num_ctx_switches())",
"perfdatas",
".",
"append",
"(",
"\"num_fds=%d\"",
"%",
"my_process",
".",
"num_fds",
"(",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.num_fds\"",
",",
"my_process",
".",
"num_fds",
"(",
")",
")",
"# perfdatas.append(\"num_handles=%d\" % my_process.num_handles())",
"perfdatas",
".",
"append",
"(",
"\"create_time=%d\"",
"%",
"my_process",
".",
"create_time",
"(",
")",
")",
"perfdatas",
".",
"append",
"(",
"\"cpu_num=%d\"",
"%",
"my_process",
".",
"cpu_num",
"(",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.cpu_num\"",
",",
"my_process",
".",
"cpu_num",
"(",
")",
")",
"perfdatas",
".",
"append",
"(",
"\"cpu_usable=%d\"",
"%",
"len",
"(",
"my_process",
".",
"cpu_affinity",
"(",
")",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.cpu_usable\"",
",",
"len",
"(",
"my_process",
".",
"cpu_affinity",
"(",
")",
")",
")",
"perfdatas",
".",
"append",
"(",
"\"cpu_percent=%.2f%%\"",
"%",
"my_process",
".",
"cpu_percent",
"(",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.cpu_percent\"",
",",
"my_process",
".",
"cpu_percent",
"(",
")",
")",
"cpu_times_percent",
"=",
"my_process",
".",
"cpu_times",
"(",
")",
"for",
"key",
"in",
"cpu_times_percent",
".",
"_fields",
":",
"perfdatas",
".",
"append",
"(",
"\"cpu_%s_time=%.2fs\"",
"%",
"(",
"key",
",",
"getattr",
"(",
"cpu_times_percent",
",",
"key",
")",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.cpu_%s_time\"",
"%",
"key",
",",
"getattr",
"(",
"cpu_times_percent",
",",
"key",
")",
")",
"memory",
"=",
"my_process",
".",
"memory_full_info",
"(",
")",
"for",
"key",
"in",
"memory",
".",
"_fields",
":",
"perfdatas",
".",
"append",
"(",
"\"mem_%s=%db\"",
"%",
"(",
"key",
",",
"getattr",
"(",
"memory",
",",
"key",
")",
")",
")",
"statsmgr",
".",
"counter",
"(",
"\"system.mem_%s\"",
"%",
"key",
",",
"getattr",
"(",
"memory",
",",
"key",
")",
")",
"logger",
".",
"debug",
"(",
"\"Daemon %s (%s), pid=%s, ppid=%s, status=%s, cpu/memory|%s\"",
",",
"self",
".",
"name",
",",
"my_process",
".",
"name",
"(",
")",
",",
"my_process",
".",
"pid",
",",
"my_process",
".",
"ppid",
"(",
")",
",",
"my_process",
".",
"status",
"(",
")",
",",
"\" \"",
".",
"join",
"(",
"perfdatas",
")",
")",
"if",
"self",
".",
"activity_log_period",
"and",
"(",
"self",
".",
"loop_count",
"%",
"self",
".",
"activity_log_period",
"==",
"1",
")",
":",
"logger",
".",
"info",
"(",
"\"Daemon %s is living: loop #%s ;)\"",
",",
"self",
".",
"name",
",",
"self",
".",
"loop_count",
")",
"# Maybe the arbiter pushed a new configuration...",
"if",
"self",
".",
"watch_for_new_conf",
"(",
"timeout",
"=",
"0.05",
")",
":",
"logger",
".",
"warning",
"(",
"\"The arbiter pushed a new configuration... \"",
")",
"# Loop end",
"loop_end_ts",
"=",
"time",
".",
"time",
"(",
")",
"loop_duration",
"=",
"loop_end_ts",
"-",
"loop_start_ts",
"pause",
"=",
"self",
".",
"maximum_loop_duration",
"-",
"loop_duration",
"if",
"loop_duration",
">",
"self",
".",
"maximum_loop_duration",
":",
"logger",
".",
"info",
"(",
"\"The %s %s loop exceeded the maximum expected loop duration (%.2f). \"",
"\"The last loop needed %.2f seconds to execute. \"",
"\"You should try to reduce the load on this %s.\"",
",",
"self",
".",
"type",
",",
"self",
".",
"name",
",",
"self",
".",
"maximum_loop_duration",
",",
"loop_duration",
",",
"self",
".",
"type",
")",
"# Make a very very short pause ...",
"pause",
"=",
"0.01",
"# Pause the daemon execution to avoid too much load on the system",
"logger",
".",
"debug",
"(",
"\"Before pause: timeout: %s\"",
",",
"pause",
")",
"work",
",",
"time_changed",
"=",
"self",
".",
"make_a_pause",
"(",
"pause",
")",
"logger",
".",
"debug",
"(",
"\"After pause: %.2f / %.2f, sleep time: %.2f\"",
",",
"work",
",",
"time_changed",
",",
"self",
".",
"sleep_time",
")",
"if",
"work",
">",
"self",
".",
"pause_duration",
":",
"logger",
".",
"warning",
"(",
"\"Too much work during the pause (%.2f out of %.2f)! \"",
"\"The daemon should rest for a while... but one need to change \"",
"\"its code for this. Please log an issue in the project repository!\"",
",",
"work",
",",
"self",
".",
"pause_duration",
")",
"# self.pause_duration += 0.1",
"statsmgr",
".",
"timer",
"(",
"'sleep-time'",
",",
"self",
".",
"sleep_time",
")",
"self",
".",
"sleep_time",
"=",
"0.0",
"# And now, the whole average time spent",
"elapsed_time",
"=",
"loop_end_ts",
"-",
"self",
".",
"start_time",
"if",
"self",
".",
"log_loop",
":",
"logger",
".",
"debug",
"(",
"\"Elapsed time, current loop: %.2f, from start: %.2f (%d loops)\"",
",",
"loop_duration",
",",
"elapsed_time",
",",
"self",
".",
"loop_count",
")",
"statsmgr",
".",
"gauge",
"(",
"'loop-count'",
",",
"self",
".",
"loop_count",
")",
"statsmgr",
".",
"timer",
"(",
"'run-duration'",
",",
"elapsed_time",
")",
"# Maybe someone said we will stop...",
"if",
"self",
".",
"will_stop",
":",
"if",
"self",
".",
"type",
"==",
"'arbiter'",
":",
"self",
".",
"will_stop",
"=",
"False",
"else",
":",
"logger",
".",
"info",
"(",
"\"The arbiter said we will stop soon - go to death-wait mode\"",
")",
"# Maybe someone asked us to die, if so, do it :)",
"if",
"self",
".",
"interrupted",
":",
"logger",
".",
"info",
"(",
"\"Someone asked us to stop now\"",
")",
"continue",
"# If someone asked us a configuration reloading",
"if",
"self",
".",
"need_config_reload",
"and",
"self",
".",
"type",
"==",
"'arbiter'",
":",
"logger",
".",
"warning",
"(",
"\"Someone requested a configuration reload\"",
")",
"logger",
".",
"info",
"(",
"\"Exiting daemon main loop\"",
")",
"return",
"# If someone asked us to dump memory, do it",
"if",
"self",
".",
"need_dump_environment",
":",
"logger",
".",
"debug",
"(",
"'Dumping memory'",
")",
"self",
".",
"dump_environment",
"(",
")",
"self",
".",
"need_dump_environment",
"=",
"False",
"logger",
".",
"info",
"(",
"\"stopped main loop: %.2f\"",
",",
"time",
".",
"time",
"(",
")",
")"
] | Main loop for an Alignak daemon
:return: None | [
"Main",
"loop",
"for",
"an",
"Alignak",
"daemon"
] | python | train |
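The Alignak record above tokenizes a long daemon main loop. As a hedged illustration of the timing pattern it encodes (time each turn, then pause for whatever remains of the maximum expected loop duration, or only very briefly if the turn overran), a minimal standalone sketch — not Alignak's actual API, with all names invented for illustration — could be:

    import time

    def run_main_loop(do_loop_turn, maximum_loop_duration=1.0, should_stop=lambda: False):
        # Count loop turns and remember when the loop started.
        loop_count = 0
        start_time = time.time()
        while not should_stop():
            loop_start = time.time()
            loop_count += 1
            do_loop_turn()
            loop_duration = time.time() - loop_start
            # Sleep for the remainder of the expected loop duration...
            pause = maximum_loop_duration - loop_duration
            if loop_duration > maximum_loop_duration:
                # ...or only a token amount if this turn took too long.
                pause = 0.01
            time.sleep(max(pause, 0.0))
        # Report how many turns ran and the total elapsed time.
        return loop_count, time.time() - start_time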
ihgazni2/edict | edict/edict.py | https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1908-L1929 | def _kvmatrix2d(km,vm):
'''
km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]]
show_kmatrix(km)
vm = [[[222]], ['b']]
show_vmatrix(vm)
d = _kvmatrix2d(km,vm)
'''
d = {}
kmwfs = get_kmwfs(km)
vmwfs = elel.get_wfs(vm)
lngth = vmwfs.__len__()
for i in range(0,lngth):
value = elel.getitem_via_pathlist(vm,vmwfs[i])
cond = elel.is_leaf(value)
if(cond):
_setitem_via_pathlist(d,kmwfs[i],value)
else:
_setdefault_via_pathlist(d,kmwfs[i])
return(d) | [
"def",
"_kvmatrix2d",
"(",
"km",
",",
"vm",
")",
":",
"d",
"=",
"{",
"}",
"kmwfs",
"=",
"get_kmwfs",
"(",
"km",
")",
"vmwfs",
"=",
"elel",
".",
"get_wfs",
"(",
"vm",
")",
"lngth",
"=",
"vmwfs",
".",
"__len__",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"lngth",
")",
":",
"value",
"=",
"elel",
".",
"getitem_via_pathlist",
"(",
"vm",
",",
"vmwfs",
"[",
"i",
"]",
")",
"cond",
"=",
"elel",
".",
"is_leaf",
"(",
"value",
")",
"if",
"(",
"cond",
")",
":",
"_setitem_via_pathlist",
"(",
"d",
",",
"kmwfs",
"[",
"i",
"]",
",",
"value",
")",
"else",
":",
"_setdefault_via_pathlist",
"(",
"d",
",",
"kmwfs",
"[",
"i",
"]",
")",
"return",
"(",
"d",
")"
] | km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]]
show_kmatrix(km)
vm = [[[222]], ['b']]
show_vmatrix(vm)
d = _kvmatrix2d(km,vm) | [
"km",
"=",
"[[[",
"1",
"]",
"[",
"3",
"]]",
"[[",
"1",
"2",
"]",
"[",
"3",
"a",
"]]",
"[[",
"1",
"2",
"22",
"]]]",
"show_kmatrix",
"(",
"km",
")",
"vm",
"=",
"[[[",
"222",
"]]",
"[",
"b",
"]]",
"show_vmatrix",
"(",
"vm",
")",
"d",
"=",
"_kvmatrix2d",
"(",
"km",
"vm",
")"
] | python | train |
common-workflow-language/workflow-service | wes_client/util.py | https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L27-L38 | def get_version(extension, workflow_file):
'''Determines the version of a .py, .wdl, or .cwl file.'''
if extension == 'py' and two_seven_compatible(workflow_file):
return '2.7'
elif extension == 'cwl':
return yaml.load(open(workflow_file))['cwlVersion']
else: # Must be a wdl file.
# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142
try:
return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0]
except IndexError:
return 'draft-2' | [
"def",
"get_version",
"(",
"extension",
",",
"workflow_file",
")",
":",
"if",
"extension",
"==",
"'py'",
"and",
"two_seven_compatible",
"(",
"workflow_file",
")",
":",
"return",
"'2.7'",
"elif",
"extension",
"==",
"'cwl'",
":",
"return",
"yaml",
".",
"load",
"(",
"open",
"(",
"workflow_file",
")",
")",
"[",
"'cwlVersion'",
"]",
"else",
":",
"# Must be a wdl file.",
"# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142",
"try",
":",
"return",
"[",
"l",
".",
"lstrip",
"(",
"'version'",
")",
"for",
"l",
"in",
"workflow_file",
".",
"splitlines",
"(",
")",
"if",
"'version'",
"in",
"l",
".",
"split",
"(",
"' '",
")",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"return",
"'draft-2'"
] | Determines the version of a .py, .wdl, or .cwl file. | [
"Determines",
"the",
"version",
"of",
"a",
".",
"py",
".",
"wdl",
"or",
".",
"cwl",
"file",
"."
] | python | train |
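A small usage sketch for the get_version helper above; the file name is hypothetical and assumes a CWL workflow on disk whose YAML declares a cwlVersion key:

    # Hypothetical file: workflow.cwl containing a top-level "cwlVersion: v1.0" entry.
    version = get_version('cwl', 'workflow.cwl')
    print(version)  # -> 'v1.0'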
Kaggle/kaggle-api | kaggle/api/kaggle_api.py | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api.py#L1070-L1091 | def datasets_create_version(self, owner_slug, dataset_slug, dataset_new_version_request, **kwargs): # noqa: E501
"""Create a new dataset version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_version(owner_slug, dataset_slug, dataset_new_version_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_slug: Dataset owner (required)
:param str dataset_slug: Dataset name (required)
:param DatasetNewVersionRequest dataset_new_version_request: Information for creating a new dataset version (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.datasets_create_version_with_http_info(owner_slug, dataset_slug, dataset_new_version_request, **kwargs) # noqa: E501
else:
(data) = self.datasets_create_version_with_http_info(owner_slug, dataset_slug, dataset_new_version_request, **kwargs) # noqa: E501
return data | [
"def",
"datasets_create_version",
"(",
"self",
",",
"owner_slug",
",",
"dataset_slug",
",",
"dataset_new_version_request",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"datasets_create_version_with_http_info",
"(",
"owner_slug",
",",
"dataset_slug",
",",
"dataset_new_version_request",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"datasets_create_version_with_http_info",
"(",
"owner_slug",
",",
"dataset_slug",
",",
"dataset_new_version_request",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Create a new dataset version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_version(owner_slug, dataset_slug, dataset_new_version_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_slug: Dataset owner (required)
:param str dataset_slug: Dataset name (required)
:param DatasetNewVersionRequest dataset_new_version_request: Information for creating a new dataset version (required)
:return: Result
If the method is called asynchronously,
returns the request thread. | [
"Create",
"a",
"new",
"dataset",
"version",
"#",
"noqa",
":",
"E501"
] | python | train |
googleapis/google-cloud-python | api_core/google/api_core/future/polling.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/future/polling.py#L145-L166 | def add_done_callback(self, fn):
"""Add a callback to be executed when the operation is complete.
If the operation is not already complete, this will start a helper
thread to poll for the status of the operation in the background.
Args:
fn (Callable[Future]): The callback to execute when the operation
is complete.
"""
if self._result_set:
_helpers.safe_invoke_callback(fn, self)
return
self._done_callbacks.append(fn)
if self._polling_thread is None:
# The polling thread will exit on its own as soon as the operation
# is done.
self._polling_thread = _helpers.start_daemon_thread(
target=self._blocking_poll
) | [
"def",
"add_done_callback",
"(",
"self",
",",
"fn",
")",
":",
"if",
"self",
".",
"_result_set",
":",
"_helpers",
".",
"safe_invoke_callback",
"(",
"fn",
",",
"self",
")",
"return",
"self",
".",
"_done_callbacks",
".",
"append",
"(",
"fn",
")",
"if",
"self",
".",
"_polling_thread",
"is",
"None",
":",
"# The polling thread will exit on its own as soon as the operation",
"# is done.",
"self",
".",
"_polling_thread",
"=",
"_helpers",
".",
"start_daemon_thread",
"(",
"target",
"=",
"self",
".",
"_blocking_poll",
")"
] | Add a callback to be executed when the operation is complete.
If the operation is not already complete, this will start a helper
thread to poll for the status of the operation in the background.
Args:
fn (Callable[Future]): The callback to execute when the operation
is complete. | [
"Add",
"a",
"callback",
"to",
"be",
"executed",
"when",
"the",
"operation",
"is",
"complete",
"."
] | python | train |
goshuirc/irc | girc/imapping.py | https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/imapping.py#L42-L57 | def set_std(self, std):
"""Set the standard we'll be using (isupport CASEMAPPING)."""
if not hasattr(self, '_std'):
IMap.__init__(self)
# translation based on std
self._std = std.lower()
# set casemapping maps
self._set_transmaps()
# create translations
if self._lower_chars:
self._lower_trans = str.maketrans(self._upper_chars, self._lower_chars)
if self._upper_chars:
self._upper_trans = str.maketrans(self._lower_chars, self._upper_chars) | [
"def",
"set_std",
"(",
"self",
",",
"std",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_std'",
")",
":",
"IMap",
".",
"__init__",
"(",
"self",
")",
"# translation based on std",
"self",
".",
"_std",
"=",
"std",
".",
"lower",
"(",
")",
"# set casemapping maps",
"self",
".",
"_set_transmaps",
"(",
")",
"# create translations",
"if",
"self",
".",
"_lower_chars",
":",
"self",
".",
"_lower_trans",
"=",
"str",
".",
"maketrans",
"(",
"self",
".",
"_upper_chars",
",",
"self",
".",
"_lower_chars",
")",
"if",
"self",
".",
"_upper_chars",
":",
"self",
".",
"_upper_trans",
"=",
"str",
".",
"maketrans",
"(",
"self",
".",
"_lower_chars",
",",
"self",
".",
"_upper_chars",
")"
] | Set the standard we'll be using (isupport CASEMAPPING). | [
"Set",
"the",
"standard",
"we",
"ll",
"be",
"using",
"(",
"isupport",
"CASEMAPPING",
")",
"."
] | python | train |
gmr/remy | remy/cli.py | https://github.com/gmr/remy/blob/74368ae74e3f2b59376d6f8e457aefbe9c7debdf/remy/cli.py#L64-L90 | def add_jenkins_job_options(parser):
"""Add a new job to Jenkins for updating chef-repo
:rtype: argparse.ArgumentParser
"""
cookbook = parser.add_parser('jenkins', help='Add a new cookbook job to '
'Jenkins')
cookbook.add_argument('jenkins', action='store',
help='The jenkins server hostname')
cookbook.add_argument('name', action='store',
help='The cookbook name')
cookbook.add_argument('cookbook', action='store',
help='The cookbook git repository URL')
cookbook.add_argument('chef_repo', action='store',
help='The chef-repo git repository URL')
cookbook.add_argument('-u', '--username',
action='store',
dest='username',
default=pwd.getpwuid(os.getuid())[0],
help='Specify a different username than the repo '
'owner')
cookbook.add_argument('-n', '--hipchat-notification',
action='store',
dest='hipchat',
help='Hipchat room for notifications')
cookbook.set_defaults(func='new_job') | [
"def",
"add_jenkins_job_options",
"(",
"parser",
")",
":",
"cookbook",
"=",
"parser",
".",
"add_parser",
"(",
"'jenkins'",
",",
"help",
"=",
"'Add a new cookbook job to '",
"'Jenkins'",
")",
"cookbook",
".",
"add_argument",
"(",
"'jenkins'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'The jenkins server hostname'",
")",
"cookbook",
".",
"add_argument",
"(",
"'name'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'The cookbook name'",
")",
"cookbook",
".",
"add_argument",
"(",
"'cookbook'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'The cookbook git repository URL'",
")",
"cookbook",
".",
"add_argument",
"(",
"'chef_repo'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'The chef-repo git repository URL'",
")",
"cookbook",
".",
"add_argument",
"(",
"'-u'",
",",
"'--username'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'username'",
",",
"default",
"=",
"pwd",
".",
"getpwuid",
"(",
"os",
".",
"getuid",
"(",
")",
")",
"[",
"0",
"]",
",",
"help",
"=",
"'Specify a different username than the repo '",
"'owner'",
")",
"cookbook",
".",
"add_argument",
"(",
"'-n'",
",",
"'--hipchat-notification'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'hipchat'",
",",
"help",
"=",
"'Hipchat room for notifications'",
")",
"cookbook",
".",
"set_defaults",
"(",
"func",
"=",
"'new_job'",
")"
] | Add a new job to Jenkins for updating chef-repo
:rtype: argparse.ArgumentParser | [
"Add",
"a",
"new",
"job",
"to",
"Jenkins",
"for",
"updating",
"chef",
"-",
"repo"
] | python | train |
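Despite its parameter name, the helper above calls parser.add_parser, so it expects the object returned by add_subparsers(). A minimal wiring sketch (all command-line values below are made up):

    import argparse

    parser = argparse.ArgumentParser(prog='remy')
    subparsers = parser.add_subparsers(dest='subcommand')
    add_jenkins_job_options(subparsers)
    args = parser.parse_args(['jenkins', 'ci.example.com', 'mycookbook',
                              'git@example.com:cookbooks/mycookbook.git',
                              'git@example.com:chef-repo.git'])
    # args.func == 'new_job', args.jenkins == 'ci.example.com', args.name == 'mycookbook'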
developmentseed/landsat-util | landsat/landsat.py | https://github.com/developmentseed/landsat-util/blob/92dc81771ddaa64a8a9124a89a6516b52485374b/landsat/landsat.py#L166-L273 | def args_options():
""" Generates an argument parser.
:returns:
Parser object
"""
parser = argparse.ArgumentParser(prog='landsat',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION))
subparsers = parser.add_subparsers(help='Landsat Utility',
dest='subs')
parser.add_argument('--version', action='version', version='%(prog)s version ' + __version__)
# Search Logic
parser_search = subparsers.add_parser('search',
help='Search Landsat metadata')
# Global search options
parser_search.add_argument('-l', '--limit', default=10, type=int,
help='Search return results limit\n'
'default is 10')
parser_search.add_argument('-s', '--start',
help='Start Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-e', '--end',
help='End Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('--latest', default=-1, type=int,
help='returns the N latest images within the last 365 days')
parser_search.add_argument('-c', '--cloud', type=float, default=100.0,
help='Maximum cloud percentage '
'default is 100 perct')
parser_search.add_argument('-p', '--pathrow',
help='Paths and Rows in order separated by comma. Use quotes ("001").'
'Example: path,row,path,row 001,001,190,204')
parser_search.add_argument('--lat', type=float, help='The latitude')
parser_search.add_argument('--lon', type=float, help='The longitude')
parser_search.add_argument('--address', type=str, help='The address')
parser_search.add_argument('--json', action='store_true', help='Returns a bare JSON response')
parser_search.add_argument('--geojson', action='store_true', help='Returns a geojson response')
parser_download = subparsers.add_parser('download',
help='Download images from Google Storage')
parser_download.add_argument('scenes',
metavar='sceneID',
nargs="+",
help="Provide Full sceneID, e.g. LC81660392014196LGN00")
parser_download.add_argument('-b', '--bands', help='If you specify bands, landsat-util will try to download '
'the band from S3. If the band does not exist, an error is returned', default=None)
parser_download.add_argument('-d', '--dest', help='Destination path')
parser_download.add_argument('-p', '--process', help='Process the image after download', action='store_true')
parser_download.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pansharpening requires larger memory')
parser_download.add_argument('--ndvi', action='store_true',
help='Whether to run the NDVI process. If used, bands parameter is disregarded')
parser_download.add_argument('--ndvigrey', action='store_true', help='Create an NDVI map in grayscale (grey)')
parser_download.add_argument('--clip', help='Clip the image with the bounding box provided. Values must be in ' +
'WGS84 datum, and with longitude and latitude units of decimal degrees ' +
'separated by comma.' +
'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,' +
'50.2682767372753')
parser_download.add_argument('-u', '--upload', action='store_true',
help='Upload to S3 after the image processing completed')
parser_download.add_argument('--username', help='USGS Eros account Username (only works if the account has' +
' special inventory access). Username and password as a fallback if the image' +
'is not found on AWS S3 or Google Storage')
parser_download.add_argument('--password', help='USGS Eros username, used as a fallback')
parser_download.add_argument('--key', help='Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as '
'Environment Variables)')
parser_download.add_argument('--secret', help='Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY '
'as Environment Variables)')
parser_download.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
parser_download.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
parser_download.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
parser_process = subparsers.add_parser('process', help='Process Landsat imagery')
parser_process.add_argument('path',
help='Path to the compressed image file')
parser_process.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pansharpening requires larger memory')
parser_process.add_argument('--ndvi', action='store_true', help='Create an NDVI map in color.')
parser_process.add_argument('--ndvigrey', action='store_true', help='Create an NDVI map in grayscale (grey)')
parser_process.add_argument('--clip', help='Clip the image with the bounding box provided. Values must be in ' +
'WGS84 datum, and with longitude and latitude units of decimal degrees ' +
'separated by comma.' +
'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,' +
'50.2682767372753')
parser_process.add_argument('-b', '--bands', help='specify band combinations. Default is 432'
'Example: --bands 321', default='432')
parser_process.add_argument('-v', '--verbose', action='store_true',
help='Turn on verbosity')
parser_process.add_argument('-u', '--upload', action='store_true',
help='Upload to S3 after the image processing completed')
parser_process.add_argument('--key', help='Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as '
'Environment Variables)')
parser_process.add_argument('--secret', help='Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY '
'as Environment Variables)')
parser_process.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
parser_process.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
parser_process.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
return parser | [
"def",
"args_options",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'landsat'",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
",",
"description",
"=",
"textwrap",
".",
"dedent",
"(",
"DESCRIPTION",
")",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"help",
"=",
"'Landsat Utility'",
",",
"dest",
"=",
"'subs'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'%(prog)s version '",
"+",
"__version__",
")",
"# Search Logic",
"parser_search",
"=",
"subparsers",
".",
"add_parser",
"(",
"'search'",
",",
"help",
"=",
"'Search Landsat metadata'",
")",
"# Global search options",
"parser_search",
".",
"add_argument",
"(",
"'-l'",
",",
"'--limit'",
",",
"default",
"=",
"10",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Search return results limit\\n'",
"'default is 10'",
")",
"parser_search",
".",
"add_argument",
"(",
"'-s'",
",",
"'--start'",
",",
"help",
"=",
"'Start Date - Most formats are accepted '",
"'e.g. Jun 12 2014 OR 06/12/2014'",
")",
"parser_search",
".",
"add_argument",
"(",
"'-e'",
",",
"'--end'",
",",
"help",
"=",
"'End Date - Most formats are accepted '",
"'e.g. Jun 12 2014 OR 06/12/2014'",
")",
"parser_search",
".",
"add_argument",
"(",
"'--latest'",
",",
"default",
"=",
"-",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'returns the N latest images within the last 365 days'",
")",
"parser_search",
".",
"add_argument",
"(",
"'-c'",
",",
"'--cloud'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"100.0",
",",
"help",
"=",
"'Maximum cloud percentage '",
"'default is 100 perct'",
")",
"parser_search",
".",
"add_argument",
"(",
"'-p'",
",",
"'--pathrow'",
",",
"help",
"=",
"'Paths and Rows in order separated by comma. Use quotes (\"001\").'",
"'Example: path,row,path,row 001,001,190,204'",
")",
"parser_search",
".",
"add_argument",
"(",
"'--lat'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'The latitude'",
")",
"parser_search",
".",
"add_argument",
"(",
"'--lon'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'The longitude'",
")",
"parser_search",
".",
"add_argument",
"(",
"'--address'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'The address'",
")",
"parser_search",
".",
"add_argument",
"(",
"'--json'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Returns a bare JSON response'",
")",
"parser_search",
".",
"add_argument",
"(",
"'--geojson'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Returns a geojson response'",
")",
"parser_download",
"=",
"subparsers",
".",
"add_parser",
"(",
"'download'",
",",
"help",
"=",
"'Download images from Google Storage'",
")",
"parser_download",
".",
"add_argument",
"(",
"'scenes'",
",",
"metavar",
"=",
"'sceneID'",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Provide Full sceneID, e.g. LC81660392014196LGN00\"",
")",
"parser_download",
".",
"add_argument",
"(",
"'-b'",
",",
"'--bands'",
",",
"help",
"=",
"'If you specify bands, landsat-util will try to download '",
"'the band from S3. If the band does not exist, an error is returned'",
",",
"default",
"=",
"None",
")",
"parser_download",
".",
"add_argument",
"(",
"'-d'",
",",
"'--dest'",
",",
"help",
"=",
"'Destination path'",
")",
"parser_download",
".",
"add_argument",
"(",
"'-p'",
",",
"'--process'",
",",
"help",
"=",
"'Process the image after download'",
",",
"action",
"=",
"'store_true'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--pansharpen'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Whether to also pansharpen the process '",
"'image. Pansharpening requires larger memory'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--ndvi'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Whether to run the NDVI process. If used, bands parameter is disregarded'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--ndvigrey'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Create an NDVI map in grayscale (grey)'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--clip'",
",",
"help",
"=",
"'Clip the image with the bounding box provided. Values must be in '",
"+",
"'WGS84 datum, and with longitude and latitude units of decimal degrees '",
"+",
"'separated by comma.'",
"+",
"'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,'",
"+",
"'50.2682767372753'",
")",
"parser_download",
".",
"add_argument",
"(",
"'-u'",
",",
"'--upload'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Upload to S3 after the image processing completed'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--username'",
",",
"help",
"=",
"'USGS Eros account Username (only works if the account has'",
"+",
"' special inventory access). Username and password as a fallback if the image'",
"+",
"'is not found on AWS S3 or Google Storage'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--password'",
",",
"help",
"=",
"'USGS Eros username, used as a fallback'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--key'",
",",
"help",
"=",
"'Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as '",
"'Environment Variables)'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--secret'",
",",
"help",
"=",
"'Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY '",
"'as Environment Variables)'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--bucket'",
",",
"help",
"=",
"'Bucket name (required if uploading to s3)'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--region'",
",",
"help",
"=",
"'URL to S3 region e.g. s3-us-west-2.amazonaws.com'",
")",
"parser_download",
".",
"add_argument",
"(",
"'--force-unzip'",
",",
"help",
"=",
"'Force unzip tar file'",
",",
"action",
"=",
"'store_true'",
")",
"parser_process",
"=",
"subparsers",
".",
"add_parser",
"(",
"'process'",
",",
"help",
"=",
"'Process Landsat imagery'",
")",
"parser_process",
".",
"add_argument",
"(",
"'path'",
",",
"help",
"=",
"'Path to the compressed image file'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--pansharpen'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Whether to also pansharpen the process '",
"'image. Pansharpening requires larger memory'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--ndvi'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Create an NDVI map in color.'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--ndvigrey'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Create an NDVI map in grayscale (grey)'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--clip'",
",",
"help",
"=",
"'Clip the image with the bounding box provided. Values must be in '",
"+",
"'WGS84 datum, and with longitude and latitude units of decimal degrees '",
"+",
"'separated by comma.'",
"+",
"'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,'",
"+",
"'50.2682767372753'",
")",
"parser_process",
".",
"add_argument",
"(",
"'-b'",
",",
"'--bands'",
",",
"help",
"=",
"'specify band combinations. Default is 432'",
"'Example: --bands 321'",
",",
"default",
"=",
"'432'",
")",
"parser_process",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Turn on verbosity'",
")",
"parser_process",
".",
"add_argument",
"(",
"'-u'",
",",
"'--upload'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Upload to S3 after the image processing completed'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--key'",
",",
"help",
"=",
"'Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as '",
"'Environment Variables)'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--secret'",
",",
"help",
"=",
"'Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY '",
"'as Environment Variables)'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--bucket'",
",",
"help",
"=",
"'Bucket name (required if uploading to s3)'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--region'",
",",
"help",
"=",
"'URL to S3 region e.g. s3-us-west-2.amazonaws.com'",
")",
"parser_process",
".",
"add_argument",
"(",
"'--force-unzip'",
",",
"help",
"=",
"'Force unzip tar file'",
",",
"action",
"=",
"'store_true'",
")",
"return",
"parser"
] | Generates an argument parser.
:returns:
Parser object | [
"Generates",
"an",
"argument",
"parser",
"."
] | python | train |
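A short usage sketch for the parser factory above (the coordinates and cloud limit are arbitrary example values):

    parser = args_options()
    args = parser.parse_args(['search', '--lat', '38.9', '--lon', '-77.03', '--cloud', '20'])
    print(args.subs, args.lat, args.lon, args.cloud)  # -> search 38.9 -77.03 20.0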
myint/rstcheck | rstcheck.py | https://github.com/myint/rstcheck/blob/2f975906b75f3b88d501ef3b13d213815cf7079a/rstcheck.py#L949-L985 | def main():
"""Return 0 on success."""
args = parse_args()
if not args.files:
return 0
with enable_sphinx_if_possible():
status = 0
pool = multiprocessing.Pool(multiprocessing.cpu_count())
try:
if len(args.files) > 1:
results = pool.map(
_check_file,
[(name, args) for name in args.files])
else:
# This is for the case where we read from standard in.
results = [_check_file((args.files[0], args))]
for (filename, errors) in results:
for error in errors:
line_number = error[0]
message = error[1]
if not re.match(r'\([A-Z]+/[0-9]+\)', message):
message = '(ERROR/3) ' + message
output_message('{}:{}: {}'.format(filename,
line_number,
message))
status = 1
except (IOError, UnicodeError) as exception:
output_message(exception)
status = 1
return status | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"not",
"args",
".",
"files",
":",
"return",
"0",
"with",
"enable_sphinx_if_possible",
"(",
")",
":",
"status",
"=",
"0",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
"try",
":",
"if",
"len",
"(",
"args",
".",
"files",
")",
">",
"1",
":",
"results",
"=",
"pool",
".",
"map",
"(",
"_check_file",
",",
"[",
"(",
"name",
",",
"args",
")",
"for",
"name",
"in",
"args",
".",
"files",
"]",
")",
"else",
":",
"# This is for the case where we read from standard in.",
"results",
"=",
"[",
"_check_file",
"(",
"(",
"args",
".",
"files",
"[",
"0",
"]",
",",
"args",
")",
")",
"]",
"for",
"(",
"filename",
",",
"errors",
")",
"in",
"results",
":",
"for",
"error",
"in",
"errors",
":",
"line_number",
"=",
"error",
"[",
"0",
"]",
"message",
"=",
"error",
"[",
"1",
"]",
"if",
"not",
"re",
".",
"match",
"(",
"r'\\([A-Z]+/[0-9]+\\)'",
",",
"message",
")",
":",
"message",
"=",
"'(ERROR/3) '",
"+",
"message",
"output_message",
"(",
"'{}:{}: {}'",
".",
"format",
"(",
"filename",
",",
"line_number",
",",
"message",
")",
")",
"status",
"=",
"1",
"except",
"(",
"IOError",
",",
"UnicodeError",
")",
"as",
"exception",
":",
"output_message",
"(",
"exception",
")",
"status",
"=",
"1",
"return",
"status"
] | Return 0 on success. | [
"Return",
"0",
"on",
"success",
"."
] | python | train |
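The function above is the module's console entry point; a minimal invocation sketch:

    import sys

    if __name__ == '__main__':
        sys.exit(main())  # exit status 0 when every file passes, 1 if any error was reported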