| column | dtype | range |
|---|---|---|
| text | string | lengths 89-104k |
| code_tokens | list | token strings |
| avg_line_len | float64 | 7.91-980 |
| score | float64 | 0-630 |
def weave_module(module, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
"""
Low-level weaver for "whole module weaving".
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
"""
if bag.has(module):
return Nothing
entanglement = Rollback()
method_matches = make_method_matcher(methods)
logdebug("weave_module (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
module, aspect, methods, lazy, options)
for attr in dir(module):
func = getattr(module, attr)
if method_matches(attr):
if isroutine(func):
entanglement.merge(patch_module_function(module, func, aspect, force_name=attr, **options))
elif isclass(func):
entanglement.merge(
weave_class(func, aspect, owner=module, name=attr, methods=methods, lazy=lazy, bag=bag, **options),
# it's not consistent with the other ways of weaving a class (it's never weaved as a routine).
# therefore it's disabled until it's considered useful.
# #patch_module_function(module, getattr(module, attr), aspect, force_name=attr, **options),
)
return entanglement
(avg_line_len: 44.172414, score: 26.655172)
def _opts_to_dict(*opts):
'''Convert a tuple of options returned from getopt into a dictionary.'''
ret = {}
for key, val in opts:
if key[:2] == '--':
key = key[2:]
elif key[:1] == '-':
key = key[1:]
if val == '':
val = True
ret[key.replace('-','_')] = val
return ret
(avg_line_len: 23, score: 22.5)
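A minimal usage sketch of `_opts_to_dict` (the option names and `getopt` spec below are illustrative, not from the source):

import getopt

# Parse a hypothetical argv: one short flag, one long flag, one long option with a value.
opts, args = getopt.getopt(
    ['--dry-run', '-v', '--output', 'out.txt'],
    'v', ['dry-run', 'output='])
# opts == [('--dry-run', ''), ('-v', ''), ('--output', 'out.txt')]
print(_opts_to_dict(*opts))
# {'dry_run': True, 'v': True, 'output': 'out.txt'}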
def pancake_sort(arr):
"""
    Pancake sort: sorts the given array in place.
    A mutation of selection sort.
    Reference: https://www.geeksforgeeks.org/pancake-sorting/
    Overall time complexity: O(N^2)
"""
len_arr = len(arr)
if len_arr <= 1:
return arr
for cur in range(len(arr), 1, -1):
#Finding index of maximum number in arr
index_max = arr.index(max(arr[0:cur]))
if index_max+1 != cur:
#Needs moving
if index_max != 0:
#reverse from 0 to index_max
arr[:index_max+1] = reversed(arr[:index_max+1])
# Reverse list
arr[:cur] = reversed(arr[:cur])
return arr
(avg_line_len: 26.96, score: 15.52)
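A quick check of the sort on a sample list (input is illustrative); note the list is sorted in place and also returned:

data = [3, 1, 4, 1, 5, 9, 2, 6]
result = pancake_sort(data)
print(result)           # [1, 1, 2, 3, 4, 5, 6, 9]
print(result is data)   # True: sorted in place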
def send_game(self, chat_id, game_short_name, disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
Use this method to send a game. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendgame
Parameters:
:param chat_id: Unique identifier for the target chat
:type chat_id: int
:param game_short_name: Short name of the game, serves as the unique identifier for the game. Set up your games via Botfather.
:type game_short_name: str|unicode
Optional keyword parameters:
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: int
:param reply_markup: A JSON-serialized object for an inline keyboard. If empty, one ‘Play game_title’ button will be shown. If not empty, the first button must launch the game.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup
Returns:
:return: On success, the sent Message is returned
:rtype: pytgbot.api_types.receivable.updates.Message
"""
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
assert_type_or_raise(chat_id, int, parameter_name="chat_id")
assert_type_or_raise(game_short_name, unicode_type, parameter_name="game_short_name")
assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
assert_type_or_raise(reply_to_message_id, None, int, parameter_name="reply_to_message_id")
assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, parameter_name="reply_markup")
result = self.do("sendGame", chat_id=chat_id, game_short_name=game_short_name, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.updates import Message
try:
return Message.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type Message", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result
(avg_line_len: 47.666667, score: 34.403509)
def _TypecheckFunction(function, parent_type_check_dict, stack_location,
self_name):
"""Decorator function to collect and execute type checks."""
type_check_dict = _CollectTypeChecks(function, parent_type_check_dict,
stack_location + 1, self_name)
if not type_check_dict:
return function
def TypecheckWrapper(*args, **kwargs):
arg_dict = _CollectArguments(function, args, kwargs)
errors = _ValidateArguments(arg_dict, type_check_dict)
if errors:
raise TypeError("\n".join(errors))
return_value = function(*args, **kwargs)
errors = _ValidateReturnValue(return_value, type_check_dict)
if errors:
raise TypeError("\n".join(errors))
return return_value
TypecheckWrapper.__doc__ = function.__doc__
TypecheckWrapper.__name__ = function.__name__
TypecheckWrapper.type_check_dict = type_check_dict
TypecheckWrapper.wrapped_function = function
return TypecheckWrapper
(avg_line_len: 35.518519, score: 18.925926)
def _collapse_default(self, entry):
"""Collapses the list structure in entry to a single string representing the default
value assigned to a variable or its dimensions.
"""
if isinstance(entry, tuple) or isinstance(entry, list):
sets = []
i = 0
while i < len(entry):
if isinstance(entry[i], str) and i+1 < len(entry) and isinstance(entry[i+1], list):
sets.append((entry[i], entry[i+1]))
i += 2
elif isinstance(entry[i], str) and entry[i] == ",":
i += 1
else:
sets.append((entry[i],))
i += 1
result = []
for s in sets:
if isinstance(s[0], str):
name = s[0].strip(",")
elif len(s) == 1:
name = self._collapse_default(s[0])
if len(s) > 1:
args = self._collapse_default(s[1])
else:
args = []
if len(args) > 0:
result.append("{}({})".format(name, args))
else:
result.append(name)
return ', '.join(result)
else:
if "," in entry:
return entry.split(",")[0].strip()
else:
return entry.strip()
(avg_line_len: 35.923077, score: 15.333333)
def fail_if_publish_binary_not_installed(binary, publish_target, install_link):
"""Exit (with error message) if ``binary` isn't installed"""
if not shutil.which(binary):
click.secho(
"Publishing to {publish_target} requires {binary} to be installed and configured".format(
publish_target=publish_target, binary=binary
),
bg="red",
fg="white",
bold=True,
err=True,
)
click.echo(
"Follow the instructions at {install_link}".format(
install_link=install_link
),
err=True,
)
sys.exit(1)
(avg_line_len: 34.315789, score: 22.105263)
def _handle_dict_config(self, log_config):
"""Recursively walks and copies the `log_config` dict and searches for filenames.
Translates filenames and creates directories if necessary.
"""
new_dict = dict()
for key in log_config.keys():
if key == 'filename':
filename = log_config[key]
filename = rename_log_file(filename,
env_name=self.env_name,
traj_name=self.traj_name,
set_name=self.set_name,
run_name=self.run_name)
new_dict[key] = filename
try_make_dirs(filename)
elif isinstance(log_config[key], dict):
inner_dict = self._handle_dict_config(log_config[key])
new_dict[key] = inner_dict
else:
new_dict[key] = log_config[key]
return new_dict
(avg_line_len: 43.130435, score: 14.956522)
def logger(self, logger: typing.Union[logging.Logger, str, None]) -> None:
"""Logger instance to use as override."""
if logger is None or isinstance(logger, logging.Logger):
self.__logger = logger
else:
self.__logger = logging.getLogger(logger)
(avg_line_len: 47.833333, score: 17.333333)
def etree_to_text(tree,
guess_punct_space=True,
guess_layout=True,
newline_tags=NEWLINE_TAGS,
double_newline_tags=DOUBLE_NEWLINE_TAGS):
"""
    Convert an HTML tree to text. Tree should be cleaned with
``html_text.html_text.cleaner.clean_html`` before passing to this
function.
See html_text.extract_text docstring for description of the
approach and options.
"""
chunks = []
_NEWLINE = object()
_DOUBLE_NEWLINE = object()
class Context:
""" workaround for missing `nonlocal` in Python 2 """
# _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str)
prev = _DOUBLE_NEWLINE
def should_add_space(text, prev):
""" Return True if extra whitespace should be added before text """
if prev in {_NEWLINE, _DOUBLE_NEWLINE}:
return False
if not _has_trailing_whitespace(prev):
if _has_punct_after(text) or _has_open_bracket_before(prev):
return False
return True
def get_space_between(text, prev):
if not text or not guess_punct_space:
return ' '
return ' ' if should_add_space(text, prev) else ''
def add_newlines(tag, context):
if not guess_layout:
return
prev = context.prev
if prev is _DOUBLE_NEWLINE: # don't output more than 1 blank line
return
if tag in double_newline_tags:
context.prev = _DOUBLE_NEWLINE
chunks.append('\n' if prev is _NEWLINE else '\n\n')
elif tag in newline_tags:
context.prev = _NEWLINE
if prev is not _NEWLINE:
chunks.append('\n')
def add_text(text_content, context):
text = _normalize_whitespace(text_content) if text_content else ''
if not text:
return
space = get_space_between(text, context.prev)
chunks.extend([space, text])
context.prev = text_content
def traverse_text_fragments(tree, context, handle_tail=True):
""" Extract text from the ``tree``: fill ``chunks`` variable """
add_newlines(tree.tag, context)
add_text(tree.text, context)
for child in tree:
traverse_text_fragments(child, context)
add_newlines(tree.tag, context)
if handle_tail:
add_text(tree.tail, context)
traverse_text_fragments(tree, context=Context(), handle_tail=False)
return ''.join(chunks).strip()
(avg_line_len: 34.690141, score: 16.070423)
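A minimal usage sketch, assuming the function's module-level helpers (`_normalize_whitespace`, `NEWLINE_TAGS`, `DOUBLE_NEWLINE_TAGS`, ...) are in scope and lxml is installed:

import lxml.html

tree = lxml.html.fromstring('<p>Hello</p><p>world!</p>')
# 'p' is a double-newline tag in html_text, so the paragraphs are
# separated by a blank line in the output.
print(repr(etree_to_text(tree)))  # 'Hello\n\nworld!'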
def subclasses(cls):
"""Return a set of all Ent subclasses, recursively."""
seen = set()
queue = set([cls])
while queue:
c = queue.pop()
seen.add(c)
sc = c.__subclasses__()
for c in sc:
if c not in seen:
queue.add(c)
seen.remove(cls)
return seen
(avg_line_len: 22.8125, score: 18.5625)
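A small illustration with a throwaway hierarchy (the classes are stand-ins):

class Ent: pass
class Animal(Ent): pass
class Dog(Animal): pass

print(subclasses(Ent) == {Animal, Dog})  # True: cls itself is excluded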
def adapter(data, headers, table_format=None, **kwargs):
"""Wrap terminaltables inside a function for TabularOutputFormatter."""
keys = ('title', )
table = table_format_handler[table_format]
t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))
dimensions = terminaltables.width_and_alignment.max_dimensions(
t.table_data,
t.padding_left,
t.padding_right)[:3]
for r in t.gen_table(*dimensions):
yield u''.join(r)
(avg_line_len: 33.857143, score: 20.357143)
def map_sequence(stmts_in, **kwargs):
"""Map sequences using the SiteMapper.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to map.
do_methionine_offset : boolean
Whether to check for off-by-one errors in site position (possibly)
attributable to site numbering from mature proteins after
cleavage of the initial methionine. If True, checks the reference
sequence for a known modification at 1 site position greater
than the given one; if there exists such a site, creates the
mapping. Default is True.
do_orthology_mapping : boolean
Whether to check sequence positions for known modification sites
in mouse or rat sequences (based on PhosphoSitePlus data). If a
mouse/rat site is found that is linked to a site in the human
reference sequence, a mapping is created. Default is True.
do_isoform_mapping : boolean
Whether to check sequence positions for known modifications
in other human isoforms of the protein (based on PhosphoSitePlus
data). If a site is found that is linked to a site in the human
reference sequence, a mapping is created. Default is True.
use_cache : boolean
        If True, a cache will be created/used from the location specified by
SITEMAPPER_CACHE_PATH, defined in your INDRA config or the environment.
If False, no cache is used. For more details on the cache, see the
SiteMapper class definition.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of mapped statements.
"""
from indra.preassembler.sitemapper import SiteMapper, default_site_map
logger.info('Mapping sites on %d statements...' % len(stmts_in))
kwarg_list = ['do_methionine_offset', 'do_orthology_mapping',
'do_isoform_mapping']
sm = SiteMapper(default_site_map,
use_cache=kwargs.pop('use_cache', False),
**_filter(kwargs, kwarg_list))
valid, mapped = sm.map_sites(stmts_in)
correctly_mapped_stmts = []
for ms in mapped:
correctly_mapped = all([mm.has_mapping() for mm in ms.mapped_mods])
if correctly_mapped:
correctly_mapped_stmts.append(ms.mapped_stmt)
stmts_out = valid + correctly_mapped_stmts
logger.info('%d statements with valid sites' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
del sm
return stmts_out
(avg_line_len: 45.666667, score: 19.894737)
def get_mutations(study_id, gene_list, mutation_type=None,
case_id=None):
"""Return mutations as a list of genes and list of amino acid changes.
Parameters
----------
study_id : str
The ID of the cBio study.
Example: 'cellline_ccle_broad' or 'paad_icgc'
gene_list : list[str]
A list of genes with their HGNC symbols.
Example: ['BRAF', 'KRAS']
mutation_type : Optional[str]
The type of mutation to filter to.
mutation_type can be one of: missense, nonsense, frame_shift_ins,
frame_shift_del, splice_site
case_id : Optional[str]
The case ID within the study to filter to.
Returns
-------
    mutations : dict[str, list]
        A dict with two keys, 'gene_symbol' and 'amino_acid_change', each
        mapping to a list: the genes and the amino acid changes in them.
"""
genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
gene_list_str = ','.join(gene_list)
data = {'cmd': 'getMutationData',
'case_set_id': study_id,
'genetic_profile_id': genetic_profile,
'gene_list': gene_list_str,
'skiprows': -1}
df = send_request(**data)
if case_id:
df = df[df['case_id'] == case_id]
res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
'mutation_type', mutation_type)
mutations = {'gene_symbol': list(res['gene_symbol'].values()),
'amino_acid_change': list(res['amino_acid_change'].values())}
return mutations
(avg_line_len: 37.439024, score: 17.121951)
def secgroup_delete(call=None, kwargs=None):
'''
Deletes the given security group from OpenNebula. Either a name or a secgroup_id
must be supplied.
.. versionadded:: 2016.3.0
name
The name of the security group to delete. Can be used instead of
``secgroup_id``.
secgroup_id
The ID of the security group to delete. Can be used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f secgroup_delete opennebula name=my-secgroup
salt-cloud --function secgroup_delete opennebula secgroup_id=100
'''
if call != 'function':
raise SaltCloudSystemExit(
'The secgroup_delete function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
secgroup_id = kwargs.get('secgroup_id', None)
if secgroup_id:
if name:
log.warning(
'Both the \'secgroup_id\' and \'name\' arguments were provided. '
'\'secgroup_id\' will take precedence.'
)
elif name:
secgroup_id = get_secgroup_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The secgroup_delete function requires either a \'name\' or a '
'\'secgroup_id\' to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.secgroup.delete(auth, int(secgroup_id))
data = {
'action': 'secgroup.delete',
'deleted': response[0],
'secgroup_id': response[1],
'error_code': response[2],
}
return data
(avg_line_len: 27.758621, score: 25)
def tendermint_version_is_compatible(running_tm_ver):
"""
    Check Tendermint compatibility with the BigchainDB server
:param running_tm_ver: Version number of the connected Tendermint instance
:type running_tm_ver: str
    :return: True/False depending on the compatibility with the BigchainDB server
:rtype: bool
"""
# Splitting because version can look like this e.g. 0.22.8-40d6dc2e
tm_ver = running_tm_ver.split('-')
if not tm_ver:
return False
for ver in __tm_supported_versions__:
if version.parse(ver) == version.parse(tm_ver[0]):
return True
return False
(avg_line_len: 33.833333, score: 19.722222)
def pre_scan(self):
""" Prepare string for scanning. """
escape_re = re.compile(r'\\\n[\t ]+')
self.source = escape_re.sub('', self.source)
(avg_line_len: 40, score: 9.5)
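A sketch of the effect on a backslash-continued line, using a hypothetical minimal host class (the real scanner presumably sets `self.source` elsewhere):

import re

class _FakeScanner:
    def __init__(self, source):
        self.source = source
    pre_scan = pre_scan  # reuse the method defined above

s = _FakeScanner('total = 1 + \\\n    2\n')
s.pre_scan()
print(repr(s.source))  # 'total = 1 + 2\n'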
def xcorr(x, y, maxlags):
"""
Streamlined version of matplotlib's `xcorr`, without the plots.
:param x, y: NumPy arrays to cross-correlate
:param maxlags: Max number of lags; result will be `2*maxlags+1` in length
"""
xlen = len(x)
ylen = len(y)
assert xlen == ylen
c = np.correlate(x, y, mode=2)
# normalize
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
lags = np.arange(-maxlags, maxlags + 1)
c = c[xlen - 1 - maxlags:xlen + maxlags]
return c
(avg_line_len: 24.1, score: 21.1)
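A quick sanity check on an autocorrelation (data is illustrative):

import numpy as np

x = np.array([0.0, 1.0, 2.0, 1.0])
c = xcorr(x, x, maxlags=2)
print(len(c))  # 5, i.e. 2 * maxlags + 1
print(c[2])    # 1.0: the normalized autocorrelation peaks at zero lag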
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
(avg_line_len: 43.166667, score: 19.5)
def on_all_ok(self):
"""
        This method is called when all tasks reach S_OK.
        It runs `mrgddb` sequentially on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
return self.Results(node=self, returncode=0, message="DDB merge done")
(avg_line_len: 40.666667, score: 14.666667)
def stringDecodeEntities(self, str, what, end, end2, end3):
"""Takes a entity string content and process to do the
adequate substitutions. [67] Reference ::= EntityRef |
CharRef [69] PEReference ::= '%' Name ';' """
ret = libxml2mod.xmlStringDecodeEntities(self._o, str, what, end, end2, end3)
return ret
(avg_line_len: 57.666667, score: 18.5)
def to_0d_object_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.
"""
result = np.empty((), dtype=object)
result[()] = value
return result
(avg_line_len: 34.166667, score: 8.5)
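For example, wrapping a list keeps it as a single object element instead of letting NumPy broadcast it into a 1-D array:

import numpy as np

arr = to_0d_object_array([1, 2, 3])
print(arr.shape)   # ()
print(arr.dtype)   # object
print(arr.item())  # [1, 2, 3], the original list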
def _load_vocab_file(vocab_file, reserved_tokens=None):
"""Load vocabulary while ensuring reserved tokens are at the top."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
subtoken_list = []
with tf.gfile.Open(vocab_file, mode="r") as f:
for line in f:
subtoken = _native_to_unicode(line.strip())
subtoken = subtoken[1:-1] # Remove surrounding single-quotes
if subtoken in reserved_tokens:
continue
subtoken_list.append(_native_to_unicode(subtoken))
return reserved_tokens + subtoken_list
(avg_line_len: 38.714286, score: 14.142857)
def create_css(self, fileid=None):
"""
Generate the final CSS string
"""
if fileid:
rules = self._rules.get(fileid) or []
else:
rules = self.rules
compress = self._scss_opts.get('compress', True)
if compress:
sc, sp, tb, nl = False, '', '', ''
else:
sc, sp, tb, nl = True, ' ', ' ', '\n'
scope = set()
return self._create_css(rules, scope, sc, sp, tb, nl, not compress and self._scss_opts.get('debug_info', False))
(avg_line_len: 31.117647, score: 19.705882)
def set_provider(self, provider_id):
"""Sets a provider.
arg: provider_id (osid.id.Id): the new provider
raise: InvalidArgument - ``provider_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``provider_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_provider_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(provider_id):
raise errors.InvalidArgument()
self._my_map['providerId'] = str(provider_id)
(avg_line_len: 40.8, score: 15.466667)
def sounds(self):
"""Return a dictionary of sounds recognized by Pushover and that can be
used in a notification message.
"""
if not Pushover._SOUNDS:
request = Request("get", SOUND_URL, {"token": self.token})
Pushover._SOUNDS = request.answer["sounds"]
return Pushover._SOUNDS
(avg_line_len: 41.75, score: 10.75)
def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None
(avg_line_len: 47.909091, score: 24.545455)
def _initRepository(self):
"""Have mercurial init the workdir as a repository (hg init) if needed.
hg init will also create all needed intermediate directories.
"""
if self._isRepositoryReady():
return defer.succeed(None)
log.msg('hgpoller: initializing working dir from %s' % self.repourl)
d = utils.getProcessOutputAndValue(self.hgbin,
['init', self._absWorkdir()],
env=os.environ)
d.addCallback(self._convertNonZeroToFailure)
d.addErrback(self._stopOnFailure)
d.addCallback(lambda _: log.msg(
"hgpoller: finished initializing working dir %r" % self.workdir))
return d
(avg_line_len: 46.625, score: 16.375)
def IsHuntStarted(self):
"""Is this hunt considered started?
This method is used to check if new clients should be processed by
this hunt. Note that child flow responses are always processed but
new clients are not allowed to be scheduled unless the hunt is
started.
Returns:
If a new client is allowed to be scheduled on this hunt.
"""
state = self.hunt_obj.Get(self.hunt_obj.Schema.STATE)
if state != "STARTED":
return False
# Stop the hunt due to expiry.
if self.CheckExpiry():
return False
return True
(avg_line_len: 26.47619, score: 23.809524)
def pfindall(path, *fnames):
"""Find all fnames in the closest ancestor directory.
For the purposes of this function, we are our own closest ancestor.
I.e. given the structure::
.
`-- a
|-- b
| |-- c
| | `-- x.txt
| `-- x.txt
`-- y.txt
the call::
dict(pfindall('a/b/c', 'x.txt', 'y.txt'))
will return::
{
'x.txt': 'a/b/c/x.txt',
'y.txt': 'a/y.txt'
}
``a/b/x.txt`` is not returned, since ``a/b/c/x.txt`` is the "closest"
``x.txt`` when starting from ``a/b/c`` (note: pfindall only looks
"upwards", ie. towards the root).
"""
wd = os.path.abspath(path)
assert os.path.isdir(wd)
def parents():
"""yield successive parent directories
"""
parent = wd
yield parent
while 1:
parent, dirname = os.path.split(parent)
if not dirname:
return
yield parent
    found = set()
    for d in parents():
        curdirlist = os.listdir(d)
        for fname in fnames:
            # Only yield the closest match for each fname; without this guard,
            # dict(pfindall(...)) would keep the *last* pair yielded, i.e. the
            # match furthest from `path`, contradicting the docstring above.
            if fname not in found and fname in curdirlist:
                found.add(fname)
                yield fname, os.path.normcase(os.path.join(d, fname))
(avg_line_len: 26.276596, score: 19.446809)
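A small sketch of the docstring's example, built in a throwaway temporary directory (file layout is illustrative):

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'a', 'b', 'c'))
for rel in ('a/b/c/x.txt', 'a/b/x.txt', 'a/y.txt'):
    open(os.path.join(root, *rel.split('/')), 'w').close()

found = dict(pfindall(os.path.join(root, 'a', 'b', 'c'), 'x.txt', 'y.txt'))
# x.txt resolves to the closest copy, y.txt to the copy two levels up.
print(found['x.txt'].endswith(os.path.normcase(os.path.join('a', 'b', 'c', 'x.txt'))))  # True
print(found['y.txt'].endswith(os.path.normcase(os.path.join('a', 'y.txt'))))            # True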
def _handle_tag_grains_refresh(self, tag, data):
'''
Handle a grains_refresh event
'''
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
(avg_line_len: 39.25, score: 14.5)
def next_message(self):
"""Block until a message(request or notification) is available.
If any messages were previously enqueued, return the first in queue.
If not, run the event loop until one is received.
"""
if self._is_running:
raise Exception('Event loop already running')
if self._pending_messages:
return self._pending_messages.popleft()
self._async_session.run(self._enqueue_request_and_stop,
self._enqueue_notification_and_stop)
if self._pending_messages:
return self._pending_messages.popleft()
(avg_line_len: 44.571429, score: 16)
def __GetAuthorizationTokenUsingMasterKey(verb,
resource_id_or_fullname,
resource_type,
headers,
master_key):
"""Gets the authorization token using `master_key.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict
"""
# decodes the master key which is encoded in base64
key = base64.b64decode(master_key)
# Skipping lower casing of resource_id_or_fullname since it may now contain "ID" of the resource as part of the fullname
text = '{verb}\n{resource_type}\n{resource_id_or_fullname}\n{x_date}\n{http_date}\n'.format(
verb=(verb.lower() or ''),
resource_type=(resource_type.lower() or ''),
resource_id_or_fullname=(resource_id_or_fullname or ''),
x_date=headers.get(http_constants.HttpHeaders.XDate, '').lower(),
http_date=headers.get(http_constants.HttpHeaders.HttpDate, '').lower())
if six.PY2:
body = text.decode('utf-8')
digest = hmac.new(key, body, sha256).digest()
signature = digest.encode('base64')
else:
# python 3 support
body = text.encode('utf-8')
digest = hmac.new(key, body, sha256).digest()
signature = base64.encodebytes(digest).decode('utf-8')
master_token = 'master'
token_version = '1.0'
return 'type={type}&ver={ver}&sig={sig}'.format(type=master_token,
ver=token_version,
sig=signature[:-1])
(avg_line_len: 38.021739, score: 22.391304)
def sens_power_board_send(self, timestamp, pwr_brd_status, pwr_brd_led_status, pwr_brd_system_volt, pwr_brd_servo_volt, pwr_brd_mot_l_amp, pwr_brd_mot_r_amp, pwr_brd_servo_1_amp, pwr_brd_servo_2_amp, pwr_brd_servo_3_amp, pwr_brd_servo_4_amp, pwr_brd_aux_amp, force_mavlink1=False):
'''
Monitoring of power board status
timestamp : Timestamp (uint64_t)
pwr_brd_status : Power board status register (uint8_t)
pwr_brd_led_status : Power board leds status (uint8_t)
pwr_brd_system_volt : Power board system voltage (float)
pwr_brd_servo_volt : Power board servo voltage (float)
pwr_brd_mot_l_amp : Power board left motor current sensor (float)
pwr_brd_mot_r_amp : Power board right motor current sensor (float)
pwr_brd_servo_1_amp : Power board servo1 current sensor (float)
                pwr_brd_servo_2_amp       : Power board servo2 current sensor (float)
                pwr_brd_servo_3_amp       : Power board servo3 current sensor (float)
                pwr_brd_servo_4_amp       : Power board servo4 current sensor (float)
pwr_brd_aux_amp : Power board aux current sensor (float)
'''
return self.send(self.sens_power_board_encode(timestamp, pwr_brd_status, pwr_brd_led_status, pwr_brd_system_volt, pwr_brd_servo_volt, pwr_brd_mot_l_amp, pwr_brd_mot_r_amp, pwr_brd_servo_1_amp, pwr_brd_servo_2_amp, pwr_brd_servo_3_amp, pwr_brd_servo_4_amp, pwr_brd_aux_amp), force_mavlink1=force_mavlink1)
(avg_line_len: 87.894737, score: 60.736842)
def configureEndpoint(self, hostName, portNumber):
"""
**Description**
Used to configure the host name and port number the client tries to connect to. Should be called
before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureEndpoint("random.iot.region.amazonaws.com", 8883)
**Parameters**
*hostName* - String that denotes the host name of the user-specific AWS IoT endpoint.
*portNumber* - Integer that denotes the port number to connect to. Could be :code:`8883` for
TLSv1.2 Mutual Authentication or :code:`443` for Websocket SigV4 and TLSv1.2 Mutual Authentication
with ALPN extension.
**Returns**
None
"""
endpoint_provider = EndpointProvider()
endpoint_provider.set_host(hostName)
endpoint_provider.set_port(portNumber)
self._mqtt_core.configure_endpoint(endpoint_provider)
if portNumber == 443 and not self._mqtt_core.use_wss():
self._mqtt_core.configure_alpn_protocols()
(avg_line_len: 32.65625, score: 28.90625)
def HasStorage(self):
"""
Flag indicating if storage is available.
Returns:
bool: True if available. False otherwise.
"""
from neo.Core.State.ContractState import ContractPropertyState
return self.ContractProperties & ContractPropertyState.HasStorage > 0
(avg_line_len: 34.111111, score: 19)
def _write_output_manifest(self, manifest, filestore_root):
"""
Adds the file path column to the manifest and writes the copy to the current directory. If the original manifest
is in the current directory it is overwritten with a warning.
"""
output = os.path.basename(manifest)
fieldnames, source_manifest = self._parse_manifest(manifest)
if 'file_path' not in fieldnames:
fieldnames.append('file_path')
with atomic_write(output, overwrite=True) as f:
delimiter = b'\t' if USING_PYTHON2 else '\t'
writer = csv.DictWriter(f, fieldnames, delimiter=delimiter, quoting=csv.QUOTE_NONE)
writer.writeheader()
for row in source_manifest:
row['file_path'] = self._file_path(row['file_sha256'], filestore_root)
writer.writerow(row)
if os.path.isfile(output):
logger.warning('Overwriting manifest %s', output)
logger.info('Rewrote manifest %s with additional column containing path to downloaded files.', output)
(avg_line_len: 56.631579, score: 21.263158)
def match_blocks(hash_func, old_children, new_children):
"""Use difflib to find matching blocks."""
sm = difflib.SequenceMatcher(
_is_junk,
a=[hash_func(c) for c in old_children],
b=[hash_func(c) for c in new_children],
)
return sm
|
[
"def",
"match_blocks",
"(",
"hash_func",
",",
"old_children",
",",
"new_children",
")",
":",
"sm",
"=",
"difflib",
".",
"SequenceMatcher",
"(",
"_is_junk",
",",
"a",
"=",
"[",
"hash_func",
"(",
"c",
")",
"for",
"c",
"in",
"old_children",
"]",
",",
"b",
"=",
"[",
"hash_func",
"(",
"c",
")",
"for",
"c",
"in",
"new_children",
"]",
",",
")",
"return",
"sm"
] | 33 | 15.25 |
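A minimal standalone sketch of how the matcher returned above is typically consumed; the child lists and identity hashing are illustrative stand-ins, while the difflib API itself is standard:

import difflib

old = ['p', 'div', 'span', 'p']
new = ['p', 'span', 'p']
sm = difflib.SequenceMatcher(None, a=old, b=new)
for a_start, b_start, size in sm.get_matching_blocks():
    # the final block is always the (len(a), len(b), 0) sentinel
    print(a_start, b_start, size)
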
def ReconcileShadow(self, store_type):
"""Verify that entries that claim to use shadow files have a shadow entry.
If the entries of the non-shadowed file indicate that a shadow file is used,
check that there is actually an entry for that file in shadow.
Args:
store_type: The type of password store that should be used (e.g.
/etc/shadow or /etc/gshadow)
"""
for k, v in iteritems(self.entry):
if v.pw_entry.store == store_type:
shadow_entry = self.shadow.get(k)
if shadow_entry is not None:
v.pw_entry = shadow_entry
else:
v.pw_entry.store = "UNKNOWN"
|
[
"def",
"ReconcileShadow",
"(",
"self",
",",
"store_type",
")",
":",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"self",
".",
"entry",
")",
":",
"if",
"v",
".",
"pw_entry",
".",
"store",
"==",
"store_type",
":",
"shadow_entry",
"=",
"self",
".",
"shadow",
".",
"get",
"(",
"k",
")",
"if",
"shadow_entry",
"is",
"not",
"None",
":",
"v",
".",
"pw_entry",
"=",
"shadow_entry",
"else",
":",
"v",
".",
"pw_entry",
".",
"store",
"=",
"\"UNKNOWN\""
] | 36.764706 | 14.941176 |
def GetRelativePath(self, path_spec):
"""Returns the relative path based on a resolved path specification.
The relative path is the location of the upper most path specification.
The the location of the mount point is stripped off if relevant.
Args:
path_spec (PathSpec): path specification.
Returns:
str: corresponding relative path or None if the relative path could not
be determined.
Raises:
PathSpecError: if the path specification is incorrect.
"""
location = getattr(path_spec, 'location', None)
if location is None:
raise errors.PathSpecError('Path specification missing location.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
if not location.startswith(self._mount_point.location):
raise errors.PathSpecError(
'Path specification does not contain mount point.')
else:
if not hasattr(path_spec, 'parent'):
raise errors.PathSpecError('Path specification missing parent.')
if path_spec.parent != self._mount_point:
raise errors.PathSpecError(
'Path specification does not contain mount point.')
path_segments = self._file_system.SplitPath(location)
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
mount_point_path_segments = self._file_system.SplitPath(
self._mount_point.location)
path_segments = path_segments[len(mount_point_path_segments):]
return '{0:s}{1:s}'.format(
self._file_system.PATH_SEPARATOR,
self._file_system.PATH_SEPARATOR.join(path_segments))
|
[
"def",
"GetRelativePath",
"(",
"self",
",",
"path_spec",
")",
":",
"location",
"=",
"getattr",
"(",
"path_spec",
",",
"'location'",
",",
"None",
")",
"if",
"location",
"is",
"None",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Path specification missing location.'",
")",
"if",
"path_spec_factory",
".",
"Factory",
".",
"IsSystemLevelTypeIndicator",
"(",
"self",
".",
"_file_system",
".",
"type_indicator",
")",
":",
"if",
"not",
"location",
".",
"startswith",
"(",
"self",
".",
"_mount_point",
".",
"location",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Path specification does not contain mount point.'",
")",
"else",
":",
"if",
"not",
"hasattr",
"(",
"path_spec",
",",
"'parent'",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Path specification missing parent.'",
")",
"if",
"path_spec",
".",
"parent",
"!=",
"self",
".",
"_mount_point",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Path specification does not contain mount point.'",
")",
"path_segments",
"=",
"self",
".",
"_file_system",
".",
"SplitPath",
"(",
"location",
")",
"if",
"path_spec_factory",
".",
"Factory",
".",
"IsSystemLevelTypeIndicator",
"(",
"self",
".",
"_file_system",
".",
"type_indicator",
")",
":",
"mount_point_path_segments",
"=",
"self",
".",
"_file_system",
".",
"SplitPath",
"(",
"self",
".",
"_mount_point",
".",
"location",
")",
"path_segments",
"=",
"path_segments",
"[",
"len",
"(",
"mount_point_path_segments",
")",
":",
"]",
"return",
"'{0:s}{1:s}'",
".",
"format",
"(",
"self",
".",
"_file_system",
".",
"PATH_SEPARATOR",
",",
"self",
".",
"_file_system",
".",
"PATH_SEPARATOR",
".",
"join",
"(",
"path_segments",
")",
")"
] | 37.136364 | 21.522727 |
def prune_empty_node(node, seen):
"""
Recursively remove empty branches and return whether this makes the node
itself empty.
The ``seen`` parameter is used to avoid infinite recursion due to cycles
(you never know).
"""
if node.methods:
return False
if id(node) in seen:
return True
seen = seen | {id(node)}
for branch in list(node.branches):
if prune_empty_node(branch, seen):
node.branches.remove(branch)
else:
return False
return True
|
[
"def",
"prune_empty_node",
"(",
"node",
",",
"seen",
")",
":",
"if",
"node",
".",
"methods",
":",
"return",
"False",
"if",
"id",
"(",
"node",
")",
"in",
"seen",
":",
"return",
"True",
"seen",
"=",
"seen",
"|",
"{",
"id",
"(",
"node",
")",
"}",
"for",
"branch",
"in",
"list",
"(",
"node",
".",
"branches",
")",
":",
"if",
"prune_empty_node",
"(",
"branch",
",",
"seen",
")",
":",
"node",
".",
"branches",
".",
"remove",
"(",
"branch",
")",
"else",
":",
"return",
"False",
"return",
"True"
] | 27.368421 | 16.947368 |
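A hedged sketch of the node shape prune_empty_node assumes: an object with a `methods` list and a `branches` list. The Node class below is an illustrative stand-in, not the library's own:

class Node:
    def __init__(self, methods=None, branches=None):
        self.methods = methods or []    # attribute names match those used above
        self.branches = branches or []

leaf = Node()                                        # empty: no methods, no branches
root = Node(branches=[Node(branches=[leaf])])
assert prune_empty_node(root, frozenset()) is True   # the whole tree is empty
assert root.branches == []                           # the empty subtree was pruned
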
def get_arp_table(self):
"""
Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
"""
arp_table = []
command = 'show arp | exclude Incomplete'
output = self._send_command(command)
# Skip the first line which is a header
output = output.split('\n')
output = output[1:]
for line in output:
if len(line) == 0:
return {}
if len(line.split()) == 5:
# Static ARP entries have no interface
# Internet 10.0.0.1 - 0010.2345.1cda ARPA
interface = ''
protocol, address, age, mac, eth_type = line.split()
elif len(line.split()) == 6:
protocol, address, age, mac, eth_type, interface = line.split()
else:
raise ValueError("Unexpected output from: {}".format(line.split()))
try:
if age == '-':
age = 0
age = float(age)
except ValueError:
raise ValueError("Unable to convert age value to float: {}".format(age))
# Validate we matched correctly
if not re.search(RE_IPADDR, address):
raise ValueError("Invalid IP Address detected: {}".format(address))
if not re.search(RE_MAC, mac):
raise ValueError("Invalid MAC Address detected: {}".format(mac))
entry = {
'interface': interface,
'mac': napalm_base.helpers.mac(mac),
'ip': address,
'age': age
}
arp_table.append(entry)
return arp_table
|
[
"def",
"get_arp_table",
"(",
"self",
")",
":",
"arp_table",
"=",
"[",
"]",
"command",
"=",
"'show arp | exclude Incomplete'",
"output",
"=",
"self",
".",
"_send_command",
"(",
"command",
")",
"# Skip the first line which is a header",
"output",
"=",
"output",
".",
"split",
"(",
"'\\n'",
")",
"output",
"=",
"output",
"[",
"1",
":",
"]",
"for",
"line",
"in",
"output",
":",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"return",
"{",
"}",
"if",
"len",
"(",
"line",
".",
"split",
"(",
")",
")",
"==",
"5",
":",
"# Static ARP entries have no interface",
"# Internet 10.0.0.1 - 0010.2345.1cda ARPA",
"interface",
"=",
"''",
"protocol",
",",
"address",
",",
"age",
",",
"mac",
",",
"eth_type",
"=",
"line",
".",
"split",
"(",
")",
"elif",
"len",
"(",
"line",
".",
"split",
"(",
")",
")",
"==",
"6",
":",
"protocol",
",",
"address",
",",
"age",
",",
"mac",
",",
"eth_type",
",",
"interface",
"=",
"line",
".",
"split",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected output from: {}\"",
".",
"format",
"(",
"line",
".",
"split",
"(",
")",
")",
")",
"try",
":",
"if",
"age",
"==",
"'-'",
":",
"age",
"=",
"0",
"age",
"=",
"float",
"(",
"age",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to convert age value to float: {}\"",
".",
"format",
"(",
"age",
")",
")",
"# Validate we matched correctly",
"if",
"not",
"re",
".",
"search",
"(",
"RE_IPADDR",
",",
"address",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid IP Address detected: {}\"",
".",
"format",
"(",
"address",
")",
")",
"if",
"not",
"re",
".",
"search",
"(",
"RE_MAC",
",",
"mac",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid MAC Address detected: {}\"",
".",
"format",
"(",
"mac",
")",
")",
"entry",
"=",
"{",
"'interface'",
":",
"interface",
",",
"'mac'",
":",
"napalm_base",
".",
"helpers",
".",
"mac",
"(",
"mac",
")",
",",
"'ip'",
":",
"address",
",",
"'age'",
":",
"age",
"}",
"arp_table",
".",
"append",
"(",
"entry",
")",
"return",
"arp_table"
] | 34.367647 | 18.279412 |
def _split_field_list(field_list):
"""Split the list of fields for which to extract values into lists by extraction
methods.
- Remove any duplicated field names.
- Raises ValueError with list of any invalid field names in ``field_list``.
"""
lookup_dict = {}
generate_dict = {}
for field_name in field_list or FIELD_NAME_TO_EXTRACT_DICT.keys():
try:
extract_dict = FIELD_NAME_TO_EXTRACT_DICT[field_name]
except KeyError:
assert_invalid_field_list(field_list)
else:
if "lookup_str" in extract_dict:
lookup_dict[field_name] = extract_dict
else:
generate_dict[field_name] = extract_dict
return lookup_dict, generate_dict
|
[
"def",
"_split_field_list",
"(",
"field_list",
")",
":",
"lookup_dict",
"=",
"{",
"}",
"generate_dict",
"=",
"{",
"}",
"for",
"field_name",
"in",
"field_list",
"or",
"FIELD_NAME_TO_EXTRACT_DICT",
".",
"keys",
"(",
")",
":",
"try",
":",
"extract_dict",
"=",
"FIELD_NAME_TO_EXTRACT_DICT",
"[",
"field_name",
"]",
"except",
"KeyError",
":",
"assert_invalid_field_list",
"(",
"field_list",
")",
"else",
":",
"if",
"\"lookup_str\"",
"in",
"extract_dict",
":",
"lookup_dict",
"[",
"field_name",
"]",
"=",
"extract_dict",
"else",
":",
"generate_dict",
"[",
"field_name",
"]",
"=",
"extract_dict",
"return",
"lookup_dict",
",",
"generate_dict"
] | 32.130435 | 20.26087 |
def center(a: Union[Set["Point2"], List["Point2"]]) -> "Point2":
""" Returns the central point for points in list """
s = Point2((0, 0))
for p in a:
s += p
return s / len(a)
|
[
"def",
"center",
"(",
"a",
":",
"Union",
"[",
"Set",
"[",
"\"Point2\"",
"]",
",",
"List",
"[",
"\"Point2\"",
"]",
"]",
")",
"->",
"\"Point2\"",
":",
"s",
"=",
"Point2",
"(",
"(",
"0",
",",
"0",
")",
")",
"for",
"p",
"in",
"a",
":",
"s",
"+=",
"p",
"return",
"s",
"/",
"len",
"(",
"a",
")"
] | 35.333333 | 16 |
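A quick usage sketch, assuming only the Point2 arithmetic (`+=` and `/`) that the function itself relies on:

points = [Point2((0, 0)), Point2((4, 0)), Point2((2, 6))]
assert center(points) == Point2((2, 2))  # centroid of the three points
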
def resolve_parent_registry_name(self, registry_name, suffix):
"""
Subclasses should override to specify the default suffix, as the
invocation is done without a suffix.
"""
if not registry_name.endswith(suffix):
raise ValueError(
"child module registry name defined with invalid suffix "
"('%s' does not end with '%s')" % (registry_name, suffix))
return registry_name[:-len(suffix)]
|
[
"def",
"resolve_parent_registry_name",
"(",
"self",
",",
"registry_name",
",",
"suffix",
")",
":",
"if",
"not",
"registry_name",
".",
"endswith",
"(",
"suffix",
")",
":",
"raise",
"ValueError",
"(",
"\"child module registry name defined with invalid suffix \"",
"\"('%s' does not end with '%s')\"",
"%",
"(",
"registry_name",
",",
"suffix",
")",
")",
"return",
"registry_name",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]"
] | 42.272727 | 16.818182 |
def get_shell(pid=None, max_depth=6):
"""Get the shell that the supplied pid or os.getpid() is running in.
"""
if not pid:
pid = os.getpid()
processes = dict(_iter_process())
def check_parent(pid, lvl=0):
ppid = processes[pid].get('parent_pid')
shell_name = _get_executable(processes.get(ppid))
if shell_name in SHELL_NAMES:
return (shell_name, processes[ppid]['executable'])
if lvl >= max_depth:
return None
return check_parent(ppid, lvl=lvl + 1)
shell_name = _get_executable(processes.get(pid))
if shell_name in SHELL_NAMES:
return (shell_name, processes[pid]['executable'])
try:
return check_parent(pid)
except KeyError:
return None
|
[
"def",
"get_shell",
"(",
"pid",
"=",
"None",
",",
"max_depth",
"=",
"6",
")",
":",
"if",
"not",
"pid",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"processes",
"=",
"dict",
"(",
"_iter_process",
"(",
")",
")",
"def",
"check_parent",
"(",
"pid",
",",
"lvl",
"=",
"0",
")",
":",
"ppid",
"=",
"processes",
"[",
"pid",
"]",
".",
"get",
"(",
"'parent_pid'",
")",
"shell_name",
"=",
"_get_executable",
"(",
"processes",
".",
"get",
"(",
"ppid",
")",
")",
"if",
"shell_name",
"in",
"SHELL_NAMES",
":",
"return",
"(",
"shell_name",
",",
"processes",
"[",
"ppid",
"]",
"[",
"'executable'",
"]",
")",
"if",
"lvl",
">=",
"max_depth",
":",
"return",
"None",
"return",
"check_parent",
"(",
"ppid",
",",
"lvl",
"=",
"lvl",
"+",
"1",
")",
"shell_name",
"=",
"_get_executable",
"(",
"processes",
".",
"get",
"(",
"pid",
")",
")",
"if",
"shell_name",
"in",
"SHELL_NAMES",
":",
"return",
"(",
"shell_name",
",",
"processes",
"[",
"pid",
"]",
"[",
"'executable'",
"]",
")",
"try",
":",
"return",
"check_parent",
"(",
"pid",
")",
"except",
"KeyError",
":",
"return",
"None"
] | 32.478261 | 14.521739 |
def accept(self):
"""Accept the option.
Returns
-------
an awaitable of :class:`IssueResult`
"""
return self._issue._nation._accept_issue(self._issue.id, self._id)
|
[
"def",
"accept",
"(",
"self",
")",
":",
"return",
"self",
".",
"_issue",
".",
"_nation",
".",
"_accept_issue",
"(",
"self",
".",
"_issue",
".",
"id",
",",
"self",
".",
"_id",
")"
] | 25.625 | 18.875 |
def timeline(self, timeline="home", max_id=None, min_id=None, since_id=None, limit=None):
"""
Fetch statuses, most recent ones first. `timeline` can be 'home', 'local', 'public',
        'tag/hashtag' or 'list/id'. See the following functions' documentation for what those do.
Local hashtag timelines are supported via the `timeline_hashtag()`_ function.
The default timeline is the "home" timeline.
Media only queries are supported via the `timeline_public()`_ and `timeline_hashtag()`_ functions.
Returns a list of `toot dicts`_.
"""
if max_id != None:
max_id = self.__unpack_id(max_id)
if min_id != None:
min_id = self.__unpack_id(min_id)
if since_id != None:
since_id = self.__unpack_id(since_id)
params_initial = locals()
if timeline == "local":
timeline = "public"
params_initial['local'] = True
params = self.__generate_params(params_initial, ['timeline'])
url = '/api/v1/timelines/{0}'.format(timeline)
return self.__api_request('GET', url, params)
|
[
"def",
"timeline",
"(",
"self",
",",
"timeline",
"=",
"\"home\"",
",",
"max_id",
"=",
"None",
",",
"min_id",
"=",
"None",
",",
"since_id",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"if",
"max_id",
"!=",
"None",
":",
"max_id",
"=",
"self",
".",
"__unpack_id",
"(",
"max_id",
")",
"if",
"min_id",
"!=",
"None",
":",
"min_id",
"=",
"self",
".",
"__unpack_id",
"(",
"min_id",
")",
"if",
"since_id",
"!=",
"None",
":",
"since_id",
"=",
"self",
".",
"__unpack_id",
"(",
"since_id",
")",
"params_initial",
"=",
"locals",
"(",
")",
"if",
"timeline",
"==",
"\"local\"",
":",
"timeline",
"=",
"\"public\"",
"params_initial",
"[",
"'local'",
"]",
"=",
"True",
"params",
"=",
"self",
".",
"__generate_params",
"(",
"params_initial",
",",
"[",
"'timeline'",
"]",
")",
"url",
"=",
"'/api/v1/timelines/{0}'",
".",
"format",
"(",
"timeline",
")",
"return",
"self",
".",
"__api_request",
"(",
"'GET'",
",",
"url",
",",
"params",
")"
] | 38.333333 | 23.533333 |
def make_butterworth_bandpass_b_a(CenterFreq, bandwidth, SampleFreq, order=5, btype='band'):
"""
Generates the b and a coefficients for a butterworth bandpass IIR filter.
Parameters
----------
CenterFreq : float
central frequency of bandpass
bandwidth : float
width of the bandpass from centre to edge
SampleFreq : float
Sample frequency of filter
order : int, optional
order of IIR filter. Is 5 by default
btype : string, optional
type of filter to make e.g. (band, low, high)
Returns
-------
b : ndarray
coefficients multiplying the current and past inputs (feedforward coefficients)
a : ndarray
coefficients multiplying the past outputs (feedback coefficients)
"""
lowcut = CenterFreq-bandwidth/2
highcut = CenterFreq+bandwidth/2
b, a = make_butterworth_b_a(lowcut, highcut, SampleFreq, order, btype)
return b, a
|
[
"def",
"make_butterworth_bandpass_b_a",
"(",
"CenterFreq",
",",
"bandwidth",
",",
"SampleFreq",
",",
"order",
"=",
"5",
",",
"btype",
"=",
"'band'",
")",
":",
"lowcut",
"=",
"CenterFreq",
"-",
"bandwidth",
"/",
"2",
"highcut",
"=",
"CenterFreq",
"+",
"bandwidth",
"/",
"2",
"b",
",",
"a",
"=",
"make_butterworth_b_a",
"(",
"lowcut",
",",
"highcut",
",",
"SampleFreq",
",",
"order",
",",
"btype",
")",
"return",
"b",
",",
"a"
] | 32.928571 | 21.214286 |
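A hedged usage sketch: design a bandpass around 60 Hz and apply it with scipy.signal.lfilter. It assumes make_butterworth_b_a, the helper wrapped above, is importable from the same module:

import numpy as np
from scipy import signal

fs = 1000.0                               # sample rate in Hz
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 60 * t) + np.sin(2 * np.pi * 200 * t)

b, a = make_butterworth_bandpass_b_a(CenterFreq=60, bandwidth=20, SampleFreq=fs)
filtered = signal.lfilter(b, a, x)        # the 200 Hz component is attenuated
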
def main():
"""Create and use a logger."""
formatter = ColoredFormatter(log_colors={'TRACE': 'yellow'})
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger('example')
logger.addHandler(handler)
logger.setLevel('TRACE')
logger.log(5, 'a message using a custom level')
|
[
"def",
"main",
"(",
")",
":",
"formatter",
"=",
"ColoredFormatter",
"(",
"log_colors",
"=",
"{",
"'TRACE'",
":",
"'yellow'",
"}",
")",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'example'",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"setLevel",
"(",
"'TRACE'",
")",
"logger",
".",
"log",
"(",
"5",
",",
"'a message using a custom level'",
")"
] | 27.583333 | 17.916667 |
def from_dict(cls, d):
"""
Convert a dictionary into an xarray.Dataset.
Input dict can take several forms::
d = {'t': {'dims': ('t'), 'data': t},
'a': {'dims': ('t'), 'data': x},
'b': {'dims': ('t'), 'data': y}}
d = {'coords': {'t': {'dims': 't', 'data': t,
'attrs': {'units':'s'}}},
'attrs': {'title': 'air temperature'},
'dims': 't',
'data_vars': {'a': {'dims': 't', 'data': x, },
'b': {'dims': 't', 'data': y}}}
        where 't' is the name of the dimension, 'a' and 'b' are names of data
variables and t, x, and y are lists, numpy.arrays or pandas objects.
Parameters
----------
d : dict, with a minimum structure of {'var_0': {'dims': [..], \
'data': [..]}, \
...}
Returns
-------
obj : xarray.Dataset
See also
--------
Dataset.to_dict
DataArray.from_dict
"""
if not set(['coords', 'data_vars']).issubset(set(d)):
variables = d.items()
else:
import itertools
variables = itertools.chain(d.get('coords', {}).items(),
d.get('data_vars', {}).items())
try:
variable_dict = OrderedDict([(k, (v['dims'],
v['data'],
v.get('attrs'))) for
k, v in variables])
except KeyError as e:
raise ValueError(
"cannot convert dict without the key "
"'{dims_data}'".format(dims_data=str(e.args[0])))
obj = cls(variable_dict)
# what if coords aren't dims?
coords = set(d.get('coords', {})) - set(d.get('dims', {}))
obj = obj.set_coords(coords)
obj.attrs.update(d.get('attrs', {}))
return obj
|
[
"def",
"from_dict",
"(",
"cls",
",",
"d",
")",
":",
"if",
"not",
"set",
"(",
"[",
"'coords'",
",",
"'data_vars'",
"]",
")",
".",
"issubset",
"(",
"set",
"(",
"d",
")",
")",
":",
"variables",
"=",
"d",
".",
"items",
"(",
")",
"else",
":",
"import",
"itertools",
"variables",
"=",
"itertools",
".",
"chain",
"(",
"d",
".",
"get",
"(",
"'coords'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
",",
"d",
".",
"get",
"(",
"'data_vars'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
")",
"try",
":",
"variable_dict",
"=",
"OrderedDict",
"(",
"[",
"(",
"k",
",",
"(",
"v",
"[",
"'dims'",
"]",
",",
"v",
"[",
"'data'",
"]",
",",
"v",
".",
"get",
"(",
"'attrs'",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"variables",
"]",
")",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"cannot convert dict without the key \"",
"\"'{dims_data}'\"",
".",
"format",
"(",
"dims_data",
"=",
"str",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
")",
"obj",
"=",
"cls",
"(",
"variable_dict",
")",
"# what if coords aren't dims?",
"coords",
"=",
"set",
"(",
"d",
".",
"get",
"(",
"'coords'",
",",
"{",
"}",
")",
")",
"-",
"set",
"(",
"d",
".",
"get",
"(",
"'dims'",
",",
"{",
"}",
")",
")",
"obj",
"=",
"obj",
".",
"set_coords",
"(",
"coords",
")",
"obj",
".",
"attrs",
".",
"update",
"(",
"d",
".",
"get",
"(",
"'attrs'",
",",
"{",
"}",
")",
")",
"return",
"obj"
] | 34.483333 | 21.65 |
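A short usage sketch with the first dict form from the docstring; this is the standard public xarray API:

import xarray as xr

d = {'t': {'dims': ('t',), 'data': [0, 1, 2]},
     'a': {'dims': ('t',), 'data': [10, 20, 30]}}
ds = xr.Dataset.from_dict(d)
print(ds['a'].values)  # [10 20 30]
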
def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname)
|
[
"def",
"get_extract_method",
"(",
"path",
")",
":",
"info_path",
"=",
"_get_info_path",
"(",
"path",
")",
"info",
"=",
"_read_info",
"(",
"info_path",
")",
"fname",
"=",
"info",
".",
"get",
"(",
"'original_fname'",
",",
"path",
")",
"if",
"info",
"else",
"path",
"return",
"_guess_extract_method",
"(",
"fname",
")"
] | 44.166667 | 8.333333 |
def decodeTagAttributes(self, text):
"""docstring for decodeTagAttributes"""
attribs = {}
if text.strip() == u'':
return attribs
scanner = _attributePat.scanner(text)
match = scanner.search()
while match:
key, val1, val2, val3, val4 = match.groups()
value = val1 or val2 or val3 or val4
if value:
value = _space.sub(u' ', value).strip()
else:
value = ''
attribs[key] = self.decodeCharReferences(value)
match = scanner.search()
return attribs
|
[
"def",
"decodeTagAttributes",
"(",
"self",
",",
"text",
")",
":",
"attribs",
"=",
"{",
"}",
"if",
"text",
".",
"strip",
"(",
")",
"==",
"u''",
":",
"return",
"attribs",
"scanner",
"=",
"_attributePat",
".",
"scanner",
"(",
"text",
")",
"match",
"=",
"scanner",
".",
"search",
"(",
")",
"while",
"match",
":",
"key",
",",
"val1",
",",
"val2",
",",
"val3",
",",
"val4",
"=",
"match",
".",
"groups",
"(",
")",
"value",
"=",
"val1",
"or",
"val2",
"or",
"val3",
"or",
"val4",
"if",
"value",
":",
"value",
"=",
"_space",
".",
"sub",
"(",
"u' '",
",",
"value",
")",
".",
"strip",
"(",
")",
"else",
":",
"value",
"=",
"''",
"attribs",
"[",
"key",
"]",
"=",
"self",
".",
"decodeCharReferences",
"(",
"value",
")",
"match",
"=",
"scanner",
".",
"search",
"(",
")",
"return",
"attribs"
] | 26 | 16.277778 |
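A standalone sketch of the key=value grammar this parser assumes. `_attributePat` is a module internal that is not shown, so the regex below is an illustrative stand-in for it:

import re

_attribute = re.compile(r'(\w+)\s*=\s*(?:"([^"]*)"|\'([^\']*)\'|(\S+))')

def parse_attrs(text):
    attribs = {}
    for m in _attribute.finditer(text):
        key, dq, sq, bare = m.groups()
        attribs[key] = dq or sq or bare or ''
    return attribs

parse_attrs('class="box" id=main title=\'A Title\'')
# -> {'class': 'box', 'id': 'main', 'title': 'A Title'}
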
def _AppendRecord(self):
"""Adds current record to result if well formed."""
# If no Values then don't output.
if not self.values:
return
cur_record = []
for value in self.values:
try:
value.OnSaveRecord()
except SkipRecord:
self._ClearRecord()
return
except SkipValue:
continue
# Build current record into a list.
cur_record.append(value.value)
# If no Values in template or whole record is empty then don't output.
if len(cur_record) == (cur_record.count(None) + cur_record.count([])):
return
# Replace any 'None' entries with null string ''.
while None in cur_record:
cur_record[cur_record.index(None)] = ''
self._result.append(cur_record)
self._ClearRecord()
|
[
"def",
"_AppendRecord",
"(",
"self",
")",
":",
"# If no Values then don't output.",
"if",
"not",
"self",
".",
"values",
":",
"return",
"cur_record",
"=",
"[",
"]",
"for",
"value",
"in",
"self",
".",
"values",
":",
"try",
":",
"value",
".",
"OnSaveRecord",
"(",
")",
"except",
"SkipRecord",
":",
"self",
".",
"_ClearRecord",
"(",
")",
"return",
"except",
"SkipValue",
":",
"continue",
"# Build current record into a list.",
"cur_record",
".",
"append",
"(",
"value",
".",
"value",
")",
"# If no Values in template or whole record is empty then don't output.",
"if",
"len",
"(",
"cur_record",
")",
"==",
"(",
"cur_record",
".",
"count",
"(",
"None",
")",
"+",
"cur_record",
".",
"count",
"(",
"[",
"]",
")",
")",
":",
"return",
"# Replace any 'None' entries with null string ''.",
"while",
"None",
"in",
"cur_record",
":",
"cur_record",
"[",
"cur_record",
".",
"index",
"(",
"None",
")",
"]",
"=",
"''",
"self",
".",
"_result",
".",
"append",
"(",
"cur_record",
")",
"self",
".",
"_ClearRecord",
"(",
")"
] | 25.433333 | 20.866667 |
def lock_retention_policy(self, client=None):
"""Lock the bucket's retention policy.
:raises ValueError:
if the bucket has no metageneration (i.e., new or never reloaded);
if the bucket has no retention policy assigned;
if the bucket's retention policy is already locked.
"""
if "metageneration" not in self._properties:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
policy = self._properties.get("retentionPolicy")
if policy is None:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
if policy.get("isLocked"):
raise ValueError("Bucket's retention policy is already locked.")
client = self._require_client(client)
query_params = {"ifMetagenerationMatch": self.metageneration}
if self.user_project is not None:
query_params["userProject"] = self.user_project
path = "/b/{}/lockRetentionPolicy".format(self.name)
api_response = client._connection.api_request(
method="POST", path=path, query_params=query_params, _target_object=self
)
self._set_properties(api_response)
|
[
"def",
"lock_retention_policy",
"(",
"self",
",",
"client",
"=",
"None",
")",
":",
"if",
"\"metageneration\"",
"not",
"in",
"self",
".",
"_properties",
":",
"raise",
"ValueError",
"(",
"\"Bucket has no retention policy assigned: try 'reload'?\"",
")",
"policy",
"=",
"self",
".",
"_properties",
".",
"get",
"(",
"\"retentionPolicy\"",
")",
"if",
"policy",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Bucket has no retention policy assigned: try 'reload'?\"",
")",
"if",
"policy",
".",
"get",
"(",
"\"isLocked\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Bucket's retention policy is already locked.\"",
")",
"client",
"=",
"self",
".",
"_require_client",
"(",
"client",
")",
"query_params",
"=",
"{",
"\"ifMetagenerationMatch\"",
":",
"self",
".",
"metageneration",
"}",
"if",
"self",
".",
"user_project",
"is",
"not",
"None",
":",
"query_params",
"[",
"\"userProject\"",
"]",
"=",
"self",
".",
"user_project",
"path",
"=",
"\"/b/{}/lockRetentionPolicy\"",
".",
"format",
"(",
"self",
".",
"name",
")",
"api_response",
"=",
"client",
".",
"_connection",
".",
"api_request",
"(",
"method",
"=",
"\"POST\"",
",",
"path",
"=",
"path",
",",
"query_params",
"=",
"query_params",
",",
"_target_object",
"=",
"self",
")",
"self",
".",
"_set_properties",
"(",
"api_response",
")"
] | 38.967742 | 24.483871 |
def templates_for_device(request, templates):
"""
    Given a template name (or list of them), returns the template names
    as a list, with each name prefixed with the device directory and
    inserted before its associated default in the list.
"""
from yacms.conf import settings
if not isinstance(templates, (list, tuple)):
templates = [templates]
device = device_from_request(request)
device_templates = []
for template in templates:
if device:
device_templates.append("%s/%s" % (device, template))
if settings.DEVICE_DEFAULT and settings.DEVICE_DEFAULT != device:
default = "%s/%s" % (settings.DEVICE_DEFAULT, template)
device_templates.append(default)
device_templates.append(template)
return device_templates
|
[
"def",
"templates_for_device",
"(",
"request",
",",
"templates",
")",
":",
"from",
"yacms",
".",
"conf",
"import",
"settings",
"if",
"not",
"isinstance",
"(",
"templates",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"templates",
"=",
"[",
"templates",
"]",
"device",
"=",
"device_from_request",
"(",
"request",
")",
"device_templates",
"=",
"[",
"]",
"for",
"template",
"in",
"templates",
":",
"if",
"device",
":",
"device_templates",
".",
"append",
"(",
"\"%s/%s\"",
"%",
"(",
"device",
",",
"template",
")",
")",
"if",
"settings",
".",
"DEVICE_DEFAULT",
"and",
"settings",
".",
"DEVICE_DEFAULT",
"!=",
"device",
":",
"default",
"=",
"\"%s/%s\"",
"%",
"(",
"settings",
".",
"DEVICE_DEFAULT",
",",
"template",
")",
"device_templates",
".",
"append",
"(",
"default",
")",
"device_templates",
".",
"append",
"(",
"template",
")",
"return",
"device_templates"
] | 41.789474 | 13.052632 |
def scatter_norrec(self, filename=None, individual=False):
"""Create a scatter plot for all diff pairs
Parameters
----------
filename : string, optional
if given, save plot to file
individual : bool, optional
if set to True, return one figure for each row
Returns
-------
fig : matplotlib.Figure or list of :py:class:`matplotlib.Figure.Figure`
objects the figure object
axes : list of matplotlib.axes
the individual axes
"""
# if not otherwise specified, use these column pairs:
std_diff_labels = {
'r': 'rdiff',
'rpha': 'rphadiff',
}
diff_labels = std_diff_labels
# check which columns are present in the data
labels_to_use = {}
for key, item in diff_labels.items():
# only use if BOTH columns are present
if key in self.data.columns and item in self.data.columns:
labels_to_use[key] = item
g_freq = self.data.groupby('frequency')
frequencies = list(sorted(g_freq.groups.keys()))
if individual:
figures = {}
axes_all = {}
else:
Nx = len(labels_to_use.keys())
Ny = len(frequencies)
fig, axes = plt.subplots(
Ny, Nx,
figsize=(Nx * 2.5, Ny * 2.5)
)
for row, (name, item) in enumerate(g_freq):
if individual:
fig, axes_row = plt.subplots(
1, 2, figsize=(16 / 2.54, 6 / 2.54))
else:
axes_row = axes[row, :]
# loop over the various columns
for col_nr, (key, diff_column) in enumerate(
sorted(labels_to_use.items())):
indices = np.where(~np.isnan(item[diff_column]))[0]
ax = axes_row[col_nr]
ax.scatter(
item[key],
item[diff_column],
)
ax.set_xlabel(key)
ax.set_ylabel(diff_column)
ax.set_title('N: {}'.format(len(indices)))
if individual:
fig.tight_layout()
figures[name] = fig
axes_all[name] = axes_row
if individual:
return figures, axes_all
else:
fig.tight_layout()
return fig, axes
|
[
"def",
"scatter_norrec",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"individual",
"=",
"False",
")",
":",
"# if not otherwise specified, use these column pairs:",
"std_diff_labels",
"=",
"{",
"'r'",
":",
"'rdiff'",
",",
"'rpha'",
":",
"'rphadiff'",
",",
"}",
"diff_labels",
"=",
"std_diff_labels",
"# check which columns are present in the data",
"labels_to_use",
"=",
"{",
"}",
"for",
"key",
",",
"item",
"in",
"diff_labels",
".",
"items",
"(",
")",
":",
"# only use if BOTH columns are present",
"if",
"key",
"in",
"self",
".",
"data",
".",
"columns",
"and",
"item",
"in",
"self",
".",
"data",
".",
"columns",
":",
"labels_to_use",
"[",
"key",
"]",
"=",
"item",
"g_freq",
"=",
"self",
".",
"data",
".",
"groupby",
"(",
"'frequency'",
")",
"frequencies",
"=",
"list",
"(",
"sorted",
"(",
"g_freq",
".",
"groups",
".",
"keys",
"(",
")",
")",
")",
"if",
"individual",
":",
"figures",
"=",
"{",
"}",
"axes_all",
"=",
"{",
"}",
"else",
":",
"Nx",
"=",
"len",
"(",
"labels_to_use",
".",
"keys",
"(",
")",
")",
"Ny",
"=",
"len",
"(",
"frequencies",
")",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"Ny",
",",
"Nx",
",",
"figsize",
"=",
"(",
"Nx",
"*",
"2.5",
",",
"Ny",
"*",
"2.5",
")",
")",
"for",
"row",
",",
"(",
"name",
",",
"item",
")",
"in",
"enumerate",
"(",
"g_freq",
")",
":",
"if",
"individual",
":",
"fig",
",",
"axes_row",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"2",
",",
"figsize",
"=",
"(",
"16",
"/",
"2.54",
",",
"6",
"/",
"2.54",
")",
")",
"else",
":",
"axes_row",
"=",
"axes",
"[",
"row",
",",
":",
"]",
"# loop over the various columns",
"for",
"col_nr",
",",
"(",
"key",
",",
"diff_column",
")",
"in",
"enumerate",
"(",
"sorted",
"(",
"labels_to_use",
".",
"items",
"(",
")",
")",
")",
":",
"indices",
"=",
"np",
".",
"where",
"(",
"~",
"np",
".",
"isnan",
"(",
"item",
"[",
"diff_column",
"]",
")",
")",
"[",
"0",
"]",
"ax",
"=",
"axes_row",
"[",
"col_nr",
"]",
"ax",
".",
"scatter",
"(",
"item",
"[",
"key",
"]",
",",
"item",
"[",
"diff_column",
"]",
",",
")",
"ax",
".",
"set_xlabel",
"(",
"key",
")",
"ax",
".",
"set_ylabel",
"(",
"diff_column",
")",
"ax",
".",
"set_title",
"(",
"'N: {}'",
".",
"format",
"(",
"len",
"(",
"indices",
")",
")",
")",
"if",
"individual",
":",
"fig",
".",
"tight_layout",
"(",
")",
"figures",
"[",
"name",
"]",
"=",
"fig",
"axes_all",
"[",
"name",
"]",
"=",
"axes_row",
"if",
"individual",
":",
"return",
"figures",
",",
"axes_all",
"else",
":",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"fig",
",",
"axes"
] | 31.460526 | 16.039474 |
def _merge_command(run, full_result, offset, result):
"""Merge a write command result into the full bulk result.
"""
affected = result.get("n", 0)
if run.op_type == _INSERT:
full_result["nInserted"] += affected
elif run.op_type == _DELETE:
full_result["nRemoved"] += affected
elif run.op_type == _UPDATE:
upserted = result.get("upserted")
if upserted:
n_upserted = len(upserted)
for doc in upserted:
doc["index"] = run.index(doc["index"] + offset)
full_result["upserted"].extend(upserted)
full_result["nUpserted"] += n_upserted
full_result["nMatched"] += (affected - n_upserted)
else:
full_result["nMatched"] += affected
full_result["nModified"] += result["nModified"]
write_errors = result.get("writeErrors")
if write_errors:
for doc in write_errors:
# Leave the server response intact for APM.
replacement = doc.copy()
idx = doc["index"] + offset
replacement["index"] = run.index(idx)
# Add the failed operation to the error document.
replacement[_UOP] = run.ops[idx]
full_result["writeErrors"].append(replacement)
wc_error = result.get("writeConcernError")
if wc_error:
full_result["writeConcernErrors"].append(wc_error)
|
[
"def",
"_merge_command",
"(",
"run",
",",
"full_result",
",",
"offset",
",",
"result",
")",
":",
"affected",
"=",
"result",
".",
"get",
"(",
"\"n\"",
",",
"0",
")",
"if",
"run",
".",
"op_type",
"==",
"_INSERT",
":",
"full_result",
"[",
"\"nInserted\"",
"]",
"+=",
"affected",
"elif",
"run",
".",
"op_type",
"==",
"_DELETE",
":",
"full_result",
"[",
"\"nRemoved\"",
"]",
"+=",
"affected",
"elif",
"run",
".",
"op_type",
"==",
"_UPDATE",
":",
"upserted",
"=",
"result",
".",
"get",
"(",
"\"upserted\"",
")",
"if",
"upserted",
":",
"n_upserted",
"=",
"len",
"(",
"upserted",
")",
"for",
"doc",
"in",
"upserted",
":",
"doc",
"[",
"\"index\"",
"]",
"=",
"run",
".",
"index",
"(",
"doc",
"[",
"\"index\"",
"]",
"+",
"offset",
")",
"full_result",
"[",
"\"upserted\"",
"]",
".",
"extend",
"(",
"upserted",
")",
"full_result",
"[",
"\"nUpserted\"",
"]",
"+=",
"n_upserted",
"full_result",
"[",
"\"nMatched\"",
"]",
"+=",
"(",
"affected",
"-",
"n_upserted",
")",
"else",
":",
"full_result",
"[",
"\"nMatched\"",
"]",
"+=",
"affected",
"full_result",
"[",
"\"nModified\"",
"]",
"+=",
"result",
"[",
"\"nModified\"",
"]",
"write_errors",
"=",
"result",
".",
"get",
"(",
"\"writeErrors\"",
")",
"if",
"write_errors",
":",
"for",
"doc",
"in",
"write_errors",
":",
"# Leave the server response intact for APM.",
"replacement",
"=",
"doc",
".",
"copy",
"(",
")",
"idx",
"=",
"doc",
"[",
"\"index\"",
"]",
"+",
"offset",
"replacement",
"[",
"\"index\"",
"]",
"=",
"run",
".",
"index",
"(",
"idx",
")",
"# Add the failed operation to the error document.",
"replacement",
"[",
"_UOP",
"]",
"=",
"run",
".",
"ops",
"[",
"idx",
"]",
"full_result",
"[",
"\"writeErrors\"",
"]",
".",
"append",
"(",
"replacement",
")",
"wc_error",
"=",
"result",
".",
"get",
"(",
"\"writeConcernError\"",
")",
"if",
"wc_error",
":",
"full_result",
"[",
"\"writeConcernErrors\"",
"]",
".",
"append",
"(",
"wc_error",
")"
] | 35.973684 | 14.526316 |
def activityrequest(self, event):
"""ActivityMonitor event handler for incoming events
        :param event: event with incoming ActivityMonitor message
"""
# self.log("Event: '%s'" % event.__dict__)
try:
action = event.action
data = event.data
self.log("Activityrequest: ", action, data)
except Exception as e:
self.log("Error: '%s' %s" % (e, type(e)), lvl=error)
|
[
"def",
"activityrequest",
"(",
"self",
",",
"event",
")",
":",
"# self.log(\"Event: '%s'\" % event.__dict__)",
"try",
":",
"action",
"=",
"event",
".",
"action",
"data",
"=",
"event",
".",
"data",
"self",
".",
"log",
"(",
"\"Activityrequest: \"",
",",
"action",
",",
"data",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"Error: '%s' %s\"",
"%",
"(",
"e",
",",
"type",
"(",
"e",
")",
")",
",",
"lvl",
"=",
"error",
")"
] | 29 | 19.333333 |
def convert_pointSource(self, node):
"""
Convert the given node into a point source object.
:param node: a node with tag pointGeometry
:returns: a :class:`openquake.hazardlib.source.PointSource` instance
"""
geom = node.pointGeometry
lon_lat = ~geom.Point.pos
msr = valid.SCALEREL[~node.magScaleRel]()
return source.PointSource(
source_id=node['id'],
name=node['name'],
tectonic_region_type=node.attrib.get('tectonicRegion'),
mfd=self.convert_mfdist(node),
rupture_mesh_spacing=self.rupture_mesh_spacing,
magnitude_scaling_relationship=msr,
rupture_aspect_ratio=~node.ruptAspectRatio,
upper_seismogenic_depth=~geom.upperSeismoDepth,
lower_seismogenic_depth=~geom.lowerSeismoDepth,
location=geo.Point(*lon_lat),
nodal_plane_distribution=self.convert_npdist(node),
hypocenter_distribution=self.convert_hpdist(node),
temporal_occurrence_model=self.get_tom(node))
|
[
"def",
"convert_pointSource",
"(",
"self",
",",
"node",
")",
":",
"geom",
"=",
"node",
".",
"pointGeometry",
"lon_lat",
"=",
"~",
"geom",
".",
"Point",
".",
"pos",
"msr",
"=",
"valid",
".",
"SCALEREL",
"[",
"~",
"node",
".",
"magScaleRel",
"]",
"(",
")",
"return",
"source",
".",
"PointSource",
"(",
"source_id",
"=",
"node",
"[",
"'id'",
"]",
",",
"name",
"=",
"node",
"[",
"'name'",
"]",
",",
"tectonic_region_type",
"=",
"node",
".",
"attrib",
".",
"get",
"(",
"'tectonicRegion'",
")",
",",
"mfd",
"=",
"self",
".",
"convert_mfdist",
"(",
"node",
")",
",",
"rupture_mesh_spacing",
"=",
"self",
".",
"rupture_mesh_spacing",
",",
"magnitude_scaling_relationship",
"=",
"msr",
",",
"rupture_aspect_ratio",
"=",
"~",
"node",
".",
"ruptAspectRatio",
",",
"upper_seismogenic_depth",
"=",
"~",
"geom",
".",
"upperSeismoDepth",
",",
"lower_seismogenic_depth",
"=",
"~",
"geom",
".",
"lowerSeismoDepth",
",",
"location",
"=",
"geo",
".",
"Point",
"(",
"*",
"lon_lat",
")",
",",
"nodal_plane_distribution",
"=",
"self",
".",
"convert_npdist",
"(",
"node",
")",
",",
"hypocenter_distribution",
"=",
"self",
".",
"convert_hpdist",
"(",
"node",
")",
",",
"temporal_occurrence_model",
"=",
"self",
".",
"get_tom",
"(",
"node",
")",
")"
] | 44.375 | 13.541667 |
def get_first_name_last_name(self):
"""
:rtype: str
"""
names = []
if self._get_first_names():
names += self._get_first_names()
if self._get_additional_names():
names += self._get_additional_names()
if self._get_last_names():
names += self._get_last_names()
if names:
return helpers.list_to_string(names, " ")
else:
return self.get_full_name()
|
[
"def",
"get_first_name_last_name",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"if",
"self",
".",
"_get_first_names",
"(",
")",
":",
"names",
"+=",
"self",
".",
"_get_first_names",
"(",
")",
"if",
"self",
".",
"_get_additional_names",
"(",
")",
":",
"names",
"+=",
"self",
".",
"_get_additional_names",
"(",
")",
"if",
"self",
".",
"_get_last_names",
"(",
")",
":",
"names",
"+=",
"self",
".",
"_get_last_names",
"(",
")",
"if",
"names",
":",
"return",
"helpers",
".",
"list_to_string",
"(",
"names",
",",
"\" \"",
")",
"else",
":",
"return",
"self",
".",
"get_full_name",
"(",
")"
] | 30.733333 | 9.266667 |
def mv_normal_cov_like(x, mu, C):
R"""
Multivariate normal log-likelihood parameterized by a covariance
matrix.
.. math::
        f(x \mid \mu, C) = \frac{1}{(2\pi)^{k/2}|C|^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `C` : (k,k) Positive definite covariance matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.cov_mvnorm(r, mu, C) for r in x])
else:
return flib.cov_mvnorm(x, mu, C)
|
[
"def",
"mv_normal_cov_like",
"(",
"x",
",",
"mu",
",",
"C",
")",
":",
"# TODO: Vectorize in Fortran",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"x",
")",
")",
">",
"1",
":",
"return",
"np",
".",
"sum",
"(",
"[",
"flib",
".",
"cov_mvnorm",
"(",
"r",
",",
"mu",
",",
"C",
")",
"for",
"r",
"in",
"x",
"]",
")",
"else",
":",
"return",
"flib",
".",
"cov_mvnorm",
"(",
"x",
",",
"mu",
",",
"C",
")"
] | 29.666667 | 23.333333 |
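A pure-NumPy cross-check of the same log-density. flib.cov_mvnorm is a compiled Fortran routine, so this sketch reimplements the standard multivariate normal log-pdf it computes:

import numpy as np

def mvn_logpdf(x, mu, C):
    k = len(mu)
    diff = np.asarray(x) - np.asarray(mu)
    _, logdet = np.linalg.slogdet(C)        # log|C|, numerically stable
    maha = diff @ np.linalg.solve(C, diff)  # (x-mu)' C^{-1} (x-mu)
    return -0.5 * (k * np.log(2 * np.pi) + logdet + maha)

mvn_logpdf([0.5, -0.2], mu=[0.0, 0.0], C=np.eye(2))
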
def healpix_to_lonlat(self, healpix_index, dx=None, dy=None):
"""
Convert HEALPix indices (optionally with offsets) to longitudes/latitudes
Parameters
----------
healpix_index : `~numpy.ndarray`
1-D array of HEALPix indices
dx, dy : `~numpy.ndarray`, optional
1-D arrays of offsets inside the HEALPix pixel, which must be in
the range [0:1] (0.5 is the center of the HEALPix pixels). If not
specified, the position at the center of the pixel is used.
Returns
-------
lon : :class:`~astropy.coordinates.Longitude`
The longitude values
lat : :class:`~astropy.coordinates.Latitude`
The latitude values
"""
return healpix_to_lonlat(healpix_index, self.nside, dx=dx, dy=dy, order=self.order)
|
[
"def",
"healpix_to_lonlat",
"(",
"self",
",",
"healpix_index",
",",
"dx",
"=",
"None",
",",
"dy",
"=",
"None",
")",
":",
"return",
"healpix_to_lonlat",
"(",
"healpix_index",
",",
"self",
".",
"nside",
",",
"dx",
"=",
"dx",
",",
"dy",
"=",
"dy",
",",
"order",
"=",
"self",
".",
"order",
")"
] | 39.809524 | 20.761905 |
def list_results(context, id, sort, limit):
"""list_result(context, id)
List all job results.
>>> dcictl job-results [OPTIONS]
:param string id: ID of the job to consult result for [required]
:param string sort: Field to apply sort
:param integer limit: Max number of rows to return
"""
headers = ['filename', 'name', 'total', 'success', 'failures', 'errors',
'skips', 'time']
result = job.list_results(context, id=id, sort=sort, limit=limit)
utils.format_output(result, context.format, headers)
|
[
"def",
"list_results",
"(",
"context",
",",
"id",
",",
"sort",
",",
"limit",
")",
":",
"headers",
"=",
"[",
"'filename'",
",",
"'name'",
",",
"'total'",
",",
"'success'",
",",
"'failures'",
",",
"'errors'",
",",
"'skips'",
",",
"'time'",
"]",
"result",
"=",
"job",
".",
"list_results",
"(",
"context",
",",
"id",
"=",
"id",
",",
"sort",
"=",
"sort",
",",
"limit",
"=",
"limit",
")",
"utils",
".",
"format_output",
"(",
"result",
",",
"context",
".",
"format",
",",
"headers",
")"
] | 33.6875 | 19.8125 |
def fixed_legend_position(self, fixed_legend_position):
"""Sets the fixed_legend_position of this ChartSettings.
Where the fixed legend should be displayed with respect to the chart # noqa: E501
:param fixed_legend_position: The fixed_legend_position of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["RIGHT", "TOP", "LEFT", "BOTTOM"] # noqa: E501
if fixed_legend_position not in allowed_values:
raise ValueError(
"Invalid value for `fixed_legend_position` ({0}), must be one of {1}" # noqa: E501
.format(fixed_legend_position, allowed_values)
)
self._fixed_legend_position = fixed_legend_position
|
[
"def",
"fixed_legend_position",
"(",
"self",
",",
"fixed_legend_position",
")",
":",
"allowed_values",
"=",
"[",
"\"RIGHT\"",
",",
"\"TOP\"",
",",
"\"LEFT\"",
",",
"\"BOTTOM\"",
"]",
"# noqa: E501",
"if",
"fixed_legend_position",
"not",
"in",
"allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `fixed_legend_position` ({0}), must be one of {1}\"",
"# noqa: E501",
".",
"format",
"(",
"fixed_legend_position",
",",
"allowed_values",
")",
")",
"self",
".",
"_fixed_legend_position",
"=",
"fixed_legend_position"
] | 45.5 | 28.3125 |
def get_packages(self, offset=None, limit=None, api=None):
"""
Return list of packages that belong to this automation
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: sevenbridges Api instance.
:return: AutomationPackage collection
"""
api = api or self._API
return AutomationPackage.query(
automation=self.id, offset=offset, limit=limit, api=api
)
|
[
"def",
"get_packages",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"api",
"=",
"None",
")",
":",
"api",
"=",
"api",
"or",
"self",
".",
"_API",
"return",
"AutomationPackage",
".",
"query",
"(",
"automation",
"=",
"self",
".",
"id",
",",
"offset",
"=",
"offset",
",",
"limit",
"=",
"limit",
",",
"api",
"=",
"api",
")"
] | 38.166667 | 10.166667 |
def _aspirate_plunger_position(self, ul):
"""Calculate axis position for a given liquid volume.
Translates the passed liquid volume to absolute coordinates
on the axis associated with this pipette.
Calibration of the pipette motor's ul-to-mm conversion is required
"""
millimeters = ul / self._ul_per_mm(ul, 'aspirate')
destination_mm = self._get_plunger_position('bottom') + millimeters
return round(destination_mm, 6)
|
[
"def",
"_aspirate_plunger_position",
"(",
"self",
",",
"ul",
")",
":",
"millimeters",
"=",
"ul",
"/",
"self",
".",
"_ul_per_mm",
"(",
"ul",
",",
"'aspirate'",
")",
"destination_mm",
"=",
"self",
".",
"_get_plunger_position",
"(",
"'bottom'",
")",
"+",
"millimeters",
"return",
"round",
"(",
"destination_mm",
",",
"6",
")"
] | 43.181818 | 18.636364 |
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under long name or short name.
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under long name or short name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
return False
|
[
"def",
"_FlagIsRegistered",
"(",
"self",
",",
"flag_obj",
")",
":",
"flag_dict",
"=",
"self",
".",
"FlagDict",
"(",
")",
"# Check whether flag_obj is registered under its long name.",
"name",
"=",
"flag_obj",
".",
"name",
"if",
"flag_dict",
".",
"get",
"(",
"name",
",",
"None",
")",
"==",
"flag_obj",
":",
"return",
"True",
"# Check whether flag_obj is registered under its short name.",
"short_name",
"=",
"flag_obj",
".",
"short_name",
"if",
"(",
"short_name",
"is",
"not",
"None",
"and",
"flag_dict",
".",
"get",
"(",
"short_name",
",",
"None",
")",
"==",
"flag_obj",
")",
":",
"return",
"True",
"return",
"False"
] | 32.75 | 18 |
def streamer(frontend, backend):
"""Simple push/pull streamer
:param int frontend: fontend zeromq port
:param int backend: backend zeromq port
"""
try:
context = zmq.Context()
front_pull = context.socket(zmq.PULL)
front_pull.set_hwm(0)
front_pull.bind("tcp://*:%d" % frontend)
back_push = context.socket(zmq.PUSH)
back_push.bind("tcp://*:%d" % backend)
print("streamer started, backend on port : %d\tfrontend on port: %d" % (backend, frontend))
zmq.proxy(front_pull, back_push)
except Exception as e:
print(e)
finally:
front_pull.close()
back_push.close()
context.term()
|
[
"def",
"streamer",
"(",
"frontend",
",",
"backend",
")",
":",
"try",
":",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"front_pull",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PULL",
")",
"front_pull",
".",
"set_hwm",
"(",
"0",
")",
"front_pull",
".",
"bind",
"(",
"\"tcp://*:%d\"",
"%",
"frontend",
")",
"back_push",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PUSH",
")",
"back_push",
".",
"bind",
"(",
"\"tcp://*:%d\"",
"%",
"backend",
")",
"print",
"(",
"\"streamer started, backend on port : %d\\tfrontend on port: %d\"",
"%",
"(",
"backend",
",",
"frontend",
")",
")",
"zmq",
".",
"proxy",
"(",
"front_pull",
",",
"back_push",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"finally",
":",
"front_pull",
".",
"close",
"(",
")",
"back_push",
".",
"close",
"(",
")",
"context",
".",
"term",
"(",
")"
] | 28.125 | 17.583333 |
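A hedged companion sketch of the two endpoints, run in separate processes: a producer connects to the frontend port and a worker connects to the backend port. The port numbers are illustrative:

import zmq

ctx = zmq.Context()

# producer process: feed tasks into the streamer's frontend
push = ctx.socket(zmq.PUSH)
push.connect("tcp://localhost:5559")  # assumed frontend port
push.send(b"task-1")

# worker process: pull tasks from the streamer's backend
pull = ctx.socket(zmq.PULL)
pull.connect("tcp://localhost:5560")  # assumed backend port
msg = pull.recv()
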
def interface(cls):
'''
Marks the decorated class as an abstract interface.
Injects following classmethods:
.. py:method:: .all(context)
Returns a list of instances of each component in the ``context`` implementing this ``@interface``
:param context: context to look in
:type context: :class:`Context`
:returns: list(``cls``)
.. py:method:: .any(context)
Returns the first suitable instance implementing this ``@interface`` or raises :exc:`NoImplementationError` if none is available.
:param context: context to look in
:type context: :class:`Context`
:returns: ``cls``
.. py:method:: .classes()
Returns a list of classes implementing this ``@interface``
:returns: list(class)
'''
if not cls:
return None
cls.implementations = []
# Inject methods
def _all(cls, context, ignore_exceptions=False):
return list(context.get_components(cls, ignore_exceptions=ignore_exceptions))
cls.all = _all.__get__(cls)
def _any(cls, context):
instances = cls.all(context)
if instances:
return instances[0]
raise NoImplementationError(cls)
cls.any = _any.__get__(cls)
def _classes(cls):
return list(cls.implementations)
cls.classes = _classes.__get__(cls)
log.debug('Registering [%s] (interface)', get_fqdn(cls))
return cls
|
[
"def",
"interface",
"(",
"cls",
")",
":",
"if",
"not",
"cls",
":",
"return",
"None",
"cls",
".",
"implementations",
"=",
"[",
"]",
"# Inject methods",
"def",
"_all",
"(",
"cls",
",",
"context",
",",
"ignore_exceptions",
"=",
"False",
")",
":",
"return",
"list",
"(",
"context",
".",
"get_components",
"(",
"cls",
",",
"ignore_exceptions",
"=",
"ignore_exceptions",
")",
")",
"cls",
".",
"all",
"=",
"_all",
".",
"__get__",
"(",
"cls",
")",
"def",
"_any",
"(",
"cls",
",",
"context",
")",
":",
"instances",
"=",
"cls",
".",
"all",
"(",
"context",
")",
"if",
"instances",
":",
"return",
"instances",
"[",
"0",
"]",
"raise",
"NoImplementationError",
"(",
"cls",
")",
"cls",
".",
"any",
"=",
"_any",
".",
"__get__",
"(",
"cls",
")",
"def",
"_classes",
"(",
"cls",
")",
":",
"return",
"list",
"(",
"cls",
".",
"implementations",
")",
"cls",
".",
"classes",
"=",
"_classes",
".",
"__get__",
"(",
"cls",
")",
"log",
".",
"debug",
"(",
"'Registering [%s] (interface)'",
",",
"get_fqdn",
"(",
"cls",
")",
")",
"return",
"cls"
] | 27.075472 | 24.622642 |
def parse(self):
"""
Reads all lines from the current data source and yields each FileResult objects
"""
if self.data is None:
raise ValueError('No input data provided, unable to parse')
for line in self.data:
parts = line.strip().split()
try:
path = parts[0]
code = parts[1]
path, line, char = path.split(':')[:3]
if not re.match(POSITION, line):
continue
if not re.match(POSITION, char):
continue
if not re.match(ERROR_CODE, code):
continue
if not re.match(FILEPATH, path):
continue
# For parts mismatch
except IndexError:
continue
# For unpack mismatch
except ValueError:
continue
yield path, code, line, char, ' '.join(parts[2:])
|
[
"def",
"parse",
"(",
"self",
")",
":",
"if",
"self",
".",
"data",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No input data provided, unable to parse'",
")",
"for",
"line",
"in",
"self",
".",
"data",
":",
"parts",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"try",
":",
"path",
"=",
"parts",
"[",
"0",
"]",
"code",
"=",
"parts",
"[",
"1",
"]",
"path",
",",
"line",
",",
"char",
"=",
"path",
".",
"split",
"(",
"':'",
")",
"[",
":",
"3",
"]",
"if",
"not",
"re",
".",
"match",
"(",
"POSITION",
",",
"line",
")",
":",
"continue",
"if",
"not",
"re",
".",
"match",
"(",
"POSITION",
",",
"char",
")",
":",
"continue",
"if",
"not",
"re",
".",
"match",
"(",
"ERROR_CODE",
",",
"code",
")",
":",
"continue",
"if",
"not",
"re",
".",
"match",
"(",
"FILEPATH",
",",
"path",
")",
":",
"continue",
"# For parts mismatch",
"except",
"IndexError",
":",
"continue",
"# For unpack mismatch",
"except",
"ValueError",
":",
"continue",
"yield",
"path",
",",
"code",
",",
"line",
",",
"char",
",",
"' '",
".",
"join",
"(",
"parts",
"[",
"2",
":",
"]",
")"
] | 30.21875 | 17.15625 |
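An illustrative sketch of the input this parser expects: pycodestyle/flake8-style "path:line:col: CODE message" lines. POSITION, ERROR_CODE and FILEPATH are module-level regexes not shown here, and the sample assumes they match typical flake8 output:

lines = [
    "pkg/mod.py:12:1: E302 expected 2 blank lines, found 1",
    "not a lint line",  # fails the unpack/regex checks and is skipped
]
# With self.data set to `lines`, each yield is (path, code, line, char, message):
# ('pkg/mod.py', 'E302', '12', '1', 'expected 2 blank lines, found 1')
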
def find_by_field(self, table, field, field_value):
'''
        Query the database for records matching the given field condition.
        Args:
            table: table name (str)
            field: field name
            field_value: field value
        return:
            on success: [dict] the matching records
            on failure: -1, and the error message is printed
'''
sql = "select * from {} where {} = '{}'".format(
table, field, field_value)
res = self.query(sql)
return res
|
[
"def",
"find_by_field",
"(",
"self",
",",
"table",
",",
"field",
",",
"field_value",
")",
":",
"sql",
"=",
"\"select * from {} where {} = '{}'\"",
".",
"format",
"(",
"table",
",",
"field",
",",
"field_value",
")",
"res",
"=",
"self",
".",
"query",
"(",
"sql",
")",
"return",
"res"
] | 26.4 | 17.2 |
def _prepare_base_image(self):
"""
I am a private method for creating (possibly cheap) copies of a
base_image for start_instance to boot.
"""
if not self.base_image:
return defer.succeed(True)
if self.cheap_copy:
clone_cmd = "qemu-img"
clone_args = "create -b %(base)s -f qcow2 %(image)s"
else:
clone_cmd = "cp"
clone_args = "%(base)s %(image)s"
clone_args = clone_args % {
"base": self.base_image,
"image": self.image,
}
log.msg("Cloning base image: %s %s'" % (clone_cmd, clone_args))
d = utils.getProcessValue(clone_cmd, clone_args.split())
def _log_result(res):
log.msg("Cloning exit code was: %d" % res)
return res
def _log_error(err):
log.err("Cloning failed: %s" % err)
return err
d.addCallbacks(_log_result, _log_error)
return d
|
[
"def",
"_prepare_base_image",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"base_image",
":",
"return",
"defer",
".",
"succeed",
"(",
"True",
")",
"if",
"self",
".",
"cheap_copy",
":",
"clone_cmd",
"=",
"\"qemu-img\"",
"clone_args",
"=",
"\"create -b %(base)s -f qcow2 %(image)s\"",
"else",
":",
"clone_cmd",
"=",
"\"cp\"",
"clone_args",
"=",
"\"%(base)s %(image)s\"",
"clone_args",
"=",
"clone_args",
"%",
"{",
"\"base\"",
":",
"self",
".",
"base_image",
",",
"\"image\"",
":",
"self",
".",
"image",
",",
"}",
"log",
".",
"msg",
"(",
"\"Cloning base image: %s %s'\"",
"%",
"(",
"clone_cmd",
",",
"clone_args",
")",
")",
"d",
"=",
"utils",
".",
"getProcessValue",
"(",
"clone_cmd",
",",
"clone_args",
".",
"split",
"(",
")",
")",
"def",
"_log_result",
"(",
"res",
")",
":",
"log",
".",
"msg",
"(",
"\"Cloning exit code was: %d\"",
"%",
"res",
")",
"return",
"res",
"def",
"_log_error",
"(",
"err",
")",
":",
"log",
".",
"err",
"(",
"\"Cloning failed: %s\"",
"%",
"err",
")",
"return",
"err",
"d",
".",
"addCallbacks",
"(",
"_log_result",
",",
"_log_error",
")",
"return",
"d"
] | 27.457143 | 19.4 |
def login_with_password(self, username, password, limit=10):
"""Deprecated. Use ``login`` with ``sync=True``.
Login to the homeserver.
Args:
username (str): Account username
password (str): Account password
limit (int): Deprecated. How many messages to return when syncing.
This will be replaced by a filter API in a later release.
Returns:
str: Access token
Raises:
MatrixRequestError
"""
warn("login_with_password is deprecated. Use login with sync=True.",
DeprecationWarning)
return self.login(username, password, limit, sync=True)
|
[
"def",
"login_with_password",
"(",
"self",
",",
"username",
",",
"password",
",",
"limit",
"=",
"10",
")",
":",
"warn",
"(",
"\"login_with_password is deprecated. Use login with sync=True.\"",
",",
"DeprecationWarning",
")",
"return",
"self",
".",
"login",
"(",
"username",
",",
"password",
",",
"limit",
",",
"sync",
"=",
"True",
")"
] | 33.6 | 21.55 |
def besttype(x):
"""Convert string x to the most useful type, i.e. int, float or unicode string.
If x is a quoted string (single or double quotes) then the quotes
are stripped and the enclosed string returned.
.. Note::
Strings will be returned as Unicode strings (using :func:`to_unicode`).
.. versionchanged:: 0.7.0
removed `encoding keyword argument
"""
x = to_unicode(x) # make unicode as soon as possible
try:
x = x.strip()
except AttributeError:
pass
m = re.match(r"""['"](?P<value>.*)["']$""", x)
if m is None:
# not a quoted string, try different types
for converter in int, float, to_unicode: # try them in increasing order of lenience
try:
return converter(x)
except ValueError:
pass
else:
# quoted string
x = to_unicode(m.group('value'))
return x
|
[
"def",
"besttype",
"(",
"x",
")",
":",
"x",
"=",
"to_unicode",
"(",
"x",
")",
"# make unicode as soon as possible",
"try",
":",
"x",
"=",
"x",
".",
"strip",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"m",
"=",
"re",
".",
"match",
"(",
"r\"\"\"['\"](?P<value>.*)[\"']$\"\"\"",
",",
"x",
")",
"if",
"m",
"is",
"None",
":",
"# not a quoted string, try different types",
"for",
"converter",
"in",
"int",
",",
"float",
",",
"to_unicode",
":",
"# try them in increasing order of lenience",
"try",
":",
"return",
"converter",
"(",
"x",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"# quoted string",
"x",
"=",
"to_unicode",
"(",
"m",
".",
"group",
"(",
"'value'",
")",
")",
"return",
"x"
] | 30.166667 | 21.033333 |
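
A quick sanity sketch of the conversion order `besttype` tries (int, then float, then unicode); the inputs are hypothetical and this assumes `to_unicode` behaves like `str` for plain text:

    assert besttype("  42 ") == 42          # stripped, then parsed as int
    assert besttype("3.14") == 3.14         # int() fails, float() succeeds
    assert besttype("'hello'") == "hello"   # quoted string: quotes stripped
    assert besttype("abc") == "abc"         # falls through to a unicode string
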
def limit(self, max_):
"""
Limit the result set to a given number of items.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request at most `max_` items.
This method can be called on the class and on objects. When called on
objects, it returns a copy of the object with :attr:`max_` set
accordingly. When called on the class, it creates a fresh object with
:attr:`max_` set accordingly.
"""
if isinstance(self, type):
result = self()
else:
result = copy.deepcopy(self)
result.max_ = max_
return result
|
[
"def",
"limit",
"(",
"self",
",",
"max_",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"type",
")",
":",
"result",
"=",
"self",
"(",
")",
"else",
":",
"result",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"result",
".",
"max_",
"=",
"max_",
"return",
"result"
] | 35.095238 | 18.809524 |
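
A minimal sketch of the call-on-class-or-instance pattern behind `limit`; `Query` is a hypothetical stand-in, and the real library presumably exposes the method through a descriptor so the class-level call does not need the explicit first argument shown here:

    import copy

    class Query:
        max_ = None

        def limit(self, max_):
            # fresh object when self is the class, deep copy when it is an instance
            result = self() if isinstance(self, type) else copy.deepcopy(self)
            result.max_ = max_
            return result

    q = Query()
    q2 = q.limit(25)               # q is untouched, q2.max_ == 25
    q3 = Query.limit(Query, 50)    # explicit class-level call yields a fresh Query
    assert q.max_ is None and q2.max_ == 25 and q3.max_ == 50
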
def Calls(self, conditions=None):
"""Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data.
"""
results = set()
if conditions is None:
conditions = [None]
for condition in conditions:
for c in self.Match(*condition):
results.update(self._registry.get(c, []))
return results
|
[
"def",
"Calls",
"(",
"self",
",",
"conditions",
"=",
"None",
")",
":",
"results",
"=",
"set",
"(",
")",
"if",
"conditions",
"is",
"None",
":",
"conditions",
"=",
"[",
"None",
"]",
"for",
"condition",
"in",
"conditions",
":",
"for",
"c",
"in",
"self",
".",
"Match",
"(",
"*",
"condition",
")",
":",
"results",
".",
"update",
"(",
"self",
".",
"_registry",
".",
"get",
"(",
"c",
",",
"[",
"]",
")",
")",
"return",
"results"
] | 27.75 | 16.5 |
def arc_distance(theta_1, phi_1,
theta_2, phi_2):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
temp = np.sin((theta_2-theta_1)/2)**2+np.cos(theta_1)*np.cos(theta_2)*np.sin((phi_2-phi_1)/2)**2
distance_matrix = 2 * (np.arctan2(np.sqrt(temp),np.sqrt(1-temp)))
return distance_matrix
|
[
"def",
"arc_distance",
"(",
"theta_1",
",",
"phi_1",
",",
"theta_2",
",",
"phi_2",
")",
":",
"temp",
"=",
"np",
".",
"sin",
"(",
"(",
"theta_2",
"-",
"theta_1",
")",
"/",
"2",
")",
"**",
"2",
"+",
"np",
".",
"cos",
"(",
"theta_1",
")",
"*",
"np",
".",
"cos",
"(",
"theta_2",
")",
"*",
"np",
".",
"sin",
"(",
"(",
"phi_2",
"-",
"phi_1",
")",
"/",
"2",
")",
"**",
"2",
"distance_matrix",
"=",
"2",
"*",
"(",
"np",
".",
"arctan2",
"(",
"np",
".",
"sqrt",
"(",
"temp",
")",
",",
"np",
".",
"sqrt",
"(",
"1",
"-",
"temp",
")",
")",
")",
"return",
"distance_matrix"
] | 44.75 | 18.75 |
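
The expression above is the haversine great-circle formula, with theta playing the latitude role and phi the longitude role; a one-point sanity check with hypothetical coordinates:

    import numpy as np

    # Two points on the equator, 90 degrees of longitude apart -> pi/2 radians.
    d = arc_distance(np.array([0.0]), np.array([0.0]),
                     np.array([0.0]), np.array([np.pi / 2]))
    assert np.allclose(d, np.pi / 2)
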
def url(self):
'''
Because invoice URLs are generally emailed, this
includes the default site URL and the protocol specified in
settings.
'''
if self.id:
return '%s://%s%s' % (
getConstant('email__linkProtocol'),
Site.objects.get_current().domain,
reverse('viewInvoice', args=[self.id,]),
)
|
[
"def",
"url",
"(",
"self",
")",
":",
"if",
"self",
".",
"id",
":",
"return",
"'%s://%s%s'",
"%",
"(",
"getConstant",
"(",
"'email__linkProtocol'",
")",
",",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
".",
"domain",
",",
"reverse",
"(",
"'viewInvoice'",
",",
"args",
"=",
"[",
"self",
".",
"id",
",",
"]",
")",
",",
")"
] | 33.25 | 20.083333 |
def detectType(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'type': '/detect/stream'}):
'''
Detect the MIME/media type of the stream and return it in text/plain.
:param option:
:param urlOrPaths:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param services:
:return:
'''
paths = getPaths(urlOrPaths)
return [detectType1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in paths]
|
[
"def",
"detectType",
"(",
"option",
",",
"urlOrPaths",
",",
"serverEndpoint",
"=",
"ServerEndpoint",
",",
"verbose",
"=",
"Verbose",
",",
"tikaServerJar",
"=",
"TikaServerJar",
",",
"responseMimeType",
"=",
"'text/plain'",
",",
"services",
"=",
"{",
"'type'",
":",
"'/detect/stream'",
"}",
")",
":",
"paths",
"=",
"getPaths",
"(",
"urlOrPaths",
")",
"return",
"[",
"detectType1",
"(",
"option",
",",
"path",
",",
"serverEndpoint",
",",
"verbose",
",",
"tikaServerJar",
",",
"responseMimeType",
",",
"services",
")",
"for",
"path",
"in",
"paths",
"]"
] | 37.294118 | 24.705882 |
def _encrypt(self, archive):
"""Encrypts the compressed archive using GPG.
If encryption fails for any reason, it should be logged by sos but not
cause execution to stop. The assumption is that the unencrypted archive
would still be of use to the user, and/or that the end user has another
means of securing the archive.
Returns the name of the encrypted archive, or raises an exception to
signal that encryption failed and the unencrypted archive name should
be used.
"""
arc_name = archive.replace("sosreport-", "secured-sosreport-")
arc_name += ".gpg"
enc_cmd = "gpg --batch -o %s " % arc_name
env = None
if self.enc_opts["key"]:
# need to assume a trusted key here to be able to encrypt the
# archive non-interactively
enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"]
enc_cmd += archive
if self.enc_opts["password"]:
# prevent change of gpg options using a long password, but also
# prevent the addition of quote characters to the passphrase
passwd = "%s" % self.enc_opts["password"].replace('\'"', '')
env = {"sos_gpg": passwd}
enc_cmd += "-c --passphrase-fd 0 "
enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd
enc_cmd += archive
r = sos_get_command_output(enc_cmd, timeout=0, env=env)
if r["status"] == 0:
return arc_name
elif r["status"] == 2:
if self.enc_opts["key"]:
msg = "Specified key not in keyring"
else:
msg = "Could not read passphrase"
else:
# TODO: report the actual error from gpg. Currently, we cannot as
# sos_get_command_output() does not capture stderr
msg = "gpg exited with code %s" % r["status"]
raise Exception(msg)
|
[
"def",
"_encrypt",
"(",
"self",
",",
"archive",
")",
":",
"arc_name",
"=",
"archive",
".",
"replace",
"(",
"\"sosreport-\"",
",",
"\"secured-sosreport-\"",
")",
"arc_name",
"+=",
"\".gpg\"",
"enc_cmd",
"=",
"\"gpg --batch -o %s \"",
"%",
"arc_name",
"env",
"=",
"None",
"if",
"self",
".",
"enc_opts",
"[",
"\"key\"",
"]",
":",
"# need to assume a trusted key here to be able to encrypt the",
"# archive non-interactively",
"enc_cmd",
"+=",
"\"--trust-model always -e -r %s \"",
"%",
"self",
".",
"enc_opts",
"[",
"\"key\"",
"]",
"enc_cmd",
"+=",
"archive",
"if",
"self",
".",
"enc_opts",
"[",
"\"password\"",
"]",
":",
"# prevent change of gpg options using a long password, but also",
"# prevent the addition of quote characters to the passphrase",
"passwd",
"=",
"\"%s\"",
"%",
"self",
".",
"enc_opts",
"[",
"\"password\"",
"]",
".",
"replace",
"(",
"'\\'\"'",
",",
"''",
")",
"env",
"=",
"{",
"\"sos_gpg\"",
":",
"passwd",
"}",
"enc_cmd",
"+=",
"\"-c --passphrase-fd 0 \"",
"enc_cmd",
"=",
"\"/bin/bash -c \\\"echo $sos_gpg | %s\\\"\"",
"%",
"enc_cmd",
"enc_cmd",
"+=",
"archive",
"r",
"=",
"sos_get_command_output",
"(",
"enc_cmd",
",",
"timeout",
"=",
"0",
",",
"env",
"=",
"env",
")",
"if",
"r",
"[",
"\"status\"",
"]",
"==",
"0",
":",
"return",
"arc_name",
"elif",
"r",
"[",
"\"status\"",
"]",
"==",
"2",
":",
"if",
"self",
".",
"enc_opts",
"[",
"\"key\"",
"]",
":",
"msg",
"=",
"\"Specified key not in keyring\"",
"else",
":",
"msg",
"=",
"\"Could not read passphrase\"",
"else",
":",
"# TODO: report the actual error from gpg. Currently, we cannot as",
"# sos_get_command_output() does not capture stderr",
"msg",
"=",
"\"gpg exited with code %s\"",
"%",
"r",
"[",
"\"status\"",
"]",
"raise",
"Exception",
"(",
"msg",
")"
] | 45.880952 | 20.071429 |
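
For reference, a trace of the string assembly in the key branch above; the archive name and key id are invented, and the final line mirrors what the function would pass to `sos_get_command_output`:

    archive = "sosreport-x.tar.xz"                                   # hypothetical
    arc_name = archive.replace("sosreport-", "secured-sosreport-") + ".gpg"
    enc_cmd = "gpg --batch -o %s " % arc_name
    enc_cmd += "--trust-model always -e -r %s " % "ABCD1234"         # invented key
    enc_cmd += archive
    print(enc_cmd)
    # gpg --batch -o secured-sosreport-x.tar.xz.gpg --trust-model always \
    #     -e -r ABCD1234 sosreport-x.tar.xz
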
def _load_client_secrets(filename):
"""Loads client secrets from the given filename.
Args:
filename: The name of the file containing the JSON secret key.
Returns:
A 2-tuple, the first item containing the client id, and the second
item containing a client secret.
"""
client_type, client_info = clientsecrets.loadfile(filename)
if client_type != clientsecrets.TYPE_WEB:
raise ValueError(
'The flow specified in {} is not supported, only the WEB flow '
'type is supported.'.format(client_type))
return client_info['client_id'], client_info['client_secret']
|
[
"def",
"_load_client_secrets",
"(",
"filename",
")",
":",
"client_type",
",",
"client_info",
"=",
"clientsecrets",
".",
"loadfile",
"(",
"filename",
")",
"if",
"client_type",
"!=",
"clientsecrets",
".",
"TYPE_WEB",
":",
"raise",
"ValueError",
"(",
"'The flow specified in {} is not supported, only the WEB flow '",
"'type is supported.'",
".",
"format",
"(",
"client_type",
")",
")",
"return",
"client_info",
"[",
"'client_id'",
"]",
",",
"client_info",
"[",
"'client_secret'",
"]"
] | 36.823529 | 21.470588 |
def delete_peer(self, name, peer_type="REPLICATION"):
"""
Delete a replication peer.
@param name: The name of the peer.
@param peer_type: Added in v11. The type of the peer. Defaults to 'REPLICATION'.
@return: The deleted peer.
@since: API v3
"""
params = self._get_peer_type_param(peer_type)
return self._delete("peers/" + name, ApiCmPeer, params=params, api_version=3)
|
[
"def",
"delete_peer",
"(",
"self",
",",
"name",
",",
"peer_type",
"=",
"\"REPLICATION\"",
")",
":",
"params",
"=",
"self",
".",
"_get_peer_type_param",
"(",
"peer_type",
")",
"return",
"self",
".",
"_delete",
"(",
"\"peers/\"",
"+",
"name",
",",
"ApiCmPeer",
",",
"params",
"=",
"params",
",",
"api_version",
"=",
"3",
")"
] | 36.090909 | 17.363636 |
def get_pe(self):
"""Get the Streams processing element this operator is executing in.
Returns:
PE: Processing element for this operator.
.. versionadded:: 1.9
"""
return PE(self.rest_client.make_request(self.pe), self.rest_client)
|
[
"def",
"get_pe",
"(",
"self",
")",
":",
"return",
"PE",
"(",
"self",
".",
"rest_client",
".",
"make_request",
"(",
"self",
".",
"pe",
")",
",",
"self",
".",
"rest_client",
")"
] | 30.777778 | 20.666667 |
def find_cycle(graph):
"""find a cycle in an undirected graph
:param graph: undirected graph in listlist or listdict format
:returns: list of vertices in a cycle or None
:complexity: `O(|V|+|E|)`
"""
n = len(graph)
prec = [None] * n # ancestor marks for visited vertices
for u in range(n):
if prec[u] is None: # unvisited vertex
S = [u] # start new DFS
prec[u] = u # mark root (not necessary for this algorithm)
while S:
u = S.pop()
for v in graph[u]: # for all neighbors
if v != prec[u]: # except arcs to father in DFS tree
if prec[v] is not None:
cycle = [v, u] # cycle found, (u,v) back edge
while u != prec[v] and u != prec[u]: # directed
u = prec[u] # climb up the tree
cycle.append(u)
return cycle
else:
prec[v] = u # v is new vertex in tree
S.append(v)
return None
|
[
"def",
"find_cycle",
"(",
"graph",
")",
":",
"n",
"=",
"len",
"(",
"graph",
")",
"prec",
"=",
"[",
"None",
"]",
"*",
"n",
"# ancestor marks for visited vertices",
"for",
"u",
"in",
"range",
"(",
"n",
")",
":",
"if",
"prec",
"[",
"u",
"]",
"is",
"None",
":",
"# unvisited vertex",
"S",
"=",
"[",
"u",
"]",
"# start new DFS",
"prec",
"[",
"u",
"]",
"=",
"u",
"# mark root (not necessary for this algorithm)",
"while",
"S",
":",
"u",
"=",
"S",
".",
"pop",
"(",
")",
"for",
"v",
"in",
"graph",
"[",
"u",
"]",
":",
"# for all neighbors",
"if",
"v",
"!=",
"prec",
"[",
"u",
"]",
":",
"# except arcs to father in DFS tree",
"if",
"prec",
"[",
"v",
"]",
"is",
"not",
"None",
":",
"cycle",
"=",
"[",
"v",
",",
"u",
"]",
"# cycle found, (u,v) back edge",
"while",
"u",
"!=",
"prec",
"[",
"v",
"]",
"and",
"u",
"!=",
"prec",
"[",
"u",
"]",
":",
"# directed",
"u",
"=",
"prec",
"[",
"u",
"]",
"# climb up the tree",
"cycle",
".",
"append",
"(",
"u",
")",
"return",
"cycle",
"else",
":",
"prec",
"[",
"v",
"]",
"=",
"u",
"# v is new vertex in tree",
"S",
".",
"append",
"(",
"v",
")",
"return",
"None"
] | 42.222222 | 16.925926 |
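
A small driver for the cycle finder; the graph is a hypothetical triangle plus one pendant vertex, in listlist format:

    graph = [[1, 2], [0, 2], [0, 1, 3], [2]]   # vertices 0-1-2 form a triangle
    print(find_cycle(graph))                   # -> [1, 2, 0] with this traversal order
    print(find_cycle([[1], [0]]))              # a single edge has no cycle -> None
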
def handle_err(*args):
""" Handle fatal errors, caught in __main__ scope.
If DEBUG is set, print a real traceback.
Otherwise, `print_err` any arguments passed.
"""
if DEBUG:
print_err(traceback.format_exc(), color=False)
else:
print_err(*args, newline=True)
|
[
"def",
"handle_err",
"(",
"*",
"args",
")",
":",
"if",
"DEBUG",
":",
"print_err",
"(",
"traceback",
".",
"format_exc",
"(",
")",
",",
"color",
"=",
"False",
")",
"else",
":",
"print_err",
"(",
"*",
"args",
",",
"newline",
"=",
"True",
")"
] | 33 | 12.444444 |
def map(self, map_fn, name="Map"):
"""Applies a map operator to the stream.
Attributes:
map_fn (function): The user-defined logic of the map.
"""
op = Operator(
_generate_uuid(),
OpType.Map,
name,
map_fn,
num_instances=self.env.config.parallelism)
return self.__register(op)
|
[
"def",
"map",
"(",
"self",
",",
"map_fn",
",",
"name",
"=",
"\"Map\"",
")",
":",
"op",
"=",
"Operator",
"(",
"_generate_uuid",
"(",
")",
",",
"OpType",
".",
"Map",
",",
"name",
",",
"map_fn",
",",
"num_instances",
"=",
"self",
".",
"env",
".",
"config",
".",
"parallelism",
")",
"return",
"self",
".",
"__register",
"(",
"op",
")"
] | 28.923077 | 15.615385 |
def compute_step(self, state, lstm_cell=None, input=None, additional_inputs=None):
"""
Compute one step in the RNN.
:return: one variable for RNN and GRU, multiple variables for LSTM
"""
if not self.initialized:
input_dim = None
if input and hasattr(input.tag, 'last_dim'):
input_dim = input.tag.last_dim
self.init(input_dim)
input_map = self.merge_inputs(input, additional_inputs=additional_inputs)
input_map.update({"state": state, "lstm_cell": lstm_cell})
output_map = self.compute_new_state(input_map)
outputs = [output_map.pop("state")]
outputs += output_map.values()
for tensor in outputs:
tensor.tag.last_dim = self.hidden_size
if len(outputs) == 1:
return outputs[0]
else:
return outputs
|
[
"def",
"compute_step",
"(",
"self",
",",
"state",
",",
"lstm_cell",
"=",
"None",
",",
"input",
"=",
"None",
",",
"additional_inputs",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"initialized",
":",
"input_dim",
"=",
"None",
"if",
"input",
"and",
"hasattr",
"(",
"input",
".",
"tag",
",",
"'last_dim'",
")",
":",
"input_dim",
"=",
"input",
".",
"tag",
".",
"last_dim",
"self",
".",
"init",
"(",
"input_dim",
")",
"input_map",
"=",
"self",
".",
"merge_inputs",
"(",
"input",
",",
"additional_inputs",
"=",
"additional_inputs",
")",
"input_map",
".",
"update",
"(",
"{",
"\"state\"",
":",
"state",
",",
"\"lstm_cell\"",
":",
"lstm_cell",
"}",
")",
"output_map",
"=",
"self",
".",
"compute_new_state",
"(",
"input_map",
")",
"outputs",
"=",
"[",
"output_map",
".",
"pop",
"(",
"\"state\"",
")",
"]",
"outputs",
"+=",
"output_map",
".",
"values",
"(",
")",
"for",
"tensor",
"in",
"outputs",
":",
"tensor",
".",
"tag",
".",
"last_dim",
"=",
"self",
".",
"hidden_size",
"if",
"len",
"(",
"outputs",
")",
"==",
"1",
":",
"return",
"outputs",
"[",
"0",
"]",
"else",
":",
"return",
"outputs"
] | 39.409091 | 15.409091 |
def _selectionParameters(self, param):
"""see docstring for selectedParameterTypes"""
components = param['selection']
if len(components) == 0:
return []
# extract the selected component names
editable_sets = []
for comp in components:
# all the keys (component names) for the auto details for components in selection
details = comp.auto_details()
editable_sets.append(set(details.keys()))
editable_paramters = set.intersection(*editable_sets)
# do not allow selecting of filename from here
return list(editable_paramters)
|
[
"def",
"_selectionParameters",
"(",
"self",
",",
"param",
")",
":",
"components",
"=",
"param",
"[",
"'selection'",
"]",
"if",
"len",
"(",
"components",
")",
"==",
"0",
":",
"return",
"[",
"]",
"# extract the selected component names",
"editable_sets",
"=",
"[",
"]",
"for",
"comp",
"in",
"components",
":",
"# all the keys (component names) for the auto details for components in selection",
"details",
"=",
"comp",
".",
"auto_details",
"(",
")",
"editable_sets",
".",
"append",
"(",
"set",
"(",
"details",
".",
"keys",
"(",
")",
")",
")",
"editable_paramters",
"=",
"set",
".",
"intersection",
"(",
"*",
"editable_sets",
")",
"# do not allow selecting of filename from here",
"return",
"list",
"(",
"editable_paramters",
")"
] | 44.857143 | 11.571429 |
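
The heart of the method is the set intersection; a toy illustration with invented component parameter sets:

    editable_sets = [{"frequency", "intensity", "duration"},
                     {"frequency", "duration", "risefall"}]
    # only parameters editable on *every* selected component survive
    assert set.intersection(*editable_sets) == {"frequency", "duration"}
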
def gfrfov(inst, raydir, rframe, abcorr, obsrvr, step, cnfine, result=None):
"""
Determine time intervals when a specified ray intersects the
space bounded by the field-of-view (FOV) of a specified
instrument.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfrfov_c.html
:param inst: Name of the instrument.
:type inst: str
:param raydir: Ray's direction vector.
:type raydir: 3-Element Array of Float.
:param rframe: Reference frame of ray's direction vector.
:type rframe: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param obsrvr: Name of the observing body.
:type obsrvr: str
:param step: Step size in seconds for finding FOV events.
:type step: float
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: Optional SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cnfine, stypes.SpiceCell)
assert cnfine.is_double()
if result is None:
result = stypes.SPICEDOUBLE_CELL(2000)
else:
assert isinstance(result, stypes.SpiceCell)
assert result.is_double()
inst = stypes.stringToCharP(inst)
raydir = stypes.toDoubleVector(raydir)
rframe = stypes.stringToCharP(rframe)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
step = ctypes.c_double(step)
libspice.gfrfov_c(inst, raydir, rframe, abcorr, obsrvr, step,
ctypes.byref(cnfine), ctypes.byref(result))
return result
|
[
"def",
"gfrfov",
"(",
"inst",
",",
"raydir",
",",
"rframe",
",",
"abcorr",
",",
"obsrvr",
",",
"step",
",",
"cnfine",
",",
"result",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"cnfine",
",",
"stypes",
".",
"SpiceCell",
")",
"assert",
"cnfine",
".",
"is_double",
"(",
")",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"stypes",
".",
"SPICEDOUBLE_CELL",
"(",
"2000",
")",
"else",
":",
"assert",
"isinstance",
"(",
"result",
",",
"stypes",
".",
"SpiceCell",
")",
"assert",
"result",
".",
"is_double",
"(",
")",
"inst",
"=",
"stypes",
".",
"stringToCharP",
"(",
"inst",
")",
"raydir",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"raydir",
")",
"rframe",
"=",
"stypes",
".",
"stringToCharP",
"(",
"rframe",
")",
"abcorr",
"=",
"stypes",
".",
"stringToCharP",
"(",
"abcorr",
")",
"obsrvr",
"=",
"stypes",
".",
"stringToCharP",
"(",
"obsrvr",
")",
"step",
"=",
"ctypes",
".",
"c_double",
"(",
"step",
")",
"libspice",
".",
"gfrfov_c",
"(",
"inst",
",",
"raydir",
",",
"rframe",
",",
"abcorr",
",",
"obsrvr",
",",
"step",
",",
"ctypes",
".",
"byref",
"(",
"cnfine",
")",
",",
"ctypes",
".",
"byref",
"(",
"result",
")",
")",
"return",
"result"
] | 39 | 15.390244 |
def registerDisplay(func):
"""
Registers a function to the display hook queue to be called on hook.
Look at the sys.displayhook documentation for more information.
:param func | <callable>
"""
setup()
ref = weakref.ref(func)
if ref not in _displayhooks:
_displayhooks.append(ref)
|
[
"def",
"registerDisplay",
"(",
"func",
")",
":",
"setup",
"(",
")",
"ref",
"=",
"weakref",
".",
"ref",
"(",
"func",
")",
"if",
"ref",
"not",
"in",
"_displayhooks",
":",
"_displayhooks",
".",
"append",
"(",
"ref",
")"
] | 29 | 15.727273 |
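
The membership test works because `weakref.ref` objects to the same live callable compare equal, so registration deduplicates without keeping the function alive:

    import weakref

    def hook(value):
        pass

    assert weakref.ref(hook) == weakref.ref(hook)   # equal while the referent lives
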
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker
:arg walker: the treewalker to use to walk the tree to convert it
:arg handler: SAX handler to use
"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
|
[
"def",
"to_sax",
"(",
"walker",
",",
"handler",
")",
":",
"handler",
".",
"startDocument",
"(",
")",
"for",
"prefix",
",",
"namespace",
"in",
"prefix_mapping",
".",
"items",
"(",
")",
":",
"handler",
".",
"startPrefixMapping",
"(",
"prefix",
",",
"namespace",
")",
"for",
"token",
"in",
"walker",
":",
"type",
"=",
"token",
"[",
"\"type\"",
"]",
"if",
"type",
"==",
"\"Doctype\"",
":",
"continue",
"elif",
"type",
"in",
"(",
"\"StartTag\"",
",",
"\"EmptyTag\"",
")",
":",
"attrs",
"=",
"AttributesNSImpl",
"(",
"token",
"[",
"\"data\"",
"]",
",",
"unadjustForeignAttributes",
")",
"handler",
".",
"startElementNS",
"(",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
",",
"token",
"[",
"\"name\"",
"]",
",",
"attrs",
")",
"if",
"type",
"==",
"\"EmptyTag\"",
":",
"handler",
".",
"endElementNS",
"(",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
",",
"token",
"[",
"\"name\"",
"]",
")",
"elif",
"type",
"==",
"\"EndTag\"",
":",
"handler",
".",
"endElementNS",
"(",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
",",
"token",
"[",
"\"name\"",
"]",
")",
"elif",
"type",
"in",
"(",
"\"Characters\"",
",",
"\"SpaceCharacters\"",
")",
":",
"handler",
".",
"characters",
"(",
"token",
"[",
"\"data\"",
"]",
")",
"elif",
"type",
"==",
"\"Comment\"",
":",
"pass",
"else",
":",
"assert",
"False",
",",
"\"Unknown token type\"",
"for",
"prefix",
",",
"namespace",
"in",
"prefix_mapping",
".",
"items",
"(",
")",
":",
"handler",
".",
"endPrefixMapping",
"(",
"prefix",
")",
"handler",
".",
"endDocument",
"(",
")"
] | 36.421053 | 16.631579 |
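
A plausible driver, assuming the usual html5lib entry points (`html5lib.parse` and `html5lib.getTreeWalker`); the base `xml.sax` ContentHandler swallows every event, so this just exercises the walk:

    import html5lib
    from xml.sax.handler import ContentHandler

    tree = html5lib.parse("<p>hi</p>")
    walker = html5lib.getTreeWalker("etree")
    to_sax(walker(tree), ContentHandler())
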
def InstrumentsCandlesFactory(instrument, params=None):
"""InstrumentsCandlesFactory - generate InstrumentCandles requests.
InstrumentsCandlesFactory is used to retrieve historical data by
automatically generating consecutive requests when the OANDA limit
of *count* records is exceeded.
This is known by calculating the number of candles between *from* and
*to*. If *to* is not specified *to* will be equal to *now*.
The *count* parameter is only used to control the number of records to
retrieve in a single request.
The *includeFirst* parameter is forced to make sure that results do
no have a 1-record gap between consecutive requests.
Parameters
----------
instrument : string (required)
the instrument to create the order for
params: params (optional)
the parameters to specify the historical range,
see the REST-V20 docs regarding 'instrument' at developer.oanda.com
If no params are specified, just a single InstrumentsCandles request
will be generated acting the same as if you had just created it
directly.
Example
-------
The *oandapyV20.API* client processes requests as objects. So,
downloading large historical batches simply comes down to:
>>> import json
>>> from oandapyV20 import API
>>> from oandapyV20.contrib.factories import InstrumentsCandlesFactory
>>>
>>> client = API(access_token=...)
>>> instrument, granularity = "EUR_USD", "M15"
>>> _from = "2017-01-01T00:00:00Z"
>>> params = {
... "from": _from,
... "granularity": granularity,
... "count": 2500,
... }
>>> with open("/tmp/{}.{}".format(instrument, granularity), "w") as OUT:
>>> # The factory returns a generator generating consecutive
>>> # requests to retrieve full history from date 'from' till 'to'
>>> for r in InstrumentsCandlesFactory(instrument=instrument,
... params=params)
>>> client.request(r)
>>> OUT.write(json.dumps(r.response.get('candles'), indent=2))
.. note:: Normally you can't combine *from*, *to* and *count*.
When *count* specified, it is used to calculate the gap between
*to* and *from*. The *params* passed to the generated request
itself does contain the *count* parameter.
"""
RFC3339 = "%Y-%m-%dT%H:%M:%SZ"
# if not specified use the default of 'S5' as OANDA does
gs = granularity_to_time(params.get('granularity', 'S5'))
_from = None
_epoch_from = None
if 'from' in params:
_from = datetime.strptime(params.get('from'), RFC3339)
_epoch_from = int(calendar.timegm(_from.timetuple()))
_to = datetime.utcnow()
if 'to' in params:
_tmp = datetime.strptime(params.get('to'), RFC3339)
# if specified datetime > now, we use 'now' instead
if _tmp > _to:
logger.info("datetime %s is in the future, will be set to 'now'",
params.get('to'))
else:
_to = _tmp
_epoch_to = int(calendar.timegm(_to.timetuple()))
_count = params.get('count', DEFAULT_BATCH)
# OANDA will respond with a V20Error if count > MAX_BATCH
if 'to' in params and 'from' not in params:
raise ValueError("'to' specified without 'from'")
if not params or 'from' not in params:
yield instruments.InstrumentsCandles(instrument=instrument,
params=params)
else:
delta = _epoch_to - _epoch_from
nbars = delta / gs
cpparams = params.copy()
for k in ['count', 'from', 'to']:
if k in cpparams:
del cpparams[k]
# force includeFirst
cpparams.update({"includeFirst": True})
# generate InstrumentsCandles requests for all 'bars', each request
# requesting max. count records
for _ in range(_count, int(((nbars//_count)+1))*_count+1, _count):
to = _epoch_from + _count * gs
if to > _epoch_to:
to = _epoch_to
yparams = cpparams.copy()
yparams.update({"from": secs2time(_epoch_from).strftime(RFC3339)})
yparams.update({"to": secs2time(to).strftime(RFC3339)})
yield instruments.InstrumentsCandles(instrument=instrument,
params=yparams)
_epoch_from = to
|
[
"def",
"InstrumentsCandlesFactory",
"(",
"instrument",
",",
"params",
"=",
"None",
")",
":",
"RFC3339",
"=",
"\"%Y-%m-%dT%H:%M:%SZ\"",
"# if not specified use the default of 'S5' as OANDA does",
"gs",
"=",
"granularity_to_time",
"(",
"params",
".",
"get",
"(",
"'granularity'",
",",
"'S5'",
")",
")",
"_from",
"=",
"None",
"_epoch_from",
"=",
"None",
"if",
"'from'",
"in",
"params",
":",
"_from",
"=",
"datetime",
".",
"strptime",
"(",
"params",
".",
"get",
"(",
"'from'",
")",
",",
"RFC3339",
")",
"_epoch_from",
"=",
"int",
"(",
"calendar",
".",
"timegm",
"(",
"_from",
".",
"timetuple",
"(",
")",
")",
")",
"_to",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"'to'",
"in",
"params",
":",
"_tmp",
"=",
"datetime",
".",
"strptime",
"(",
"params",
".",
"get",
"(",
"'to'",
")",
",",
"RFC3339",
")",
"# if specified datetime > now, we use 'now' instead",
"if",
"_tmp",
">",
"_to",
":",
"logger",
".",
"info",
"(",
"\"datetime %s is in the future, will be set to 'now'\"",
",",
"params",
".",
"get",
"(",
"'to'",
")",
")",
"else",
":",
"_to",
"=",
"_tmp",
"_epoch_to",
"=",
"int",
"(",
"calendar",
".",
"timegm",
"(",
"_to",
".",
"timetuple",
"(",
")",
")",
")",
"_count",
"=",
"params",
".",
"get",
"(",
"'count'",
",",
"DEFAULT_BATCH",
")",
"# OANDA will respond with a V20Error if count > MAX_BATCH",
"if",
"'to'",
"in",
"params",
"and",
"'from'",
"not",
"in",
"params",
":",
"raise",
"ValueError",
"(",
"\"'to' specified without 'from'\"",
")",
"if",
"not",
"params",
"or",
"'from'",
"not",
"in",
"params",
":",
"yield",
"instruments",
".",
"InstrumentsCandles",
"(",
"instrument",
"=",
"instrument",
",",
"params",
"=",
"params",
")",
"else",
":",
"delta",
"=",
"_epoch_to",
"-",
"_epoch_from",
"nbars",
"=",
"delta",
"/",
"gs",
"cpparams",
"=",
"params",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"[",
"'count'",
",",
"'from'",
",",
"'to'",
"]",
":",
"if",
"k",
"in",
"cpparams",
":",
"del",
"cpparams",
"[",
"k",
"]",
"# force includeFirst",
"cpparams",
".",
"update",
"(",
"{",
"\"includeFirst\"",
":",
"True",
"}",
")",
"# generate InstrumentsCandles requests for all 'bars', each request",
"# requesting max. count records",
"for",
"_",
"in",
"range",
"(",
"_count",
",",
"int",
"(",
"(",
"(",
"nbars",
"//",
"_count",
")",
"+",
"1",
")",
")",
"*",
"_count",
"+",
"1",
",",
"_count",
")",
":",
"to",
"=",
"_epoch_from",
"+",
"_count",
"*",
"gs",
"if",
"to",
">",
"_epoch_to",
":",
"to",
"=",
"_epoch_to",
"yparams",
"=",
"cpparams",
".",
"copy",
"(",
")",
"yparams",
".",
"update",
"(",
"{",
"\"from\"",
":",
"secs2time",
"(",
"_epoch_from",
")",
".",
"strftime",
"(",
"RFC3339",
")",
"}",
")",
"yparams",
".",
"update",
"(",
"{",
"\"to\"",
":",
"secs2time",
"(",
"to",
")",
".",
"strftime",
"(",
"RFC3339",
")",
"}",
")",
"yield",
"instruments",
".",
"InstrumentsCandles",
"(",
"instrument",
"=",
"instrument",
",",
"params",
"=",
"yparams",
")",
"_epoch_from",
"=",
"to"
] | 37.547009 | 22.82906 |
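
Back-of-envelope for the batching arithmetic above, assuming `granularity_to_time("M15")` returns 900 seconds:

    gs, count = 900, 2500
    span_days = gs * count / 86400        # ~26 days of M15 candles per request
    requests_per_year = 365 / span_days   # ~14 consecutive requests for one year
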
def JTl(self):
r'''Joule Thomson coefficient of the chemical in the liquid phase at
its current temperature and pressure, in units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Utilizes the temperature-derivative method of
:obj:`thermo.volume.VolumeLiquid` and the temperature-dependent heat
capacity method :obj:`thermo.heat_capacity.HeatCapacityLiquid` to
obtain the properties required for the actual calculation.
Examples
--------
>>> Chemical('dodecane', T=400).JTl
-3.0827160465192742e-07
'''
Vml, Cplm, isobaric_expansion_l = self.Vml, self.Cplm, self.isobaric_expansion_l
if all((Vml, Cplm, isobaric_expansion_l)):
return Joule_Thomson(T=self.T, V=Vml, Cp=Cplm, beta=isobaric_expansion_l)
return None
|
[
"def",
"JTl",
"(",
"self",
")",
":",
"Vml",
",",
"Cplm",
",",
"isobaric_expansion_l",
"=",
"self",
".",
"Vml",
",",
"self",
".",
"Cplm",
",",
"self",
".",
"isobaric_expansion_l",
"if",
"all",
"(",
"(",
"Vml",
",",
"Cplm",
",",
"isobaric_expansion_l",
")",
")",
":",
"return",
"Joule_Thomson",
"(",
"T",
"=",
"self",
".",
"T",
",",
"V",
"=",
"Vml",
",",
"Cp",
"=",
"Cplm",
",",
"beta",
"=",
"isobaric_expansion_l",
")",
"return",
"None"
] | 43.73913 | 27.73913 |
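
Evaluating the closed form mu_JT = V/Cp * (beta*T - 1) directly with made-up liquid values (roughly dodecane-like at 400 K) reproduces the sign and magnitude of the docstring example:

    T, V, Cp, beta = 400.0, 2.26e-4, 438.0, 1.1e-3   # K, m^3/mol, J/(mol*K), 1/K
    mu_JT = V / Cp * (beta * T - 1)
    print(mu_JT)   # ~ -2.9e-7 K/Pa: the liquid warms on isenthalpic expansion
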
def parse_coverage_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'coverage_xml': COVERAGE_XML,
'html_report': None | HTML_REPORT,
'external_css_file': None | CSS_FILE,
}
where `COVERAGE_XML`, `HTML_REPORT`, and `CSS_FILE` are paths.
The path strings may or may not exist.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'coverage_xml',
type=str,
help=COVERAGE_XML_HELP,
nargs='+'
)
parser.add_argument(
'--html-report',
metavar='FILENAME',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--external-css-file',
metavar='FILENAME',
type=str,
default=None,
help=CSS_FILE_HELP,
)
parser.add_argument(
'--compare-branch',
metavar='BRANCH',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
parser.add_argument(
'--fail-under',
metavar='SCORE',
type=float,
default='0',
help=FAIL_UNDER_HELP
)
parser.add_argument(
'--ignore-staged',
action='store_true',
default=False,
help=IGNORE_STAGED_HELP
)
parser.add_argument(
'--ignore-unstaged',
action='store_true',
default=False,
help=IGNORE_UNSTAGED_HELP
)
parser.add_argument(
'--exclude',
metavar='EXCLUDE',
type=str,
nargs='+',
help=EXCLUDE_HELP
)
parser.add_argument(
'--src-roots',
metavar='DIRECTORY',
type=str,
nargs='+',
default=['src/main/java', 'src/test/java'],
help=SRC_ROOTS_HELP
)
return vars(parser.parse_args(argv))
|
[
"def",
"parse_coverage_args",
"(",
"argv",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"DESCRIPTION",
")",
"parser",
".",
"add_argument",
"(",
"'coverage_xml'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"COVERAGE_XML_HELP",
",",
"nargs",
"=",
"'+'",
")",
"parser",
".",
"add_argument",
"(",
"'--html-report'",
",",
"metavar",
"=",
"'FILENAME'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"HTML_REPORT_HELP",
")",
"parser",
".",
"add_argument",
"(",
"'--external-css-file'",
",",
"metavar",
"=",
"'FILENAME'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"CSS_FILE_HELP",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--compare-branch'",
",",
"metavar",
"=",
"'BRANCH'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'origin/master'",
",",
"help",
"=",
"COMPARE_BRANCH_HELP",
")",
"parser",
".",
"add_argument",
"(",
"'--fail-under'",
",",
"metavar",
"=",
"'SCORE'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"'0'",
",",
"help",
"=",
"FAIL_UNDER_HELP",
")",
"parser",
".",
"add_argument",
"(",
"'--ignore-staged'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"IGNORE_STAGED_HELP",
")",
"parser",
".",
"add_argument",
"(",
"'--ignore-unstaged'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"IGNORE_UNSTAGED_HELP",
")",
"parser",
".",
"add_argument",
"(",
"'--exclude'",
",",
"metavar",
"=",
"'EXCLUDE'",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"EXCLUDE_HELP",
")",
"parser",
".",
"add_argument",
"(",
"'--src-roots'",
",",
"metavar",
"=",
"'DIRECTORY'",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"'+'",
",",
"default",
"=",
"[",
"'src/main/java'",
",",
"'src/test/java'",
"]",
",",
"help",
"=",
"SRC_ROOTS_HELP",
")",
"return",
"vars",
"(",
"parser",
".",
"parse_args",
"(",
"argv",
")",
")"
] | 20.465909 | 20.806818 |
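
A hypothetical invocation showing the shape of the parsed dict:

    opts = parse_coverage_args(["coverage.xml", "--fail-under", "80"])
    assert opts["coverage_xml"] == ["coverage.xml"]   # nargs='+' gives a list
    assert opts["fail_under"] == 80.0                 # coerced to float
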
async def getHealth(self, *args, **kwargs):
"""
Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs)
|
[
"async",
"def",
"getHealth",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"getHealth\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 28.333333 | 20.333333 |
def glob_config(pattern, *search_dirs):
"""Return glob results for all possible configuration locations.
Note: This method does not check the configuration "base" directory if the pattern includes a subdirectory.
This is done for performance since this is usually used to find *all* configs for a certain component.
"""
patterns = config_search_paths(pattern, *search_dirs, check_exists=False)
for pattern in patterns:
for path in glob.iglob(pattern):
yield path
|
[
"def",
"glob_config",
"(",
"pattern",
",",
"*",
"search_dirs",
")",
":",
"patterns",
"=",
"config_search_paths",
"(",
"pattern",
",",
"*",
"search_dirs",
",",
"check_exists",
"=",
"False",
")",
"for",
"pattern",
"in",
"patterns",
":",
"for",
"path",
"in",
"glob",
".",
"iglob",
"(",
"pattern",
")",
":",
"yield",
"path"
] | 45.818182 | 26.454545 |
def endpoint_get(service, region=None, profile=None, interface=None, **connection_args):
'''
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt 'v2' keystone.endpoint_get nova [region=RegionOne]
salt 'v3' keystone.endpoint_get nova interface=admin [region=RegionOne]
'''
auth(profile, **connection_args)
services = service_list(profile, **connection_args)
if service not in services:
return {'Error': 'Could not find the specified service'}
service_id = services[service]['id']
endpoints = endpoint_list(profile, **connection_args)
e = [_f for _f in [e
if e['service_id'] == service_id and
(e['region'] == region if region else True) and
(e['interface'] == interface if interface else True)
else None for e in endpoints.values()] if _f]
if len(e) > 1:
return {'Error': 'Multiple endpoints found ({0}) for the {1} service. Please specify region.'.format(e, service)}
if len(e) == 1:
return e[0]
return {'Error': 'Could not find endpoint for the specified service'}
|
[
"def",
"endpoint_get",
"(",
"service",
",",
"region",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"interface",
"=",
"None",
",",
"*",
"*",
"connection_args",
")",
":",
"auth",
"(",
"profile",
",",
"*",
"*",
"connection_args",
")",
"services",
"=",
"service_list",
"(",
"profile",
",",
"*",
"*",
"connection_args",
")",
"if",
"service",
"not",
"in",
"services",
":",
"return",
"{",
"'Error'",
":",
"'Could not find the specified service'",
"}",
"service_id",
"=",
"services",
"[",
"service",
"]",
"[",
"'id'",
"]",
"endpoints",
"=",
"endpoint_list",
"(",
"profile",
",",
"*",
"*",
"connection_args",
")",
"e",
"=",
"[",
"_f",
"for",
"_f",
"in",
"[",
"e",
"if",
"e",
"[",
"'service_id'",
"]",
"==",
"service_id",
"and",
"(",
"e",
"[",
"'region'",
"]",
"==",
"region",
"if",
"region",
"else",
"True",
")",
"and",
"(",
"e",
"[",
"'interface'",
"]",
"==",
"interface",
"if",
"interface",
"else",
"True",
")",
"else",
"None",
"for",
"e",
"in",
"endpoints",
".",
"values",
"(",
")",
"]",
"if",
"_f",
"]",
"if",
"len",
"(",
"e",
")",
">",
"1",
":",
"return",
"{",
"'Error'",
":",
"'Multiple endpoints found ({0}) for the {1} service. Please specify region.'",
".",
"format",
"(",
"e",
",",
"service",
")",
"}",
"if",
"len",
"(",
"e",
")",
"==",
"1",
":",
"return",
"e",
"[",
"0",
"]",
"return",
"{",
"'Error'",
":",
"'Could not find endpoint for the specified service'",
"}"
] | 38.655172 | 26.172414 |
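
The nested comprehension in the middle is dense; unrolled, it is equivalent to this plain filter loop:

    e = []
    for ep in endpoints.values():
        if ep['service_id'] != service_id:
            continue
        if region and ep['region'] != region:
            continue
        if interface and ep['interface'] != interface:
            continue
        e.append(ep)
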
def epcr_threads(self, formattedprimers, ampliconsize=10000):
"""
Run ePCR in a multi-threaded fashion
"""
# Create the threads for the ePCR analysis
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
threads = Thread(target=self.epcr, args=())
threads.setDaemon(True)
threads.start()
logging.info('Running ePCR analyses')
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
# Get the primers ready
sample[self.analysistype].primers = formattedprimers
# Make the output path
sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory,
self.analysistype)
make_path(sample[self.analysistype].reportdir)
outfile = os.path.join(sample[self.analysistype].reportdir, sample.name)
# Set the hashing and mapping commands
sample.commands.famap = '{famap} -b {outfile}.famap {fasta}'\
.format(famap=os.path.join(self.homepath, 'ePCR', 'famap'),
outfile=outfile,
fasta=sample.general.bestassemblyfile)
sample.commands.fahash = '{fahash} -b {outfile}.hash {outfile}.famap'\
.format(fahash=os.path.join(self.homepath, 'ePCR', 'fahash'),
outfile=outfile)
# re-PCR uses the subtyping primers list to search the contigs file using the following parameters
# -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup)
            # -m 10000 (Set variability for STS size for lookup); this is very large, as I don't necessarily know
# the size of the amplicon
# -n 1 (Set max allowed mismatches per primer pair for lookup)
# -g 0 (Set max allowed indels per primer pair for lookup),
# -G (Print alignments in comments)
# -o {output file}
sample.commands.epcr = \
'{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q ' \
'-o {outfile}.txt {primers}'\
.format(rePCR=os.path.join(self.homepath, 'ePCR', 're-PCR'),
outfile=outfile,
ampsize=ampliconsize,
mismatches=self.mismatches,
primers=sample[self.analysistype].primers)
sample[self.analysistype].resultsfile = '{of}.txt'.format(of=outfile)
# Add the sample object and the output file to the queue
self.epcrqueue.put((sample, outfile))
# Join the threads
self.epcrqueue.join()
|
[
"def",
"epcr_threads",
"(",
"self",
",",
"formattedprimers",
",",
"ampliconsize",
"=",
"10000",
")",
":",
"# Create the threads for the ePCR analysis",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"epcr",
",",
"args",
"=",
"(",
")",
")",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"threads",
".",
"start",
"(",
")",
"logging",
".",
"info",
"(",
"'Running ePCR analyses'",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"setattr",
"(",
"sample",
",",
"self",
".",
"analysistype",
",",
"GenObject",
"(",
")",
")",
"# Get the primers ready",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"primers",
"=",
"formattedprimers",
"# Make the output path",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"reportdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"self",
".",
"analysistype",
")",
"make_path",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"reportdir",
")",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"reportdir",
",",
"sample",
".",
"name",
")",
"# Set the hashing and mapping commands",
"sample",
".",
"commands",
".",
"famap",
"=",
"'{famap} -b {outfile}.famap {fasta}'",
".",
"format",
"(",
"famap",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"homepath",
",",
"'ePCR'",
",",
"'famap'",
")",
",",
"outfile",
"=",
"outfile",
",",
"fasta",
"=",
"sample",
".",
"general",
".",
"bestassemblyfile",
")",
"sample",
".",
"commands",
".",
"fahash",
"=",
"'{fahash} -b {outfile}.hash {outfile}.famap'",
".",
"format",
"(",
"fahash",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"homepath",
",",
"'ePCR'",
",",
"'fahash'",
")",
",",
"outfile",
"=",
"outfile",
")",
"# re-PCR uses the subtyping primers list to search the contigs file using the following parameters",
"# -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup)",
"# -m 10000 (Set variability for STS size for lookup), this very large, as I don't necessarily know",
"# the size of the amplicon",
"# -n 1 (Set max allowed mismatches per primer pair for lookup)",
"# -g 0 (Set max allowed indels per primer pair for lookup),",
"# -G (Print alignments in comments)",
"# -o {output file}",
"sample",
".",
"commands",
".",
"epcr",
"=",
"'{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q '",
"'-o {outfile}.txt {primers}'",
".",
"format",
"(",
"rePCR",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"homepath",
",",
"'ePCR'",
",",
"'re-PCR'",
")",
",",
"outfile",
"=",
"outfile",
",",
"ampsize",
"=",
"ampliconsize",
",",
"mismatches",
"=",
"self",
".",
"mismatches",
",",
"primers",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"primers",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"resultsfile",
"=",
"'{of}.txt'",
".",
"format",
"(",
"of",
"=",
"outfile",
")",
"# Add the sample object and the output file to the queue",
"self",
".",
"epcrqueue",
".",
"put",
"(",
"(",
"sample",
",",
"outfile",
")",
")",
"# Join the threads",
"self",
".",
"epcrqueue",
".",
"join",
"(",
")"
] | 59.46 | 22.7 |
def isoutside(coords, shape):
r"""
Identifies points that lie outside the specified region.
Parameters
----------
    coords : array_like
        The coordinates of the points to check against the given shape.
    shape : array_like
The size and shape of the domain beyond which points should be
trimmed. The argument is treated as follows:
**sphere** : If a scalar or single element list is received, it's
treated as the radius [r] of a sphere centered on [0, 0, 0].
**cylinder** : If a two-element list is received it's treated as
the radius and height of a cylinder [r, z] whose central axis
starts at [0, 0, 0] and extends in the positive z-direction.
**rectangle** : If a three element list is received, it's treated
as the outer corner of rectangle [x, y, z] whose opposite corner
lies at [0, 0, 0].
Returns
-------
An Np-long mask of True values indicating pores that lie outside the
domain.
"""
# Label external pores for trimming below
if len(shape) == 1: # Spherical
# Find external points
r = sp.sqrt(sp.sum(coords**2, axis=1))
Ps = r > shape[0]
elif len(shape) == 2: # Cylindrical
# Find external pores outside radius
r = sp.sqrt(sp.sum(coords[:, [0, 1]]**2, axis=1))
Ps = r > shape[0]
# Find external pores above and below cylinder
if shape[1] > 0:
Ps = Ps + (coords[:, 2] > shape[1])
Ps = Ps + (coords[:, 2] < 0)
else:
pass
elif len(shape) == 3: # Rectilinear
shape = sp.array(shape, dtype=float)
try:
lo_lim = shape[:, 0]
hi_lim = shape[:, 1]
except IndexError:
lo_lim = sp.array([0, 0, 0])
hi_lim = shape
Ps1 = sp.any(coords > hi_lim, axis=1)
Ps2 = sp.any(coords < lo_lim, axis=1)
Ps = Ps1 + Ps2
return Ps
|
[
"def",
"isoutside",
"(",
"coords",
",",
"shape",
")",
":",
"# Label external pores for trimming below",
"if",
"len",
"(",
"shape",
")",
"==",
"1",
":",
"# Spherical",
"# Find external points",
"r",
"=",
"sp",
".",
"sqrt",
"(",
"sp",
".",
"sum",
"(",
"coords",
"**",
"2",
",",
"axis",
"=",
"1",
")",
")",
"Ps",
"=",
"r",
">",
"shape",
"[",
"0",
"]",
"elif",
"len",
"(",
"shape",
")",
"==",
"2",
":",
"# Cylindrical",
"# Find external pores outside radius",
"r",
"=",
"sp",
".",
"sqrt",
"(",
"sp",
".",
"sum",
"(",
"coords",
"[",
":",
",",
"[",
"0",
",",
"1",
"]",
"]",
"**",
"2",
",",
"axis",
"=",
"1",
")",
")",
"Ps",
"=",
"r",
">",
"shape",
"[",
"0",
"]",
"# Find external pores above and below cylinder",
"if",
"shape",
"[",
"1",
"]",
">",
"0",
":",
"Ps",
"=",
"Ps",
"+",
"(",
"coords",
"[",
":",
",",
"2",
"]",
">",
"shape",
"[",
"1",
"]",
")",
"Ps",
"=",
"Ps",
"+",
"(",
"coords",
"[",
":",
",",
"2",
"]",
"<",
"0",
")",
"else",
":",
"pass",
"elif",
"len",
"(",
"shape",
")",
"==",
"3",
":",
"# Rectilinear",
"shape",
"=",
"sp",
".",
"array",
"(",
"shape",
",",
"dtype",
"=",
"float",
")",
"try",
":",
"lo_lim",
"=",
"shape",
"[",
":",
",",
"0",
"]",
"hi_lim",
"=",
"shape",
"[",
":",
",",
"1",
"]",
"except",
"IndexError",
":",
"lo_lim",
"=",
"sp",
".",
"array",
"(",
"[",
"0",
",",
"0",
",",
"0",
"]",
")",
"hi_lim",
"=",
"shape",
"Ps1",
"=",
"sp",
".",
"any",
"(",
"coords",
">",
"hi_lim",
",",
"axis",
"=",
"1",
")",
"Ps2",
"=",
"sp",
".",
"any",
"(",
"coords",
"<",
"lo_lim",
",",
"axis",
"=",
"1",
")",
"Ps",
"=",
"Ps1",
"+",
"Ps2",
"return",
"Ps"
] | 33.833333 | 18.888889 |
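
A minimal check of the spherical branch with two hypothetical points (the function itself expects scipy imported as `sp` at module scope):

    import numpy as np

    pts = np.array([[0.1, 0.2, 0.3], [2.0, 0.0, 0.0]])
    mask = isoutside(pts, shape=[1.0])   # unit sphere centered at the origin
    # mask -> [False, True]: only the second point lies beyond r = 1
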