text (string, lengths 89–104k) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–630) |
---|---|---|---|
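The numeric columns are derived from the `text` column: `avg_line_len` appears to be the mean character length of the lines in each snippet (the first row below reports 43.5), while the exact definition of `score` is not recoverable from this dump. The sketch below is illustrative only and is not part of the dataset; the function name `avg_line_len`, the example snippet, and the whitespace handling are assumptions rather than the original pipeline.

```python
# Illustrative sketch (not from the dataset) of how the avg_line_len column
# appears to relate to the text column. The original pipeline's exact
# whitespace/newline handling is an assumption; score is not reproduced here.

def avg_line_len(text: str) -> float:
    """Mean character length of the lines in a code snippet."""
    lines = text.splitlines()
    return sum(len(line) for line in lines) / len(lines)

# Example using a snippet shaped like the first row of the table.
snippet = (
    "def show_guiref(self):\n"
    '        """Show qtconsole help"""\n'
    "        from qtconsole.usage import gui_reference\n"
    "        self.main.help.show_rich_text(gui_reference, collapse=True)"
)
print(avg_line_len(snippet))  # ~43, close to the 43.5 reported for that row
```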
def show_guiref(self):
"""Show qtconsole help"""
from qtconsole.usage import gui_reference
self.main.help.show_rich_text(gui_reference, collapse=True)
|
[
"def",
"show_guiref",
"(",
"self",
")",
":",
"from",
"qtconsole",
".",
"usage",
"import",
"gui_reference",
"self",
".",
"main",
".",
"help",
".",
"show_rich_text",
"(",
"gui_reference",
",",
"collapse",
"=",
"True",
")"
] | 43.5 | 13.5 |
def nearly_unique(arr, rel_tol=1e-4, verbose=0):
'''Heuristic method to return the uniques within some precision in a numpy array'''
results = np.array([arr[0]])
for x in arr:
if np.abs(results - x).min() > rel_tol:
results = np.append(results, x)
return results
|
[
"def",
"nearly_unique",
"(",
"arr",
",",
"rel_tol",
"=",
"1e-4",
",",
"verbose",
"=",
"0",
")",
":",
"results",
"=",
"np",
".",
"array",
"(",
"[",
"arr",
"[",
"0",
"]",
"]",
")",
"for",
"x",
"in",
"arr",
":",
"if",
"np",
".",
"abs",
"(",
"results",
"-",
"x",
")",
".",
"min",
"(",
")",
">",
"rel_tol",
":",
"results",
"=",
"np",
".",
"append",
"(",
"results",
",",
"x",
")",
"return",
"results"
] | 41.714286 | 16.857143 |
def format(self, record):
'''
Format the log record to include exc_info if the handler is enabled for a specific log level
'''
formatted_record = super(ExcInfoOnLogLevelFormatMixIn, self).format(record)
exc_info_on_loglevel = getattr(record, 'exc_info_on_loglevel', None)
exc_info_on_loglevel_formatted = getattr(record, 'exc_info_on_loglevel_formatted', None)
if exc_info_on_loglevel is None and exc_info_on_loglevel_formatted is None:
return formatted_record
# If we reached this far it means the log record was created with exc_info_on_loglevel
# If this specific handler is enabled for that record, then we should format it to
# include the exc_info details
if self.level > exc_info_on_loglevel:
# This handler is not enabled for the desired exc_info_on_loglevel, don't include exc_info
return formatted_record
# If we reached this far it means we should include exc_info
if not record.exc_info_on_loglevel_instance and not exc_info_on_loglevel_formatted:
# This should actually never occur
return formatted_record
if record.exc_info_on_loglevel_formatted is None:
# Let's cache the formatted exception to avoid recurring conversions and formatting calls
if self.formatter is None: # pylint: disable=access-member-before-definition
self.formatter = logging._defaultFormatter
record.exc_info_on_loglevel_formatted = self.formatter.formatException(
record.exc_info_on_loglevel_instance
)
# Let's format the record to include exc_info just like python's logging formatted does
if formatted_record[-1:] != '\n':
formatted_record += '\n'
try:
formatted_record += record.exc_info_on_loglevel_formatted
except UnicodeError:
# According to the standard library logging formatter comments:
#
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
formatted_record += record.exc_info_on_loglevel_formatted.decode(sys.getfilesystemencoding(),
'replace')
# Reset the record.exc_info_on_loglevel_instance because it might need
# to "travel" through a multiprocessing process and it might contain
# data which is not pickle'able
record.exc_info_on_loglevel_instance = None
return formatted_record
|
[
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"formatted_record",
"=",
"super",
"(",
"ExcInfoOnLogLevelFormatMixIn",
",",
"self",
")",
".",
"format",
"(",
"record",
")",
"exc_info_on_loglevel",
"=",
"getattr",
"(",
"record",
",",
"'exc_info_on_loglevel'",
",",
"None",
")",
"exc_info_on_loglevel_formatted",
"=",
"getattr",
"(",
"record",
",",
"'exc_info_on_loglevel_formatted'",
",",
"None",
")",
"if",
"exc_info_on_loglevel",
"is",
"None",
"and",
"exc_info_on_loglevel_formatted",
"is",
"None",
":",
"return",
"formatted_record",
"# If we reached this far it means the log record was created with exc_info_on_loglevel",
"# If this specific handler is enabled for that record, then we should format it to",
"# include the exc_info details",
"if",
"self",
".",
"level",
">",
"exc_info_on_loglevel",
":",
"# This handler is not enabled for the desired exc_info_on_loglevel, don't include exc_info",
"return",
"formatted_record",
"# If we reached this far it means we should include exc_info",
"if",
"not",
"record",
".",
"exc_info_on_loglevel_instance",
"and",
"not",
"exc_info_on_loglevel_formatted",
":",
"# This should actually never occur",
"return",
"formatted_record",
"if",
"record",
".",
"exc_info_on_loglevel_formatted",
"is",
"None",
":",
"# Let's cache the formatted exception to avoid recurring conversions and formatting calls",
"if",
"self",
".",
"formatter",
"is",
"None",
":",
"# pylint: disable=access-member-before-definition",
"self",
".",
"formatter",
"=",
"logging",
".",
"_defaultFormatter",
"record",
".",
"exc_info_on_loglevel_formatted",
"=",
"self",
".",
"formatter",
".",
"formatException",
"(",
"record",
".",
"exc_info_on_loglevel_instance",
")",
"# Let's format the record to include exc_info just like python's logging formatted does",
"if",
"formatted_record",
"[",
"-",
"1",
":",
"]",
"!=",
"'\\n'",
":",
"formatted_record",
"+=",
"'\\n'",
"try",
":",
"formatted_record",
"+=",
"record",
".",
"exc_info_on_loglevel_formatted",
"except",
"UnicodeError",
":",
"# According to the standard library logging formatter comments:",
"#",
"# Sometimes filenames have non-ASCII chars, which can lead",
"# to errors when s is Unicode and record.exc_text is str",
"# See issue 8924.",
"# We also use replace for when there are multiple",
"# encodings, e.g. UTF-8 for the filesystem and latin-1",
"# for a script. See issue 13232.",
"formatted_record",
"+=",
"record",
".",
"exc_info_on_loglevel_formatted",
".",
"decode",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
",",
"'replace'",
")",
"# Reset the record.exc_info_on_loglevel_instance because it might need",
"# to \"travel\" through a multiprocessing process and it might contain",
"# data which is not pickle'able",
"record",
".",
"exc_info_on_loglevel_instance",
"=",
"None",
"return",
"formatted_record"
] | 54.134615 | 29.711538 |
async def sendPhoto(self, chat_id, photo,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
- string: ``file_id`` for a photo existing on Telegram servers
- string: HTTP URL of a photo from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (filename, file-like object). If the filename contains
non-ASCII characters and you are using Python 2.7, make sure the
filename is a unicode string.
"""
p = _strip(locals(), more=['photo'])
return await self._api_request_with_file('sendPhoto', _rectify(p), 'photo', photo)
|
[
"async",
"def",
"sendPhoto",
"(",
"self",
",",
"chat_id",
",",
"photo",
",",
"caption",
"=",
"None",
",",
"parse_mode",
"=",
"None",
",",
"disable_notification",
"=",
"None",
",",
"reply_to_message_id",
"=",
"None",
",",
"reply_markup",
"=",
"None",
")",
":",
"p",
"=",
"_strip",
"(",
"locals",
"(",
")",
",",
"more",
"=",
"[",
"'photo'",
"]",
")",
"return",
"await",
"self",
".",
"_api_request_with_file",
"(",
"'sendPhoto'",
",",
"_rectify",
"(",
"p",
")",
",",
"'photo'",
",",
"photo",
")"
] | 46.684211 | 16.263158 |
def apply2parser(cmd_proxy, parser):
"""
Apply a CmdProxy's arguments and options
to a parser of argparse.
:type cmd_proxy: callable or CmdProxy
:type parser: cmdtree.parser.AParser
:rtype: cmdtree.parser.AParser
"""
if isinstance(cmd_proxy, CmdProxy):
parser_proxy = cmd_proxy.meta.parser
_apply2parser(
parser_proxy.arguments,
parser_proxy.options,
parser,
)
return parser
|
[
"def",
"apply2parser",
"(",
"cmd_proxy",
",",
"parser",
")",
":",
"if",
"isinstance",
"(",
"cmd_proxy",
",",
"CmdProxy",
")",
":",
"parser_proxy",
"=",
"cmd_proxy",
".",
"meta",
".",
"parser",
"_apply2parser",
"(",
"parser_proxy",
".",
"arguments",
",",
"parser_proxy",
".",
"options",
",",
"parser",
",",
")",
"return",
"parser"
] | 28.4375 | 8.5625 |
def parse_query(query_str):
"""
Drives the whole logic, by parsing, restructuring and finally, generating an ElasticSearch query.
Args:
query_str (six.text_types): the given query to be translated to an ElasticSearch query
Returns:
six.text_types: Return an ElasticSearch query.
Notes:
In case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value, being the
query_str argument.
"""
def _generate_match_all_fields_query():
# Strip colon character (special character for ES)
stripped_query_str = ' '.join(query_str.replace(':', ' ').split())
return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}
if not isinstance(query_str, six.text_type):
query_str = six.text_type(query_str.decode('utf-8'))
logger.info('Parsing: "' + query_str + '\".')
parser = StatefulParser()
rst_visitor = RestructuringVisitor()
es_visitor = ElasticSearchVisitor()
try:
unrecognized_text, parse_tree = parser.parse(query_str, Query)
if unrecognized_text: # Usually, should never happen.
msg = 'Parser returned unrecognized text: "' + unrecognized_text + \
'" for query: "' + query_str + '".'
if query_str == unrecognized_text and parse_tree is None:
# Didn't recognize anything.
logger.warn(msg)
return _generate_match_all_fields_query()
else:
msg += 'Continuing with recognized parse tree.'
logger.warn(msg)
except SyntaxError as e:
logger.warn('Parser syntax error (' + six.text_type(e) + ') with query: "' + query_str +
'". Continuing with a match_all with the given query.')
return _generate_match_all_fields_query()
# Try-Catch-all exceptions for visitors, so that search functionality never fails for the user.
try:
restructured_parse_tree = parse_tree.accept(rst_visitor)
logger.debug('Parse tree: \n' + emit_tree_format(restructured_parse_tree))
except Exception as e:
logger.exception(
RestructuringVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
try:
es_query = restructured_parse_tree.accept(es_visitor)
except Exception as e:
logger.exception(
ElasticSearchVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
if not es_query:
# Case where an empty query was generated (i.e. date query with malformed date, e.g. "d < 200").
return _generate_match_all_fields_query()
return es_query
|
[
"def",
"parse_query",
"(",
"query_str",
")",
":",
"def",
"_generate_match_all_fields_query",
"(",
")",
":",
"# Strip colon character (special character for ES)",
"stripped_query_str",
"=",
"' '",
".",
"join",
"(",
"query_str",
".",
"replace",
"(",
"':'",
",",
"' '",
")",
".",
"split",
"(",
")",
")",
"return",
"{",
"'multi_match'",
":",
"{",
"'query'",
":",
"stripped_query_str",
",",
"'fields'",
":",
"[",
"'_all'",
"]",
",",
"'zero_terms_query'",
":",
"'all'",
"}",
"}",
"if",
"not",
"isinstance",
"(",
"query_str",
",",
"six",
".",
"text_type",
")",
":",
"query_str",
"=",
"six",
".",
"text_type",
"(",
"query_str",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"logger",
".",
"info",
"(",
"'Parsing: \"'",
"+",
"query_str",
"+",
"'\\\".'",
")",
"parser",
"=",
"StatefulParser",
"(",
")",
"rst_visitor",
"=",
"RestructuringVisitor",
"(",
")",
"es_visitor",
"=",
"ElasticSearchVisitor",
"(",
")",
"try",
":",
"unrecognized_text",
",",
"parse_tree",
"=",
"parser",
".",
"parse",
"(",
"query_str",
",",
"Query",
")",
"if",
"unrecognized_text",
":",
"# Usually, should never happen.",
"msg",
"=",
"'Parser returned unrecognized text: \"'",
"+",
"unrecognized_text",
"+",
"'\" for query: \"'",
"+",
"query_str",
"+",
"'\".'",
"if",
"query_str",
"==",
"unrecognized_text",
"and",
"parse_tree",
"is",
"None",
":",
"# Didn't recognize anything.",
"logger",
".",
"warn",
"(",
"msg",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"else",
":",
"msg",
"+=",
"'Continuing with recognized parse tree.'",
"logger",
".",
"warn",
"(",
"msg",
")",
"except",
"SyntaxError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"'Parser syntax error ('",
"+",
"six",
".",
"text_type",
"(",
"e",
")",
"+",
"') with query: \"'",
"+",
"query_str",
"+",
"'\". Continuing with a match_all with the given query.'",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"# Try-Catch-all exceptions for visitors, so that search functionality never fails for the user.",
"try",
":",
"restructured_parse_tree",
"=",
"parse_tree",
".",
"accept",
"(",
"rst_visitor",
")",
"logger",
".",
"debug",
"(",
"'Parse tree: \\n'",
"+",
"emit_tree_format",
"(",
"restructured_parse_tree",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"RestructuringVisitor",
".",
"__name__",
"+",
"\" crashed\"",
"+",
"(",
"\": \"",
"+",
"six",
".",
"text_type",
"(",
"e",
")",
"+",
"\".\"",
")",
"if",
"six",
".",
"text_type",
"(",
"e",
")",
"else",
"'.'",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"try",
":",
"es_query",
"=",
"restructured_parse_tree",
".",
"accept",
"(",
"es_visitor",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"ElasticSearchVisitor",
".",
"__name__",
"+",
"\" crashed\"",
"+",
"(",
"\": \"",
"+",
"six",
".",
"text_type",
"(",
"e",
")",
"+",
"\".\"",
")",
"if",
"six",
".",
"text_type",
"(",
"e",
")",
"else",
"'.'",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"if",
"not",
"es_query",
":",
"# Case where an empty query was generated (i.e. date query with malformed date, e.g. \"d < 200\").",
"return",
"_generate_match_all_fields_query",
"(",
")",
"return",
"es_query"
] | 39.027778 | 28.805556 |
def labels_to_indices(self, labels: Sequence[str]) -> List[int]:
""" Converts a sequence of labels into their corresponding indices."""
return [self.LABEL_TO_INDEX[label] for label in labels]
|
[
"def",
"labels_to_indices",
"(",
"self",
",",
"labels",
":",
"Sequence",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"int",
"]",
":",
"return",
"[",
"self",
".",
"LABEL_TO_INDEX",
"[",
"label",
"]",
"for",
"label",
"in",
"labels",
"]"
] | 51.25 | 21.75 |
def filter_uuid_list(stmts_in, uuids, **kwargs):
"""Filter to Statements corresponding to given UUIDs
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
uuids : list[str]
A list of UUIDs to filter for.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
invert : Optional[bool]
Invert the filter to remove the Statements corresponding to the given
UUIDs.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
invert = kwargs.get('invert', False)
logger.info('Filtering %d statements for %d UUID%s...' %
(len(stmts_in), len(uuids), 's' if len(uuids) > 1 else ''))
stmts_out = []
for st in stmts_in:
if not invert:
if st.uuid in uuids:
stmts_out.append(st)
else:
if st.uuid not in uuids:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out
|
[
"def",
"filter_uuid_list",
"(",
"stmts_in",
",",
"uuids",
",",
"*",
"*",
"kwargs",
")",
":",
"invert",
"=",
"kwargs",
".",
"get",
"(",
"'invert'",
",",
"False",
")",
"logger",
".",
"info",
"(",
"'Filtering %d statements for %d UUID%s...'",
"%",
"(",
"len",
"(",
"stmts_in",
")",
",",
"len",
"(",
"uuids",
")",
",",
"'s'",
"if",
"len",
"(",
"uuids",
")",
">",
"1",
"else",
"''",
")",
")",
"stmts_out",
"=",
"[",
"]",
"for",
"st",
"in",
"stmts_in",
":",
"if",
"not",
"invert",
":",
"if",
"st",
".",
"uuid",
"in",
"uuids",
":",
"stmts_out",
".",
"append",
"(",
"st",
")",
"else",
":",
"if",
"st",
".",
"uuid",
"not",
"in",
"uuids",
":",
"stmts_out",
".",
"append",
"(",
"st",
")",
"logger",
".",
"info",
"(",
"'%d statements after filter...'",
"%",
"len",
"(",
"stmts_out",
")",
")",
"dump_pkl",
"=",
"kwargs",
".",
"get",
"(",
"'save'",
")",
"if",
"dump_pkl",
":",
"dump_statements",
"(",
"stmts_out",
",",
"dump_pkl",
")",
"return",
"stmts_out"
] | 31.72973 | 17.27027 |
def on_notify_load_status(self, webkitView, *args, **kwargs):
"""Callback function when the page was loaded completely
FYI, this function will be called after $(document).ready()
in jQuery
"""
status = webkitView.get_load_status()
if status == status.FINISHED:
if self.debug is True:
print 'Load finished'
|
[
"def",
"on_notify_load_status",
"(",
"self",
",",
"webkitView",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"status",
"=",
"webkitView",
".",
"get_load_status",
"(",
")",
"if",
"status",
"==",
"status",
".",
"FINISHED",
":",
"if",
"self",
".",
"debug",
"is",
"True",
":",
"print",
"'Load finished'"
] | 41.444444 | 9.777778 |
def _aside_from_xml(self, node, block_def_id, block_usage_id, id_generator):
"""
Create an aside from the xml and attach it to the given block
"""
id_generator = id_generator or self.id_generator
aside_type = node.tag
aside_class = self.load_aside_type(aside_type)
aside_def_id, aside_usage_id = id_generator.create_aside(block_def_id, block_usage_id, aside_type)
keys = ScopeIds(None, aside_type, aside_def_id, aside_usage_id)
aside = aside_class.parse_xml(node, self, keys, id_generator)
aside.save()
|
[
"def",
"_aside_from_xml",
"(",
"self",
",",
"node",
",",
"block_def_id",
",",
"block_usage_id",
",",
"id_generator",
")",
":",
"id_generator",
"=",
"id_generator",
"or",
"self",
".",
"id_generator",
"aside_type",
"=",
"node",
".",
"tag",
"aside_class",
"=",
"self",
".",
"load_aside_type",
"(",
"aside_type",
")",
"aside_def_id",
",",
"aside_usage_id",
"=",
"id_generator",
".",
"create_aside",
"(",
"block_def_id",
",",
"block_usage_id",
",",
"aside_type",
")",
"keys",
"=",
"ScopeIds",
"(",
"None",
",",
"aside_type",
",",
"aside_def_id",
",",
"aside_usage_id",
")",
"aside",
"=",
"aside_class",
".",
"parse_xml",
"(",
"node",
",",
"self",
",",
"keys",
",",
"id_generator",
")",
"aside",
".",
"save",
"(",
")"
] | 47.666667 | 24.333333 |
def create(self, chat_id=None, name=None, owner=None, user_list=None):
"""
创建群聊会话
详情请参考
https://work.weixin.qq.com/api/doc#90000/90135/90245
限制说明:
只允许企业自建应用调用,且应用的可见范围必须是根部门;
群成员人数不可超过管理端配置的“群成员人数上限”,且最大不可超过500人;
每企业创建群数不可超过1000/天;
:param chat_id: 群聊的唯一标志,不能与已有的群重复;字符串类型,最长32个字符。只允许字符0-9及字母a-zA-Z。如果不填,系统会随机生成群id
:param name: 群聊名,最多50个utf8字符,超过将截断
:param owner: 指定群主的id。如果不指定,系统会随机从userlist中选一人作为群主
:param user_list: 会话成员列表,成员用userid来标识。至少2人,至多500人
:return: 返回的 JSON 数据包
"""
data = optionaldict(
chatid=chat_id,
name=name,
owner=owner,
userlist=user_list,
)
return self._post('appchat/create', data=data)
|
[
"def",
"create",
"(",
"self",
",",
"chat_id",
"=",
"None",
",",
"name",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"user_list",
"=",
"None",
")",
":",
"data",
"=",
"optionaldict",
"(",
"chatid",
"=",
"chat_id",
",",
"name",
"=",
"name",
",",
"owner",
"=",
"owner",
",",
"userlist",
"=",
"user_list",
",",
")",
"return",
"self",
".",
"_post",
"(",
"'appchat/create'",
",",
"data",
"=",
"data",
")"
] | 30.72 | 19.36 |
def hello(environ, start_response):
'''The WSGI_ application handler which returns an iterable
over the "Hello World!" message.'''
if environ['REQUEST_METHOD'] == 'GET':
data = b'Hello World!\n'
status = '200 OK'
response_headers = [
('Content-type', 'text/plain'),
('Content-Length', str(len(data)))
]
start_response(status, response_headers)
return iter([data])
else:
raise MethodNotAllowed
|
[
"def",
"hello",
"(",
"environ",
",",
"start_response",
")",
":",
"if",
"environ",
"[",
"'REQUEST_METHOD'",
"]",
"==",
"'GET'",
":",
"data",
"=",
"b'Hello World!\\n'",
"status",
"=",
"'200 OK'",
"response_headers",
"=",
"[",
"(",
"'Content-type'",
",",
"'text/plain'",
")",
",",
"(",
"'Content-Length'",
",",
"str",
"(",
"len",
"(",
"data",
")",
")",
")",
"]",
"start_response",
"(",
"status",
",",
"response_headers",
")",
"return",
"iter",
"(",
"[",
"data",
"]",
")",
"else",
":",
"raise",
"MethodNotAllowed"
] | 33.928571 | 11.928571 |
def getQueryParams(url):
"""Get URL query parameters."""
query = urlsplit(url)[3]
out.debug(u'Extracting query parameters from %r (%r)...' % (url, query))
return cgi.parse_qs(query)
|
[
"def",
"getQueryParams",
"(",
"url",
")",
":",
"query",
"=",
"urlsplit",
"(",
"url",
")",
"[",
"3",
"]",
"out",
".",
"debug",
"(",
"u'Extracting query parameters from %r (%r)...'",
"%",
"(",
"url",
",",
"query",
")",
")",
"return",
"cgi",
".",
"parse_qs",
"(",
"query",
")"
] | 38.6 | 14.8 |
def paginate_results(self, results, options):
"Return a django.core.paginator.Page of results."
limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT)
paginator = Paginator(results, limit)
page = options.get('page', 1)
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
return results
|
[
"def",
"paginate_results",
"(",
"self",
",",
"results",
",",
"options",
")",
":",
"limit",
"=",
"options",
".",
"get",
"(",
"'limit'",
",",
"settings",
".",
"SELECTABLE_MAX_LIMIT",
")",
"paginator",
"=",
"Paginator",
"(",
"results",
",",
"limit",
")",
"page",
"=",
"options",
".",
"get",
"(",
"'page'",
",",
"1",
")",
"try",
":",
"results",
"=",
"paginator",
".",
"page",
"(",
"page",
")",
"except",
"(",
"EmptyPage",
",",
"InvalidPage",
")",
":",
"results",
"=",
"paginator",
".",
"page",
"(",
"paginator",
".",
"num_pages",
")",
"return",
"results"
] | 42.4 | 12.2 |
def to_json(self):
"""
Returns a json string containing all relevant data to recreate this pyalveo.Client.
"""
data = dict(self.__dict__)
data.pop('context',None)
data['oauth'] = self.oauth.to_dict()
data['cache'] = self.cache.to_dict()
return json.dumps(data)
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"data",
"=",
"dict",
"(",
"self",
".",
"__dict__",
")",
"data",
".",
"pop",
"(",
"'context'",
",",
"None",
")",
"data",
"[",
"'oauth'",
"]",
"=",
"self",
".",
"oauth",
".",
"to_dict",
"(",
")",
"data",
"[",
"'cache'",
"]",
"=",
"self",
".",
"cache",
".",
"to_dict",
"(",
")",
"return",
"json",
".",
"dumps",
"(",
"data",
")"
] | 35.555556 | 12 |
def _connect(self):
"""Connect to the server."""
assert get_thread_ident() == self.ioloop_thread_id
if self._stream:
self._logger.warn('Disconnecting existing connection to {0!r} '
'to create a new connection')
self._disconnect()
yield self._disconnected.until_set()
stream = None
try:
host, port = self._bindaddr
stream = self._stream = yield self._tcp_client.connect(
host, port, max_buffer_size=self.MAX_MSG_SIZE)
stream.set_close_callback(partial(self._stream_closed_callback,
stream))
# our message packets are small, don't delay sending them.
stream.set_nodelay(True)
stream.max_write_buffer_size = self.MAX_WRITE_BUFFER_SIZE
self._logger.debug('Connected to {0} with client addr {1}'
.format(self.bind_address_string,
address_to_string(stream.socket.getsockname())))
if self._connect_failures >= 5:
self._logger.warn("Reconnected to {0}"
.format(self.bind_address_string))
self._connect_failures = 0
except Exception, e:
if self._connect_failures % 5 == 0:
# warn on every fifth failure
# TODO (NM 2015-03-04) This can get a bit verbose, and typically we have
# other mechanisms for tracking failed connections. Consider doing some
# kind of exponential backoff starting at 5 times the reconnect time up to
# once per 5 minutes
self._logger.debug("Failed to connect to {0!r}: {1}"
.format(self._bindaddr, e))
self._connect_failures += 1
stream = None
yield gen.moment
# TODO some kind of error rate limiting?
if self._stream:
# Can't use _disconnect() and wait on self._disconnected, since
# exception may have been raised before _stream_closed_callback
# was attached to the iostream.
self._logger.debug('stream was set even though connecting failed')
self._stream.close()
self._disconnected.set()
if stream:
self._disconnected.clear()
self._connected.set()
self.last_connect_time = self.ioloop.time()
try:
self.notify_connected(True)
except Exception:
self._logger.exception("Notify connect failed. Disconnecting.")
self._disconnect()
|
[
"def",
"_connect",
"(",
"self",
")",
":",
"assert",
"get_thread_ident",
"(",
")",
"==",
"self",
".",
"ioloop_thread_id",
"if",
"self",
".",
"_stream",
":",
"self",
".",
"_logger",
".",
"warn",
"(",
"'Disconnecting existing connection to {0!r} '",
"'to create a new connection'",
")",
"self",
".",
"_disconnect",
"(",
")",
"yield",
"self",
".",
"_disconnected",
".",
"until_set",
"(",
")",
"stream",
"=",
"None",
"try",
":",
"host",
",",
"port",
"=",
"self",
".",
"_bindaddr",
"stream",
"=",
"self",
".",
"_stream",
"=",
"yield",
"self",
".",
"_tcp_client",
".",
"connect",
"(",
"host",
",",
"port",
",",
"max_buffer_size",
"=",
"self",
".",
"MAX_MSG_SIZE",
")",
"stream",
".",
"set_close_callback",
"(",
"partial",
"(",
"self",
".",
"_stream_closed_callback",
",",
"stream",
")",
")",
"# our message packets are small, don't delay sending them.",
"stream",
".",
"set_nodelay",
"(",
"True",
")",
"stream",
".",
"max_write_buffer_size",
"=",
"self",
".",
"MAX_WRITE_BUFFER_SIZE",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Connected to {0} with client addr {1}'",
".",
"format",
"(",
"self",
".",
"bind_address_string",
",",
"address_to_string",
"(",
"stream",
".",
"socket",
".",
"getsockname",
"(",
")",
")",
")",
")",
"if",
"self",
".",
"_connect_failures",
">=",
"5",
":",
"self",
".",
"_logger",
".",
"warn",
"(",
"\"Reconnected to {0}\"",
".",
"format",
"(",
"self",
".",
"bind_address_string",
")",
")",
"self",
".",
"_connect_failures",
"=",
"0",
"except",
"Exception",
",",
"e",
":",
"if",
"self",
".",
"_connect_failures",
"%",
"5",
"==",
"0",
":",
"# warn on every fifth failure",
"# TODO (NM 2015-03-04) This can get a bit verbose, and typically we have",
"# other mechanisms for tracking failed connections. Consider doing some",
"# kind of exponential backoff starting at 5 times the reconnect time up to",
"# once per 5 minutes",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Failed to connect to {0!r}: {1}\"",
".",
"format",
"(",
"self",
".",
"_bindaddr",
",",
"e",
")",
")",
"self",
".",
"_connect_failures",
"+=",
"1",
"stream",
"=",
"None",
"yield",
"gen",
".",
"moment",
"# TODO some kind of error rate limiting?",
"if",
"self",
".",
"_stream",
":",
"# Can't use _disconnect() and wait on self._disconnected, since",
"# exception may have been raised before _stream_closed_callback",
"# was attached to the iostream.",
"self",
".",
"_logger",
".",
"debug",
"(",
"'stream was set even though connecting failed'",
")",
"self",
".",
"_stream",
".",
"close",
"(",
")",
"self",
".",
"_disconnected",
".",
"set",
"(",
")",
"if",
"stream",
":",
"self",
".",
"_disconnected",
".",
"clear",
"(",
")",
"self",
".",
"_connected",
".",
"set",
"(",
")",
"self",
".",
"last_connect_time",
"=",
"self",
".",
"ioloop",
".",
"time",
"(",
")",
"try",
":",
"self",
".",
"notify_connected",
"(",
"True",
")",
"except",
"Exception",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"\"Notify connect failed. Disconnecting.\"",
")",
"self",
".",
"_disconnect",
"(",
")"
] | 46.689655 | 20.310345 |
def set_schema(self, schema_name, include_public=True):
"""
Main API method to current database schema,
but it does not actually modify the db connection.
"""
self.tenant = FakeTenant(schema_name=schema_name)
self.schema_name = schema_name
self.include_public_schema = include_public
self.set_settings_schema(schema_name)
self.search_path_set = False
# Content type can no longer be cached as public and tenant schemas
# have different models. If someone wants to change this, the cache
# needs to be separated between public and shared schemas. If this
# cache isn't cleared, this can cause permission problems. For example,
# on public, a particular model has id 14, but on the tenants it has
# the id 15. if 14 is cached instead of 15, the permissions for the
# wrong model will be fetched.
ContentType.objects.clear_cache()
|
[
"def",
"set_schema",
"(",
"self",
",",
"schema_name",
",",
"include_public",
"=",
"True",
")",
":",
"self",
".",
"tenant",
"=",
"FakeTenant",
"(",
"schema_name",
"=",
"schema_name",
")",
"self",
".",
"schema_name",
"=",
"schema_name",
"self",
".",
"include_public_schema",
"=",
"include_public",
"self",
".",
"set_settings_schema",
"(",
"schema_name",
")",
"self",
".",
"search_path_set",
"=",
"False",
"# Content type can no longer be cached as public and tenant schemas",
"# have different models. If someone wants to change this, the cache",
"# needs to be separated between public and shared schemas. If this",
"# cache isn't cleared, this can cause permission problems. For example,",
"# on public, a particular model has id 14, but on the tenants it has",
"# the id 15. if 14 is cached instead of 15, the permissions for the",
"# wrong model will be fetched.",
"ContentType",
".",
"objects",
".",
"clear_cache",
"(",
")"
] | 52.555556 | 16.666667 |
async def takewhile(source, func):
"""Forward an asynchronous sequence while a condition is met.
The given function takes the item as an argument and returns a boolean
corresponding to the condition to meet. The function can either be
synchronous or asynchronous.
"""
iscorofunc = asyncio.iscoroutinefunction(func)
async with streamcontext(source) as streamer:
async for item in streamer:
result = func(item)
if iscorofunc:
result = await result
if not result:
return
yield item
|
[
"async",
"def",
"takewhile",
"(",
"source",
",",
"func",
")",
":",
"iscorofunc",
"=",
"asyncio",
".",
"iscoroutinefunction",
"(",
"func",
")",
"async",
"with",
"streamcontext",
"(",
"source",
")",
"as",
"streamer",
":",
"async",
"for",
"item",
"in",
"streamer",
":",
"result",
"=",
"func",
"(",
"item",
")",
"if",
"iscorofunc",
":",
"result",
"=",
"await",
"result",
"if",
"not",
"result",
":",
"return",
"yield",
"item"
] | 36.25 | 13.625 |
def init_discrete_hmm_spectral(C_full, nstates, reversible=True, stationary=True, active_set=None, P=None,
eps_A=None, eps_B=None, separate=None):
"""Initializes discrete HMM using spectral clustering of observation counts
Initializes HMM as described in [1]_. First estimates a Markov state model
on the given observations, then uses PCCA+ to coarse-grain the transition
matrix [2]_ which initializes the HMM transition matrix. The HMM output
probabilities are given by Bayesian inversion from the PCCA+ memberships [1]_.
The regularization parameters eps_A and eps_B are used
to guarantee that the hidden transition matrix and output probability matrix
have no zeros. HMM estimation algorithms such as the EM algorithm and the
Bayesian sampling algorithm cannot recover from zero entries, i.e. once they
are zero, they will stay zero.
Parameters
----------
C_full : ndarray(N, N)
Transition count matrix on the full observable state space
nstates : int
The number of hidden states.
reversible : bool
Estimate reversible HMM transition matrix.
stationary : bool
p0 is the stationary distribution of P. In this case, will not
active_set : ndarray(n, dtype=int) or None
Index area. Will estimate kinetics only on the given subset of C
P : ndarray(n, n)
Transition matrix estimated from C (with option reversible). Use this
option if P has already been estimated to avoid estimating it twice.
eps_A : float or None
Minimum transition probability. Default: 0.01 / nstates
eps_B : float or None
Minimum output probability. Default: 0.01 / nfull
separate : None or iterable of int
Force the given set of observed states to stay in a separate hidden state.
The remaining nstates-1 states will be assigned by a metastable decomposition.
Returns
-------
p0 : ndarray(n)
Hidden state initial distribution
A : ndarray(n, n)
Hidden state transition matrix
B : ndarray(n, N)
Hidden-to-observable state output probabilities
Raises
------
ValueError
If the given active set is illegal.
NotImplementedError
If the number of hidden states exceeds the number of observed states.
Examples
--------
Generate initial model for a discrete output model.
>>> import numpy as np
>>> C = np.array([[0.5, 0.5, 0.0], [0.4, 0.5, 0.1], [0.0, 0.1, 0.9]])
>>> initial_model = init_discrete_hmm_spectral(C, 2)
References
----------
.. [1] F. Noe, H. Wu, J.-H. Prinz and N. Plattner: Projected and hidden
Markov models for calculating kinetics and metastable states of
complex molecules. J. Chem. Phys. 139, 184114 (2013)
.. [2] S. Kube and M. Weber: A coarse graining method for the identification
of transition rates between molecular conformations.
J. Chem. Phys. 126, 024103 (2007)
"""
# MICROSTATE COUNT MATRIX
nfull = C_full.shape[0]
# INPUTS
if eps_A is None: # default transition probability, in order to avoid zero columns
eps_A = 0.01 / nstates
if eps_B is None: # default output probability, in order to avoid zero columns
eps_B = 0.01 / nfull
# Manage sets
symsum = C_full.sum(axis=0) + C_full.sum(axis=1)
nonempty = np.where(symsum > 0)[0]
if active_set is None:
active_set = nonempty
else:
if np.any(symsum[active_set] == 0):
raise ValueError('Given active set has empty states') # don't tolerate empty states
if P is not None:
if np.shape(P)[0] != active_set.size: # needs to fit to active
raise ValueError('Given initial transition matrix P has shape ' + str(np.shape(P))
+ 'while active set has size ' + str(active_set.size))
# when using separate states, only keep the nonempty ones (the others don't matter)
if separate is None:
active_nonseparate = active_set.copy()
nmeta = nstates
else:
if np.max(separate) >= nfull:
raise ValueError('Separate set has indexes that do not exist in full state space: '
+ str(np.max(separate)))
active_nonseparate = np.array(list(set(active_set) - set(separate)))
nmeta = nstates - 1
# check if we can proceed
if active_nonseparate.size < nmeta:
raise NotImplementedError('Trying to initialize ' + str(nmeta) + '-state HMM from smaller '
+ str(active_nonseparate.size) + '-state MSM.')
# MICROSTATE TRANSITION MATRIX (MSM).
C_active = C_full[np.ix_(active_set, active_set)]
if P is None: # This matrix may be disconnected and have transient states
P_active = _tmatrix_disconnected.estimate_P(C_active, reversible=reversible, maxiter=10000) # short iteration
else:
P_active = P
# MICROSTATE EQUILIBRIUM DISTRIBUTION
pi_active = _tmatrix_disconnected.stationary_distribution(P_active, C=C_active)
pi_full = np.zeros(nfull)
pi_full[active_set] = pi_active
# NONSEPARATE TRANSITION MATRIX FOR PCCA+
C_active_nonseparate = C_full[np.ix_(active_nonseparate, active_nonseparate)]
if reversible and separate is None: # in this case we already have a reversible estimate with the right size
P_active_nonseparate = P_active
else: # not yet reversible. re-estimate
P_active_nonseparate = _tmatrix_disconnected.estimate_P(C_active_nonseparate, reversible=True)
# COARSE-GRAINING WITH PCCA+
if active_nonseparate.size > nmeta:
from msmtools.analysis.dense.pcca import PCCA
pcca_obj = PCCA(P_active_nonseparate, nmeta)
M_active_nonseparate = pcca_obj.memberships # memberships
B_active_nonseparate = pcca_obj.output_probabilities # output probabilities
else: # equal size
M_active_nonseparate = np.eye(nmeta)
B_active_nonseparate = np.eye(nmeta)
# ADD SEPARATE STATE IF NEEDED
if separate is None:
M_active = M_active_nonseparate
else:
M_full = np.zeros((nfull, nstates))
M_full[active_nonseparate, :nmeta] = M_active_nonseparate
M_full[separate, -1] = 1
M_active = M_full[active_set]
# COARSE-GRAINED TRANSITION MATRIX
P_hmm = coarse_grain_transition_matrix(P_active, M_active)
if reversible:
P_hmm = _tmatrix_disconnected.enforce_reversible_on_closed(P_hmm)
C_hmm = M_active.T.dot(C_active).dot(M_active)
pi_hmm = _tmatrix_disconnected.stationary_distribution(P_hmm, C=C_hmm) # need C_hmm in case if A is disconnected
# COARSE-GRAINED OUTPUT DISTRIBUTION
B_hmm = np.zeros((nstates, nfull))
B_hmm[:nmeta, active_nonseparate] = B_active_nonseparate
if separate is not None: # add separate states
B_hmm[-1, separate] = pi_full[separate]
# REGULARIZE SOLUTION
pi_hmm, P_hmm = regularize_hidden(pi_hmm, P_hmm, reversible=reversible, stationary=stationary, C=C_hmm, eps=eps_A)
B_hmm = regularize_pobs(B_hmm, nonempty=nonempty, separate=separate, eps=eps_B)
# print 'cg pi: ', pi_hmm
# print 'cg A:\n ', P_hmm
# print 'cg B:\n ', B_hmm
logger().info('Initial model: ')
logger().info('initial distribution = \n'+str(pi_hmm))
logger().info('transition matrix = \n'+str(P_hmm))
logger().info('output matrix = \n'+str(B_hmm.T))
return pi_hmm, P_hmm, B_hmm
|
[
"def",
"init_discrete_hmm_spectral",
"(",
"C_full",
",",
"nstates",
",",
"reversible",
"=",
"True",
",",
"stationary",
"=",
"True",
",",
"active_set",
"=",
"None",
",",
"P",
"=",
"None",
",",
"eps_A",
"=",
"None",
",",
"eps_B",
"=",
"None",
",",
"separate",
"=",
"None",
")",
":",
"# MICROSTATE COUNT MATRIX",
"nfull",
"=",
"C_full",
".",
"shape",
"[",
"0",
"]",
"# INPUTS",
"if",
"eps_A",
"is",
"None",
":",
"# default transition probability, in order to avoid zero columns",
"eps_A",
"=",
"0.01",
"/",
"nstates",
"if",
"eps_B",
"is",
"None",
":",
"# default output probability, in order to avoid zero columns",
"eps_B",
"=",
"0.01",
"/",
"nfull",
"# Manage sets",
"symsum",
"=",
"C_full",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"+",
"C_full",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"nonempty",
"=",
"np",
".",
"where",
"(",
"symsum",
">",
"0",
")",
"[",
"0",
"]",
"if",
"active_set",
"is",
"None",
":",
"active_set",
"=",
"nonempty",
"else",
":",
"if",
"np",
".",
"any",
"(",
"symsum",
"[",
"active_set",
"]",
"==",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Given active set has empty states'",
")",
"# don't tolerate empty states",
"if",
"P",
"is",
"not",
"None",
":",
"if",
"np",
".",
"shape",
"(",
"P",
")",
"[",
"0",
"]",
"!=",
"active_set",
".",
"size",
":",
"# needs to fit to active",
"raise",
"ValueError",
"(",
"'Given initial transition matrix P has shape '",
"+",
"str",
"(",
"np",
".",
"shape",
"(",
"P",
")",
")",
"+",
"'while active set has size '",
"+",
"str",
"(",
"active_set",
".",
"size",
")",
")",
"# when using separate states, only keep the nonempty ones (the others don't matter)",
"if",
"separate",
"is",
"None",
":",
"active_nonseparate",
"=",
"active_set",
".",
"copy",
"(",
")",
"nmeta",
"=",
"nstates",
"else",
":",
"if",
"np",
".",
"max",
"(",
"separate",
")",
">=",
"nfull",
":",
"raise",
"ValueError",
"(",
"'Separate set has indexes that do not exist in full state space: '",
"+",
"str",
"(",
"np",
".",
"max",
"(",
"separate",
")",
")",
")",
"active_nonseparate",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"set",
"(",
"active_set",
")",
"-",
"set",
"(",
"separate",
")",
")",
")",
"nmeta",
"=",
"nstates",
"-",
"1",
"# check if we can proceed",
"if",
"active_nonseparate",
".",
"size",
"<",
"nmeta",
":",
"raise",
"NotImplementedError",
"(",
"'Trying to initialize '",
"+",
"str",
"(",
"nmeta",
")",
"+",
"'-state HMM from smaller '",
"+",
"str",
"(",
"active_nonseparate",
".",
"size",
")",
"+",
"'-state MSM.'",
")",
"# MICROSTATE TRANSITION MATRIX (MSM).",
"C_active",
"=",
"C_full",
"[",
"np",
".",
"ix_",
"(",
"active_set",
",",
"active_set",
")",
"]",
"if",
"P",
"is",
"None",
":",
"# This matrix may be disconnected and have transient states",
"P_active",
"=",
"_tmatrix_disconnected",
".",
"estimate_P",
"(",
"C_active",
",",
"reversible",
"=",
"reversible",
",",
"maxiter",
"=",
"10000",
")",
"# short iteration",
"else",
":",
"P_active",
"=",
"P",
"# MICROSTATE EQUILIBRIUM DISTRIBUTION",
"pi_active",
"=",
"_tmatrix_disconnected",
".",
"stationary_distribution",
"(",
"P_active",
",",
"C",
"=",
"C_active",
")",
"pi_full",
"=",
"np",
".",
"zeros",
"(",
"nfull",
")",
"pi_full",
"[",
"active_set",
"]",
"=",
"pi_active",
"# NONSEPARATE TRANSITION MATRIX FOR PCCA+",
"C_active_nonseparate",
"=",
"C_full",
"[",
"np",
".",
"ix_",
"(",
"active_nonseparate",
",",
"active_nonseparate",
")",
"]",
"if",
"reversible",
"and",
"separate",
"is",
"None",
":",
"# in this case we already have a reversible estimate with the right size",
"P_active_nonseparate",
"=",
"P_active",
"else",
":",
"# not yet reversible. re-estimate",
"P_active_nonseparate",
"=",
"_tmatrix_disconnected",
".",
"estimate_P",
"(",
"C_active_nonseparate",
",",
"reversible",
"=",
"True",
")",
"# COARSE-GRAINING WITH PCCA+",
"if",
"active_nonseparate",
".",
"size",
">",
"nmeta",
":",
"from",
"msmtools",
".",
"analysis",
".",
"dense",
".",
"pcca",
"import",
"PCCA",
"pcca_obj",
"=",
"PCCA",
"(",
"P_active_nonseparate",
",",
"nmeta",
")",
"M_active_nonseparate",
"=",
"pcca_obj",
".",
"memberships",
"# memberships",
"B_active_nonseparate",
"=",
"pcca_obj",
".",
"output_probabilities",
"# output probabilities",
"else",
":",
"# equal size",
"M_active_nonseparate",
"=",
"np",
".",
"eye",
"(",
"nmeta",
")",
"B_active_nonseparate",
"=",
"np",
".",
"eye",
"(",
"nmeta",
")",
"# ADD SEPARATE STATE IF NEEDED",
"if",
"separate",
"is",
"None",
":",
"M_active",
"=",
"M_active_nonseparate",
"else",
":",
"M_full",
"=",
"np",
".",
"zeros",
"(",
"(",
"nfull",
",",
"nstates",
")",
")",
"M_full",
"[",
"active_nonseparate",
",",
":",
"nmeta",
"]",
"=",
"M_active_nonseparate",
"M_full",
"[",
"separate",
",",
"-",
"1",
"]",
"=",
"1",
"M_active",
"=",
"M_full",
"[",
"active_set",
"]",
"# COARSE-GRAINED TRANSITION MATRIX",
"P_hmm",
"=",
"coarse_grain_transition_matrix",
"(",
"P_active",
",",
"M_active",
")",
"if",
"reversible",
":",
"P_hmm",
"=",
"_tmatrix_disconnected",
".",
"enforce_reversible_on_closed",
"(",
"P_hmm",
")",
"C_hmm",
"=",
"M_active",
".",
"T",
".",
"dot",
"(",
"C_active",
")",
".",
"dot",
"(",
"M_active",
")",
"pi_hmm",
"=",
"_tmatrix_disconnected",
".",
"stationary_distribution",
"(",
"P_hmm",
",",
"C",
"=",
"C_hmm",
")",
"# need C_hmm in case if A is disconnected",
"# COARSE-GRAINED OUTPUT DISTRIBUTION",
"B_hmm",
"=",
"np",
".",
"zeros",
"(",
"(",
"nstates",
",",
"nfull",
")",
")",
"B_hmm",
"[",
":",
"nmeta",
",",
"active_nonseparate",
"]",
"=",
"B_active_nonseparate",
"if",
"separate",
"is",
"not",
"None",
":",
"# add separate states",
"B_hmm",
"[",
"-",
"1",
",",
"separate",
"]",
"=",
"pi_full",
"[",
"separate",
"]",
"# REGULARIZE SOLUTION",
"pi_hmm",
",",
"P_hmm",
"=",
"regularize_hidden",
"(",
"pi_hmm",
",",
"P_hmm",
",",
"reversible",
"=",
"reversible",
",",
"stationary",
"=",
"stationary",
",",
"C",
"=",
"C_hmm",
",",
"eps",
"=",
"eps_A",
")",
"B_hmm",
"=",
"regularize_pobs",
"(",
"B_hmm",
",",
"nonempty",
"=",
"nonempty",
",",
"separate",
"=",
"separate",
",",
"eps",
"=",
"eps_B",
")",
"# print 'cg pi: ', pi_hmm",
"# print 'cg A:\\n ', P_hmm",
"# print 'cg B:\\n ', B_hmm",
"logger",
"(",
")",
".",
"info",
"(",
"'Initial model: '",
")",
"logger",
"(",
")",
".",
"info",
"(",
"'initial distribution = \\n'",
"+",
"str",
"(",
"pi_hmm",
")",
")",
"logger",
"(",
")",
".",
"info",
"(",
"'transition matrix = \\n'",
"+",
"str",
"(",
"P_hmm",
")",
")",
"logger",
"(",
")",
".",
"info",
"(",
"'output matrix = \\n'",
"+",
"str",
"(",
"B_hmm",
".",
"T",
")",
")",
"return",
"pi_hmm",
",",
"P_hmm",
",",
"B_hmm"
] | 42.668605 | 24.540698 |
def _format_evidence_text(stmt):
"""Returns evidence metadata with highlighted evidence text.
Parameters
----------
stmt : indra.Statement
The Statement with Evidence to be formatted.
Returns
-------
list of dicts
List of dictionaries corresponding to each Evidence object in the
Statement's evidence list. Each dictionary has keys 'source_api',
'pmid' and 'text', drawn from the corresponding fields in the
Evidence objects. The text entry of the dict includes
`<span>` tags identifying the agents referenced by the Statement.
"""
def get_role(ag_ix):
if isinstance(stmt, Complex) or \
isinstance(stmt, SelfModification) or \
isinstance(stmt, ActiveForm) or isinstance(stmt, Conversion) or\
isinstance(stmt, Translocation):
return 'other'
else:
assert len(stmt.agent_list()) == 2, (len(stmt.agent_list()),
type(stmt))
return 'subject' if ag_ix == 0 else 'object'
ev_list = []
for ix, ev in enumerate(stmt.evidence):
# Expand the source api to include the sub-database
if ev.source_api == 'biopax' and \
'source_sub_id' in ev.annotations and \
ev.annotations['source_sub_id']:
source_api = '%s:%s' % (ev.source_api,
ev.annotations['source_sub_id'])
else:
source_api = ev.source_api
# Prepare the evidence text
if ev.text is None:
format_text = None
else:
indices = []
for ix, ag in enumerate(stmt.agent_list()):
if ag is None:
continue
# If the statement has been preassembled, it will have
# this entry in annotations
try:
ag_text = ev.annotations['agents']['raw_text'][ix]
if ag_text is None:
raise KeyError
# Otherwise we try to get the agent text from db_refs
except KeyError:
ag_text = ag.db_refs.get('TEXT')
if ag_text is None:
continue
role = get_role(ix)
# Get the tag with the correct badge
tag_start = '<span class="badge badge-%s">' % role
tag_close = '</span>'
# Build up a set of indices
indices += [(m.start(), m.start() + len(ag_text),
ag_text, tag_start, tag_close)
for m in re.finditer(re.escape(ag_text),
ev.text)]
format_text = tag_text(ev.text, indices)
ev_list.append({'source_api': source_api,
'pmid': ev.pmid,
'text_refs': ev.text_refs,
'text': format_text,
'source_hash': ev.source_hash })
return ev_list
|
[
"def",
"_format_evidence_text",
"(",
"stmt",
")",
":",
"def",
"get_role",
"(",
"ag_ix",
")",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"Complex",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"SelfModification",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"ActiveForm",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"Conversion",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"Translocation",
")",
":",
"return",
"'other'",
"else",
":",
"assert",
"len",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
"==",
"2",
",",
"(",
"len",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
",",
"type",
"(",
"stmt",
")",
")",
"return",
"'subject'",
"if",
"ag_ix",
"==",
"0",
"else",
"'object'",
"ev_list",
"=",
"[",
"]",
"for",
"ix",
",",
"ev",
"in",
"enumerate",
"(",
"stmt",
".",
"evidence",
")",
":",
"# Expand the source api to include the sub-database",
"if",
"ev",
".",
"source_api",
"==",
"'biopax'",
"and",
"'source_sub_id'",
"in",
"ev",
".",
"annotations",
"and",
"ev",
".",
"annotations",
"[",
"'source_sub_id'",
"]",
":",
"source_api",
"=",
"'%s:%s'",
"%",
"(",
"ev",
".",
"source_api",
",",
"ev",
".",
"annotations",
"[",
"'source_sub_id'",
"]",
")",
"else",
":",
"source_api",
"=",
"ev",
".",
"source_api",
"# Prepare the evidence text",
"if",
"ev",
".",
"text",
"is",
"None",
":",
"format_text",
"=",
"None",
"else",
":",
"indices",
"=",
"[",
"]",
"for",
"ix",
",",
"ag",
"in",
"enumerate",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
":",
"if",
"ag",
"is",
"None",
":",
"continue",
"# If the statement has been preassembled, it will have",
"# this entry in annotations",
"try",
":",
"ag_text",
"=",
"ev",
".",
"annotations",
"[",
"'agents'",
"]",
"[",
"'raw_text'",
"]",
"[",
"ix",
"]",
"if",
"ag_text",
"is",
"None",
":",
"raise",
"KeyError",
"# Otherwise we try to get the agent text from db_refs",
"except",
"KeyError",
":",
"ag_text",
"=",
"ag",
".",
"db_refs",
".",
"get",
"(",
"'TEXT'",
")",
"if",
"ag_text",
"is",
"None",
":",
"continue",
"role",
"=",
"get_role",
"(",
"ix",
")",
"# Get the tag with the correct badge",
"tag_start",
"=",
"'<span class=\"badge badge-%s\">'",
"%",
"role",
"tag_close",
"=",
"'</span>'",
"# Build up a set of indices",
"indices",
"+=",
"[",
"(",
"m",
".",
"start",
"(",
")",
",",
"m",
".",
"start",
"(",
")",
"+",
"len",
"(",
"ag_text",
")",
",",
"ag_text",
",",
"tag_start",
",",
"tag_close",
")",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"re",
".",
"escape",
"(",
"ag_text",
")",
",",
"ev",
".",
"text",
")",
"]",
"format_text",
"=",
"tag_text",
"(",
"ev",
".",
"text",
",",
"indices",
")",
"ev_list",
".",
"append",
"(",
"{",
"'source_api'",
":",
"source_api",
",",
"'pmid'",
":",
"ev",
".",
"pmid",
",",
"'text_refs'",
":",
"ev",
".",
"text_refs",
",",
"'text'",
":",
"format_text",
",",
"'source_hash'",
":",
"ev",
".",
"source_hash",
"}",
")",
"return",
"ev_list"
] | 43.8 | 18 |
def set_secure_boot_mode(self, secure_boot_enable):
"""Enable/Disable secure boot on the server.
:param secure_boot_enable: True, if secure boot needs to be
enabled for next boot, else False.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('SecureBootEnable',
secure_boot_enable)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg)
|
[
"def",
"set_secure_boot_mode",
"(",
"self",
",",
"secure_boot_enable",
")",
":",
"if",
"self",
".",
"_is_boot_mode_uefi",
"(",
")",
":",
"self",
".",
"_change_secure_boot_settings",
"(",
"'SecureBootEnable'",
",",
"secure_boot_enable",
")",
"else",
":",
"msg",
"=",
"(",
"'System is not in UEFI boot mode. \"SecureBoot\" related '",
"'resources cannot be changed.'",
")",
"raise",
"exception",
".",
"IloCommandNotSupportedInBiosError",
"(",
"msg",
")"
] | 47.3125 | 18.25 |
def _bias_scale(x, b, data_format):
"""The multiplication counter part of tf.nn.bias_add."""
if data_format == 'NHWC':
return x * b
elif data_format == 'NCHW':
return x * _to_channel_first_bias(b)
else:
raise ValueError('invalid data_format: %s' % data_format)
|
[
"def",
"_bias_scale",
"(",
"x",
",",
"b",
",",
"data_format",
")",
":",
"if",
"data_format",
"==",
"'NHWC'",
":",
"return",
"x",
"*",
"b",
"elif",
"data_format",
"==",
"'NCHW'",
":",
"return",
"x",
"*",
"_to_channel_first_bias",
"(",
"b",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid data_format: %s'",
"%",
"data_format",
")"
] | 36.625 | 13.125 |
def hicpro_mapping_chart (self):
""" Generate the HiC-Pro Aligned reads plot """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['Full_Alignments_Read'] = { 'color': '#005ce6', 'name': 'Full reads Alignments' }
keys['Trimmed_Alignments_Read'] = { 'color': '#3385ff', 'name': 'Trimmed reads Alignments' }
keys['Failed_To_Align_Read'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' }
data = [{},{}]
for s_name in self.hicpro_data:
for r in [1,2]:
data[r-1]['{} [R{}]'.format(s_name, r)] = {
'Full_Alignments_Read': self.hicpro_data[s_name]['global_R{}'.format(r)],
'Trimmed_Alignments_Read': self.hicpro_data[s_name]['local_R{}'.format(r)],
'Failed_To_Align_Read': int(self.hicpro_data[s_name]['total_R{}'.format(r)]) - int(self.hicpro_data[s_name]['mapped_R{}'.format(r)])
}
# Config for the plot
config = {
'id': 'hicpro_mapping_stats_plot',
'title': 'HiC-Pro: Mapping Statistics',
'ylab': '# Reads',
'ylab': '# Reads: Read 1',
'data_labels': [
{'name': 'Read 1', 'ylab': '# Reads: Read 1'},
{'name': 'Read 2', 'ylab': '# Reads: Read 2'}
]
}
return bargraph.plot(data, [keys, keys], config)
|
[
"def",
"hicpro_mapping_chart",
"(",
"self",
")",
":",
"# Specify the order of the different possible categories",
"keys",
"=",
"OrderedDict",
"(",
")",
"keys",
"[",
"'Full_Alignments_Read'",
"]",
"=",
"{",
"'color'",
":",
"'#005ce6'",
",",
"'name'",
":",
"'Full reads Alignments'",
"}",
"keys",
"[",
"'Trimmed_Alignments_Read'",
"]",
"=",
"{",
"'color'",
":",
"'#3385ff'",
",",
"'name'",
":",
"'Trimmed reads Alignments'",
"}",
"keys",
"[",
"'Failed_To_Align_Read'",
"]",
"=",
"{",
"'color'",
":",
"'#a9a2a2'",
",",
"'name'",
":",
"'Failed To Align'",
"}",
"data",
"=",
"[",
"{",
"}",
",",
"{",
"}",
"]",
"for",
"s_name",
"in",
"self",
".",
"hicpro_data",
":",
"for",
"r",
"in",
"[",
"1",
",",
"2",
"]",
":",
"data",
"[",
"r",
"-",
"1",
"]",
"[",
"'{} [R{}]'",
".",
"format",
"(",
"s_name",
",",
"r",
")",
"]",
"=",
"{",
"'Full_Alignments_Read'",
":",
"self",
".",
"hicpro_data",
"[",
"s_name",
"]",
"[",
"'global_R{}'",
".",
"format",
"(",
"r",
")",
"]",
",",
"'Trimmed_Alignments_Read'",
":",
"self",
".",
"hicpro_data",
"[",
"s_name",
"]",
"[",
"'local_R{}'",
".",
"format",
"(",
"r",
")",
"]",
",",
"'Failed_To_Align_Read'",
":",
"int",
"(",
"self",
".",
"hicpro_data",
"[",
"s_name",
"]",
"[",
"'total_R{}'",
".",
"format",
"(",
"r",
")",
"]",
")",
"-",
"int",
"(",
"self",
".",
"hicpro_data",
"[",
"s_name",
"]",
"[",
"'mapped_R{}'",
".",
"format",
"(",
"r",
")",
"]",
")",
"}",
"# Config for the plot",
"config",
"=",
"{",
"'id'",
":",
"'hicpro_mapping_stats_plot'",
",",
"'title'",
":",
"'HiC-Pro: Mapping Statistics'",
",",
"'ylab'",
":",
"'# Reads'",
",",
"'ylab'",
":",
"'# Reads: Read 1'",
",",
"'data_labels'",
":",
"[",
"{",
"'name'",
":",
"'Read 1'",
",",
"'ylab'",
":",
"'# Reads: Read 1'",
"}",
",",
"{",
"'name'",
":",
"'Read 2'",
",",
"'ylab'",
":",
"'# Reads: Read 2'",
"}",
"]",
"}",
"return",
"bargraph",
".",
"plot",
"(",
"data",
",",
"[",
"keys",
",",
"keys",
"]",
",",
"config",
")"
] | 46.806452 | 26.387097 |
def send_keys(self, value, **kwargs):
self.debug_log("Sending keys")
highlight = kwargs.get(
'highlight',
BROME_CONFIG['highlight']['highlight_when_element_receive_keys'] # noqa
)
"""
wait_until_clickable = kwargs.get(
'wait_until_clickable',
BROME_CONFIG['proxy_element']['wait_until_clickable']
)
if wait_until_clickable:
# TODO manage the raise exception better
self.pdriver.wait_until_clickable(
self.selector._selector,
raise_exception=True
)
"""
if highlight:
self.highlight(
style=BROME_CONFIG['highlight']['style_when_element_receive_keys'] # noqa
)
clear = kwargs.get('clear', False)
if clear:
self.clear()
if self.pdriver.bot_diary:
self.pdriver.bot_diary.add_auto_entry(
"I typed '%s' in" % value,
selector=self.selector._selector
)
try:
self._element.send_keys(value)
except exceptions.StaleElementReferenceException as e:
self.debug_log(
"send_keys exception StaleElementReferenceException: %s" % e
)
sleep(2)
self._element = self.pdriver.find(self.selector._selector)
self._element.send_keys(value)
except (
exceptions.InvalidElementStateException,
exceptions.WebDriverException
) as e:
self.debug_log("send_keys exception: %s" % e)
sleep(2)
self._element.send_keys(value)
return True
|
[
"def",
"send_keys",
"(",
"self",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"debug_log",
"(",
"\"Sending keys\"",
")",
"highlight",
"=",
"kwargs",
".",
"get",
"(",
"'highlight'",
",",
"BROME_CONFIG",
"[",
"'highlight'",
"]",
"[",
"'highlight_when_element_receive_keys'",
"]",
"# noqa",
")",
"if",
"highlight",
":",
"self",
".",
"highlight",
"(",
"style",
"=",
"BROME_CONFIG",
"[",
"'highlight'",
"]",
"[",
"'style_when_element_receive_keys'",
"]",
"# noqa",
")",
"clear",
"=",
"kwargs",
".",
"get",
"(",
"'clear'",
",",
"False",
")",
"if",
"clear",
":",
"self",
".",
"clear",
"(",
")",
"if",
"self",
".",
"pdriver",
".",
"bot_diary",
":",
"self",
".",
"pdriver",
".",
"bot_diary",
".",
"add_auto_entry",
"(",
"\"I typed '%s' in\"",
"%",
"value",
",",
"selector",
"=",
"self",
".",
"selector",
".",
"_selector",
")",
"try",
":",
"self",
".",
"_element",
".",
"send_keys",
"(",
"value",
")",
"except",
"exceptions",
".",
"StaleElementReferenceException",
"as",
"e",
":",
"self",
".",
"debug_log",
"(",
"\"send_keys exception StaleElementReferenceException: %s\"",
"%",
"e",
")",
"sleep",
"(",
"2",
")",
"self",
".",
"_element",
"=",
"self",
".",
"pdriver",
".",
"find",
"(",
"self",
".",
"selector",
".",
"_selector",
")",
"self",
".",
"_element",
".",
"send_keys",
"(",
"value",
")",
"except",
"(",
"exceptions",
".",
"InvalidElementStateException",
",",
"exceptions",
".",
"WebDriverException",
")",
"as",
"e",
":",
"self",
".",
"debug_log",
"(",
"\"send_keys exception: %s\"",
"%",
"e",
")",
"sleep",
"(",
"2",
")",
"self",
".",
"_element",
".",
"send_keys",
"(",
"value",
")",
"return",
"True"
] | 30.509091 | 19.090909 |
def select(self, table, columns=None, join=None, where=None, group=None, having=None, order=None, limit=None,
iterator=False, fetch=True):
"""
:type table: string
:type columns: list
:type join: dict
:param join: {'[>]table1(t1)': {'user.id': 't1.user_id'}} -> "LEFT JOIN table AS t1 ON user.id = t1.user_id"
:type where: dict
:type group: string|list
:type having: string
:type order: string|list
:type limit: int|list
# TODO: change to offset
:param limit: The max row number for this query.
If it contains offset, limit must be a list like [offset, limit]
:param iterator: Whether to output the result in a generator. It always returns generator if the cursor is
SSCursor or SSDictCursor, no matter iterator is True or False.
:type fetch: bool
"""
if not columns:
columns = ['*']
where_q, _args = self._where_parser(where)
# TODO: support multiple table
_sql = ''.join(['SELECT ', self._backtick_columns(columns),
' FROM ', self._tablename_parser(table)['formatted_tablename'],
self._join_parser(join),
where_q,
(' GROUP BY ' + self._by_columns(group)) if group else '',
(' HAVING ' + having) if having else '',
(' ORDER BY ' + self._by_columns(order)) if order else '',
self._limit_parser(limit), ';'])
if self.debug:
return self.cur.mogrify(_sql, _args)
execute_result = self.cur.execute(_sql, _args)
if not fetch:
return execute_result
if self.cursorclass in (pymysql.cursors.SSCursor, pymysql.cursors.SSDictCursor):
return self.cur
if iterator:
return self._yield_result()
return self.cur.fetchall()
|
[
"def",
"select",
"(",
"self",
",",
"table",
",",
"columns",
"=",
"None",
",",
"join",
"=",
"None",
",",
"where",
"=",
"None",
",",
"group",
"=",
"None",
",",
"having",
"=",
"None",
",",
"order",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"iterator",
"=",
"False",
",",
"fetch",
"=",
"True",
")",
":",
"if",
"not",
"columns",
":",
"columns",
"=",
"[",
"'*'",
"]",
"where_q",
",",
"_args",
"=",
"self",
".",
"_where_parser",
"(",
"where",
")",
"# TODO: support multiple table",
"_sql",
"=",
"''",
".",
"join",
"(",
"[",
"'SELECT '",
",",
"self",
".",
"_backtick_columns",
"(",
"columns",
")",
",",
"' FROM '",
",",
"self",
".",
"_tablename_parser",
"(",
"table",
")",
"[",
"'formatted_tablename'",
"]",
",",
"self",
".",
"_join_parser",
"(",
"join",
")",
",",
"where_q",
",",
"(",
"' GROUP BY '",
"+",
"self",
".",
"_by_columns",
"(",
"group",
")",
")",
"if",
"group",
"else",
"''",
",",
"(",
"' HAVING '",
"+",
"having",
")",
"if",
"having",
"else",
"''",
",",
"(",
"' ORDER BY '",
"+",
"self",
".",
"_by_columns",
"(",
"order",
")",
")",
"if",
"order",
"else",
"''",
",",
"self",
".",
"_limit_parser",
"(",
"limit",
")",
",",
"';'",
"]",
")",
"if",
"self",
".",
"debug",
":",
"return",
"self",
".",
"cur",
".",
"mogrify",
"(",
"_sql",
",",
"_args",
")",
"execute_result",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"_sql",
",",
"_args",
")",
"if",
"not",
"fetch",
":",
"return",
"execute_result",
"if",
"self",
".",
"cursorclass",
"in",
"(",
"pymysql",
".",
"cursors",
".",
"SSCursor",
",",
"pymysql",
".",
"cursors",
".",
"SSDictCursor",
")",
":",
"return",
"self",
".",
"cur",
"if",
"iterator",
":",
"return",
"self",
".",
"_yield_result",
"(",
")",
"return",
"self",
".",
"cur",
".",
"fetchall",
"(",
")"
] | 39.918367 | 24.081633 |
def svm_predict(y, x, m, options=""):
"""
svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
y: a list/tuple/ndarray of l true labels (type must be int/double).
It is used for calculating the accuracy. Use [] if true labels are
unavailable.
x: 1. a list/tuple of l training instances. Feature vector of
each training instance is a list/tuple or dictionary.
2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
Predict data (y, x) with the SVM model m.
options:
-b probability_estimates: whether to predict probability estimates,
0 or 1 (default 0); for one-class SVM only 0 is supported.
-q : quiet mode (no outputs).
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
def info(s):
print(s)
if scipy and isinstance(x, scipy.ndarray):
x = scipy.ascontiguousarray(x) # enforce row-major
elif sparse and isinstance(x, sparse.spmatrix):
x = x.tocsr()
elif not isinstance(x, (list, tuple)):
raise TypeError("type of x: {0} is not supported!".format(type(x)))
if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, scipy.ndarray))):
raise TypeError("type of y: {0} is not supported!".format(type(y)))
predict_probability = 0
argv = options.split()
i = 0
while i < len(argv):
if argv[i] == '-b':
i += 1
predict_probability = int(argv[i])
elif argv[i] == '-q':
info = print_null
else:
raise ValueError("Wrong options")
i+=1
svm_type = m.get_svm_type()
is_prob_model = m.is_probability_model()
nr_class = m.get_nr_class()
pred_labels = []
pred_values = []
if scipy and isinstance(x, sparse.spmatrix):
nr_instance = x.shape[0]
else:
nr_instance = len(x)
if predict_probability:
if not is_prob_model:
raise ValueError("Model does not support probabiliy estimates")
if svm_type in [NU_SVR, EPSILON_SVR]:
info("Prob. model for test data: target value = predicted value + z,\n"
"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
nr_class = 0
prob_estimates = (c_double * nr_class)()
for i in range(nr_instance):
if scipy and isinstance(x, sparse.spmatrix):
indslice = slice(x.indptr[i], x.indptr[i+1])
xi, idx = gen_svm_nodearray((x.indices[indslice], x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))
else:
xi, idx = gen_svm_nodearray(x[i], isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_probability(m, xi, prob_estimates)
values = prob_estimates[:nr_class]
pred_labels += [label]
pred_values += [values]
else:
if is_prob_model:
info("Model supports probability estimates, but disabled in predicton.")
if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
nr_classifier = 1
else:
nr_classifier = nr_class*(nr_class-1)//2
dec_values = (c_double * nr_classifier)()
for i in range(nr_instance):
if scipy and isinstance(x, sparse.spmatrix):
indslice = slice(x.indptr[i], x.indptr[i+1])
xi, idx = gen_svm_nodearray((x.indices[indslice], x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))
else:
xi, idx = gen_svm_nodearray(x[i], isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_values(m, xi, dec_values)
if(nr_class == 1):
values = [1]
else:
values = dec_values[:nr_classifier]
pred_labels += [label]
pred_values += [values]
if len(y) == 0:
y = [0] * nr_instance
ACC, MSE, SCC = evaluations(y, pred_labels)
if svm_type in [EPSILON_SVR, NU_SVR]:
info("Mean squared error = %g (regression)" % MSE)
info("Squared correlation coefficient = %g (regression)" % SCC)
else:
info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(round(nr_instance*ACC/100)), nr_instance))
return pred_labels, (ACC, MSE, SCC), pred_values
|
[
"def",
"svm_predict",
"(",
"y",
",",
"x",
",",
"m",
",",
"options",
"=",
"\"\"",
")",
":",
"def",
"info",
"(",
"s",
")",
":",
"print",
"(",
"s",
")",
"if",
"scipy",
"and",
"isinstance",
"(",
"x",
",",
"scipy",
".",
"ndarray",
")",
":",
"x",
"=",
"scipy",
".",
"ascontiguousarray",
"(",
"x",
")",
"# enforce row-major",
"elif",
"sparse",
"and",
"isinstance",
"(",
"x",
",",
"sparse",
".",
"spmatrix",
")",
":",
"x",
"=",
"x",
".",
"tocsr",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"x",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"type of x: {0} is not supported!\"",
".",
"format",
"(",
"type",
"(",
"x",
")",
")",
")",
"if",
"(",
"not",
"isinstance",
"(",
"y",
",",
"(",
"list",
",",
"tuple",
")",
")",
")",
"and",
"(",
"not",
"(",
"scipy",
"and",
"isinstance",
"(",
"y",
",",
"scipy",
".",
"ndarray",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"type of y: {0} is not supported!\"",
".",
"format",
"(",
"type",
"(",
"y",
")",
")",
")",
"predict_probability",
"=",
"0",
"argv",
"=",
"options",
".",
"split",
"(",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"argv",
")",
":",
"if",
"argv",
"[",
"i",
"]",
"==",
"'-b'",
":",
"i",
"+=",
"1",
"predict_probability",
"=",
"int",
"(",
"argv",
"[",
"i",
"]",
")",
"elif",
"argv",
"[",
"i",
"]",
"==",
"'-q'",
":",
"info",
"=",
"print_null",
"else",
":",
"raise",
"ValueError",
"(",
"\"Wrong options\"",
")",
"i",
"+=",
"1",
"svm_type",
"=",
"m",
".",
"get_svm_type",
"(",
")",
"is_prob_model",
"=",
"m",
".",
"is_probability_model",
"(",
")",
"nr_class",
"=",
"m",
".",
"get_nr_class",
"(",
")",
"pred_labels",
"=",
"[",
"]",
"pred_values",
"=",
"[",
"]",
"if",
"scipy",
"and",
"isinstance",
"(",
"x",
",",
"sparse",
".",
"spmatrix",
")",
":",
"nr_instance",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"nr_instance",
"=",
"len",
"(",
"x",
")",
"if",
"predict_probability",
":",
"if",
"not",
"is_prob_model",
":",
"raise",
"ValueError",
"(",
"\"Model does not support probabiliy estimates\"",
")",
"if",
"svm_type",
"in",
"[",
"NU_SVR",
",",
"EPSILON_SVR",
"]",
":",
"info",
"(",
"\"Prob. model for test data: target value = predicted value + z,\\n\"",
"\"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\"",
"%",
"m",
".",
"get_svr_probability",
"(",
")",
")",
"nr_class",
"=",
"0",
"prob_estimates",
"=",
"(",
"c_double",
"*",
"nr_class",
")",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"nr_instance",
")",
":",
"if",
"scipy",
"and",
"isinstance",
"(",
"x",
",",
"sparse",
".",
"spmatrix",
")",
":",
"indslice",
"=",
"slice",
"(",
"x",
".",
"indptr",
"[",
"i",
"]",
",",
"x",
".",
"indptr",
"[",
"i",
"+",
"1",
"]",
")",
"xi",
",",
"idx",
"=",
"gen_svm_nodearray",
"(",
"(",
"x",
".",
"indices",
"[",
"indslice",
"]",
",",
"x",
".",
"data",
"[",
"indslice",
"]",
")",
",",
"isKernel",
"=",
"(",
"m",
".",
"param",
".",
"kernel_type",
"==",
"PRECOMPUTED",
")",
")",
"else",
":",
"xi",
",",
"idx",
"=",
"gen_svm_nodearray",
"(",
"x",
"[",
"i",
"]",
",",
"isKernel",
"=",
"(",
"m",
".",
"param",
".",
"kernel_type",
"==",
"PRECOMPUTED",
")",
")",
"label",
"=",
"libsvm",
".",
"svm_predict_probability",
"(",
"m",
",",
"xi",
",",
"prob_estimates",
")",
"values",
"=",
"prob_estimates",
"[",
":",
"nr_class",
"]",
"pred_labels",
"+=",
"[",
"label",
"]",
"pred_values",
"+=",
"[",
"values",
"]",
"else",
":",
"if",
"is_prob_model",
":",
"info",
"(",
"\"Model supports probability estimates, but disabled in predicton.\"",
")",
"if",
"svm_type",
"in",
"(",
"ONE_CLASS",
",",
"EPSILON_SVR",
",",
"NU_SVC",
")",
":",
"nr_classifier",
"=",
"1",
"else",
":",
"nr_classifier",
"=",
"nr_class",
"*",
"(",
"nr_class",
"-",
"1",
")",
"//",
"2",
"dec_values",
"=",
"(",
"c_double",
"*",
"nr_classifier",
")",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"nr_instance",
")",
":",
"if",
"scipy",
"and",
"isinstance",
"(",
"x",
",",
"sparse",
".",
"spmatrix",
")",
":",
"indslice",
"=",
"slice",
"(",
"x",
".",
"indptr",
"[",
"i",
"]",
",",
"x",
".",
"indptr",
"[",
"i",
"+",
"1",
"]",
")",
"xi",
",",
"idx",
"=",
"gen_svm_nodearray",
"(",
"(",
"x",
".",
"indices",
"[",
"indslice",
"]",
",",
"x",
".",
"data",
"[",
"indslice",
"]",
")",
",",
"isKernel",
"=",
"(",
"m",
".",
"param",
".",
"kernel_type",
"==",
"PRECOMPUTED",
")",
")",
"else",
":",
"xi",
",",
"idx",
"=",
"gen_svm_nodearray",
"(",
"x",
"[",
"i",
"]",
",",
"isKernel",
"=",
"(",
"m",
".",
"param",
".",
"kernel_type",
"==",
"PRECOMPUTED",
")",
")",
"label",
"=",
"libsvm",
".",
"svm_predict_values",
"(",
"m",
",",
"xi",
",",
"dec_values",
")",
"if",
"(",
"nr_class",
"==",
"1",
")",
":",
"values",
"=",
"[",
"1",
"]",
"else",
":",
"values",
"=",
"dec_values",
"[",
":",
"nr_classifier",
"]",
"pred_labels",
"+=",
"[",
"label",
"]",
"pred_values",
"+=",
"[",
"values",
"]",
"if",
"len",
"(",
"y",
")",
"==",
"0",
":",
"y",
"=",
"[",
"0",
"]",
"*",
"nr_instance",
"ACC",
",",
"MSE",
",",
"SCC",
"=",
"evaluations",
"(",
"y",
",",
"pred_labels",
")",
"if",
"svm_type",
"in",
"[",
"EPSILON_SVR",
",",
"NU_SVR",
"]",
":",
"info",
"(",
"\"Mean squared error = %g (regression)\"",
"%",
"MSE",
")",
"info",
"(",
"\"Squared correlation coefficient = %g (regression)\"",
"%",
"SCC",
")",
"else",
":",
"info",
"(",
"\"Accuracy = %g%% (%d/%d) (classification)\"",
"%",
"(",
"ACC",
",",
"int",
"(",
"round",
"(",
"nr_instance",
"*",
"ACC",
"/",
"100",
")",
")",
",",
"nr_instance",
")",
")",
"return",
"pred_labels",
",",
"(",
"ACC",
",",
"MSE",
",",
"SCC",
")",
",",
"pred_values"
] | 35.327869 | 23.688525 |
def build_image_in_privileged_container(build_image, source, image, parent_registry=None,
target_registries=None, push_buildroot_to=None,
parent_registry_insecure=False,
target_registries_insecure=False,
dont_pull_base_image=False, **kwargs):
"""
build image from provided dockerfile (specified by `source`) in privileged container by
running another docker instance inside the container
:param build_image: str, image where target image should be built
:param source: dict, where/how to get source code to put in image
:param image: str, tag for built image ([registry/]image_name[:tag])
:param parent_registry: str, registry to pull base image from
:param target_registries: list of str, list of registries to push image to (might change in
future)
:param push_buildroot_to: str, repository where buildroot should be pushed
:param parent_registry_insecure: bool, allow connecting to parent registry over plain http
:param target_registries_insecure: bool, allow connecting to target registries over plain http
:param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile
:return: BuildResults
"""
build_json = _prepare_build_json(image, source, parent_registry, target_registries,
parent_registry_insecure, target_registries_insecure,
dont_pull_base_image, **kwargs)
m = PrivilegedBuildManager(build_image, build_json)
build_response = m.build()
if push_buildroot_to:
m.commit_buildroot()
m.push_buildroot(push_buildroot_to)
return build_response
|
[
"def",
"build_image_in_privileged_container",
"(",
"build_image",
",",
"source",
",",
"image",
",",
"parent_registry",
"=",
"None",
",",
"target_registries",
"=",
"None",
",",
"push_buildroot_to",
"=",
"None",
",",
"parent_registry_insecure",
"=",
"False",
",",
"target_registries_insecure",
"=",
"False",
",",
"dont_pull_base_image",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"build_json",
"=",
"_prepare_build_json",
"(",
"image",
",",
"source",
",",
"parent_registry",
",",
"target_registries",
",",
"parent_registry_insecure",
",",
"target_registries_insecure",
",",
"dont_pull_base_image",
",",
"*",
"*",
"kwargs",
")",
"m",
"=",
"PrivilegedBuildManager",
"(",
"build_image",
",",
"build_json",
")",
"build_response",
"=",
"m",
".",
"build",
"(",
")",
"if",
"push_buildroot_to",
":",
"m",
".",
"commit_buildroot",
"(",
")",
"m",
".",
"push_buildroot",
"(",
"push_buildroot_to",
")",
"return",
"build_response"
] | 58.258065 | 30.064516 |
def get_help_msg(self,
dotspace_ending=False, # type: bool
**kwargs):
# type: (...) -> str
"""
The method used to get the formatted help message according to kwargs. By default it returns the 'help_msg'
attribute, whether it is defined at the instance level or at the class level.
The help message is formatted according to help_msg.format(**kwargs), and may be terminated with a dot
and a space if dotspace_ending is set to True.
:param dotspace_ending: True will append a dot and a space at the end of the message if it is not
empty (default is False)
:param kwargs: keyword arguments to format the help message
:return: the formatted help message
"""
context = self.get_context_for_help_msgs(kwargs)
if self.help_msg is not None and len(self.help_msg) > 0:
# create a copy because we will modify it
context = copy(context)
# first format if needed
try:
help_msg = self.help_msg
variables = re.findall("{\S+}", help_msg)
for v in set(variables):
v = v[1:-1]
if v in context and len(str(context[v])) > self.__max_str_length_displayed__:
new_name = '@@@@' + v + '@@@@'
help_msg = help_msg.replace('{' + v + '}', '{' + new_name + '}')
context[new_name] = "(too big for display)"
help_msg = help_msg.format(**context)
except KeyError as e:
# no need to raise from e, __cause__ is set in the constructor
raise HelpMsgFormattingException(self.help_msg, e, context)
# then add a trailing dot and space if needed
if dotspace_ending:
return end_with_dot_space(help_msg)
else:
return help_msg
else:
return ''
|
[
"def",
"get_help_msg",
"(",
"self",
",",
"dotspace_ending",
"=",
"False",
",",
"# type: bool",
"*",
"*",
"kwargs",
")",
":",
"# type: (...) -> str",
"context",
"=",
"self",
".",
"get_context_for_help_msgs",
"(",
"kwargs",
")",
"if",
"self",
".",
"help_msg",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"help_msg",
")",
">",
"0",
":",
"# create a copy because we will modify it",
"context",
"=",
"copy",
"(",
"context",
")",
"# first format if needed",
"try",
":",
"help_msg",
"=",
"self",
".",
"help_msg",
"variables",
"=",
"re",
".",
"findall",
"(",
"\"{\\S+}\"",
",",
"help_msg",
")",
"for",
"v",
"in",
"set",
"(",
"variables",
")",
":",
"v",
"=",
"v",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"v",
"in",
"context",
"and",
"len",
"(",
"str",
"(",
"context",
"[",
"v",
"]",
")",
")",
">",
"self",
".",
"__max_str_length_displayed__",
":",
"new_name",
"=",
"'@@@@'",
"+",
"v",
"+",
"'@@@@'",
"help_msg",
"=",
"help_msg",
".",
"replace",
"(",
"'{'",
"+",
"v",
"+",
"'}'",
",",
"'{'",
"+",
"new_name",
"+",
"'}'",
")",
"context",
"[",
"new_name",
"]",
"=",
"\"(too big for display)\"",
"help_msg",
"=",
"help_msg",
".",
"format",
"(",
"*",
"*",
"context",
")",
"except",
"KeyError",
"as",
"e",
":",
"# no need to raise from e, __cause__ is set in the constructor",
"raise",
"HelpMsgFormattingException",
"(",
"self",
".",
"help_msg",
",",
"e",
",",
"context",
")",
"# then add a trailing dot and space if needed",
"if",
"dotspace_ending",
":",
"return",
"end_with_dot_space",
"(",
"help_msg",
")",
"else",
":",
"return",
"help_msg",
"else",
":",
"return",
"''"
] | 42.717391 | 24.108696 |
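Illustrative note (not part of the dataset row above): a simplified, standalone sketch of the truncation trick used in get_help_msg. The message, context values and length threshold are invented for the example; the real code keeps the oversized value under a mangled placeholder name instead of overwriting it.

import re

help_msg = "value {val} does not respect schema {schema}"
context = {"val": 3, "schema": "x" * 500}
max_len = 100  # stand-in for __max_str_length_displayed__

for var in set(re.findall(r"{\S+}", help_msg)):
    name = var[1:-1]
    if name in context and len(str(context[name])) > max_len:
        # simplified: overwrite the oversized value instead of aliasing it
        context[name] = "(too big for display)"

print(help_msg.format(**context))
# value 3 does not respect schema (too big for display)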
def doTranslate(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'all': '/translate/all'}):
'''
Translate the file from source language to destination language.
:param option:
:param urlOrPaths:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param services:
:return:
'''
paths = getPaths(urlOrPaths)
return [doTranslate1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in paths]
|
[
"def",
"doTranslate",
"(",
"option",
",",
"urlOrPaths",
",",
"serverEndpoint",
"=",
"ServerEndpoint",
",",
"verbose",
"=",
"Verbose",
",",
"tikaServerJar",
"=",
"TikaServerJar",
",",
"responseMimeType",
"=",
"'text/plain'",
",",
"services",
"=",
"{",
"'all'",
":",
"'/translate/all'",
"}",
")",
":",
"paths",
"=",
"getPaths",
"(",
"urlOrPaths",
")",
"return",
"[",
"doTranslate1",
"(",
"option",
",",
"path",
",",
"serverEndpoint",
",",
"verbose",
",",
"tikaServerJar",
",",
"responseMimeType",
",",
"services",
")",
"for",
"path",
"in",
"paths",
"]"
] | 37.117647 | 24.647059 |
def set_range_y(self,val):
""" Set visible range of y data.
Note: Padding must be 0 or it will create an infinite loop
"""
d = self.declaration
if d.auto_range[1]:
return
self.widget.setYRange(*val,padding=0)
|
[
"def",
"set_range_y",
"(",
"self",
",",
"val",
")",
":",
"d",
"=",
"self",
".",
"declaration",
"if",
"d",
".",
"auto_range",
"[",
"1",
"]",
":",
"return",
"self",
".",
"widget",
".",
"setYRange",
"(",
"*",
"val",
",",
"padding",
"=",
"0",
")"
] | 27.8 | 15.6 |
def generate_output(self, writer):
"""
Generates the sitemap file and the stylesheet file and puts them into the content dir.
:param writer: the writer instance
:type writer: pelican.writers.Writer
"""
# write xml stylesheet
with codecs_open(os.path.join(os.path.dirname(__file__), 'sitemap-stylesheet.xsl'), 'r', encoding='utf-8') as fd_origin:
with codecs_open(os.path.join(self.path_output, 'sitemap-stylesheet.xsl'), 'w', encoding='utf-8') as fd_destination:
xsl = fd_origin.read()
# replace some template markers
# TODO use pelican template magic
xsl = xsl.replace('{{ SITENAME }}', self.context.get('SITENAME'))
fd_destination.write(xsl)
# will contain the url nodes as text
urls = ''
# get all articles sorted by time
articles_sorted = sorted(self.context['articles'], key=self.__get_date_key, reverse=True)
# get all pages with date/modified date
pages_with_date = list(
filter(
lambda p: getattr(p, 'modified', False) or getattr(p, 'date', False),
self.context.get('pages')
)
)
pages_with_date_sorted = sorted(pages_with_date, key=self.__get_date_key, reverse=True)
# get all pages without date
pages_without_date = list(
filter(
lambda p: getattr(p, 'modified', None) is None and getattr(p, 'date', None) is None,
self.context.get('pages')
)
)
pages_without_date_sorted = sorted(pages_without_date, key=self.__get_title_key, reverse=False)
# join them, first date sorted, then title sorted
pages_sorted = pages_with_date_sorted + pages_without_date_sorted
# the landing page
if 'index' in self.context.get('DIRECT_TEMPLATES'):
# assume that the index page has changed with the most current article or page
# use the first article or page if no articles
index_reference = None
if len(articles_sorted) > 0:
index_reference = articles_sorted[0]
elif len(pages_sorted) > 0:
index_reference = pages_sorted[0]
if index_reference is not None:
urls += self.__create_url_node_for_content(
index_reference,
'index',
url=self.url_site,
)
# process articles
for article in articles_sorted:
urls += self.__create_url_node_for_content(
article,
'articles',
url=urljoin(self.url_site, article.url)
)
# process pages
for page in pages_sorted:
urls += self.__create_url_node_for_content(
page,
'pages',
url=urljoin(self.url_site, page.url)
)
# process category pages
if self.context.get('CATEGORY_URL'):
urls += self.__process_url_wrapper_elements(self.context.get('categories'))
# process tag pages
if self.context.get('TAG_URL'):
urls += self.__process_url_wrapper_elements(sorted(self.context.get('tags'), key=lambda x: x[0].name))
# process author pages
if self.context.get('AUTHOR_URL'):
urls += self.__process_url_wrapper_elements(self.context.get('authors'))
# handle all DIRECT_TEMPLATES but "index"
for direct_template in list(filter(lambda p: p != 'index', self.context.get('DIRECT_TEMPLATES'))):
# we assume the modification date of the last article as modification date for the listings of
# categories, authors and archives (all values of DIRECT_TEMPLATES but "index")
modification_time = getattr(articles_sorted[0], 'modified', getattr(articles_sorted[0], 'date', None))
url = self.__get_direct_template_url(direct_template)
urls += self.__create_url_node_for_content(None, 'others', url, modification_time)
# write the final sitemap file
with codecs_open(os.path.join(self.path_output, 'sitemap.xml'), 'w', encoding='utf-8') as fd:
fd.write(self.xml_wrap % {
'SITEURL': self.url_site,
'urls': urls
})
|
[
"def",
"generate_output",
"(",
"self",
",",
"writer",
")",
":",
"# write xml stylesheet",
"with",
"codecs_open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'sitemap-stylesheet.xsl'",
")",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fd_origin",
":",
"with",
"codecs_open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path_output",
",",
"'sitemap-stylesheet.xsl'",
")",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fd_destination",
":",
"xsl",
"=",
"fd_origin",
".",
"read",
"(",
")",
"# replace some template markers",
"# TODO use pelican template magic",
"xsl",
"=",
"xsl",
".",
"replace",
"(",
"'{{ SITENAME }}'",
",",
"self",
".",
"context",
".",
"get",
"(",
"'SITENAME'",
")",
")",
"fd_destination",
".",
"write",
"(",
"xsl",
")",
"# will contain the url nodes as text",
"urls",
"=",
"''",
"# get all articles sorted by time",
"articles_sorted",
"=",
"sorted",
"(",
"self",
".",
"context",
"[",
"'articles'",
"]",
",",
"key",
"=",
"self",
".",
"__get_date_key",
",",
"reverse",
"=",
"True",
")",
"# get all pages with date/modified date",
"pages_with_date",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"getattr",
"(",
"p",
",",
"'modified'",
",",
"False",
")",
"or",
"getattr",
"(",
"p",
",",
"'date'",
",",
"False",
")",
",",
"self",
".",
"context",
".",
"get",
"(",
"'pages'",
")",
")",
")",
"pages_with_date_sorted",
"=",
"sorted",
"(",
"pages_with_date",
",",
"key",
"=",
"self",
".",
"__get_date_key",
",",
"reverse",
"=",
"True",
")",
"# get all pages without date",
"pages_without_date",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"getattr",
"(",
"p",
",",
"'modified'",
",",
"None",
")",
"is",
"None",
"and",
"getattr",
"(",
"p",
",",
"'date'",
",",
"None",
")",
"is",
"None",
",",
"self",
".",
"context",
".",
"get",
"(",
"'pages'",
")",
")",
")",
"pages_without_date_sorted",
"=",
"sorted",
"(",
"pages_without_date",
",",
"key",
"=",
"self",
".",
"__get_title_key",
",",
"reverse",
"=",
"False",
")",
"# join them, first date sorted, then title sorted",
"pages_sorted",
"=",
"pages_with_date_sorted",
"+",
"pages_without_date_sorted",
"# the landing page",
"if",
"'index'",
"in",
"self",
".",
"context",
".",
"get",
"(",
"'DIRECT_TEMPLATES'",
")",
":",
"# assume that the index page has changed with the most current article or page",
"# use the first article or page if no articles",
"index_reference",
"=",
"None",
"if",
"len",
"(",
"articles_sorted",
")",
">",
"0",
":",
"index_reference",
"=",
"articles_sorted",
"[",
"0",
"]",
"elif",
"len",
"(",
"pages_sorted",
")",
">",
"0",
":",
"index_reference",
"=",
"pages_sorted",
"[",
"0",
"]",
"if",
"index_reference",
"is",
"not",
"None",
":",
"urls",
"+=",
"self",
".",
"__create_url_node_for_content",
"(",
"index_reference",
",",
"'index'",
",",
"url",
"=",
"self",
".",
"url_site",
",",
")",
"# process articles",
"for",
"article",
"in",
"articles_sorted",
":",
"urls",
"+=",
"self",
".",
"__create_url_node_for_content",
"(",
"article",
",",
"'articles'",
",",
"url",
"=",
"urljoin",
"(",
"self",
".",
"url_site",
",",
"article",
".",
"url",
")",
")",
"# process pages",
"for",
"page",
"in",
"pages_sorted",
":",
"urls",
"+=",
"self",
".",
"__create_url_node_for_content",
"(",
"page",
",",
"'pages'",
",",
"url",
"=",
"urljoin",
"(",
"self",
".",
"url_site",
",",
"page",
".",
"url",
")",
")",
"# process category pages",
"if",
"self",
".",
"context",
".",
"get",
"(",
"'CATEGORY_URL'",
")",
":",
"urls",
"+=",
"self",
".",
"__process_url_wrapper_elements",
"(",
"self",
".",
"context",
".",
"get",
"(",
"'categories'",
")",
")",
"# process tag pages",
"if",
"self",
".",
"context",
".",
"get",
"(",
"'TAG_URL'",
")",
":",
"urls",
"+=",
"self",
".",
"__process_url_wrapper_elements",
"(",
"sorted",
"(",
"self",
".",
"context",
".",
"get",
"(",
"'tags'",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
".",
"name",
")",
")",
"# process author pages",
"if",
"self",
".",
"context",
".",
"get",
"(",
"'AUTHOR_URL'",
")",
":",
"urls",
"+=",
"self",
".",
"__process_url_wrapper_elements",
"(",
"self",
".",
"context",
".",
"get",
"(",
"'authors'",
")",
")",
"# handle all DIRECT_TEMPLATES but \"index\"",
"for",
"direct_template",
"in",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"p",
"!=",
"'index'",
",",
"self",
".",
"context",
".",
"get",
"(",
"'DIRECT_TEMPLATES'",
")",
")",
")",
":",
"# we assume the modification date of the last article as modification date for the listings of",
"# categories, authors and archives (all values of DIRECT_TEMPLATES but \"index\")",
"modification_time",
"=",
"getattr",
"(",
"articles_sorted",
"[",
"0",
"]",
",",
"'modified'",
",",
"getattr",
"(",
"articles_sorted",
"[",
"0",
"]",
",",
"'date'",
",",
"None",
")",
")",
"url",
"=",
"self",
".",
"__get_direct_template_url",
"(",
"direct_template",
")",
"urls",
"+=",
"self",
".",
"__create_url_node_for_content",
"(",
"None",
",",
"'others'",
",",
"url",
",",
"modification_time",
")",
"# write the final sitemap file",
"with",
"codecs_open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path_output",
",",
"'sitemap.xml'",
")",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"self",
".",
"xml_wrap",
"%",
"{",
"'SITEURL'",
":",
"self",
".",
"url_site",
",",
"'urls'",
":",
"urls",
"}",
")"
] | 42.712871 | 24.49505 |
def parseEC2Json2List(jsontext, region):
"""
Takes a JSON and returns a list of InstanceType objects representing EC2 instance params.
:param jsontext:
:param region:
:return:
"""
currentList = json.loads(jsontext)
ec2InstanceList = []
for k, v in iteritems(currentList["products"]):
if "location" in v["attributes"] and v["attributes"]["location"] == region:
# 3 tenant types: 'Host' (always $0.00; just a template?)
# 'Dedicated' (toil does not support; these are pricier)
# 'Shared' (AWS default and what toil uses)
if "tenancy" in v["attributes"] and v["attributes"]["tenancy"] == "Shared":
if v["attributes"]["operatingSystem"] == "Linux":
# The same instance can appear with multiple "operation"
# values; "RunInstances" is normal, and
# "RunInstances:<code>" is e.g. Linux with MS SQL Server
# installed.
if v["attributes"]["operation"] == "RunInstances":
disks, disk_capacity = parseStorage(v["attributes"]["storage"])
memory = parseMemory(v["attributes"]["memory"])
instance = InstanceType(name=v["attributes"]["instanceType"],
cores=v["attributes"]["vcpu"],
memory=memory,
disks=disks,
disk_capacity=disk_capacity)
if instance not in ec2InstanceList:
ec2InstanceList.append(instance)
else:
raise RuntimeError('EC2 JSON format has likely changed. '
'Duplicate instance {} found.'.format(instance))
return ec2InstanceList
|
[
"def",
"parseEC2Json2List",
"(",
"jsontext",
",",
"region",
")",
":",
"currentList",
"=",
"json",
".",
"loads",
"(",
"jsontext",
")",
"ec2InstanceList",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"currentList",
"[",
"\"products\"",
"]",
")",
":",
"if",
"\"location\"",
"in",
"v",
"[",
"\"attributes\"",
"]",
"and",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"location\"",
"]",
"==",
"region",
":",
"# 3 tenant types: 'Host' (always $0.00; just a template?)",
"# 'Dedicated' (toil does not support; these are pricier)",
"# 'Shared' (AWS default and what toil uses)",
"if",
"\"tenancy\"",
"in",
"v",
"[",
"\"attributes\"",
"]",
"and",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"tenancy\"",
"]",
"==",
"\"Shared\"",
":",
"if",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"operatingSystem\"",
"]",
"==",
"\"Linux\"",
":",
"# The same instance can appear with multiple \"operation\"",
"# values; \"RunInstances\" is normal, and",
"# \"RunInstances:<code>\" is e.g. Linux with MS SQL Server",
"# installed.",
"if",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"operation\"",
"]",
"==",
"\"RunInstances\"",
":",
"disks",
",",
"disk_capacity",
"=",
"parseStorage",
"(",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"storage\"",
"]",
")",
"memory",
"=",
"parseMemory",
"(",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"memory\"",
"]",
")",
"instance",
"=",
"InstanceType",
"(",
"name",
"=",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"instanceType\"",
"]",
",",
"cores",
"=",
"v",
"[",
"\"attributes\"",
"]",
"[",
"\"vcpu\"",
"]",
",",
"memory",
"=",
"memory",
",",
"disks",
"=",
"disks",
",",
"disk_capacity",
"=",
"disk_capacity",
")",
"if",
"instance",
"not",
"in",
"ec2InstanceList",
":",
"ec2InstanceList",
".",
"append",
"(",
"instance",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'EC2 JSON format has likely changed. '",
"'Duplicate instance {} found.'",
".",
"format",
"(",
"instance",
")",
")",
"return",
"ec2InstanceList"
] | 55.885714 | 26.971429 |
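Illustrative note (not part of the dataset row above): a minimal sketch of the AWS pricing JSON shape that parseEC2Json2List appears to expect. The product key, region name and attribute values below are invented for the example, and plain dicts stand in for InstanceType.

import json

sample = json.dumps({
    "products": {
        "ABC123": {
            "attributes": {
                "location": "US West (Oregon)",
                "tenancy": "Shared",
                "operatingSystem": "Linux",
                "operation": "RunInstances",
                "instanceType": "m5.large",
                "vcpu": "2",
                "memory": "8 GiB",
                "storage": "EBS only",
            }
        }
    }
})

instances = []
for product in json.loads(sample)["products"].values():
    attrs = product["attributes"]
    # same Shared/Linux/RunInstances filter as above, for one region
    if (attrs.get("location") == "US West (Oregon)"
            and attrs.get("tenancy") == "Shared"
            and attrs.get("operatingSystem") == "Linux"
            and attrs.get("operation") == "RunInstances"):
        instances.append({"name": attrs["instanceType"], "cores": attrs["vcpu"]})

print(instances)  # [{'name': 'm5.large', 'cores': '2'}]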
def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
"""
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of)
|
[
"def",
"register_entity",
"(",
"self",
",",
"entity_value",
",",
"entity_type",
",",
"alias_of",
"=",
"None",
",",
"domain",
"=",
"0",
")",
":",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"self",
".",
"domains",
"[",
"domain",
"]",
".",
"register_entity",
"(",
"entity_value",
"=",
"entity_value",
",",
"entity_type",
"=",
"entity_type",
",",
"alias_of",
"=",
"alias_of",
")"
] | 50.666667 | 23.733333 |
def write_byte(self, cmd, value):
"""
Writes an 8-bit byte to the specified command register
"""
self.bus.write_byte_data(self.address, cmd, value)
self.log.debug(
"write_byte: Wrote 0x%02X to command register 0x%02X" % (
value, cmd
)
)
|
[
"def",
"write_byte",
"(",
"self",
",",
"cmd",
",",
"value",
")",
":",
"self",
".",
"bus",
".",
"write_byte_data",
"(",
"self",
".",
"address",
",",
"cmd",
",",
"value",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"write_byte: Wrote 0x%02X to command register 0x%02X\"",
"%",
"(",
"value",
",",
"cmd",
")",
")"
] | 31.5 | 16.5 |
def plot_ellipse_matplotlib(cov, pos, ax, nstd=2, **kwargs):
"""
Plot 2d ellipse in 3d using matplotlib backend
"""
from matplotlib.patches import Ellipse
from mpl_toolkits.mplot3d import art3d, Axes3D
ellipse_param, normal = calc_2d_ellipse_properties(cov,nstd)
ellipse_kwds = merge_keywords(ellipse_param, kwargs)
ellip = Ellipse(xy=(0,0), **ellipse_kwds)
ax.add_patch(ellip)
ellip = pathpatch_2d_to_3d(ellip, normal=normal)
ellip = pathpatch_translate(ellip,pos)
return ellip
|
[
"def",
"plot_ellipse_matplotlib",
"(",
"cov",
",",
"pos",
",",
"ax",
",",
"nstd",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"matplotlib",
".",
"patches",
"import",
"Ellipse",
"from",
"mpl_toolkits",
".",
"mplot3d",
"import",
"art3d",
",",
"Axes3D",
"ellipse_param",
",",
"normal",
"=",
"calc_2d_ellipse_properties",
"(",
"cov",
",",
"nstd",
")",
"ellipse_kwds",
"=",
"merge_keywords",
"(",
"ellipse_param",
",",
"kwargs",
")",
"ellip",
"=",
"Ellipse",
"(",
"xy",
"=",
"(",
"0",
",",
"0",
")",
",",
"*",
"*",
"ellipse_kwds",
")",
"ax",
".",
"add_patch",
"(",
"ellip",
")",
"ellip",
"=",
"pathpatch_2d_to_3d",
"(",
"ellip",
",",
"normal",
"=",
"normal",
")",
"ellip",
"=",
"pathpatch_translate",
"(",
"ellip",
",",
"pos",
")",
"return",
"ellip"
] | 36.714286 | 13 |
def quaternion_rotate(X, Y):
"""
Calculate the rotation
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Y: array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rot : matrix
Rotation matrix (D,D)
"""
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
A = np.sum(Qt_dot_W, axis=0)
eigen = np.linalg.eigh(A)
r = eigen[1][:, eigen[0].argmax()]
rot = quaternion_transform(r)
return rot
|
[
"def",
"quaternion_rotate",
"(",
"X",
",",
"Y",
")",
":",
"N",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"W",
"=",
"np",
".",
"asarray",
"(",
"[",
"makeW",
"(",
"*",
"Y",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"N",
")",
"]",
")",
"Q",
"=",
"np",
".",
"asarray",
"(",
"[",
"makeQ",
"(",
"*",
"X",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"N",
")",
"]",
")",
"Qt_dot_W",
"=",
"np",
".",
"asarray",
"(",
"[",
"np",
".",
"dot",
"(",
"Q",
"[",
"k",
"]",
".",
"T",
",",
"W",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"N",
")",
"]",
")",
"W_minus_Q",
"=",
"np",
".",
"asarray",
"(",
"[",
"W",
"[",
"k",
"]",
"-",
"Q",
"[",
"k",
"]",
"for",
"k",
"in",
"range",
"(",
"N",
")",
"]",
")",
"A",
"=",
"np",
".",
"sum",
"(",
"Qt_dot_W",
",",
"axis",
"=",
"0",
")",
"eigen",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"A",
")",
"r",
"=",
"eigen",
"[",
"1",
"]",
"[",
":",
",",
"eigen",
"[",
"0",
"]",
".",
"argmax",
"(",
")",
"]",
"rot",
"=",
"quaternion_transform",
"(",
"r",
")",
"return",
"rot"
] | 26.923077 | 18.846154 |
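Illustrative note (not part of the dataset row above): the helpers makeW, makeQ and quaternion_transform are not included in this row, so the snippet is not runnable on its own. As a rough cross-check of the same alignment problem, here is a sketch of the SVD-based Kabsch rotation; it is a different algorithm, not the quaternion method above.

import numpy as np

def kabsch_rotation(X, Y):
    # rotation R (3x3) that best maps the rows of X onto the rows of Y
    H = X.T @ Y                                # covariance of the two point sets
    U, S, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))     # guard against reflections
    return Vt.T @ np.diag([1.0, 1.0, d]) @ U.T

# quick self-check with a known rotation about the z-axis
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0, 0.0, 1.0]])
X = np.random.default_rng(0).random((10, 3))
Y = X @ R_true.T
assert np.allclose(kabsch_rotation(X, Y), R_true)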
def createRecordSensor(network, name, dataSource):
"""
Creates a RecordSensor region that allows us to specify a file record
stream as the input source.
"""
# Specific type of region. Possible options can be found in /nupic/regions/
regionType = "py.RecordSensor"
# Creates a json from specified dictionary.
regionParams = json.dumps({"verbosity": _VERBOSITY})
network.addRegion(name, regionType, regionParams)
# getSelf returns the actual region, instead of a region wrapper
sensorRegion = network.regions[name].getSelf()
# Specify how RecordSensor encodes input values
sensorRegion.encoder = createEncoder()
# Specify which sub-encoder should be used for "actValueOut"
network.regions[name].setParameter("predictedField", "consumption")
# Specify the dataSource as a file record stream instance
sensorRegion.dataSource = dataSource
return sensorRegion
|
[
"def",
"createRecordSensor",
"(",
"network",
",",
"name",
",",
"dataSource",
")",
":",
"# Specific type of region. Possible options can be found in /nupic/regions/",
"regionType",
"=",
"\"py.RecordSensor\"",
"# Creates a json from specified dictionary.",
"regionParams",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"verbosity\"",
":",
"_VERBOSITY",
"}",
")",
"network",
".",
"addRegion",
"(",
"name",
",",
"regionType",
",",
"regionParams",
")",
"# getSelf returns the actual region, instead of a region wrapper",
"sensorRegion",
"=",
"network",
".",
"regions",
"[",
"name",
"]",
".",
"getSelf",
"(",
")",
"# Specify how RecordSensor encodes input values",
"sensorRegion",
".",
"encoder",
"=",
"createEncoder",
"(",
")",
"# Specify which sub-encoder should be used for \"actValueOut\"",
"network",
".",
"regions",
"[",
"name",
"]",
".",
"setParameter",
"(",
"\"predictedField\"",
",",
"\"consumption\"",
")",
"# Specify the dataSource as a file record stream instance",
"sensorRegion",
".",
"dataSource",
"=",
"dataSource",
"return",
"sensorRegion"
] | 34.84 | 20.04 |
def init_localization():
'''prepare l10n'''
locale.setlocale(locale.LC_ALL, '') # User's preferred locale, according to environment
# Use first two characters of country code, defaulting to 'en' in the absence of a preference
loc = locale.getlocale()
lang = loc[0][0:2] if loc[0] else 'en'
filename = "res/messages_%s.mo" % lang
try:
logging.debug( "Opening message file %s for locale %s", filename, loc[0] )
trans = gettext.GNUTranslations(open( filename, "rb" ) )
except IOError:
logging.debug( "Locale not found. Using default messages" )
trans = gettext.NullTranslations()
trans.install()
|
[
"def",
"init_localization",
"(",
")",
":",
"locale",
".",
"setlocale",
"(",
"locale",
".",
"LC_ALL",
",",
"''",
")",
"# User's preferred locale, according to environment",
"# Use first two characters of country code, defaulting to 'en' in the absence of a preference",
"loc",
"=",
"locale",
".",
"getlocale",
"(",
")",
"lang",
"=",
"loc",
"[",
"0",
"]",
"[",
"0",
":",
"2",
"]",
"if",
"loc",
"[",
"0",
"]",
"else",
"'en'",
"filename",
"=",
"\"res/messages_%s.mo\"",
"%",
"lang",
"try",
":",
"logging",
".",
"debug",
"(",
"\"Opening message file %s for locale %s\"",
",",
"filename",
",",
"loc",
"[",
"0",
"]",
")",
"trans",
"=",
"gettext",
".",
"GNUTranslations",
"(",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
")",
"except",
"IOError",
":",
"logging",
".",
"debug",
"(",
"\"Locale not found. Using default messages\"",
")",
"trans",
"=",
"gettext",
".",
"NullTranslations",
"(",
")",
"trans",
".",
"install",
"(",
")"
] | 38.058824 | 26.294118 |
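Illustrative note (not part of the dataset row above): the fallback branch relies on gettext.NullTranslations().install(), which binds _() into builtins so later calls work even without a compiled .mo file. A minimal sketch:

import gettext

gettext.NullTranslations().install()
print(_("messages pass through unchanged"))  # prints the string as-is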
def delete_record_set(self, record_set):
"""Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._deletions += (record_set,)
|
[
"def",
"delete_record_set",
"(",
"self",
",",
"record_set",
")",
":",
"if",
"not",
"isinstance",
"(",
"record_set",
",",
"ResourceRecordSet",
")",
":",
"raise",
"ValueError",
"(",
"\"Pass a ResourceRecordSet\"",
")",
"self",
".",
"_deletions",
"+=",
"(",
"record_set",
",",
")"
] | 41.916667 | 17.75 |
def list(cls, params=None):
"""
Retrieves a list of the model
:param params: params as dictionary
:type params: dict
:return: the list of the parsed xml objects
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(cls._get_request(params=params).text)
)
|
[
"def",
"list",
"(",
"cls",
",",
"params",
"=",
"None",
")",
":",
"return",
"fields",
".",
"ListField",
"(",
"name",
"=",
"cls",
".",
"ENDPOINT",
",",
"init_class",
"=",
"cls",
")",
".",
"decode",
"(",
"cls",
".",
"element_from_string",
"(",
"cls",
".",
"_get_request",
"(",
"params",
"=",
"params",
")",
".",
"text",
")",
")"
] | 31.833333 | 16.833333 |
def handle(self, env, start_response):
"""WSGI entry point for auth requests (ones that match the
self.auth_prefix).
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
req = Request(env)
if self.auth_prefix:
req.path_info_pop()
req.bytes_transferred = '-'
req.client_disconnect = False
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
if 'eventlet.posthooks' in env:
env['eventlet.posthooks'].append(
(self.posthooklogger, (req,), {}))
return self.handle_request(req)(env, start_response)
else:
# Lack of posthook support means that we have to log on the
# start of the response, rather than after all the data has
# been sent. This prevents logging client disconnects
# differently than full transmissions.
response = self.handle_request(req)(env, start_response)
self.posthooklogger(env, req)
return response
except (Exception, TimeoutError):
print("EXCEPTION IN handle: %s: %s" % (format_exc(), env))
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
|
[
"def",
"handle",
"(",
"self",
",",
"env",
",",
"start_response",
")",
":",
"try",
":",
"req",
"=",
"Request",
"(",
"env",
")",
"if",
"self",
".",
"auth_prefix",
":",
"req",
".",
"path_info_pop",
"(",
")",
"req",
".",
"bytes_transferred",
"=",
"'-'",
"req",
".",
"client_disconnect",
"=",
"False",
"if",
"'x-storage-token'",
"in",
"req",
".",
"headers",
"and",
"'x-auth-token'",
"not",
"in",
"req",
".",
"headers",
":",
"req",
".",
"headers",
"[",
"'x-auth-token'",
"]",
"=",
"req",
".",
"headers",
"[",
"'x-storage-token'",
"]",
"if",
"'eventlet.posthooks'",
"in",
"env",
":",
"env",
"[",
"'eventlet.posthooks'",
"]",
".",
"append",
"(",
"(",
"self",
".",
"posthooklogger",
",",
"(",
"req",
",",
")",
",",
"{",
"}",
")",
")",
"return",
"self",
".",
"handle_request",
"(",
"req",
")",
"(",
"env",
",",
"start_response",
")",
"else",
":",
"# Lack of posthook support means that we have to log on the",
"# start of the response, rather than after all the data has",
"# been sent. This prevents logging client disconnects",
"# differently than full transmissions.",
"response",
"=",
"self",
".",
"handle_request",
"(",
"req",
")",
"(",
"env",
",",
"start_response",
")",
"self",
".",
"posthooklogger",
"(",
"env",
",",
"req",
")",
"return",
"response",
"except",
"(",
"Exception",
",",
"TimeoutError",
")",
":",
"print",
"(",
"\"EXCEPTION IN handle: %s: %s\"",
"%",
"(",
"format_exc",
"(",
")",
",",
"env",
")",
")",
"start_response",
"(",
"'500 Server Error'",
",",
"[",
"(",
"'Content-Type'",
",",
"'text/plain'",
")",
"]",
")",
"return",
"[",
"'Internal server error.\\n'",
"]"
] | 46.470588 | 14.794118 |
def or_fault(a, b, out, fault):
"""Returns True if OR(a, b) == out and fault == 0 or OR(a, b) != out and fault == 1."""
if (a or b) == out:
return fault == 0
else:
return fault == 1
|
[
"def",
"or_fault",
"(",
"a",
",",
"b",
",",
"out",
",",
"fault",
")",
":",
"if",
"(",
"a",
"or",
"b",
")",
"==",
"out",
":",
"return",
"fault",
"==",
"0",
"else",
":",
"return",
"fault",
"==",
"1"
] | 34 | 14.5 |
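Illustrative note (not part of the dataset row above): this reads like a stuck-fault consistency check for an OR gate, where fault == 1 is consistent only when the observed output disagrees with OR(a, b). Enumerating the truth table makes that explicit (the function is repeated here so the sketch runs on its own):

def or_fault(a, b, out, fault):
    if (a or b) == out:
        return fault == 0
    else:
        return fault == 1

for a in (0, 1):
    for b in (0, 1):
        for out in (0, 1):
            for fault in (0, 1):
                if or_fault(a, b, out, fault):
                    print(a, b, out, fault)  # fault is 1 exactly when out != (a or b)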
def guess_payload_class(self, payload):
"""
        Handles NTPv4 extensions and the MAC part (when authentication is used).
"""
plen = len(payload)
if plen > _NTP_AUTH_MD5_TAIL_SIZE:
return NTPExtensions
elif plen == _NTP_AUTH_MD5_TAIL_SIZE:
return NTPAuthenticator
return Packet.guess_payload_class(self, payload)
|
[
"def",
"guess_payload_class",
"(",
"self",
",",
"payload",
")",
":",
"plen",
"=",
"len",
"(",
"payload",
")",
"if",
"plen",
">",
"_NTP_AUTH_MD5_TAIL_SIZE",
":",
"return",
"NTPExtensions",
"elif",
"plen",
"==",
"_NTP_AUTH_MD5_TAIL_SIZE",
":",
"return",
"NTPAuthenticator",
"return",
"Packet",
".",
"guess_payload_class",
"(",
"self",
",",
"payload",
")"
] | 31.166667 | 13.833333 |
def gpio_interrupts_enable(self):
"""Enables GPIO interrupts."""
try:
bring_gpio_interrupt_into_userspace()
set_gpio_interrupt_edge()
except Timeout as e:
raise InterruptEnableException(
"There was an error bringing gpio%d into userspace. %s" %
(GPIO_INTERRUPT_PIN, e.message)
)
|
[
"def",
"gpio_interrupts_enable",
"(",
"self",
")",
":",
"try",
":",
"bring_gpio_interrupt_into_userspace",
"(",
")",
"set_gpio_interrupt_edge",
"(",
")",
"except",
"Timeout",
"as",
"e",
":",
"raise",
"InterruptEnableException",
"(",
"\"There was an error bringing gpio%d into userspace. %s\"",
"%",
"(",
"GPIO_INTERRUPT_PIN",
",",
"e",
".",
"message",
")",
")"
] | 37.3 | 12.9 |
def utcnow(cls):
"""Return a new datetime representing UTC day and time."""
obj = datetime.datetime.utcnow()
obj = cls(obj, tzinfo=pytz.utc)
return obj
|
[
"def",
"utcnow",
"(",
"cls",
")",
":",
"obj",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"obj",
"=",
"cls",
"(",
"obj",
",",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
"return",
"obj"
] | 32.6 | 11.8 |
def view_hmap(token, dstore):
"""
Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE
"""
try:
poe = valid.probability(token.split(':')[1])
except IndexError:
poe = 0.1
mean = dict(extract(dstore, 'hcurves?kind=mean'))['mean']
oq = dstore['oqparam']
hmap = calc.make_hmap_array(mean, oq.imtls, [poe], len(mean))
dt = numpy.dtype([('sid', U32)] + [(imt, F32) for imt in oq.imtls])
array = numpy.zeros(len(hmap), dt)
for i, vals in enumerate(hmap):
array[i] = (i, ) + tuple(vals)
array.sort(order=list(oq.imtls)[0])
return rst_table(array[:20])
|
[
"def",
"view_hmap",
"(",
"token",
",",
"dstore",
")",
":",
"try",
":",
"poe",
"=",
"valid",
".",
"probability",
"(",
"token",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
")",
"except",
"IndexError",
":",
"poe",
"=",
"0.1",
"mean",
"=",
"dict",
"(",
"extract",
"(",
"dstore",
",",
"'hcurves?kind=mean'",
")",
")",
"[",
"'mean'",
"]",
"oq",
"=",
"dstore",
"[",
"'oqparam'",
"]",
"hmap",
"=",
"calc",
".",
"make_hmap_array",
"(",
"mean",
",",
"oq",
".",
"imtls",
",",
"[",
"poe",
"]",
",",
"len",
"(",
"mean",
")",
")",
"dt",
"=",
"numpy",
".",
"dtype",
"(",
"[",
"(",
"'sid'",
",",
"U32",
")",
"]",
"+",
"[",
"(",
"imt",
",",
"F32",
")",
"for",
"imt",
"in",
"oq",
".",
"imtls",
"]",
")",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"hmap",
")",
",",
"dt",
")",
"for",
"i",
",",
"vals",
"in",
"enumerate",
"(",
"hmap",
")",
":",
"array",
"[",
"i",
"]",
"=",
"(",
"i",
",",
")",
"+",
"tuple",
"(",
"vals",
")",
"array",
".",
"sort",
"(",
"order",
"=",
"list",
"(",
"oq",
".",
"imtls",
")",
"[",
"0",
"]",
")",
"return",
"rst_table",
"(",
"array",
"[",
":",
"20",
"]",
")"
] | 35.944444 | 13.277778 |
def _derive_namespaces(self):
'''
        Small method to loop through the three graphs in self.diffs and identify their unique namespace URIs.
        Then, loop through the provided dictionary of prefixes and pair each namespace URI with its prefix.
Args:
None: uses self.prefixes and self.diffs
Returns:
None: sets self.update_namespaces and self.update_prefixes
'''
# iterate through graphs and get unique namespace uris
for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
for s,p,o in graph:
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(p) # predicates
self.update_namespaces.add(ns_uri)
except:
logger.debug('could not parse Object URI: %s' % ns_uri)
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(o) # objects
self.update_namespaces.add(ns_uri)
except:
logger.debug('could not parse Object URI: %s' % ns_uri)
logger.debug(self.update_namespaces)
# build unique prefixes dictionary
# NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes)
for ns_uri in self.update_namespaces:
for k in self.prefixes.__dict__:
if str(ns_uri) == str(self.prefixes.__dict__[k]):
logger.debug('adding prefix %s for uri %s to unique_prefixes' % (k,str(ns_uri)))
self.update_prefixes[k] = self.prefixes.__dict__[k]
|
[
"def",
"_derive_namespaces",
"(",
"self",
")",
":",
"# iterate through graphs and get unique namespace uris",
"for",
"graph",
"in",
"[",
"self",
".",
"diffs",
".",
"overlap",
",",
"self",
".",
"diffs",
".",
"removed",
",",
"self",
".",
"diffs",
".",
"added",
"]",
":",
"for",
"s",
",",
"p",
",",
"o",
"in",
"graph",
":",
"try",
":",
"ns_prefix",
",",
"ns_uri",
",",
"predicate",
"=",
"graph",
".",
"compute_qname",
"(",
"p",
")",
"# predicates",
"self",
".",
"update_namespaces",
".",
"add",
"(",
"ns_uri",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"'could not parse Object URI: %s'",
"%",
"ns_uri",
")",
"try",
":",
"ns_prefix",
",",
"ns_uri",
",",
"predicate",
"=",
"graph",
".",
"compute_qname",
"(",
"o",
")",
"# objects",
"self",
".",
"update_namespaces",
".",
"add",
"(",
"ns_uri",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"'could not parse Object URI: %s'",
"%",
"ns_uri",
")",
"logger",
".",
"debug",
"(",
"self",
".",
"update_namespaces",
")",
"# build unique prefixes dictionary",
"# NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes)",
"for",
"ns_uri",
"in",
"self",
".",
"update_namespaces",
":",
"for",
"k",
"in",
"self",
".",
"prefixes",
".",
"__dict__",
":",
"if",
"str",
"(",
"ns_uri",
")",
"==",
"str",
"(",
"self",
".",
"prefixes",
".",
"__dict__",
"[",
"k",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"'adding prefix %s for uri %s to unique_prefixes'",
"%",
"(",
"k",
",",
"str",
"(",
"ns_uri",
")",
")",
")",
"self",
".",
"update_prefixes",
"[",
"k",
"]",
"=",
"self",
".",
"prefixes",
".",
"__dict__",
"[",
"k",
"]"
] | 36.457143 | 24.914286 |
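Illustrative note (not part of the dataset row above): a minimal sketch of what the compute_qname calls return in rdflib. The namespace and prefix binding below are chosen for the example and are not taken from this codebase.

from rdflib import Graph, URIRef

g = Graph()
g.bind("dcterms", "http://purl.org/dc/terms/")  # example prefix binding

prefix, ns_uri, name = g.compute_qname(URIRef("http://purl.org/dc/terms/title"))
print(prefix, ns_uri, name)  # dcterms http://purl.org/dc/terms/ title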
def all_connected_components(i,j):
'''Associate each label in i with a component #
This function finds all connected components given an array of
associations between labels i and j using a depth-first search.
i & j give the edges of the graph. The first step of the algorithm makes
    bidirectional edges (i->j and j->i), so it's best to only send the
edges in one direction (although the algorithm can withstand duplicates).
returns a label for each vertex up to the maximum named vertex in i.
'''
if len(i) == 0:
return i
i1 = np.hstack((i,j))
j1 = np.hstack((j,i))
order = np.lexsort((j1,i1))
i=np.ascontiguousarray(i1[order],np.uint32)
j=np.ascontiguousarray(j1[order],np.uint32)
#
# Get indexes and counts of edges per vertex
#
counts = np.ascontiguousarray(np.bincount(i.astype(int)),np.uint32)
indexes = np.ascontiguousarray(np.cumsum(counts)-counts,np.uint32)
#
# This stores the lowest index # during the algorithm - the first
# vertex to be labeled in a connected component.
#
labels = np.zeros(len(counts), np.uint32)
_all_connected_components(i,j,indexes,counts,labels)
return labels
|
[
"def",
"all_connected_components",
"(",
"i",
",",
"j",
")",
":",
"if",
"len",
"(",
"i",
")",
"==",
"0",
":",
"return",
"i",
"i1",
"=",
"np",
".",
"hstack",
"(",
"(",
"i",
",",
"j",
")",
")",
"j1",
"=",
"np",
".",
"hstack",
"(",
"(",
"j",
",",
"i",
")",
")",
"order",
"=",
"np",
".",
"lexsort",
"(",
"(",
"j1",
",",
"i1",
")",
")",
"i",
"=",
"np",
".",
"ascontiguousarray",
"(",
"i1",
"[",
"order",
"]",
",",
"np",
".",
"uint32",
")",
"j",
"=",
"np",
".",
"ascontiguousarray",
"(",
"j1",
"[",
"order",
"]",
",",
"np",
".",
"uint32",
")",
"#",
"# Get indexes and counts of edges per vertex",
"#",
"counts",
"=",
"np",
".",
"ascontiguousarray",
"(",
"np",
".",
"bincount",
"(",
"i",
".",
"astype",
"(",
"int",
")",
")",
",",
"np",
".",
"uint32",
")",
"indexes",
"=",
"np",
".",
"ascontiguousarray",
"(",
"np",
".",
"cumsum",
"(",
"counts",
")",
"-",
"counts",
",",
"np",
".",
"uint32",
")",
"#",
"# This stores the lowest index # during the algorithm - the first",
"# vertex to be labeled in a connected component.",
"#",
"labels",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"counts",
")",
",",
"np",
".",
"uint32",
")",
"_all_connected_components",
"(",
"i",
",",
"j",
",",
"indexes",
",",
"counts",
",",
"labels",
")",
"return",
"labels"
] | 38.419355 | 23.83871 |
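Illustrative note (not part of the dataset row above): for small graphs the resulting partition can be checked against scipy. Note that scipy numbers components 0..k-1 in its own order, while the function above labels each vertex with the first vertex of its component, so only the grouping is directly comparable. Edge values below are invented for the example.

import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

i = np.array([0, 2, 3])           # edges given in one direction only
j = np.array([1, 3, 4])
n = max(i.max(), j.max()) + 1

adj = coo_matrix((np.ones(len(i)), (i, j)), shape=(n, n))
n_comp, labels = connected_components(adj, directed=False)
print(n_comp, labels)             # 2 components: {0, 1} and {2, 3, 4}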
def get_help(command):
"""
Get the Cmd help function from the click command
:param command: The click Command object
:return: the help_* method for Cmd
:rtype: function
"""
assert isinstance(command, click.Command)
def help_(self): # pylint: disable=unused-argument
extra = {}
for key, value in command.context_settings.items():
if key not in extra:
extra[key] = value
# Print click's help message
with click.Context(command, info_name=command.name, parent=self.ctx, **extra) as ctx:
click.echo(ctx.get_help(), color=ctx.color)
help_.__name__ = 'help_%s' % command.name
return help_
|
[
"def",
"get_help",
"(",
"command",
")",
":",
"assert",
"isinstance",
"(",
"command",
",",
"click",
".",
"Command",
")",
"def",
"help_",
"(",
"self",
")",
":",
"# pylint: disable=unused-argument",
"extra",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"command",
".",
"context_settings",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"extra",
":",
"extra",
"[",
"key",
"]",
"=",
"value",
"# Print click's help message",
"with",
"click",
".",
"Context",
"(",
"command",
",",
"info_name",
"=",
"command",
".",
"name",
",",
"parent",
"=",
"self",
".",
"ctx",
",",
"*",
"*",
"extra",
")",
"as",
"ctx",
":",
"click",
".",
"echo",
"(",
"ctx",
".",
"get_help",
"(",
")",
",",
"color",
"=",
"ctx",
".",
"color",
")",
"help_",
".",
"__name__",
"=",
"'help_%s'",
"%",
"command",
".",
"name",
"return",
"help_"
] | 32.285714 | 16.761905 |
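Illustrative note (not part of the dataset row above): a minimal sketch of the same click idiom outside of a cmd.Cmd subclass; the command name and option are invented for the example.

import click

@click.command()
@click.option("--name", default="world")
def greet(name):
    """Say hello."""
    click.echo("Hello %s" % name)

with click.Context(greet, info_name="greet") as ctx:
    click.echo(ctx.get_help(), color=ctx.color)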
def _pyxb_from_perm_dict(self, perm_dict):
"""Return an AccessPolicy PyXB representation of ``perm_dict``
- If ``norm_perm_list`` is empty, None is returned. The schema does not allow
AccessPolicy to be empty, but in SystemMetadata, it can be left out
altogether. So returning None instead of an empty AccessPolicy allows the
result to be inserted directly into a SystemMetadata PyXB object.
"""
norm_perm_list = self._norm_perm_list_from_perm_dict(perm_dict)
return self._pyxb_from_norm_perm_list(norm_perm_list)
|
[
"def",
"_pyxb_from_perm_dict",
"(",
"self",
",",
"perm_dict",
")",
":",
"norm_perm_list",
"=",
"self",
".",
"_norm_perm_list_from_perm_dict",
"(",
"perm_dict",
")",
"return",
"self",
".",
"_pyxb_from_norm_perm_list",
"(",
"norm_perm_list",
")"
] | 51.727273 | 26.181818 |
def load_facts(self, facts):
"""Load a set of facts into the CLIPS data base.
The C equivalent of the CLIPS load-facts command.
Facts can be loaded from a string or from a text file.
"""
facts = facts.encode()
if os.path.exists(facts):
ret = lib.EnvLoadFacts(self._env, facts)
if ret == -1:
raise CLIPSError(self._env)
else:
ret = lib.EnvLoadFactsFromString(self._env, facts, -1)
if ret == -1:
raise CLIPSError(self._env)
return ret
|
[
"def",
"load_facts",
"(",
"self",
",",
"facts",
")",
":",
"facts",
"=",
"facts",
".",
"encode",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"facts",
")",
":",
"ret",
"=",
"lib",
".",
"EnvLoadFacts",
"(",
"self",
".",
"_env",
",",
"facts",
")",
"if",
"ret",
"==",
"-",
"1",
":",
"raise",
"CLIPSError",
"(",
"self",
".",
"_env",
")",
"else",
":",
"ret",
"=",
"lib",
".",
"EnvLoadFactsFromString",
"(",
"self",
".",
"_env",
",",
"facts",
",",
"-",
"1",
")",
"if",
"ret",
"==",
"-",
"1",
":",
"raise",
"CLIPSError",
"(",
"self",
".",
"_env",
")",
"return",
"ret"
] | 28.1 | 19.55 |
def humanize_dates(p_due=None, p_start=None, p_creation=None):
"""
Returns string with humanized versions of p_due, p_start and p_creation.
Examples:
- all dates: "16 days ago, due in a month, started 2 days ago"
- p_due and p_start: "due in a month, started 2 days ago"
- p_creation and p_due: "16 days ago, due in a month"
"""
dates_list = []
if p_creation:
dates_list.append(humanize_date(p_creation))
if p_due:
dates_list.append('due ' + humanize_date(p_due))
if p_start:
now = arrow.now().date()
dates_list.append('{} {}'.format(
'started' if p_start <= now else 'starts',
humanize_date(p_start)
))
return ', '.join(dates_list)
|
[
"def",
"humanize_dates",
"(",
"p_due",
"=",
"None",
",",
"p_start",
"=",
"None",
",",
"p_creation",
"=",
"None",
")",
":",
"dates_list",
"=",
"[",
"]",
"if",
"p_creation",
":",
"dates_list",
".",
"append",
"(",
"humanize_date",
"(",
"p_creation",
")",
")",
"if",
"p_due",
":",
"dates_list",
".",
"append",
"(",
"'due '",
"+",
"humanize_date",
"(",
"p_due",
")",
")",
"if",
"p_start",
":",
"now",
"=",
"arrow",
".",
"now",
"(",
")",
".",
"date",
"(",
")",
"dates_list",
".",
"append",
"(",
"'{} {}'",
".",
"format",
"(",
"'started'",
"if",
"p_start",
"<=",
"now",
"else",
"'starts'",
",",
"humanize_date",
"(",
"p_start",
")",
")",
")",
"return",
"', '",
".",
"join",
"(",
"dates_list",
")"
] | 34.52381 | 18.047619 |
def get_short_uid_dict(self, query=None):
        """Create a dictionary of shortened UIDs for all contacts.
All arguments are only used if the address book is not yet initialized
and will just be handed to self.load().
:param query: see self.load()
:type query: str
        :returns: the contacts mapped by the shortest unique prefix of their UID
:rtype: dict(str: CarddavObject)
"""
if self._short_uids is None:
if not self._loaded:
self.load(query)
if not self.contacts:
self._short_uids = {}
elif len(self.contacts) == 1:
self._short_uids = {uid[0:1]: contact
for uid, contact in self.contacts.items()}
else:
self._short_uids = {}
sorted_uids = sorted(self.contacts)
# Prepare for the loop; the first and last items are handled
                # separately.
item0, item1 = sorted_uids[:2]
same1 = self._compare_uids(item0, item1)
self._short_uids[item0[:same1 + 1]] = self.contacts[item0]
for item_new in sorted_uids[2:]:
                    # shift the items and the common prefix length one further
item0, item1 = item1, item_new
same0, same1 = same1, self._compare_uids(item0, item1)
# compute the final prefix length for item1
same = max(same0, same1)
self._short_uids[item0[:same + 1]] = self.contacts[item0]
# Save the last item.
self._short_uids[item1[:same1 + 1]] = self.contacts[item1]
return self._short_uids
|
[
"def",
"get_short_uid_dict",
"(",
"self",
",",
"query",
"=",
"None",
")",
":",
"if",
"self",
".",
"_short_uids",
"is",
"None",
":",
"if",
"not",
"self",
".",
"_loaded",
":",
"self",
".",
"load",
"(",
"query",
")",
"if",
"not",
"self",
".",
"contacts",
":",
"self",
".",
"_short_uids",
"=",
"{",
"}",
"elif",
"len",
"(",
"self",
".",
"contacts",
")",
"==",
"1",
":",
"self",
".",
"_short_uids",
"=",
"{",
"uid",
"[",
"0",
":",
"1",
"]",
":",
"contact",
"for",
"uid",
",",
"contact",
"in",
"self",
".",
"contacts",
".",
"items",
"(",
")",
"}",
"else",
":",
"self",
".",
"_short_uids",
"=",
"{",
"}",
"sorted_uids",
"=",
"sorted",
"(",
"self",
".",
"contacts",
")",
"# Prepare for the loop; the first and last items are handled",
"# seperatly.",
"item0",
",",
"item1",
"=",
"sorted_uids",
"[",
":",
"2",
"]",
"same1",
"=",
"self",
".",
"_compare_uids",
"(",
"item0",
",",
"item1",
")",
"self",
".",
"_short_uids",
"[",
"item0",
"[",
":",
"same1",
"+",
"1",
"]",
"]",
"=",
"self",
".",
"contacts",
"[",
"item0",
"]",
"for",
"item_new",
"in",
"sorted_uids",
"[",
"2",
":",
"]",
":",
"# shift the items and the common prefix lenght one further",
"item0",
",",
"item1",
"=",
"item1",
",",
"item_new",
"same0",
",",
"same1",
"=",
"same1",
",",
"self",
".",
"_compare_uids",
"(",
"item0",
",",
"item1",
")",
"# compute the final prefix length for item1",
"same",
"=",
"max",
"(",
"same0",
",",
"same1",
")",
"self",
".",
"_short_uids",
"[",
"item0",
"[",
":",
"same",
"+",
"1",
"]",
"]",
"=",
"self",
".",
"contacts",
"[",
"item0",
"]",
"# Save the last item.",
"self",
".",
"_short_uids",
"[",
"item1",
"[",
":",
"same1",
"+",
"1",
"]",
"]",
"=",
"self",
".",
"contacts",
"[",
"item1",
"]",
"return",
"self",
".",
"_short_uids"
] | 46.621622 | 16.405405 |
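The prefix-shortening logic in get_short_uid_dict is easier to see in isolation. Below is a minimal standalone sketch of the same idea over a plain uid -> contact dict; short_uid_dict and common_prefix_len are illustrative names, not part of the original class.

def common_prefix_len(a, b):
    # length of the common prefix of two uids
    n = 0
    for x, y in zip(a, b):
        if x != y:
            break
        n += 1
    return n

def short_uid_dict(contacts):
    if not contacts:
        return {}
    uids = sorted(contacts)
    if len(uids) == 1:
        return {uids[0][:1]: contacts[uids[0]]}
    out = {}
    prev = 0
    for i, uid in enumerate(uids):
        # overlap with the next uid in sorted order (0 for the last one)
        nxt = common_prefix_len(uid, uids[i + 1]) if i + 1 < len(uids) else 0
        # keep one character more than the longest overlap with a neighbour
        out[uid[:max(prev, nxt) + 1]] = contacts[uid]
        prev = nxt
    return out

print(short_uid_dict({'abc1': 'A', 'abd2': 'B', 'xyz': 'C'}))
# -> {'abc': 'A', 'abd': 'B', 'x': 'C'}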
def set_framebuffer_size_callback(window, cbfun):
"""
Sets the framebuffer resize callback for the specified window.
Wrapper for:
GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _framebuffer_size_callback_repository:
previous_callback = _framebuffer_size_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWframebuffersizefun(cbfun)
_framebuffer_size_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetFramebufferSizeCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
|
[
"def",
"set_framebuffer_size_callback",
"(",
"window",
",",
"cbfun",
")",
":",
"window_addr",
"=",
"ctypes",
".",
"cast",
"(",
"ctypes",
".",
"pointer",
"(",
"window",
")",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_long",
")",
")",
".",
"contents",
".",
"value",
"if",
"window_addr",
"in",
"_framebuffer_size_callback_repository",
":",
"previous_callback",
"=",
"_framebuffer_size_callback_repository",
"[",
"window_addr",
"]",
"else",
":",
"previous_callback",
"=",
"None",
"if",
"cbfun",
"is",
"None",
":",
"cbfun",
"=",
"0",
"c_cbfun",
"=",
"_GLFWframebuffersizefun",
"(",
"cbfun",
")",
"_framebuffer_size_callback_repository",
"[",
"window_addr",
"]",
"=",
"(",
"cbfun",
",",
"c_cbfun",
")",
"cbfun",
"=",
"c_cbfun",
"_glfw",
".",
"glfwSetFramebufferSizeCallback",
"(",
"window",
",",
"cbfun",
")",
"if",
"previous_callback",
"is",
"not",
"None",
"and",
"previous_callback",
"[",
"0",
"]",
"!=",
"0",
":",
"return",
"previous_callback",
"[",
"0",
"]"
] | 42.619048 | 22.047619 |
def from_series(cls, series, offset=0):
"""
        Creates and returns a Series from a Series
:param series: raccoon Series
:param offset: offset value must be provided as there is no equivalent for a DataFrame
:return: Series
"""
return cls(data=series.data, index=series.index, data_name=series.data_name, index_name=series.index_name,
sort=series.sort, offset=offset)
|
[
"def",
"from_series",
"(",
"cls",
",",
"series",
",",
"offset",
"=",
"0",
")",
":",
"return",
"cls",
"(",
"data",
"=",
"series",
".",
"data",
",",
"index",
"=",
"series",
".",
"index",
",",
"data_name",
"=",
"series",
".",
"data_name",
",",
"index_name",
"=",
"series",
".",
"index_name",
",",
"sort",
"=",
"series",
".",
"sort",
",",
"offset",
"=",
"offset",
")"
] | 42.9 | 20.9 |
def parse_data_writer(self, node):
"""
Parses <DataWriter>
@param node: Node containing the <DataWriter> element
@type node: xml.etree.Element
"""
if 'path' in node.lattrib:
path = node.lattrib['path']
else:
self.raise_error('<DataWriter> must specify a path.')
if 'filename' in node.lattrib:
file_path = node.lattrib['filename']
else:
self.raise_error("Data writer for '{0}' must specify a filename.",
path)
self.current_simulation.add_data_writer(DataWriter(path, file_path))
|
[
"def",
"parse_data_writer",
"(",
"self",
",",
"node",
")",
":",
"if",
"'path'",
"in",
"node",
".",
"lattrib",
":",
"path",
"=",
"node",
".",
"lattrib",
"[",
"'path'",
"]",
"else",
":",
"self",
".",
"raise_error",
"(",
"'<DataWriter> must specify a path.'",
")",
"if",
"'filename'",
"in",
"node",
".",
"lattrib",
":",
"file_path",
"=",
"node",
".",
"lattrib",
"[",
"'filename'",
"]",
"else",
":",
"self",
".",
"raise_error",
"(",
"\"Data writer for '{0}' must specify a filename.\"",
",",
"path",
")",
"self",
".",
"current_simulation",
".",
"add_data_writer",
"(",
"DataWriter",
"(",
"path",
",",
"file_path",
")",
")"
] | 30.95 | 18.95 |
def setup_logging(self, defaults=None):
"""
Set up logging via :func:`logging.config.fileConfig`.
Defaults are specified for the special ``__file__`` and ``here``
variables, similar to PasteDeploy config loading. Extra defaults can
optionally be specified as a dict in ``defaults``.
:param defaults: The defaults that will be used when passed to
:func:`logging.config.fileConfig`.
:return: ``None``.
"""
if "loggers" in self.get_sections():
defaults = self._get_defaults(defaults)
fileConfig(self.uri.path, defaults, disable_existing_loggers=False)
else:
logging.basicConfig()
|
[
"def",
"setup_logging",
"(",
"self",
",",
"defaults",
"=",
"None",
")",
":",
"if",
"\"loggers\"",
"in",
"self",
".",
"get_sections",
"(",
")",
":",
"defaults",
"=",
"self",
".",
"_get_defaults",
"(",
"defaults",
")",
"fileConfig",
"(",
"self",
".",
"uri",
".",
"path",
",",
"defaults",
",",
"disable_existing_loggers",
"=",
"False",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
")"
] | 36.315789 | 21.368421 |
def init_app(self, app, entry_point_group='invenio_oauth2server.scopes',
**kwargs):
"""Flask application initialization.
:param app: An instance of :class:`flask.Flask`.
:param entry_point_group: The entrypoint group name to load plugins.
(Default: ``'invenio_oauth2server.scopes'``)
"""
self.init_config(app)
state = _OAuth2ServerState(app, entry_point_group=entry_point_group)
app.extensions['invenio-oauth2server'] = state
return state
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"entry_point_group",
"=",
"'invenio_oauth2server.scopes'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"init_config",
"(",
"app",
")",
"state",
"=",
"_OAuth2ServerState",
"(",
"app",
",",
"entry_point_group",
"=",
"entry_point_group",
")",
"app",
".",
"extensions",
"[",
"'invenio-oauth2server'",
"]",
"=",
"state",
"return",
"state"
] | 37.214286 | 22.428571 |
def Enter(self, n = 1, dl = 0):
"""回车键/换行键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.enter_key, n)
|
[
"def",
"Enter",
"(",
"self",
",",
"n",
"=",
"1",
",",
"dl",
"=",
"0",
")",
":",
"self",
".",
"Delay",
"(",
"dl",
")",
"self",
".",
"keyboard",
".",
"tap_key",
"(",
"self",
".",
"keyboard",
".",
"enter_key",
",",
"n",
")"
] | 28.2 | 8.8 |
def gen_elem_array(size, elem_type=None):
"""
Generates element array of given size and initializes with given type.
Supports container type, used for pre-allocation before deserialization.
:param size:
:param elem_type:
:return:
"""
if elem_type is None or not callable(elem_type):
return [elem_type] * size
if is_type(elem_type, ContainerType):
def elem_type():
return []
res = []
for _ in range(size):
res.append(elem_type())
return res
|
[
"def",
"gen_elem_array",
"(",
"size",
",",
"elem_type",
"=",
"None",
")",
":",
"if",
"elem_type",
"is",
"None",
"or",
"not",
"callable",
"(",
"elem_type",
")",
":",
"return",
"[",
"elem_type",
"]",
"*",
"size",
"if",
"is_type",
"(",
"elem_type",
",",
"ContainerType",
")",
":",
"def",
"elem_type",
"(",
")",
":",
"return",
"[",
"]",
"res",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"size",
")",
":",
"res",
".",
"append",
"(",
"elem_type",
"(",
")",
")",
"return",
"res"
] | 26.684211 | 18.684211 |
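A small usage sketch for gen_elem_array above, assuming the function and its is_type/ContainerType helpers are in scope.

print(gen_elem_array(3))         # -> [None, None, None]
print(gen_elem_array(3, int))    # -> [0, 0, 0]; int() is called once per slot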
def BFS(G, start):
"""
Algorithm for breadth-first searching the vertices of a graph.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
color = {}
pred = {}
dist = {}
queue = Queue()
queue.put(start)
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
dist[vertex] = 0
while queue.qsize() > 0:
current = queue.get()
for neighbor in G.vertices[current]:
if color[neighbor] == 'white':
color[neighbor] = 'grey'
pred[neighbor] = current
dist[neighbor] = dist[current] + 1
queue.put(neighbor)
color[current] = 'black'
return pred
|
[
"def",
"BFS",
"(",
"G",
",",
"start",
")",
":",
"if",
"start",
"not",
"in",
"G",
".",
"vertices",
":",
"raise",
"GraphInsertError",
"(",
"\"Vertex %s doesn't exist.\"",
"%",
"(",
"start",
",",
")",
")",
"color",
"=",
"{",
"}",
"pred",
"=",
"{",
"}",
"dist",
"=",
"{",
"}",
"queue",
"=",
"Queue",
"(",
")",
"queue",
".",
"put",
"(",
"start",
")",
"for",
"vertex",
"in",
"G",
".",
"vertices",
":",
"color",
"[",
"vertex",
"]",
"=",
"'white'",
"pred",
"[",
"vertex",
"]",
"=",
"None",
"dist",
"[",
"vertex",
"]",
"=",
"0",
"while",
"queue",
".",
"qsize",
"(",
")",
">",
"0",
":",
"current",
"=",
"queue",
".",
"get",
"(",
")",
"for",
"neighbor",
"in",
"G",
".",
"vertices",
"[",
"current",
"]",
":",
"if",
"color",
"[",
"neighbor",
"]",
"==",
"'white'",
":",
"color",
"[",
"neighbor",
"]",
"=",
"'grey'",
"pred",
"[",
"neighbor",
"]",
"=",
"current",
"dist",
"[",
"neighbor",
"]",
"=",
"dist",
"[",
"current",
"]",
"+",
"1",
"queue",
".",
"put",
"(",
"neighbor",
")",
"color",
"[",
"current",
"]",
"=",
"'black'",
"return",
"pred"
] | 30.68 | 13 |
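A hedged usage sketch for BFS above. It assumes the function (together with the Queue import and GraphInsertError it relies on) is in scope; ToyGraph is a stand-in whose vertices attribute maps each vertex to its neighbours, which is the only interface the function touches.

class ToyGraph:
    def __init__(self, adjacency):
        self.vertices = adjacency

G = ToyGraph({'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []})
print(BFS(G, 'a'))
# -> {'a': None, 'b': 'a', 'c': 'a', 'd': 'b'}  (predecessor of each vertex)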
def get_host(name):
"""
Prints the public dns name of `name`, if it exists.
:param name: The instance name.
:type name: ``str``
"""
f = {'instance-state-name': 'running', 'tag:Name': name}
ec2 = boto.connect_ec2(region=get_region())
rs = ec2.get_all_instances(filters=f)
if len(rs) == 0:
raise Exception('Host "%s" not found' % name)
print(rs[0].instances[0].public_dns_name)
|
[
"def",
"get_host",
"(",
"name",
")",
":",
"f",
"=",
"{",
"'instance-state-name'",
":",
"'running'",
",",
"'tag:Name'",
":",
"name",
"}",
"ec2",
"=",
"boto",
".",
"connect_ec2",
"(",
"region",
"=",
"get_region",
"(",
")",
")",
"rs",
"=",
"ec2",
".",
"get_all_instances",
"(",
"filters",
"=",
"f",
")",
"if",
"len",
"(",
"rs",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'Host \"%s\" not found'",
"%",
"name",
")",
"print",
"(",
"rs",
"[",
"0",
"]",
".",
"instances",
"[",
"0",
"]",
".",
"public_dns_name",
")"
] | 31.692308 | 12.615385 |
def get_login_failed_count(name):
'''
    Get the number of failed login attempts
:param str name: The username of the account
:return: The number of failed login attempts
:rtype: int
:raises: CommandExecutionError on user not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' shadow.get_login_failed_count admin
'''
ret = _get_account_policy_data_value(name, 'failedLoginCount')
return salt.utils.mac_utils.parse_return(ret)
|
[
"def",
"get_login_failed_count",
"(",
"name",
")",
":",
"ret",
"=",
"_get_account_policy_data_value",
"(",
"name",
",",
"'failedLoginCount'",
")",
"return",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"parse_return",
"(",
"ret",
")"
] | 24.55 | 26.35 |
def _generate_div_id_chart(prefix="chart_id", digits=8):
"""Generate a random id for div chart.
"""
choices = (random.randrange(0, 52) for _ in range(digits))
return prefix + "".join((string.ascii_letters[x] for x in choices))
|
[
"def",
"_generate_div_id_chart",
"(",
"prefix",
"=",
"\"chart_id\"",
",",
"digits",
"=",
"8",
")",
":",
"choices",
"=",
"(",
"random",
".",
"randrange",
"(",
"0",
",",
"52",
")",
"for",
"_",
"in",
"range",
"(",
"digits",
")",
")",
"return",
"prefix",
"+",
"\"\"",
".",
"join",
"(",
"(",
"string",
".",
"ascii_letters",
"[",
"x",
"]",
"for",
"x",
"in",
"choices",
")",
")"
] | 47.6 | 13.8 |
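A quick usage sketch for _generate_div_id_chart above, assuming the function and its random/string imports are in scope; the ids are random, so the outputs shown are only examples.

print(_generate_div_id_chart())                          # e.g. "chart_idqKZuTrWb"
print(_generate_div_id_chart(prefix="div_", digits=4))   # e.g. "div_MkQz"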
def add_spectrum(self, label, spectrum, color=None):
"""
Adds a Spectrum for plotting.
Args:
label (str): Label for the Spectrum. Must be unique.
spectrum: Spectrum object
color (str): This is passed on to matplotlib. E.g., "k--" indicates
a dashed black line. If None, a color will be chosen based on
the default color cycle.
"""
self._spectra[label] = spectrum
self.colors.append(
color or
self.colors_cycle[len(self._spectra) % len(self.colors_cycle)])
|
[
"def",
"add_spectrum",
"(",
"self",
",",
"label",
",",
"spectrum",
",",
"color",
"=",
"None",
")",
":",
"self",
".",
"_spectra",
"[",
"label",
"]",
"=",
"spectrum",
"self",
".",
"colors",
".",
"append",
"(",
"color",
"or",
"self",
".",
"colors_cycle",
"[",
"len",
"(",
"self",
".",
"_spectra",
")",
"%",
"len",
"(",
"self",
".",
"colors_cycle",
")",
"]",
")"
] | 38.8 | 16.933333 |
def rfc2822_format(val):
"""
Takes either a date, a datetime, or a string, and returns a string that
represents the value in RFC 2822 format. If a string is passed it is
returned unchanged.
"""
if isinstance(val, six.string_types):
return val
elif isinstance(val, (datetime.datetime, datetime.date)):
# Convert to a timestamp
val = time.mktime(val.timetuple())
if isinstance(val, numbers.Number):
return email.utils.formatdate(val)
else:
# Bail
return val
|
[
"def",
"rfc2822_format",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"return",
"val",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"date",
")",
")",
":",
"# Convert to a timestamp",
"val",
"=",
"time",
".",
"mktime",
"(",
"val",
".",
"timetuple",
"(",
")",
")",
"if",
"isinstance",
"(",
"val",
",",
"numbers",
".",
"Number",
")",
":",
"return",
"email",
".",
"utils",
".",
"formatdate",
"(",
"val",
")",
"else",
":",
"# Bail",
"return",
"val"
] | 32.75 | 14.75 |
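A short usage sketch for rfc2822_format above, assuming the function and its imports (six, time, numbers, email.utils) are in scope.

import datetime

print(rfc2822_format(datetime.datetime(2020, 1, 1, 12, 0, 0)))
# e.g. "Wed, 01 Jan 2020 12:00:00 -0000"; the exact value depends on the local timezone
print(rfc2822_format("Wed, 01 Jan 2020 12:00:00 -0000"))  # strings pass through unchanged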
def repair_broken_bonds(self, slab, bonds):
"""
This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us how the repair bonds should look like.
Arg:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
for pair in bonds.keys():
blength = bonds[pair]
# First lets determine which element should be the
# reference (center element) to determine broken bonds.
# e.g. P for a PO4 bond. Find integer coordination
# numbers of the pair of elements wrt to each other
cn_dict = {}
for i, el in enumerate(pair):
cnlist = []
for site in self.oriented_unit_cell:
poly_coord = 0
if site.species_string == el:
for nn in self.oriented_unit_cell.get_neighbors(
site, blength):
if nn[0].species_string == pair[i-1]:
poly_coord += 1
cnlist.append(poly_coord)
cn_dict[el] = cnlist
# We make the element with the higher coordination our reference
if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
element1, element2 = pair
else:
element2, element1 = pair
for i, site in enumerate(slab):
# Determine the coordination of our reference
if site.species_string == element1:
poly_coord = 0
for neighbor in slab.get_neighbors(site, blength):
poly_coord += 1 if neighbor[0].species_string == element2 else 0
# suppose we find an undercoordinated reference atom
if poly_coord not in cn_dict[element1]:
# We get the reference atom of the broken bonds
# (undercoordinated), move it to the other surface
slab = self.move_to_other_side(slab, [i])
# find its NNs with the corresponding
# species it should be coordinated with
neighbors = slab.get_neighbors(slab[i], blength,
include_index=True)
tomove = [nn[2] for nn in neighbors if
nn[0].species_string == element2]
tomove.append(i)
# and then move those NNs along with the central
# atom back to the other side of the slab again
slab = self.move_to_other_side(slab, tomove)
return slab
|
[
"def",
"repair_broken_bonds",
"(",
"self",
",",
"slab",
",",
"bonds",
")",
":",
"for",
"pair",
"in",
"bonds",
".",
"keys",
"(",
")",
":",
"blength",
"=",
"bonds",
"[",
"pair",
"]",
"# First lets determine which element should be the",
"# reference (center element) to determine broken bonds.",
"# e.g. P for a PO4 bond. Find integer coordination",
"# numbers of the pair of elements wrt to each other",
"cn_dict",
"=",
"{",
"}",
"for",
"i",
",",
"el",
"in",
"enumerate",
"(",
"pair",
")",
":",
"cnlist",
"=",
"[",
"]",
"for",
"site",
"in",
"self",
".",
"oriented_unit_cell",
":",
"poly_coord",
"=",
"0",
"if",
"site",
".",
"species_string",
"==",
"el",
":",
"for",
"nn",
"in",
"self",
".",
"oriented_unit_cell",
".",
"get_neighbors",
"(",
"site",
",",
"blength",
")",
":",
"if",
"nn",
"[",
"0",
"]",
".",
"species_string",
"==",
"pair",
"[",
"i",
"-",
"1",
"]",
":",
"poly_coord",
"+=",
"1",
"cnlist",
".",
"append",
"(",
"poly_coord",
")",
"cn_dict",
"[",
"el",
"]",
"=",
"cnlist",
"# We make the element with the higher coordination our reference",
"if",
"max",
"(",
"cn_dict",
"[",
"pair",
"[",
"0",
"]",
"]",
")",
">",
"max",
"(",
"cn_dict",
"[",
"pair",
"[",
"1",
"]",
"]",
")",
":",
"element1",
",",
"element2",
"=",
"pair",
"else",
":",
"element2",
",",
"element1",
"=",
"pair",
"for",
"i",
",",
"site",
"in",
"enumerate",
"(",
"slab",
")",
":",
"# Determine the coordination of our reference",
"if",
"site",
".",
"species_string",
"==",
"element1",
":",
"poly_coord",
"=",
"0",
"for",
"neighbor",
"in",
"slab",
".",
"get_neighbors",
"(",
"site",
",",
"blength",
")",
":",
"poly_coord",
"+=",
"1",
"if",
"neighbor",
"[",
"0",
"]",
".",
"species_string",
"==",
"element2",
"else",
"0",
"# suppose we find an undercoordinated reference atom",
"if",
"poly_coord",
"not",
"in",
"cn_dict",
"[",
"element1",
"]",
":",
"# We get the reference atom of the broken bonds",
"# (undercoordinated), move it to the other surface",
"slab",
"=",
"self",
".",
"move_to_other_side",
"(",
"slab",
",",
"[",
"i",
"]",
")",
"# find its NNs with the corresponding",
"# species it should be coordinated with",
"neighbors",
"=",
"slab",
".",
"get_neighbors",
"(",
"slab",
"[",
"i",
"]",
",",
"blength",
",",
"include_index",
"=",
"True",
")",
"tomove",
"=",
"[",
"nn",
"[",
"2",
"]",
"for",
"nn",
"in",
"neighbors",
"if",
"nn",
"[",
"0",
"]",
".",
"species_string",
"==",
"element2",
"]",
"tomove",
".",
"append",
"(",
"i",
")",
"# and then move those NNs along with the central",
"# atom back to the other side of the slab again",
"slab",
"=",
"self",
".",
"move_to_other_side",
"(",
"slab",
",",
"tomove",
")",
"return",
"slab"
] | 46.042254 | 22.492958 |
def insert_strain_option_group_multi_ifo(parser):
"""
Adds the options used to call the pycbc.strain.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
data_reading_group_multi = parser.add_argument_group("Options for obtaining"
" h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
"if the --psd-estimation option is given. This group "
"supports reading from multiple ifos simultaneously.")
# Required options
data_reading_group_multi.add_argument("--gps-start-time", nargs='+',
action=MultiDetOptionAction, metavar='IFO:TIME',
help="The gps start time of the data "
"(integer seconds)", type=int)
data_reading_group_multi.add_argument("--gps-end-time", nargs='+', type=int,
action=MultiDetOptionAction, metavar='IFO:TIME',
help="The gps end time of the data "
"(integer seconds)")
data_reading_group_multi.add_argument("--strain-high-pass", nargs='+',
action=MultiDetOptionAction,
type=float, metavar='IFO:FREQUENCY',
help="High pass frequency")
data_reading_group_multi.add_argument("--pad-data", nargs='+',
action=MultiDetOptionAction,
type=int, metavar='IFO:LENGTH',
help="Extra padding to remove highpass corruption "
"(integer seconds)")
data_reading_group_multi.add_argument("--taper-data", nargs='+',
action=MultiDetOptionAction,
type=int, default=0, metavar='IFO:LENGTH',
help="Taper ends of data to zero using the "
"supplied length as a window (integer seconds)")
data_reading_group_multi.add_argument("--sample-rate", type=int, nargs='+',
action=MultiDetOptionAction, metavar='IFO:RATE',
help="The sample rate to use for h(t) generation "
" (integer Hz).")
data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+',
action=MultiDetOptionActionSpecial,
metavar='IFO:CHANNEL',
help="The channel containing the gravitational "
"strain data")
#Read from cache file
data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_CACHE',
help="Cache file containing the frame locations.")
#Read from frame files
data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_FILES',
help="list of frame files")
# Use datafind to get frame files
data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_TYPE',
help="(optional) Replaces frame-files. "
"Use datafind to get the needed frame "
"file(s) of this type.")
#Filter frame files by URL
data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_SIEVE',
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
#Generate gaussian noise with given psd
data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+",
action=MultiDetOptionAction, metavar='IFO:CHOICE',
help="Name of model PSD for generating fake "
"gaussian noise. Choose from %s or zeroNoise" \
%((', ').join(pycbc.psd.get_lalsim_psd_list()),) )
data_reading_group_multi.add_argument("--fake-strain-seed", type=int,
default=0, nargs="+", action=MultiDetOptionAction,
metavar='IFO:SEED',
help="Seed value for the generation of fake "
"colored gaussian noise")
data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="File containing ASD for generating fake "
"noise from it.")
#optional
data_reading_group_multi.add_argument("--injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"waveforms into the strain")
data_reading_group_multi.add_argument("--sgburst-injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"sine-Gaussian burst waveforms into the strain")
data_reading_group_multi.add_argument("--injection-scale-factor",
type=float, nargs="+", action=MultiDetOptionAction,
metavar="IFO:VAL", default=1.,
help="Multiple injections by this factor "
"before injecting into the data.")
data_reading_group_multi.add_argument("--gating-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Text file of gating segments to apply."
" Format of each line is (all times in secs):"
" gps_time zeros_half_width pad_half_width")
data_reading_group_multi.add_argument('--autogating-threshold', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
data_reading_group_multi.add_argument('--autogating-cluster', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group_multi.add_argument('--autogating-width', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group_multi.add_argument('--autogating-taper', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group_multi.add_argument('--autogating-pad', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
data_reading_group_multi.add_argument("--normalize-strain", type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:VALUE',
help="(optional) Divide frame data by constant.")
data_reading_group_multi.add_argument("--zpk-z", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group_multi.add_argument("--zpk-p", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group_multi.add_argument("--zpk-k", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
return data_reading_group_multi
|
[
"def",
"insert_strain_option_group_multi_ifo",
"(",
"parser",
")",
":",
"data_reading_group_multi",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Options for obtaining\"",
"\" h(t)\"",
",",
"\"These options are used for generating h(t) either by \"",
"\"reading from a file or by generating it. This is only \"",
"\"needed if the PSD is to be estimated from the data, ie. \"",
"\"if the --psd-estimation option is given. This group \"",
"\"supports reading from multiple ifos simultaneously.\"",
")",
"# Required options",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--gps-start-time\"",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:TIME'",
",",
"help",
"=",
"\"The gps start time of the data \"",
"\"(integer seconds)\"",
",",
"type",
"=",
"int",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--gps-end-time\"",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"int",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:TIME'",
",",
"help",
"=",
"\"The gps end time of the data \"",
"\"(integer seconds)\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--strain-high-pass\"",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'IFO:FREQUENCY'",
",",
"help",
"=",
"\"High pass frequency\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--pad-data\"",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"type",
"=",
"int",
",",
"metavar",
"=",
"'IFO:LENGTH'",
",",
"help",
"=",
"\"Extra padding to remove highpass corruption \"",
"\"(integer seconds)\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--taper-data\"",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"metavar",
"=",
"'IFO:LENGTH'",
",",
"help",
"=",
"\"Taper ends of data to zero using the \"",
"\"supplied length as a window (integer seconds)\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--sample-rate\"",
",",
"type",
"=",
"int",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:RATE'",
",",
"help",
"=",
"\"The sample rate to use for h(t) generation \"",
"\" (integer Hz).\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--channel-name\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"MultiDetOptionActionSpecial",
",",
"metavar",
"=",
"'IFO:CHANNEL'",
",",
"help",
"=",
"\"The channel containing the gravitational \"",
"\"strain data\"",
")",
"#Read from cache file",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--frame-cache\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAppendAction",
",",
"metavar",
"=",
"'IFO:FRAME_CACHE'",
",",
"help",
"=",
"\"Cache file containing the frame locations.\"",
")",
"#Read from frame files",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--frame-files\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAppendAction",
",",
"metavar",
"=",
"'IFO:FRAME_FILES'",
",",
"help",
"=",
"\"list of frame files\"",
")",
"# Use datafind to get frame files",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--frame-type\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:FRAME_TYPE'",
",",
"help",
"=",
"\"(optional) Replaces frame-files. \"",
"\"Use datafind to get the needed frame \"",
"\"file(s) of this type.\"",
")",
"#Filter frame files by URL",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--frame-sieve\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:FRAME_SIEVE'",
",",
"help",
"=",
"\"(optional), Only use frame files where the \"",
"\"URL matches the regular expression given.\"",
")",
"#Generate gaussian noise with given psd",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--fake-strain\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:CHOICE'",
",",
"help",
"=",
"\"Name of model PSD for generating fake \"",
"\"gaussian noise. Choose from %s or zeroNoise\"",
"%",
"(",
"(",
"', '",
")",
".",
"join",
"(",
"pycbc",
".",
"psd",
".",
"get_lalsim_psd_list",
"(",
")",
")",
",",
")",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--fake-strain-seed\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:SEED'",
",",
"help",
"=",
"\"Seed value for the generation of fake \"",
"\"colored gaussian noise\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--fake-strain-from-file\"",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:FILE'",
",",
"help",
"=",
"\"File containing ASD for generating fake \"",
"\"noise from it.\"",
")",
"#optional",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--injection-file\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:FILE'",
",",
"help",
"=",
"\"(optional) Injection file used to add \"",
"\"waveforms into the strain\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--sgburst-injection-file\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:FILE'",
",",
"help",
"=",
"\"(optional) Injection file used to add \"",
"\"sine-Gaussian burst waveforms into the strain\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--injection-scale-factor\"",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"\"IFO:VAL\"",
",",
"default",
"=",
"1.",
",",
"help",
"=",
"\"Multiple injections by this factor \"",
"\"before injecting into the data.\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--gating-file\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:FILE'",
",",
"help",
"=",
"\"(optional) Text file of gating segments to apply.\"",
"\" Format of each line is (all times in secs):\"",
"\" gps_time zeros_half_width pad_half_width\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"'--autogating-threshold'",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:SIGMA'",
",",
"help",
"=",
"'If given, find and gate glitches '",
"'producing a deviation larger than '",
"'SIGMA in the whitened strain time '",
"'series.'",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"'--autogating-cluster'",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:SECONDS'",
",",
"default",
"=",
"5.",
",",
"help",
"=",
"'Length of clustering window for '",
"'detecting glitches for autogating.'",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"'--autogating-width'",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:SECONDS'",
",",
"default",
"=",
"0.25",
",",
"help",
"=",
"'Half-width of the gating window.'",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"'--autogating-taper'",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:SECONDS'",
",",
"default",
"=",
"0.25",
",",
"help",
"=",
"'Taper the strain before and after '",
"'each gating window over a duration '",
"'of SECONDS.'",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"'--autogating-pad'",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:SECONDS'",
",",
"default",
"=",
"16",
",",
"help",
"=",
"'Ignore the given length of whitened '",
"'strain at the ends of a segment, to '",
"'avoid filters ringing.'",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--normalize-strain\"",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAction",
",",
"metavar",
"=",
"'IFO:VALUE'",
",",
"help",
"=",
"\"(optional) Divide frame data by constant.\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--zpk-z\"",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAppendAction",
",",
"metavar",
"=",
"'IFO:VALUE'",
",",
"help",
"=",
"\"(optional) Zero-pole-gain (zpk) filter strain. \"",
"\"A list of zeros for transfer function\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--zpk-p\"",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAppendAction",
",",
"metavar",
"=",
"'IFO:VALUE'",
",",
"help",
"=",
"\"(optional) Zero-pole-gain (zpk) filter strain. \"",
"\"A list of poles for transfer function\"",
")",
"data_reading_group_multi",
".",
"add_argument",
"(",
"\"--zpk-k\"",
",",
"type",
"=",
"float",
",",
"nargs",
"=",
"\"+\"",
",",
"action",
"=",
"MultiDetOptionAppendAction",
",",
"metavar",
"=",
"'IFO:VALUE'",
",",
"help",
"=",
"\"(optional) Zero-pole-gain (zpk) filter strain. \"",
"\"Transfer function gain\"",
")",
"return",
"data_reading_group_multi"
] | 56.48 | 27.234286 |
def data_filler_detailed_registration(self, number_of_rows, pipe):
        '''Creates keys with detailed registration information.
'''
try:
for i in range(number_of_rows):
pipe.hmset('detailed_registration:%s' % i, {
'id': rnd_id_generator(self),
'email': self.faker.safe_email(),
'password': self.faker.md5(raw_output=False),
'lastname': self.faker.last_name(),
'name': self.faker.first_name(),
'address': self.faker.address(),
'phone': self.faker.phone_number()
})
pipe.execute()
logger.warning('detailed_registration Commits are successful after write job!', extra=d)
except Exception as e:
logger.error(e, extra=d)
|
[
"def",
"data_filler_detailed_registration",
"(",
"self",
",",
"number_of_rows",
",",
"pipe",
")",
":",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"number_of_rows",
")",
":",
"pipe",
".",
"hmset",
"(",
"'detailed_registration:%s'",
"%",
"i",
",",
"{",
"'id'",
":",
"rnd_id_generator",
"(",
"self",
")",
",",
"'email'",
":",
"self",
".",
"faker",
".",
"safe_email",
"(",
")",
",",
"'password'",
":",
"self",
".",
"faker",
".",
"md5",
"(",
"raw_output",
"=",
"False",
")",
",",
"'lastname'",
":",
"self",
".",
"faker",
".",
"last_name",
"(",
")",
",",
"'name'",
":",
"self",
".",
"faker",
".",
"first_name",
"(",
")",
",",
"'address'",
":",
"self",
".",
"faker",
".",
"address",
"(",
")",
",",
"'phone'",
":",
"self",
".",
"faker",
".",
"phone_number",
"(",
")",
"}",
")",
"pipe",
".",
"execute",
"(",
")",
"logger",
".",
"warning",
"(",
"'detailed_registration Commits are successful after write job!'",
",",
"extra",
"=",
"d",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
",",
"extra",
"=",
"d",
")"
] | 41.9 | 20.6 |
def database(
state, host, name,
# Desired database settings
present=True,
collate=None, charset=None,
user=None, user_hostname='localhost', user_privileges='ALL',
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Add/remove MySQL databases.
+ name: the name of the database
+ present: whether the database should exist or not
+ collate: the collate to use when creating the database
+ charset: the charset to use when creating the database
+ user: MySQL user to grant privileges on this database to
+ user_hostname: the hostname of the MySQL user to grant
+ user_privileges: privileges to grant to any specified user
+ mysql_*: global module arguments, see above
Collate/charset:
these will only be applied if the database does not exist - ie pyinfra
    will not attempt to alter the existing databases' collate/character sets.
'''
current_databases = host.fact.mysql_databases(
mysql_user, mysql_password,
mysql_host, mysql_port,
)
is_present = name in current_databases
if not present:
if is_present:
yield make_execute_mysql_command(
'DROP DATABASE {0}'.format(name),
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
)
return
# We want the database but it doesn't exist
if present and not is_present:
sql_bits = ['CREATE DATABASE {0}'.format(name)]
if collate:
sql_bits.append('COLLATE {0}'.format(collate))
if charset:
sql_bits.append('CHARSET {0}'.format(charset))
yield make_execute_mysql_command(
' '.join(sql_bits),
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
)
# Ensure any user privileges for this database
if user and user_privileges:
yield privileges(
state, host, user,
user_hostname=user_hostname,
privileges=user_privileges,
database=name,
)
|
[
"def",
"database",
"(",
"state",
",",
"host",
",",
"name",
",",
"# Desired database settings",
"present",
"=",
"True",
",",
"collate",
"=",
"None",
",",
"charset",
"=",
"None",
",",
"user",
"=",
"None",
",",
"user_hostname",
"=",
"'localhost'",
",",
"user_privileges",
"=",
"'ALL'",
",",
"# Details for speaking to MySQL via `mysql` CLI",
"mysql_user",
"=",
"None",
",",
"mysql_password",
"=",
"None",
",",
"mysql_host",
"=",
"None",
",",
"mysql_port",
"=",
"None",
",",
")",
":",
"current_databases",
"=",
"host",
".",
"fact",
".",
"mysql_databases",
"(",
"mysql_user",
",",
"mysql_password",
",",
"mysql_host",
",",
"mysql_port",
",",
")",
"is_present",
"=",
"name",
"in",
"current_databases",
"if",
"not",
"present",
":",
"if",
"is_present",
":",
"yield",
"make_execute_mysql_command",
"(",
"'DROP DATABASE {0}'",
".",
"format",
"(",
"name",
")",
",",
"user",
"=",
"mysql_user",
",",
"password",
"=",
"mysql_password",
",",
"host",
"=",
"mysql_host",
",",
"port",
"=",
"mysql_port",
",",
")",
"return",
"# We want the database but it doesn't exist",
"if",
"present",
"and",
"not",
"is_present",
":",
"sql_bits",
"=",
"[",
"'CREATE DATABASE {0}'",
".",
"format",
"(",
"name",
")",
"]",
"if",
"collate",
":",
"sql_bits",
".",
"append",
"(",
"'COLLATE {0}'",
".",
"format",
"(",
"collate",
")",
")",
"if",
"charset",
":",
"sql_bits",
".",
"append",
"(",
"'CHARSET {0}'",
".",
"format",
"(",
"charset",
")",
")",
"yield",
"make_execute_mysql_command",
"(",
"' '",
".",
"join",
"(",
"sql_bits",
")",
",",
"user",
"=",
"mysql_user",
",",
"password",
"=",
"mysql_password",
",",
"host",
"=",
"mysql_host",
",",
"port",
"=",
"mysql_port",
",",
")",
"# Ensure any user privileges for this database",
"if",
"user",
"and",
"user_privileges",
":",
"yield",
"privileges",
"(",
"state",
",",
"host",
",",
"user",
",",
"user_hostname",
"=",
"user_hostname",
",",
"privileges",
"=",
"user_privileges",
",",
"database",
"=",
"name",
",",
")"
] | 30.71831 | 18.830986 |
def exactly_equal(self, other):
'''
Comparison between VariantCollection instances that takes into account
the info field of Variant instances.
Returns
----------
True if the variants in this collection equal the variants in the other
collection. The Variant.info fields are included in the comparison.
'''
return (
self.__class__ == other.__class__ and
len(self) == len(other) and
all(x.exactly_equal(y) for (x, y) in zip(self, other)))
|
[
"def",
"exactly_equal",
"(",
"self",
",",
"other",
")",
":",
"return",
"(",
"self",
".",
"__class__",
"==",
"other",
".",
"__class__",
"and",
"len",
"(",
"self",
")",
"==",
"len",
"(",
"other",
")",
"and",
"all",
"(",
"x",
".",
"exactly_equal",
"(",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"zip",
"(",
"self",
",",
"other",
")",
")",
")"
] | 38.071429 | 23.642857 |
def delete_shard(self, project_name, logstore_name, shardId):
""" delete a readonly shard
        An unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shardId: int
:param shardId: the read only shard id
:return: ListShardResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
(resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
return DeleteShardResponse(header, resp)
|
[
"def",
"delete_shard",
"(",
"self",
",",
"project_name",
",",
"logstore_name",
",",
"shardId",
")",
":",
"headers",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"resource",
"=",
"\"/logstores/\"",
"+",
"logstore_name",
"+",
"\"/shards/\"",
"+",
"str",
"(",
"shardId",
")",
"(",
"resp",
",",
"header",
")",
"=",
"self",
".",
"_send",
"(",
"\"DELETE\"",
",",
"project_name",
",",
"None",
",",
"resource",
",",
"params",
",",
"headers",
")",
"return",
"DeleteShardResponse",
"(",
"header",
",",
"resp",
")"
] | 34.272727 | 18.954545 |
def license_present(name):
'''
Ensures that the specified PowerPath license key is present
on the host.
name
The license key to ensure is present
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not __salt__['powerpath.has_powerpath']():
ret['result'] = False
ret['comment'] = 'PowerPath is not installed.'
return ret
licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]
if name in licenses:
ret['result'] = True
ret['comment'] = 'License key {0} already present'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'License key {0} is set to be added'.format(name)
return ret
data = __salt__['powerpath.add_license'](name)
if data['result']:
ret['changes'] = {name: 'added'}
ret['result'] = True
ret['comment'] = data['output']
return ret
else:
ret['result'] = False
ret['comment'] = data['output']
return ret
|
[
"def",
"license_present",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"if",
"not",
"__salt__",
"[",
"'powerpath.has_powerpath'",
"]",
"(",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'PowerPath is not installed.'",
"return",
"ret",
"licenses",
"=",
"[",
"l",
"[",
"'key'",
"]",
"for",
"l",
"in",
"__salt__",
"[",
"'powerpath.list_licenses'",
"]",
"(",
")",
"]",
"if",
"name",
"in",
"licenses",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'License key {0} already present'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'License key {0} is set to be added'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"data",
"=",
"__salt__",
"[",
"'powerpath.add_license'",
"]",
"(",
"name",
")",
"if",
"data",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"name",
":",
"'added'",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"data",
"[",
"'output'",
"]",
"return",
"ret",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"data",
"[",
"'output'",
"]",
"return",
"ret"
] | 26.775 | 21.075 |
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
|
[
"def",
"_add_tasks",
"(",
"config",
",",
"tasks_file",
",",
"tasks_type",
",",
"priority",
",",
"redundancy",
")",
":",
"try",
":",
"project",
"=",
"find_project_by_short_name",
"(",
"config",
".",
"project",
"[",
"'short_name'",
"]",
",",
"config",
".",
"pbclient",
",",
"config",
".",
"all",
")",
"data",
"=",
"_load_data",
"(",
"tasks_file",
",",
"tasks_type",
")",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"(",
"\"Unknown format for the tasks file. Use json, csv, po or \"",
"\"properties.\"",
")",
"# If true, warn user",
"# if sleep: # pragma: no cover",
"# click.secho(msg, fg='yellow')",
"# Show progress bar",
"with",
"click",
".",
"progressbar",
"(",
"data",
",",
"label",
"=",
"\"Adding Tasks\"",
")",
"as",
"pgbar",
":",
"for",
"d",
"in",
"pgbar",
":",
"task_info",
"=",
"create_task_info",
"(",
"d",
")",
"response",
"=",
"config",
".",
"pbclient",
".",
"create_task",
"(",
"project_id",
"=",
"project",
".",
"id",
",",
"info",
"=",
"task_info",
",",
"n_answers",
"=",
"redundancy",
",",
"priority_0",
"=",
"priority",
")",
"# Check if for the data we have to auto-throttle task creation",
"sleep",
",",
"msg",
"=",
"enable_auto_throttling",
"(",
"config",
",",
"data",
")",
"check_api_error",
"(",
"response",
")",
"# If auto-throttling enabled, sleep for sleep seconds",
"if",
"sleep",
":",
"# pragma: no cover",
"time",
".",
"sleep",
"(",
"sleep",
")",
"return",
"(",
"\"%s tasks added to project: %s\"",
"%",
"(",
"len",
"(",
"data",
")",
",",
"config",
".",
"project",
"[",
"'short_name'",
"]",
")",
")",
"except",
"exceptions",
".",
"ConnectionError",
":",
"return",
"(",
"\"Connection Error! The server %s is not responding\"",
"%",
"config",
".",
"server",
")",
"except",
"(",
"ProjectNotFound",
",",
"TaskNotFound",
")",
":",
"raise"
] | 49.794118 | 19.588235 |
def next(self):
"""Return a column one by one
:raises: StopIteration
"""
if self._cur_col >= len(self._rec):
self._cur_col = 0
raise StopIteration
col = self._rec[self._cur_col]
self._cur_col += 1
return col
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cur_col",
">=",
"len",
"(",
"self",
".",
"_rec",
")",
":",
"self",
".",
"_cur_col",
"=",
"0",
"raise",
"StopIteration",
"col",
"=",
"self",
".",
"_rec",
"[",
"self",
".",
"_cur_col",
"]",
"self",
".",
"_cur_col",
"+=",
"1",
"return",
"col"
] | 25.272727 | 12.363636 |
def valUserCert(self, byts, cacerts=None):
'''
Validate the PEM encoded x509 user certificate bytes and return it.
Args:
byts (bytes): The bytes for the User Certificate.
cacerts (tuple): A tuple of OpenSSL.crypto.X509 CA Certificates.
Raises:
OpenSSL.crypto.X509StoreContextError: If the certificate is not valid.
Returns:
OpenSSL.crypto.X509: The certificate, if it is valid.
'''
cert = crypto.load_certificate(crypto.FILETYPE_PEM, byts)
if cacerts is None:
cacerts = self.getCaCerts()
store = crypto.X509Store()
[store.add_cert(cacert) for cacert in cacerts]
ctx = crypto.X509StoreContext(store, cert)
ctx.verify_certificate() # raises X509StoreContextError if unable to verify
return cert
|
[
"def",
"valUserCert",
"(",
"self",
",",
"byts",
",",
"cacerts",
"=",
"None",
")",
":",
"cert",
"=",
"crypto",
".",
"load_certificate",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"byts",
")",
"if",
"cacerts",
"is",
"None",
":",
"cacerts",
"=",
"self",
".",
"getCaCerts",
"(",
")",
"store",
"=",
"crypto",
".",
"X509Store",
"(",
")",
"[",
"store",
".",
"add_cert",
"(",
"cacert",
")",
"for",
"cacert",
"in",
"cacerts",
"]",
"ctx",
"=",
"crypto",
".",
"X509StoreContext",
"(",
"store",
",",
"cert",
")",
"ctx",
".",
"verify_certificate",
"(",
")",
"# raises X509StoreContextError if unable to verify",
"return",
"cert"
] | 32.269231 | 27.269231 |
def process_ndex_neighborhood(gene_names, network_id=None,
rdf_out='bel_output.rdf', print_output=True):
"""Return a BelRdfProcessor for an NDEx network neighborhood.
Parameters
----------
gene_names : list
A list of HGNC gene symbols to search the neighborhood of.
Example: ['BRAF', 'MAP2K1']
network_id : Optional[str]
The UUID of the network in NDEx. By default, the BEL Large Corpus
network is used.
rdf_out : Optional[str]
Name of the output file to save the RDF returned by the web service.
This is useful for debugging purposes or to repeat the same query
on an offline RDF file later. Default: bel_output.rdf
Returns
-------
bp : BelRdfProcessor
A BelRdfProcessor object which contains INDRA Statements in bp.statements.
Notes
-----
    This function calls process_belrdf on the RDF string returned by the
    web service.
"""
logger.warning('This method is deprecated and the results are not '
'guaranteed to be correct. Please use '
'process_pybel_neighborhood instead.')
if network_id is None:
network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
url = ndex_bel2rdf + '/network/%s/asBELRDF/query' % network_id
params = {'searchString': ' '.join(gene_names)}
# The ndex_client returns the rdf as the content of a json dict
res_json = ndex_client.send_request(url, params, is_json=True)
if not res_json:
logger.error('No response for NDEx neighborhood query.')
return None
if res_json.get('error'):
error_msg = res_json.get('message')
logger.error('BEL/RDF response contains error: %s' % error_msg)
return None
rdf = res_json.get('content')
if not rdf:
logger.error('BEL/RDF response is empty.')
return None
with open(rdf_out, 'wb') as fh:
fh.write(rdf.encode('utf-8'))
bp = process_belrdf(rdf, print_output=print_output)
return bp
|
[
"def",
"process_ndex_neighborhood",
"(",
"gene_names",
",",
"network_id",
"=",
"None",
",",
"rdf_out",
"=",
"'bel_output.rdf'",
",",
"print_output",
"=",
"True",
")",
":",
"logger",
".",
"warning",
"(",
"'This method is deprecated and the results are not '",
"'guaranteed to be correct. Please use '",
"'process_pybel_neighborhood instead.'",
")",
"if",
"network_id",
"is",
"None",
":",
"network_id",
"=",
"'9ea3c170-01ad-11e5-ac0f-000c29cb28fb'",
"url",
"=",
"ndex_bel2rdf",
"+",
"'/network/%s/asBELRDF/query'",
"%",
"network_id",
"params",
"=",
"{",
"'searchString'",
":",
"' '",
".",
"join",
"(",
"gene_names",
")",
"}",
"# The ndex_client returns the rdf as the content of a json dict",
"res_json",
"=",
"ndex_client",
".",
"send_request",
"(",
"url",
",",
"params",
",",
"is_json",
"=",
"True",
")",
"if",
"not",
"res_json",
":",
"logger",
".",
"error",
"(",
"'No response for NDEx neighborhood query.'",
")",
"return",
"None",
"if",
"res_json",
".",
"get",
"(",
"'error'",
")",
":",
"error_msg",
"=",
"res_json",
".",
"get",
"(",
"'message'",
")",
"logger",
".",
"error",
"(",
"'BEL/RDF response contains error: %s'",
"%",
"error_msg",
")",
"return",
"None",
"rdf",
"=",
"res_json",
".",
"get",
"(",
"'content'",
")",
"if",
"not",
"rdf",
":",
"logger",
".",
"error",
"(",
"'BEL/RDF response is empty.'",
")",
"return",
"None",
"with",
"open",
"(",
"rdf_out",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"rdf",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"bp",
"=",
"process_belrdf",
"(",
"rdf",
",",
"print_output",
"=",
"print_output",
")",
"return",
"bp"
] | 38.403846 | 22.057692 |
def delete_user_from_group(self, GroupID, UserID):
"""Delete a user from a group."""
# http://teampasswordmanager.com/docs/api-groups/#del_user
log.info('Delete user %s from group %s' % (UserID, GroupID))
self.put('groups/%s/delete_user/%s.json' % (GroupID, UserID))
|
[
"def",
"delete_user_from_group",
"(",
"self",
",",
"GroupID",
",",
"UserID",
")",
":",
"# http://teampasswordmanager.com/docs/api-groups/#del_user",
"log",
".",
"info",
"(",
"'Delete user %s from group %s'",
"%",
"(",
"UserID",
",",
"GroupID",
")",
")",
"self",
".",
"put",
"(",
"'groups/%s/delete_user/%s.json'",
"%",
"(",
"GroupID",
",",
"UserID",
")",
")"
] | 58.8 | 18.6 |
def simplex_iterator(scale, boundary=True):
"""
Systematically iterates through a lattice of points on the 2-simplex.
Parameters
----------
scale: Int
The normalized scale of the simplex, i.e. N such that points (x,y,z)
        satisfy x + y + z == N
boundary: bool, True
Include the boundary points (tuples where at least one
coordinate is zero)
Yields
------
    3-tuples. There are binom(n+2, 2) points (the triangular
    number for scale + 1), less 3*(scale+1) if boundary=False
"""
start = 0
if not boundary:
start = 1
for i in range(start, scale + (1 - start)):
for j in range(start, scale + (1 - start) - i):
k = scale - i - j
yield (i, j, k)
|
[
"def",
"simplex_iterator",
"(",
"scale",
",",
"boundary",
"=",
"True",
")",
":",
"start",
"=",
"0",
"if",
"not",
"boundary",
":",
"start",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"scale",
"+",
"(",
"1",
"-",
"start",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"start",
",",
"scale",
"+",
"(",
"1",
"-",
"start",
")",
"-",
"i",
")",
":",
"k",
"=",
"scale",
"-",
"i",
"-",
"j",
"yield",
"(",
"i",
",",
"j",
",",
"k",
")"
] | 27.407407 | 21.703704 |
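A small usage sketch for simplex_iterator above, assuming the function is in scope.

points = list(simplex_iterator(3))
print(len(points))   # 10, i.e. binom(3 + 2, 2)
print(points[:4])    # [(0, 0, 3), (0, 1, 2), (0, 2, 1), (0, 3, 0)]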
def is_bare_exception(self, node):
"""
Checks if the node is a bare exception name from an except block.
"""
return isinstance(node, Name) and node.id in self.current_except_names
|
[
"def",
"is_bare_exception",
"(",
"self",
",",
"node",
")",
":",
"return",
"isinstance",
"(",
"node",
",",
"Name",
")",
"and",
"node",
".",
"id",
"in",
"self",
".",
"current_except_names"
] | 34.5 | 19.5 |
def summary(self):
"""Summary by packages and dependencies
"""
print("\nStatus summary")
print("=" * 79)
print("{0}found {1} dependencies in {2} packages.{3}\n".format(
self.grey, self.count_dep, self.count_pkg, self.endc))
|
[
"def",
"summary",
"(",
"self",
")",
":",
"print",
"(",
"\"\\nStatus summary\"",
")",
"print",
"(",
"\"=\"",
"*",
"79",
")",
"print",
"(",
"\"{0}found {1} dependencies in {2} packages.{3}\\n\"",
".",
"format",
"(",
"self",
".",
"grey",
",",
"self",
".",
"count_dep",
",",
"self",
".",
"count_pkg",
",",
"self",
".",
"endc",
")",
")"
] | 38.428571 | 14.714286 |
def get_chunk(self,x,z):
"""
Return a chunk specified by the chunk coordinates x,z. Raise InconceivedChunk
if the chunk is not yet generated. To get the raw NBT data, use get_nbt.
"""
return self.chunkclass(self.get_nbt(x, z))
|
[
"def",
"get_chunk",
"(",
"self",
",",
"x",
",",
"z",
")",
":",
"return",
"self",
".",
"chunkclass",
"(",
"self",
".",
"get_nbt",
"(",
"x",
",",
"z",
")",
")"
] | 43.5 | 18.5 |
def save(self, path):
"""Save the specification of this MLPipeline in a JSON file.
The content of the JSON file is the dict returned by the `to_dict` method.
Args:
path (str): Path to the JSON file to write.
"""
with open(path, 'w') as out_file:
json.dump(self.to_dict(), out_file, indent=4)
|
[
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"out_file",
":",
"json",
".",
"dump",
"(",
"self",
".",
"to_dict",
"(",
")",
",",
"out_file",
",",
"indent",
"=",
"4",
")"
] | 34.8 | 20.1 |
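A round-trip sketch with a hypothetical stand-in object exposing to_dict, showing what ends up in the JSON file (not the real MLPipeline class):

    import json

    class ToyPipeline:                                # hypothetical stand-in with a to_dict()
        def to_dict(self):
            return {"primitives": [], "hyperparameters": {}}

        def save(self, path):                         # same pattern as the method above
            with open(path, "w") as out_file:
                json.dump(self.to_dict(), out_file, indent=4)

    ToyPipeline().save("pipeline.json")
    with open("pipeline.json") as fh:
        assert json.load(fh) == {"primitives": [], "hyperparameters": {}}
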
def get_template_sources(self, template_name, template_dirs=None):
"""
    Return the absolute paths to "template_name" in the specified app.
If the name does not contain an app name (no colon), an empty list
is returned.
The parent FilesystemLoader.load_template_source() will take care
of the actual loading for us.
"""
if ':' not in template_name:
return []
app_name, template_name = template_name.split(":", 1)
template_dir = get_app_template_dir(app_name)
if template_dir:
return [get_template_path(template_dir, template_name, self)]
return []
|
[
"def",
"get_template_sources",
"(",
"self",
",",
"template_name",
",",
"template_dirs",
"=",
"None",
")",
":",
"if",
"':'",
"not",
"in",
"template_name",
":",
"return",
"[",
"]",
"app_name",
",",
"template_name",
"=",
"template_name",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"template_dir",
"=",
"get_app_template_dir",
"(",
"app_name",
")",
"if",
"template_dir",
":",
"return",
"[",
"get_template_path",
"(",
"template_dir",
",",
"template_name",
",",
"self",
")",
"]",
"return",
"[",
"]"
] | 43.333333 | 18.533333 |
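The "app_name:template_name" convention can be shown in isolation (get_app_template_dir and get_template_path are project helpers not reproduced here; the name below is hypothetical):

    template_name = "polls:detail.html"               # hypothetical "app:template" name
    if ":" in template_name:
        app_name, template_name = template_name.split(":", 1)
        print(app_name, template_name)                 # polls detail.html
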
def docker_fabric(*args, **kwargs):
"""
:param args: Positional arguments to Docker client.
:param kwargs: Keyword arguments to Docker client.
:return: Docker client.
:rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient
"""
ci = kwargs.get('client_implementation') or env.get('docker_fabric_implementation') or CLIENT_API
if ci == CLIENT_API:
return docker_api(*args, **kwargs)
elif ci == CLIENT_CLI:
return docker_cli(*args, **kwargs)
raise ValueError("Invalid client implementation.", ci)
|
[
"def",
"docker_fabric",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ci",
"=",
"kwargs",
".",
"get",
"(",
"'client_implementation'",
")",
"or",
"env",
".",
"get",
"(",
"'docker_fabric_implementation'",
")",
"or",
"CLIENT_API",
"if",
"ci",
"==",
"CLIENT_API",
":",
"return",
"docker_api",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ci",
"==",
"CLIENT_CLI",
":",
"return",
"docker_cli",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"raise",
"ValueError",
"(",
"\"Invalid client implementation.\"",
",",
"ci",
")"
] | 43.538462 | 16 |
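The client selection is a simple or-chain fallback from keyword argument to the Fabric env to a default; a standalone sketch with stand-ins for the constants and the env dict:

    CLIENT_API, CLIENT_CLI = "api", "cli"              # stand-in constants
    env = {"docker_fabric_implementation": CLIENT_CLI} # stand-in for fabric.api.env

    def pick_implementation(**kwargs):
        return (kwargs.get("client_implementation")
                or env.get("docker_fabric_implementation")
                or CLIENT_API)

    print(pick_implementation())                                   # cli (from env)
    print(pick_implementation(client_implementation=CLIENT_API))   # api
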
def indexed_sum_over_const(cls, ops, kwargs):
r'''Execute an indexed sum over a term that does not depend on the
summation indices
.. math::
\sum_{j=1}^{N} a = N a
>>> a = symbols('a')
>>> i, j = (IdxSym(s) for s in ('i', 'j'))
>>> unicode(Sum(i, 1, 2)(a))
'2 a'
>>> unicode(Sum(j, 1, 2)(Sum(i, 1, 2)(a * i)))
'∑_{i=1}^{2} 2 i a'
'''
term, *ranges = ops
new_ranges = []
new_term = term
for r in ranges:
if r.index_symbol not in term.free_symbols:
try:
new_term *= len(r)
except TypeError:
new_ranges.append(r)
else:
new_ranges.append(r)
if len(new_ranges) == 0:
return new_term
else:
return (new_term, ) + tuple(new_ranges), kwargs
|
[
"def",
"indexed_sum_over_const",
"(",
"cls",
",",
"ops",
",",
"kwargs",
")",
":",
"term",
",",
"",
"*",
"ranges",
"=",
"ops",
"new_ranges",
"=",
"[",
"]",
"new_term",
"=",
"term",
"for",
"r",
"in",
"ranges",
":",
"if",
"r",
".",
"index_symbol",
"not",
"in",
"term",
".",
"free_symbols",
":",
"try",
":",
"new_term",
"*=",
"len",
"(",
"r",
")",
"except",
"TypeError",
":",
"new_ranges",
".",
"append",
"(",
"r",
")",
"else",
":",
"new_ranges",
".",
"append",
"(",
"r",
")",
"if",
"len",
"(",
"new_ranges",
")",
"==",
"0",
":",
"return",
"new_term",
"else",
":",
"return",
"(",
"new_term",
",",
")",
"+",
"tuple",
"(",
"new_ranges",
")",
",",
"kwargs"
] | 25.933333 | 19.266667 |
def data_and_labels(self):
"""
Dataset features and labels in a matrix form for learning.
Also returns sample_ids in the same order.
Returns
-------
data_matrix : ndarray
2D array of shape [num_samples, num_features]
with features corresponding row-wise to sample_ids
labels : ndarray
Array of numeric labels for each sample corresponding row-wise to sample_ids
sample_ids : list
List of sample ids
"""
sample_ids = np.array(self.keys)
label_dict = self.labels
matrix = np.full([self.num_samples, self.num_features], np.nan)
labels = np.full([self.num_samples, 1], np.nan)
for ix, sample in enumerate(sample_ids):
matrix[ix, :] = self.__data[sample]
labels[ix] = label_dict[sample]
return matrix, np.ravel(labels), sample_ids
|
[
"def",
"data_and_labels",
"(",
"self",
")",
":",
"sample_ids",
"=",
"np",
".",
"array",
"(",
"self",
".",
"keys",
")",
"label_dict",
"=",
"self",
".",
"labels",
"matrix",
"=",
"np",
".",
"full",
"(",
"[",
"self",
".",
"num_samples",
",",
"self",
".",
"num_features",
"]",
",",
"np",
".",
"nan",
")",
"labels",
"=",
"np",
".",
"full",
"(",
"[",
"self",
".",
"num_samples",
",",
"1",
"]",
",",
"np",
".",
"nan",
")",
"for",
"ix",
",",
"sample",
"in",
"enumerate",
"(",
"sample_ids",
")",
":",
"matrix",
"[",
"ix",
",",
":",
"]",
"=",
"self",
".",
"__data",
"[",
"sample",
"]",
"labels",
"[",
"ix",
"]",
"=",
"label_dict",
"[",
"sample",
"]",
"return",
"matrix",
",",
"np",
".",
"ravel",
"(",
"labels",
")",
",",
"sample_ids"
] | 33.185185 | 19.333333 |
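A standalone sketch of the same assembly step with plain dicts standing in for the dataset's internal storage (numpy only; sample data is hypothetical):

    import numpy as np

    data = {"s1": [0.1, 0.2], "s2": [0.3, 0.4]}        # hypothetical feature vectors
    labels = {"s1": 0, "s2": 1}

    sample_ids = np.array(list(data))
    matrix = np.full([len(sample_ids), 2], np.nan)
    y = np.full([len(sample_ids), 1], np.nan)
    for ix, sample in enumerate(sample_ids):
        matrix[ix, :] = data[sample]
        y[ix] = labels[sample]
    print(matrix.shape, np.ravel(y), sample_ids)        # (2, 2) [0. 1.] ['s1' 's2']
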
def write(self, message):
"""
(coroutine)
Write a single message into the pipe.
"""
if self.done_f.done():
raise BrokenPipeError
try:
yield From(write_message_to_pipe(self.pipe_instance.pipe_handle, message))
except BrokenPipeError:
self.done_f.set_result(None)
raise
|
[
"def",
"write",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"done_f",
".",
"done",
"(",
")",
":",
"raise",
"BrokenPipeError",
"try",
":",
"yield",
"From",
"(",
"write_message_to_pipe",
"(",
"self",
".",
"pipe_instance",
".",
"pipe_handle",
",",
"message",
")",
")",
"except",
"BrokenPipeError",
":",
"self",
".",
"done_f",
".",
"set_result",
"(",
"None",
")",
"raise"
] | 27.692308 | 15.692308 |
def is_job_complete(job_id, conn=None):
"""
    is_job_complete checks whether the Brain.Jobs Status is Completed
Completed is defined in statics as Done|Stopped|Error
:param job_id: <str> id for the job
:param conn: (optional)<connection> to run on
    :return: <dict> job document if the job is complete, <False> if not
"""
result = False
job = RBJ.get(job_id).run(conn)
if job and job.get(STATUS_FIELD) in COMPLETED:
result = job
return result
|
[
"def",
"is_job_complete",
"(",
"job_id",
",",
"conn",
"=",
"None",
")",
":",
"result",
"=",
"False",
"job",
"=",
"RBJ",
".",
"get",
"(",
"job_id",
")",
".",
"run",
"(",
"conn",
")",
"if",
"job",
"and",
"job",
".",
"get",
"(",
"STATUS_FIELD",
")",
"in",
"COMPLETED",
":",
"result",
"=",
"job",
"return",
"result"
] | 30.066667 | 14.733333 |
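A toy sketch of the completion test itself; the constant and field name below are stand-ins, since the real values live in the module's statics and in RethinkDB job documents:

    COMPLETED = ("Done", "Stopped", "Error")            # stand-in for the statics the docstring mentions
    job = {"Status": "Done"}                             # hypothetical job document; field name assumed
    result = job if job.get("Status") in COMPLETED else False
    print(bool(result))                                  # True
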
def _createTimeSeriesObjects(self, timeSeries, filename):
"""
Create GSSHAPY TimeSeries and TimeSeriesValue Objects Method
"""
try:
# Determine number of value columns
valColumns = len(timeSeries[0]['values'])
# Create List of GSSHAPY TimeSeries objects
series = []
for i in range(0, valColumns):
ts = TimeSeries()
ts.timeSeriesFile = self
series.append(ts)
for record in timeSeries:
for index, value in enumerate(record['values']):
# Create GSSHAPY TimeSeriesValue objects
tsVal = TimeSeriesValue(simTime=record['time'],
value=value)
# Associate with appropriate TimeSeries object via the index
tsVal.timeSeries = series[index]
except IndexError:
log.warning(('%s was opened, but the contents of the file were empty.'
'This file will not be read into the database.') % filename)
except:
raise
|
[
"def",
"_createTimeSeriesObjects",
"(",
"self",
",",
"timeSeries",
",",
"filename",
")",
":",
"try",
":",
"# Determine number of value columns",
"valColumns",
"=",
"len",
"(",
"timeSeries",
"[",
"0",
"]",
"[",
"'values'",
"]",
")",
"# Create List of GSSHAPY TimeSeries objects",
"series",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"valColumns",
")",
":",
"ts",
"=",
"TimeSeries",
"(",
")",
"ts",
".",
"timeSeriesFile",
"=",
"self",
"series",
".",
"append",
"(",
"ts",
")",
"for",
"record",
"in",
"timeSeries",
":",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"record",
"[",
"'values'",
"]",
")",
":",
"# Create GSSHAPY TimeSeriesValue objects",
"tsVal",
"=",
"TimeSeriesValue",
"(",
"simTime",
"=",
"record",
"[",
"'time'",
"]",
",",
"value",
"=",
"value",
")",
"# Associate with appropriate TimeSeries object via the index",
"tsVal",
".",
"timeSeries",
"=",
"series",
"[",
"index",
"]",
"except",
"IndexError",
":",
"log",
".",
"warning",
"(",
"(",
"'%s was opened, but the contents of the file were empty.'",
"'This file will not be read into the database.'",
")",
"%",
"filename",
")",
"except",
":",
"raise"
] | 40.071429 | 19.571429 |
def _get_index(self):
"""
Get the guideline's index.
This must return an ``int``.
Subclasses may override this method.
"""
glyph = self.glyph
if glyph is not None:
parent = glyph
else:
parent = self.font
if parent is None:
return None
return parent.guidelines.index(self)
|
[
"def",
"_get_index",
"(",
"self",
")",
":",
"glyph",
"=",
"self",
".",
"glyph",
"if",
"glyph",
"is",
"not",
"None",
":",
"parent",
"=",
"glyph",
"else",
":",
"parent",
"=",
"self",
".",
"font",
"if",
"parent",
"is",
"None",
":",
"return",
"None",
"return",
"parent",
".",
"guidelines",
".",
"index",
"(",
"self",
")"
] | 24.933333 | 12.266667 |
def build_global(self, global_node):
"""parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object
"""
config_block_lines = self.__build_config_block(
global_node.config_block)
return config.Global(config_block=config_block_lines)
|
[
"def",
"build_global",
"(",
"self",
",",
"global_node",
")",
":",
"config_block_lines",
"=",
"self",
".",
"__build_config_block",
"(",
"global_node",
".",
"config_block",
")",
"return",
"config",
".",
"Global",
"(",
"config_block",
"=",
"config_block_lines",
")"
] | 30 | 18.461538 |
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField
|
[
"def",
"BytesDecoder",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
",",
"key",
",",
"new_default",
")",
":",
"local_DecodeVarint",
"=",
"_DecodeVarint",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_LENGTH_DELIMITED",
")",
"tag_len",
"=",
"len",
"(",
"tag_bytes",
")",
"def",
"DecodeRepeatedField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"while",
"1",
":",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated string.'",
")",
"value",
".",
"append",
"(",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
")",
"# Predict that the next tag is another copy of the same repeated field.",
"pos",
"=",
"new_pos",
"+",
"tag_len",
"if",
"buffer",
"[",
"new_pos",
":",
"pos",
"]",
"!=",
"tag_bytes",
"or",
"new_pos",
"==",
"end",
":",
"# Prediction failed. Return.",
"return",
"new_pos",
"return",
"DecodeRepeatedField",
"else",
":",
"def",
"DecodeField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated string.'",
")",
"field_dict",
"[",
"key",
"]",
"=",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
"return",
"new_pos",
"return",
"DecodeField"
] | 37.057143 | 17.057143 |
def data_request(self, payload, timeout=TIMEOUT):
"""Perform a data_request and return the result."""
request_url = self.base_url + "/data_request"
return requests.get(request_url, timeout=timeout, params=payload)
|
[
"def",
"data_request",
"(",
"self",
",",
"payload",
",",
"timeout",
"=",
"TIMEOUT",
")",
":",
"request_url",
"=",
"self",
".",
"base_url",
"+",
"\"/data_request\"",
"return",
"requests",
".",
"get",
"(",
"request_url",
",",
"timeout",
"=",
"timeout",
",",
"params",
"=",
"payload",
")"
] | 58.5 | 13.75 |
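A hedged usage sketch of the underlying HTTP call; the controller address and the payload are hypothetical, and a reachable device is assumed:

    import requests

    base_url = "http://192.168.1.2:3480"                 # hypothetical Vera controller address
    payload = {"id": "sdata"}                             # hypothetical data_request parameters
    response = requests.get(base_url + "/data_request", timeout=10, params=payload)
    print(response.status_code)
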
def map(self, callable):
""" Apply 'callable' function over all values. """
for k,v in self.iteritems():
self[k] = callable(v)
|
[
"def",
"map",
"(",
"self",
",",
"callable",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"iteritems",
"(",
")",
":",
"self",
"[",
"k",
"]",
"=",
"callable",
"(",
"v",
")"
] | 37.75 | 6.75 |
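The method relies on the Python 2 iteritems API; a self-contained sketch with a hypothetical dict subclass that provides it:

    class Record(dict):
        def iteritems(self):                              # shim for the Python 2 API the method expects
            return iter(self.items())

        def map(self, callable):                          # same body as above
            for k, v in self.iteritems():
                self[k] = callable(v)

    r = Record(a=1, b=2)
    r.map(lambda v: v * 10)
    print(r)                                              # {'a': 10, 'b': 20}
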
def p_static_scalar_unary_op(p):
'''static_scalar : PLUS static_scalar
| MINUS static_scalar'''
p[0] = ast.UnaryOp(p[1], p[2], lineno=p.lineno(1))
|
[
"def",
"p_static_scalar_unary_op",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"UnaryOp",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"2",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | 43 | 7 |
def getResponse(self, context=""):
""" Poll for finished block or first byte ACK.
Args:
context (str): internal serial call context.
Returns:
string: Response, implict cast from byte array.
"""
waits = 0 # allowed interval counter
response_str = "" # returned bytes in string default
try:
waits = 0 # allowed interval counter
while (waits < self.m_max_waits):
bytes_to_read = self.m_ser.inWaiting()
if bytes_to_read > 0:
next_chunk = str(self.m_ser.read(bytes_to_read)).encode('ascii', 'ignore')
response_str += next_chunk
if (len(response_str) == 255):
time.sleep(self.m_force_wait)
return response_str
if (len(response_str) == 1) and (response_str.encode('hex') == '06'):
time.sleep(self.m_force_wait)
return response_str
else: # hang out -- half shortest expected interval (50 ms)
waits += 1
time.sleep(self.m_force_wait)
response_str = ""
except:
ekm_log(traceback.format_exc(sys.exc_info()))
return response_str
|
[
"def",
"getResponse",
"(",
"self",
",",
"context",
"=",
"\"\"",
")",
":",
"waits",
"=",
"0",
"# allowed interval counter",
"response_str",
"=",
"\"\"",
"# returned bytes in string default",
"try",
":",
"waits",
"=",
"0",
"# allowed interval counter",
"while",
"(",
"waits",
"<",
"self",
".",
"m_max_waits",
")",
":",
"bytes_to_read",
"=",
"self",
".",
"m_ser",
".",
"inWaiting",
"(",
")",
"if",
"bytes_to_read",
">",
"0",
":",
"next_chunk",
"=",
"str",
"(",
"self",
".",
"m_ser",
".",
"read",
"(",
"bytes_to_read",
")",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"response_str",
"+=",
"next_chunk",
"if",
"(",
"len",
"(",
"response_str",
")",
"==",
"255",
")",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"m_force_wait",
")",
"return",
"response_str",
"if",
"(",
"len",
"(",
"response_str",
")",
"==",
"1",
")",
"and",
"(",
"response_str",
".",
"encode",
"(",
"'hex'",
")",
"==",
"'06'",
")",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"m_force_wait",
")",
"return",
"response_str",
"else",
":",
"# hang out -- half shortest expected interval (50 ms)",
"waits",
"+=",
"1",
"time",
".",
"sleep",
"(",
"self",
".",
"m_force_wait",
")",
"response_str",
"=",
"\"\"",
"except",
":",
"ekm_log",
"(",
"traceback",
".",
"format_exc",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
")",
"return",
"response_str"
] | 40.625 | 17.78125 |
def close(self):
"""
Mark the latch as closed, and cause every sleeping thread to be woken,
with :class:`mitogen.core.LatchError` raised in each thread.
"""
self._lock.acquire()
try:
self.closed = True
while self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._wake(wsock, cookie)
self._waking += 1
finally:
self._lock.release()
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"self",
".",
"closed",
"=",
"True",
"while",
"self",
".",
"_waking",
"<",
"len",
"(",
"self",
".",
"_sleeping",
")",
":",
"wsock",
",",
"cookie",
"=",
"self",
".",
"_sleeping",
"[",
"self",
".",
"_waking",
"]",
"self",
".",
"_wake",
"(",
"wsock",
",",
"cookie",
")",
"self",
".",
"_waking",
"+=",
"1",
"finally",
":",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | 34.928571 | 15.214286 |
def build_distribution():
"""Build distributions of the code."""
result = invoke.run('python setup.py sdist bdist_egg bdist_wheel',
warn=True, hide=True)
if result.ok:
print("[{}GOOD{}] Distribution built without errors."
.format(GOOD_COLOR, RESET_COLOR))
else:
print('[{}ERROR{}] Something broke trying to package your '
'code...'.format(ERROR_COLOR, RESET_COLOR))
print(result.stderr)
sys.exit(1)
|
[
"def",
"build_distribution",
"(",
")",
":",
"result",
"=",
"invoke",
".",
"run",
"(",
"'python setup.py sdist bdist_egg bdist_wheel'",
",",
"warn",
"=",
"True",
",",
"hide",
"=",
"True",
")",
"if",
"result",
".",
"ok",
":",
"print",
"(",
"\"[{}GOOD{}] Distribution built without errors.\"",
".",
"format",
"(",
"GOOD_COLOR",
",",
"RESET_COLOR",
")",
")",
"else",
":",
"print",
"(",
"'[{}ERROR{}] Something broke trying to package your '",
"'code...'",
".",
"format",
"(",
"ERROR_COLOR",
",",
"RESET_COLOR",
")",
")",
"print",
"(",
"result",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | 40.583333 | 17.416667 |
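A small sketch of the invoke.run call pattern used above; the command is hypothetical, and warn=True keeps a non-zero exit status from raising so result.ok can be inspected:

    import invoke

    result = invoke.run("python --version", warn=True, hide=True)
    print(result.ok, result.stdout.strip())
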
def generate_unique_key(master_key_path, url):
"""
Input1: Path to the BD2K Master Key (for S3 Encryption)
Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
Returns: 32-byte unique key generated for that URL
"""
with open(master_key_path, 'r') as f:
master_key = f.read()
assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
'Key: {}, Length: {}'.format(master_key, len(master_key))
new_key = hashlib.sha256(master_key + url).digest()
assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
return new_key
|
[
"def",
"generate_unique_key",
"(",
"master_key_path",
",",
"url",
")",
":",
"with",
"open",
"(",
"master_key_path",
",",
"'r'",
")",
"as",
"f",
":",
"master_key",
"=",
"f",
".",
"read",
"(",
")",
"assert",
"len",
"(",
"master_key",
")",
"==",
"32",
",",
"'Invalid Key! Must be 32 characters. '",
"'Key: {}, Length: {}'",
".",
"format",
"(",
"master_key",
",",
"len",
"(",
"master_key",
")",
")",
"new_key",
"=",
"hashlib",
".",
"sha256",
"(",
"master_key",
"+",
"url",
")",
".",
"digest",
"(",
")",
"assert",
"len",
"(",
"new_key",
")",
"==",
"32",
",",
"'New key is invalid and is not 32 characters: {}'",
".",
"format",
"(",
"new_key",
")",
"return",
"new_key"
] | 49.785714 | 24.928571 |
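The derived key is just SHA-256 over the master key concatenated with the URL; a Python 3 flavored sketch with stand-in inputs (the original concatenates str under Python 2 and reads the key from a file):

    import hashlib

    master_key = b"0" * 32                                # stand-in for the 32-byte master key file contents
    url = b"https://s3-us-west-2.amazonaws.com/bucket/sample.bam"
    new_key = hashlib.sha256(master_key + url).digest()
    print(len(new_key))                                   # 32
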