repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes)
---|---|---|---|---|---|---|---|---|
apache/airflow | airflow/contrib/hooks/pinot_hook.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L66-L76 | def get_records(self, sql):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall() | [
"def",
"get_records",
"(",
"self",
",",
"sql",
")",
":",
"with",
"self",
".",
"get_conn",
"(",
")",
"as",
"cur",
":",
"cur",
".",
"execute",
"(",
"sql",
")",
"return",
"cur",
".",
"fetchall",
"(",
")"
]
| Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str | [
"Executes",
"the",
"sql",
"and",
"returns",
"a",
"set",
"of",
"records",
"."
]
| python | test |
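Note: a minimal usage sketch for the `get_records` record above, assuming the method belongs to Airflow's `PinotDbApiHook` (the class itself is not shown in the record); the connection id and query are illustrative, not part of the record.

```python
# Illustrative only: assumes an Airflow install with the contrib Pinot hook and a configured broker connection.
from airflow.contrib.hooks.pinot_hook import PinotDbApiHook

hook = PinotDbApiHook(pinot_broker_conn_id="pinot_broker_default")  # conn id is an assumption
rows = hook.get_records("SELECT playerName FROM baseballStats LIMIT 5")  # query is illustrative
for row in rows:
    print(row)
```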
mbj4668/pyang | pyang/plugins/jsonxsl.py | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/jsonxsl.py#L124-L135 | def process_rpc(self, rpc):
"""Process input and output parts of `rpc`."""
p = "/nc:rpc/" + self.qname(rpc)
tmpl = self.xsl_template(p)
inp = rpc.search_one("input")
if inp is not None:
ct = self.xsl_calltemplate("rpc-input", tmpl)
self.xsl_withparam("nsid", rpc.i_module.i_modulename + ":", ct)
self.process_children(inp, p, 2)
outp = rpc.search_one("output")
if outp is not None:
self.process_children(outp, "/nc:rpc-reply", 1) | [
"def",
"process_rpc",
"(",
"self",
",",
"rpc",
")",
":",
"p",
"=",
"\"/nc:rpc/\"",
"+",
"self",
".",
"qname",
"(",
"rpc",
")",
"tmpl",
"=",
"self",
".",
"xsl_template",
"(",
"p",
")",
"inp",
"=",
"rpc",
".",
"search_one",
"(",
"\"input\"",
")",
"if",
"inp",
"is",
"not",
"None",
":",
"ct",
"=",
"self",
".",
"xsl_calltemplate",
"(",
"\"rpc-input\"",
",",
"tmpl",
")",
"self",
".",
"xsl_withparam",
"(",
"\"nsid\"",
",",
"rpc",
".",
"i_module",
".",
"i_modulename",
"+",
"\":\"",
",",
"ct",
")",
"self",
".",
"process_children",
"(",
"inp",
",",
"p",
",",
"2",
")",
"outp",
"=",
"rpc",
".",
"search_one",
"(",
"\"output\"",
")",
"if",
"outp",
"is",
"not",
"None",
":",
"self",
".",
"process_children",
"(",
"outp",
",",
"\"/nc:rpc-reply\"",
",",
"1",
")"
]
| Process input and output parts of `rpc`. | [
"Process",
"input",
"and",
"output",
"parts",
"of",
"rpc",
"."
]
| python | train |
sdss/sdss_access | python/sdss_access/path/path.py | https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/path/path.py#L453-L485 | def refine(self, filelist, regex, filterdir='out', **kwargs):
    ''' Returns a list of files filtered by a regular expression
    Parameters
    ----------
    filelist : list
        A list of files to filter on.
    regex : str
        The regular expression string to filter your list
    filterdir: {'in', 'out'}
        Indicates the filter to be inclusive or exclusive
        'out' removes the items satisfying the regular expression
        'in' keeps the items satisfying the regular expression
    Returns
    -------
    refine : list
        A file list refined by an input regular expression.
    '''
    assert filelist, 'Must provide a list of filenames to refine on'
    assert regex, 'Must provide a regular expression to refine the file list'
    r = re.compile(regex)
    # check filter direction; default is out
    assert filterdir in ['in', 'out'], 'Filter direction must be either "in" or "out"'
    if filterdir == 'out':
        subset = list(filter(lambda i: r.search(i), filelist))
    elif filterdir == 'in':
        subset = list(filter(lambda i: not r.search(i), filelist))
    return subset | [
"def",
"refine",
"(",
"self",
",",
"filelist",
",",
"regex",
",",
"filterdir",
"=",
"'out'",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"filelist",
",",
"'Must provide a list of filenames to refine on'",
"assert",
"regex",
",",
"'Must provide a regular expression to refine the file list'",
"r",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"# icheck filter direction; default is out",
"assert",
"filterdir",
"in",
"[",
"'in'",
",",
"'out'",
"]",
",",
"'Filter direction must be either \"in\" or \"out\"'",
"if",
"filterdir",
"==",
"'out'",
":",
"subset",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"i",
":",
"r",
".",
"search",
"(",
"i",
")",
",",
"filelist",
")",
")",
"elif",
"filterdir",
"==",
"'in'",
":",
"subset",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"i",
":",
"not",
"r",
".",
"search",
"(",
"i",
")",
",",
"filelist",
")",
")",
"return",
"subset"
]
| Returns a list of files filtered by a regular expression
Parameters
----------
filelist : list
A list of files to filter on.
regex : str
The regular expression string to filter your list
filterdir: {'in', 'out'}
Indicates the filter to be inclusive or exclusive
'out' removes the items satisfying the regular expression
'in' keeps the items satisfying the regular expression
Returns
-------
refine : list
A file list refined by an input regular expression. | [
"Returns",
"a",
"list",
"of",
"files",
"filterd",
"by",
"a",
"regular",
"expression"
]
| python | train |
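Note: the `refine` record above is essentially a regex filter over a list of names; a standalone sketch of the same pattern using only `re` (the file names are made up for illustration):

```python
import re

filelist = ["spec-0001.fits", "spec-0002.log", "image-0003.fits"]
r = re.compile(r"\.fits$")

# Mirrors the filterdir == 'out' branch above: keep items the regex matches.
kept = [f for f in filelist if r.search(f)]
# Mirrors the filterdir == 'in' branch above: keep items the regex does not match.
dropped = [f for f in filelist if not r.search(f)]

print(kept)     # ['spec-0001.fits', 'image-0003.fits']
print(dropped)  # ['spec-0002.log']
```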
llllllllll/codetransformer | codetransformer/decompiler/_343.py | https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/decompiler/_343.py#L182-L211 | def make_if_statement(instr, queue, stack, context):
"""
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
"""
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side='left')
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
# First instruction after the whole if-block.
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side='left'),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) | [
"def",
"make_if_statement",
"(",
"instr",
",",
"queue",
",",
"stack",
",",
"context",
")",
":",
"test_expr",
"=",
"make_expr",
"(",
"stack",
")",
"if",
"isinstance",
"(",
"instr",
",",
"instrs",
".",
"POP_JUMP_IF_TRUE",
")",
":",
"test_expr",
"=",
"ast",
".",
"UnaryOp",
"(",
"op",
"=",
"ast",
".",
"Not",
"(",
")",
",",
"operand",
"=",
"test_expr",
")",
"first_block",
"=",
"popwhile",
"(",
"op",
".",
"is_not",
"(",
"instr",
".",
"arg",
")",
",",
"queue",
",",
"side",
"=",
"'left'",
")",
"if",
"isinstance",
"(",
"first_block",
"[",
"-",
"1",
"]",
",",
"instrs",
".",
"RETURN_VALUE",
")",
":",
"body",
"=",
"instrs_to_body",
"(",
"first_block",
",",
"context",
")",
"return",
"ast",
".",
"If",
"(",
"test",
"=",
"test_expr",
",",
"body",
"=",
"body",
",",
"orelse",
"=",
"[",
"]",
")",
"jump_to_end",
"=",
"expect",
"(",
"first_block",
".",
"pop",
"(",
")",
",",
"instrs",
".",
"JUMP_FORWARD",
",",
"\"at end of if-block\"",
")",
"body",
"=",
"instrs_to_body",
"(",
"first_block",
",",
"context",
")",
"# First instruction after the whole if-block.",
"end",
"=",
"jump_to_end",
".",
"arg",
"if",
"instr",
".",
"arg",
"is",
"jump_to_end",
".",
"arg",
":",
"orelse",
"=",
"[",
"]",
"else",
":",
"orelse",
"=",
"instrs_to_body",
"(",
"popwhile",
"(",
"op",
".",
"is_not",
"(",
"end",
")",
",",
"queue",
",",
"side",
"=",
"'left'",
")",
",",
"context",
",",
")",
"return",
"ast",
".",
"If",
"(",
"test",
"=",
"test_expr",
",",
"body",
"=",
"body",
",",
"orelse",
"=",
"orelse",
")"
]
| Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE. | [
"Make",
"an",
"ast",
".",
"If",
"block",
"from",
"a",
"POP_JUMP_IF_TRUE",
"or",
"POP_JUMP_IF_FALSE",
"."
]
| python | train |
markchil/gptools | gptools/mean.py | https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/mean.py#L446-L484 | def linear(X, n, *args, **kwargs):
"""Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.
The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.
Parameters
----------
X : array, (`M`, `D`)
The points to evaluate the model at.
n : array of non-negative int, (`D`)
The derivative order to take, specified as an integer order for each
dimension in `X`.
*args : num_dim+1 floats
The slopes for each dimension, plus the constant term. Must be of the
form `m0, m1, ..., b`.
"""
hyper_deriv = kwargs.pop('hyper_deriv', None)
m = scipy.asarray(args[:-1])
b = args[-1]
if sum(n) > 1:
return scipy.zeros(X.shape[0])
elif sum(n) == 0:
if hyper_deriv is not None:
if hyper_deriv < len(m):
return X[:, hyper_deriv]
elif hyper_deriv == len(m):
return scipy.ones(X.shape[0])
else:
raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
else:
return (m * X).sum(axis=1) + b
else:
# sum(n) == 1:
if hyper_deriv is not None:
if n[hyper_deriv] == 1:
return scipy.ones(X.shape[0])
else:
return scipy.zeros(X.shape[0])
return m[n == 1] * scipy.ones(X.shape[0]) | [
"def",
"linear",
"(",
"X",
",",
"n",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"hyper_deriv",
"=",
"kwargs",
".",
"pop",
"(",
"'hyper_deriv'",
",",
"None",
")",
"m",
"=",
"scipy",
".",
"asarray",
"(",
"args",
"[",
":",
"-",
"1",
"]",
")",
"b",
"=",
"args",
"[",
"-",
"1",
"]",
"if",
"sum",
"(",
"n",
")",
">",
"1",
":",
"return",
"scipy",
".",
"zeros",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"elif",
"sum",
"(",
"n",
")",
"==",
"0",
":",
"if",
"hyper_deriv",
"is",
"not",
"None",
":",
"if",
"hyper_deriv",
"<",
"len",
"(",
"m",
")",
":",
"return",
"X",
"[",
":",
",",
"hyper_deriv",
"]",
"elif",
"hyper_deriv",
"==",
"len",
"(",
"m",
")",
":",
"return",
"scipy",
".",
"ones",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for hyper_deriv, \"",
"+",
"str",
"(",
"hyper_deriv",
")",
")",
"else",
":",
"return",
"(",
"m",
"*",
"X",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"+",
"b",
"else",
":",
"# sum(n) == 1:",
"if",
"hyper_deriv",
"is",
"not",
"None",
":",
"if",
"n",
"[",
"hyper_deriv",
"]",
"==",
"1",
":",
"return",
"scipy",
".",
"ones",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"return",
"scipy",
".",
"zeros",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"return",
"m",
"[",
"n",
"==",
"1",
"]",
"*",
"scipy",
".",
"ones",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")"
]
| Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.
The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.
Parameters
----------
X : array, (`M`, `D`)
The points to evaluate the model at.
n : array of non-negative int, (`D`)
The derivative order to take, specified as an integer order for each
dimension in `X`.
*args : num_dim+1 floats
The slopes for each dimension, plus the constant term. Must be of the
form `m0, m1, ..., b`. | [
"Linear",
"mean",
"function",
"of",
"arbitrary",
"dimension",
"suitable",
"for",
"use",
"with",
":",
"py",
":",
"class",
":",
"MeanFunction",
".",
"The",
"form",
"is",
":",
"math",
":",
"m_0",
"*",
"X",
"[",
":",
"0",
"]",
"+",
"m_1",
"*",
"X",
"[",
":",
"1",
"]",
"+",
"\\",
"dots",
"+",
"b",
".",
"Parameters",
"----------",
"X",
":",
"array",
"(",
"M",
"D",
")",
"The",
"points",
"to",
"evaluate",
"the",
"model",
"at",
".",
"n",
":",
"array",
"of",
"non",
"-",
"negative",
"int",
"(",
"D",
")",
"The",
"derivative",
"order",
"to",
"take",
"specified",
"as",
"an",
"integer",
"order",
"for",
"each",
"dimension",
"in",
"X",
".",
"*",
"args",
":",
"num_dim",
"+",
"1",
"floats",
"The",
"slopes",
"for",
"each",
"dimension",
"plus",
"the",
"constant",
"term",
".",
"Must",
"be",
"of",
"the",
"form",
"m0",
"m1",
"...",
"b",
"."
]
| python | train |
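Note: for the `linear` record above, the no-derivative case (sum(n) == 0 with no hyper_deriv) reduces to the plain dot product m·x + b; a small NumPy check of that branch with made-up slopes and points:

```python
import numpy as np

m = np.array([2.0, -1.0])          # slopes per dimension (illustrative)
b = 0.5                            # constant term (illustrative)
X = np.array([[1.0, 3.0],
              [0.0, 2.0]])         # two points in two dimensions

# Same expression as the sum(n) == 0 branch: (m * X).sum(axis=1) + b
print((m * X).sum(axis=1) + b)     # [-0.5 -1.5]
```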
andymccurdy/redis-py | redis/client.py | https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L2164-L2182 | def xrange(self, name, min='-', max='+', count=None):
"""
Read stream values within an interval.
name: name of the stream.
start: first stream ID. defaults to '-',
meaning the earliest available.
finish: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available.
"""
pieces = [min, max]
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XRANGE count must be a positive integer')
pieces.append(Token.get_token('COUNT'))
pieces.append(str(count))
return self.execute_command('XRANGE', name, *pieces) | [
"def",
"xrange",
"(",
"self",
",",
"name",
",",
"min",
"=",
"'-'",
",",
"max",
"=",
"'+'",
",",
"count",
"=",
"None",
")",
":",
"pieces",
"=",
"[",
"min",
",",
"max",
"]",
"if",
"count",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"count",
",",
"(",
"int",
",",
"long",
")",
")",
"or",
"count",
"<",
"1",
":",
"raise",
"DataError",
"(",
"'XRANGE count must be a positive integer'",
")",
"pieces",
".",
"append",
"(",
"Token",
".",
"get_token",
"(",
"'COUNT'",
")",
")",
"pieces",
".",
"append",
"(",
"str",
"(",
"count",
")",
")",
"return",
"self",
".",
"execute_command",
"(",
"'XRANGE'",
",",
"name",
",",
"*",
"pieces",
")"
]
| Read stream values within an interval.
name: name of the stream.
start: first stream ID. defaults to '-',
meaning the earliest available.
finish: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available. | [
"Read",
"stream",
"values",
"within",
"an",
"interval",
".",
"name",
":",
"name",
"of",
"the",
"stream",
".",
"start",
":",
"first",
"stream",
"ID",
".",
"defaults",
"to",
"-",
"meaning",
"the",
"earliest",
"available",
".",
"finish",
":",
"last",
"stream",
"ID",
".",
"defaults",
"to",
"+",
"meaning",
"the",
"latest",
"available",
".",
"count",
":",
"if",
"set",
"only",
"return",
"this",
"many",
"items",
"beginning",
"with",
"the",
"earliest",
"available",
"."
]
| python | train |
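Note: a usage sketch for the `xrange` record above, assuming a reachable Redis server with stream support (Redis 5.0 or newer); the stream key and field values are illustrative.

```python
# Illustrative only: requires a running Redis server on localhost.
import redis

r = redis.Redis(host="localhost", port=6379)
r.xadd("mystream", {"sensor": "42"})                     # append one stream entry
for entry_id, fields in r.xrange("mystream", count=10):  # read back up to 10 entries
    print(entry_id, fields)
```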
Cog-Creators/Red-Lavalink | lavalink/rest_api.py | https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/rest_api.py#L102-L111 | def exception_message(self) -> Union[str, None]:
"""
On Lavalink V3, if there was an exception during a load or get tracks call
this property will be populated with the error message.
If there was no error this property will be ``None``.
"""
if self.has_error:
exception_data = self._raw.get("exception", {})
return exception_data.get("message")
return None | [
"def",
"exception_message",
"(",
"self",
")",
"->",
"Union",
"[",
"str",
",",
"None",
"]",
":",
"if",
"self",
".",
"has_error",
":",
"exception_data",
"=",
"self",
".",
"_raw",
".",
"get",
"(",
"\"exception\"",
",",
"{",
"}",
")",
"return",
"exception_data",
".",
"get",
"(",
"\"message\"",
")",
"return",
"None"
]
| On Lavalink V3, if there was an exception during a load or get tracks call
this property will be populated with the error message.
If there was no error this property will be ``None``. | [
"On",
"Lavalink",
"V3",
"if",
"there",
"was",
"an",
"exception",
"during",
"a",
"load",
"or",
"get",
"tracks",
"call",
"this",
"property",
"will",
"be",
"populated",
"with",
"the",
"error",
"message",
".",
"If",
"there",
"was",
"no",
"error",
"this",
"property",
"will",
"be",
"None",
"."
]
| python | train |
jamieleshaw/lurklib | lurklib/channel.py | https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L289-L335 | def names(self, channel):
"""
Get a list of users in the channel.
Required arguments:
* channel - Channel to get list of users for.
"""
with self.lock:
self.is_in_channel(channel)
self.send('NAMES %s' % channel)
names = []
while self.readable():
msg = self._recv(expected_replies=('353', '366'))
if msg[0] == '353':
new_names = msg[2].split()[2:]
new_names[0] = new_names[0].replace(':', '', 1)
names.extend(new_names)
elif msg[0] == '366':
channel = msg[2].split()[0]
break
for name in names:
prefix = ''
if name[0] in self.priv_types:
prefix = name[0]
name = name[1:]
if prefix == '~':
self.channels[channel]['USERS'][name] = \
['~', '', '', '', '']
elif prefix == '&':
self.channels[channel]['USERS'][name] = \
['', '&', '', '', '']
elif prefix == '@':
self.channels[channel]['USERS'][name] = \
['', '', '@', '', '']
elif prefix == '%':
self.channels[channel]['USERS'][name] = \
['', '', '', '%', '']
elif prefix == '+':
self.channels[channel]['USERS'][name] = \
['', '', '', '', '+']
else:
self.channels[channel]['USERS'][name] = \
['', '', '', '', '']
return names | [
"def",
"names",
"(",
"self",
",",
"channel",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"is_in_channel",
"(",
"channel",
")",
"self",
".",
"send",
"(",
"'NAMES %s'",
"%",
"channel",
")",
"names",
"=",
"[",
"]",
"while",
"self",
".",
"readable",
"(",
")",
":",
"msg",
"=",
"self",
".",
"_recv",
"(",
"expected_replies",
"=",
"(",
"'353'",
",",
"'366'",
")",
")",
"if",
"msg",
"[",
"0",
"]",
"==",
"'353'",
":",
"new_names",
"=",
"msg",
"[",
"2",
"]",
".",
"split",
"(",
")",
"[",
"2",
":",
"]",
"new_names",
"[",
"0",
"]",
"=",
"new_names",
"[",
"0",
"]",
".",
"replace",
"(",
"':'",
",",
"''",
",",
"1",
")",
"names",
".",
"extend",
"(",
"new_names",
")",
"elif",
"msg",
"[",
"0",
"]",
"==",
"'366'",
":",
"channel",
"=",
"msg",
"[",
"2",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"break",
"for",
"name",
"in",
"names",
":",
"prefix",
"=",
"''",
"if",
"name",
"[",
"0",
"]",
"in",
"self",
".",
"priv_types",
":",
"prefix",
"=",
"name",
"[",
"0",
"]",
"name",
"=",
"name",
"[",
"1",
":",
"]",
"if",
"prefix",
"==",
"'~'",
":",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'USERS'",
"]",
"[",
"name",
"]",
"=",
"[",
"'~'",
",",
"''",
",",
"''",
",",
"''",
",",
"''",
"]",
"elif",
"prefix",
"==",
"'&'",
":",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'USERS'",
"]",
"[",
"name",
"]",
"=",
"[",
"''",
",",
"'&'",
",",
"''",
",",
"''",
",",
"''",
"]",
"elif",
"prefix",
"==",
"'@'",
":",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'USERS'",
"]",
"[",
"name",
"]",
"=",
"[",
"''",
",",
"''",
",",
"'@'",
",",
"''",
",",
"''",
"]",
"elif",
"prefix",
"==",
"'%'",
":",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'USERS'",
"]",
"[",
"name",
"]",
"=",
"[",
"''",
",",
"''",
",",
"''",
",",
"'%'",
",",
"''",
"]",
"elif",
"prefix",
"==",
"'+'",
":",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'USERS'",
"]",
"[",
"name",
"]",
"=",
"[",
"''",
",",
"''",
",",
"''",
",",
"''",
",",
"'+'",
"]",
"else",
":",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'USERS'",
"]",
"[",
"name",
"]",
"=",
"[",
"''",
",",
"''",
",",
"''",
",",
"''",
",",
"''",
"]",
"return",
"names"
]
| Get a list of users in the channel.
Required arguments:
* channel - Channel to get list of users for. | [
"Get",
"a",
"list",
"of",
"users",
"in",
"the",
"channel",
".",
"Required",
"arguments",
":",
"*",
"channel",
"-",
"Channel",
"to",
"get",
"list",
"of",
"users",
"for",
"."
]
| python | train |
GoogleCloudPlatform/cloud-debug-python | src/googleclouddebugger/gcp_hub_client.py | https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/gcp_hub_client.py#L475-L497 | def _ComputeUniquifier(self, debuggee):
"""Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version.
"""
uniquifier = hashlib.sha1()
# Compute hash of application files if we don't have source context. This
# way we can still distinguish between different deployments.
if ('minorversion' not in debuggee.get('labels', []) and
'sourceContexts' not in debuggee):
uniquifier_computer.ComputeApplicationUniquifier(uniquifier)
return uniquifier.hexdigest() | [
"def",
"_ComputeUniquifier",
"(",
"self",
",",
"debuggee",
")",
":",
"uniquifier",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"# Compute hash of application files if we don't have source context. This",
"# way we can still distinguish between different deployments.",
"if",
"(",
"'minorversion'",
"not",
"in",
"debuggee",
".",
"get",
"(",
"'labels'",
",",
"[",
"]",
")",
"and",
"'sourceContexts'",
"not",
"in",
"debuggee",
")",
":",
"uniquifier_computer",
".",
"ComputeApplicationUniquifier",
"(",
"uniquifier",
")",
"return",
"uniquifier",
".",
"hexdigest",
"(",
")"
]
| Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version. | [
"Computes",
"debuggee",
"uniquifier",
"."
]
| python | train |
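Note: the record above ultimately returns `hashlib.sha1().hexdigest()`; a tiny standalone sketch of that pattern (the bytes fed to the hash are made up, not the real uniquifier inputs):

```python
import hashlib

uniquifier = hashlib.sha1()
uniquifier.update(b"labels=module:default;version=1")  # illustrative input only
print(uniquifier.hexdigest())                          # 40-character hex digest
```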
tgalal/yowsup | yowsup/axolotl/manager.py | https://github.com/tgalal/yowsup/blob/b0739461ba962bf221fc76047d9d60d8ce61bc3e/yowsup/axolotl/manager.py#L192-L203 | def group_encrypt(self, groupid, message):
"""
:param groupid:
:type groupid: str
:param message:
:type message: bytes
:return:
:rtype:
"""
logger.debug("group_encrypt(groupid=%s, message=%s)" % (groupid, message))
group_cipher = self._get_group_cipher(groupid, self._username)
return group_cipher.encrypt(message + self._generate_random_padding()) | [
"def",
"group_encrypt",
"(",
"self",
",",
"groupid",
",",
"message",
")",
":",
"logger",
".",
"debug",
"(",
"\"group_encrypt(groupid=%s, message=%s)\"",
"%",
"(",
"groupid",
",",
"message",
")",
")",
"group_cipher",
"=",
"self",
".",
"_get_group_cipher",
"(",
"groupid",
",",
"self",
".",
"_username",
")",
"return",
"group_cipher",
".",
"encrypt",
"(",
"message",
"+",
"self",
".",
"_generate_random_padding",
"(",
")",
")"
]
| :param groupid:
:type groupid: str
:param message:
:type message: bytes
:return:
:rtype: | [
":",
"param",
"groupid",
":",
":",
"type",
"groupid",
":",
"str",
":",
"param",
"message",
":",
":",
"type",
"message",
":",
"bytes",
":",
"return",
":",
":",
"rtype",
":"
]
| python | train |
googleapis/google-cloud-python | error_reporting/google/cloud/errorreporting_v1beta1/gapic/error_stats_service_client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/error_reporting/google/cloud/errorreporting_v1beta1/gapic/error_stats_service_client.py#L472-L542 | def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"delete_events",
"(",
"self",
",",
"project_name",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"delete_events\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"delete_events\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"delete_events",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"DeleteEvents\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"DeleteEvents\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"error_stats_service_pb2",
".",
"DeleteEventsRequest",
"(",
"project_name",
"=",
"project_name",
")",
"if",
"metadata",
"is",
"None",
":",
"metadata",
"=",
"[",
"]",
"metadata",
"=",
"list",
"(",
"metadata",
")",
"try",
":",
"routing_header",
"=",
"[",
"(",
"\"project_name\"",
",",
"project_name",
")",
"]",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"routing_metadata",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"routing_header",
")",
"metadata",
".",
"append",
"(",
"routing_metadata",
")",
"return",
"self",
".",
"_inner_api_calls",
"[",
"\"delete_events\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
]
| Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Deletes",
"all",
"error",
"events",
"of",
"a",
"given",
"project",
"."
]
| python | train |
wummel/linkchecker | linkcheck/configuration/__init__.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/configuration/__init__.py#L123-L133 | def get_system_cert_file():
"""Try to find a system-wide SSL certificate file.
@return: the filename to the cert file
@raises: ValueError when no system cert file could be found
"""
if os.name == 'posix':
filename = "/etc/ssl/certs/ca-certificates.crt"
if os.path.isfile(filename):
return filename
msg = "no system certificate file found"
raise ValueError(msg) | [
"def",
"get_system_cert_file",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'posix'",
":",
"filename",
"=",
"\"/etc/ssl/certs/ca-certificates.crt\"",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"return",
"filename",
"msg",
"=",
"\"no system certificate file found\"",
"raise",
"ValueError",
"(",
"msg",
")"
]
| Try to find a system-wide SSL certificate file.
@return: the filename to the cert file
@raises: ValueError when no system cert file could be found | [
"Try",
"to",
"find",
"a",
"system",
"-",
"wide",
"SSL",
"certificate",
"file",
"."
]
| python | train |
shoebot/shoebot | lib/photobot/__init__.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/photobot/__init__.py#L433-L443 | def down(self):
"""Moves the layer down in the stacking order.
"""
i = self.index()
if i != None:
del self.canvas.layers[i]
i = max(0, i-1)
self.canvas.layers.insert(i, self) | [
"def",
"down",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"index",
"(",
")",
"if",
"i",
"!=",
"None",
":",
"del",
"self",
".",
"canvas",
".",
"layers",
"[",
"i",
"]",
"i",
"=",
"max",
"(",
"0",
",",
"i",
"-",
"1",
")",
"self",
".",
"canvas",
".",
"layers",
".",
"insert",
"(",
"i",
",",
"self",
")"
]
| Moves the layer down in the stacking order. | [
"Moves",
"the",
"layer",
"down",
"in",
"the",
"stacking",
"order",
"."
]
| python | valid |
jhermann/rituals | src/rituals/util/scm/git.py | https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/util/scm/git.py#L87-L90 | def tag(self, label, message=None):
"""Tag the current workdir state."""
options = ' -m "{}" -a'.format(message) if message else ''
self.run_elective('git tag{} "{}"'.format(options, label)) | [
"def",
"tag",
"(",
"self",
",",
"label",
",",
"message",
"=",
"None",
")",
":",
"options",
"=",
"' -m \"{}\" -a'",
".",
"format",
"(",
"message",
")",
"if",
"message",
"else",
"''",
"self",
".",
"run_elective",
"(",
"'git tag{} \"{}\"'",
".",
"format",
"(",
"options",
",",
"label",
")",
")"
]
| Tag the current workdir state. | [
"Tag",
"the",
"current",
"workdir",
"state",
"."
]
| python | valid |
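Note: the `tag` record above only formats and runs a git command; a standalone check of the string formatting it performs (label and message are made up):

```python
label, message = "v1.2.0", "Release 1.2.0"
options = ' -m "{}" -a'.format(message) if message else ''
print('git tag{} "{}"'.format(options, label))
# git tag -m "Release 1.2.0" -a "v1.2.0"
```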
waqasbhatti/astrobase | astrobase/lcmath.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L1233-L1334 | def phase_bin_magseries(phases, mags,
binsize=0.005,
minbinelems=7):
'''Bins a phased magnitude/flux time-series using the bin size provided.
Parameters
----------
phases,mags : np.array
The phased magnitude/flux time-series to bin in phase. Non-finite
elements will be removed from these arrays. At least 10 elements in each
array are required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'phasebin_indices': a list of the index arrays into the
nan-filtered input arrays per each bin,
'phasebins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedphases': the phase values associated with each phase bin;
this is the median of the phase value in each bin,
'binnedmags': the mag/flux values associated with each phase bin;
this is the median of the mags/fluxes in each bin}
'''
# check if the input arrays are ok
if not(phases.shape and mags.shape and len(phases) > 10 and len(mags) > 10):
LOGERROR("input time/mag arrays don't have enough elements")
return
# find all the finite values of the magnitudes and phases
finiteind = np.isfinite(mags) & np.isfinite(phases)
finite_phases = phases[finiteind]
finite_mags = mags[finiteind]
nbins = int(np.ceil((np.nanmax(finite_phases) -
np.nanmin(finite_phases))/binsize) + 1)
minphase = np.nanmin(finite_phases)
phasebins = [(minphase + x*binsize) for x in range(nbins)]
# make a KD-tree on the PHASEs so we can do fast distance calculations. we
# need to add a bogus y coord to make this a problem that KD-trees can
# solve.
time_coords = np.array([[x,1.0] for x in finite_phases])
phasetree = cKDTree(time_coords)
binned_finite_phaseseries_indices = []
collected_binned_mags = {}
for phase in phasebins:
# find all bin indices close to within binsize of this point using the
# cKDTree query. we use the p-norm = 1 for pairwise Euclidean distance.
bin_indices = phasetree.query_ball_point(np.array([phase,1.0]),
binsize/2.0, p=1.0)
# if the bin_indices have already been collected, then we're
# done with this bin, move to the next one. if they haven't,
# then this is the start of a new bin.
if (bin_indices not in binned_finite_phaseseries_indices and
len(bin_indices) >= minbinelems):
binned_finite_phaseseries_indices.append(bin_indices)
# convert to ndarrays
binned_finite_phaseseries_indices = [np.array(x) for x in
binned_finite_phaseseries_indices]
collected_binned_mags['phasebins_indices'] = (
binned_finite_phaseseries_indices
)
collected_binned_mags['phasebins'] = phasebins
collected_binned_mags['nbins'] = len(binned_finite_phaseseries_indices)
# collect the finite_phases
binned_phase = np.array([np.median(finite_phases[x])
for x in binned_finite_phaseseries_indices])
collected_binned_mags['binnedphases'] = binned_phase
collected_binned_mags['binsize'] = binsize
# median bin the magnitudes according to the calculated indices
collected_binned_mags['binnedmags'] = (
np.array([np.median(finite_mags[x])
for x in binned_finite_phaseseries_indices])
)
return collected_binned_mags | [
"def",
"phase_bin_magseries",
"(",
"phases",
",",
"mags",
",",
"binsize",
"=",
"0.005",
",",
"minbinelems",
"=",
"7",
")",
":",
"# check if the input arrays are ok",
"if",
"not",
"(",
"phases",
".",
"shape",
"and",
"mags",
".",
"shape",
"and",
"len",
"(",
"phases",
")",
">",
"10",
"and",
"len",
"(",
"mags",
")",
">",
"10",
")",
":",
"LOGERROR",
"(",
"\"input time/mag arrays don't have enough elements\"",
")",
"return",
"# find all the finite values of the magnitudes and phases",
"finiteind",
"=",
"np",
".",
"isfinite",
"(",
"mags",
")",
"&",
"np",
".",
"isfinite",
"(",
"phases",
")",
"finite_phases",
"=",
"phases",
"[",
"finiteind",
"]",
"finite_mags",
"=",
"mags",
"[",
"finiteind",
"]",
"nbins",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"np",
".",
"nanmax",
"(",
"finite_phases",
")",
"-",
"np",
".",
"nanmin",
"(",
"finite_phases",
")",
")",
"/",
"binsize",
")",
"+",
"1",
")",
"minphase",
"=",
"np",
".",
"nanmin",
"(",
"finite_phases",
")",
"phasebins",
"=",
"[",
"(",
"minphase",
"+",
"x",
"*",
"binsize",
")",
"for",
"x",
"in",
"range",
"(",
"nbins",
")",
"]",
"# make a KD-tree on the PHASEs so we can do fast distance calculations. we",
"# need to add a bogus y coord to make this a problem that KD-trees can",
"# solve.",
"time_coords",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"x",
",",
"1.0",
"]",
"for",
"x",
"in",
"finite_phases",
"]",
")",
"phasetree",
"=",
"cKDTree",
"(",
"time_coords",
")",
"binned_finite_phaseseries_indices",
"=",
"[",
"]",
"collected_binned_mags",
"=",
"{",
"}",
"for",
"phase",
"in",
"phasebins",
":",
"# find all bin indices close to within binsize of this point using the",
"# cKDTree query. we use the p-norm = 1 for pairwise Euclidean distance.",
"bin_indices",
"=",
"phasetree",
".",
"query_ball_point",
"(",
"np",
".",
"array",
"(",
"[",
"phase",
",",
"1.0",
"]",
")",
",",
"binsize",
"/",
"2.0",
",",
"p",
"=",
"1.0",
")",
"# if the bin_indices have already been collected, then we're",
"# done with this bin, move to the next one. if they haven't,",
"# then this is the start of a new bin.",
"if",
"(",
"bin_indices",
"not",
"in",
"binned_finite_phaseseries_indices",
"and",
"len",
"(",
"bin_indices",
")",
">=",
"minbinelems",
")",
":",
"binned_finite_phaseseries_indices",
".",
"append",
"(",
"bin_indices",
")",
"# convert to ndarrays",
"binned_finite_phaseseries_indices",
"=",
"[",
"np",
".",
"array",
"(",
"x",
")",
"for",
"x",
"in",
"binned_finite_phaseseries_indices",
"]",
"collected_binned_mags",
"[",
"'phasebins_indices'",
"]",
"=",
"(",
"binned_finite_phaseseries_indices",
")",
"collected_binned_mags",
"[",
"'phasebins'",
"]",
"=",
"phasebins",
"collected_binned_mags",
"[",
"'nbins'",
"]",
"=",
"len",
"(",
"binned_finite_phaseseries_indices",
")",
"# collect the finite_phases",
"binned_phase",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"finite_phases",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"binned_finite_phaseseries_indices",
"]",
")",
"collected_binned_mags",
"[",
"'binnedphases'",
"]",
"=",
"binned_phase",
"collected_binned_mags",
"[",
"'binsize'",
"]",
"=",
"binsize",
"# median bin the magnitudes according to the calculated indices",
"collected_binned_mags",
"[",
"'binnedmags'",
"]",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"finite_mags",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"binned_finite_phaseseries_indices",
"]",
")",
")",
"return",
"collected_binned_mags"
]
| Bins a phased magnitude/flux time-series using the bin size provided.
Parameters
----------
phases,mags : np.array
The phased magnitude/flux time-series to bin in phase. Non-finite
elements will be removed from these arrays. At least 10 elements in each
array are required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'phasebin_indices': a list of the index arrays into the
nan-filtered input arrays per each bin,
'phasebins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedphases': the phase values associated with each phase bin;
this is the median of the phase value in each bin,
'binnedmags': the mag/flux values associated with each phase bin;
this is the median of the mags/fluxes in each bin} | [
"Bins",
"a",
"phased",
"magnitude",
"/",
"flux",
"time",
"-",
"series",
"using",
"the",
"bin",
"size",
"provided",
"."
]
| python | valid |
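Note: a usage sketch for the `phase_bin_magseries` record above, assuming astrobase is installed; the synthetic phases and magnitudes are illustrative.

```python
import numpy as np
from astrobase.lcmath import phase_bin_magseries  # assumes astrobase is importable

phases = np.random.uniform(0.0, 1.0, size=2000)
mags = 12.0 + 0.05 * np.sin(2.0 * np.pi * phases) + np.random.normal(0.0, 0.01, 2000)

binned = phase_bin_magseries(phases, mags, binsize=0.01, minbinelems=5)
print(binned['nbins'], binned['binnedphases'][:3], binned['binnedmags'][:3])
```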
samfoo/vt102 | vt102/__init__.py | https://github.com/samfoo/vt102/blob/ff5be883bc9a880a422b09bb87b210d7c408cf2c/vt102/__init__.py#L209-L221 | def _end_escape_sequence(self, char):
"""
Handle the end of an escape sequence. The final character in an escape
sequence is the command to execute, which corresponds to the event that
is dispatched here.
"""
num = ord(char)
if num in self.sequence:
self.dispatch(self.sequence[num], *self.params)
self.state = "stream"
self.current_param = ""
self.params = [] | [
"def",
"_end_escape_sequence",
"(",
"self",
",",
"char",
")",
":",
"num",
"=",
"ord",
"(",
"char",
")",
"if",
"num",
"in",
"self",
".",
"sequence",
":",
"self",
".",
"dispatch",
"(",
"self",
".",
"sequence",
"[",
"num",
"]",
",",
"*",
"self",
".",
"params",
")",
"self",
".",
"state",
"=",
"\"stream\"",
"self",
".",
"current_param",
"=",
"\"\"",
"self",
".",
"params",
"=",
"[",
"]"
]
| Handle the end of an escape sequence. The final character in an escape
sequence is the command to execute, which corresponds to the event that
is dispatched here. | [
"Handle",
"the",
"end",
"of",
"an",
"escape",
"sequence",
".",
"The",
"final",
"character",
"in",
"an",
"escape",
"sequence",
"is",
"the",
"command",
"to",
"execute",
"which",
"corresponds",
"to",
"the",
"event",
"that",
"is",
"dispatched",
"here",
"."
]
| python | train |
cloudbase/python-hnvclient | hnv/client.py | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L811-L816 | def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
raw_settings = raw_data.get("qosSettings", {})
qos_settings = QosSettings.from_raw_data(raw_settings)
raw_data["qosSettings"] = qos_settings
return super(PortSettings, cls).process_raw_data(raw_data) | [
"def",
"process_raw_data",
"(",
"cls",
",",
"raw_data",
")",
":",
"raw_settings",
"=",
"raw_data",
".",
"get",
"(",
"\"qosSettings\"",
",",
"{",
"}",
")",
"qos_settings",
"=",
"QosSettings",
".",
"from_raw_data",
"(",
"raw_settings",
")",
"raw_data",
"[",
"\"qosSettings\"",
"]",
"=",
"qos_settings",
"return",
"super",
"(",
"PortSettings",
",",
"cls",
")",
".",
"process_raw_data",
"(",
"raw_data",
")"
]
| Create a new model using raw API response. | [
"Create",
"a",
"new",
"model",
"using",
"raw",
"API",
"response",
"."
]
| python | train |
alexandrovteam/pyimzML | pyimzml/ImzMLParser.py | https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L266-L283 | def get_physical_coordinates(self, i):
"""
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
"""
try:
pixel_size_x = self.imzmldict["pixel size x"]
pixel_size_y = self.imzmldict["pixel size y"]
except KeyError:
raise KeyError("Could not find all pixel size attributes in imzML file")
image_x, image_y = self.coordinates[i][:2]
return image_x * pixel_size_x, image_y * pixel_size_y | [
"def",
"get_physical_coordinates",
"(",
"self",
",",
"i",
")",
":",
"try",
":",
"pixel_size_x",
"=",
"self",
".",
"imzmldict",
"[",
"\"pixel size x\"",
"]",
"pixel_size_y",
"=",
"self",
".",
"imzmldict",
"[",
"\"pixel size y\"",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Could not find all pixel size attributes in imzML file\"",
")",
"image_x",
",",
"image_y",
"=",
"self",
".",
"coordinates",
"[",
"i",
"]",
"[",
":",
"2",
"]",
"return",
"image_x",
"*",
"pixel_size_x",
",",
"image_y",
"*",
"pixel_size_y"
]
| For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y" | [
"For",
"a",
"pixel",
"index",
"i",
"return",
"the",
"real",
"-",
"world",
"coordinates",
"in",
"nanometers",
"."
]
| python | train |
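Note: the conversion in the record above is just image coordinates multiplied by pixel size; written out for one pixel with made-up values (units follow whatever the imzML pixel-size attributes use):

```python
pixel_size_x, pixel_size_y = 50.0, 50.0   # from the imzML metadata (illustrative values)
image_x, image_y = 10, 20                 # image coordinates of pixel i (illustrative values)

print(image_x * pixel_size_x, image_y * pixel_size_y)   # 500.0 1000.0
```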
iandees/pyosm | pyosm/parsing.py | https://github.com/iandees/pyosm/blob/532dffceae91e2bce89c530ceff627bc8210f8aa/pyosm/parsing.py#L213-L280 | def iter_osm_stream(start_sqn=None, base_url='https://planet.openstreetmap.org/replication/minute', expected_interval=60, parse_timestamps=True, state_dir=None):
"""Start processing an OSM diff stream and yield one changeset at a time to
the caller."""
# If the user specifies a state_dir, read the state from the statefile there
if state_dir:
if not os.path.exists(state_dir):
raise Exception('Specified state_dir "%s" doesn\'t exist.' % state_dir)
if os.path.exists('%s/state.txt' % state_dir):
with open('%s/state.txt' % state_dir) as f:
state = readState(f)
start_sqn = state['sequenceNumber']
# If no start_sqn, assume to start from the most recent diff
if not start_sqn:
u = urllib2.urlopen('%s/state.txt' % base_url)
state = readState(u)
else:
sqnStr = str(start_sqn).zfill(9)
u = urllib2.urlopen('%s/%s/%s/%s.state.txt' % (base_url, sqnStr[0:3], sqnStr[3:6], sqnStr[6:9]))
state = readState(u)
interval_fudge = 0.0
while True:
sqnStr = state['sequenceNumber'].zfill(9)
url = '%s/%s/%s/%s.osc.gz' % (base_url, sqnStr[0:3], sqnStr[3:6], sqnStr[6:9])
content = urllib2.urlopen(url)
content = StringIO.StringIO(content.read())
gzipper = gzip.GzipFile(fileobj=content)
for a in iter_osm_change_file(gzipper, parse_timestamps):
yield a
# After parsing the OSC, check to see how much time is remaining
stateTs = datetime.datetime.strptime(state['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
yield (None, model.Finished(state['sequenceNumber'], stateTs))
nextTs = stateTs + datetime.timedelta(seconds=expected_interval + interval_fudge)
if datetime.datetime.utcnow() < nextTs:
timeToSleep = (nextTs - datetime.datetime.utcnow()).total_seconds()
else:
timeToSleep = 0.0
time.sleep(timeToSleep)
# Then try to fetch the next state file
sqnStr = str(int(state['sequenceNumber']) + 1).zfill(9)
url = '%s/%s/%s/%s.state.txt' % (base_url, sqnStr[0:3], sqnStr[3:6], sqnStr[6:9])
delay = 1.0
while True:
try:
u = urllib2.urlopen(url)
interval_fudge -= (interval_fudge / 2.0)
break
except urllib2.HTTPError as e:
if e.code == 404:
time.sleep(delay)
delay = min(delay * 2, 13)
interval_fudge += delay
if state_dir:
with open('%s/state.txt' % state_dir, 'w') as f:
f.write(u.read())
with open('%s/state.txt' % state_dir, 'r') as f:
state = readState(f)
else:
state = readState(u) | [
"def",
"iter_osm_stream",
"(",
"start_sqn",
"=",
"None",
",",
"base_url",
"=",
"'https://planet.openstreetmap.org/replication/minute'",
",",
"expected_interval",
"=",
"60",
",",
"parse_timestamps",
"=",
"True",
",",
"state_dir",
"=",
"None",
")",
":",
"# If the user specifies a state_dir, read the state from the statefile there",
"if",
"state_dir",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"state_dir",
")",
":",
"raise",
"Exception",
"(",
"'Specified state_dir \"%s\" doesn\\'t exist.'",
"%",
"state_dir",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"'%s/state.txt'",
"%",
"state_dir",
")",
":",
"with",
"open",
"(",
"'%s/state.txt'",
"%",
"state_dir",
")",
"as",
"f",
":",
"state",
"=",
"readState",
"(",
"f",
")",
"start_sqn",
"=",
"state",
"[",
"'sequenceNumber'",
"]",
"# If no start_sqn, assume to start from the most recent diff",
"if",
"not",
"start_sqn",
":",
"u",
"=",
"urllib2",
".",
"urlopen",
"(",
"'%s/state.txt'",
"%",
"base_url",
")",
"state",
"=",
"readState",
"(",
"u",
")",
"else",
":",
"sqnStr",
"=",
"str",
"(",
"start_sqn",
")",
".",
"zfill",
"(",
"9",
")",
"u",
"=",
"urllib2",
".",
"urlopen",
"(",
"'%s/%s/%s/%s.state.txt'",
"%",
"(",
"base_url",
",",
"sqnStr",
"[",
"0",
":",
"3",
"]",
",",
"sqnStr",
"[",
"3",
":",
"6",
"]",
",",
"sqnStr",
"[",
"6",
":",
"9",
"]",
")",
")",
"state",
"=",
"readState",
"(",
"u",
")",
"interval_fudge",
"=",
"0.0",
"while",
"True",
":",
"sqnStr",
"=",
"state",
"[",
"'sequenceNumber'",
"]",
".",
"zfill",
"(",
"9",
")",
"url",
"=",
"'%s/%s/%s/%s.osc.gz'",
"%",
"(",
"base_url",
",",
"sqnStr",
"[",
"0",
":",
"3",
"]",
",",
"sqnStr",
"[",
"3",
":",
"6",
"]",
",",
"sqnStr",
"[",
"6",
":",
"9",
"]",
")",
"content",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"content",
"=",
"StringIO",
".",
"StringIO",
"(",
"content",
".",
"read",
"(",
")",
")",
"gzipper",
"=",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"content",
")",
"for",
"a",
"in",
"iter_osm_change_file",
"(",
"gzipper",
",",
"parse_timestamps",
")",
":",
"yield",
"a",
"# After parsing the OSC, check to see how much time is remaining",
"stateTs",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"state",
"[",
"'timestamp'",
"]",
",",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
"yield",
"(",
"None",
",",
"model",
".",
"Finished",
"(",
"state",
"[",
"'sequenceNumber'",
"]",
",",
"stateTs",
")",
")",
"nextTs",
"=",
"stateTs",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"expected_interval",
"+",
"interval_fudge",
")",
"if",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"<",
"nextTs",
":",
"timeToSleep",
"=",
"(",
"nextTs",
"-",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
")",
".",
"total_seconds",
"(",
")",
"else",
":",
"timeToSleep",
"=",
"0.0",
"time",
".",
"sleep",
"(",
"timeToSleep",
")",
"# Then try to fetch the next state file",
"sqnStr",
"=",
"str",
"(",
"int",
"(",
"state",
"[",
"'sequenceNumber'",
"]",
")",
"+",
"1",
")",
".",
"zfill",
"(",
"9",
")",
"url",
"=",
"'%s/%s/%s/%s.state.txt'",
"%",
"(",
"base_url",
",",
"sqnStr",
"[",
"0",
":",
"3",
"]",
",",
"sqnStr",
"[",
"3",
":",
"6",
"]",
",",
"sqnStr",
"[",
"6",
":",
"9",
"]",
")",
"delay",
"=",
"1.0",
"while",
"True",
":",
"try",
":",
"u",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"interval_fudge",
"-=",
"(",
"interval_fudge",
"/",
"2.0",
")",
"break",
"except",
"urllib2",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"404",
":",
"time",
".",
"sleep",
"(",
"delay",
")",
"delay",
"=",
"min",
"(",
"delay",
"*",
"2",
",",
"13",
")",
"interval_fudge",
"+=",
"delay",
"if",
"state_dir",
":",
"with",
"open",
"(",
"'%s/state.txt'",
"%",
"state_dir",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"u",
".",
"read",
"(",
")",
")",
"with",
"open",
"(",
"'%s/state.txt'",
"%",
"state_dir",
",",
"'r'",
")",
"as",
"f",
":",
"state",
"=",
"readState",
"(",
"f",
")",
"else",
":",
"state",
"=",
"readState",
"(",
"u",
")"
]
| Start processing an OSM diff stream and yield one changeset at a time to
the caller. | [
"Start",
"processing",
"an",
"OSM",
"diff",
"stream",
"and",
"yield",
"one",
"changeset",
"at",
"a",
"time",
"to",
"the",
"caller",
"."
]
| python | test |
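Note: a usage sketch for the `iter_osm_stream` record above, assuming pyosm is installed and the replication server is reachable over the network; with no `start_sqn` it starts from the most recent minutely diff.

```python
# Illustrative only: streams live OpenStreetMap minutely diffs over HTTP.
from pyosm.parsing import iter_osm_stream

for item in iter_osm_stream():   # yields parsed changes and Finished markers
    print(item)
    break                        # stop after the first yielded item
```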
jwodder/doapi | doapi/droplet.py | https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L394-L410 | def rebuild(self, image):
"""
Rebuild the droplet with the specified image
A rebuild action functions just like a new create. [APIDocs]_
:param image: an image ID, an image slug, or an `Image` object
representing the image the droplet should use as a base
:type image: integer, string, or `Image`
:return: an `Action` representing the in-progress operation on the
droplet
:rtype: Action
:raises DOAPIError: if the API endpoint replies with an error
"""
if isinstance(image, Image):
image = image.id
return self.act(type='rebuild', image=image) | [
"def",
"rebuild",
"(",
"self",
",",
"image",
")",
":",
"if",
"isinstance",
"(",
"image",
",",
"Image",
")",
":",
"image",
"=",
"image",
".",
"id",
"return",
"self",
".",
"act",
"(",
"type",
"=",
"'rebuild'",
",",
"image",
"=",
"image",
")"
]
| Rebuild the droplet with the specified image
A rebuild action functions just like a new create. [APIDocs]_
:param image: an image ID, an image slug, or an `Image` object
representing the image the droplet should use as a base
:type image: integer, string, or `Image`
:return: an `Action` representing the in-progress operation on the
droplet
:rtype: Action
:raises DOAPIError: if the API endpoint replies with an error | [
"Rebuild",
"the",
"droplet",
"with",
"the",
"specified",
"image"
]
| python | train |
JamesPHoughton/pysd | pysd/py_backend/builder.py | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L25-L106 | def build(elements, subscript_dict, namespace, outfile_name):
"""
Actually constructs and writes the python representation of the model
Parameters
----------
elements: list
Each element is a dictionary, with the various components needed to assemble
a model component in python syntax. This will contain multiple entries for
elements that have multiple definitions in the original file, and which need
to be combined.
subscript_dict: dictionary
A dictionary containing the names of subscript families (dimensions) as keys, and
a list of the possible positions within that dimension for each value
namespace: dictionary
Translation from original model element names (keys) to python safe
function identifiers (values)
outfile_name: string
The name of the file to write the model to.
"""
# Todo: deal with model level documentation
# Todo: Make np, PySD.functions import conditional on usage in the file
# Todo: Make presence of subscript_dict instantiation conditional on usage
# Todo: Sort elements (alphabetically? group stock funcs?)
elements = merge_partial_elements(elements)
functions = [build_element(element, subscript_dict) for element in elements]
text = '''
"""
Python model "%(outfile)s"
Translated using PySD version %(version)s
"""
from __future__ import division
import numpy as np
from pysd import utils
import xarray as xr
from pysd.py_backend.functions import cache
from pysd.py_backend import functions
_subscript_dict = %(subscript_dict)s
_namespace = %(namespace)s
__pysd_version__ = "%(version)s"
__data = {
'scope': None,
'time': lambda: 0
}
def _init_outer_references(data):
for key in data:
__data[key] = data[key]
def time():
return __data['time']()
%(functions)s
''' % {'subscript_dict': repr(subscript_dict),
'functions': '\n'.join(functions),
# 'namespace': '{\n' + '\n'.join(['%s: %s' % (key, namespace[key]) for key in
# namespace.keys()]) + '\n}',
'namespace': repr(namespace),
'outfile': os.path.basename(outfile_name),
'version': __version__}
style_file = pkg_resources.resource_filename("pysd", "py_backend/output_style.yapf")
text = text.replace('\t', ' ')
text, changed = yapf.yapf_api.FormatCode(textwrap.dedent(text),
style_config=style_file)
# this is used for testing
if outfile_name == 'return':
return text
with open(outfile_name, 'w', encoding='UTF-8') as out:
out.write(text) | [
"def",
"build",
"(",
"elements",
",",
"subscript_dict",
",",
"namespace",
",",
"outfile_name",
")",
":",
"# Todo: deal with model level documentation",
"# Todo: Make np, PySD.functions import conditional on usage in the file",
"# Todo: Make presence of subscript_dict instantiation conditional on usage",
"# Todo: Sort elements (alphabetically? group stock funcs?)",
"elements",
"=",
"merge_partial_elements",
"(",
"elements",
")",
"functions",
"=",
"[",
"build_element",
"(",
"element",
",",
"subscript_dict",
")",
"for",
"element",
"in",
"elements",
"]",
"text",
"=",
"'''\n \"\"\"\n Python model \"%(outfile)s\"\n Translated using PySD version %(version)s\n \"\"\"\n from __future__ import division\n import numpy as np\n from pysd import utils\n import xarray as xr\n\n from pysd.py_backend.functions import cache\n from pysd.py_backend import functions\n\n _subscript_dict = %(subscript_dict)s\n\n _namespace = %(namespace)s\n\n __pysd_version__ = \"%(version)s\"\n \n __data = {\n 'scope': None,\n 'time': lambda: 0\n }\n\n def _init_outer_references(data):\n for key in data:\n __data[key] = data[key]\n \n def time():\n return __data['time']()\n \n %(functions)s\n\n '''",
"%",
"{",
"'subscript_dict'",
":",
"repr",
"(",
"subscript_dict",
")",
",",
"'functions'",
":",
"'\\n'",
".",
"join",
"(",
"functions",
")",
",",
"# 'namespace': '{\\n' + '\\n'.join(['%s: %s' % (key, namespace[key]) for key in",
"# namespace.keys()]) + '\\n}',",
"'namespace'",
":",
"repr",
"(",
"namespace",
")",
",",
"'outfile'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"outfile_name",
")",
",",
"'version'",
":",
"__version__",
"}",
"style_file",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"\"pysd\"",
",",
"\"py_backend/output_style.yapf\"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'\\t'",
",",
"' '",
")",
"text",
",",
"changed",
"=",
"yapf",
".",
"yapf_api",
".",
"FormatCode",
"(",
"textwrap",
".",
"dedent",
"(",
"text",
")",
",",
"style_config",
"=",
"style_file",
")",
"# this is used for testing",
"if",
"outfile_name",
"==",
"'return'",
":",
"return",
"text",
"with",
"open",
"(",
"outfile_name",
",",
"'w'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"text",
")"
]
| Actually constructs and writes the python representation of the model
Parameters
----------
elements: list
Each element is a dictionary, with the various components needed to assemble
a model component in python syntax. This will contain multiple entries for
elements that have multiple definitions in the original file, and which need
to be combined.
subscript_dict: dictionary
A dictionary containing the names of subscript families (dimensions) as keys, and
a list of the possible positions within that dimension for each value
namespace: dictionary
Translation from original model element names (keys) to python safe
function identifiers (values)
outfile_name: string
The name of the file to write the model to. | [
"Actually",
"constructs",
"and",
"writes",
"the",
"python",
"representation",
"of",
"the",
"model"
]
| python | train |
kontron/python-ipmi | pyipmi/interfaces/aardvark.py | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/aardvark.py#L131-L168 | def _send_and_receive(self, target, lun, netfn, cmdid, payload):
"""Send and receive data using aardvark interface.
target:
lun:
netfn:
cmdid:
payload: IPMI message payload as bytestring
Returns the received data as bytestring
"""
self._inc_sequence_number()
# assemble IPMB header
header = IpmbHeaderReq()
header.netfn = netfn
header.rs_lun = lun
header.rs_sa = target.ipmb_address
header.rq_seq = self.next_sequence_number
header.rq_lun = 0
header.rq_sa = self.slave_address
header.cmd_id = cmdid
retries = 0
while retries < self.max_retries:
try:
self._send_raw(header, payload)
rx_data = self._receive_raw(header)
break
except IpmiTimeoutError:
log().warning('I2C transaction timed out'),
retries += 1
else:
raise IpmiTimeoutError()
return rx_data.tostring()[5:-1] | [
"def",
"_send_and_receive",
"(",
"self",
",",
"target",
",",
"lun",
",",
"netfn",
",",
"cmdid",
",",
"payload",
")",
":",
"self",
".",
"_inc_sequence_number",
"(",
")",
"# assemble IPMB header",
"header",
"=",
"IpmbHeaderReq",
"(",
")",
"header",
".",
"netfn",
"=",
"netfn",
"header",
".",
"rs_lun",
"=",
"lun",
"header",
".",
"rs_sa",
"=",
"target",
".",
"ipmb_address",
"header",
".",
"rq_seq",
"=",
"self",
".",
"next_sequence_number",
"header",
".",
"rq_lun",
"=",
"0",
"header",
".",
"rq_sa",
"=",
"self",
".",
"slave_address",
"header",
".",
"cmd_id",
"=",
"cmdid",
"retries",
"=",
"0",
"while",
"retries",
"<",
"self",
".",
"max_retries",
":",
"try",
":",
"self",
".",
"_send_raw",
"(",
"header",
",",
"payload",
")",
"rx_data",
"=",
"self",
".",
"_receive_raw",
"(",
"header",
")",
"break",
"except",
"IpmiTimeoutError",
":",
"log",
"(",
")",
".",
"warning",
"(",
"'I2C transaction timed out'",
")",
",",
"retries",
"+=",
"1",
"else",
":",
"raise",
"IpmiTimeoutError",
"(",
")",
"return",
"rx_data",
".",
"tostring",
"(",
")",
"[",
"5",
":",
"-",
"1",
"]"
]
| Send and receive data using aardvark interface.
target:
lun:
netfn:
cmdid:
payload: IPMI message payload as bytestring
Returns the received data as bytestring | [
"Send",
"and",
"receive",
"data",
"using",
"aardvark",
"interface",
"."
]
| python | train |
matthew-sochor/transfer | transfer/input.py | https://github.com/matthew-sochor/transfer/blob/c1931a16459275faa7a5e9860fbed079a4848b80/transfer/input.py#L57-L76 | def bool_input(message):
'''
Ask a user for a boolean input
args:
message (str): Prompt for user
returns:
bool_in (boolean): Input boolean
'''
while True:
suffix = ' (true or false): '
inp = input(message + suffix)
if inp.lower() == 'true':
return True
elif inp.lower() == 'false':
return False
else:
print(colored('Must be either true or false, try again!', 'red')) | [
"def",
"bool_input",
"(",
"message",
")",
":",
"while",
"True",
":",
"suffix",
"=",
"' (true or false): '",
"inp",
"=",
"input",
"(",
"message",
"+",
"suffix",
")",
"if",
"inp",
".",
"lower",
"(",
")",
"==",
"'true'",
":",
"return",
"True",
"elif",
"inp",
".",
"lower",
"(",
")",
"==",
"'false'",
":",
"return",
"False",
"else",
":",
"print",
"(",
"colored",
"(",
"'Must be either true or false, try again!'",
",",
"'red'",
")",
")"
]
| Ask a user for a boolean input
args:
message (str): Prompt for user
returns:
bool_in (boolean): Input boolean | [
"Ask",
"a",
"user",
"for",
"a",
"boolean",
"input"
]
| python | train |
tgalal/yowsup | yowsup/config/transforms/config_dict.py | https://github.com/tgalal/yowsup/blob/b0739461ba962bf221fc76047d9d60d8ce61bc3e/yowsup/config/transforms/config_dict.py#L8-L18 | def transform(self, config):
"""
:param config:
:type config: dict
:return:
:rtype: yowsup.config.config.Config
"""
out = {}
for prop in vars(config):
out[prop] = getattr(config, prop)
return out | [
"def",
"transform",
"(",
"self",
",",
"config",
")",
":",
"out",
"=",
"{",
"}",
"for",
"prop",
"in",
"vars",
"(",
"config",
")",
":",
"out",
"[",
"prop",
"]",
"=",
"getattr",
"(",
"config",
",",
"prop",
")",
"return",
"out"
]
| :param config:
:type config: dict
:return:
:rtype: yowsup.config.config.Config | [
":",
"param",
"config",
":",
":",
"type",
"config",
":",
"dict",
":",
"return",
":",
":",
"rtype",
":",
"yowsup",
".",
"config",
".",
"config",
".",
"Config"
]
| python | train |
hvac/hvac | hvac/api/secrets_engines/azure.py | https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/azure.py#L99-L136 | def create_or_update_role(self, name, azure_roles, ttl="", max_ttl="", mount_point=DEFAULT_MOUNT_POINT):
"""Create or update a Vault role.
The provided Azure roles must exist for this call to succeed. See the Azure secrets roles docs for more
information about roles.
Supported methods:
POST: /{mount_point}/roles/{name}. Produces: 204 (empty body)
:param name: Name of the role.
:type name: str | unicode
:param azure_roles: List of Azure roles to be assigned to the generated service principal.
:type azure_roles: list(dict)
:param ttl: Specifies the default TTL for service principals generated using this role. Accepts time suffixed
strings ("1h") or an integer number of seconds. Defaults to the system/engine default TTL time.
:type ttl: str | unicode
:param max_ttl: Specifies the maximum TTL for service principals generated using this role. Accepts time
suffixed strings ("1h") or an integer number of seconds. Defaults to the system/engine max TTL time.
:type max_ttl: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'azure_roles': json.dumps(azure_roles),
'ttl': ttl,
'max_ttl': max_ttl,
}
api_path = '/v1/{mount_point}/roles/{name}'.format(
mount_point=mount_point,
name=name
)
return self._adapter.post(
url=api_path,
json=params,
) | [
"def",
"create_or_update_role",
"(",
"self",
",",
"name",
",",
"azure_roles",
",",
"ttl",
"=",
"\"\"",
",",
"max_ttl",
"=",
"\"\"",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"params",
"=",
"{",
"'azure_roles'",
":",
"json",
".",
"dumps",
"(",
"azure_roles",
")",
",",
"'ttl'",
":",
"ttl",
",",
"'max_ttl'",
":",
"max_ttl",
",",
"}",
"api_path",
"=",
"'/v1/{mount_point}/roles/{name}'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
",",
"name",
"=",
"name",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"url",
"=",
"api_path",
",",
"json",
"=",
"params",
",",
")"
]
| Create or update a Vault role.
The provided Azure roles must exist for this call to succeed. See the Azure secrets roles docs for more
information about roles.
Supported methods:
POST: /{mount_point}/roles/{name}. Produces: 204 (empty body)
:param name: Name of the role.
:type name: str | unicode
:param azure_roles: List of Azure roles to be assigned to the generated service principal.
:type azure_roles: list(dict)
:param ttl: Specifies the default TTL for service principals generated using this role. Accepts time suffixed
strings ("1h") or an integer number of seconds. Defaults to the system/engine default TTL time.
:type ttl: str | unicode
:param max_ttl: Specifies the maximum TTL for service principals generated using this role. Accepts time
suffixed strings ("1h") or an integer number of seconds. Defaults to the system/engine max TTL time.
:type max_ttl: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response | [
"Create",
"or",
"update",
"a",
"Vault",
"role",
"."
]
| python | train |
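Illustrative only: a minimal sketch of calling the role method from the record above through an hvac client. The Vault address, token, role name and Azure scope are assumed placeholders, not values from this record.

import hvac

client = hvac.Client(url='https://127.0.0.1:8200', token='...')  # placeholder address/token
# One Azure role assignment for service principals generated under this Vault role
azure_roles = [{
    'role_name': 'Contributor',                   # assumed Azure built-in role
    'scope': '/subscriptions/<subscription_id>',  # placeholder scope
}]
client.secrets.azure.create_or_update_role(
    name='my-app-role',        # hypothetical Vault role name
    azure_roles=azure_roles,
    ttl='1h',
    max_ttl='24h',
)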
eonpatapon/contrail-api-cli | contrail_api_cli/client.py | https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/client.py#L136-L147 | def post_json(self, url, data, cls=None, **kwargs):
"""
POST data to the api-server
:param url: resource location (eg: "/type/uuid")
:type url: str
:param cls: JSONEncoder class
:type cls: JSONEncoder
"""
kwargs['data'] = to_json(data, cls=cls)
kwargs['headers'] = self.default_headers
return self.post(url, **kwargs).json() | [
"def",
"post_json",
"(",
"self",
",",
"url",
",",
"data",
",",
"cls",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'data'",
"]",
"=",
"to_json",
"(",
"data",
",",
"cls",
"=",
"cls",
")",
"kwargs",
"[",
"'headers'",
"]",
"=",
"self",
".",
"default_headers",
"return",
"self",
".",
"post",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
".",
"json",
"(",
")"
]
| POST data to the api-server
:param url: resource location (eg: "/type/uuid")
:type url: str
:param cls: JSONEncoder class
:type cls: JSONEncoder | [
"POST",
"data",
"to",
"the",
"api",
"-",
"server"
]
| python | train |
ellmetha/django-machina | machina/apps/forum_member/views.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_member/views.py#L175-L177 | def perform_permissions_check(self, user, obj, perms):
""" Performs the permission check. """
return self.request.forum_permission_handler.can_subscribe_to_topic(obj, user) | [
"def",
"perform_permissions_check",
"(",
"self",
",",
"user",
",",
"obj",
",",
"perms",
")",
":",
"return",
"self",
".",
"request",
".",
"forum_permission_handler",
".",
"can_subscribe_to_topic",
"(",
"obj",
",",
"user",
")"
]
| Performs the permission check. | [
"Performs",
"the",
"permission",
"check",
"."
]
| python | train |
KelSolaar/Umbra | umbra/preferences.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/preferences.py#L283-L297 | def set_default_preferences(self):
"""
Defines the default settings file content.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Initializing default settings!")
for key in self.__default_settings.allKeys():
self.__settings.setValue(key, self.__default_settings.value(key))
self.set_default_layouts()
return True | [
"def",
"set_default_preferences",
"(",
"self",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"> Initializing default settings!\"",
")",
"for",
"key",
"in",
"self",
".",
"__default_settings",
".",
"allKeys",
"(",
")",
":",
"self",
".",
"__settings",
".",
"setValue",
"(",
"key",
",",
"self",
".",
"__default_settings",
".",
"value",
"(",
"key",
")",
")",
"self",
".",
"set_default_layouts",
"(",
")",
"return",
"True"
]
| Defines the default settings file content.
:return: Method success.
:rtype: bool | [
"Defines",
"the",
"default",
"settings",
"file",
"content",
"."
]
| python | train |
rpcope1/HackerNewsAPI-Py | HackerNewsAPI/API.py | https://github.com/rpcope1/HackerNewsAPI-Py/blob/b231aed24ec59fc32af320bbef27d48cc4b69914/HackerNewsAPI/API.py#L28-L37 | def _make_request(self, suburl):
"""
Helper function for making requests
:param suburl: The suburl to query
:return: Decoded json object
"""
url = "{}/{}".format(self.API_BASE_URL, suburl)
response = self.session.get(url)
response.raise_for_status()
return response.json() | [
"def",
"_make_request",
"(",
"self",
",",
"suburl",
")",
":",
"url",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"API_BASE_URL",
",",
"suburl",
")",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Helper function for making requests
:param suburl: The suburl to query
:return: Decoded json object | [
"Helper",
"function",
"for",
"making",
"requests",
":",
"param",
"suburl",
":",
"The",
"suburl",
"to",
"query",
":",
"return",
":",
"Decoded",
"json",
"object"
]
| python | train |
cloudera/cm_api | python/src/cm_api/endpoints/types.py | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/types.py#L216-L226 | def _set_attrs(self, attrs, allow_ro=False, from_json=True):
"""
Sets all the attributes in the dictionary. Optionally, allows setting
read-only attributes (e.g. when deserializing from JSON) and skipping
JSON deserialization of values.
"""
for k, v in attrs.iteritems():
attr = self._check_attr(k, allow_ro)
if attr and from_json:
v = attr.from_json(self._get_resource_root(), v)
object.__setattr__(self, k, v) | [
"def",
"_set_attrs",
"(",
"self",
",",
"attrs",
",",
"allow_ro",
"=",
"False",
",",
"from_json",
"=",
"True",
")",
":",
"for",
"k",
",",
"v",
"in",
"attrs",
".",
"iteritems",
"(",
")",
":",
"attr",
"=",
"self",
".",
"_check_attr",
"(",
"k",
",",
"allow_ro",
")",
"if",
"attr",
"and",
"from_json",
":",
"v",
"=",
"attr",
".",
"from_json",
"(",
"self",
".",
"_get_resource_root",
"(",
")",
",",
"v",
")",
"object",
".",
"__setattr__",
"(",
"self",
",",
"k",
",",
"v",
")"
]
| Sets all the attributes in the dictionary. Optionally, allows setting
read-only attributes (e.g. when deserializing from JSON) and skipping
JSON deserialization of values. | [
"Sets",
"all",
"the",
"attributes",
"in",
"the",
"dictionary",
".",
"Optionally",
"allows",
"setting",
"read",
"-",
"only",
"attributes",
"(",
"e",
".",
"g",
".",
"when",
"deserializing",
"from",
"JSON",
")",
"and",
"skipping",
"JSON",
"deserialization",
"of",
"values",
"."
]
| python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9407-L9420 | def mission_request_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t)
'''
return MAVLink_mission_request_partial_list_message(target_system, target_component, start_index, end_index) | [
"def",
"mission_request_partial_list_encode",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"start_index",
",",
"end_index",
")",
":",
"return",
"MAVLink_mission_request_partial_list_message",
"(",
"target_system",
",",
"target_component",
",",
"start_index",
",",
"end_index",
")"
]
| Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t) | [
"Request",
"a",
"partial",
"list",
"of",
"mission",
"items",
"from",
"the",
"system",
"/",
"component",
".",
"http",
":",
"//",
"qgroundcontrol",
".",
"org",
"/",
"mavlink",
"/",
"waypoint_protocol",
".",
"If",
"start",
"and",
"end",
"index",
"are",
"the",
"same",
"just",
"send",
"one",
"waypoint",
"."
]
| python | train |
saltstack/salt | salt/modules/firewalld.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/firewalld.py#L1027-L1044 | def remove_rich_rule(zone, rule, permanent=True):
'''
Remove a rich rule from a zone
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.remove_rich_rule zone 'rule'
'''
cmd = "--zone={0} --remove-rich-rule='{1}'".format(zone, rule)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd) | [
"def",
"remove_rich_rule",
"(",
"zone",
",",
"rule",
",",
"permanent",
"=",
"True",
")",
":",
"cmd",
"=",
"\"--zone={0} --remove-rich-rule='{1}'\"",
".",
"format",
"(",
"zone",
",",
"rule",
")",
"if",
"permanent",
":",
"cmd",
"+=",
"' --permanent'",
"return",
"__firewall_cmd",
"(",
"cmd",
")"
]
| Remove a rich rule from a zone
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.remove_rich_rule zone 'rule' | [
"Add",
"a",
"rich",
"rule",
"to",
"a",
"zone"
]
| python | train |
openstack/quark | quark/drivers/nvp_driver.py | https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/nvp_driver.py#L740-L748 | def get_lswitch_ids_for_network(self, context, network_id):
"""Public interface for fetching lswitch ids for a given network.
NOTE(morgabra) This is here because calling private methods
from outside the class feels wrong, and we need to be able to
fetch lswitch ids for use in other drivers.
"""
lswitches = self._lswitches_for_network(context, network_id).results()
return [s['uuid'] for s in lswitches["results"]] | [
"def",
"get_lswitch_ids_for_network",
"(",
"self",
",",
"context",
",",
"network_id",
")",
":",
"lswitches",
"=",
"self",
".",
"_lswitches_for_network",
"(",
"context",
",",
"network_id",
")",
".",
"results",
"(",
")",
"return",
"[",
"s",
"[",
"'uuid'",
"]",
"for",
"s",
"in",
"lswitches",
"[",
"\"results\"",
"]",
"]"
]
| Public interface for fetching lswitch ids for a given network.
NOTE(morgabra) This is here because calling private methods
from outside the class feels wrong, and we need to be able to
fetch lswitch ids for use in other drivers. | [
"Public",
"interface",
"for",
"fetching",
"lswitch",
"ids",
"for",
"a",
"given",
"network",
"."
]
| python | valid |
googlefonts/fontbakery | Lib/fontbakery/profiles/googlefonts.py | https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3781-L3807 | def com_google_fonts_check_name_family_and_style_max_length(ttFont):
"""Combined length of family and style must not exceed 27 characters."""
from fontbakery.utils import (get_name_entries,
get_name_entry_strings)
failed = False
for familyname in get_name_entries(ttFont,
NameID.FONT_FAMILY_NAME):
# we'll only match family/style name entries with the same platform ID:
plat = familyname.platformID
familyname_str = familyname.string.decode(familyname.getEncoding())
for stylename_str in get_name_entry_strings(ttFont,
NameID.FONT_SUBFAMILY_NAME,
platformID=plat):
if len(familyname_str + stylename_str) > 27:
failed = True
yield WARN, ("The combined length of family and style"
" exceeds 27 chars in the following '{}' entries:"
" FONT_FAMILY_NAME = '{}' / SUBFAMILY_NAME = '{}'"
"").format(PlatformID(plat).name,
familyname_str,
stylename_str)
yield WARN, ("Please take a look at the conversation at"
" https://github.com/googlefonts/fontbakery/issues/2179"
" in order to understand the reasoning behing these"
" name table records max-length criteria.")
if not failed:
yield PASS, "All name entries are good." | [
"def",
"com_google_fonts_check_name_family_and_style_max_length",
"(",
"ttFont",
")",
":",
"from",
"fontbakery",
".",
"utils",
"import",
"(",
"get_name_entries",
",",
"get_name_entry_strings",
")",
"failed",
"=",
"False",
"for",
"familyname",
"in",
"get_name_entries",
"(",
"ttFont",
",",
"NameID",
".",
"FONT_FAMILY_NAME",
")",
":",
"# we'll only match family/style name entries with the same platform ID:",
"plat",
"=",
"familyname",
".",
"platformID",
"familyname_str",
"=",
"familyname",
".",
"string",
".",
"decode",
"(",
"familyname",
".",
"getEncoding",
"(",
")",
")",
"for",
"stylename_str",
"in",
"get_name_entry_strings",
"(",
"ttFont",
",",
"NameID",
".",
"FONT_SUBFAMILY_NAME",
",",
"platformID",
"=",
"plat",
")",
":",
"if",
"len",
"(",
"familyname_str",
"+",
"stylename_str",
")",
">",
"27",
":",
"failed",
"=",
"True",
"yield",
"WARN",
",",
"(",
"\"The combined length of family and style\"",
"\" exceeds 27 chars in the following '{}' entries:\"",
"\" FONT_FAMILY_NAME = '{}' / SUBFAMILY_NAME = '{}'\"",
"\"\"",
")",
".",
"format",
"(",
"PlatformID",
"(",
"plat",
")",
".",
"name",
",",
"familyname_str",
",",
"stylename_str",
")",
"yield",
"WARN",
",",
"(",
"\"Please take a look at the conversation at\"",
"\" https://github.com/googlefonts/fontbakery/issues/2179\"",
"\" in order to understand the reasoning behing these\"",
"\" name table records max-length criteria.\"",
")",
"if",
"not",
"failed",
":",
"yield",
"PASS",
",",
"\"All name entries are good.\""
]
| Combined length of family and style must not exceed 27 characters. | [
"Combined",
"length",
"of",
"family",
"and",
"style",
"must",
"not",
"exceed",
"27",
"characters",
"."
]
| python | train |
bloomreach/s4cmd | s4cmd.py | https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L175-L189 | def fail(message, exc_info=None, status=1, stacktrace=False):
'''Utility function to handle runtime failures gracefully.
Show concise information if possible, then terminate program.
'''
text = message
if exc_info:
text += str(exc_info)
error(text)
if stacktrace:
error(traceback.format_exc())
clean_tempfiles()
if __name__ == '__main__':
sys.exit(status)
else:
raise RuntimeError(status) | [
"def",
"fail",
"(",
"message",
",",
"exc_info",
"=",
"None",
",",
"status",
"=",
"1",
",",
"stacktrace",
"=",
"False",
")",
":",
"text",
"=",
"message",
"if",
"exc_info",
":",
"text",
"+=",
"str",
"(",
"exc_info",
")",
"error",
"(",
"text",
")",
"if",
"stacktrace",
":",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"clean_tempfiles",
"(",
")",
"if",
"__name__",
"==",
"'__main__'",
":",
"sys",
".",
"exit",
"(",
"status",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"status",
")"
]
| Utility function to handle runtime failures gracefully.
Show concise information if possible, then terminate program. | [
"Utility",
"function",
"to",
"handle",
"runtime",
"failures",
"gracefully",
".",
"Show",
"concise",
"information",
"if",
"possible",
"then",
"terminate",
"program",
"."
]
| python | test |
log2timeline/plaso | plaso/parsers/esedb_plugins/srum.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/esedb_plugins/srum.py#L479-L496 | def ParseNetworkConnectivityUsage(
self, parser_mediator, cache=None, database=None, table=None,
**unused_kwargs):
"""Parses the network connectivity usage monitor table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
"""
# TODO: consider making ConnectStartTime + ConnectedTime an event.
self._ParseGUIDTable(
parser_mediator, cache, database, table,
self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP,
SRUMNetworkConnectivityUsageEventData) | [
"def",
"ParseNetworkConnectivityUsage",
"(",
"self",
",",
"parser_mediator",
",",
"cache",
"=",
"None",
",",
"database",
"=",
"None",
",",
"table",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"# TODO: consider making ConnectStartTime + ConnectedTime an event.",
"self",
".",
"_ParseGUIDTable",
"(",
"parser_mediator",
",",
"cache",
",",
"database",
",",
"table",
",",
"self",
".",
"_NETWORK_CONNECTIVITY_USAGE_VALUES_MAP",
",",
"SRUMNetworkConnectivityUsageEventData",
")"
]
| Parses the network connectivity usage monitor table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table. | [
"Parses",
"the",
"network",
"connectivity",
"usage",
"monitor",
"table",
"."
]
| python | train |
klen/muffin-rest | muffin_rest/peewee.py | https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/peewee.py#L95-L104 | def get_one(self, request, **kwargs):
"""Load a resource."""
resource = request.match_info.get(self.name)
if not resource:
return None
try:
return self.collection.where(self.meta.model_pk == resource).get()
except Exception:
raise RESTNotFound(reason='Resource not found.') | [
"def",
"get_one",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"request",
".",
"match_info",
".",
"get",
"(",
"self",
".",
"name",
")",
"if",
"not",
"resource",
":",
"return",
"None",
"try",
":",
"return",
"self",
".",
"collection",
".",
"where",
"(",
"self",
".",
"meta",
".",
"model_pk",
"==",
"resource",
")",
".",
"get",
"(",
")",
"except",
"Exception",
":",
"raise",
"RESTNotFound",
"(",
"reason",
"=",
"'Resource not found.'",
")"
]
| Load a resource. | [
"Load",
"a",
"resource",
"."
]
| python | train |
klmitch/metatools | metatools.py | https://github.com/klmitch/metatools/blob/7161cf22ef2b194cfd4406e85b81e39a49104d9d/metatools.py#L134-L173 | def inherit_set(base, namespace, attr_name,
inherit=lambda i: True):
"""
Perform inheritance of sets. Returns a list of items that
were inherited, for post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the set
to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
item, and the item will be added and included
in the items list only if the function returns
``True``. By default, all items are added and
included in the items list.
"""
items = []
# Get the sets to compare
base_set = getattr(base, attr_name, set())
new_set = namespace.setdefault(attr_name, set())
for item in base_set:
# Skip items that have been overridden or that we
# shouldn't inherit
if item in new_set or (inherit and not inherit(item)):
continue
# Inherit the item
if inherit:
new_set.add(item)
items.append(item)
return items | [
"def",
"inherit_set",
"(",
"base",
",",
"namespace",
",",
"attr_name",
",",
"inherit",
"=",
"lambda",
"i",
":",
"True",
")",
":",
"items",
"=",
"[",
"]",
"# Get the sets to compare",
"base_set",
"=",
"getattr",
"(",
"base",
",",
"attr_name",
",",
"set",
"(",
")",
")",
"new_set",
"=",
"namespace",
".",
"setdefault",
"(",
"attr_name",
",",
"set",
"(",
")",
")",
"for",
"item",
"in",
"base_set",
":",
"# Skip items that have been overridden or that we",
"# shouldn't inherit",
"if",
"item",
"in",
"new_set",
"or",
"(",
"inherit",
"and",
"not",
"inherit",
"(",
"item",
")",
")",
":",
"continue",
"# Inherit the item",
"if",
"inherit",
":",
"new_set",
".",
"add",
"(",
"item",
")",
"items",
".",
"append",
"(",
"item",
")",
"return",
"items"
]
| Perform inheritance of sets. Returns a list of items that
were inherited, for post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the set
to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
item, and the item will be added and included
in the items list only if the function returns
``True``. By default, all items are added and
included in the items list. | [
"Perform",
"inheritance",
"of",
"sets",
".",
"Returns",
"a",
"list",
"of",
"items",
"that",
"were",
"inherited",
"for",
"post",
"-",
"processing",
"."
]
| python | train |
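A small, hedged sketch of the call pattern documented in the record above: a metaclass merging a set attribute from each base while building a new class. The metaclass and attribute names are invented for illustration.

class SetInheritMeta(type):
    """Hypothetical metaclass that merges each base's ``_flags`` set."""
    def __new__(mcs, name, bases, namespace):
        for base in bases:
            # Carry over every public flag from the base; private names are skipped
            inherit_set(base, namespace, '_flags',
                        inherit=lambda item: not item.startswith('_'))
        return super(SetInheritMeta, mcs).__new__(mcs, name, bases, namespace)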
pandas-dev/pandas | pandas/core/frame.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7033-L7115 | def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
elif method == 'kendall' or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
"'{method}' was supplied".format(method=method))
return self._constructor(correl, index=idx, columns=cols) | [
"def",
"corr",
"(",
"self",
",",
"method",
"=",
"'pearson'",
",",
"min_periods",
"=",
"1",
")",
":",
"numeric_df",
"=",
"self",
".",
"_get_numeric_data",
"(",
")",
"cols",
"=",
"numeric_df",
".",
"columns",
"idx",
"=",
"cols",
".",
"copy",
"(",
")",
"mat",
"=",
"numeric_df",
".",
"values",
"if",
"method",
"==",
"'pearson'",
":",
"correl",
"=",
"libalgos",
".",
"nancorr",
"(",
"ensure_float64",
"(",
"mat",
")",
",",
"minp",
"=",
"min_periods",
")",
"elif",
"method",
"==",
"'spearman'",
":",
"correl",
"=",
"libalgos",
".",
"nancorr_spearman",
"(",
"ensure_float64",
"(",
"mat",
")",
",",
"minp",
"=",
"min_periods",
")",
"elif",
"method",
"==",
"'kendall'",
"or",
"callable",
"(",
"method",
")",
":",
"if",
"min_periods",
"is",
"None",
":",
"min_periods",
"=",
"1",
"mat",
"=",
"ensure_float64",
"(",
"mat",
")",
".",
"T",
"corrf",
"=",
"nanops",
".",
"get_corr_func",
"(",
"method",
")",
"K",
"=",
"len",
"(",
"cols",
")",
"correl",
"=",
"np",
".",
"empty",
"(",
"(",
"K",
",",
"K",
")",
",",
"dtype",
"=",
"float",
")",
"mask",
"=",
"np",
".",
"isfinite",
"(",
"mat",
")",
"for",
"i",
",",
"ac",
"in",
"enumerate",
"(",
"mat",
")",
":",
"for",
"j",
",",
"bc",
"in",
"enumerate",
"(",
"mat",
")",
":",
"if",
"i",
">",
"j",
":",
"continue",
"valid",
"=",
"mask",
"[",
"i",
"]",
"&",
"mask",
"[",
"j",
"]",
"if",
"valid",
".",
"sum",
"(",
")",
"<",
"min_periods",
":",
"c",
"=",
"np",
".",
"nan",
"elif",
"i",
"==",
"j",
":",
"c",
"=",
"1.",
"elif",
"not",
"valid",
".",
"all",
"(",
")",
":",
"c",
"=",
"corrf",
"(",
"ac",
"[",
"valid",
"]",
",",
"bc",
"[",
"valid",
"]",
")",
"else",
":",
"c",
"=",
"corrf",
"(",
"ac",
",",
"bc",
")",
"correl",
"[",
"i",
",",
"j",
"]",
"=",
"c",
"correl",
"[",
"j",
",",
"i",
"]",
"=",
"c",
"else",
":",
"raise",
"ValueError",
"(",
"\"method must be either 'pearson', \"",
"\"'spearman', 'kendall', or a callable, \"",
"\"'{method}' was supplied\"",
".",
"format",
"(",
"method",
"=",
"method",
")",
")",
"return",
"self",
".",
"_constructor",
"(",
"correl",
",",
"index",
"=",
"idx",
",",
"columns",
"=",
"cols",
")"
]
| Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0 | [
"Compute",
"pairwise",
"correlation",
"of",
"columns",
"excluding",
"NA",
"/",
"null",
"values",
"."
]
| python | train |
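As a hedged complement to the callable example already shown in the record's docstring, a short sketch of the named-method path with the min_periods guard; the frame values are made up.

import numpy as np
import pandas as pd

df = pd.DataFrame({'dogs': [0.2, 0.0, 0.6, 0.2],
                   'cats': [0.3, 0.6, np.nan, 0.1]})
# Rank correlation; any column pair with fewer than 3 shared observations yields NaN
print(df.corr(method='spearman', min_periods=3))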
pmorissette/bt | bt/core.py | https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L175-L181 | def value(self):
"""
Current value of the Node
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._value | [
"def",
"value",
"(",
"self",
")",
":",
"if",
"self",
".",
"root",
".",
"stale",
":",
"self",
".",
"root",
".",
"update",
"(",
"self",
".",
"root",
".",
"now",
",",
"None",
")",
"return",
"self",
".",
"_value"
]
| Current value of the Node | [
"Current",
"value",
"of",
"the",
"Node"
]
| python | train |
fermiPy/fermipy | fermipy/jobs/link.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/link.py#L346-L350 | def _fill_argparser(self, parser):
"""Fill an `argparser.ArgumentParser` with the options from this chain
"""
for key, val in self._options.items():
add_argument(parser, key, val) | [
"def",
"_fill_argparser",
"(",
"self",
",",
"parser",
")",
":",
"for",
"key",
",",
"val",
"in",
"self",
".",
"_options",
".",
"items",
"(",
")",
":",
"add_argument",
"(",
"parser",
",",
"key",
",",
"val",
")"
]
| Fill an `argparse.ArgumentParser` with the options from this chain | [
"Fill",
"an",
"argparser",
".",
"ArgumentParser",
"with",
"the",
"options",
"from",
"this",
"chain"
]
| python | train |
blockstack/blockstack-core | blockstack/lib/config.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/config.py#L1168-L1173 | def get_announce_filename( working_dir ):
"""
Get the path to the file that stores all of the announcements.
"""
announce_filepath = os.path.join( working_dir, get_default_virtualchain_impl().get_virtual_chain_name() ) + '.announce'
return announce_filepath | [
"def",
"get_announce_filename",
"(",
"working_dir",
")",
":",
"announce_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"working_dir",
",",
"get_default_virtualchain_impl",
"(",
")",
".",
"get_virtual_chain_name",
"(",
")",
")",
"+",
"'.announce'",
"return",
"announce_filepath"
]
| Get the path to the file that stores all of the announcements. | [
"Get",
"the",
"path",
"to",
"the",
"file",
"that",
"stores",
"all",
"of",
"the",
"announcements",
"."
]
| python | train |
gitpython-developers/GitPython | git/objects/submodule/base.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/submodule/base.py#L912-L957 | def set_parent_commit(self, commit, check=True):
"""Set this instance to use the given commit whose tree is supposed to
contain the .gitmodules blob.
:param commit:
Commit'ish reference pointing at the root_tree, or None to always point to the
most recent commit
:param check:
if True, relatively expensive checks will be performed to verify
validity of the submodule.
:raise ValueError: if the commit's tree didn't contain the .gitmodules blob.
:raise ValueError:
if the parent commit didn't store this submodule under the current path
:return: self"""
if commit is None:
self._parent_commit = None
return self
# end handle None
pcommit = self.repo.commit(commit)
pctree = pcommit.tree
if self.k_modules_file not in pctree:
raise ValueError("Tree of commit %s did not contain the %s file" % (commit, self.k_modules_file))
# END handle exceptions
prev_pc = self._parent_commit
self._parent_commit = pcommit
if check:
parser = self._config_parser(self.repo, self._parent_commit, read_only=True)
if not parser.has_section(sm_section(self.name)):
self._parent_commit = prev_pc
raise ValueError("Submodule at path %r did not exist in parent commit %s" % (self.path, commit))
# END handle submodule did not exist
# END handle checking mode
# update our sha, it could have changed
# If check is False, we might see a parent-commit that doesn't even contain the submodule anymore.
# in that case, mark our sha as being NULL
try:
self.binsha = pctree[self.path].binsha
except KeyError:
self.binsha = self.NULL_BIN_SHA
# end
self._clear_cache()
return self | [
"def",
"set_parent_commit",
"(",
"self",
",",
"commit",
",",
"check",
"=",
"True",
")",
":",
"if",
"commit",
"is",
"None",
":",
"self",
".",
"_parent_commit",
"=",
"None",
"return",
"self",
"# end handle None",
"pcommit",
"=",
"self",
".",
"repo",
".",
"commit",
"(",
"commit",
")",
"pctree",
"=",
"pcommit",
".",
"tree",
"if",
"self",
".",
"k_modules_file",
"not",
"in",
"pctree",
":",
"raise",
"ValueError",
"(",
"\"Tree of commit %s did not contain the %s file\"",
"%",
"(",
"commit",
",",
"self",
".",
"k_modules_file",
")",
")",
"# END handle exceptions",
"prev_pc",
"=",
"self",
".",
"_parent_commit",
"self",
".",
"_parent_commit",
"=",
"pcommit",
"if",
"check",
":",
"parser",
"=",
"self",
".",
"_config_parser",
"(",
"self",
".",
"repo",
",",
"self",
".",
"_parent_commit",
",",
"read_only",
"=",
"True",
")",
"if",
"not",
"parser",
".",
"has_section",
"(",
"sm_section",
"(",
"self",
".",
"name",
")",
")",
":",
"self",
".",
"_parent_commit",
"=",
"prev_pc",
"raise",
"ValueError",
"(",
"\"Submodule at path %r did not exist in parent commit %s\"",
"%",
"(",
"self",
".",
"path",
",",
"commit",
")",
")",
"# END handle submodule did not exist",
"# END handle checking mode",
"# update our sha, it could have changed",
"# If check is False, we might see a parent-commit that doesn't even contain the submodule anymore.",
"# in that case, mark our sha as being NULL",
"try",
":",
"self",
".",
"binsha",
"=",
"pctree",
"[",
"self",
".",
"path",
"]",
".",
"binsha",
"except",
"KeyError",
":",
"self",
".",
"binsha",
"=",
"self",
".",
"NULL_BIN_SHA",
"# end",
"self",
".",
"_clear_cache",
"(",
")",
"return",
"self"
]
| Set this instance to use the given commit whose tree is supposed to
contain the .gitmodules blob.
:param commit:
Commit'ish reference pointing at the root_tree, or None to always point to the
most recent commit
:param check:
if True, relatively expensive checks will be performed to verify
validity of the submodule.
:raise ValueError: if the commit's tree didn't contain the .gitmodules blob.
:raise ValueError:
if the parent commit didn't store this submodule under the current path
:return: self | [
"Set",
"this",
"instance",
"to",
"use",
"the",
"given",
"commit",
"whose",
"tree",
"is",
"supposed",
"to",
"contain",
"the",
".",
"gitmodules",
"blob",
"."
]
| python | train |
jaredLunde/vital-tools | vital/tools/encoding.py | https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/tools/encoding.py#L216-L279 | def text_badness(text):
u'''
Look for red flags that text is encoded incorrectly:
Obvious problems:
- The replacement character \ufffd, indicating a decoding error
- Unassigned or private-use Unicode characters
Very weird things:
- Adjacent letters from two different scripts
- Letters in scripts that are very rarely used on computers (and
therefore, someone who is using them will probably get Unicode right)
- Improbable control characters, such as 0x81
Moderately weird things:
- Improbable single-byte characters, such as ƒ or ¬
- Letters in somewhat rare scripts
'''
assert isinstance(text, str)
errors = 0
very_weird_things = 0
weird_things = 0
prev_letter_script = None
unicodedata_name = unicodedata.name
unicodedata_category = unicodedata.category
for char in text:
index = ord(char)
if index < 256:
# Deal quickly with the first 256 characters.
weird_things += SINGLE_BYTE_WEIRDNESS[index]
if SINGLE_BYTE_LETTERS[index]:
prev_letter_script = 'latin'
else:
prev_letter_script = None
else:
category = unicodedata_category(char)
if category == 'Co':
# Unassigned or private use
errors += 1
elif index == 0xfffd:
# Replacement character
errors += 1
elif index in WINDOWS_1252_GREMLINS:
lowchar = char.encode('WINDOWS_1252').decode('latin-1')
weird_things += SINGLE_BYTE_WEIRDNESS[ord(lowchar)] - 0.5
if category[0] == 'L':
# It's a letter. What kind of letter? This is typically found
# in the first word of the letter's Unicode name.
name = unicodedata_name(char)
scriptname = name.split()[0]
freq, script = SCRIPT_TABLE.get(scriptname, (0, 'other'))
if prev_letter_script:
if script != prev_letter_script:
very_weird_things += 1
if freq == 1:
weird_things += 2
elif freq == 0:
very_weird_things += 1
prev_letter_script = script
else:
prev_letter_script = None
return 100 * errors + 10 * very_weird_things + weird_things | [
"def",
"text_badness",
"(",
"text",
")",
":",
"assert",
"isinstance",
"(",
"text",
",",
"str",
")",
"errors",
"=",
"0",
"very_weird_things",
"=",
"0",
"weird_things",
"=",
"0",
"prev_letter_script",
"=",
"None",
"unicodedata_name",
"=",
"unicodedata",
".",
"name",
"unicodedata_category",
"=",
"unicodedata",
".",
"category",
"for",
"char",
"in",
"text",
":",
"index",
"=",
"ord",
"(",
"char",
")",
"if",
"index",
"<",
"256",
":",
"# Deal quickly with the first 256 characters.",
"weird_things",
"+=",
"SINGLE_BYTE_WEIRDNESS",
"[",
"index",
"]",
"if",
"SINGLE_BYTE_LETTERS",
"[",
"index",
"]",
":",
"prev_letter_script",
"=",
"'latin'",
"else",
":",
"prev_letter_script",
"=",
"None",
"else",
":",
"category",
"=",
"unicodedata_category",
"(",
"char",
")",
"if",
"category",
"==",
"'Co'",
":",
"# Unassigned or private use",
"errors",
"+=",
"1",
"elif",
"index",
"==",
"0xfffd",
":",
"# Replacement character",
"errors",
"+=",
"1",
"elif",
"index",
"in",
"WINDOWS_1252_GREMLINS",
":",
"lowchar",
"=",
"char",
".",
"encode",
"(",
"'WINDOWS_1252'",
")",
".",
"decode",
"(",
"'latin-1'",
")",
"weird_things",
"+=",
"SINGLE_BYTE_WEIRDNESS",
"[",
"ord",
"(",
"lowchar",
")",
"]",
"-",
"0.5",
"if",
"category",
"[",
"0",
"]",
"==",
"'L'",
":",
"# It's a letter. What kind of letter? This is typically found",
"# in the first word of the letter's Unicode name.",
"name",
"=",
"unicodedata_name",
"(",
"char",
")",
"scriptname",
"=",
"name",
".",
"split",
"(",
")",
"[",
"0",
"]",
"freq",
",",
"script",
"=",
"SCRIPT_TABLE",
".",
"get",
"(",
"scriptname",
",",
"(",
"0",
",",
"'other'",
")",
")",
"if",
"prev_letter_script",
":",
"if",
"script",
"!=",
"prev_letter_script",
":",
"very_weird_things",
"+=",
"1",
"if",
"freq",
"==",
"1",
":",
"weird_things",
"+=",
"2",
"elif",
"freq",
"==",
"0",
":",
"very_weird_things",
"+=",
"1",
"prev_letter_script",
"=",
"script",
"else",
":",
"prev_letter_script",
"=",
"None",
"return",
"100",
"*",
"errors",
"+",
"10",
"*",
"very_weird_things",
"+",
"weird_things"
]
| u'''
Look for red flags that text is encoded incorrectly:
Obvious problems:
- The replacement character \ufffd, indicating a decoding error
- Unassigned or private-use Unicode characters
Very weird things:
- Adjacent letters from two different scripts
- Letters in scripts that are very rarely used on computers (and
therefore, someone who is using them will probably get Unicode right)
- Improbable control characters, such as 0x81
Moderately weird things:
- Improbable single-byte characters, such as ƒ or ¬
- Letters in somewhat rare scripts | [
"u",
"Look",
"for",
"red",
"flags",
"that",
"text",
"is",
"encoded",
"incorrectly",
":"
]
| python | train |
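An illustrative caller for the scoring function in the record above: picking the candidate decoding with the lower badness score. The byte string is a toy example, not taken from the record.

raw = b'caf\xc3\xa9'  # UTF-8 bytes for "café", used as a toy input
candidates = [raw.decode('utf-8'), raw.decode('latin-1')]
# Lower score means fewer red flags, so prefer the minimum
best = min(candidates, key=text_badness)
print(best)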
iotile/coretools | iotilecore/iotile/core/hw/transport/server/standard.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/server/standard.py#L91-L120 | async def client_event_handler(self, client_id, event_tuple, user_data):
"""Method called to actually send an event to a client.
Users of this class should override this method to actually forward
device events to their clients. It is called with the client_id
passed to (or returned from) :meth:`setup_client` as well as the
user_data object that was included there.
The event tuple is a 3-tuple of:
- connection string
- event name
- event object
If you override this to be a coroutine, it will be awaited. The
default implementation just logs the event.
Args:
client_id (str): The client_id that this event should be forwarded
to.
event_tuple (tuple): The connection_string, event_name and event_object
that should be forwarded.
user_data (object): Any user data that was passed to setup_client.
"""
conn_string, event_name, _event = event_tuple
self._logger.debug("Ignoring event %s from device %s forwarded for client %s",
event_name, conn_string, client_id)
return None | [
"async",
"def",
"client_event_handler",
"(",
"self",
",",
"client_id",
",",
"event_tuple",
",",
"user_data",
")",
":",
"conn_string",
",",
"event_name",
",",
"_event",
"=",
"event_tuple",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Ignoring event %s from device %s forwarded for client %s\"",
",",
"event_name",
",",
"conn_string",
",",
"client_id",
")",
"return",
"None"
]
| Method called to actually send an event to a client.
Users of this class should override this method to actually forward
device events to their clients. It is called with the client_id
passed to (or returned from) :meth:`setup_client` as well as the
user_data object that was included there.
The event tuple is a 3-tuple of:
- connection string
- event name
- event object
If you override this to be a coroutine, it will be awaited. The
default implementation just logs the event.
Args:
client_id (str): The client_id that this event should be forwarded
to.
event_tuple (tuple): The connection_string, event_name and event_object
that should be forwarded.
user_data (object): Any user data that was passed to setup_client. | [
"Method",
"called",
"to",
"actually",
"send",
"an",
"event",
"to",
"a",
"client",
"."
]
| python | train |
Datary/scrapbag | scrapbag/files.py | https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L94-L109 | def open_remote_url(urls, **kwargs):
"""Open the url and check that it stores a file.
Args:
:urls: Endpoint to take the file
"""
if isinstance(urls, str):
urls = [urls]
for url in urls:
try:
web_file = requests.get(url, stream=True, **kwargs)
if 'html' in web_file.headers['content-type']:
raise ValueError("HTML source file retrieved.")
return web_file
except Exception as ex:
logger.error('Fail to open remote url - {}'.format(ex))
continue | [
"def",
"open_remote_url",
"(",
"urls",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"urls",
",",
"str",
")",
":",
"urls",
"=",
"[",
"urls",
"]",
"for",
"url",
"in",
"urls",
":",
"try",
":",
"web_file",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"if",
"'html'",
"in",
"web_file",
".",
"headers",
"[",
"'content-type'",
"]",
":",
"raise",
"ValueError",
"(",
"\"HTML source file retrieved.\"",
")",
"return",
"web_file",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"'Fail to open remote url - {}'",
".",
"format",
"(",
"ex",
")",
")",
"continue"
]
| Open the url and check that it stores a file.
Args:
:urls: Endpoint to take the file | [
"Open",
"the",
"url",
"and",
"check",
"that",
"it",
"stores",
"a",
"file",
".",
"Args",
":",
":",
"urls",
":",
"Endpoint",
"to",
"take",
"the",
"file"
]
| python | train |
knipknap/exscript | Exscript/protocols/protocol.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/protocol.py#L907-L930 | def auto_app_authorize(self, account=None, flush=True, bailout=False):
"""
Like authorize(), but instead of just waiting for a user or
password prompt, it automatically initiates the authorization
procedure by sending a driver-specific command.
In the case of devices that understand AAA, that means sending
a command to the device. For example, on routers running Cisco
IOS, this command executes the 'enable' command before expecting
the password.
In the case of a device that is not recognized to support AAA, this
method does nothing.
:type account: Account
:param account: An account object, like login().
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
:type bailout: bool
:param bailout: Whether to wait for a prompt after sending the password.
"""
with self._get_account(account) as account:
self._dbg(1, 'Calling driver.auto_authorize().')
self.get_driver().auto_authorize(self, account, flush, bailout) | [
"def",
"auto_app_authorize",
"(",
"self",
",",
"account",
"=",
"None",
",",
"flush",
"=",
"True",
",",
"bailout",
"=",
"False",
")",
":",
"with",
"self",
".",
"_get_account",
"(",
"account",
")",
"as",
"account",
":",
"self",
".",
"_dbg",
"(",
"1",
",",
"'Calling driver.auto_authorize().'",
")",
"self",
".",
"get_driver",
"(",
")",
".",
"auto_authorize",
"(",
"self",
",",
"account",
",",
"flush",
",",
"bailout",
")"
]
| Like authorize(), but instead of just waiting for a user or
password prompt, it automatically initiates the authorization
procedure by sending a driver-specific command.
In the case of devices that understand AAA, that means sending
a command to the device. For example, on routers running Cisco
IOS, this command executes the 'enable' command before expecting
the password.
In the case of a device that is not recognized to support AAA, this
method does nothing.
:type account: Account
:param account: An account object, like login().
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
:type bailout: bool
:param bailout: Whether to wait for a prompt after sending the password. | [
"Like",
"authorize",
"()",
"but",
"instead",
"of",
"just",
"waiting",
"for",
"a",
"user",
"or",
"password",
"prompt",
"it",
"automatically",
"initiates",
"the",
"authorization",
"procedure",
"by",
"sending",
"a",
"driver",
"-",
"specific",
"command",
"."
]
| python | train |
michael-lazar/rtv | rtv/packages/praw/__init__.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L2347-L2359 | def get_multireddit(self, redditor, multi, *args, **kwargs):
"""Return a Multireddit object for the author and name specified.
:param redditor: The username or Redditor object of the user
who owns the multireddit.
:param multi: The name of the multireddit to fetch.
The additional parameters are passed directly into the
:class:`.Multireddit` constructor.
"""
return objects.Multireddit(self, six.text_type(redditor), multi,
*args, **kwargs) | [
"def",
"get_multireddit",
"(",
"self",
",",
"redditor",
",",
"multi",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"objects",
".",
"Multireddit",
"(",
"self",
",",
"six",
".",
"text_type",
"(",
"redditor",
")",
",",
"multi",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Return a Multireddit object for the author and name specified.
:param redditor: The username or Redditor object of the user
who owns the multireddit.
:param multi: The name of the multireddit to fetch.
The additional parameters are passed directly into the
:class:`.Multireddit` constructor. | [
"Return",
"a",
"Multireddit",
"object",
"for",
"the",
"author",
"and",
"name",
"specified",
"."
]
| python | train |
Garee/pytodoist | pytodoist/api.py | https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/api.py#L384-L413 | def get_redirect_link(self, api_token, **kwargs):
"""Return the absolute URL to redirect or to open in
a browser. The first time the link is used it logs in the user
automatically and performs a redirect to a given page. Once used,
the link keeps working as a plain redirect.
:param api_token: The user's login api_token.
:type api_token: str
:param path: The path to redirect the user's browser. Default ``/app``.
:type path: str
:param hash: The hash part of the path to redirect the user's browser.
:type hash: str
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.login('[email protected]', 'password')
>>> user_info = response.json()
>>> user_api_token = user_info['api_token']
>>> response = api.get_redirect_link(user_api_token)
>>> link_info = response.json()
>>> redirect_link = link_info['link']
>>> print(redirect_link)
https://todoist.com/secureRedirect?path=adflk...
"""
params = {
'token': api_token
}
return self._get('get_redirect_link', params, **kwargs) | [
"def",
"get_redirect_link",
"(",
"self",
",",
"api_token",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'token'",
":",
"api_token",
"}",
"return",
"self",
".",
"_get",
"(",
"'get_redirect_link'",
",",
"params",
",",
"*",
"*",
"kwargs",
")"
]
| Return the absolute URL to redirect or to open in
a browser. The first time the link is used it logs in the user
automatically and performs a redirect to a given page. Once used,
the link keeps working as a plain redirect.
:param api_token: The user's login api_token.
:type api_token: str
:param path: The path to redirect the user's browser. Default ``/app``.
:type path: str
:param hash: The hash part of the path to redirect the user's browser.
:type hash: str
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.login('[email protected]', 'password')
>>> user_info = response.json()
>>> user_api_token = user_info['api_token']
>>> response = api.get_redirect_link(user_api_token)
>>> link_info = response.json()
>>> redirect_link = link_info['link']
>>> print(redirect_link)
https://todoist.com/secureRedirect?path=adflk... | [
"Return",
"the",
"absolute",
"URL",
"to",
"redirect",
"or",
"to",
"open",
"in",
"a",
"browser",
".",
"The",
"first",
"time",
"the",
"link",
"is",
"used",
"it",
"logs",
"in",
"the",
"user",
"automatically",
"and",
"performs",
"a",
"redirect",
"to",
"a",
"given",
"page",
".",
"Once",
"used",
"the",
"link",
"keeps",
"working",
"as",
"a",
"plain",
"redirect",
"."
]
| python | train |
ff0000/scarlet | scarlet/cms/sites.py | https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/sites.py#L299-L342 | def index(self, request, extra_context=None):
"""
Displays the dashboard. Includes the main
navigation that the user has permission for as well
as the cms log for those sections. The log list can
be filtered by those same sections
and is paginated.
"""
dashboard = self.get_dashboard_urls(request)
dash_blocks = self.get_dashboard_blocks(request)
sections, titles = self._get_allowed_sections(dashboard)
choices = zip(sections, titles)
choices.sort(key=lambda tup: tup[1])
choices.insert(0, ('', 'All'))
class SectionFilterForm(BaseFilterForm):
section = forms.ChoiceField(required=False, choices=choices)
form = SectionFilterForm(request.GET)
filter_kwargs = form.get_filter_kwargs()
if not filter_kwargs and not request.user.is_superuser:
filter_kwargs['section__in'] = sections
cms_logs = models.CMSLog.objects.filter(**filter_kwargs
).order_by('-when')
template = self.dashboard_template or 'cms/dashboard.html'
paginator = Paginator(cms_logs[:20 * 100], 20,
allow_empty_first_page=True)
page_number = request.GET.get('page') or 1
try:
page_number = int(page_number)
except ValueError:
page_number = 1
page = paginator.page(page_number)
return TemplateResponse(request, [template], {
'dashboard': dashboard, 'blocks': dash_blocks,
'page': page, 'bundle': self._registry.values()[0],
'form': form},) | [
"def",
"index",
"(",
"self",
",",
"request",
",",
"extra_context",
"=",
"None",
")",
":",
"dashboard",
"=",
"self",
".",
"get_dashboard_urls",
"(",
"request",
")",
"dash_blocks",
"=",
"self",
".",
"get_dashboard_blocks",
"(",
"request",
")",
"sections",
",",
"titles",
"=",
"self",
".",
"_get_allowed_sections",
"(",
"dashboard",
")",
"choices",
"=",
"zip",
"(",
"sections",
",",
"titles",
")",
"choices",
".",
"sort",
"(",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"1",
"]",
")",
"choices",
".",
"insert",
"(",
"0",
",",
"(",
"''",
",",
"'All'",
")",
")",
"class",
"SectionFilterForm",
"(",
"BaseFilterForm",
")",
":",
"section",
"=",
"forms",
".",
"ChoiceField",
"(",
"required",
"=",
"False",
",",
"choices",
"=",
"choices",
")",
"form",
"=",
"SectionFilterForm",
"(",
"request",
".",
"GET",
")",
"filter_kwargs",
"=",
"form",
".",
"get_filter_kwargs",
"(",
")",
"if",
"not",
"filter_kwargs",
"and",
"not",
"request",
".",
"user",
".",
"is_superuser",
":",
"filter_kwargs",
"[",
"'section__in'",
"]",
"=",
"sections",
"cms_logs",
"=",
"models",
".",
"CMSLog",
".",
"objects",
".",
"filter",
"(",
"*",
"*",
"filter_kwargs",
")",
".",
"order_by",
"(",
"'-when'",
")",
"template",
"=",
"self",
".",
"dashboard_template",
"or",
"'cms/dashboard.html'",
"paginator",
"=",
"Paginator",
"(",
"cms_logs",
"[",
":",
"20",
"*",
"100",
"]",
",",
"20",
",",
"allow_empty_first_page",
"=",
"True",
")",
"page_number",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
")",
"or",
"1",
"try",
":",
"page_number",
"=",
"int",
"(",
"page_number",
")",
"except",
"ValueError",
":",
"page_number",
"=",
"1",
"page",
"=",
"paginator",
".",
"page",
"(",
"page_number",
")",
"return",
"TemplateResponse",
"(",
"request",
",",
"[",
"template",
"]",
",",
"{",
"'dashboard'",
":",
"dashboard",
",",
"'blocks'",
":",
"dash_blocks",
",",
"'page'",
":",
"page",
",",
"'bundle'",
":",
"self",
".",
"_registry",
".",
"values",
"(",
")",
"[",
"0",
"]",
",",
"'form'",
":",
"form",
"}",
",",
")"
]
| Displays the dashboard. Includes the main
navigation that the user has permission for as well
as the cms log for those sections. The log list can
be filtered by those same sections
and is paginated. | [
"Displays",
"the",
"dashboard",
".",
"Includes",
"the",
"main",
"navigation",
"that",
"the",
"user",
"has",
"permission",
"for",
"as",
"well",
"as",
"the",
"cms",
"log",
"for",
"those",
"sections",
".",
"The",
"log",
"list",
"can",
"be",
"filtered",
"by",
"those",
"same",
"sections",
"and",
"is",
"paginated",
"."
]
| python | train |
fboender/ansible-cmdb | lib/mako/runtime.py | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/runtime.py#L502-L505 | def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs) | [
"def",
"include_file",
"(",
"self",
",",
"uri",
",",
"*",
"*",
"kwargs",
")",
":",
"_include_file",
"(",
"self",
".",
"context",
",",
"uri",
",",
"self",
".",
"_templateuri",
",",
"*",
"*",
"kwargs",
")"
]
| Include a file at the given ``uri``. | [
"Include",
"a",
"file",
"at",
"the",
"given",
"uri",
"."
]
| python | train |
nickoala/telepot | telepot/__init__.py | https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L25-L57 | def flavor(msg):
"""
Return flavor of message or event.
A message's flavor may be one of these:
- ``chat``
- ``callback_query``
- ``inline_query``
- ``chosen_inline_result``
- ``shipping_query``
- ``pre_checkout_query``
An event's flavor is determined by the single top-level key.
"""
if 'message_id' in msg:
return 'chat'
elif 'id' in msg and 'chat_instance' in msg:
return 'callback_query'
elif 'id' in msg and 'query' in msg:
return 'inline_query'
elif 'result_id' in msg:
return 'chosen_inline_result'
elif 'id' in msg and 'shipping_address' in msg:
return 'shipping_query'
elif 'id' in msg and 'total_amount' in msg:
return 'pre_checkout_query'
else:
top_keys = list(msg.keys())
if len(top_keys) == 1:
return top_keys[0]
raise exception.BadFlavor(msg) | [
"def",
"flavor",
"(",
"msg",
")",
":",
"if",
"'message_id'",
"in",
"msg",
":",
"return",
"'chat'",
"elif",
"'id'",
"in",
"msg",
"and",
"'chat_instance'",
"in",
"msg",
":",
"return",
"'callback_query'",
"elif",
"'id'",
"in",
"msg",
"and",
"'query'",
"in",
"msg",
":",
"return",
"'inline_query'",
"elif",
"'result_id'",
"in",
"msg",
":",
"return",
"'chosen_inline_result'",
"elif",
"'id'",
"in",
"msg",
"and",
"'shipping_address'",
"in",
"msg",
":",
"return",
"'shipping_query'",
"elif",
"'id'",
"in",
"msg",
"and",
"'total_amount'",
"in",
"msg",
":",
"return",
"'pre_checkout_query'",
"else",
":",
"top_keys",
"=",
"list",
"(",
"msg",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"top_keys",
")",
"==",
"1",
":",
"return",
"top_keys",
"[",
"0",
"]",
"raise",
"exception",
".",
"BadFlavor",
"(",
"msg",
")"
]
| Return flavor of message or event.
A message's flavor may be one of these:
- ``chat``
- ``callback_query``
- ``inline_query``
- ``chosen_inline_result``
- ``shipping_query``
- ``pre_checkout_query``
An event's flavor is determined by the single top-level key. | [
"Return",
"flavor",
"of",
"message",
"or",
"event",
"."
]
| python | train |
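A minimal sketch of the key-based dispatch above; the dictionaries are trimmed-down stand-ins for real Telegram update payloads, which carry many more fields:
from telepot import flavor

chat_msg = {'message_id': 1, 'chat': {'id': 42}, 'text': 'hi'}
callback = {'id': '99', 'chat_instance': 'abc', 'data': 'pressed'}
print(flavor(chat_msg))   # 'chat'            (a 'message_id' key is present)
print(flavor(callback))   # 'callback_query'  ('id' plus 'chat_instance')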
saltstack/salt | salt/cloud/clouds/opennebula.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L3135-L3202 | def vm_disk_save(name, kwargs=None, call=None):
'''
Sets the disk to be saved in the given image.
.. versionadded:: 2016.3.0
name
The name of the VM containing the disk to save.
disk_id
The ID of the disk to save.
image_name
The name of the new image where the disk will be saved.
image_type
The type for the new image. If not set, then the default ``ONED`` Configuration
will be used. Other valid types include: OS, CDROM, DATABLOCK, KERNEL, RAMDISK,
and CONTEXT.
snapshot_id
The ID of the snapshot to export. If not set, the current image state will be
used.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_disk_save my-vm disk_id=1 image_name=my-new-image
salt-cloud -a vm_disk_save my-vm disk_id=1 image_name=my-new-image image_type=CONTEXT snapshot_id=10
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_disk_save action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
disk_id = kwargs.get('disk_id', None)
image_name = kwargs.get('image_name', None)
image_type = kwargs.get('image_type', '')
snapshot_id = int(kwargs.get('snapshot_id', '-1'))
if disk_id is None or image_name is None:
raise SaltCloudSystemExit(
'The vm_disk_save function requires a \'disk_id\' and an \'image_name\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.disksave(auth,
vm_id,
int(disk_id),
image_name,
image_type,
snapshot_id)
data = {
'action': 'vm.disksave',
'saved': response[0],
'image_id': response[1],
'error_code': response[2],
}
return data | [
"def",
"vm_disk_save",
"(",
"name",
",",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The vm_disk_save action must be called with -a or --action.'",
")",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"disk_id",
"=",
"kwargs",
".",
"get",
"(",
"'disk_id'",
",",
"None",
")",
"image_name",
"=",
"kwargs",
".",
"get",
"(",
"'image_name'",
",",
"None",
")",
"image_type",
"=",
"kwargs",
".",
"get",
"(",
"'image_type'",
",",
"''",
")",
"snapshot_id",
"=",
"int",
"(",
"kwargs",
".",
"get",
"(",
"'snapshot_id'",
",",
"'-1'",
")",
")",
"if",
"disk_id",
"is",
"None",
"or",
"image_name",
"is",
"None",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The vm_disk_save function requires a \\'disk_id\\' and an \\'image_name\\' '",
"'to be provided.'",
")",
"server",
",",
"user",
",",
"password",
"=",
"_get_xml_rpc",
"(",
")",
"auth",
"=",
"':'",
".",
"join",
"(",
"[",
"user",
",",
"password",
"]",
")",
"vm_id",
"=",
"int",
"(",
"get_vm_id",
"(",
"kwargs",
"=",
"{",
"'name'",
":",
"name",
"}",
")",
")",
"response",
"=",
"server",
".",
"one",
".",
"vm",
".",
"disksave",
"(",
"auth",
",",
"vm_id",
",",
"int",
"(",
"disk_id",
")",
",",
"image_name",
",",
"image_type",
",",
"snapshot_id",
")",
"data",
"=",
"{",
"'action'",
":",
"'vm.disksave'",
",",
"'saved'",
":",
"response",
"[",
"0",
"]",
",",
"'image_id'",
":",
"response",
"[",
"1",
"]",
",",
"'error_code'",
":",
"response",
"[",
"2",
"]",
",",
"}",
"return",
"data"
]
| Sets the disk to be saved in the given image.
.. versionadded:: 2016.3.0
name
The name of the VM containing the disk to save.
disk_id
The ID of the disk to save.
image_name
The name of the new image where the disk will be saved.
image_type
The type for the new image. If not set, then the default ``ONED`` Configuration
will be used. Other valid types include: OS, CDROM, DATABLOCK, KERNEL, RAMDISK,
and CONTEXT.
snapshot_id
The ID of the snapshot to export. If not set, the current image state will be
used.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_disk_save my-vm disk_id=1 image_name=my-new-image
salt-cloud -a vm_disk_save my-vm disk_id=1 image_name=my-new-image image_type=CONTEXT snapshot_id=10 | [
"Sets",
"the",
"disk",
"to",
"be",
"saved",
"in",
"the",
"given",
"image",
"."
]
| python | train |
spencerahill/aospy | aospy/automate.py | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/automate.py#L280-L288 | def _submit_calcs_on_client(calcs, client, func):
"""Submit calculations via dask.bag and a distributed client"""
logging.info('Connected to client: {}'.format(client))
if LooseVersion(dask.__version__) < '0.18':
dask_option_setter = dask.set_options
else:
dask_option_setter = dask.config.set
with dask_option_setter(get=client.get):
return db.from_sequence(calcs).map(func).compute() | [
"def",
"_submit_calcs_on_client",
"(",
"calcs",
",",
"client",
",",
"func",
")",
":",
"logging",
".",
"info",
"(",
"'Connected to client: {}'",
".",
"format",
"(",
"client",
")",
")",
"if",
"LooseVersion",
"(",
"dask",
".",
"__version__",
")",
"<",
"'0.18'",
":",
"dask_option_setter",
"=",
"dask",
".",
"set_options",
"else",
":",
"dask_option_setter",
"=",
"dask",
".",
"config",
".",
"set",
"with",
"dask_option_setter",
"(",
"get",
"=",
"client",
".",
"get",
")",
":",
"return",
"db",
".",
"from_sequence",
"(",
"calcs",
")",
".",
"map",
"(",
"func",
")",
".",
"compute",
"(",
")"
]
| Submit calculations via dask.bag and a distributed client | [
"Submit",
"calculations",
"via",
"dask",
".",
"bag",
"and",
"a",
"distributed",
"client"
]
| python | train |
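The core of the helper above is the dask.bag pipeline; a standalone sketch of the same from_sequence -> map -> compute pattern, independent of aospy's calculation objects:
import dask.bag as db

def double(x):
    return 2 * x

print(db.from_sequence([1, 2, 3]).map(double).compute())  # [2, 4, 6]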
atlassian-api/atlassian-python-api | atlassian/confluence.py | https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L218-L236 | def get_all_draft_pages_from_space_through_cql(self, space, start=0, limit=500, status='draft'):
"""
Search list of draft pages by space key
Use case is cleanup old drafts from Confluence
:param space: Space Key
:param status: Can be changed
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of pages to return, this may be restricted by
fixed system limits. Default: 500
:return:
"""
url = 'rest/api/content?cql=space=spaceKey={space} and status={status}'.format(space=space,
status=status)
params = {}
if limit:
params['limit'] = limit
if start:
params['start'] = start
return (self.get(url, params=params) or {}).get('results') | [
"def",
"get_all_draft_pages_from_space_through_cql",
"(",
"self",
",",
"space",
",",
"start",
"=",
"0",
",",
"limit",
"=",
"500",
",",
"status",
"=",
"'draft'",
")",
":",
"url",
"=",
"'rest/api/content?cql=space=spaceKey={space} and status={status}'",
".",
"format",
"(",
"space",
"=",
"space",
",",
"status",
"=",
"status",
")",
"params",
"=",
"{",
"}",
"if",
"limit",
":",
"params",
"[",
"'limit'",
"]",
"=",
"limit",
"if",
"start",
":",
"params",
"[",
"'start'",
"]",
"=",
"start",
"return",
"(",
"self",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"or",
"{",
"}",
")",
".",
"get",
"(",
"'results'",
")"
]
| Search list of draft pages by space key
Use case is cleanup old drafts from Confluence
:param space: Space Key
:param status: Can be changed
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of pages to return, this may be restricted by
fixed system limits. Default: 500
:return: | [
"Search",
"list",
"of",
"draft",
"pages",
"by",
"space",
"key",
"Use",
"case",
"is",
"cleanup",
"old",
"drafts",
"from",
"Confluence",
":",
"param",
"space",
":",
"Space",
"Key",
":",
"param",
"status",
":",
"Can",
"be",
"changed",
":",
"param",
"start",
":",
"OPTIONAL",
":",
"The",
"start",
"point",
"of",
"the",
"collection",
"to",
"return",
".",
"Default",
":",
"None",
"(",
"0",
")",
".",
":",
"param",
"limit",
":",
"OPTIONAL",
":",
"The",
"limit",
"of",
"the",
"number",
"of",
"pages",
"to",
"return",
"this",
"may",
"be",
"restricted",
"by",
"fixed",
"system",
"limits",
".",
"Default",
":",
"500",
":",
"return",
":"
]
| python | train |
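A usage sketch, assuming the usual atlassian-python-api client setup (the URL and credentials below are placeholders):
from atlassian import Confluence

confluence = Confluence(url='https://example.atlassian.net/wiki',
                        username='[email protected]', password='api-token')
drafts = confluence.get_all_draft_pages_from_space_through_cql('SPACEKEY', limit=100)
for page in drafts or []:
    print(page.get('id'), page.get('title'))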
allenai/allennlp | allennlp/data/vocabulary.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/vocabulary.py#L569-L595 | def extend_from_instances(self,
params: Params,
instances: Iterable['adi.Instance'] = ()) -> None:
"""
Extends an already generated vocabulary using a collection of instances.
"""
min_count = params.pop("min_count", None)
max_vocab_size = pop_max_vocab_size(params)
non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
pretrained_files = params.pop("pretrained_files", {})
min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
tokens_to_add = params.pop("tokens_to_add", None)
params.assert_empty("Vocabulary - from dataset")
logger.info("Fitting token dictionary from dataset.")
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances):
instance.count_vocab_items(namespace_token_counts)
self._extend(counter=namespace_token_counts,
min_count=min_count,
max_vocab_size=max_vocab_size,
non_padded_namespaces=non_padded_namespaces,
pretrained_files=pretrained_files,
only_include_pretrained_words=only_include_pretrained_words,
tokens_to_add=tokens_to_add,
min_pretrained_embeddings=min_pretrained_embeddings) | [
"def",
"extend_from_instances",
"(",
"self",
",",
"params",
":",
"Params",
",",
"instances",
":",
"Iterable",
"[",
"'adi.Instance'",
"]",
"=",
"(",
")",
")",
"->",
"None",
":",
"min_count",
"=",
"params",
".",
"pop",
"(",
"\"min_count\"",
",",
"None",
")",
"max_vocab_size",
"=",
"pop_max_vocab_size",
"(",
"params",
")",
"non_padded_namespaces",
"=",
"params",
".",
"pop",
"(",
"\"non_padded_namespaces\"",
",",
"DEFAULT_NON_PADDED_NAMESPACES",
")",
"pretrained_files",
"=",
"params",
".",
"pop",
"(",
"\"pretrained_files\"",
",",
"{",
"}",
")",
"min_pretrained_embeddings",
"=",
"params",
".",
"pop",
"(",
"\"min_pretrained_embeddings\"",
",",
"None",
")",
"only_include_pretrained_words",
"=",
"params",
".",
"pop_bool",
"(",
"\"only_include_pretrained_words\"",
",",
"False",
")",
"tokens_to_add",
"=",
"params",
".",
"pop",
"(",
"\"tokens_to_add\"",
",",
"None",
")",
"params",
".",
"assert_empty",
"(",
"\"Vocabulary - from dataset\"",
")",
"logger",
".",
"info",
"(",
"\"Fitting token dictionary from dataset.\"",
")",
"namespace_token_counts",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"int",
"]",
"]",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"int",
")",
")",
"for",
"instance",
"in",
"Tqdm",
".",
"tqdm",
"(",
"instances",
")",
":",
"instance",
".",
"count_vocab_items",
"(",
"namespace_token_counts",
")",
"self",
".",
"_extend",
"(",
"counter",
"=",
"namespace_token_counts",
",",
"min_count",
"=",
"min_count",
",",
"max_vocab_size",
"=",
"max_vocab_size",
",",
"non_padded_namespaces",
"=",
"non_padded_namespaces",
",",
"pretrained_files",
"=",
"pretrained_files",
",",
"only_include_pretrained_words",
"=",
"only_include_pretrained_words",
",",
"tokens_to_add",
"=",
"tokens_to_add",
",",
"min_pretrained_embeddings",
"=",
"min_pretrained_embeddings",
")"
]
| Extends an already generated vocabulary using a collection of instances. | [
"Extends",
"an",
"already",
"generated",
"vocabulary",
"using",
"a",
"collection",
"of",
"instances",
"."
]
| python | train |
wikimedia/ores | ores/scoring_context.py | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L95-L101 | def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache)) | [
"def",
"_solve_features",
"(",
"self",
",",
"model_name",
",",
"dependency_cache",
"=",
"None",
")",
":",
"features",
"=",
"self",
"[",
"model_name",
"]",
".",
"features",
"return",
"list",
"(",
"self",
".",
"extractor",
".",
"solve",
"(",
"features",
",",
"cache",
"=",
"dependency_cache",
")",
")"
]
| Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them. | [
"Solves",
"the",
"vector",
"(",
"list",
")",
"of",
"features",
"for",
"a",
"given",
"model",
"using",
"the",
"dependency_cache",
"and",
"returns",
"them",
"."
]
| python | train |
bfontaine/trigrams | trigrams/__init__.py | https://github.com/bfontaine/trigrams/blob/7e3906f7aae83d9b069bd11e611074c56d4e4803/trigrams/__init__.py#L63-L99 | def generate(self, **kwargs):
"""
Generate some text from the database. By default only 70 words are
generated, but you can change this using keyword arguments.
Keyword arguments:
- ``wlen``: maximum length (words)
- ``words``: a list of words to use to begin the text with
"""
words = list(map(self._sanitize, kwargs.get('words', [])))
max_wlen = kwargs.get('wlen', 70)
wlen = len(words)
if wlen < 2:
if not self._db:
return ''
if wlen == 0:
words = sample(self._db.keys(), 1)[0].split(self._WSEP)
elif wlen == 1:
spl = [k for k in self._db.keys()
if k.startswith(words[0]+self._WSEP)]
words.append(sample(spl, 1)[0].split(self._WSEP)[1])
wlen = 2
while wlen < max_wlen:
next_word = self._get(words[-2], words[-1])
if next_word is None:
break
words.append(next_word)
wlen += 1
return ' '.join(words) | [
"def",
"generate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"words",
"=",
"list",
"(",
"map",
"(",
"self",
".",
"_sanitize",
",",
"kwargs",
".",
"get",
"(",
"'words'",
",",
"[",
"]",
")",
")",
")",
"max_wlen",
"=",
"kwargs",
".",
"get",
"(",
"'wlen'",
",",
"70",
")",
"wlen",
"=",
"len",
"(",
"words",
")",
"if",
"wlen",
"<",
"2",
":",
"if",
"not",
"self",
".",
"_db",
":",
"return",
"''",
"if",
"wlen",
"==",
"0",
":",
"words",
"=",
"sample",
"(",
"self",
".",
"_db",
".",
"keys",
"(",
")",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"self",
".",
"_WSEP",
")",
"elif",
"wlen",
"==",
"1",
":",
"spl",
"=",
"[",
"k",
"for",
"k",
"in",
"self",
".",
"_db",
".",
"keys",
"(",
")",
"if",
"k",
".",
"startswith",
"(",
"words",
"[",
"0",
"]",
"+",
"self",
".",
"_WSEP",
")",
"]",
"words",
".",
"append",
"(",
"sample",
"(",
"spl",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"self",
".",
"_WSEP",
")",
"[",
"1",
"]",
")",
"wlen",
"=",
"2",
"while",
"wlen",
"<",
"max_wlen",
":",
"next_word",
"=",
"self",
".",
"_get",
"(",
"words",
"[",
"-",
"2",
"]",
",",
"words",
"[",
"-",
"1",
"]",
")",
"if",
"next_word",
"is",
"None",
":",
"break",
"words",
".",
"append",
"(",
"next_word",
")",
"wlen",
"+=",
"1",
"return",
"' '",
".",
"join",
"(",
"words",
")"
]
| Generate some text from the database. By default only 70 words are
generated, but you can change this using keyword arguments.
Keyword arguments:
- ``wlen``: maximum length (words)
- ``words``: a list of words to use to begin the text with | [
"Generate",
"some",
"text",
"from",
"the",
"database",
".",
"By",
"default",
"only",
"70",
"words",
"are",
"generated",
"but",
"you",
"can",
"change",
"this",
"using",
"keyword",
"arguments",
"."
]
| python | train |
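The generation loop above is a second-order Markov walk: given the last two words, look up a next word until the table runs dry or the word budget is reached. A self-contained sketch of the same idea with an explicit table:
import random

table = {('the', 'cat'): ['sat', 'ran'], ('cat', 'sat'): ['down']}
words = ['the', 'cat']
while len(words) < 6:
    options = table.get((words[-2], words[-1]))
    if options is None:              # mirrors _get() returning None
        break
    words.append(random.choice(options))
print(' '.join(words))               # e.g. 'the cat sat down'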
EnergieID/smappy | smappy/smappy.py | https://github.com/EnergieID/smappy/blob/1ada3abc9a51c76205c072369258f6f4f4e8fd0f/smappy/smappy.py#L357-L386 | def _actuator_on_off(self, on_off, service_location_id, actuator_id,
duration=None):
"""
Turn actuator on or off
Parameters
----------
on_off : str
'on' or 'off'
service_location_id : int
actuator_id : int
duration : int, optional
300,900,1800 or 3600 , specifying the time in seconds the actuator
should be turned on. Any other value results in turning on for an
undetermined period of time.
Returns
-------
requests.Response
"""
url = urljoin(URLS['servicelocation'], service_location_id,
"actuator", actuator_id, on_off)
headers = {"Authorization": "Bearer {}".format(self.access_token)}
if duration is not None:
data = {"duration": duration}
else:
data = {}
r = requests.post(url, headers=headers, json=data)
r.raise_for_status()
return r | [
"def",
"_actuator_on_off",
"(",
"self",
",",
"on_off",
",",
"service_location_id",
",",
"actuator_id",
",",
"duration",
"=",
"None",
")",
":",
"url",
"=",
"urljoin",
"(",
"URLS",
"[",
"'servicelocation'",
"]",
",",
"service_location_id",
",",
"\"actuator\"",
",",
"actuator_id",
",",
"on_off",
")",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"self",
".",
"access_token",
")",
"}",
"if",
"duration",
"is",
"not",
"None",
":",
"data",
"=",
"{",
"\"duration\"",
":",
"duration",
"}",
"else",
":",
"data",
"=",
"{",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"json",
"=",
"data",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"r"
]
| Turn actuator on or off
Parameters
----------
on_off : str
'on' or 'off'
service_location_id : int
actuator_id : int
duration : int, optional
300,900,1800 or 3600 , specifying the time in seconds the actuator
should be turned on. Any other value results in turning on for an
undetermined period of time.
Returns
-------
requests.Response | [
"Turn",
"actuator",
"on",
"or",
"off"
]
| python | train |
72squared/redpipe | redpipe/keyspaces.py | https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L1776-L1788 | def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
:param name: str the name of the redis key
:param min: int or -inf
:param max: into or +inf
:return: Future()
"""
with self.pipe as pipe:
return pipe.zremrangebylex(self.redis_key(name), min, max) | [
"def",
"zremrangebylex",
"(",
"self",
",",
"name",
",",
"min",
",",
"max",
")",
":",
"with",
"self",
".",
"pipe",
"as",
"pipe",
":",
"return",
"pipe",
".",
"zremrangebylex",
"(",
"self",
".",
"redis_key",
"(",
"name",
")",
",",
"min",
",",
"max",
")"
]
| Remove all elements in the sorted set between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
:param name: str the name of the redis key
:param min: int or -inf
:param max: int or +inf
:return: Future() | [
"Remove",
"all",
"elements",
"in",
"the",
"sorted",
"set",
"between",
"the",
"lexicographical",
"range",
"specified",
"by",
"min",
"and",
"max",
"."
]
| python | train |
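The method is a thin wrapper over Redis' ZREMRANGEBYLEX; a sketch with plain redis-py against a local server (members share a score so ordering is lexicographic, '[' marks an inclusive bound, '-' and '+' are open ends):
import redis

r = redis.Redis()
r.zadd('letters', {'apple': 0, 'banana': 0, 'cherry': 0})
print(r.zremrangebylex('letters', '[apple', '[banana'))  # 2 -> only 'cherry' remains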
nitmir/django-cas-server | cas_server/views.py | https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/views.py#L1125-L1189 | def get(self, request):
"""
method called on GET request on this view
:param django.http.HttpRequest request: The current request object:
:return: The rendering of ``cas_server/serviceValidate.xml`` if no errors is raised,
the rendering or ``cas_server/serviceValidateError.xml`` otherwise.
:rtype: django.http.HttpResponse
"""
# define the class parameters
self.request = request
self.service = request.GET.get('service')
self.ticket = request.GET.get('ticket')
self.pgt_url = request.GET.get('pgtUrl')
self.renew = True if request.GET.get('renew') else False
# service and ticket parameter are mandatory
if not self.service or not self.ticket:
logger.warning("ValidateService: missing ticket or service")
return ValidateError(
u'INVALID_REQUEST',
u"you must specify a service and a ticket"
).render(request)
else:
try:
# search the ticket in the database
self.ticket, proxies = self.process_ticket()
# prepare template rendering context
params = {
'username': self.ticket.username(),
'attributes': self.ticket.attributs_flat(),
'proxies': proxies,
'auth_date': self.ticket.user.last_login.replace(microsecond=0).isoformat(),
'is_new_login': 'true' if self.ticket.renew else 'false'
}
# if pgtUrl is set, require https or localhost
if self.pgt_url and (
self.pgt_url.startswith("https://") or
re.match(r"^http://(127\.0\.0\.1|localhost)(:[0-9]+)?(/.*)?$", self.pgt_url)
):
return self.process_pgturl(params)
else:
logger.info(
"ValidateService: ticket %s validated for user %s on service %s." % (
self.ticket.value,
self.ticket.user.username,
self.ticket.service
)
)
logger.debug(
"ValidateService: User attributs are:\n%s" % (
pprint.pformat(self.ticket.attributs),
)
)
return render(
request,
"cas_server/serviceValidate.xml",
params,
content_type="text/xml; charset=utf-8"
)
except ValidateError as error:
logger.warning(
"ValidateService: validation error: %s %s" % (error.code, error.msg)
)
return error.render(request) | [
"def",
"get",
"(",
"self",
",",
"request",
")",
":",
"# define the class parameters",
"self",
".",
"request",
"=",
"request",
"self",
".",
"service",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'service'",
")",
"self",
".",
"ticket",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'ticket'",
")",
"self",
".",
"pgt_url",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'pgtUrl'",
")",
"self",
".",
"renew",
"=",
"True",
"if",
"request",
".",
"GET",
".",
"get",
"(",
"'renew'",
")",
"else",
"False",
"# service and ticket parameter are mandatory",
"if",
"not",
"self",
".",
"service",
"or",
"not",
"self",
".",
"ticket",
":",
"logger",
".",
"warning",
"(",
"\"ValidateService: missing ticket or service\"",
")",
"return",
"ValidateError",
"(",
"u'INVALID_REQUEST'",
",",
"u\"you must specify a service and a ticket\"",
")",
".",
"render",
"(",
"request",
")",
"else",
":",
"try",
":",
"# search the ticket in the database",
"self",
".",
"ticket",
",",
"proxies",
"=",
"self",
".",
"process_ticket",
"(",
")",
"# prepare template rendering context",
"params",
"=",
"{",
"'username'",
":",
"self",
".",
"ticket",
".",
"username",
"(",
")",
",",
"'attributes'",
":",
"self",
".",
"ticket",
".",
"attributs_flat",
"(",
")",
",",
"'proxies'",
":",
"proxies",
",",
"'auth_date'",
":",
"self",
".",
"ticket",
".",
"user",
".",
"last_login",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
".",
"isoformat",
"(",
")",
",",
"'is_new_login'",
":",
"'true'",
"if",
"self",
".",
"ticket",
".",
"renew",
"else",
"'false'",
"}",
"# if pgtUrl is set, require https or localhost",
"if",
"self",
".",
"pgt_url",
"and",
"(",
"self",
".",
"pgt_url",
".",
"startswith",
"(",
"\"https://\"",
")",
"or",
"re",
".",
"match",
"(",
"r\"^http://(127\\.0\\.0\\.1|localhost)(:[0-9]+)?(/.*)?$\"",
",",
"self",
".",
"pgt_url",
")",
")",
":",
"return",
"self",
".",
"process_pgturl",
"(",
"params",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"ValidateService: ticket %s validated for user %s on service %s.\"",
"%",
"(",
"self",
".",
"ticket",
".",
"value",
",",
"self",
".",
"ticket",
".",
"user",
".",
"username",
",",
"self",
".",
"ticket",
".",
"service",
")",
")",
"logger",
".",
"debug",
"(",
"\"ValidateService: User attributs are:\\n%s\"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"self",
".",
"ticket",
".",
"attributs",
")",
",",
")",
")",
"return",
"render",
"(",
"request",
",",
"\"cas_server/serviceValidate.xml\"",
",",
"params",
",",
"content_type",
"=",
"\"text/xml; charset=utf-8\"",
")",
"except",
"ValidateError",
"as",
"error",
":",
"logger",
".",
"warning",
"(",
"\"ValidateService: validation error: %s %s\"",
"%",
"(",
"error",
".",
"code",
",",
"error",
".",
"msg",
")",
")",
"return",
"error",
".",
"render",
"(",
"request",
")"
]
| method called on GET request on this view
:param django.http.HttpRequest request: The current request object:
:return: The rendering of ``cas_server/serviceValidate.xml`` if no error is raised,
the rendering of ``cas_server/serviceValidateError.xml`` otherwise.
:rtype: django.http.HttpResponse | [
"method",
"called",
"on",
"GET",
"request",
"on",
"this",
"view"
]
| python | train |
googleapis/google-cloud-python | bigquery/google/cloud/bigquery/dataset.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/dataset.py#L362-L376 | def access_entries(self):
"""List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access
entries.
``role`` augments the entity type and must be present **unless** the
entity type is ``view``.
Raises:
TypeError: If 'value' is not a sequence
ValueError:
If any item in the sequence is not an
:class:`~google.cloud.bigquery.dataset.AccessEntry`.
"""
entries = self._properties.get("access", [])
return [AccessEntry.from_api_repr(entry) for entry in entries] | [
"def",
"access_entries",
"(",
"self",
")",
":",
"entries",
"=",
"self",
".",
"_properties",
".",
"get",
"(",
"\"access\"",
",",
"[",
"]",
")",
"return",
"[",
"AccessEntry",
".",
"from_api_repr",
"(",
"entry",
")",
"for",
"entry",
"in",
"entries",
"]"
]
| List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access
entries.
``role`` augments the entity type and must be present **unless** the
entity type is ``view``.
Raises:
TypeError: If 'value' is not a sequence
ValueError:
If any item in the sequence is not an
:class:`~google.cloud.bigquery.dataset.AccessEntry`. | [
"List",
"[",
"google",
".",
"cloud",
".",
"bigquery",
".",
"dataset",
".",
"AccessEntry",
"]",
":",
"Dataset",
"s",
"access",
"entries",
"."
]
| python | train |
genomoncology/related | src/related/fields.py | https://github.com/genomoncology/related/blob/be47c0081e60fc60afcde3a25f00ebcad5d18510/src/related/fields.py#L13-L27 | def BooleanField(default=NOTHING, required=True, repr=True, cmp=True,
key=None):
"""
Create new bool field on a model.
:param default: any boolean value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: whether this field should appear in the object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
"""
default = _init_fields.init_default(required, default, None)
validator = _init_fields.init_validator(required, bool)
return attrib(default=default, validator=validator, repr=repr, cmp=cmp,
metadata=dict(key=key)) | [
"def",
"BooleanField",
"(",
"default",
"=",
"NOTHING",
",",
"required",
"=",
"True",
",",
"repr",
"=",
"True",
",",
"cmp",
"=",
"True",
",",
"key",
"=",
"None",
")",
":",
"default",
"=",
"_init_fields",
".",
"init_default",
"(",
"required",
",",
"default",
",",
"None",
")",
"validator",
"=",
"_init_fields",
".",
"init_validator",
"(",
"required",
",",
"bool",
")",
"return",
"attrib",
"(",
"default",
"=",
"default",
",",
"validator",
"=",
"validator",
",",
"repr",
"=",
"repr",
",",
"cmp",
"=",
"cmp",
",",
"metadata",
"=",
"dict",
"(",
"key",
"=",
"key",
")",
")"
]
| Create new bool field on a model.
:param default: any boolean value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: whether this field should appear in the object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict. | [
"Create",
"new",
"bool",
"field",
"on",
"a",
"model",
"."
]
| python | train |
obulpathi/cdn-fastly-python | fastly/__init__.py | https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L537-L540 | def get_healthcheck(self, service_id, version_number, name):
"""Get the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, name))
return FastlyHealthCheck(self, content) | [
"def",
"get_healthcheck",
"(",
"self",
",",
"service_id",
",",
"version_number",
",",
"name",
")",
":",
"content",
"=",
"self",
".",
"_fetch",
"(",
"\"/service/%s/version/%d/healthcheck/%s\"",
"%",
"(",
"service_id",
",",
"version_number",
",",
"name",
")",
")",
"return",
"FastlyHealthCheck",
"(",
"self",
",",
"content",
")"
]
| Get the healthcheck for a particular service and version. | [
"Get",
"the",
"healthcheck",
"for",
"a",
"particular",
"service",
"and",
"version",
"."
]
| python | train |
mikedh/trimesh | trimesh/transformations.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/transformations.py#L1218-L1272 | def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> np.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i + parity - 1] + 1
k = _NEXT_AXIS[i - parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci * ck
cs = ci * sk
sc = si * ck
ss = si * sk
q = np.empty((4, ))
if repetition:
q[0] = cj * (cc - ss)
q[i] = cj * (cs + sc)
q[j] = sj * (cc + ss)
q[k] = sj * (cs - sc)
else:
q[0] = cj * cc + sj * ss
q[i] = cj * sc - sj * cs
q[j] = cj * ss + sj * cc
q[k] = cj * cs - sj * sc
if parity:
q[j] *= -1.0
return q | [
"def",
"quaternion_from_euler",
"(",
"ai",
",",
"aj",
",",
"ak",
",",
"axes",
"=",
"'sxyz'",
")",
":",
"try",
":",
"firstaxis",
",",
"parity",
",",
"repetition",
",",
"frame",
"=",
"_AXES2TUPLE",
"[",
"axes",
".",
"lower",
"(",
")",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"_TUPLE2AXES",
"[",
"axes",
"]",
"# validation",
"firstaxis",
",",
"parity",
",",
"repetition",
",",
"frame",
"=",
"axes",
"i",
"=",
"firstaxis",
"+",
"1",
"j",
"=",
"_NEXT_AXIS",
"[",
"i",
"+",
"parity",
"-",
"1",
"]",
"+",
"1",
"k",
"=",
"_NEXT_AXIS",
"[",
"i",
"-",
"parity",
"]",
"+",
"1",
"if",
"frame",
":",
"ai",
",",
"ak",
"=",
"ak",
",",
"ai",
"if",
"parity",
":",
"aj",
"=",
"-",
"aj",
"ai",
"/=",
"2.0",
"aj",
"/=",
"2.0",
"ak",
"/=",
"2.0",
"ci",
"=",
"math",
".",
"cos",
"(",
"ai",
")",
"si",
"=",
"math",
".",
"sin",
"(",
"ai",
")",
"cj",
"=",
"math",
".",
"cos",
"(",
"aj",
")",
"sj",
"=",
"math",
".",
"sin",
"(",
"aj",
")",
"ck",
"=",
"math",
".",
"cos",
"(",
"ak",
")",
"sk",
"=",
"math",
".",
"sin",
"(",
"ak",
")",
"cc",
"=",
"ci",
"*",
"ck",
"cs",
"=",
"ci",
"*",
"sk",
"sc",
"=",
"si",
"*",
"ck",
"ss",
"=",
"si",
"*",
"sk",
"q",
"=",
"np",
".",
"empty",
"(",
"(",
"4",
",",
")",
")",
"if",
"repetition",
":",
"q",
"[",
"0",
"]",
"=",
"cj",
"*",
"(",
"cc",
"-",
"ss",
")",
"q",
"[",
"i",
"]",
"=",
"cj",
"*",
"(",
"cs",
"+",
"sc",
")",
"q",
"[",
"j",
"]",
"=",
"sj",
"*",
"(",
"cc",
"+",
"ss",
")",
"q",
"[",
"k",
"]",
"=",
"sj",
"*",
"(",
"cs",
"-",
"sc",
")",
"else",
":",
"q",
"[",
"0",
"]",
"=",
"cj",
"*",
"cc",
"+",
"sj",
"*",
"ss",
"q",
"[",
"i",
"]",
"=",
"cj",
"*",
"sc",
"-",
"sj",
"*",
"cs",
"q",
"[",
"j",
"]",
"=",
"cj",
"*",
"ss",
"+",
"sj",
"*",
"cc",
"q",
"[",
"k",
"]",
"=",
"cj",
"*",
"cs",
"-",
"sj",
"*",
"sc",
"if",
"parity",
":",
"q",
"[",
"j",
"]",
"*=",
"-",
"1.0",
"return",
"q"
]
| Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> np.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True | [
"Return",
"quaternion",
"from",
"Euler",
"angles",
"and",
"axis",
"sequence",
"."
]
| python | train |
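A quick numerical sanity check: whichever axis convention is chosen, the returned quaternion should have unit norm.
import numpy as np

q = quaternion_from_euler(0.1, 0.2, 0.3, 'sxyz')
print(np.allclose(np.dot(q, q), 1.0))  # True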
webrecorder/pywb | pywb/apps/wbrequestresponse.py | https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/apps/wbrequestresponse.py#L170-L196 | def add_access_control_headers(self, env=None):
"""Adds Access-Control* HTTP headers to this WbResponse's HTTP headers.
:param dict env: The WSGI environment dictionary
:return: The same WbResponse but with the values for the Access-Control* HTTP header added
:rtype: WbResponse
"""
allowed_methods = 'GET, POST, PUT, OPTIONS, DELETE, PATCH, HEAD, TRACE, CONNECT'
allowed_origin = None
if env is not None:
acr_method = env.get('HTTP_ACCESS_CONTROL_REQUEST_METHOD')
if acr_method is not None and acr_method not in allowed_methods:
allowed_methods = allowed_methods + ', ' + acr_method
r_method = env.get('REQUEST_METHOD')
if r_method is not None and r_method not in allowed_methods:
allowed_methods = allowed_methods + ', ' + r_method
acr_headers = env.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS')
if acr_headers is not None:
self.status_headers.add_header('Access-Control-Allow-Headers', acr_headers)
allowed_origin = env.get('HTTP_ORIGIN', env.get('HTTP_REFERER', allowed_origin))
if allowed_origin is None:
allowed_origin = '*'
self.status_headers.replace_header('Access-Control-Allow-Origin', allowed_origin)
self.status_headers.add_header('Access-Control-Allow-Methods', allowed_methods)
self.status_headers.add_header('Access-Control-Allow-Credentials', 'true')
self.status_headers.add_header('Access-Control-Max-Age', '1800')
return self | [
"def",
"add_access_control_headers",
"(",
"self",
",",
"env",
"=",
"None",
")",
":",
"allowed_methods",
"=",
"'GET, POST, PUT, OPTIONS, DELETE, PATCH, HEAD, TRACE, CONNECT'",
"allowed_origin",
"=",
"None",
"if",
"env",
"is",
"not",
"None",
":",
"acr_method",
"=",
"env",
".",
"get",
"(",
"'HTTP_ACCESS_CONTROL_REQUEST_METHOD'",
")",
"if",
"acr_method",
"is",
"not",
"None",
"and",
"acr_method",
"not",
"in",
"allowed_methods",
":",
"allowed_methods",
"=",
"allowed_methods",
"+",
"', '",
"+",
"acr_method",
"r_method",
"=",
"env",
".",
"get",
"(",
"'REQUEST_METHOD'",
")",
"if",
"r_method",
"is",
"not",
"None",
"and",
"r_method",
"not",
"in",
"allowed_methods",
":",
"allowed_methods",
"=",
"allowed_methods",
"+",
"', '",
"+",
"r_method",
"acr_headers",
"=",
"env",
".",
"get",
"(",
"'HTTP_ACCESS_CONTROL_REQUEST_HEADERS'",
")",
"if",
"acr_headers",
"is",
"not",
"None",
":",
"self",
".",
"status_headers",
".",
"add_header",
"(",
"'Access-Control-Allow-Headers'",
",",
"acr_headers",
")",
"allowed_origin",
"=",
"env",
".",
"get",
"(",
"'HTTP_ORIGIN'",
",",
"env",
".",
"get",
"(",
"'HTTP_REFERER'",
",",
"allowed_origin",
")",
")",
"if",
"allowed_origin",
"is",
"None",
":",
"allowed_origin",
"=",
"'*'",
"self",
".",
"status_headers",
".",
"replace_header",
"(",
"'Access-Control-Allow-Origin'",
",",
"allowed_origin",
")",
"self",
".",
"status_headers",
".",
"add_header",
"(",
"'Access-Control-Allow-Methods'",
",",
"allowed_methods",
")",
"self",
".",
"status_headers",
".",
"add_header",
"(",
"'Access-Control-Allow-Credentials'",
",",
"'true'",
")",
"self",
".",
"status_headers",
".",
"add_header",
"(",
"'Access-Control-Max-Age'",
",",
"'1800'",
")",
"return",
"self"
]
| Adds Access-Control* HTTP headers to this WbResponse's HTTP headers.
:param dict env: The WSGI environment dictionary
:return: The same WbResponse but with the values for the Access-Control* HTTP header added
:rtype: WbResponse | [
"Adds",
"Access",
"-",
"Control",
"*",
"HTTP",
"headers",
"to",
"this",
"WbResponse",
"s",
"HTTP",
"headers",
"."
]
| python | train |
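A usage sketch; the text_response helper is assumed from the same class, and the env keys are the standard WSGI/CGI names the method reads:
env = {
    'REQUEST_METHOD': 'OPTIONS',
    'HTTP_ORIGIN': 'https://example.org',
    'HTTP_ACCESS_CONTROL_REQUEST_HEADERS': 'x-requested-with',
}
resp = WbResponse.text_response('ok').add_access_control_headers(env=env)
# -> Access-Control-Allow-Origin: https://example.org (falls back to '*'),
#    Allow-Methods including OPTIONS, Allow-Headers: x-requested-with,
#    Allow-Credentials: true, Max-Age: 1800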
fermiPy/fermipy | fermipy/gtanalysis.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L5508-L5525 | def write_weight_map(self, model_name=None):
"""Save counts model map to a FITS file.
"""
if model_name is None:
suffix = self.config['file_suffix']
else:
suffix = '_%s%s' % (model_name, self.config['file_suffix'])
self.logger.info('Generating model map for component %s.', self.name)
outfile = os.path.join(self.config['fileio']['workdir'],
'wcube%s.fits' % (suffix))
wmap = self.weight_map()
wmap.write(outfile, overwrite=True, conv='fgst-ccube')
return wmap | [
"def",
"write_weight_map",
"(",
"self",
",",
"model_name",
"=",
"None",
")",
":",
"if",
"model_name",
"is",
"None",
":",
"suffix",
"=",
"self",
".",
"config",
"[",
"'file_suffix'",
"]",
"else",
":",
"suffix",
"=",
"'_%s%s'",
"%",
"(",
"model_name",
",",
"self",
".",
"config",
"[",
"'file_suffix'",
"]",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Generating model map for component %s.'",
",",
"self",
".",
"name",
")",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config",
"[",
"'fileio'",
"]",
"[",
"'workdir'",
"]",
",",
"'wcube%s.fits'",
"%",
"(",
"suffix",
")",
")",
"wmap",
"=",
"self",
".",
"weight_map",
"(",
")",
"wmap",
".",
"write",
"(",
"outfile",
",",
"overwrite",
"=",
"True",
",",
"conv",
"=",
"'fgst-ccube'",
")",
"return",
"wmap"
]
| Save counts model map to a FITS file. | [
"Save",
"counts",
"model",
"map",
"to",
"a",
"FITS",
"file",
"."
]
| python | train |
noahbenson/neuropythy | neuropythy/geometry/util.py | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/util.py#L120-L134 | def point_on_line(ab, c):
'''
point_on_line((a,b), c) yields True if point c is on line (a,b) and False otherwise.
'''
(a,b) = ab
abc = [np.asarray(u) for u in (a,b,c)]
if any(len(u.shape) == 2 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
else: (a,b,c) = abc
vca = a - c
vcb = b - c
uba = czdivide(vcb, np.sqrt(np.sum(vcb**2, axis=0)))
uca = czdivide(vca, np.sqrt(np.sum(vca**2, axis=0)))
return (np.isclose(np.sqrt(np.sum(vca**2, axis=0)), 0) |
np.isclose(np.sqrt(np.sum(vcb**2, axis=0)), 0) |
np.isclose(np.abs(np.sum(uba*uca, axis=0)), 1)) | [
"def",
"point_on_line",
"(",
"ab",
",",
"c",
")",
":",
"(",
"a",
",",
"b",
")",
"=",
"ab",
"abc",
"=",
"[",
"np",
".",
"asarray",
"(",
"u",
")",
"for",
"u",
"in",
"(",
"a",
",",
"b",
",",
"c",
")",
"]",
"if",
"any",
"(",
"len",
"(",
"u",
".",
"shape",
")",
"==",
"2",
"for",
"u",
"in",
"abc",
")",
":",
"(",
"a",
",",
"b",
",",
"c",
")",
"=",
"[",
"np",
".",
"reshape",
"(",
"u",
",",
"(",
"len",
"(",
"u",
")",
",",
"-",
"1",
")",
")",
"for",
"u",
"in",
"abc",
"]",
"else",
":",
"(",
"a",
",",
"b",
",",
"c",
")",
"=",
"abc",
"vca",
"=",
"a",
"-",
"c",
"vcb",
"=",
"b",
"-",
"c",
"uba",
"=",
"czdivide",
"(",
"vba",
",",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"vba",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
")",
"uca",
"=",
"czdivide",
"(",
"vca",
",",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"vca",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
")",
"return",
"(",
"np",
".",
"isclose",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"vca",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
",",
"0",
")",
"|",
"np",
".",
"isclose",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"vcb",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
",",
"0",
")",
"|",
"np",
".",
"isclose",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"sum",
"(",
"uba",
"*",
"uca",
",",
"axis",
"=",
"0",
")",
")",
",",
"1",
")",
")"
]
| point_on_line((a,b), c) yields True if point c is on line (a,b) and False otherwise. | [
"point_on_line",
"((",
"a",
"b",
")",
"c",
")",
"yields",
"True",
"if",
"point",
"x",
"is",
"on",
"line",
"(",
"a",
"b",
")",
"and",
"False",
"otherwise",
"."
]
| python | train |
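The test reduces to a collinearity check: c lies on line (a, b) when the unit vectors from c to a and from c to b are (anti)parallel, or c coincides with an endpoint. A small standalone NumPy version of that idea:
import numpy as np

def collinear(a, b, c, tol=1e-9):
    vca = np.asarray(a, dtype=float) - c
    vcb = np.asarray(b, dtype=float) - c
    na, nb = np.linalg.norm(vca), np.linalg.norm(vcb)
    if na < tol or nb < tol:                      # c sits on an endpoint
        return True
    return abs(np.dot(vca / na, vcb / nb)) >= 1 - tol

print(collinear([0, 0], [2, 2], [1, 1]))  # True
print(collinear([0, 0], [2, 2], [1, 0]))  # False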
pyopenapi/pyswagger | pyswagger/utils.py | https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/utils.py#L232-L242 | def jp_compose(s, base=None):
""" append/encode a string to json-pointer
"""
if s == None:
return base
ss = [s] if isinstance(s, six.string_types) else s
ss = [s.replace('~', '~0').replace('/', '~1') for s in ss]
if base:
ss.insert(0, base)
return '/'.join(ss) | [
"def",
"jp_compose",
"(",
"s",
",",
"base",
"=",
"None",
")",
":",
"if",
"s",
"==",
"None",
":",
"return",
"base",
"ss",
"=",
"[",
"s",
"]",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
"else",
"s",
"ss",
"=",
"[",
"s",
".",
"replace",
"(",
"'~'",
",",
"'~0'",
")",
".",
"replace",
"(",
"'/'",
",",
"'~1'",
")",
"for",
"s",
"in",
"ss",
"]",
"if",
"base",
":",
"ss",
".",
"insert",
"(",
"0",
",",
"base",
")",
"return",
"'/'",
".",
"join",
"(",
"ss",
")"
]
| append/encode a string to json-pointer | [
"append",
"/",
"encode",
"a",
"string",
"to",
"json",
"-",
"pointer"
]
| python | train |
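The two substitutions follow RFC 6901: '~' becomes '~0' and '/' becomes '~1', then segments are joined with '/'. For instance:
print(jp_compose('a/b'))                          # a~1b
print(jp_compose('x~y', base='#/definitions'))    # #/definitions/x~0y
print(jp_compose(['paths', '/pets'], base='#'))   # #/paths/~1pets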
Fantomas42/django-blog-zinnia | zinnia/templatetags/zinnia.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L65-L72 | def get_categories_tree(context, template='zinnia/tags/categories_tree.html'):
"""
Return the categories as a tree.
"""
return {'template': template,
'categories': Category.objects.all().annotate(
count_entries=Count('entries')),
'context_category': context.get('category')} | [
"def",
"get_categories_tree",
"(",
"context",
",",
"template",
"=",
"'zinnia/tags/categories_tree.html'",
")",
":",
"return",
"{",
"'template'",
":",
"template",
",",
"'categories'",
":",
"Category",
".",
"objects",
".",
"all",
"(",
")",
".",
"annotate",
"(",
"count_entries",
"=",
"Count",
"(",
"'entries'",
")",
")",
",",
"'context_category'",
":",
"context",
".",
"get",
"(",
"'category'",
")",
"}"
]
| Return the categories as a tree. | [
"Return",
"the",
"categories",
"as",
"a",
"tree",
"."
]
| python | train |
Rambatino/CHAID | CHAID/column.py | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/column.py#L104-L136 | def substitute_values(self, vect):
"""
Internal method to substitute integers into the vector, and construct
metadata to convert back to the original vector.
np.nan is always given -1, all other objects are given integers in
order of appearance.
Parameters
----------
vect : np.array
the vector in which to substitute values in
"""
try:
unique = np.unique(vect)
except:
unique = set(vect)
unique = [
x for x in unique if not isinstance(x, float) or not isnan(x)
]
arr = np.copy(vect)
for new_id, value in enumerate(unique):
np.place(arr, arr==value, new_id)
self.metadata[new_id] = value
arr = arr.astype(np.float)
np.place(arr, np.isnan(arr), -1)
self.arr = arr
if -1 in arr:
self.metadata[-1] = self._missing_id | [
"def",
"substitute_values",
"(",
"self",
",",
"vect",
")",
":",
"try",
":",
"unique",
"=",
"np",
".",
"unique",
"(",
"vect",
")",
"except",
":",
"unique",
"=",
"set",
"(",
"vect",
")",
"unique",
"=",
"[",
"x",
"for",
"x",
"in",
"unique",
"if",
"not",
"isinstance",
"(",
"x",
",",
"float",
")",
"or",
"not",
"isnan",
"(",
"x",
")",
"]",
"arr",
"=",
"np",
".",
"copy",
"(",
"vect",
")",
"for",
"new_id",
",",
"value",
"in",
"enumerate",
"(",
"unique",
")",
":",
"np",
".",
"place",
"(",
"arr",
",",
"arr",
"==",
"value",
",",
"new_id",
")",
"self",
".",
"metadata",
"[",
"new_id",
"]",
"=",
"value",
"arr",
"=",
"arr",
".",
"astype",
"(",
"np",
".",
"float",
")",
"np",
".",
"place",
"(",
"arr",
",",
"np",
".",
"isnan",
"(",
"arr",
")",
",",
"-",
"1",
")",
"self",
".",
"arr",
"=",
"arr",
"if",
"-",
"1",
"in",
"arr",
":",
"self",
".",
"metadata",
"[",
"-",
"1",
"]",
"=",
"self",
".",
"_missing_id"
]
| Internal method to substitute integers into the vector, and construct
metadata to convert back to the original vector.
np.nan is always given -1, all other objects are given integers in
order of apperence.
Parameters
----------
vect : np.array
the vector in which to substitute values in | [
"Internal",
"method",
"to",
"substitute",
"integers",
"into",
"the",
"vector",
"and",
"construct",
"metadata",
"to",
"convert",
"back",
"to",
"the",
"original",
"vector",
"."
]
| python | train |
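The effect on a small numeric vector, sketched with the same NumPy calls: each distinct value is mapped to 0, 1, ... and NaN becomes -1, with metadata holding the reverse mapping.
import numpy as np

vect = np.array([3.0, 1.0, 3.0, np.nan])
unique = [x for x in np.unique(vect) if not np.isnan(x)]   # [1.0, 3.0]
arr, metadata = np.copy(vect), {}
for new_id, value in enumerate(unique):
    np.place(arr, arr == value, new_id)
    metadata[new_id] = value
np.place(arr, np.isnan(arr), -1)
print(arr.tolist(), metadata)   # [1.0, 0.0, 1.0, -1.0] {0: 1.0, 1: 3.0}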
madsbk/lrcloud | lrcloud/__main__.py | https://github.com/madsbk/lrcloud/blob/8d99be3e1abdf941642e9a1c86b7d775dc373c0b/lrcloud/__main__.py#L74-L81 | def hashsum(filename):
"""Return a hash of the file From <http://stackoverflow.com/a/7829658>"""
with open(filename, mode='rb') as f:
d = hashlib.sha1()
for buf in iter(partial(f.read, 2**20), b''):
d.update(buf)
return d.hexdigest() | [
"def",
"hashsum",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"d",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"for",
"buf",
"in",
"iter",
"(",
"partial",
"(",
"f",
".",
"read",
",",
"2",
"**",
"20",
")",
",",
"b''",
")",
":",
"d",
".",
"update",
"(",
"buf",
")",
"return",
"d",
".",
"hexdigest",
"(",
")"
]
| Return a hash of the file From <http://stackoverflow.com/a/7829658> | [
"Return",
"a",
"hash",
"of",
"the",
"file",
"From",
"<http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"7829658",
">"
]
| python | valid |
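Reading in fixed 2**20-byte (1 MiB) chunks keeps memory use flat for arbitrarily large files; the same idiom works with any hashlib algorithm:
import hashlib
from functools import partial

def sha256sum(filename, chunk=2**20):
    d = hashlib.sha256()
    with open(filename, 'rb') as f:
        for buf in iter(partial(f.read, chunk), b''):
            d.update(buf)
    return d.hexdigest()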
cprogrammer1994/ModernGL.ext.obj | ModernGL/ext/obj/objects.py | https://github.com/cprogrammer1994/ModernGL.ext.obj/blob/84ef626166dc9a2520512158f1746c8bac0d95d2/ModernGL/ext/obj/objects.py#L83-L174 | def fromstring(data) -> 'Obj':
'''
Args:
data (str): The obj file content.
Returns:
Obj: The object.
Examples:
.. code-block:: python
import ModernGL
from ModernGL.ext import obj
content = open('box.obj').read()
model = obj.Obj.fromstring(content)
'''
vert = []
text = []
norm = []
face = []
data = RE_COMMENT.sub('\n', data)
for line in data.splitlines():
line = line.strip()
if not line:
continue
match = RE_VERT.match(line)
if match:
vert.append(tuple(map(safe_float, match.groups())))
continue
match = RE_TEXT.match(line)
if match:
text.append(tuple(map(safe_float, match.groups())))
continue
match = RE_NORM.match(line)
if match:
norm.append(tuple(map(safe_float, match.groups())))
continue
match = RE_TRIANGLE_FACE.match(line)
if match:
v, t, n = match.group(1, 3, 5)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(6, 8, 10)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(11, 13, 15)
face.append((int(v), int_or_none(t), int_or_none(n)))
continue
match = RE_QUAD_FACE.match(line)
if match:
# we convert the face in two triangles
v, t, n = match.group(1, 3, 5)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(6, 8, 10)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(11, 13, 15)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(1, 3, 5)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(11, 13, 15)
face.append((int(v), int_or_none(t), int_or_none(n)))
v, t, n = match.group(16, 18, 20)
face.append((int(v), int_or_none(t), int_or_none(n)))
continue
log.debug('unknown line "%s"', line)
if not face:
raise Exception('empty')
t0, n0 = face[0][1:3]
for v, t, n in face:
if (t0 is None) ^ (t is None):
raise Exception('inconsinstent')
if (n0 is None) ^ (n is None):
raise Exception('inconsinstent')
return Obj(vert, text, norm, face) | [
"def",
"fromstring",
"(",
"data",
")",
"->",
"'Obj'",
":",
"vert",
"=",
"[",
"]",
"text",
"=",
"[",
"]",
"norm",
"=",
"[",
"]",
"face",
"=",
"[",
"]",
"data",
"=",
"RE_COMMENT",
".",
"sub",
"(",
"'\\n'",
",",
"data",
")",
"for",
"line",
"in",
"data",
".",
"splitlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
":",
"continue",
"match",
"=",
"RE_VERT",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"vert",
".",
"append",
"(",
"tuple",
"(",
"map",
"(",
"safe_float",
",",
"match",
".",
"groups",
"(",
")",
")",
")",
")",
"continue",
"match",
"=",
"RE_TEXT",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"text",
".",
"append",
"(",
"tuple",
"(",
"map",
"(",
"safe_float",
",",
"match",
".",
"groups",
"(",
")",
")",
")",
")",
"continue",
"match",
"=",
"RE_NORM",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"norm",
".",
"append",
"(",
"tuple",
"(",
"map",
"(",
"safe_float",
",",
"match",
".",
"groups",
"(",
")",
")",
")",
")",
"continue",
"match",
"=",
"RE_TRIANGLE_FACE",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"1",
",",
"3",
",",
"5",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"6",
",",
"8",
",",
"10",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"11",
",",
"13",
",",
"15",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"continue",
"match",
"=",
"RE_QUAD_FACE",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"# we convert the face in two triangles",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"1",
",",
"3",
",",
"5",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"6",
",",
"8",
",",
"10",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"11",
",",
"13",
",",
"15",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"1",
",",
"3",
",",
"5",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"11",
",",
"13",
",",
"15",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"v",
",",
"t",
",",
"n",
"=",
"match",
".",
"group",
"(",
"16",
",",
"18",
",",
"20",
")",
"face",
".",
"append",
"(",
"(",
"int",
"(",
"v",
")",
",",
"int_or_none",
"(",
"t",
")",
",",
"int_or_none",
"(",
"n",
")",
")",
")",
"continue",
"log",
".",
"debug",
"(",
"'unknown line \"%s\"'",
",",
"line",
")",
"if",
"not",
"face",
":",
"raise",
"Exception",
"(",
"'empty'",
")",
"t0",
",",
"n0",
"=",
"face",
"[",
"0",
"]",
"[",
"1",
":",
"3",
"]",
"for",
"v",
",",
"t",
",",
"n",
"in",
"face",
":",
"if",
"(",
"t0",
"is",
"None",
")",
"^",
"(",
"t",
"is",
"None",
")",
":",
"raise",
"Exception",
"(",
"'inconsinstent'",
")",
"if",
"(",
"n0",
"is",
"None",
")",
"^",
"(",
"n",
"is",
"None",
")",
":",
"raise",
"Exception",
"(",
"'inconsinstent'",
")",
"return",
"Obj",
"(",
"vert",
",",
"text",
",",
"norm",
",",
"face",
")"
]
| Args:
data (str): The obj file content.
Returns:
Obj: The object.
Examples:
.. code-block:: python
import ModernGL
from ModernGL.ext import obj
content = open('box.obj').read()
model = obj.Obj.fromstring(content) | [
"Args",
":",
"data",
"(",
"str",
")",
":",
"The",
"obj",
"file",
"content",
"."
]
| python | train |
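A tiny OBJ document for trying the parser: standard v/vt/vn lines plus one fully-specified triangle face, the face form the patterns above target (the exact set of accepted variants is an assumption here):
data = """
v 0.0 0.0 0.0
v 1.0 0.0 0.0
v 0.0 1.0 0.0
vt 0.0 0.0
vt 1.0 0.0
vt 0.0 1.0
vn 0.0 0.0 1.0
f 1/1/1 2/2/1 3/3/1
"""
model = Obj.fromstring(data)   # fills the (vert, text, norm, face) lists shown above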
relwell/corenlp-xml-lib | corenlp_xml/document.py | https://github.com/relwell/corenlp-xml-lib/blob/9b0f8c912ba3ecedd34473f74a9f2d033a75baf9/corenlp_xml/document.py#L261-L273 | def collapsed_dependencies(self):
"""
Accesses collapsed dependencies for this sentence
:getter: Returns the dependency graph for collapsed dependencies
:type: corenlp_xml.dependencies.DependencyGraph
"""
if self._basic_dependencies is None:
deps = self._element.xpath('dependencies[@type="collapsed-dependencies"]')
if len(deps) > 0:
self._basic_dependencies = DependencyGraph(deps[0])
return self._basic_dependencies | [
"def",
"collapsed_dependencies",
"(",
"self",
")",
":",
"if",
"self",
".",
"_basic_dependencies",
"is",
"None",
":",
"deps",
"=",
"self",
".",
"_element",
".",
"xpath",
"(",
"'dependencies[@type=\"collapsed-dependencies\"]'",
")",
"if",
"len",
"(",
"deps",
")",
">",
"0",
":",
"self",
".",
"_basic_dependencies",
"=",
"DependencyGraph",
"(",
"deps",
"[",
"0",
"]",
")",
"return",
"self",
".",
"_basic_dependencies"
]
| Accessess collapsed dependencies for this sentence
:getter: Returns the dependency graph for collapsed dependencies
:type: corenlp_xml.dependencies.DependencyGraph | [
"Accessess",
"collapsed",
"dependencies",
"for",
"this",
"sentence"
]
| python | train |
SmokinCaterpillar/pypet | pypet/trajectory.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/trajectory.py#L1306-L1420 | def f_explore(self, build_dict):
"""Prepares the trajectory to explore the parameter space.
To explore the parameter space you need to provide a dictionary with the names of the
parameters to explore as keys and iterables specifying the exploration ranges as values.
All iterables need to have the same length otherwise a ValueError is raised.
A ValueError is also raised if the names from the dictionary map to groups or results
and not parameters.
If your trajectory is already explored but not stored yet and your parameters are
not locked you can add new explored parameters to the current ones if their
iterables match the current length of the trajectory.
Raises an AttributeError if the names from the dictionary are not found at all in
the trajectory and NotUniqueNodeError if the keys do not unambiguously map
to single parameters.
Raises a TypeError if the trajectory has been stored already, please use
:func:`~pypet.trajectory.Trajectory.f_expand` then instead.
Example usage:
>>> traj.f_explore({'groupA.param1' : [1,2,3,4,5], 'groupA.param2':['a','b','c','d','e']})
Could also be called consecutively:
>>> traj.f_explore({'groupA.param1' : [1,2,3,4,5]})
>>> traj.f_explore({'groupA.param2':['a','b','c','d','e']})
NOTE:
Since parameters are very conservative regarding the data they accept
(see :ref:`type_conservation`), you sometimes won't be able to use Numpy arrays
for exploration as iterables.
For instance, the following code snippet won't work:
::
import numpy as np
from pypet.trajectory import Trajectory
traj = Trajectory()
traj.f_add_parameter('my_float_parameter', 42.4,
comment='My value is a standard python float')
traj.f_explore( { 'my_float_parameter': np.arange(42.0, 44.876, 0.23) } )
This will result in a `TypeError` because your exploration iterable
`np.arange(42.0, 44.876, 0.23)` contains `numpy.float64` values
whereas your parameter is supposed to use standard python floats.
Yet, you can use Numpys `tolist()` function to overcome this problem:
::
traj.f_explore( { 'my_float_parameter': np.arange(42.0, 44.876, 0.23).tolist() } )
Or you could specify your parameter directly as a numpy float:
::
traj.f_add_parameter('my_float_parameter', np.float64(42.4),
comment='My value is a numpy 64 bit float')
"""
for run_idx in range(len(self)):
if self.f_is_completed(run_idx):
raise TypeError('You cannot explore a trajectory which has been explored before, '
'please use `f_expand` instead.')
added_explored_parameters = []
try:
length = len(self)
for key, builditerable in build_dict.items():
act_param = self.f_get(key)
if not act_param.v_is_leaf or not act_param.v_is_parameter:
raise ValueError('%s is not an appropriate search string for a parameter.' % key)
act_param.f_unlock()
act_param._explore(builditerable)
added_explored_parameters.append(act_param)
full_name = act_param.v_full_name
self._explored_parameters[full_name] = act_param
act_param._explored = True
# Compare the length of two consecutive parameters in the `build_dict`
if len(self._explored_parameters) == 1:
length = act_param.f_get_range_length()
elif not length == act_param.f_get_range_length():
raise ValueError('The parameters to explore have not the same size!')
for irun in range(length):
self._add_run_info(irun)
self._test_run_addition(length)
except Exception:
# Remove the added parameters again
for param in added_explored_parameters:
param.f_unlock()
param._shrink()
param._explored = False
full_name = param.v_full_name
del self._explored_parameters[full_name]
if len(self._explored_parameters) == 0:
self.f_shrink(force=True)
raise | [
"def",
"f_explore",
"(",
"self",
",",
"build_dict",
")",
":",
"for",
"run_idx",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
":",
"if",
"self",
".",
"f_is_completed",
"(",
"run_idx",
")",
":",
"raise",
"TypeError",
"(",
"'You cannot explore a trajectory which has been explored before, '",
"'please use `f_expand` instead.'",
")",
"added_explored_parameters",
"=",
"[",
"]",
"try",
":",
"length",
"=",
"len",
"(",
"self",
")",
"for",
"key",
",",
"builditerable",
"in",
"build_dict",
".",
"items",
"(",
")",
":",
"act_param",
"=",
"self",
".",
"f_get",
"(",
"key",
")",
"if",
"not",
"act_param",
".",
"v_is_leaf",
"or",
"not",
"act_param",
".",
"v_is_parameter",
":",
"raise",
"ValueError",
"(",
"'%s is not an appropriate search string for a parameter.'",
"%",
"key",
")",
"act_param",
".",
"f_unlock",
"(",
")",
"act_param",
".",
"_explore",
"(",
"builditerable",
")",
"added_explored_parameters",
".",
"append",
"(",
"act_param",
")",
"full_name",
"=",
"act_param",
".",
"v_full_name",
"self",
".",
"_explored_parameters",
"[",
"full_name",
"]",
"=",
"act_param",
"act_param",
".",
"_explored",
"=",
"True",
"# Compare the length of two consecutive parameters in the `build_dict`",
"if",
"len",
"(",
"self",
".",
"_explored_parameters",
")",
"==",
"1",
":",
"length",
"=",
"act_param",
".",
"f_get_range_length",
"(",
")",
"elif",
"not",
"length",
"==",
"act_param",
".",
"f_get_range_length",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'The parameters to explore have not the same size!'",
")",
"for",
"irun",
"in",
"range",
"(",
"length",
")",
":",
"self",
".",
"_add_run_info",
"(",
"irun",
")",
"self",
".",
"_test_run_addition",
"(",
"length",
")",
"except",
"Exception",
":",
"# Remove the added parameters again",
"for",
"param",
"in",
"added_explored_parameters",
":",
"param",
".",
"f_unlock",
"(",
")",
"param",
".",
"_shrink",
"(",
")",
"param",
".",
"_explored",
"=",
"False",
"full_name",
"=",
"param",
".",
"v_full_name",
"del",
"self",
".",
"_explored_parameters",
"[",
"full_name",
"]",
"if",
"len",
"(",
"self",
".",
"_explored_parameters",
")",
"==",
"0",
":",
"self",
".",
"f_shrink",
"(",
"force",
"=",
"True",
")",
"raise"
]
| Prepares the trajectory to explore the parameter space.
To explore the parameter space you need to provide a dictionary with the names of the
parameters to explore as keys and iterables specifying the exploration ranges as values.
All iterables need to have the same length otherwise a ValueError is raised.
A ValueError is also raised if the names from the dictionary map to groups or results
and not parameters.
If your trajectory is already explored but not stored yet and your parameters are
not locked you can add new explored parameters to the current ones if their
iterables match the current length of the trajectory.
Raises an AttributeError if the names from the dictionary are not found at all in
the trajectory and NotUniqueNodeError if the keys not unambiguously map
to single parameters.
Raises a TypeError if the trajectory has been stored already, please use
:func:`~pypet.trajectory.Trajectory.f_expand` then instead.
Example usage:
>>> traj.f_explore({'groupA.param1' : [1,2,3,4,5], 'groupA.param2':['a','b','c','d','e']})
Could also be called consecutively:
>>> traj.f_explore({'groupA.param1' : [1,2,3,4,5]})
>>> traj.f_explore({'groupA.param2':['a','b','c','d','e']})
NOTE:
Since parameters are very conservative regarding the data they accept
(see :ref:`type_conservation`), you sometimes won't be able to use Numpy arrays
for exploration as iterables.
For instance, the following code snippet won't work:
::
import numpy as np
from pypet.trajectory import Trajectory
traj = Trajectory()
traj.f_add_parameter('my_float_parameter', 42.4,
comment='My value is a standard python float')
traj.f_explore( { 'my_float_parameter': np.arange(42.0, 44.876, 0.23) } )
This will result in a `TypeError` because your exploration iterable
`np.arange(42.0, 44.876, 0.23)` contains `numpy.float64` values
whereas your parameter is supposed to use standard python floats.
Yet, you can use Numpys `tolist()` function to overcome this problem:
::
traj.f_explore( { 'my_float_parameter': np.arange(42.0, 44.876, 0.23).tolist() } )
Or you could specify your parameter directly as a numpy float:
::
traj.f_add_parameter('my_float_parameter', np.float64(42.4),
comment='My value is a numpy 64 bit float') | [
"Prepares",
"the",
"trajectory",
"to",
"explore",
"the",
"parameter",
"space",
"."
]
| python | test |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/psutil/_pslinux.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/_pslinux.py#L197-L214 | def get_system_per_cpu_times():
"""Return a list of namedtuple representing the CPU times
for every CPU available on the system.
"""
cpus = []
f = open('/proc/stat', 'r')
# get rid of the first line which refers to system wide CPU stats
try:
f.readline()
for line in f.readlines():
if line.startswith('cpu'):
values = line.split()[1:8]
values = tuple([float(x) / _CLOCK_TICKS for x in values])
entry = nt_sys_cputimes(*values[:7])
cpus.append(entry)
return cpus
finally:
f.close() | [
"def",
"get_system_per_cpu_times",
"(",
")",
":",
"cpus",
"=",
"[",
"]",
"f",
"=",
"open",
"(",
"'/proc/stat'",
",",
"'r'",
")",
"# get rid of the first line who refers to system wide CPU stats",
"try",
":",
"f",
".",
"readline",
"(",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'cpu'",
")",
":",
"values",
"=",
"line",
".",
"split",
"(",
")",
"[",
"1",
":",
"8",
"]",
"values",
"=",
"tuple",
"(",
"[",
"float",
"(",
"x",
")",
"/",
"_CLOCK_TICKS",
"for",
"x",
"in",
"values",
"]",
")",
"entry",
"=",
"nt_sys_cputimes",
"(",
"*",
"values",
"[",
":",
"7",
"]",
")",
"cpus",
".",
"append",
"(",
"entry",
")",
"return",
"cpus",
"finally",
":",
"f",
".",
"close",
"(",
")"
]
| Return a list of namedtuple representing the CPU times
for every CPU available on the system. | [
"Return",
"a",
"list",
"of",
"namedtuple",
"representing",
"the",
"CPU",
"times",
"for",
"every",
"CPU",
"available",
"on",
"the",
"system",
"."
]
| python | test |
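A minimal standalone sketch of the /proc/stat walk shown above (Linux only; the 100 Hz clock-tick value is an assumption — the real module derives _CLOCK_TICKS from the system):

CLOCK_TICKS = 100.0  # assumed here; psutil reads this via sysconf
with open('/proc/stat') as f:
    f.readline()  # skip the aggregate "cpu" line, as the function above does
    for line in f:
        if line.startswith('cpu'):
            values = [float(x) / CLOCK_TICKS for x in line.split()[1:8]]
            print(line.split()[0], values)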
icometrix/dicom2nifti | scripts/shrink_singleframe.py | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/scripts/shrink_singleframe.py#L16-L72 | def _shrink_file(dicom_file_in, subsample_factor):
"""
Anonymize a single dicomfile
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep
"""
# Default meta_fields
# Required fields according to reference
dicom_file_out = dicom_file_in
# Load dicom_file_in
dicom_in = compressed_dicom.read_file(dicom_file_in)
# Create new dicom file
# Set new file meta information
file_meta = pydicom.dataset.Dataset()
for key, value in dicom_in.file_meta.items():
file_meta.add(value)
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128)
# Copy transfer syntax
dicom_out.is_little_endian = dicom_in.is_little_endian
dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
rows = 0
columns = 0
# Add the data elements
for field_key, field_value in dicom_in.items():
logging.info(field_key)
if field_key == (0x7fe0, 0x0010):
pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor]
dicom_out.PixelData = pixel_array.tostring() # = byte array (see pydicom docs)
rows = pixel_array.shape[1]
columns = pixel_array.shape[0]
# noinspection PyPep8Naming
dicom_out[0x7fe0, 0x0010].VR = 'OB'
else:
dicom_out.add(field_value)
dicom_out.PixelSpacing[0] *= subsample_factor
dicom_out.PixelSpacing[1] *= subsample_factor
dicom_out.Rows = rows
dicom_out.Columns = columns
# Save dicom_file_out
# Make sure we have a directory
if not os.path.exists(os.path.dirname(dicom_file_out)):
logging.info('Decompressing files')
# Save the file
dicom_out.save_as(dicom_file_out, write_like_original=False) | [
"def",
"_shrink_file",
"(",
"dicom_file_in",
",",
"subsample_factor",
")",
":",
"# Default meta_fields",
"# Required fields according to reference",
"dicom_file_out",
"=",
"dicom_file_in",
"# Load dicom_file_in",
"dicom_in",
"=",
"compressed_dicom",
".",
"read_file",
"(",
"dicom_file_in",
")",
"# Create new dicom file",
"# Set new file meta information",
"file_meta",
"=",
"pydicom",
".",
"dataset",
".",
"Dataset",
"(",
")",
"for",
"key",
",",
"value",
"in",
"dicom_in",
".",
"file_meta",
".",
"items",
"(",
")",
":",
"file_meta",
".",
"add",
"(",
"value",
")",
"# Create the FileDataset instance (initially no data elements, but file_meta supplied)",
"dicom_out",
"=",
"pydicom",
".",
"dataset",
".",
"FileDataset",
"(",
"dicom_file_out",
",",
"{",
"}",
",",
"file_meta",
"=",
"file_meta",
",",
"preamble",
"=",
"b'\\0'",
"*",
"128",
")",
"# Copy transfer syntax",
"dicom_out",
".",
"is_little_endian",
"=",
"dicom_in",
".",
"is_little_endian",
"dicom_out",
".",
"is_implicit_VR",
"=",
"dicom_in",
".",
"is_implicit_VR",
"rows",
"=",
"0",
"columns",
"=",
"0",
"# Add the data elements",
"for",
"field_key",
",",
"field_value",
"in",
"dicom_in",
".",
"items",
"(",
")",
":",
"logging",
".",
"info",
"(",
"field_key",
")",
"if",
"field_key",
"==",
"(",
"0x7fe0",
",",
"0x0010",
")",
":",
"pixel_array",
"=",
"dicom_in",
".",
"pixel_array",
"[",
":",
":",
"subsample_factor",
",",
":",
":",
"subsample_factor",
"]",
"dicom_out",
".",
"PixelData",
"=",
"pixel_array",
".",
"tostring",
"(",
")",
"# = byte array (see pydicom docs)",
"rows",
"=",
"pixel_array",
".",
"shape",
"[",
"1",
"]",
"columns",
"=",
"pixel_array",
".",
"shape",
"[",
"0",
"]",
"# noinspection PyPep8Naming",
"dicom_out",
"[",
"0x7fe0",
",",
"0x0010",
"]",
".",
"VR",
"=",
"'OB'",
"else",
":",
"dicom_out",
".",
"add",
"(",
"field_value",
")",
"dicom_out",
".",
"PixelSpacing",
"[",
"0",
"]",
"*=",
"subsample_factor",
"dicom_out",
".",
"PixelSpacing",
"[",
"1",
"]",
"*=",
"subsample_factor",
"dicom_out",
".",
"Rows",
"=",
"rows",
"dicom_out",
".",
"Columns",
"=",
"columns",
"# Save dicom_file_out",
"# Make sure we have a directory",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dicom_file_out",
")",
")",
":",
"logging",
".",
"info",
"(",
"'Decompressing files'",
")",
"# Save the file",
"dicom_out",
".",
"save_as",
"(",
"dicom_file_out",
",",
"write_like_original",
"=",
"False",
")"
]
| Anonymize a single dicomfile
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep | [
"Anonimize",
"a",
"single",
"dicomfile",
":",
"param",
"dicom_file_in",
":",
"filepath",
"for",
"input",
"file",
":",
"param",
"dicom_file_out",
":",
"filepath",
"for",
"output",
"file",
":",
"param",
"fields_to_keep",
":",
"dicom",
"tags",
"to",
"keep"
]
| python | train |
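A minimal sketch of the subsampling step used above, isolated with a made-up 4x4 NumPy array (every Nth pixel is kept along both axes):

import numpy as np
pixel_array = np.arange(16).reshape(4, 4)
subsample_factor = 2
shrunk = pixel_array[::subsample_factor, ::subsample_factor]
print(shrunk)        # keeps rows/columns 0 and 2 only
print(shrunk.shape)  # (2, 2)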
spyder-ide/spyder | spyder/plugins/workingdirectory/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/workingdirectory/plugin.py#L171-L178 | def save_wdhistory(self):
"""Save history to a text file in user home directory"""
text = [ to_text_string( self.pathedit.itemText(index) ) \
for index in range(self.pathedit.count()) ]
try:
encoding.writelines(text, self.LOG_PATH)
except EnvironmentError:
pass | [
"def",
"save_wdhistory",
"(",
"self",
")",
":",
"text",
"=",
"[",
"to_text_string",
"(",
"self",
".",
"pathedit",
".",
"itemText",
"(",
"index",
")",
")",
"for",
"index",
"in",
"range",
"(",
"self",
".",
"pathedit",
".",
"count",
"(",
")",
")",
"]",
"try",
":",
"encoding",
".",
"writelines",
"(",
"text",
",",
"self",
".",
"LOG_PATH",
")",
"except",
"EnvironmentError",
":",
"pass"
]
| Save history to a text file in user home directory | [
"Save",
"history",
"to",
"a",
"text",
"file",
"in",
"user",
"home",
"directory"
]
| python | train |
numenta/htmresearch | projects/sdr_paper/pytorch_experiments/union_experiment.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/pytorch_experiments/union_experiment.py#L44-L76 | def create_union_mnist_dataset():
"""
Create a UnionDataset composed of two versions of the MNIST datasets
where each item in the dataset contains 2 distinct images superimposed
"""
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
mnist1 = datasets.MNIST('data', train=False, download=True, transform=transform)
data1 = zip(mnist1.test_data, mnist1.test_labels)
# Randomize second dataset
mnist2 = datasets.MNIST('data', train=False, download=True, transform=transform)
data2 = zip(mnist2.test_data, mnist2.test_labels)
random.shuffle(data2)
# Reorder images of second dataset with same label as first dataset
for i in range(len(data2)):
if data1[i][1] == data2[i][1]:
# Swap indices with same label to a location with different label
for j in range(len(data1)):
if data1[i][1] != data2[j][1] and data2[i][1] != data1[j][1]:
swap = data2[j]
data2[j] = data2[i]
data2[i] = swap
break
# Update second dataset with new item order
mnist2.test_data, mnist2.test_labels = zip(*data2)
# Combine the images of both datasets using the maximum value for each pixel
return UnionDataset(datasets=[mnist1, mnist2],
transform=lambda x, y: torch.max(x, y)) | [
"def",
"create_union_mnist_dataset",
"(",
")",
":",
"transform",
"=",
"transforms",
".",
"Compose",
"(",
"[",
"transforms",
".",
"ToTensor",
"(",
")",
",",
"transforms",
".",
"Normalize",
"(",
"(",
"0.1307",
",",
")",
",",
"(",
"0.3081",
",",
")",
")",
"]",
")",
"mnist1",
"=",
"datasets",
".",
"MNIST",
"(",
"'data'",
",",
"train",
"=",
"False",
",",
"download",
"=",
"True",
",",
"transform",
"=",
"transform",
")",
"data1",
"=",
"zip",
"(",
"mnist1",
".",
"test_data",
",",
"mnist1",
".",
"test_labels",
")",
"# Randomize second dataset",
"mnist2",
"=",
"datasets",
".",
"MNIST",
"(",
"'data'",
",",
"train",
"=",
"False",
",",
"download",
"=",
"True",
",",
"transform",
"=",
"transform",
")",
"data2",
"=",
"zip",
"(",
"mnist2",
".",
"test_data",
",",
"mnist2",
".",
"test_labels",
")",
"random",
".",
"shuffle",
"(",
"data2",
")",
"# Reorder images of second dataset with same label as first dataset",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data2",
")",
")",
":",
"if",
"data1",
"[",
"i",
"]",
"[",
"1",
"]",
"==",
"data2",
"[",
"i",
"]",
"[",
"1",
"]",
":",
"# Swap indices with same label to a location with diffent label",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"data1",
")",
")",
":",
"if",
"data1",
"[",
"i",
"]",
"[",
"1",
"]",
"!=",
"data2",
"[",
"j",
"]",
"[",
"1",
"]",
"and",
"data2",
"[",
"i",
"]",
"[",
"1",
"]",
"!=",
"data1",
"[",
"j",
"]",
"[",
"1",
"]",
":",
"swap",
"=",
"data2",
"[",
"j",
"]",
"data2",
"[",
"j",
"]",
"=",
"data2",
"[",
"i",
"]",
"data2",
"[",
"i",
"]",
"=",
"swap",
"break",
"# Update second dataset with new item order",
"mnist2",
".",
"test_data",
",",
"mnist2",
".",
"test_labels",
"=",
"zip",
"(",
"*",
"data2",
")",
"# Combine the images of both datasets using the maximum value for each pixel",
"return",
"UnionDataset",
"(",
"datasets",
"=",
"[",
"mnist1",
",",
"mnist2",
"]",
",",
"transform",
"=",
"lambda",
"x",
",",
"y",
":",
"torch",
".",
"max",
"(",
"x",
",",
"y",
")",
")"
]
| Create a UnionDataset composed of two versions of the MNIST datasets
where each item in the dataset contains 2 distinct images superimposed | [
"Create",
"a",
"UnionDataset",
"composed",
"of",
"two",
"versions",
"of",
"the",
"MNIST",
"datasets",
"where",
"each",
"item",
"in",
"the",
"dataset",
"contains",
"2",
"distinct",
"images",
"superimposed"
]
| python | train |
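A small self-contained illustration of the union step above — two images combined by taking the per-pixel maximum; np.maximum stands in for torch.max and the 2x2 arrays are made-up data:

import numpy as np
a = np.array([[0, 200], [50, 0]], dtype=np.uint8)
b = np.array([[100, 0], [0, 30]], dtype=np.uint8)
print(np.maximum(a, b))  # [[100 200]
                         #  [ 50  30]]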
ArchiveTeam/wpull | wpull/scraper/html.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/scraper/html.py#L656-L663 | def is_html_link(cls, tag, attribute):
'''Return whether the link is likely to be external object.'''
if tag in cls.TAG_ATTRIBUTES \
and attribute in cls.TAG_ATTRIBUTES[tag]:
attr_flags = cls.TAG_ATTRIBUTES[tag][attribute]
return attr_flags & cls.ATTR_HTML
return attribute == 'href' | [
"def",
"is_html_link",
"(",
"cls",
",",
"tag",
",",
"attribute",
")",
":",
"if",
"tag",
"in",
"cls",
".",
"TAG_ATTRIBUTES",
"and",
"attribute",
"in",
"cls",
".",
"TAG_ATTRIBUTES",
"[",
"tag",
"]",
":",
"attr_flags",
"=",
"cls",
".",
"TAG_ATTRIBUTES",
"[",
"tag",
"]",
"[",
"attribute",
"]",
"return",
"attr_flags",
"&",
"cls",
".",
"ATTR_HTML",
"return",
"attribute",
"==",
"'href'"
]
| Return whether the link is likely to be external object. | [
"Return",
"whether",
"the",
"link",
"is",
"likely",
"to",
"be",
"external",
"object",
"."
]
| python | train |
wakatime/wakatime | wakatime/arguments.py | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/arguments.py#L340-L364 | def boolean_or_list(config_name, args, configs, alternative_names=[]):
"""Get a boolean or list of regexes from args and configs."""
# when argument flag present, set to wildcard regex
for key in alternative_names + [config_name]:
if hasattr(args, key) and getattr(args, key):
setattr(args, config_name, ['.*'])
return
setattr(args, config_name, [])
option = None
alternative_names.insert(0, config_name)
for key in alternative_names:
if configs.has_option('settings', key):
option = configs.get('settings', key)
break
if option is not None:
if option.strip().lower() == 'true':
setattr(args, config_name, ['.*'])
elif option.strip().lower() != 'false':
for pattern in option.split("\n"):
if pattern.strip() != '':
getattr(args, config_name).append(pattern) | [
"def",
"boolean_or_list",
"(",
"config_name",
",",
"args",
",",
"configs",
",",
"alternative_names",
"=",
"[",
"]",
")",
":",
"# when argument flag present, set to wildcard regex",
"for",
"key",
"in",
"alternative_names",
"+",
"[",
"config_name",
"]",
":",
"if",
"hasattr",
"(",
"args",
",",
"key",
")",
"and",
"getattr",
"(",
"args",
",",
"key",
")",
":",
"setattr",
"(",
"args",
",",
"config_name",
",",
"[",
"'.*'",
"]",
")",
"return",
"setattr",
"(",
"args",
",",
"config_name",
",",
"[",
"]",
")",
"option",
"=",
"None",
"alternative_names",
".",
"insert",
"(",
"0",
",",
"config_name",
")",
"for",
"key",
"in",
"alternative_names",
":",
"if",
"configs",
".",
"has_option",
"(",
"'settings'",
",",
"key",
")",
":",
"option",
"=",
"configs",
".",
"get",
"(",
"'settings'",
",",
"key",
")",
"break",
"if",
"option",
"is",
"not",
"None",
":",
"if",
"option",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'true'",
":",
"setattr",
"(",
"args",
",",
"config_name",
",",
"[",
"'.*'",
"]",
")",
"elif",
"option",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"!=",
"'false'",
":",
"for",
"pattern",
"in",
"option",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"pattern",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"getattr",
"(",
"args",
",",
"config_name",
")",
".",
"append",
"(",
"pattern",
")"
]
| Get a boolean or list of regexes from args and configs. | [
"Get",
"a",
"boolean",
"or",
"list",
"of",
"regexes",
"from",
"args",
"and",
"configs",
"."
]
| python | train |
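A hedged illustration of the "true / false / newline-separated regexes" convention that boolean_or_list reads; the [settings] option name and patterns below are made up, and the standard-library ConfigParser stands in for wakatime's configs object:

from configparser import ConfigParser
cfg = ConfigParser()
cfg.read_string("[settings]\nexclude =\n    .*/tmp/.*\n    ^/private/.*\n")
option = cfg.get('settings', 'exclude')
if option.strip().lower() == 'true':
    patterns = ['.*']
elif option.strip().lower() == 'false':
    patterns = []
else:
    patterns = [p for p in option.split("\n") if p.strip() != '']
print(patterns)  # ['.*/tmp/.*', '^/private/.*']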
mozilla/mozdownload | mozdownload/parser.py | https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L53-L59 | def filter(self, filter):
"""Filter entries by calling function or applying regex."""
if hasattr(filter, '__call__'):
return [entry for entry in self.entries if filter(entry)]
else:
pattern = re.compile(filter, re.IGNORECASE)
return [entry for entry in self.entries if pattern.match(entry)] | [
"def",
"filter",
"(",
"self",
",",
"filter",
")",
":",
"if",
"hasattr",
"(",
"filter",
",",
"'__call__'",
")",
":",
"return",
"[",
"entry",
"for",
"entry",
"in",
"self",
".",
"entries",
"if",
"filter",
"(",
"entry",
")",
"]",
"else",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"filter",
",",
"re",
".",
"IGNORECASE",
")",
"return",
"[",
"entry",
"for",
"entry",
"in",
"self",
".",
"entries",
"if",
"pattern",
".",
"match",
"(",
"entry",
")",
"]"
]
| Filter entries by calling function or applying regex. | [
"Filter",
"entries",
"by",
"calling",
"function",
"or",
"applying",
"regex",
"."
]
| python | train |
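A runnable sketch of the two branches of filter() above — callable predicate versus case-insensitive regex — using a hypothetical stand-in class and made-up entries rather than a real directory listing:

import re

class TinyParser:
    def __init__(self, entries):
        self.entries = entries
    def filter(self, filter):
        # same branching as above: callable predicate or regex string
        if hasattr(filter, '__call__'):
            return [entry for entry in self.entries if filter(entry)]
        pattern = re.compile(filter, re.IGNORECASE)
        return [entry for entry in self.entries if pattern.match(entry)]

p = TinyParser(['firefox-65.0.tar.bz2', 'Firefox-66.0.exe', 'notes.txt'])
print(p.filter(lambda e: e.endswith('.exe')))  # ['Firefox-66.0.exe']
print(p.filter(r'firefox-\d+'))                # both Firefox entries, case-insensitive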
dsoprea/PySchedules | pyschedules/examples/read.py | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L55-L61 | def new_lineup(self, name, location, device, _type, postalCode, _id):
"""Callback run for each new lineup"""
if self.__v_lineup:
# [Lineup: Comcast West Palm Beach /Palm Beach Co., West Palm Beach, Digital, CableDigital, 33436, FL09567:X]
print("[Lineup: %s, %s, %s, %s, %s, %s]" %
(name, location, device, _type, postalCode, _id)) | [
"def",
"new_lineup",
"(",
"self",
",",
"name",
",",
"location",
",",
"device",
",",
"_type",
",",
"postalCode",
",",
"_id",
")",
":",
"if",
"self",
".",
"__v_lineup",
":",
"# [Lineup: Comcast West Palm Beach /Palm Beach Co., West Palm Beach, Digital, CableDigital, 33436, FL09567:X]",
"print",
"(",
"\"[Lineup: %s, %s, %s, %s, %s, %s]\"",
"%",
"(",
"name",
",",
"location",
",",
"device",
",",
"_type",
",",
"postalCode",
",",
"_id",
")",
")"
]
| Callback run for each new lineup | [
"Callback",
"run",
"for",
"each",
"new",
"lineup"
]
| python | train |
Fantomas42/django-blog-zinnia | zinnia/spam_checker/__init__.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/spam_checker/__init__.py#L10-L25 | def get_spam_checker(backend_path):
"""
Return the selected spam checker backend.
"""
try:
backend_module = import_module(backend_path)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % backend_path,
RuntimeWarning)
backend = None
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = None
return backend | [
"def",
"get_spam_checker",
"(",
"backend_path",
")",
":",
"try",
":",
"backend_module",
"=",
"import_module",
"(",
"backend_path",
")",
"backend",
"=",
"getattr",
"(",
"backend_module",
",",
"'backend'",
")",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
":",
"warnings",
".",
"warn",
"(",
"'%s backend cannot be imported'",
"%",
"backend_path",
",",
"RuntimeWarning",
")",
"backend",
"=",
"None",
"except",
"ImproperlyConfigured",
"as",
"e",
":",
"warnings",
".",
"warn",
"(",
"str",
"(",
"e",
")",
",",
"RuntimeWarning",
")",
"backend",
"=",
"None",
"return",
"backend"
]
| Return the selected spam checker backend. | [
"Return",
"the",
"selected",
"spam",
"checker",
"backend",
"."
]
| python | train |
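The same import-or-warn fallback pattern as get_spam_checker, in a generic runnable form; the module path below is deliberately bogus to trigger the fallback, and the Django-specific ImproperlyConfigured branch is omitted:

import warnings
from importlib import import_module

def load_backend(backend_path):
    # import the module, fetch its module-level `backend` attribute,
    # otherwise warn and return None -- mirroring the function above
    try:
        return getattr(import_module(backend_path), 'backend')
    except (ImportError, AttributeError):
        warnings.warn('%s backend cannot be imported' % backend_path, RuntimeWarning)
        return None

print(load_backend('no.such.module'))  # emits a RuntimeWarning, prints None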
tensorflow/tensor2tensor | tensor2tensor/data_generators/image_utils.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L43-L62 | def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum) | [
"def",
"image_to_tf_summary_value",
"(",
"image",
",",
"tag",
")",
":",
"curr_image",
"=",
"np",
".",
"asarray",
"(",
"image",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"height",
",",
"width",
",",
"n_channels",
"=",
"curr_image",
".",
"shape",
"# If monochrome image, then reshape to [height, width]",
"if",
"n_channels",
"==",
"1",
":",
"curr_image",
"=",
"np",
".",
"reshape",
"(",
"curr_image",
",",
"[",
"height",
",",
"width",
"]",
")",
"s",
"=",
"io",
".",
"BytesIO",
"(",
")",
"matplotlib_pyplot",
"(",
")",
".",
"imsave",
"(",
"s",
",",
"curr_image",
",",
"format",
"=",
"\"png\"",
")",
"img_sum",
"=",
"tf",
".",
"Summary",
".",
"Image",
"(",
"encoded_image_string",
"=",
"s",
".",
"getvalue",
"(",
")",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
",",
"colorspace",
"=",
"n_channels",
")",
"return",
"tf",
".",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"image",
"=",
"img_sum",
")"
]
| Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object. | [
"Converts",
"a",
"NumPy",
"image",
"to",
"a",
"tf",
".",
"Summary",
".",
"Value",
"object",
"."
]
| python | train |
cltk/cltk | cltk/phonology/old_english/phonology.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/old_english/phonology.py#L283-L306 | def ascii_encoding(self):
"""
:return: str: Returns the ASCII-encoded string
Thorn (Þ, þ) and Ash(Æ, æ) are substituted by the digraphs
'th' and 'ae' respectively. Wynn(Ƿ, ƿ) and Eth(Ð, ð) are replaced
by 'w' and 'd'.
Examples:
>>> Word('ġelǣd').ascii_encoding()
'gelaed'
>>> Word('ƿeorðunga').ascii_encoding()
'weordunga'
"""
w = self.remove_diacritics()
for k, val in zip(Normalize.keys(), Normalize.values()):
w = w.replace(k, val)
return w | [
"def",
"ascii_encoding",
"(",
"self",
")",
":",
"w",
"=",
"self",
".",
"remove_diacritics",
"(",
")",
"for",
"k",
",",
"val",
"in",
"zip",
"(",
"Normalize",
".",
"keys",
"(",
")",
",",
"Normalize",
".",
"values",
"(",
")",
")",
":",
"w",
"=",
"w",
".",
"replace",
"(",
"k",
",",
"val",
")",
"return",
"w"
]
| :return: str: Returns the ASCII-encoded string
Thorn (Þ, þ) and Ash(Æ, æ) are substituted by the digraphs
'th' and 'ae' respectively. Wynn(Ƿ, ƿ) and Eth(Ð, ð) are replaced
by 'w' and 'd'.
Examples:
>>> Word('ġelǣd').ascii_encoding()
'gelaed'
>>> Word('ƿeorðunga').ascii_encoding()
'weordunga' | [
":",
"return",
":",
"str",
":",
"Returns",
"the",
"ASCII",
"-",
"encoded",
"string"
]
| python | train |
yunojuno/elasticsearch-django | elasticsearch_django/models.py | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L565-L617 | def execute_search(
search,
search_terms="",
user=None,
reference="",
save=True,
query_type=SearchQuery.QUERY_TYPE_SEARCH,
):
"""
Create a new SearchQuery instance and execute a search against ES.
Args:
search: elasticsearch.search.Search object, that internally contains
the connection and query; this is the query that is executed. All
we are doing is logging the input and parsing the output.
search_terms: raw end user search terms input - what they typed into the search
box.
user: Django User object, the person making the query - used for logging
purposes. Can be null.
reference: string, can be anything you like, used for identification,
grouping purposes.
save: bool, if True then save the new object immediately, can be
overridden to False to prevent logging absolutely everything.
Defaults to True
query_type: string, used to determine whether to run a search query or
a count query (returns hit count, but no results).
"""
start = time.time()
if query_type == SearchQuery.QUERY_TYPE_SEARCH:
response = search.execute()
hits = [h.meta.to_dict() for h in response.hits]
total_hits = response.hits.total
elif query_type == SearchQuery.QUERY_TYPE_COUNT:
response = total_hits = search.count()
hits = []
else:
raise ValueError(f"Invalid SearchQuery.query_type value: '{query_type}'")
duration = time.time() - start
search_query = SearchQuery(
user=user,
search_terms=search_terms,
index=", ".join(search._index or ["_all"])[:100], # field length restriction
query=search.to_dict(),
query_type=query_type,
hits=hits,
total_hits=total_hits,
reference=reference or "",
executed_at=tz_now(),
duration=duration,
)
search_query.response = response
return search_query.save() if save else search_query | [
"def",
"execute_search",
"(",
"search",
",",
"search_terms",
"=",
"\"\"",
",",
"user",
"=",
"None",
",",
"reference",
"=",
"\"\"",
",",
"save",
"=",
"True",
",",
"query_type",
"=",
"SearchQuery",
".",
"QUERY_TYPE_SEARCH",
",",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"if",
"query_type",
"==",
"SearchQuery",
".",
"QUERY_TYPE_SEARCH",
":",
"response",
"=",
"search",
".",
"execute",
"(",
")",
"hits",
"=",
"[",
"h",
".",
"meta",
".",
"to_dict",
"(",
")",
"for",
"h",
"in",
"response",
".",
"hits",
"]",
"total_hits",
"=",
"response",
".",
"hits",
".",
"total",
"elif",
"query_type",
"==",
"SearchQuery",
".",
"QUERY_TYPE_COUNT",
":",
"response",
"=",
"total_hits",
"=",
"search",
".",
"count",
"(",
")",
"hits",
"=",
"[",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Invalid SearchQuery.query_type value: '{query_type}'\"",
")",
"duration",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"search_query",
"=",
"SearchQuery",
"(",
"user",
"=",
"user",
",",
"search_terms",
"=",
"search_terms",
",",
"index",
"=",
"\", \"",
".",
"join",
"(",
"search",
".",
"_index",
"or",
"[",
"\"_all\"",
"]",
")",
"[",
":",
"100",
"]",
",",
"# field length restriction",
"query",
"=",
"search",
".",
"to_dict",
"(",
")",
",",
"query_type",
"=",
"query_type",
",",
"hits",
"=",
"hits",
",",
"total_hits",
"=",
"total_hits",
",",
"reference",
"=",
"reference",
"or",
"\"\"",
",",
"executed_at",
"=",
"tz_now",
"(",
")",
",",
"duration",
"=",
"duration",
",",
")",
"search_query",
".",
"response",
"=",
"response",
"return",
"search_query",
".",
"save",
"(",
")",
"if",
"save",
"else",
"search_query"
]
| Create a new SearchQuery instance and execute a search against ES.
Args:
search: elasticsearch.search.Search object, that internally contains
the connection and query; this is the query that is executed. All
we are doing is logging the input and parsing the output.
search_terms: raw end user search terms input - what they typed into the search
box.
user: Django User object, the person making the query - used for logging
purposes. Can be null.
reference: string, can be anything you like, used for identification,
grouping purposes.
save: bool, if True then save the new object immediately, can be
overridden to False to prevent logging absolutely everything.
Defaults to True
query_type: string, used to determine whether to run a search query or
a count query (returns hit count, but no results). | [
"Create",
"a",
"new",
"SearchQuery",
"instance",
"and",
"execute",
"a",
"search",
"against",
"ES",
"."
]
| python | train |
JarryShaw/f2format | src/core.py | https://github.com/JarryShaw/f2format/blob/a144250268247ce0a98d734a26d53faadff7a6f8/src/core.py#L199-L225 | def f2format(filename):
"""Wrapper works for conversion.
Args:
- filename -- str, file to be converted
"""
print('Now converting %r...' % filename)
# fetch encoding
encoding = os.getenv('F2FORMAT_ENCODING', LOCALE_ENCODING)
lineno = dict() # line number -> file offset
content = list() # file content
with open(filename, 'r', encoding=encoding) as file:
lineno[1] = 0
for lnum, line in enumerate(file, start=1):
content.append(line)
lineno[lnum+1] = lineno[lnum] + len(line)
# now, do the dirty works
string = ''.join(content)
text = convert(string, lineno)
# dump back to the file
with open(filename, 'w', encoding=encoding) as file:
file.write(text) | [
"def",
"f2format",
"(",
"filename",
")",
":",
"print",
"(",
"'Now converting %r...'",
"%",
"filename",
")",
"# fetch encoding",
"encoding",
"=",
"os",
".",
"getenv",
"(",
"'F2FORMAT_ENCODING'",
",",
"LOCALE_ENCODING",
")",
"lineno",
"=",
"dict",
"(",
")",
"# line number -> file offset",
"content",
"=",
"list",
"(",
")",
"# file content",
"with",
"open",
"(",
"filename",
",",
"'r'",
",",
"encoding",
"=",
"encoding",
")",
"as",
"file",
":",
"lineno",
"[",
"1",
"]",
"=",
"0",
"for",
"lnum",
",",
"line",
"in",
"enumerate",
"(",
"file",
",",
"start",
"=",
"1",
")",
":",
"content",
".",
"append",
"(",
"line",
")",
"lineno",
"[",
"lnum",
"+",
"1",
"]",
"=",
"lineno",
"[",
"lnum",
"]",
"+",
"len",
"(",
"line",
")",
"# now, do the dirty works",
"string",
"=",
"''",
".",
"join",
"(",
"content",
")",
"text",
"=",
"convert",
"(",
"string",
",",
"lineno",
")",
"# dump back to the file",
"with",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"encoding",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"text",
")"
]
| Wrapper works for conversion.
Args:
- filename -- str, file to be converted | [
"Wrapper",
"works",
"for",
"conversion",
"."
]
| python | train |
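A small sketch of the line-number-to-file-offset map that f2format builds, using an in-memory list of lines instead of a real file:

lines = ["first\n", "second\n", "third\n"]
lineno = {1: 0}
for lnum, line in enumerate(lines, start=1):
    lineno[lnum + 1] = lineno[lnum] + len(line)
print(lineno)  # {1: 0, 2: 6, 3: 13, 4: 19}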
djgagne/hagelslag | hagelslag/evaluation/ProbabilityMetrics.py | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ProbabilityMetrics.py#L194-L212 | def from_str(self, in_str):
"""
Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method
"""
parts = in_str.split(";")
for part in parts:
var_name, value = part.split(":")
if var_name == "Obs_Threshold":
self.obs_threshold = float(value)
elif var_name == "Thresholds":
self.thresholds = np.array(value.split(), dtype=float)
self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,
data=np.zeros((self.thresholds.size,
self.contingency_tables.columns.size)))
elif var_name in self.contingency_tables.columns:
self.contingency_tables[var_name] = np.array(value.split(), dtype=int) | [
"def",
"from_str",
"(",
"self",
",",
"in_str",
")",
":",
"parts",
"=",
"in_str",
".",
"split",
"(",
"\";\"",
")",
"for",
"part",
"in",
"parts",
":",
"var_name",
",",
"value",
"=",
"part",
".",
"split",
"(",
"\":\"",
")",
"if",
"var_name",
"==",
"\"Obs_Threshold\"",
":",
"self",
".",
"obs_threshold",
"=",
"float",
"(",
"value",
")",
"elif",
"var_name",
"==",
"\"Thresholds\"",
":",
"self",
".",
"thresholds",
"=",
"np",
".",
"array",
"(",
"value",
".",
"split",
"(",
")",
",",
"dtype",
"=",
"float",
")",
"self",
".",
"contingency_tables",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"self",
".",
"contingency_tables",
".",
"columns",
",",
"data",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"thresholds",
".",
"size",
",",
"self",
".",
"contingency_tables",
".",
"columns",
".",
"size",
")",
")",
")",
"elif",
"var_name",
"in",
"self",
".",
"contingency_tables",
".",
"columns",
":",
"self",
".",
"contingency_tables",
"[",
"var_name",
"]",
"=",
"np",
".",
"array",
"(",
"value",
".",
"split",
"(",
")",
",",
"dtype",
"=",
"int",
")"
]
| Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method | [
"Read",
"the",
"DistributedROC",
"string",
"and",
"parse",
"the",
"contingency",
"table",
"values",
"from",
"it",
"."
]
| python | train |
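An illustration of the "name:value;name:value" layout that from_str parses; the values below are made up and only the split logic is shown:

in_str = "Obs_Threshold:25.0;Thresholds:0.1 0.5 0.9"
for part in in_str.split(";"):
    var_name, value = part.split(":")
    print(var_name, value.split())
# Obs_Threshold ['25.0']
# Thresholds ['0.1', '0.5', '0.9']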
saltstack/salt | salt/output/highstate.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/highstate.py#L554-L595 | def _format_terse(tcolor, comps, ret, colors, tabular):
'''
Terse formatting of a message.
'''
result = 'Clean'
if ret['changes']:
result = 'Changed'
if ret['result'] is False:
result = 'Failed'
elif ret['result'] is None:
result = 'Differs'
if tabular is True:
fmt_string = ''
if 'warnings' in ret:
fmt_string += '{c[LIGHT_RED]}Warnings:\n{w}{c[ENDC]}\n'.format(
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += '{0}'
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
fmt_string += '{6[start_time]!s} [{6[duration]!s:>7} ms] '
fmt_string += '{2:>10}.{3:<10} {4:7} Name: {1}{5}'
elif isinstance(tabular, six.string_types):
fmt_string = tabular
else:
fmt_string = ''
if 'warnings' in ret:
fmt_string += '{c[LIGHT_RED]}Warnings:\n{w}{c[ENDC]}'.format(
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += ' {0} Name: {1} - Function: {2}.{3} - Result: {4}'
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
fmt_string += ' Started: - {6[start_time]!s} Duration: {6[duration]!s} ms'
fmt_string += '{5}'
msg = fmt_string.format(tcolor,
comps[2],
comps[0],
comps[-1],
result,
colors['ENDC'],
ret)
return msg | [
"def",
"_format_terse",
"(",
"tcolor",
",",
"comps",
",",
"ret",
",",
"colors",
",",
"tabular",
")",
":",
"result",
"=",
"'Clean'",
"if",
"ret",
"[",
"'changes'",
"]",
":",
"result",
"=",
"'Changed'",
"if",
"ret",
"[",
"'result'",
"]",
"is",
"False",
":",
"result",
"=",
"'Failed'",
"elif",
"ret",
"[",
"'result'",
"]",
"is",
"None",
":",
"result",
"=",
"'Differs'",
"if",
"tabular",
"is",
"True",
":",
"fmt_string",
"=",
"''",
"if",
"'warnings'",
"in",
"ret",
":",
"fmt_string",
"+=",
"'{c[LIGHT_RED]}Warnings:\\n{w}{c[ENDC]}\\n'",
".",
"format",
"(",
"c",
"=",
"colors",
",",
"w",
"=",
"'\\n'",
".",
"join",
"(",
"ret",
"[",
"'warnings'",
"]",
")",
")",
"fmt_string",
"+=",
"'{0}'",
"if",
"__opts__",
".",
"get",
"(",
"'state_output_profile'",
",",
"True",
")",
"and",
"'start_time'",
"in",
"ret",
":",
"fmt_string",
"+=",
"'{6[start_time]!s} [{6[duration]!s:>7} ms] '",
"fmt_string",
"+=",
"'{2:>10}.{3:<10} {4:7} Name: {1}{5}'",
"elif",
"isinstance",
"(",
"tabular",
",",
"six",
".",
"string_types",
")",
":",
"fmt_string",
"=",
"tabular",
"else",
":",
"fmt_string",
"=",
"''",
"if",
"'warnings'",
"in",
"ret",
":",
"fmt_string",
"+=",
"'{c[LIGHT_RED]}Warnings:\\n{w}{c[ENDC]}'",
".",
"format",
"(",
"c",
"=",
"colors",
",",
"w",
"=",
"'\\n'",
".",
"join",
"(",
"ret",
"[",
"'warnings'",
"]",
")",
")",
"fmt_string",
"+=",
"' {0} Name: {1} - Function: {2}.{3} - Result: {4}'",
"if",
"__opts__",
".",
"get",
"(",
"'state_output_profile'",
",",
"True",
")",
"and",
"'start_time'",
"in",
"ret",
":",
"fmt_string",
"+=",
"' Started: - {6[start_time]!s} Duration: {6[duration]!s} ms'",
"fmt_string",
"+=",
"'{5}'",
"msg",
"=",
"fmt_string",
".",
"format",
"(",
"tcolor",
",",
"comps",
"[",
"2",
"]",
",",
"comps",
"[",
"0",
"]",
",",
"comps",
"[",
"-",
"1",
"]",
",",
"result",
",",
"colors",
"[",
"'ENDC'",
"]",
",",
"ret",
")",
"return",
"msg"
]
| Terse formatting of a message. | [
"Terse",
"formatting",
"of",
"a",
"message",
"."
]
| python | train |
saltstack/salt | salt/states/win_iis.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_iis.py#L733-L780 | def remove_vdir(name, site, app='/'):
'''
Remove an IIS virtual directory.
:param str name: The virtual directory name.
:param str site: The IIS site name.
:param str app: The IIS application.
Example of usage with only the required arguments:
.. code-block:: yaml
site0-foo-vdir-remove:
win_iis.remove_vdir:
- name: foo
- site: site0
Example of usage specifying all available arguments:
.. code-block:: yaml
site0-foo-vdir-remove:
win_iis.remove_vdir:
- name: foo
- site: site0
- app: v1
'''
ret = {'name': name,
'changes': {},
'comment': str(),
'result': None}
current_vdirs = __salt__['win_iis.list_vdirs'](site, app)
if name not in current_vdirs:
ret['comment'] = 'Virtual directory has already been removed: {0}'.format(name)
ret['result'] = True
elif __opts__['test']:
ret['comment'] = 'Virtual directory will be removed: {0}'.format(name)
ret['changes'] = {'old': name,
'new': None}
else:
ret['comment'] = 'Removed virtual directory: {0}'.format(name)
ret['changes'] = {'old': name,
'new': None}
ret['result'] = __salt__['win_iis.remove_vdir'](name, site, app)
return ret | [
"def",
"remove_vdir",
"(",
"name",
",",
"site",
",",
"app",
"=",
"'/'",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"str",
"(",
")",
",",
"'result'",
":",
"None",
"}",
"current_vdirs",
"=",
"__salt__",
"[",
"'win_iis.list_vdirs'",
"]",
"(",
"site",
",",
"app",
")",
"if",
"name",
"not",
"in",
"current_vdirs",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Virtual directory has already been removed: {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"elif",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Virtual directory will be removed: {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"name",
",",
"'new'",
":",
"None",
"}",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Removed virtual directory: {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"name",
",",
"'new'",
":",
"None",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"__salt__",
"[",
"'win_iis.remove_vdir'",
"]",
"(",
"name",
",",
"site",
",",
"app",
")",
"return",
"ret"
]
| Remove an IIS virtual directory.
:param str name: The virtual directory name.
:param str site: The IIS site name.
:param str app: The IIS application.
Example of usage with only the required arguments:
.. code-block:: yaml
site0-foo-vdir-remove:
win_iis.remove_vdir:
- name: foo
- site: site0
Example of usage specifying all available arguments:
.. code-block:: yaml
site0-foo-vdir-remove:
win_iis.remove_vdir:
- name: foo
- site: site0
- app: v1 | [
"Remove",
"an",
"IIS",
"virtual",
"directory",
"."
]
| python | train |
flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L4122-L4127 | def multiplyC(self, alpha):
"""multiply C with a scalar and update all related internal variables (dC, D,...)"""
self.C *= alpha
if self.dC is not self.C:
self.dC *= alpha
self.D *= alpha**0.5 | [
"def",
"multiplyC",
"(",
"self",
",",
"alpha",
")",
":",
"self",
".",
"C",
"*=",
"alpha",
"if",
"self",
".",
"dC",
"is",
"not",
"self",
".",
"C",
":",
"self",
".",
"dC",
"*=",
"alpha",
"self",
".",
"D",
"*=",
"alpha",
"**",
"0.5"
]
| multiply C with a scalar and update all related internal variables (dC, D,...) | [
"multiply",
"C",
"with",
"a",
"scalar",
"and",
"update",
"all",
"related",
"internal",
"variables",
"(",
"dC",
"D",
"...",
")"
]
| python | train |
eng-tools/sfsimodels | sfsimodels/models/soils.py | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1125-L1158 | def one_vertical_total_stress(self, z_c):
"""
Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface
"""
total_stress = 0.0
depths = self.depths
end = 0
for layer_int in range(1, len(depths) + 1):
l_index = layer_int - 1
if z_c > depths[layer_int - 1]:
if l_index < len(depths) - 1 and z_c > depths[l_index + 1]:
height = depths[l_index + 1] - depths[l_index]
bottom_depth = depths[l_index + 1]
else:
end = 1
height = z_c - depths[l_index]
bottom_depth = z_c
if bottom_depth <= self.gwl:
total_stress += height * self.layer(layer_int).unit_dry_weight
else:
if self.layer(layer_int).unit_sat_weight is None:
raise AnalysisError("Saturated unit weight not defined for layer %i." % layer_int)
sat_height = bottom_depth - max(self.gwl, depths[l_index])
dry_height = height - sat_height
total_stress += dry_height * self.layer(layer_int).unit_dry_weight + \
sat_height * self.layer(layer_int).unit_sat_weight
else:
end = 1
if end:
break
return total_stress | [
"def",
"one_vertical_total_stress",
"(",
"self",
",",
"z_c",
")",
":",
"total_stress",
"=",
"0.0",
"depths",
"=",
"self",
".",
"depths",
"end",
"=",
"0",
"for",
"layer_int",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"depths",
")",
"+",
"1",
")",
":",
"l_index",
"=",
"layer_int",
"-",
"1",
"if",
"z_c",
">",
"depths",
"[",
"layer_int",
"-",
"1",
"]",
":",
"if",
"l_index",
"<",
"len",
"(",
"depths",
")",
"-",
"1",
"and",
"z_c",
">",
"depths",
"[",
"l_index",
"+",
"1",
"]",
":",
"height",
"=",
"depths",
"[",
"l_index",
"+",
"1",
"]",
"-",
"depths",
"[",
"l_index",
"]",
"bottom_depth",
"=",
"depths",
"[",
"l_index",
"+",
"1",
"]",
"else",
":",
"end",
"=",
"1",
"height",
"=",
"z_c",
"-",
"depths",
"[",
"l_index",
"]",
"bottom_depth",
"=",
"z_c",
"if",
"bottom_depth",
"<=",
"self",
".",
"gwl",
":",
"total_stress",
"+=",
"height",
"*",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_dry_weight",
"else",
":",
"if",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_sat_weight",
"is",
"None",
":",
"raise",
"AnalysisError",
"(",
"\"Saturated unit weight not defined for layer %i.\"",
"%",
"layer_int",
")",
"sat_height",
"=",
"bottom_depth",
"-",
"max",
"(",
"self",
".",
"gwl",
",",
"depths",
"[",
"l_index",
"]",
")",
"dry_height",
"=",
"height",
"-",
"sat_height",
"total_stress",
"+=",
"dry_height",
"*",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_dry_weight",
"+",
"sat_height",
"*",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_sat_weight",
"else",
":",
"end",
"=",
"1",
"if",
"end",
":",
"break",
"return",
"total_stress"
]
| Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface | [
"Determine",
"the",
"vertical",
"total",
"stress",
"at",
"a",
"single",
"depth",
"z_c",
"."
]
| python | train |
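A worked single-layer example of the total-stress logic above, with made-up unit weights (kN/m3) and a water table at 2 m depth:

gwl, z_c = 2.0, 5.0                            # water table depth and target depth (m)
unit_dry_weight, unit_sat_weight = 17.0, 20.0  # assumed soil properties
dry_height = min(z_c, gwl)                     # soil above the water table
sat_height = max(z_c - gwl, 0.0)               # soil below the water table
total_stress = dry_height * unit_dry_weight + sat_height * unit_sat_weight
print(total_stress)  # 2*17 + 3*20 = 94.0 kPa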
raphaelvallat/pingouin | pingouin/external/tabulate.py | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L751-L769 | def _align_header(header, alignment, width, visible_width, is_multiline=False,
width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h))
for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header) | [
"def",
"_align_header",
"(",
"header",
",",
"alignment",
",",
"width",
",",
"visible_width",
",",
"is_multiline",
"=",
"False",
",",
"width_fn",
"=",
"None",
")",
":",
"if",
"is_multiline",
":",
"header_lines",
"=",
"re",
".",
"split",
"(",
"_multiline_codes",
",",
"header",
")",
"padded_lines",
"=",
"[",
"_align_header",
"(",
"h",
",",
"alignment",
",",
"width",
",",
"width_fn",
"(",
"h",
")",
")",
"for",
"h",
"in",
"header_lines",
"]",
"return",
"\"\\n\"",
".",
"join",
"(",
"padded_lines",
")",
"# else: not multiline",
"ninvisible",
"=",
"len",
"(",
"header",
")",
"-",
"visible_width",
"width",
"+=",
"ninvisible",
"if",
"alignment",
"==",
"\"left\"",
":",
"return",
"_padright",
"(",
"width",
",",
"header",
")",
"elif",
"alignment",
"==",
"\"center\"",
":",
"return",
"_padboth",
"(",
"width",
",",
"header",
")",
"elif",
"not",
"alignment",
":",
"return",
"\"{0}\"",
".",
"format",
"(",
"header",
")",
"else",
":",
"return",
"_padleft",
"(",
"width",
",",
"header",
")"
]
| Pad string header to width chars given known visible_width of the header. | [
"Pad",
"string",
"header",
"to",
"width",
"chars",
"given",
"known",
"visible_width",
"of",
"the",
"header",
"."
]
| python | train |
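A rough sketch of the visible alignment behaviour of _align_header, with str.ljust/center/rjust standing in for the module's pad helpers (no multiline or ANSI-width handling):

def align(header, alignment, width):
    if alignment == "left":
        return header.ljust(width)
    if alignment == "center":
        return header.center(width)
    if not alignment:
        return header
    return header.rjust(width)

print(repr(align("name", "left", 10)))    # 'name      '
print(repr(align("name", "center", 10)))  # '   name   '
print(repr(align("name", "right", 10)))   # '      name'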
carpedm20/fbchat | fbchat/_client.py | https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L1407-L1444 | def quickReply(self, quick_reply, payload=None, thread_id=None, thread_type=None):
"""
Replies to a chosen quick reply
:param quick_reply: Quick reply to reply to
:param payload: Optional answer to the quick reply
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type quick_reply: models.QuickReply
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent message
:raises: FBchatException if request failed
"""
quick_reply.is_response = True
if isinstance(quick_reply, QuickReplyText):
return self.send(
Message(text=quick_reply.title, quick_replies=[quick_reply])
)
elif isinstance(quick_reply, QuickReplyLocation):
if not isinstance(payload, LocationAttachment):
raise ValueError(
"Payload must be an instance of `fbchat.models.LocationAttachment`"
)
return self.sendLocation(
payload, thread_id=thread_id, thread_type=thread_type
)
elif isinstance(quick_reply, QuickReplyEmail):
if not payload:
payload = self.getEmails()[0]
quick_reply.external_payload = quick_reply.payload
quick_reply.payload = payload
return self.send(Message(text=payload, quick_replies=[quick_reply]))
elif isinstance(quick_reply, QuickReplyPhoneNumber):
if not payload:
payload = self.getPhoneNumbers()[0]
quick_reply.external_payload = quick_reply.payload
quick_reply.payload = payload
return self.send(Message(text=payload, quick_replies=[quick_reply])) | [
"def",
"quickReply",
"(",
"self",
",",
"quick_reply",
",",
"payload",
"=",
"None",
",",
"thread_id",
"=",
"None",
",",
"thread_type",
"=",
"None",
")",
":",
"quick_reply",
".",
"is_response",
"=",
"True",
"if",
"isinstance",
"(",
"quick_reply",
",",
"QuickReplyText",
")",
":",
"return",
"self",
".",
"send",
"(",
"Message",
"(",
"text",
"=",
"quick_reply",
".",
"title",
",",
"quick_replies",
"=",
"[",
"quick_reply",
"]",
")",
")",
"elif",
"isinstance",
"(",
"quick_reply",
",",
"QuickReplyLocation",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"LocationAttachment",
")",
":",
"raise",
"ValueError",
"(",
"\"Payload must be an instance of `fbchat.models.LocationAttachment`\"",
")",
"return",
"self",
".",
"sendLocation",
"(",
"payload",
",",
"thread_id",
"=",
"thread_id",
",",
"thread_type",
"=",
"thread_type",
")",
"elif",
"isinstance",
"(",
"quick_reply",
",",
"QuickReplyEmail",
")",
":",
"if",
"not",
"payload",
":",
"payload",
"=",
"self",
".",
"getEmails",
"(",
")",
"[",
"0",
"]",
"quick_reply",
".",
"external_payload",
"=",
"quick_reply",
".",
"payload",
"quick_reply",
".",
"payload",
"=",
"payload",
"return",
"self",
".",
"send",
"(",
"Message",
"(",
"text",
"=",
"payload",
",",
"quick_replies",
"=",
"[",
"quick_reply",
"]",
")",
")",
"elif",
"isinstance",
"(",
"quick_reply",
",",
"QuickReplyPhoneNumber",
")",
":",
"if",
"not",
"payload",
":",
"payload",
"=",
"self",
".",
"getPhoneNumbers",
"(",
")",
"[",
"0",
"]",
"quick_reply",
".",
"external_payload",
"=",
"quick_reply",
".",
"payload",
"quick_reply",
".",
"payload",
"=",
"payload",
"return",
"self",
".",
"send",
"(",
"Message",
"(",
"text",
"=",
"payload",
",",
"quick_replies",
"=",
"[",
"quick_reply",
"]",
")",
")"
]
| Replies to a chosen quick reply
:param quick_reply: Quick reply to reply to
:param payload: Optional answer to the quick reply
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type quick_reply: models.QuickReply
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent message
:raises: FBchatException if request failed | [
"Replies",
"to",
"a",
"chosen",
"quick",
"reply"
]
| python | train |
openpaperwork/paperwork-backend | paperwork_backend/index.py | https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/index.py#L495-L504 | def get(self, obj_id):
"""
Get a document or a page using its ID
Won't instantiate them if they are not yet available
"""
if BasicPage.PAGE_ID_SEPARATOR in obj_id:
(docid, page_nb) = obj_id.split(BasicPage.PAGE_ID_SEPARATOR)
page_nb = int(page_nb)
return self._docs_by_id[docid].pages[page_nb]
return self._docs_by_id[obj_id] | [
"def",
"get",
"(",
"self",
",",
"obj_id",
")",
":",
"if",
"BasicPage",
".",
"PAGE_ID_SEPARATOR",
"in",
"obj_id",
":",
"(",
"docid",
",",
"page_nb",
")",
"=",
"obj_id",
".",
"split",
"(",
"BasicPage",
".",
"PAGE_ID_SEPARATOR",
")",
"page_nb",
"=",
"int",
"(",
"page_nb",
")",
"return",
"self",
".",
"_docs_by_id",
"[",
"docid",
"]",
".",
"pages",
"[",
"page_nb",
"]",
"return",
"self",
".",
"_docs_by_id",
"[",
"obj_id",
"]"
]
| Get a document or a page using its ID
Won't instantiate them if they are not yet available | [
"Get",
"a",
"document",
"or",
"a",
"page",
"using",
"its",
"ID",
"Won",
"t",
"instantiate",
"them",
"if",
"they",
"are",
"not",
"yet",
"available"
]
| python | train |
althonos/pronto | pronto/ontology.py | https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L487-L529 | def _obo_meta(self):
"""Generate the obo metadata header and updates metadata.
When called, this method will create appropriate values for the
``auto-generated-by`` and ``date`` fields.
Note:
Generated following specs of the unofficial format guide:
ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml
"""
metatags = (
"format-version", "data-version", "date", "saved-by",
"auto-generated-by", "import", "subsetdef", "synonymtypedef",
"default-namespace", "namespace-id-rule", "idspace",
"treat-xrefs-as-equivalent", "treat-xrefs-as-genus-differentia",
"treat-xrefs-as-is_a", "remark", "ontology"
)
meta = self.meta.copy()
meta['auto-generated-by'] = ['pronto v{}'.format(__version__)]
meta['date'] = [datetime.datetime.now().strftime('%d:%m:%Y %H:%M')]
obo_meta = "\n".join(
[ # official obo tags
x.obo if hasattr(x, 'obo') \
else "{}: {}".format(k,x)
for k in metatags[:-1]
for x in meta.get(k, ())
] + [ # eventual other metadata added to remarksmock.patch in production code
"remark: {}: {}".format(k, x)
for k,v in sorted(six.iteritems(meta), key=operator.itemgetter(0))
for x in v
if k not in metatags
] + ( ["ontology: {}".format(x) for x in meta["ontology"]]
if "ontology" in meta
else ["ontology: {}".format(meta["namespace"][0].lower())]
if "namespace" in meta
else [])
)
return obo_meta | [
"def",
"_obo_meta",
"(",
"self",
")",
":",
"metatags",
"=",
"(",
"\"format-version\"",
",",
"\"data-version\"",
",",
"\"date\"",
",",
"\"saved-by\"",
",",
"\"auto-generated-by\"",
",",
"\"import\"",
",",
"\"subsetdef\"",
",",
"\"synonymtypedef\"",
",",
"\"default-namespace\"",
",",
"\"namespace-id-rule\"",
",",
"\"idspace\"",
",",
"\"treat-xrefs-as-equivalent\"",
",",
"\"treat-xrefs-as-genus-differentia\"",
",",
"\"treat-xrefs-as-is_a\"",
",",
"\"remark\"",
",",
"\"ontology\"",
")",
"meta",
"=",
"self",
".",
"meta",
".",
"copy",
"(",
")",
"meta",
"[",
"'auto-generated-by'",
"]",
"=",
"[",
"'pronto v{}'",
".",
"format",
"(",
"__version__",
")",
"]",
"meta",
"[",
"'date'",
"]",
"=",
"[",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%d:%m:%Y %H:%M'",
")",
"]",
"obo_meta",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"# official obo tags",
"x",
".",
"obo",
"if",
"hasattr",
"(",
"x",
",",
"'obo'",
")",
"else",
"\"{}: {}\"",
".",
"format",
"(",
"k",
",",
"x",
")",
"for",
"k",
"in",
"metatags",
"[",
":",
"-",
"1",
"]",
"for",
"x",
"in",
"meta",
".",
"get",
"(",
"k",
",",
"(",
")",
")",
"]",
"+",
"[",
"# eventual other metadata added to remarksmock.patch in production code",
"\"remark: {}: {}\"",
".",
"format",
"(",
"k",
",",
"x",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"meta",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"for",
"x",
"in",
"v",
"if",
"k",
"not",
"in",
"metatags",
"]",
"+",
"(",
"[",
"\"ontology: {}\"",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"meta",
"[",
"\"ontology\"",
"]",
"]",
"if",
"\"ontology\"",
"in",
"meta",
"else",
"[",
"\"ontology: {}\"",
".",
"format",
"(",
"meta",
"[",
"\"namespace\"",
"]",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
"]",
"if",
"\"namespace\"",
"in",
"meta",
"else",
"[",
"]",
")",
")",
"return",
"obo_meta"
]
| Generate the obo metadata header and updates metadata.
When called, this method will create appropriate values for the
``auto-generated-by`` and ``date`` fields.
Note:
Generated following specs of the unofficial format guide:
ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml | [
"Generate",
"the",
"obo",
"metadata",
"header",
"and",
"updates",
"metadata",
"."
]
| python | train |
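A standalone sketch of the header-assembly logic in `_obo_meta` above: official OBO tags are emitted first in a fixed order, any other metadata key is folded into a "remark:" line, and the "ontology" line comes last. The metadata values below are invented for illustration; only the ordering and formatting rules come from the snippet.

import datetime

metatags = ("format-version", "date", "auto-generated-by", "remark", "ontology")

meta = {
    "format-version": ["1.4"],
    "ontology": ["test"],
    "custom-key": ["some value"],   # not an official tag -> becomes a remark
}
meta["auto-generated-by"] = ["pronto-style sketch"]
meta["date"] = [datetime.datetime.now().strftime("%d:%m:%Y %H:%M")]

header = "\n".join(
    ["{}: {}".format(k, x) for k in metatags[:-1] for x in meta.get(k, ())]
    + ["remark: {}: {}".format(k, x)
       for k, v in sorted(meta.items()) for x in v if k not in metatags]
    + ["ontology: {}".format(x) for x in meta.get("ontology", ())]
)
print(header)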
mikedh/trimesh | trimesh/path/exchange/dxf.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/exchange/dxf.py#L66-L481 | def load_dxf(file_obj, **kwargs):
"""
Load a DXF file to a dictionary containing vertices and
entities.
Parameters
----------
file_obj: file or file- like object (has object.read method)
Returns
----------
result: dict, keys are entities, vertices and metadata
"""
def info(e):
"""
Pull metadata based on group code, and return as a dict.
"""
# which keys should we extract from the entity data
# DXF group code : our metadata key
get = {'8': 'layer'}
# replace group codes with names and only
# take info from the entity dict if it is in cand
renamed = {get[k]: util.make_sequence(v)[0] for k,
v in e.items() if k in get}
return renamed
def convert_line(e):
"""
Convert DXF LINE entities into trimesh Line entities.
"""
# create a single Line entity
entities.append(Line(points=len(vertices) + np.arange(2),
**info(e)))
# add the vertices to our collection
vertices.extend(np.array([[e['10'], e['20']],
[e['11'], e['21']]],
dtype=np.float64))
def convert_circle(e):
"""
Convert DXF CIRCLE entities into trimesh Circle entities
"""
R = float(e['40'])
C = np.array([e['10'],
e['20']]).astype(np.float64)
points = to_threepoint(center=C[0:2],
radius=R)
entities.append(Arc(points=(len(vertices) + np.arange(3)),
closed=True,
**info(e)))
vertices.extend(points)
def convert_arc(e):
"""
Convert DXF ARC entities into into trimesh Arc entities.
"""
# the radius of the circle
R = float(e['40'])
# the center point of the circle
C = np.array([e['10'],
e['20']], dtype=np.float64)
# the start and end angle of the arc, in degrees
# this may depend on an AUNITS header data
A = np.radians(np.array([e['50'],
e['51']], dtype=np.float64))
# convert center/radius/angle representation
# to three points on the arc representation
points = to_threepoint(center=C[0:2],
radius=R,
angles=A)
# add a single Arc entity
entities.append(Arc(points=len(vertices) + np.arange(3),
closed=False,
**info(e)))
# add the three vertices
vertices.extend(points)
def convert_polyline(e):
"""
Convert DXF LWPOLYLINE entities into trimesh Line entities.
"""
# load the points in the line
lines = np.column_stack((
e['10'], e['20'])).astype(np.float64)
# save entity info so we don't have to recompute
polyinfo = info(e)
# 70 is the closed flag for polylines
# if the closed flag is set make sure to close
is_closed = '70' in e and int(e['70'][0]) & 1
if is_closed:
lines = np.vstack((lines, lines[:1]))
# 42 is the vertex bulge flag for LWPOLYLINE entities
# "bulge" is autocad for "add a stupid arc using flags
# in my otherwise normal polygon", it's like SVG arc
# flags but somehow even more annoying
if '42' in e:
# get the actual bulge float values
bulge = np.array(e['42'], dtype=np.float64)
# what position were vertices stored at
vid = np.nonzero(chunk[:, 0] == '10')[0]
# what position were bulges stored at in the chunk
bid = np.nonzero(chunk[:, 0] == '42')[0]
# filter out endpoint bulge if we're not closed
if not is_closed:
bid_ok = bid < vid.max()
bid = bid[bid_ok]
bulge = bulge[bid_ok]
# which vertex index is bulge value associated with
bulge_idx = np.searchsorted(vid, bid)
# convert stupid bulge to Line/Arc entities
v, e = bulge_to_arcs(lines=lines,
bulge=bulge,
bulge_idx=bulge_idx,
is_closed=is_closed)
for i in e:
# offset added entities by current vertices length
i.points += len(vertices)
vertices.extend(v)
entities.extend(e)
# done with this polyline
return
# we have a normal polyline so just add it
# as single line entity and vertices
entities.append(Line(
points=np.arange(len(lines)) + len(vertices),
**polyinfo))
vertices.extend(lines)
def convert_bspline(e):
"""
Convert DXF Spline entities into trimesh BSpline entities.
"""
# in the DXF there are n points and n ordered fields
# with the same group code
points = np.column_stack((e['10'],
e['20'])).astype(np.float64)
knots = np.array(e['40']).astype(np.float64)
# if there are only two points, save it as a line
if len(points) == 2:
# create a single Line entity
entities.append(Line(points=len(vertices) +
np.arange(2),
**info(e)))
# add the vertices to our collection
vertices.extend(points)
return
# check bit coded flag for closed
# closed = bool(int(e['70'][0]) & 1)
# check euclidean distance to see if closed
closed = np.linalg.norm(points[0] -
points[-1]) < tol.merge
# create a BSpline entity
entities.append(BSpline(
points=np.arange(len(points)) + len(vertices),
knots=knots,
closed=closed,
**info(e)))
# add the vertices
vertices.extend(points)
def convert_text(e):
"""
Convert a DXF TEXT entity into a native text entity.
"""
if '50' in e:
# rotation angle converted to radians
angle = np.radians(float(e['50']))
else:
# otherwise no rotation
angle = 0.0
# text with leading and trailing whitespace removed
text = e['1'].strip()
# height of text
if '40' in e:
height = float(e['40'])
else:
height = None
# origin point
origin = np.array([e['10'],
e['20']]).astype(np.float64)
# an origin- relative point (so transforms work)
vector = origin + [np.cos(angle), np.sin(angle)]
# try to extract a (horizontal, vertical) text alignment
align = ['center', 'center']
try:
align[0] = ['left', 'center', 'right'][int(e['72'])]
except BaseException:
pass
# append the entity
entities.append(Text(origin=len(vertices),
vector=len(vertices) + 1,
height=height,
text=text,
align=align))
# append the text origin and direction
vertices.append(origin)
vertices.append(vector)
# in a DXF file, lines come in pairs,
# a group code then the next line is the value
# we are removing all whitespace then splitting with the
# splitlines function which uses the universal newline method
raw = file_obj.read()
# if we've been passed bytes
if hasattr(raw, 'decode'):
# search for the sentinel string indicating binary DXF
# do it by encoding sentinel to bytes and subset searching
if raw[:22].find(b'AutoCAD Binary DXF') != -1:
if _teigha is None:
# no converter to ASCII DXF available
raise ValueError('binary DXF not supported!')
else:
# convert binary DXF to R14 ASCII DXF
raw = _teigha_convert(raw, extension='dxf')
else:
# we've been passed bytes that don't have the
# header for binary DXF so try decoding as UTF-8
raw = raw.decode('utf-8', errors='ignore')
# remove trailing whitespace
raw = str(raw).strip()
# without any spaces and in upper case
cleaned = raw.replace(' ', '').strip().upper()
# blob with spaces and original case
blob_raw = np.array(str.splitlines(raw)).reshape((-1, 2))
# if this reshape fails, it means the DXF is malformed
blob = np.array(str.splitlines(cleaned)).reshape((-1, 2))
# get the section which contains the header in the DXF file
endsec = np.nonzero(blob[:, 1] == 'ENDSEC')[0]
# get the section which contains entities in the DXF file
entity_start = np.nonzero(blob[:, 1] == 'ENTITIES')[0][0]
entity_end = endsec[np.searchsorted(endsec, entity_start)]
entity_blob = blob[entity_start:entity_end]
# store the entity blob with original case
entity_raw = blob_raw[entity_start:entity_end]
# store metadata
metadata = {}
# try reading the header, which may be malformed
header_start = np.nonzero(blob[:, 1] == 'HEADER')[0]
if len(header_start) > 0:
header_end = endsec[np.searchsorted(endsec, header_start[0])]
header_blob = blob[header_start[0]:header_end]
# store some properties from the DXF header
metadata['DXF_HEADER'] = {}
for key, group in [('$ACADVER', '1'),
('$DIMSCALE', '40'),
('$DIMALT', '70'),
('$DIMALTF', '40'),
('$DIMUNIT', '70'),
('$INSUNITS', '70'),
('$LUNITS', '70')]:
value = get_key(header_blob,
key,
group)
if value is not None:
metadata['DXF_HEADER'][key] = value
# store unit data pulled from the header of the DXF
# prefer LUNITS over INSUNITS
# I couldn't find a table for LUNITS values but they
# look like they are 0- indexed versions of
# the INSUNITS keys, so for now offset the key value
for offset, key in [(-1, '$LUNITS'),
(0, '$INSUNITS')]:
# get the key from the header blob
units = get_key(header_blob, key, '70')
# if it exists add the offset
if units is None:
continue
metadata[key] = units
units += offset
# if the key is in our list of units store it
if units in _DXF_UNITS:
metadata['units'] = _DXF_UNITS[units]
# warn on drawings with no units
if 'units' not in metadata:
log.warning('DXF doesn\'t have units specified!')
# find the start points of entities
group_check = entity_blob[:, 0] == '0'
inflection = np.nonzero(group_check)[0]
# DXF object to trimesh object converters
loaders = {'LINE': (dict, convert_line),
'LWPOLYLINE': (util.multi_dict, convert_polyline),
'ARC': (dict, convert_arc),
'CIRCLE': (dict, convert_circle),
'SPLINE': (util.multi_dict, convert_bspline)}
# store loaded vertices
vertices = []
# store loaded entities
entities = []
# an old-style polyline entity strings its data across
# multiple vertex entities like a real asshole
polyline = None
# loop through chunks of entity information
for index in np.array_split(np.arange(len(entity_blob)),
inflection):
# if there is only a header continue
if len(index) < 1:
continue
# chunk will be an (n, 2) array of (group code, data) pairs
chunk = entity_blob[index]
# the string representing entity type
entity_type = chunk[0][1]
############
# special case old- style polyline entities
if entity_type == 'POLYLINE':
polyline = [dict(chunk)]
# if we are collecting vertex entities
elif polyline is not None and entity_type == 'VERTEX':
polyline.append(dict(chunk))
# the end of a polyline
elif polyline is not None and entity_type == 'SEQEND':
# pull the geometry information for the entity
lines = np.array([[i['10'], i['20']]
for i in polyline[1:]],
dtype=np.float64)
# check for a closed flag on the polyline
if '70' in polyline[0]:
# flag is bit- coded integer
flag = int(polyline[0]['70'])
# first bit represents closed
is_closed = bool(flag & 1)
if is_closed:
lines = np.vstack((lines, lines[:1]))
# get the index of each bulged vertices
bulge_idx = np.array([i for i, e in enumerate(polyline)
if '42' in e],
dtype=np.int64)
# get the actual bulge value
bulge = np.array([float(e['42'])
for i, e in enumerate(polyline)
if '42' in e],
dtype=np.float64)
# convert bulge to new entities
v, e = bulge_to_arcs(lines=lines,
bulge=bulge,
bulge_idx=bulge_idx,
is_closed=is_closed)
for i in e:
# offset entities by existing vertices
i.points += len(vertices)
vertices.extend(v)
entities.extend(e)
# we no longer have an active polyline
polyline = None
elif entity_type == 'TEXT':
# text entities need spaces preserved so take
# group codes from clean representation (0- column)
# and data from the raw representation (1- column)
chunk_raw = entity_raw[index]
# if we didn't use clean group codes we wouldn't
# be able to access them by key as whitespace
# is random and crazy, like: ' 1 '
chunk_raw[:, 0] = entity_blob[index][:, 0]
try:
convert_text(dict(chunk_raw))
except BaseException:
log.warning('failed to load text entity!',
exc_info=True)
# if the entity contains all relevant data we can
# cleanly load it from inside a single function
elif entity_type in loaders:
# the chunker converts an (n,2) list into a dict
chunker, loader = loaders[entity_type]
# convert data to dict
entity_data = chunker(chunk)
# append data to the lists we're collecting
loader(entity_data)
else:
log.debug('Entity type %s not supported',
entity_type)
# stack vertices into single array
vertices = util.vstack_empty(vertices).astype(np.float64)
# return result as kwargs for trimesh.path.Path2D constructor
result = {'vertices': vertices,
'entities': np.array(entities),
'metadata': metadata}
return result | [
"def",
"load_dxf",
"(",
"file_obj",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"info",
"(",
"e",
")",
":",
"\"\"\"\n Pull metadata based on group code, and return as a dict.\n \"\"\"",
"# which keys should we extract from the entity data",
"# DXF group code : our metadata key",
"get",
"=",
"{",
"'8'",
":",
"'layer'",
"}",
"# replace group codes with names and only",
"# take info from the entity dict if it is in cand",
"renamed",
"=",
"{",
"get",
"[",
"k",
"]",
":",
"util",
".",
"make_sequence",
"(",
"v",
")",
"[",
"0",
"]",
"for",
"k",
",",
"v",
"in",
"e",
".",
"items",
"(",
")",
"if",
"k",
"in",
"get",
"}",
"return",
"renamed",
"def",
"convert_line",
"(",
"e",
")",
":",
"\"\"\"\n Convert DXF LINE entities into trimesh Line entities.\n \"\"\"",
"# create a single Line entity",
"entities",
".",
"append",
"(",
"Line",
"(",
"points",
"=",
"len",
"(",
"vertices",
")",
"+",
"np",
".",
"arange",
"(",
"2",
")",
",",
"*",
"*",
"info",
"(",
"e",
")",
")",
")",
"# add the vertices to our collection",
"vertices",
".",
"extend",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"e",
"[",
"'10'",
"]",
",",
"e",
"[",
"'20'",
"]",
"]",
",",
"[",
"e",
"[",
"'11'",
"]",
",",
"e",
"[",
"'21'",
"]",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
")",
"def",
"convert_circle",
"(",
"e",
")",
":",
"\"\"\"\n Convert DXF CIRCLE entities into trimesh Circle entities\n \"\"\"",
"R",
"=",
"float",
"(",
"e",
"[",
"'40'",
"]",
")",
"C",
"=",
"np",
".",
"array",
"(",
"[",
"e",
"[",
"'10'",
"]",
",",
"e",
"[",
"'20'",
"]",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"points",
"=",
"to_threepoint",
"(",
"center",
"=",
"C",
"[",
"0",
":",
"2",
"]",
",",
"radius",
"=",
"R",
")",
"entities",
".",
"append",
"(",
"Arc",
"(",
"points",
"=",
"(",
"len",
"(",
"vertices",
")",
"+",
"np",
".",
"arange",
"(",
"3",
")",
")",
",",
"closed",
"=",
"True",
",",
"*",
"*",
"info",
"(",
"e",
")",
")",
")",
"vertices",
".",
"extend",
"(",
"points",
")",
"def",
"convert_arc",
"(",
"e",
")",
":",
"\"\"\"\n Convert DXF ARC entities into into trimesh Arc entities.\n \"\"\"",
"# the radius of the circle",
"R",
"=",
"float",
"(",
"e",
"[",
"'40'",
"]",
")",
"# the center point of the circle",
"C",
"=",
"np",
".",
"array",
"(",
"[",
"e",
"[",
"'10'",
"]",
",",
"e",
"[",
"'20'",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"# the start and end angle of the arc, in degrees",
"# this may depend on an AUNITS header data",
"A",
"=",
"np",
".",
"radians",
"(",
"np",
".",
"array",
"(",
"[",
"e",
"[",
"'50'",
"]",
",",
"e",
"[",
"'51'",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
")",
"# convert center/radius/angle representation",
"# to three points on the arc representation",
"points",
"=",
"to_threepoint",
"(",
"center",
"=",
"C",
"[",
"0",
":",
"2",
"]",
",",
"radius",
"=",
"R",
",",
"angles",
"=",
"A",
")",
"# add a single Arc entity",
"entities",
".",
"append",
"(",
"Arc",
"(",
"points",
"=",
"len",
"(",
"vertices",
")",
"+",
"np",
".",
"arange",
"(",
"3",
")",
",",
"closed",
"=",
"False",
",",
"*",
"*",
"info",
"(",
"e",
")",
")",
")",
"# add the three vertices",
"vertices",
".",
"extend",
"(",
"points",
")",
"def",
"convert_polyline",
"(",
"e",
")",
":",
"\"\"\"\n Convert DXF LWPOLYLINE entities into trimesh Line entities.\n \"\"\"",
"# load the points in the line",
"lines",
"=",
"np",
".",
"column_stack",
"(",
"(",
"e",
"[",
"'10'",
"]",
",",
"e",
"[",
"'20'",
"]",
")",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# save entity info so we don't have to recompute",
"polyinfo",
"=",
"info",
"(",
"e",
")",
"# 70 is the closed flag for polylines",
"# if the closed flag is set make sure to close",
"is_closed",
"=",
"'70'",
"in",
"e",
"and",
"int",
"(",
"e",
"[",
"'70'",
"]",
"[",
"0",
"]",
")",
"&",
"1",
"if",
"is_closed",
":",
"lines",
"=",
"np",
".",
"vstack",
"(",
"(",
"lines",
",",
"lines",
"[",
":",
"1",
"]",
")",
")",
"# 42 is the vertex bulge flag for LWPOLYLINE entities",
"# \"bulge\" is autocad for \"add a stupid arc using flags",
"# in my otherwise normal polygon\", it's like SVG arc",
"# flags but somehow even more annoying",
"if",
"'42'",
"in",
"e",
":",
"# get the actual bulge float values",
"bulge",
"=",
"np",
".",
"array",
"(",
"e",
"[",
"'42'",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"# what position were vertices stored at",
"vid",
"=",
"np",
".",
"nonzero",
"(",
"chunk",
"[",
":",
",",
"0",
"]",
"==",
"'10'",
")",
"[",
"0",
"]",
"# what position were bulges stored at in the chunk",
"bid",
"=",
"np",
".",
"nonzero",
"(",
"chunk",
"[",
":",
",",
"0",
"]",
"==",
"'42'",
")",
"[",
"0",
"]",
"# filter out endpoint bulge if we're not closed",
"if",
"not",
"is_closed",
":",
"bid_ok",
"=",
"bid",
"<",
"vid",
".",
"max",
"(",
")",
"bid",
"=",
"bid",
"[",
"bid_ok",
"]",
"bulge",
"=",
"bulge",
"[",
"bid_ok",
"]",
"# which vertex index is bulge value associated with",
"bulge_idx",
"=",
"np",
".",
"searchsorted",
"(",
"vid",
",",
"bid",
")",
"# convert stupid bulge to Line/Arc entities",
"v",
",",
"e",
"=",
"bulge_to_arcs",
"(",
"lines",
"=",
"lines",
",",
"bulge",
"=",
"bulge",
",",
"bulge_idx",
"=",
"bulge_idx",
",",
"is_closed",
"=",
"is_closed",
")",
"for",
"i",
"in",
"e",
":",
"# offset added entities by current vertices length",
"i",
".",
"points",
"+=",
"len",
"(",
"vertices",
")",
"vertices",
".",
"extend",
"(",
"v",
")",
"entities",
".",
"extend",
"(",
"e",
")",
"# done with this polyline",
"return",
"# we have a normal polyline so just add it",
"# as single line entity and vertices",
"entities",
".",
"append",
"(",
"Line",
"(",
"points",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"lines",
")",
")",
"+",
"len",
"(",
"vertices",
")",
",",
"*",
"*",
"polyinfo",
")",
")",
"vertices",
".",
"extend",
"(",
"lines",
")",
"def",
"convert_bspline",
"(",
"e",
")",
":",
"\"\"\"\n Convert DXF Spline entities into trimesh BSpline entities.\n \"\"\"",
"# in the DXF there are n points and n ordered fields",
"# with the same group code",
"points",
"=",
"np",
".",
"column_stack",
"(",
"(",
"e",
"[",
"'10'",
"]",
",",
"e",
"[",
"'20'",
"]",
")",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"knots",
"=",
"np",
".",
"array",
"(",
"e",
"[",
"'40'",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# if there are only two points, save it as a line",
"if",
"len",
"(",
"points",
")",
"==",
"2",
":",
"# create a single Line entity",
"entities",
".",
"append",
"(",
"Line",
"(",
"points",
"=",
"len",
"(",
"vertices",
")",
"+",
"np",
".",
"arange",
"(",
"2",
")",
",",
"*",
"*",
"info",
"(",
"e",
")",
")",
")",
"# add the vertices to our collection",
"vertices",
".",
"extend",
"(",
"points",
")",
"return",
"# check bit coded flag for closed",
"# closed = bool(int(e['70'][0]) & 1)",
"# check euclidean distance to see if closed",
"closed",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"points",
"[",
"0",
"]",
"-",
"points",
"[",
"-",
"1",
"]",
")",
"<",
"tol",
".",
"merge",
"# create a BSpline entity",
"entities",
".",
"append",
"(",
"BSpline",
"(",
"points",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"points",
")",
")",
"+",
"len",
"(",
"vertices",
")",
",",
"knots",
"=",
"knots",
",",
"closed",
"=",
"closed",
",",
"*",
"*",
"info",
"(",
"e",
")",
")",
")",
"# add the vertices",
"vertices",
".",
"extend",
"(",
"points",
")",
"def",
"convert_text",
"(",
"e",
")",
":",
"\"\"\"\n Convert a DXF TEXT entity into a native text entity.\n \"\"\"",
"if",
"'50'",
"in",
"e",
":",
"# rotation angle converted to radians",
"angle",
"=",
"np",
".",
"radians",
"(",
"float",
"(",
"e",
"[",
"'50'",
"]",
")",
")",
"else",
":",
"# otherwise no rotation",
"angle",
"=",
"0.0",
"# text with leading and trailing whitespace removed",
"text",
"=",
"e",
"[",
"'1'",
"]",
".",
"strip",
"(",
")",
"# height of text",
"if",
"'40'",
"in",
"e",
":",
"height",
"=",
"float",
"(",
"e",
"[",
"'40'",
"]",
")",
"else",
":",
"height",
"=",
"None",
"# origin point",
"origin",
"=",
"np",
".",
"array",
"(",
"[",
"e",
"[",
"'10'",
"]",
",",
"e",
"[",
"'20'",
"]",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# an origin- relative point (so transforms work)",
"vector",
"=",
"origin",
"+",
"[",
"np",
".",
"cos",
"(",
"angle",
")",
",",
"np",
".",
"sin",
"(",
"angle",
")",
"]",
"# try to extract a (horizontal, vertical) text alignment",
"align",
"=",
"[",
"'center'",
",",
"'center'",
"]",
"try",
":",
"align",
"[",
"0",
"]",
"=",
"[",
"'left'",
",",
"'center'",
",",
"'right'",
"]",
"[",
"int",
"(",
"e",
"[",
"'72'",
"]",
")",
"]",
"except",
"BaseException",
":",
"pass",
"# append the entity",
"entities",
".",
"append",
"(",
"Text",
"(",
"origin",
"=",
"len",
"(",
"vertices",
")",
",",
"vector",
"=",
"len",
"(",
"vertices",
")",
"+",
"1",
",",
"height",
"=",
"height",
",",
"text",
"=",
"text",
",",
"align",
"=",
"align",
")",
")",
"# append the text origin and direction",
"vertices",
".",
"append",
"(",
"origin",
")",
"vertices",
".",
"append",
"(",
"vector",
")",
"# in a DXF file, lines come in pairs,",
"# a group code then the next line is the value",
"# we are removing all whitespace then splitting with the",
"# splitlines function which uses the universal newline method",
"raw",
"=",
"file_obj",
".",
"read",
"(",
")",
"# if we've been passed bytes",
"if",
"hasattr",
"(",
"raw",
",",
"'decode'",
")",
":",
"# search for the sentinel string indicating binary DXF",
"# do it by encoding sentinel to bytes and subset searching",
"if",
"raw",
"[",
":",
"22",
"]",
".",
"find",
"(",
"b'AutoCAD Binary DXF'",
")",
"!=",
"-",
"1",
":",
"if",
"_teigha",
"is",
"None",
":",
"# no converter to ASCII DXF available",
"raise",
"ValueError",
"(",
"'binary DXF not supported!'",
")",
"else",
":",
"# convert binary DXF to R14 ASCII DXF",
"raw",
"=",
"_teigha_convert",
"(",
"raw",
",",
"extension",
"=",
"'dxf'",
")",
"else",
":",
"# we've been passed bytes that don't have the",
"# header for binary DXF so try decoding as UTF-8",
"raw",
"=",
"raw",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'ignore'",
")",
"# remove trailing whitespace",
"raw",
"=",
"str",
"(",
"raw",
")",
".",
"strip",
"(",
")",
"# without any spaces and in upper case",
"cleaned",
"=",
"raw",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"# blob with spaces and original case",
"blob_raw",
"=",
"np",
".",
"array",
"(",
"str",
".",
"splitlines",
"(",
"raw",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"2",
")",
")",
"# if this reshape fails, it means the DXF is malformed",
"blob",
"=",
"np",
".",
"array",
"(",
"str",
".",
"splitlines",
"(",
"cleaned",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"2",
")",
")",
"# get the section which contains the header in the DXF file",
"endsec",
"=",
"np",
".",
"nonzero",
"(",
"blob",
"[",
":",
",",
"1",
"]",
"==",
"'ENDSEC'",
")",
"[",
"0",
"]",
"# get the section which contains entities in the DXF file",
"entity_start",
"=",
"np",
".",
"nonzero",
"(",
"blob",
"[",
":",
",",
"1",
"]",
"==",
"'ENTITIES'",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"entity_end",
"=",
"endsec",
"[",
"np",
".",
"searchsorted",
"(",
"endsec",
",",
"entity_start",
")",
"]",
"entity_blob",
"=",
"blob",
"[",
"entity_start",
":",
"entity_end",
"]",
"# store the entity blob with original case",
"entity_raw",
"=",
"blob_raw",
"[",
"entity_start",
":",
"entity_end",
"]",
"# store metadata",
"metadata",
"=",
"{",
"}",
"# try reading the header, which may be malformed",
"header_start",
"=",
"np",
".",
"nonzero",
"(",
"blob",
"[",
":",
",",
"1",
"]",
"==",
"'HEADER'",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"header_start",
")",
">",
"0",
":",
"header_end",
"=",
"endsec",
"[",
"np",
".",
"searchsorted",
"(",
"endsec",
",",
"header_start",
"[",
"0",
"]",
")",
"]",
"header_blob",
"=",
"blob",
"[",
"header_start",
"[",
"0",
"]",
":",
"header_end",
"]",
"# store some properties from the DXF header",
"metadata",
"[",
"'DXF_HEADER'",
"]",
"=",
"{",
"}",
"for",
"key",
",",
"group",
"in",
"[",
"(",
"'$ACADVER'",
",",
"'1'",
")",
",",
"(",
"'$DIMSCALE'",
",",
"'40'",
")",
",",
"(",
"'$DIMALT'",
",",
"'70'",
")",
",",
"(",
"'$DIMALTF'",
",",
"'40'",
")",
",",
"(",
"'$DIMUNIT'",
",",
"'70'",
")",
",",
"(",
"'$INSUNITS'",
",",
"'70'",
")",
",",
"(",
"'$LUNITS'",
",",
"'70'",
")",
"]",
":",
"value",
"=",
"get_key",
"(",
"header_blob",
",",
"key",
",",
"group",
")",
"if",
"value",
"is",
"not",
"None",
":",
"metadata",
"[",
"'DXF_HEADER'",
"]",
"[",
"key",
"]",
"=",
"value",
"# store unit data pulled from the header of the DXF",
"# prefer LUNITS over INSUNITS",
"# I couldn't find a table for LUNITS values but they",
"# look like they are 0- indexed versions of",
"# the INSUNITS keys, so for now offset the key value",
"for",
"offset",
",",
"key",
"in",
"[",
"(",
"-",
"1",
",",
"'$LUNITS'",
")",
",",
"(",
"0",
",",
"'$INSUNITS'",
")",
"]",
":",
"# get the key from the header blob",
"units",
"=",
"get_key",
"(",
"header_blob",
",",
"key",
",",
"'70'",
")",
"# if it exists add the offset",
"if",
"units",
"is",
"None",
":",
"continue",
"metadata",
"[",
"key",
"]",
"=",
"units",
"units",
"+=",
"offset",
"# if the key is in our list of units store it",
"if",
"units",
"in",
"_DXF_UNITS",
":",
"metadata",
"[",
"'units'",
"]",
"=",
"_DXF_UNITS",
"[",
"units",
"]",
"# warn on drawings with no units",
"if",
"'units'",
"not",
"in",
"metadata",
":",
"log",
".",
"warning",
"(",
"'DXF doesn\\'t have units specified!'",
")",
"# find the start points of entities",
"group_check",
"=",
"entity_blob",
"[",
":",
",",
"0",
"]",
"==",
"'0'",
"inflection",
"=",
"np",
".",
"nonzero",
"(",
"group_check",
")",
"[",
"0",
"]",
"# DXF object to trimesh object converters",
"loaders",
"=",
"{",
"'LINE'",
":",
"(",
"dict",
",",
"convert_line",
")",
",",
"'LWPOLYLINE'",
":",
"(",
"util",
".",
"multi_dict",
",",
"convert_polyline",
")",
",",
"'ARC'",
":",
"(",
"dict",
",",
"convert_arc",
")",
",",
"'CIRCLE'",
":",
"(",
"dict",
",",
"convert_circle",
")",
",",
"'SPLINE'",
":",
"(",
"util",
".",
"multi_dict",
",",
"convert_bspline",
")",
"}",
"# store loaded vertices",
"vertices",
"=",
"[",
"]",
"# store loaded entities",
"entities",
"=",
"[",
"]",
"# an old-style polyline entity strings its data across",
"# multiple vertex entities like a real asshole",
"polyline",
"=",
"None",
"# loop through chunks of entity information",
"for",
"index",
"in",
"np",
".",
"array_split",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"entity_blob",
")",
")",
",",
"inflection",
")",
":",
"# if there is only a header continue",
"if",
"len",
"(",
"index",
")",
"<",
"1",
":",
"continue",
"# chunk will be an (n, 2) array of (group code, data) pairs",
"chunk",
"=",
"entity_blob",
"[",
"index",
"]",
"# the string representing entity type",
"entity_type",
"=",
"chunk",
"[",
"0",
"]",
"[",
"1",
"]",
"############",
"# special case old- style polyline entities",
"if",
"entity_type",
"==",
"'POLYLINE'",
":",
"polyline",
"=",
"[",
"dict",
"(",
"chunk",
")",
"]",
"# if we are collecting vertex entities",
"elif",
"polyline",
"is",
"not",
"None",
"and",
"entity_type",
"==",
"'VERTEX'",
":",
"polyline",
".",
"append",
"(",
"dict",
"(",
"chunk",
")",
")",
"# the end of a polyline",
"elif",
"polyline",
"is",
"not",
"None",
"and",
"entity_type",
"==",
"'SEQEND'",
":",
"# pull the geometry information for the entity",
"lines",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"i",
"[",
"'10'",
"]",
",",
"i",
"[",
"'20'",
"]",
"]",
"for",
"i",
"in",
"polyline",
"[",
"1",
":",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"# check for a closed flag on the polyline",
"if",
"'70'",
"in",
"polyline",
"[",
"0",
"]",
":",
"# flag is bit- coded integer",
"flag",
"=",
"int",
"(",
"polyline",
"[",
"0",
"]",
"[",
"'70'",
"]",
")",
"# first bit represents closed",
"is_closed",
"=",
"bool",
"(",
"flag",
"&",
"1",
")",
"if",
"is_closed",
":",
"lines",
"=",
"np",
".",
"vstack",
"(",
"(",
"lines",
",",
"lines",
"[",
":",
"1",
"]",
")",
")",
"# get the index of each bulged vertices",
"bulge_idx",
"=",
"np",
".",
"array",
"(",
"[",
"i",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"polyline",
")",
"if",
"'42'",
"in",
"e",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"# get the actual bulge value",
"bulge",
"=",
"np",
".",
"array",
"(",
"[",
"float",
"(",
"e",
"[",
"'42'",
"]",
")",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"polyline",
")",
"if",
"'42'",
"in",
"e",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"# convert bulge to new entities",
"v",
",",
"e",
"=",
"bulge_to_arcs",
"(",
"lines",
"=",
"lines",
",",
"bulge",
"=",
"bulge",
",",
"bulge_idx",
"=",
"bulge_idx",
",",
"is_closed",
"=",
"is_closed",
")",
"for",
"i",
"in",
"e",
":",
"# offset entities by existing vertices",
"i",
".",
"points",
"+=",
"len",
"(",
"vertices",
")",
"vertices",
".",
"extend",
"(",
"v",
")",
"entities",
".",
"extend",
"(",
"e",
")",
"# we no longer have an active polyline",
"polyline",
"=",
"None",
"elif",
"entity_type",
"==",
"'TEXT'",
":",
"# text entities need spaces preserved so take",
"# group codes from clean representation (0- column)",
"# and data from the raw representation (1- column)",
"chunk_raw",
"=",
"entity_raw",
"[",
"index",
"]",
"# if we didn't use clean group codes we wouldn't",
"# be able to access them by key as whitespace",
"# is random and crazy, like: ' 1 '",
"chunk_raw",
"[",
":",
",",
"0",
"]",
"=",
"entity_blob",
"[",
"index",
"]",
"[",
":",
",",
"0",
"]",
"try",
":",
"convert_text",
"(",
"dict",
"(",
"chunk_raw",
")",
")",
"except",
"BaseException",
":",
"log",
".",
"warning",
"(",
"'failed to load text entity!'",
",",
"exc_info",
"=",
"True",
")",
"# if the entity contains all relevant data we can",
"# cleanly load it from inside a single function",
"elif",
"entity_type",
"in",
"loaders",
":",
"# the chunker converts an (n,2) list into a dict",
"chunker",
",",
"loader",
"=",
"loaders",
"[",
"entity_type",
"]",
"# convert data to dict",
"entity_data",
"=",
"chunker",
"(",
"chunk",
")",
"# append data to the lists we're collecting",
"loader",
"(",
"entity_data",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'Entity type %s not supported'",
",",
"entity_type",
")",
"# stack vertices into single array",
"vertices",
"=",
"util",
".",
"vstack_empty",
"(",
"vertices",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# return result as kwargs for trimesh.path.Path2D constructor",
"result",
"=",
"{",
"'vertices'",
":",
"vertices",
",",
"'entities'",
":",
"np",
".",
"array",
"(",
"entities",
")",
",",
"'metadata'",
":",
"metadata",
"}",
"return",
"result"
]
| Load a DXF file to a dictionary containing vertices and
entities.
Parameters
----------
file_obj: file or file- like object (has object.read method)
Returns
----------
result: dict, keys are entities, vertices and metadata | [
"Load",
"a",
"DXF",
"file",
"to",
"a",
"dictionary",
"containing",
"vertices",
"and",
"entities",
"."
]
| python | train |
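A hedged usage sketch for the DXF loader above: in practice it is reached through trimesh's public path-loading helper rather than called directly, and the returned kwargs (vertices, entities, metadata) become the attributes of the resulting Path2D. The file name "drawing.dxf" is a placeholder you would replace with a real drawing.

import trimesh

path = trimesh.load_path("drawing.dxf")            # builds a Path2D from the DXF kwargs
print(path.vertices.shape)                          # (n, 2) float64 vertex array
print([type(e).__name__ for e in path.entities])    # Line / Arc / BSpline / Text entities
print(path.metadata.get("units"))                   # parsed from $LUNITS / $INSUNITS, may be None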
ramses-tech/nefertari | nefertari/renderers.py | https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/renderers.py#L96-L102 | def _get_create_update_kwargs(self, value, common_kw):
""" Get kwargs common to create, update, replace. """
kw = common_kw.copy()
kw['body'] = value
if '_self' in value:
kw['headers'] = [('Location', value['_self'])]
return kw | [
"def",
"_get_create_update_kwargs",
"(",
"self",
",",
"value",
",",
"common_kw",
")",
":",
"kw",
"=",
"common_kw",
".",
"copy",
"(",
")",
"kw",
"[",
"'body'",
"]",
"=",
"value",
"if",
"'_self'",
"in",
"value",
":",
"kw",
"[",
"'headers'",
"]",
"=",
"[",
"(",
"'Location'",
",",
"value",
"[",
"'_self'",
"]",
")",
"]",
"return",
"kw"
]
| Get kwargs common to create, update, replace. | [
"Get",
"kwargs",
"common",
"to",
"create",
"update",
"replace",
"."
]
| python | train |
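A self-contained sketch of the kwargs-merging behaviour shown above: the common renderer kwargs are copied, the rendered value becomes the response body, and a Location header is added when the value carries a "_self" URL. The sample values are invented for illustration.

def get_create_update_kwargs(value, common_kw):
    kw = common_kw.copy()
    kw["body"] = value
    if "_self" in value:
        kw["headers"] = [("Location", value["_self"])]
    return kw

created = {"id": 7, "_self": "http://example.com/api/items/7"}
kw = get_create_update_kwargs(created, {"request": None, "encoder": None})
print(kw["headers"])   # [('Location', 'http://example.com/api/items/7')]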
serkanyersen/underscore.py | src/underscore.py | https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L973-L982 | def functions(self):
""" Return a sorted list of the function names available on the object.
"""
names = []
for i, k in enumerate(self.obj):
if _(self.obj[k]).isCallable():
names.append(k)
return self._wrap(sorted(names)) | [
"def",
"functions",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"for",
"i",
",",
"k",
"in",
"enumerate",
"(",
"self",
".",
"obj",
")",
":",
"if",
"_",
"(",
"self",
".",
"obj",
"[",
"k",
"]",
")",
".",
"isCallable",
"(",
")",
":",
"names",
".",
"append",
"(",
"k",
")",
"return",
"self",
".",
"_wrap",
"(",
"sorted",
"(",
"names",
")",
")"
]
| Return a sorted list of the function names available on the object. | [
"Return",
"a",
"sorted",
"list",
"of",
"the",
"function",
"names",
"available",
"on",
"the",
"object",
"."
]
| python | train |
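A standalone sketch of what `functions` computes: the keys of the wrapped object whose values are callable, returned in sorted order. In underscore.py itself this is reached as `_(obj).functions()`; the sample object below is made up for illustration.

obj = {"zeta": len, "alpha": max, "count": 3, "name": "demo"}

names = sorted(k for k in obj if callable(obj[k]))
print(names)   # ['alpha', 'zeta']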