nwo (stringlengths 5–106) | sha (stringlengths 40) | path (stringlengths 4–174) | language (stringclasses 1 value) | identifier (stringlengths 1–140) | parameters (stringlengths 0–87.7k) | argument_list (stringclasses 1 value) | return_statement (stringlengths 0–426k) | docstring (stringlengths 0–64.3k) | docstring_summary (stringlengths 0–26.3k) | docstring_tokens (list) | function (stringlengths 18–4.83M) | function_tokens (list) | url (stringlengths 83–304)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
francisck/DanderSpritz_docs
|
86bb7caca5a957147f120b18bb5c31f299914904
|
Python/Core/Lib/lib-tk/turtle.py
|
python
|
__methods
|
(cls)
|
return _dict.keys()
|
helper function for Scrolled Canvas
|
helper function for Scrolled Canvas
|
[
"helper",
"function",
"for",
"Scrolled",
"Canvas"
] |
def __methods(cls):
"""helper function for Scrolled Canvas"""
_dict = {}
__methodDict(cls, _dict)
return _dict.keys()
|
[
"def",
"__methods",
"(",
"cls",
")",
":",
"_dict",
"=",
"{",
"}",
"__methodDict",
"(",
"cls",
",",
"_dict",
")",
"return",
"_dict",
".",
"keys",
"(",
")"
] |
https://github.com/francisck/DanderSpritz_docs/blob/86bb7caca5a957147f120b18bb5c31f299914904/Python/Core/Lib/lib-tk/turtle.py#L282-L286
|
|
Lispython/human_curl
|
9f45c45538e9e17cf71d584dc56333d31593c405
|
human_curl/utils.py
|
python
|
generate_nonce
|
(length=8)
|
return ''.join([str(random.randint(0, 9)) for i in range(length)])
|
Generate pseudorandom number.
|
Generate pseudorandom number.
|
[
"Generate",
"pseudorandom",
"number",
"."
] |
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
|
[
"def",
"generate_nonce",
"(",
"length",
"=",
"8",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"9",
")",
")",
"for",
"i",
"in",
"range",
"(",
"length",
")",
"]",
")"
] |
https://github.com/Lispython/human_curl/blob/9f45c45538e9e17cf71d584dc56333d31593c405/human_curl/utils.py#L336-L338
|
|
quantumlib/OpenFermion
|
6187085f2a7707012b68370b625acaeed547e62b
|
src/openfermion/ops/operators/symbolic_operator.py
|
python
|
SymbolicOperator.__imul__
|
(self, multiplier)
|
In-place multiply (*=) with scalar or operator of the same type.
Default implementation is to multiply coefficients and
concatenate terms.
Args:
multiplier(complex float, or SymbolicOperator): multiplier
Returns:
product (SymbolicOperator): Mutated self.
|
In-place multiply (*=) with scalar or operator of the same type.
|
[
"In",
"-",
"place",
"multiply",
"(",
"*",
"=",
")",
"with",
"scalar",
"or",
"operator",
"of",
"the",
"same",
"type",
"."
] |
def __imul__(self, multiplier):
"""In-place multiply (*=) with scalar or operator of the same type.
Default implementation is to multiply coefficients and
concatenate terms.
Args:
multiplier(complex float, or SymbolicOperator): multiplier
Returns:
product (SymbolicOperator): Mutated self.
"""
# Handle scalars.
if isinstance(multiplier, COEFFICIENT_TYPES):
for term in self.terms:
self.terms[term] *= multiplier
return self
# Handle operator of the same type
elif isinstance(multiplier, self.__class__):
result_terms = dict()
for left_term in self.terms:
for right_term in multiplier.terms:
left_coefficient = self.terms[left_term]
right_coefficient = multiplier.terms[right_term]
new_coefficient = left_coefficient * right_coefficient
new_term = left_term + right_term
new_coefficient, new_term = self._simplify(
new_term, coefficient=new_coefficient)
# Update result dict.
if new_term in result_terms:
result_terms[new_term] += new_coefficient
else:
result_terms[new_term] = new_coefficient
self.terms = result_terms
return self
# Invalid multiplier type
else:
raise TypeError('Cannot multiply {} with {}'.format(
self.__class__.__name__, multiplier.__class__.__name__))
|
[
"def",
"__imul__",
"(",
"self",
",",
"multiplier",
")",
":",
"# Handle scalars.",
"if",
"isinstance",
"(",
"multiplier",
",",
"COEFFICIENT_TYPES",
")",
":",
"for",
"term",
"in",
"self",
".",
"terms",
":",
"self",
".",
"terms",
"[",
"term",
"]",
"*=",
"multiplier",
"return",
"self",
"# Handle operator of the same type",
"elif",
"isinstance",
"(",
"multiplier",
",",
"self",
".",
"__class__",
")",
":",
"result_terms",
"=",
"dict",
"(",
")",
"for",
"left_term",
"in",
"self",
".",
"terms",
":",
"for",
"right_term",
"in",
"multiplier",
".",
"terms",
":",
"left_coefficient",
"=",
"self",
".",
"terms",
"[",
"left_term",
"]",
"right_coefficient",
"=",
"multiplier",
".",
"terms",
"[",
"right_term",
"]",
"new_coefficient",
"=",
"left_coefficient",
"*",
"right_coefficient",
"new_term",
"=",
"left_term",
"+",
"right_term",
"new_coefficient",
",",
"new_term",
"=",
"self",
".",
"_simplify",
"(",
"new_term",
",",
"coefficient",
"=",
"new_coefficient",
")",
"# Update result dict.",
"if",
"new_term",
"in",
"result_terms",
":",
"result_terms",
"[",
"new_term",
"]",
"+=",
"new_coefficient",
"else",
":",
"result_terms",
"[",
"new_term",
"]",
"=",
"new_coefficient",
"self",
".",
"terms",
"=",
"result_terms",
"return",
"self",
"# Invalid multiplier type",
"else",
":",
"raise",
"TypeError",
"(",
"'Cannot multiply {} with {}'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"multiplier",
".",
"__class__",
".",
"__name__",
")",
")"
] |
https://github.com/quantumlib/OpenFermion/blob/6187085f2a7707012b68370b625acaeed547e62b/src/openfermion/ops/operators/symbolic_operator.py#L350-L392
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/Python-2.7.9/Lib/lib-tk/FileDialog.py
|
python
|
FileDialog.cancel_command
|
(self, event=None)
|
[] |
def cancel_command(self, event=None):
self.quit()
|
[
"def",
"cancel_command",
"(",
"self",
",",
"event",
"=",
"None",
")",
":",
"self",
".",
"quit",
"(",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/lib-tk/FileDialog.py#L200-L201
|
||||
PanJinquan/Face_Detection_Recognition
|
7e06668a98f4cd08c7ca8cbbfa3917f9b433b489
|
faceRecognition/align/detect_face.py
|
python
|
layer
|
(op)
|
return layer_decorated
|
Decorator for composable network layers.
|
Decorator for composable network layers.
|
[
"Decorator",
"for",
"composable",
"network",
"layers",
"."
] |
def layer(op):
"""Decorator for composable network layers."""
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
|
[
"def",
"layer",
"(",
"op",
")",
":",
"def",
"layer_decorated",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Automatically set a name if not provided.",
"name",
"=",
"kwargs",
".",
"setdefault",
"(",
"'name'",
",",
"self",
".",
"get_unique_name",
"(",
"op",
".",
"__name__",
")",
")",
"# Figure out the layer inputs.",
"if",
"len",
"(",
"self",
".",
"terminals",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"'No input variables found for layer %s.'",
"%",
"name",
")",
"elif",
"len",
"(",
"self",
".",
"terminals",
")",
"==",
"1",
":",
"layer_input",
"=",
"self",
".",
"terminals",
"[",
"0",
"]",
"else",
":",
"layer_input",
"=",
"list",
"(",
"self",
".",
"terminals",
")",
"# Perform the operation and get the output.",
"layer_output",
"=",
"op",
"(",
"self",
",",
"layer_input",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Add to layer LUT.",
"self",
".",
"layers",
"[",
"name",
"]",
"=",
"layer_output",
"# This output is now the input for the next layer.",
"self",
".",
"feed",
"(",
"layer_output",
")",
"# Return self for chained calls.",
"return",
"self",
"return",
"layer_decorated"
] |
https://github.com/PanJinquan/Face_Detection_Recognition/blob/7e06668a98f4cd08c7ca8cbbfa3917f9b433b489/faceRecognition/align/detect_face.py#L37-L59
|
|
theotherp/nzbhydra
|
4b03d7f769384b97dfc60dade4806c0fc987514e
|
libs/cryptography/hazmat/primitives/serialization.py
|
python
|
_read_next_string
|
(data)
|
return data[4:4 + str_len], data[4 + str_len:]
|
Retrieves the next RFC 4251 string value from the data.
While the RFC calls these strings, in Python they are bytes objects.
|
Retrieves the next RFC 4251 string value from the data.
|
[
"Retrieves",
"the",
"next",
"RFC",
"4251",
"string",
"value",
"from",
"the",
"data",
"."
] |
def _read_next_string(data):
"""
Retrieves the next RFC 4251 string value from the data.
While the RFC calls these strings, in Python they are bytes objects.
"""
str_len, = struct.unpack('>I', data[:4])
return data[4:4 + str_len], data[4 + str_len:]
|
[
"def",
"_read_next_string",
"(",
"data",
")",
":",
"str_len",
",",
"=",
"struct",
".",
"unpack",
"(",
"'>I'",
",",
"data",
"[",
":",
"4",
"]",
")",
"return",
"data",
"[",
"4",
":",
"4",
"+",
"str_len",
"]",
",",
"data",
"[",
"4",
"+",
"str_len",
":",
"]"
] |
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/cryptography/hazmat/primitives/serialization.py#L134-L141
|
|
MenglinLu/Chinese-clinical-NER
|
9614593ee2e1ba38d0985c44e957d316e178b93c
|
bert_sklearn_bioes/bert_sklearn/model/pytorch_pretrained/tokenization.py
|
python
|
WordpieceTokenizer.tokenize
|
(self, text)
|
return output_tokens
|
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
|
Tokenizes a piece of text into its word pieces.
|
[
"Tokenizes",
"a",
"piece",
"of",
"text",
"into",
"its",
"word",
"pieces",
"."
] |
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
|
[
"def",
"tokenize",
"(",
"self",
",",
"text",
")",
":",
"output_tokens",
"=",
"[",
"]",
"for",
"token",
"in",
"whitespace_tokenize",
"(",
"text",
")",
":",
"chars",
"=",
"list",
"(",
"token",
")",
"if",
"len",
"(",
"chars",
")",
">",
"self",
".",
"max_input_chars_per_word",
":",
"output_tokens",
".",
"append",
"(",
"self",
".",
"unk_token",
")",
"continue",
"is_bad",
"=",
"False",
"start",
"=",
"0",
"sub_tokens",
"=",
"[",
"]",
"while",
"start",
"<",
"len",
"(",
"chars",
")",
":",
"end",
"=",
"len",
"(",
"chars",
")",
"cur_substr",
"=",
"None",
"while",
"start",
"<",
"end",
":",
"substr",
"=",
"\"\"",
".",
"join",
"(",
"chars",
"[",
"start",
":",
"end",
"]",
")",
"if",
"start",
">",
"0",
":",
"substr",
"=",
"\"##\"",
"+",
"substr",
"if",
"substr",
"in",
"self",
".",
"vocab",
":",
"cur_substr",
"=",
"substr",
"break",
"end",
"-=",
"1",
"if",
"cur_substr",
"is",
"None",
":",
"is_bad",
"=",
"True",
"break",
"sub_tokens",
".",
"append",
"(",
"cur_substr",
")",
"start",
"=",
"end",
"if",
"is_bad",
":",
"output_tokens",
".",
"append",
"(",
"self",
".",
"unk_token",
")",
"else",
":",
"output_tokens",
".",
"extend",
"(",
"sub_tokens",
")",
"return",
"output_tokens"
] |
https://github.com/MenglinLu/Chinese-clinical-NER/blob/9614593ee2e1ba38d0985c44e957d316e178b93c/bert_sklearn_bioes/bert_sklearn/model/pytorch_pretrained/tokenization.py#L311-L360
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/tkinter/__init__.py
|
python
|
Canvas.focus
|
(self, *args)
|
return self.tk.call((self._w, 'focus') + args)
|
Set focus to the first item specified in ARGS.
|
Set focus to the first item specified in ARGS.
|
[
"Set",
"focus",
"to",
"the",
"first",
"item",
"specified",
"in",
"ARGS",
"."
] |
def focus(self, *args):
"""Set focus to the first item specified in ARGS."""
return self.tk.call((self._w, 'focus') + args)
|
[
"def",
"focus",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"(",
"self",
".",
"_w",
",",
"'focus'",
")",
"+",
"args",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/tkinter/__init__.py#L2549-L2551
|
|
Shmuma/ptan
|
ddf9ae54de1715f25c910453f78e007012a20388
|
ptan/common/utils.py
|
python
|
SpeedMonitor.epoch_time
|
(self)
|
return timedelta(seconds=s)
|
Calculate average epoch time
:return: timedelta object
|
Calculate average epoch time
:return: timedelta object
|
[
"Calculate",
"average",
"epoch",
"time",
":",
"return",
":",
"timedelta",
"object"
] |
def epoch_time(self):
"""
Calculate average epoch time
:return: timedelta object
"""
if self.start_ts is None:
return None
s = self.seconds()
if self.epoches > 0:
s /= self.epoches + 1
return timedelta(seconds=s)
|
[
"def",
"epoch_time",
"(",
"self",
")",
":",
"if",
"self",
".",
"start_ts",
"is",
"None",
":",
"return",
"None",
"s",
"=",
"self",
".",
"seconds",
"(",
")",
"if",
"self",
".",
"epoches",
">",
"0",
":",
"s",
"/=",
"self",
".",
"epoches",
"+",
"1",
"return",
"timedelta",
"(",
"seconds",
"=",
"s",
")"
] |
https://github.com/Shmuma/ptan/blob/ddf9ae54de1715f25c910453f78e007012a20388/ptan/common/utils.py#L94-L104
|
|
PacktPublishing/Mastering-OpenCV-4-with-Python
|
ea5372c6d8758ebc56ef5c775f9785d4427f81e6
|
Chapter05/01-chapter-content/bitwise_operations.py
|
python
|
show_with_matplotlib
|
(color_img, title, pos)
|
Shows an image using matplotlib capabilities
|
Shows an image using matplotlib capabilities
|
[
"Shows",
"an",
"image",
"using",
"matplotlib",
"capabilities"
] |
def show_with_matplotlib(color_img, title, pos):
"""Shows an image using matplotlib capabilities"""
# Convert BGR image to RGB
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(3, 3, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
|
[
"def",
"show_with_matplotlib",
"(",
"color_img",
",",
"title",
",",
"pos",
")",
":",
"# Convert BGR image to RGB",
"img_RGB",
"=",
"color_img",
"[",
":",
",",
":",
",",
":",
":",
"-",
"1",
"]",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"3",
",",
"3",
",",
"pos",
")",
"plt",
".",
"imshow",
"(",
"img_RGB",
")",
"plt",
".",
"title",
"(",
"title",
")",
"plt",
".",
"axis",
"(",
"'off'",
")"
] |
https://github.com/PacktPublishing/Mastering-OpenCV-4-with-Python/blob/ea5372c6d8758ebc56ef5c775f9785d4427f81e6/Chapter05/01-chapter-content/bitwise_operations.py#L11-L20
|
||
mwielgoszewski/doorman
|
9a9b97c814625fcdd281b55c82177c9f777c5526
|
doorman/utils.py
|
python
|
create_query_pack_from_upload
|
(upload)
|
return pack
|
Create a pack and queries from a query pack file. **Note**, if a
pack already exists under the filename being uploaded, then any
queries defined here will be added to the existing pack! However,
if a query with a particular name already exists, and its sql is
NOT the same, then a new query with the same name but different id
will be created (as to avoid clobbering the existing query). If its
sql is identical, then the query will be reused.
|
Create a pack and queries from a query pack file. **Note**, if a
pack already exists under the filename being uploaded, then any
queries defined here will be added to the existing pack! However,
if a query with a particular name already exists, and its sql is
NOT the same, then a new query with the same name but different id
will be created (as to avoid clobbering the existing query). If its
sql is identical, then the query will be reused.
|
[
"Create",
"a",
"pack",
"and",
"queries",
"from",
"a",
"query",
"pack",
"file",
".",
"**",
"Note",
"**",
"if",
"a",
"pack",
"already",
"exists",
"under",
"the",
"filename",
"being",
"uploaded",
"then",
"any",
"queries",
"defined",
"here",
"will",
"be",
"added",
"to",
"the",
"existing",
"pack!",
"However",
"if",
"a",
"query",
"with",
"a",
"particular",
"name",
"already",
"exists",
"and",
"its",
"sql",
"is",
"NOT",
"the",
"same",
"then",
"a",
"new",
"query",
"with",
"the",
"same",
"name",
"but",
"different",
"id",
"will",
"be",
"created",
"(",
"as",
"to",
"avoid",
"clobbering",
"the",
"existing",
"query",
")",
".",
"If",
"its",
"sql",
"is",
"identical",
"then",
"the",
"query",
"will",
"be",
"reused",
"."
] |
def create_query_pack_from_upload(upload):
'''
Create a pack and queries from a query pack file. **Note**, if a
pack already exists under the filename being uploaded, then any
queries defined here will be added to the existing pack! However,
if a query with a particular name already exists, and its sql is
NOT the same, then a new query with the same name but different id
will be created (as to avoid clobbering the existing query). If its
sql is identical, then the query will be reused.
'''
# The json package on Python 3 expects a `str` input, so we're going to
# read the body and possibly convert to the right type
body = upload.data.read()
if not isinstance(body, six.string_types):
body = body.decode('utf-8')
try:
data = json.loads(body)
except ValueError:
flash(u"Could not load pack as JSON - ensure it is JSON encoded",
'danger')
return None
else:
if 'queries' not in data:
flash(u"No queries in pack", 'danger')
return None
name = splitext(basename(upload.data.filename))[0]
pack = Pack.query.filter(Pack.name == name).first()
if not pack:
current_app.logger.debug("Creating pack %s", name)
pack = Pack.create(name=name, **data)
for query_name, query in data['queries'].items():
if not validate_osquery_query(query['query']):
flash('Invalid osquery query: "{0}"'.format(query['query']), 'danger')
return None
q = Query.query.filter(Query.name == query_name).first()
if not q:
q = Query.create(name=query_name, **query)
pack.queries.append(q)
current_app.logger.debug("Adding new query %s to pack %s",
q.name, pack.name)
continue
if q in pack.queries:
continue
if q.sql == query['query']:
current_app.logger.debug("Adding existing query %s to pack %s",
q.name, pack.name)
pack.queries.append(q)
else:
q2 = Query.create(name=query_name, **query)
current_app.logger.debug(
"Created another query named %s, but different sql: %r vs %r",
query_name, q2.sql.encode('utf-8'), q.sql.encode('utf-8'))
pack.queries.append(q2)
else:
pack.save()
flash(u"Imported query pack {0}".format(pack.name), 'success')
return pack
|
[
"def",
"create_query_pack_from_upload",
"(",
"upload",
")",
":",
"# The json package on Python 3 expects a `str` input, so we're going to",
"# read the body and possibly convert to the right type",
"body",
"=",
"upload",
".",
"data",
".",
"read",
"(",
")",
"if",
"not",
"isinstance",
"(",
"body",
",",
"six",
".",
"string_types",
")",
":",
"body",
"=",
"body",
".",
"decode",
"(",
"'utf-8'",
")",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"except",
"ValueError",
":",
"flash",
"(",
"u\"Could not load pack as JSON - ensure it is JSON encoded\"",
",",
"'danger'",
")",
"return",
"None",
"else",
":",
"if",
"'queries'",
"not",
"in",
"data",
":",
"flash",
"(",
"u\"No queries in pack\"",
",",
"'danger'",
")",
"return",
"None",
"name",
"=",
"splitext",
"(",
"basename",
"(",
"upload",
".",
"data",
".",
"filename",
")",
")",
"[",
"0",
"]",
"pack",
"=",
"Pack",
".",
"query",
".",
"filter",
"(",
"Pack",
".",
"name",
"==",
"name",
")",
".",
"first",
"(",
")",
"if",
"not",
"pack",
":",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Creating pack %s\"",
",",
"name",
")",
"pack",
"=",
"Pack",
".",
"create",
"(",
"name",
"=",
"name",
",",
"*",
"*",
"data",
")",
"for",
"query_name",
",",
"query",
"in",
"data",
"[",
"'queries'",
"]",
".",
"items",
"(",
")",
":",
"if",
"not",
"validate_osquery_query",
"(",
"query",
"[",
"'query'",
"]",
")",
":",
"flash",
"(",
"'Invalid osquery query: \"{0}\"'",
".",
"format",
"(",
"query",
"[",
"'query'",
"]",
")",
",",
"'danger'",
")",
"return",
"None",
"q",
"=",
"Query",
".",
"query",
".",
"filter",
"(",
"Query",
".",
"name",
"==",
"query_name",
")",
".",
"first",
"(",
")",
"if",
"not",
"q",
":",
"q",
"=",
"Query",
".",
"create",
"(",
"name",
"=",
"query_name",
",",
"*",
"*",
"query",
")",
"pack",
".",
"queries",
".",
"append",
"(",
"q",
")",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Adding new query %s to pack %s\"",
",",
"q",
".",
"name",
",",
"pack",
".",
"name",
")",
"continue",
"if",
"q",
"in",
"pack",
".",
"queries",
":",
"continue",
"if",
"q",
".",
"sql",
"==",
"query",
"[",
"'query'",
"]",
":",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Adding existing query %s to pack %s\"",
",",
"q",
".",
"name",
",",
"pack",
".",
"name",
")",
"pack",
".",
"queries",
".",
"append",
"(",
"q",
")",
"else",
":",
"q2",
"=",
"Query",
".",
"create",
"(",
"name",
"=",
"query_name",
",",
"*",
"*",
"query",
")",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Created another query named %s, but different sql: %r vs %r\"",
",",
"query_name",
",",
"q2",
".",
"sql",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"q",
".",
"sql",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"pack",
".",
"queries",
".",
"append",
"(",
"q2",
")",
"else",
":",
"pack",
".",
"save",
"(",
")",
"flash",
"(",
"u\"Imported query pack {0}\"",
".",
"format",
"(",
"pack",
".",
"name",
")",
",",
"'success'",
")",
"return",
"pack"
] |
https://github.com/mwielgoszewski/doorman/blob/9a9b97c814625fcdd281b55c82177c9f777c5526/doorman/utils.py#L114-L181
|
|
sympy/sympy
|
d822fcba181155b85ff2b29fe525adbafb22b448
|
sympy/physics/mechanics/system.py
|
python
|
SymbolicSystem.dyn_implicit_rhs
|
(self)
|
Returns the column matrix, F, corresponding to the dynamic equations
in implicit form, M x' = F, where the kinematical equations are not
included
|
Returns the column matrix, F, corresponding to the dynamic equations
in implicit form, M x' = F, where the kinematical equations are not
included
|
[
"Returns",
"the",
"column",
"matrix",
"F",
"corresponding",
"to",
"the",
"dynamic",
"equations",
"in",
"implicit",
"form",
"M",
"x",
"=",
"F",
"where",
"the",
"kinematical",
"equations",
"are",
"not",
"included"
] |
def dyn_implicit_rhs(self):
"""Returns the column matrix, F, corresponding to the dynamic equations
in implicit form, M x' = F, where the kinematical equations are not
included"""
if self._dyn_implicit_rhs is None:
raise AttributeError("dyn_implicit_rhs is not specified for "
"equations of motion form [1] or [2].")
else:
return self._dyn_implicit_rhs
|
[
"def",
"dyn_implicit_rhs",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dyn_implicit_rhs",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"dyn_implicit_rhs is not specified for \"",
"\"equations of motion form [1] or [2].\"",
")",
"else",
":",
"return",
"self",
".",
"_dyn_implicit_rhs"
] |
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/physics/mechanics/system.py#L312-L320
|
||
Henryhaohao/12306_Ticket
|
b47657abfcfc8c5a507f546fa780bcb84c56fb7e
|
Spiders/12306_captcha.py
|
python
|
Login.captcha
|
(self, answer_num)
|
填写验证码
|
填写验证码
|
[
"填写验证码"
] |
def captcha(self, answer_num):
'''填写验证码'''
answer_sp = answer_num.split(',')
answer_list = []
an = {'1': (31, 35), '2': (116, 46), '3': (191, 24), '4': (243, 50), '5': (22, 114), '6': (117, 94),
'7': (167, 120), '8': (251, 105)}
for i in answer_sp:
for j in an.keys():
if i == j:
answer_list.append(an[j][0])
answer_list.append(',')
answer_list.append(an[j][1])
answer_list.append(',')
s = ''
for i in answer_list:
s += str(i)
answer = s[:-1]
# 验证验证码
form_check = {
'answer': answer,
'login_site': 'E',
'rand': 'sjrand'
}
global req
html_check = req.post(self.url_check, data=form_check, headers=self.headers, verify=False).json()
print(html_check)
if html_check['result_code'] == '4':
print('验证码校验成功!')
else:
print('验证码校验失败!')
exit()
|
[
"def",
"captcha",
"(",
"self",
",",
"answer_num",
")",
":",
"answer_sp",
"=",
"answer_num",
".",
"split",
"(",
"','",
")",
"answer_list",
"=",
"[",
"]",
"an",
"=",
"{",
"'1'",
":",
"(",
"31",
",",
"35",
")",
",",
"'2'",
":",
"(",
"116",
",",
"46",
")",
",",
"'3'",
":",
"(",
"191",
",",
"24",
")",
",",
"'4'",
":",
"(",
"243",
",",
"50",
")",
",",
"'5'",
":",
"(",
"22",
",",
"114",
")",
",",
"'6'",
":",
"(",
"117",
",",
"94",
")",
",",
"'7'",
":",
"(",
"167",
",",
"120",
")",
",",
"'8'",
":",
"(",
"251",
",",
"105",
")",
"}",
"for",
"i",
"in",
"answer_sp",
":",
"for",
"j",
"in",
"an",
".",
"keys",
"(",
")",
":",
"if",
"i",
"==",
"j",
":",
"answer_list",
".",
"append",
"(",
"an",
"[",
"j",
"]",
"[",
"0",
"]",
")",
"answer_list",
".",
"append",
"(",
"','",
")",
"answer_list",
".",
"append",
"(",
"an",
"[",
"j",
"]",
"[",
"1",
"]",
")",
"answer_list",
".",
"append",
"(",
"','",
")",
"s",
"=",
"''",
"for",
"i",
"in",
"answer_list",
":",
"s",
"+=",
"str",
"(",
"i",
")",
"answer",
"=",
"s",
"[",
":",
"-",
"1",
"]",
"# 验证验证码",
"form_check",
"=",
"{",
"'answer'",
":",
"answer",
",",
"'login_site'",
":",
"'E'",
",",
"'rand'",
":",
"'sjrand'",
"}",
"global",
"req",
"html_check",
"=",
"req",
".",
"post",
"(",
"self",
".",
"url_check",
",",
"data",
"=",
"form_check",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"verify",
"=",
"False",
")",
".",
"json",
"(",
")",
"print",
"(",
"html_check",
")",
"if",
"html_check",
"[",
"'result_code'",
"]",
"==",
"'4'",
":",
"print",
"(",
"'验证码校验成功!')",
"",
"else",
":",
"print",
"(",
"'验证码校验失败!')",
"",
"exit",
"(",
")"
] |
https://github.com/Henryhaohao/12306_Ticket/blob/b47657abfcfc8c5a507f546fa780bcb84c56fb7e/Spiders/12306_captcha.py#L119-L149
|
||
Yukinoshita47/Yuki-Chan-The-Auto-Pentest
|
bea1af4e1d544eadc166f728be2f543ea10af191
|
Module/dirsearch/thirdparty/requests/utils.py
|
python
|
dict_from_cookiejar
|
(cj)
|
return cookie_dict
|
Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
|
Returns a key/value dictionary from a CookieJar.
|
[
"Returns",
"a",
"key",
"/",
"value",
"dictionary",
"from",
"a",
"CookieJar",
"."
] |
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
|
[
"def",
"dict_from_cookiejar",
"(",
"cj",
")",
":",
"cookie_dict",
"=",
"{",
"}",
"for",
"cookie",
"in",
"cj",
":",
"cookie_dict",
"[",
"cookie",
".",
"name",
"]",
"=",
"cookie",
".",
"value",
"return",
"cookie_dict"
] |
https://github.com/Yukinoshita47/Yuki-Chan-The-Auto-Pentest/blob/bea1af4e1d544eadc166f728be2f543ea10af191/Module/dirsearch/thirdparty/requests/utils.py#L263-L274
|
|
agschwender/pilbox
|
d05d1c0366fba2de92c261c81ca49307370f4699
|
pilbox/image.py
|
python
|
Image.rotate
|
(self, deg, **kwargs)
|
return self
|
Rotates the image clockwise around its center. Returns the
instance. Supports the following optional keyword arguments:
expand - Expand the output image to fit rotation
|
Rotates the image clockwise around its center. Returns the
instance. Supports the following optional keyword arguments:
|
[
"Rotates",
"the",
"image",
"clockwise",
"around",
"its",
"center",
".",
"Returns",
"the",
"instance",
".",
"Supports",
"the",
"following",
"optional",
"keyword",
"arguments",
":"
] |
def rotate(self, deg, **kwargs):
""" Rotates the image clockwise around its center. Returns the
instance. Supports the following optional keyword arguments:
expand - Expand the output image to fit rotation
"""
opts = Image._normalize_options(kwargs)
if deg == "auto":
if self._orig_format == "JPEG":
try:
exif = self.img._getexif() or dict()
deg = _orientation_to_rotation.get(exif.get(274, 0), 0)
except Exception:
logger.warn('unable to parse exif')
deg = 0
else:
deg = 0
deg = 360 - (int(deg) % 360)
if deg % 90 == 0:
if deg == 90:
self.img = self.img.transpose(PIL.Image.ROTATE_90)
elif deg == 180:
self.img = self.img.transpose(PIL.Image.ROTATE_180)
elif deg == 270:
self.img = self.img.transpose(PIL.Image.ROTATE_270)
else:
self.img = self.img.rotate(deg, expand=bool(int(opts["expand"])))
return self
|
[
"def",
"rotate",
"(",
"self",
",",
"deg",
",",
"*",
"*",
"kwargs",
")",
":",
"opts",
"=",
"Image",
".",
"_normalize_options",
"(",
"kwargs",
")",
"if",
"deg",
"==",
"\"auto\"",
":",
"if",
"self",
".",
"_orig_format",
"==",
"\"JPEG\"",
":",
"try",
":",
"exif",
"=",
"self",
".",
"img",
".",
"_getexif",
"(",
")",
"or",
"dict",
"(",
")",
"deg",
"=",
"_orientation_to_rotation",
".",
"get",
"(",
"exif",
".",
"get",
"(",
"274",
",",
"0",
")",
",",
"0",
")",
"except",
"Exception",
":",
"logger",
".",
"warn",
"(",
"'unable to parse exif'",
")",
"deg",
"=",
"0",
"else",
":",
"deg",
"=",
"0",
"deg",
"=",
"360",
"-",
"(",
"int",
"(",
"deg",
")",
"%",
"360",
")",
"if",
"deg",
"%",
"90",
"==",
"0",
":",
"if",
"deg",
"==",
"90",
":",
"self",
".",
"img",
"=",
"self",
".",
"img",
".",
"transpose",
"(",
"PIL",
".",
"Image",
".",
"ROTATE_90",
")",
"elif",
"deg",
"==",
"180",
":",
"self",
".",
"img",
"=",
"self",
".",
"img",
".",
"transpose",
"(",
"PIL",
".",
"Image",
".",
"ROTATE_180",
")",
"elif",
"deg",
"==",
"270",
":",
"self",
".",
"img",
"=",
"self",
".",
"img",
".",
"transpose",
"(",
"PIL",
".",
"Image",
".",
"ROTATE_270",
")",
"else",
":",
"self",
".",
"img",
"=",
"self",
".",
"img",
".",
"rotate",
"(",
"deg",
",",
"expand",
"=",
"bool",
"(",
"int",
"(",
"opts",
"[",
"\"expand\"",
"]",
")",
")",
")",
"return",
"self"
] |
https://github.com/agschwender/pilbox/blob/d05d1c0366fba2de92c261c81ca49307370f4699/pilbox/image.py#L208-L238
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-darwin/x64/ldap3/core/connection.py
|
python
|
Connection.repr_with_sensitive_data_stripped
|
(self)
|
return r
|
[] |
def repr_with_sensitive_data_stripped(self):
conf_default_pool_name = get_config_parameter('DEFAULT_THREADED_POOL_NAME')
if self.server_pool:
r = 'Connection(server={0.server_pool!r}'.format(self)
else:
r = 'Connection(server={0.server!r}'.format(self)
r += '' if self.user is None else ', user={0.user!r}'.format(self)
r += '' if self.password is None else ", password='{0}'".format('<stripped %d characters of sensitive data>' % len(self.password))
r += '' if self.auto_bind is None else ', auto_bind={0.auto_bind!r}'.format(self)
r += '' if self.version is None else ', version={0.version!r}'.format(self)
r += '' if self.authentication is None else ', authentication={0.authentication!r}'.format(self)
r += '' if self.strategy_type is None else ', client_strategy={0.strategy_type!r}'.format(self)
r += '' if self.auto_referrals is None else ', auto_referrals={0.auto_referrals!r}'.format(self)
r += '' if self.sasl_mechanism is None else ', sasl_mechanism={0.sasl_mechanism!r}'.format(self)
if self.sasl_mechanism == DIGEST_MD5:
r += '' if self.sasl_credentials is None else ", sasl_credentials=({0!r}, {1!r}, '{2}', {3!r})".format(self.sasl_credentials[0], self.sasl_credentials[1], '*' * len(self.sasl_credentials[2]), self.sasl_credentials[3])
else:
r += '' if self.sasl_credentials is None else ', sasl_credentials={0.sasl_credentials!r}'.format(self)
r += '' if self.check_names is None else ', check_names={0.check_names!r}'.format(self)
r += '' if self.usage is None else (', collect_usage=' + 'True' if self.usage else 'False')
r += '' if self.read_only is None else ', read_only={0.read_only!r}'.format(self)
r += '' if self.lazy is None else ', lazy={0.lazy!r}'.format(self)
r += '' if self.raise_exceptions is None else ', raise_exceptions={0.raise_exceptions!r}'.format(self)
r += '' if (self.pool_name is None or self.pool_name == conf_default_pool_name) else ', pool_name={0.pool_name!r}'.format(self)
r += '' if self.pool_size is None else ', pool_size={0.pool_size!r}'.format(self)
r += '' if self.pool_lifetime is None else ', pool_lifetime={0.pool_lifetime!r}'.format(self)
r += '' if self.pool_keepalive is None else ', pool_keepalive={0.pool_keepalive!r}'.format(self)
r += '' if self.fast_decoder is None else (', fast_decoder=' + 'True' if self.fast_decoder else 'False')
r += '' if self.auto_range is None else (', auto_range=' + ('True' if self.auto_range else 'False'))
r += '' if self.receive_timeout is None else ', receive_timeout={0.receive_timeout!r}'.format(self)
r += '' if self.empty_attributes is None else (', return_empty_attributes=' + 'True' if self.empty_attributes else 'False')
r += '' if self.auto_encode is None else (', auto_encode=' + ('True' if self.auto_encode else 'False'))
r += '' if self.auto_escape is None else (', auto_escape=' + ('True' if self.auto_escape else 'False'))
r += '' if self.use_referral_cache is None else (', use_referral_cache=' + ('True' if self.use_referral_cache else 'False'))
r += ')'
return r
|
[
"def",
"repr_with_sensitive_data_stripped",
"(",
"self",
")",
":",
"conf_default_pool_name",
"=",
"get_config_parameter",
"(",
"'DEFAULT_THREADED_POOL_NAME'",
")",
"if",
"self",
".",
"server_pool",
":",
"r",
"=",
"'Connection(server={0.server_pool!r}'",
".",
"format",
"(",
"self",
")",
"else",
":",
"r",
"=",
"'Connection(server={0.server!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"user",
"is",
"None",
"else",
"', user={0.user!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"password",
"is",
"None",
"else",
"\", password='{0}'\"",
".",
"format",
"(",
"'<stripped %d characters of sensitive data>'",
"%",
"len",
"(",
"self",
".",
"password",
")",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"auto_bind",
"is",
"None",
"else",
"', auto_bind={0.auto_bind!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"version",
"is",
"None",
"else",
"', version={0.version!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"authentication",
"is",
"None",
"else",
"', authentication={0.authentication!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"strategy_type",
"is",
"None",
"else",
"', client_strategy={0.strategy_type!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"auto_referrals",
"is",
"None",
"else",
"', auto_referrals={0.auto_referrals!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"sasl_mechanism",
"is",
"None",
"else",
"', sasl_mechanism={0.sasl_mechanism!r}'",
".",
"format",
"(",
"self",
")",
"if",
"self",
".",
"sasl_mechanism",
"==",
"DIGEST_MD5",
":",
"r",
"+=",
"''",
"if",
"self",
".",
"sasl_credentials",
"is",
"None",
"else",
"\", sasl_credentials=({0!r}, {1!r}, '{2}', {3!r})\"",
".",
"format",
"(",
"self",
".",
"sasl_credentials",
"[",
"0",
"]",
",",
"self",
".",
"sasl_credentials",
"[",
"1",
"]",
",",
"'*'",
"*",
"len",
"(",
"self",
".",
"sasl_credentials",
"[",
"2",
"]",
")",
",",
"self",
".",
"sasl_credentials",
"[",
"3",
"]",
")",
"else",
":",
"r",
"+=",
"''",
"if",
"self",
".",
"sasl_credentials",
"is",
"None",
"else",
"', sasl_credentials={0.sasl_credentials!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"check_names",
"is",
"None",
"else",
"', check_names={0.check_names!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"usage",
"is",
"None",
"else",
"(",
"', collect_usage='",
"+",
"'True'",
"if",
"self",
".",
"usage",
"else",
"'False'",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"read_only",
"is",
"None",
"else",
"', read_only={0.read_only!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"lazy",
"is",
"None",
"else",
"', lazy={0.lazy!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"raise_exceptions",
"is",
"None",
"else",
"', raise_exceptions={0.raise_exceptions!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"(",
"self",
".",
"pool_name",
"is",
"None",
"or",
"self",
".",
"pool_name",
"==",
"conf_default_pool_name",
")",
"else",
"', pool_name={0.pool_name!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"pool_size",
"is",
"None",
"else",
"', pool_size={0.pool_size!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"pool_lifetime",
"is",
"None",
"else",
"', pool_lifetime={0.pool_lifetime!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"pool_keepalive",
"is",
"None",
"else",
"', pool_keepalive={0.pool_keepalive!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"fast_decoder",
"is",
"None",
"else",
"(",
"', fast_decoder='",
"+",
"'True'",
"if",
"self",
".",
"fast_decoder",
"else",
"'False'",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"auto_range",
"is",
"None",
"else",
"(",
"', auto_range='",
"+",
"(",
"'True'",
"if",
"self",
".",
"auto_range",
"else",
"'False'",
")",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"receive_timeout",
"is",
"None",
"else",
"', receive_timeout={0.receive_timeout!r}'",
".",
"format",
"(",
"self",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"empty_attributes",
"is",
"None",
"else",
"(",
"', return_empty_attributes='",
"+",
"'True'",
"if",
"self",
".",
"empty_attributes",
"else",
"'False'",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"auto_encode",
"is",
"None",
"else",
"(",
"', auto_encode='",
"+",
"(",
"'True'",
"if",
"self",
".",
"auto_encode",
"else",
"'False'",
")",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"auto_escape",
"is",
"None",
"else",
"(",
"', auto_escape='",
"+",
"(",
"'True'",
"if",
"self",
".",
"auto_escape",
"else",
"'False'",
")",
")",
"r",
"+=",
"''",
"if",
"self",
".",
"use_referral_cache",
"is",
"None",
"else",
"(",
"', use_referral_cache='",
"+",
"(",
"'True'",
"if",
"self",
".",
"use_referral_cache",
"else",
"'False'",
")",
")",
"r",
"+=",
"')'",
"return",
"r"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/ldap3/core/connection.py#L401-L437
|
|||
lad1337/XDM
|
0c1b7009fe00f06f102a6f67c793478f515e7efe
|
site-packages/guessit/guess.py
|
python
|
choose_string
|
(g1, g2)
|
Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
If the 2 strings are similar, or one is contained in the other, the latter is returned
with an increased confidence.
If the 2 strings are dissimilar, the one with the higher confidence is returned, with
a weaker confidence.
Note that here, 'similar' means that 2 strings are either equal, or that they
differ very little, such as one string being the other one with the 'the' word
prepended to it.
>>> s(choose_string(('Hello', 0.75), ('World', 0.5)))
('Hello', 0.25)
>>> s(choose_string(('Hello', 0.5), ('hello', 0.5)))
('Hello', 0.75)
>>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4)))
('Hello', 0.64)
>>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5)))
('The Simpsons', 0.75)
|
Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
|
[
"Function",
"used",
"by",
"merge_similar_guesses",
"to",
"choose",
"between",
"2",
"possible",
"properties",
"when",
"they",
"are",
"strings",
"."
] |
def choose_string(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
If the 2 strings are similar, or one is contained in the other, the latter is returned
with an increased confidence.
If the 2 strings are dissimilar, the one with the higher confidence is returned, with
a weaker confidence.
Note that here, 'similar' means that 2 strings are either equal, or that they
differ very little, such as one string being the other one with the 'the' word
prepended to it.
>>> s(choose_string(('Hello', 0.75), ('World', 0.5)))
('Hello', 0.25)
>>> s(choose_string(('Hello', 0.5), ('hello', 0.5)))
('Hello', 0.75)
>>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4)))
('Hello', 0.64)
>>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5)))
('The Simpsons', 0.75)
"""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if not v1:
return g2
elif not v2:
return g1
v1, v2 = v1.strip(), v2.strip()
v1l, v2l = v1.lower(), v2.lower()
combined_prob = 1 - (1 - c1) * (1 - c2)
if v1l == v2l:
return (v1, combined_prob)
# check for common patterns
elif v1l == 'the ' + v2l:
return (v1, combined_prob)
elif v2l == 'the ' + v1l:
return (v2, combined_prob)
# if one string is contained in the other, return the shortest one
elif v2l in v1l:
return (v2, combined_prob)
elif v1l in v2l:
return (v1, combined_prob)
# in case of conflict, return the one with highest priority
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1)
|
[
"def",
"choose_string",
"(",
"g1",
",",
"g2",
")",
":",
"v1",
",",
"c1",
"=",
"g1",
"# value, confidence",
"v2",
",",
"c2",
"=",
"g2",
"if",
"not",
"v1",
":",
"return",
"g2",
"elif",
"not",
"v2",
":",
"return",
"g1",
"v1",
",",
"v2",
"=",
"v1",
".",
"strip",
"(",
")",
",",
"v2",
".",
"strip",
"(",
")",
"v1l",
",",
"v2l",
"=",
"v1",
".",
"lower",
"(",
")",
",",
"v2",
".",
"lower",
"(",
")",
"combined_prob",
"=",
"1",
"-",
"(",
"1",
"-",
"c1",
")",
"*",
"(",
"1",
"-",
"c2",
")",
"if",
"v1l",
"==",
"v2l",
":",
"return",
"(",
"v1",
",",
"combined_prob",
")",
"# check for common patterns",
"elif",
"v1l",
"==",
"'the '",
"+",
"v2l",
":",
"return",
"(",
"v1",
",",
"combined_prob",
")",
"elif",
"v2l",
"==",
"'the '",
"+",
"v1l",
":",
"return",
"(",
"v2",
",",
"combined_prob",
")",
"# if one string is contained in the other, return the shortest one",
"elif",
"v2l",
"in",
"v1l",
":",
"return",
"(",
"v2",
",",
"combined_prob",
")",
"elif",
"v1l",
"in",
"v2l",
":",
"return",
"(",
"v1",
",",
"combined_prob",
")",
"# in case of conflict, return the one with highest priority",
"else",
":",
"if",
"c1",
">",
"c2",
":",
"return",
"(",
"v1",
",",
"c1",
"-",
"c2",
")",
"else",
":",
"return",
"(",
"v2",
",",
"c2",
"-",
"c1",
")"
] |
https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/guessit/guess.py#L129-L189
|
||
golismero/golismero
|
7d605b937e241f51c1ca4f47b20f755eeefb9d76
|
thirdparty_libs/dns/flags.py
|
python
|
edns_to_text
|
(flags)
|
return _to_text(flags, _edns_by_value, _edns_flags_order)
|
Convert an EDNS flags value into a space-separated list of EDNS flag
text values.
@rtype: string
|
Convert an EDNS flags value into a space-separated list of EDNS flag
text values.
|
[
"Convert",
"an",
"EDNS",
"flags",
"value",
"into",
"a",
"space",
"-",
"separated",
"list",
"of",
"EDNS",
"flag",
"text",
"values",
"."
] |
def edns_to_text(flags):
"""Convert an EDNS flags value into a space-separated list of EDNS flag
text values.
@rtype: string"""
return _to_text(flags, _edns_by_value, _edns_flags_order)
|
[
"def",
"edns_to_text",
"(",
"flags",
")",
":",
"return",
"_to_text",
"(",
"flags",
",",
"_edns_by_value",
",",
"_edns_flags_order",
")"
] |
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/dns/flags.py#L101-L106
|
|
Alexey-T/CudaText
|
6a8b9a974c5d5029c6c273bde83198c83b3a5fb9
|
app/cudatext.app/Contents/Resources/py/sys/requests/utils.py
|
python
|
parse_dict_header
|
(value)
|
return result
|
Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
|
Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
|
[
"Parse",
"lists",
"of",
"key",
"value",
"pairs",
"as",
"described",
"by",
"RFC",
"2068",
"Section",
"2",
"and",
"convert",
"them",
"into",
"a",
"python",
"dict",
":"
] |
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
|
[
"def",
"parse_dict_header",
"(",
"value",
")",
":",
"result",
"=",
"{",
"}",
"for",
"item",
"in",
"_parse_list_header",
"(",
"value",
")",
":",
"if",
"'='",
"not",
"in",
"item",
":",
"result",
"[",
"item",
"]",
"=",
"None",
"continue",
"name",
",",
"value",
"=",
"item",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"value",
"[",
":",
"1",
"]",
"==",
"value",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"value",
"=",
"unquote_header_value",
"(",
"value",
"[",
"1",
":",
"-",
"1",
"]",
")",
"result",
"[",
"name",
"]",
"=",
"value",
"return",
"result"
] |
https://github.com/Alexey-T/CudaText/blob/6a8b9a974c5d5029c6c273bde83198c83b3a5fb9/app/cudatext.app/Contents/Resources/py/sys/requests/utils.py#L376-L407
|
|
volatilityfoundation/volatility3
|
168b0d0b053ab97a7cb096ef2048795cc54d885f
|
volatility3/framework/symbols/intermed.py
|
python
|
Version8Format._process_fields
|
(self, fields: Dict[str, Dict[str, Any]])
|
return members
|
For each type field, it walks its tree of subtypes, reducing the hierarchy to just one level.
It creates a tuple of offset and object templates for each field.
|
For each type field, it walks its tree of subtypes, reducing the hierarchy to just one level.
It creates a tuple of offset and object templates for each field.
|
[
"For",
"each",
"type",
"field",
"it",
"walks",
"its",
"tree",
"of",
"subtypes",
"reducing",
"the",
"hierarchy",
"to",
"just",
"one",
"level",
".",
"It",
"creates",
"a",
"tuple",
"of",
"offset",
"and",
"object",
"templates",
"for",
"each",
"field",
"."
] |
def _process_fields(self, fields: Dict[str, Dict[str, Any]]) -> Dict[Any, Tuple[int, interfaces.objects.Template]]:
"""For each type field, it walks its tree of subtypes, reducing the hierarchy to just one level.
It creates a tuple of offset and object templates for each field.
"""
members = {}
for new_offset, member_name, member_value in self._reduce_fields(fields):
member = (new_offset, self._interdict_to_template(member_value['type']))
members[member_name] = member
return members
|
[
"def",
"_process_fields",
"(",
"self",
",",
"fields",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
")",
"->",
"Dict",
"[",
"Any",
",",
"Tuple",
"[",
"int",
",",
"interfaces",
".",
"objects",
".",
"Template",
"]",
"]",
":",
"members",
"=",
"{",
"}",
"for",
"new_offset",
",",
"member_name",
",",
"member_value",
"in",
"self",
".",
"_reduce_fields",
"(",
"fields",
")",
":",
"member",
"=",
"(",
"new_offset",
",",
"self",
".",
"_interdict_to_template",
"(",
"member_value",
"[",
"'type'",
"]",
")",
")",
"members",
"[",
"member_name",
"]",
"=",
"member",
"return",
"members"
] |
https://github.com/volatilityfoundation/volatility3/blob/168b0d0b053ab97a7cb096ef2048795cc54d885f/volatility3/framework/symbols/intermed.py#L631-L639
|
|
rembo10/headphones
|
b3199605be1ebc83a7a8feab6b1e99b64014187c
|
lib/yaml/emitter.py
|
python
|
Emitter.emit
|
(self, event)
|
[] |
def emit(self, event):
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
|
[
"def",
"emit",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"events",
".",
"append",
"(",
"event",
")",
"while",
"not",
"self",
".",
"need_more_events",
"(",
")",
":",
"self",
".",
"event",
"=",
"self",
".",
"events",
".",
"pop",
"(",
"0",
")",
"self",
".",
"state",
"(",
")",
"self",
".",
"event",
"=",
"None"
] |
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/yaml/emitter.py#L111-L116
|
||||
addisonlynch/iexfinance
|
64ee3afe0f3e456e5a0a8120dd7c8c87b28964dd
|
iexfinance/base.py
|
python
|
_IEXBase.fetch
|
(self, format=None)
|
return self._format_output(data, format=format)
|
Fetches latest data
Prepares the query URL based on self.params and executes the request
Returns
-------
response: requests.response
A response object
|
Fetches latest data
|
[
"Fetches",
"latest",
"data"
] |
def fetch(self, format=None):
"""Fetches latest data
Prepares the query URL based on self.params and executes the request
Returns
-------
response: requests.response
A response object
"""
url = self._prepare_query()
data = self._execute_iex_query(url)
return self._format_output(data, format=format)
|
[
"def",
"fetch",
"(",
"self",
",",
"format",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_prepare_query",
"(",
")",
"data",
"=",
"self",
".",
"_execute_iex_query",
"(",
"url",
")",
"return",
"self",
".",
"_format_output",
"(",
"data",
",",
"format",
"=",
"format",
")"
] |
https://github.com/addisonlynch/iexfinance/blob/64ee3afe0f3e456e5a0a8120dd7c8c87b28964dd/iexfinance/base.py#L197-L209
|
|
sahana/eden
|
1696fa50e90ce967df69f66b571af45356cc18da
|
controllers/gis.py
|
python
|
layer_config
|
()
|
return s3_rest_controller(csv_stylesheet = csv_stylesheet)
|
RESTful CRUD controller
|
RESTful CRUD controller
|
[
"RESTful",
"CRUD",
"controller"
] |
def layer_config():
""" RESTful CRUD controller """
if settings.get_security_map() and not auth.s3_has_role("MAP_ADMIN"):
auth.permission.fail()
layer = get_vars.get("layer", None)
if layer:
csv_stylesheet = "layer_%s.xsl" % layer
else:
# Cannot import without a specific layer type
csv_stylesheet = None
return s3_rest_controller(csv_stylesheet = csv_stylesheet)
|
[
"def",
"layer_config",
"(",
")",
":",
"if",
"settings",
".",
"get_security_map",
"(",
")",
"and",
"not",
"auth",
".",
"s3_has_role",
"(",
"\"MAP_ADMIN\"",
")",
":",
"auth",
".",
"permission",
".",
"fail",
"(",
")",
"layer",
"=",
"get_vars",
".",
"get",
"(",
"\"layer\"",
",",
"None",
")",
"if",
"layer",
":",
"csv_stylesheet",
"=",
"\"layer_%s.xsl\"",
"%",
"layer",
"else",
":",
"# Cannot import without a specific layer type",
"csv_stylesheet",
"=",
"None",
"return",
"s3_rest_controller",
"(",
"csv_stylesheet",
"=",
"csv_stylesheet",
")"
] |
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/controllers/gis.py#L1593-L1606
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/sympy/physics/mechanics/system.py
|
python
|
SymbolicSystem.dyn_implicit_mat
|
(self)
|
Returns the matrix, M, corresponding to the dynamic equations in
implicit form, M x' = F, where the kinematical equations are not
included
|
Returns the matrix, M, corresponding to the dynamic equations in
implicit form, M x' = F, where the kinematical equations are not
included
|
[
"Returns",
"the",
"matrix",
"M",
"corresponding",
"to",
"the",
"dynamic",
"equations",
"in",
"implicit",
"form",
"M",
"x",
"=",
"F",
"where",
"the",
"kinematical",
"equations",
"are",
"not",
"included"
] |
def dyn_implicit_mat(self):
"""Returns the matrix, M, corresponding to the dynamic equations in
implicit form, M x' = F, where the kinematical equations are not
included"""
if self._dyn_implicit_mat is None:
raise AttributeError("dyn_implicit_mat is not specified for "
"equations of motion form [1] or [2].")
else:
return self._dyn_implicit_mat
|
[
"def",
"dyn_implicit_mat",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dyn_implicit_mat",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"dyn_implicit_mat is not specified for \"",
"\"equations of motion form [1] or [2].\"",
")",
"else",
":",
"return",
"self",
".",
"_dyn_implicit_mat"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/physics/mechanics/system.py#L301-L309
|
||
airspeed-velocity/asv
|
9d5af5713357ccea00a518758fae6822cc69f539
|
asv/extern/asizeof.py
|
python
|
Asizer.code
|
(self)
|
return self._code_
|
Size (byte) code (bool).
|
Size (byte) code (bool).
|
[
"Size",
"(",
"byte",
")",
"code",
"(",
"bool",
")",
"."
] |
def code(self):
'''Size (byte) code (bool).
'''
return self._code_
|
[
"def",
"code",
"(",
"self",
")",
":",
"return",
"self",
".",
"_code_"
] |
https://github.com/airspeed-velocity/asv/blob/9d5af5713357ccea00a518758fae6822cc69f539/asv/extern/asizeof.py#L2156-L2159
|
|
mtivadar/qiew
|
87a3b96b43f1745a6b3f1fcfebce5164d2a40a14
|
DataModel.py
|
python
|
DataModel.slideLine
|
(self, factor)
|
[] |
def slideLine(self, factor):
self.slide(factor*self.cols)
|
[
"def",
"slideLine",
"(",
"self",
",",
"factor",
")",
":",
"self",
".",
"slide",
"(",
"factor",
"*",
"self",
".",
"cols",
")"
] |
https://github.com/mtivadar/qiew/blob/87a3b96b43f1745a6b3f1fcfebce5164d2a40a14/DataModel.py#L55-L56
|
||||
SheffieldML/GPy
|
bb1bc5088671f9316bc92a46d356734e34c2d5c0
|
GPy/util/choleskies.py
|
python
|
_triang_to_flat_cython
|
(L)
|
return choleskies_cython.triang_to_flat(L)
|
[] |
def _triang_to_flat_cython(L):
return choleskies_cython.triang_to_flat(L)
|
[
"def",
"_triang_to_flat_cython",
"(",
"L",
")",
":",
"return",
"choleskies_cython",
".",
"triang_to_flat",
"(",
"L",
")"
] |
https://github.com/SheffieldML/GPy/blob/bb1bc5088671f9316bc92a46d356734e34c2d5c0/GPy/util/choleskies.py#L54-L55
|
|||
researchmm/tasn
|
5dba8ccc096cedc63913730eeea14a9647911129
|
tasn-mxnet/3rdparty/tvm/python/tvm/rpc/client.py
|
python
|
RPCSession.load_module
|
(self, path)
|
return base._LoadRemoteModule(self._sess, path)
|
Load a remote module, the file need to be uploaded first.
Parameters
----------
path : str
The relative location to remote temp folder.
Returns
-------
m : Module
The remote module containing remote function.
|
Load a remote module, the file need to be uploaded first.
|
[
"Load",
"a",
"remote",
"module",
"the",
"file",
"need",
"to",
"be",
"uploaded",
"first",
"."
] |
def load_module(self, path):
"""Load a remote module, the file need to be uploaded first.
Parameters
----------
path : str
The relative location to remote temp folder.
Returns
-------
m : Module
The remote module containing remote function.
"""
return base._LoadRemoteModule(self._sess, path)
|
[
"def",
"load_module",
"(",
"self",
",",
"path",
")",
":",
"return",
"base",
".",
"_LoadRemoteModule",
"(",
"self",
".",
"_sess",
",",
"path",
")"
] |
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/3rdparty/tvm/python/tvm/rpc/client.py#L106-L119
|
|
garywiz/chaperone
|
9ff2c3a5b9c6820f8750320a564ea214042df06f
|
chaperone/cutil/proc.py
|
python
|
ProcStatus.continued
|
(self)
|
return os.WIFCONTINUED(self)
|
[] |
def continued(self):
return os.WIFCONTINUED(self)
|
[
"def",
"continued",
"(",
"self",
")",
":",
"return",
"os",
".",
"WIFCONTINUED",
"(",
"self",
")"
] |
https://github.com/garywiz/chaperone/blob/9ff2c3a5b9c6820f8750320a564ea214042df06f/chaperone/cutil/proc.py#L32-L33
|
|||
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
base/site-packages/tornado/websocket.py
|
python
|
WebSocketHandler.write_message
|
(self, message, binary=False)
|
Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
|
Sends the given message to the client of this Web Socket.
|
[
"Sends",
"the",
"given",
"message",
"to",
"the",
"client",
"of",
"this",
"Web",
"Socket",
"."
] |
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
"""
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
self.ws_connection.write_message(message, binary=binary)
|
[
"def",
"write_message",
"(",
"self",
",",
"message",
",",
"binary",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"dict",
")",
":",
"message",
"=",
"tornado",
".",
"escape",
".",
"json_encode",
"(",
"message",
")",
"self",
".",
"ws_connection",
".",
"write_message",
"(",
"message",
",",
"binary",
"=",
"binary",
")"
] |
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/tornado/websocket.py#L134-L144
|
||
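A short sketch of how write_message is typically called from a handler subclass; the EchoHandler name and the reply payload are illustrative and not part of the entry above.

import tornado.websocket

class EchoHandler(tornado.websocket.WebSocketHandler):
    def on_message(self, message):
        self.write_message(message)            # strings are sent as utf8 text frames
        self.write_message({"status": "ok"})   # dicts are json-encoded automatically
        # self.write_message(b"\x00\x01", binary=True) would send a binary frame
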
CiscoDevNet/yang-explorer
|
3be4d2f4b7b0ac20e2ef36bbac69011153f1a017
|
server/explorer/utils/adapter.py
|
python
|
Adapter.run_netconf
|
(username, device, rpc, lock=False)
|
return session.run(rpc, lock)
|
Execute Netconf request
|
Execute Netconf request
|
[
"Execute",
"Netconf",
"request"
] |
def run_netconf(username, device, rpc, lock=False):
""" Execute Netconf request """
plat = device.get('platform', None)
if plat is not None and plat not in ['', 'other']:
params = {'name' : plat}
else:
params = {}
session = NCClient(device['host'], device['port'],
device['user'], device['passwd'], params)
# If rpc is not provided, return capabilities
if rpc is None or rpc == '':
return session.get_capability()
return session.run(rpc, lock)
|
[
"def",
"run_netconf",
"(",
"username",
",",
"device",
",",
"rpc",
",",
"lock",
"=",
"False",
")",
":",
"plat",
"=",
"device",
".",
"get",
"(",
"'platform'",
",",
"None",
")",
"if",
"plat",
"is",
"not",
"None",
"and",
"plat",
"not",
"in",
"[",
"''",
",",
"'other'",
"]",
":",
"params",
"=",
"{",
"'name'",
":",
"plat",
"}",
"else",
":",
"params",
"=",
"{",
"}",
"session",
"=",
"NCClient",
"(",
"device",
"[",
"'host'",
"]",
",",
"device",
"[",
"'port'",
"]",
",",
"device",
"[",
"'user'",
"]",
",",
"device",
"[",
"'passwd'",
"]",
",",
"params",
")",
"# If rpc is not provided, return capabilities",
"if",
"rpc",
"is",
"None",
"or",
"rpc",
"==",
"''",
":",
"return",
"session",
".",
"get_capability",
"(",
")",
"return",
"session",
".",
"run",
"(",
"rpc",
",",
"lock",
")"
] |
https://github.com/CiscoDevNet/yang-explorer/blob/3be4d2f4b7b0ac20e2ef36bbac69011153f1a017/server/explorer/utils/adapter.py#L98-L113
|
|
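A hedged usage sketch for run_netconf; the import path, device values, and RPC payload are assumptions that only mirror the keys read inside the function (host, port, user, passwd, platform), and the call assumes run_netconf is exposed as a static method, as its self-less signature suggests.

from explorer.utils.adapter import Adapter   # module path per the repo layout above (assumption)

device = {'host': '10.0.0.1', 'port': 830, 'user': 'admin',
          'passwd': 'admin', 'platform': 'csr'}
caps = Adapter.run_netconf('admin', device, rpc=None)   # empty rpc returns capabilities
reply = Adapter.run_netconf('admin', device,
                            rpc='<get-config><source><running/></source></get-config>')
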
nopernik/mpDNS
|
b17dc39e7068406df82cb3431b3042e74e520cf9
|
circuits/web/wsgi.py
|
python
|
create_environ
|
(errors, path, req)
|
return environ
|
[] |
def create_environ(errors, path, req):
environ = {}
env = environ.__setitem__
env("REQUEST_METHOD", req.method)
env("SERVER_NAME", req.host.split(":", 1)[0])
env("SERVER_PORT", "%i" % (req.server.port or 0))
env("SERVER_PROTOCOL", "HTTP/%d.%d" % req.server.http.protocol)
env("QUERY_STRING", req.qs)
env("SCRIPT_NAME", req.script_name)
env("CONTENT_TYPE", req.headers.get("Content-Type", ""))
env("CONTENT_LENGTH", req.headers.get("Content-Length", ""))
env("REMOTE_ADDR", req.remote.ip)
env("REMOTE_PORT", "%i" % (req.remote.port or 0))
env("wsgi.version", (1, 0))
env("wsgi.input", req.body)
env("wsgi.errors", errors)
env("wsgi.multithread", False)
env("wsgi.multiprocess", False)
env("wsgi.run_once", False)
env("wsgi.url_scheme", req.scheme)
if req.path:
req.script_name = req.path[:len(path)]
req.path = req.path[len(path):]
env("SCRIPT_NAME", req.script_name)
env("PATH_INFO", req.path)
for k, v in list(req.headers.items()):
env("HTTP_%s" % k.upper().replace("-", "_"), v)
return environ
|
[
"def",
"create_environ",
"(",
"errors",
",",
"path",
",",
"req",
")",
":",
"environ",
"=",
"{",
"}",
"env",
"=",
"environ",
".",
"__setitem__",
"env",
"(",
"\"REQUEST_METHOD\"",
",",
"req",
".",
"method",
")",
"env",
"(",
"\"SERVER_NAME\"",
",",
"req",
".",
"host",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"[",
"0",
"]",
")",
"env",
"(",
"\"SERVER_PORT\"",
",",
"\"%i\"",
"%",
"(",
"req",
".",
"server",
".",
"port",
"or",
"0",
")",
")",
"env",
"(",
"\"SERVER_PROTOCOL\"",
",",
"\"HTTP/%d.%d\"",
"%",
"req",
".",
"server",
".",
"http",
".",
"protocol",
")",
"env",
"(",
"\"QUERY_STRING\"",
",",
"req",
".",
"qs",
")",
"env",
"(",
"\"SCRIPT_NAME\"",
",",
"req",
".",
"script_name",
")",
"env",
"(",
"\"CONTENT_TYPE\"",
",",
"req",
".",
"headers",
".",
"get",
"(",
"\"Content-Type\"",
",",
"\"\"",
")",
")",
"env",
"(",
"\"CONTENT_LENGTH\"",
",",
"req",
".",
"headers",
".",
"get",
"(",
"\"Content-Length\"",
",",
"\"\"",
")",
")",
"env",
"(",
"\"REMOTE_ADDR\"",
",",
"req",
".",
"remote",
".",
"ip",
")",
"env",
"(",
"\"REMOTE_PORT\"",
",",
"\"%i\"",
"%",
"(",
"req",
".",
"remote",
".",
"port",
"or",
"0",
")",
")",
"env",
"(",
"\"wsgi.version\"",
",",
"(",
"1",
",",
"0",
")",
")",
"env",
"(",
"\"wsgi.input\"",
",",
"req",
".",
"body",
")",
"env",
"(",
"\"wsgi.errors\"",
",",
"errors",
")",
"env",
"(",
"\"wsgi.multithread\"",
",",
"False",
")",
"env",
"(",
"\"wsgi.multiprocess\"",
",",
"False",
")",
"env",
"(",
"\"wsgi.run_once\"",
",",
"False",
")",
"env",
"(",
"\"wsgi.url_scheme\"",
",",
"req",
".",
"scheme",
")",
"if",
"req",
".",
"path",
":",
"req",
".",
"script_name",
"=",
"req",
".",
"path",
"[",
":",
"len",
"(",
"path",
")",
"]",
"req",
".",
"path",
"=",
"req",
".",
"path",
"[",
"len",
"(",
"path",
")",
":",
"]",
"env",
"(",
"\"SCRIPT_NAME\"",
",",
"req",
".",
"script_name",
")",
"env",
"(",
"\"PATH_INFO\"",
",",
"req",
".",
"path",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"req",
".",
"headers",
".",
"items",
"(",
")",
")",
":",
"env",
"(",
"\"HTTP_%s\"",
"%",
"k",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
",",
"v",
")",
"return",
"environ"
] |
https://github.com/nopernik/mpDNS/blob/b17dc39e7068406df82cb3431b3042e74e520cf9/circuits/web/wsgi.py#L29-L60
|
|||
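To make the request-to-environ mapping of create_environ above visible, here is a self-contained sketch that fakes only the attributes the function reads; a real circuits Request object would normally be passed, and the import assumes the vendored circuits package is importable.

from io import BytesIO
from types import SimpleNamespace
from circuits.web.wsgi import create_environ   # path per the record above (assumption)

# hypothetical stand-in exposing just the attributes create_environ touches
req = SimpleNamespace(
    method="GET", host="example.com:8000", qs="a=1", script_name="",
    headers={"Host": "example.com:8000", "Accept": "*/*"},
    server=SimpleNamespace(port=8000, http=SimpleNamespace(protocol=(1, 1))),
    remote=SimpleNamespace(ip="127.0.0.1", port=54321),
    body=BytesIO(b""), scheme="http", path="/app/index",
)
environ = create_environ(errors=None, path="/app", req=req)
print(environ["SCRIPT_NAME"], environ["PATH_INFO"])   # "/app" "/index" after prefix stripping
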
explosion/spacy-stanza
|
efc4bba30ddc4c952b64164a537df97a064d2317
|
spacy_stanza/tokenizer.py
|
python
|
StanzaTokenizer._find_embeddings
|
(snlp)
|
return embs
|
Find pretrained word embeddings in any of a SNLP's processors.
RETURNS (Pretrain): Or None if no embeddings were found.
|
Find pretrained word embeddings in any of a SNLP's processors.
|
[
"Find",
"pretrained",
"word",
"embeddings",
"in",
"any",
"of",
"a",
"SNLP",
"s",
"processors",
"."
] |
def _find_embeddings(snlp):
"""Find pretrained word embeddings in any of a SNLP's processors.
RETURNS (Pretrain): Or None if no embeddings were found.
"""
embs = None
for proc in snlp.processors.values():
if hasattr(proc, "pretrain") and isinstance(proc.pretrain, Pretrain):
embs = proc.pretrain
break
return embs
|
[
"def",
"_find_embeddings",
"(",
"snlp",
")",
":",
"embs",
"=",
"None",
"for",
"proc",
"in",
"snlp",
".",
"processors",
".",
"values",
"(",
")",
":",
"if",
"hasattr",
"(",
"proc",
",",
"\"pretrain\"",
")",
"and",
"isinstance",
"(",
"proc",
".",
"pretrain",
",",
"Pretrain",
")",
":",
"embs",
"=",
"proc",
".",
"pretrain",
"break",
"return",
"embs"
] |
https://github.com/explosion/spacy-stanza/blob/efc4bba30ddc4c952b64164a537df97a064d2317/spacy_stanza/tokenizer.py#L263-L273
|
|
jarvisteach/appJar
|
0b59ce041da2197dcff3410e20f298676f1f7266
|
appJar/appjar.py
|
python
|
gui.addLabels
|
(self, names, row=None, colspan=0, rowspan=0)
|
adds a set of labels, in the row, spanning specified columns
|
adds a set of labels, in the row, spanning specified columns
|
[
"adds",
"a",
"set",
"of",
"labels",
"in",
"the",
"row",
"spannning",
"specified",
"columns"
] |
def addLabels(self, names, row=None, colspan=0, rowspan=0):
        ''' adds a set of labels, in the row, spanning specified columns '''
frame = self._makeWidgetBox()(self.getContainer())
if not self.ttkFlag:
frame.config(background=self._getContainerBg())
for i in range(len(names)):
self.widgetManager.verify(WIDGET_NAMES.Label, names[i])
if not self.ttkFlag:
lab = Label(frame, text=names[i])
lab.config(font=self._getContainerProperty('labelFont'), justify=LEFT, background=self._getContainerBg())
else:
lab = ttk.Label(frame, text=names[i])
lab.DEFAULT_TEXT = names[i]
lab.inContainer = False
self.widgetManager.add(WIDGET_NAMES.Label, names[i], lab)
lab.grid(row=0, column=i)
Grid.columnconfigure(frame, i, weight=1)
Grid.rowconfigure(frame, 0, weight=1)
frame.theWidgets.append(lab)
self._positionWidget(frame, row, 0, colspan, rowspan)
self.widgetManager.log(WIDGET_NAMES.FrameBox, frame)
|
[
"def",
"addLabels",
"(",
"self",
",",
"names",
",",
"row",
"=",
"None",
",",
"colspan",
"=",
"0",
",",
"rowspan",
"=",
"0",
")",
":",
"frame",
"=",
"self",
".",
"_makeWidgetBox",
"(",
")",
"(",
"self",
".",
"getContainer",
"(",
")",
")",
"if",
"not",
"self",
".",
"ttkFlag",
":",
"frame",
".",
"config",
"(",
"background",
"=",
"self",
".",
"_getContainerBg",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"names",
")",
")",
":",
"self",
".",
"widgetManager",
".",
"verify",
"(",
"WIDGET_NAMES",
".",
"Label",
",",
"names",
"[",
"i",
"]",
")",
"if",
"not",
"self",
".",
"ttkFlag",
":",
"lab",
"=",
"Label",
"(",
"frame",
",",
"text",
"=",
"names",
"[",
"i",
"]",
")",
"lab",
".",
"config",
"(",
"font",
"=",
"self",
".",
"_getContainerProperty",
"(",
"'labelFont'",
")",
",",
"justify",
"=",
"LEFT",
",",
"background",
"=",
"self",
".",
"_getContainerBg",
"(",
")",
")",
"else",
":",
"lab",
"=",
"ttk",
".",
"Label",
"(",
"frame",
",",
"text",
"=",
"names",
"[",
"i",
"]",
")",
"lab",
".",
"DEFAULT_TEXT",
"=",
"names",
"[",
"i",
"]",
"lab",
".",
"inContainer",
"=",
"False",
"self",
".",
"widgetManager",
".",
"add",
"(",
"WIDGET_NAMES",
".",
"Label",
",",
"names",
"[",
"i",
"]",
",",
"lab",
")",
"lab",
".",
"grid",
"(",
"row",
"=",
"0",
",",
"column",
"=",
"i",
")",
"Grid",
".",
"columnconfigure",
"(",
"frame",
",",
"i",
",",
"weight",
"=",
"1",
")",
"Grid",
".",
"rowconfigure",
"(",
"frame",
",",
"0",
",",
"weight",
"=",
"1",
")",
"frame",
".",
"theWidgets",
".",
"append",
"(",
"lab",
")",
"self",
".",
"_positionWidget",
"(",
"frame",
",",
"row",
",",
"0",
",",
"colspan",
",",
"rowspan",
")",
"self",
".",
"widgetManager",
".",
"log",
"(",
"WIDGET_NAMES",
".",
"FrameBox",
",",
"frame",
")"
] |
https://github.com/jarvisteach/appJar/blob/0b59ce041da2197dcff3410e20f298676f1f7266/appJar/appjar.py#L9117-L9139
|
||
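A minimal appJar sketch exercising addLabels from the entry above; the window title and label texts are arbitrary.

from appJar import gui

app = gui("Labels demo")
app.addLabels(["First", "Second", "Third"])   # three labels packed side by side in one row
app.addLabels(["a", "b"], colspan=2)          # next row, spanning two grid columns
app.go()
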
appinho/SARosPerceptionKitti
|
148683b23bef294762ced937e4386501e56c8d64
|
benchmark/python/munkres.py
|
python
|
Munkres.__make_matrix
|
(self, n, val)
|
return matrix
|
Create an *n*x*n* matrix, populating it with the specific value.
|
Create an *n*x*n* matrix, populating it with the specific value.
|
[
"Create",
"an",
"*",
"n",
"*",
"x",
"*",
"n",
"*",
"matrix",
"populating",
"it",
"with",
"the",
"specific",
"value",
"."
] |
def __make_matrix(self, n, val):
"""Create an *n*x*n* matrix, populating it with the specific value."""
matrix = []
for i in range(n):
matrix += [[val for j in range(n)]]
return matrix
|
[
"def",
"__make_matrix",
"(",
"self",
",",
"n",
",",
"val",
")",
":",
"matrix",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"matrix",
"+=",
"[",
"[",
"val",
"for",
"j",
"in",
"range",
"(",
"n",
")",
"]",
"]",
"return",
"matrix"
] |
https://github.com/appinho/SARosPerceptionKitti/blob/148683b23bef294762ced937e4386501e56c8d64/benchmark/python/munkres.py#L428-L433
|
|
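The private helper above is equivalent to this standalone sketch, shown only to make the n-by-n construction explicit.

def make_matrix(n, val):
    # build an n x n matrix where every cell holds val
    return [[val for _ in range(n)] for _ in range(n)]

assert make_matrix(3, 0) == [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
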
kbandla/ImmunityDebugger
|
2abc03fb15c8f3ed0914e1175c4d8933977c73e3
|
1.73/Libs/immlib.py
|
python
|
Debugger.setVariable
|
(self,address,string)
|
return debugger.SetVariable(address,string)
|
Set Variable name to specified address.
@type Address: DWORD
@param Address: Address from assembly line
@type String: STRING
@param String: Variable name to be set
|
Set Variable name to specified address.
|
[
"Set",
"Variable",
"name",
"to",
"specified",
"address",
"."
] |
def setVariable(self,address,string):
"""
Set Variable name to specified address.
@type Address: DWORD
@param Address: Address from assembly line
@type String: STRING
@param String: Variable name to be set
"""
return debugger.SetVariable(address,string)
|
[
"def",
"setVariable",
"(",
"self",
",",
"address",
",",
"string",
")",
":",
"return",
"debugger",
".",
"SetVariable",
"(",
"address",
",",
"string",
")"
] |
https://github.com/kbandla/ImmunityDebugger/blob/2abc03fb15c8f3ed0914e1175c4d8933977c73e3/1.73/Libs/immlib.py#L361-L372
|
|
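setVariable is meant to be called from a PyCommand running inside Immunity Debugger; a minimal sketch, with the address and variable name purely hypothetical.

import immlib

def main(args):
    imm = immlib.Debugger()
    imm.setVariable(0x00401000, "g_config_ptr")   # label the dword at this address
    return "variable set"
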
amonapp/amon
|
61ae3575ad98ec4854ea87c213aa8dfbb29a0199
|
amon/templatetags/math.py
|
python
|
substract_int
|
(element, second_element)
|
return result
|
[] |
def substract_int(element, second_element):
result = 0
if type(element) == int and type(second_element) == int:
result = element-second_element
return result
|
[
"def",
"substract_int",
"(",
"element",
",",
"second_element",
")",
":",
"result",
"=",
"0",
"if",
"type",
"(",
"element",
")",
"==",
"int",
"and",
"type",
"(",
"second_element",
")",
"==",
"int",
":",
"result",
"=",
"element",
"-",
"second_element",
"return",
"result"
] |
https://github.com/amonapp/amon/blob/61ae3575ad98ec4854ea87c213aa8dfbb29a0199/amon/templatetags/math.py#L22-L26
|
|||
tobegit3hub/deep_image_model
|
8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e
|
java_predict_client/src/main/proto/tensorflow/contrib/slim/python/slim/data/data_provider.py
|
python
|
DataProvider.__init__
|
(self, items_to_tensors, num_samples)
|
Constructs the Data Provider.
Args:
items_to_tensors: a dictionary of names to tensors.
num_samples: the number of samples in the dataset being provided.
|
Constructs the Data Provider.
|
[
"Constructs",
"the",
"Data",
"Provider",
"."
] |
def __init__(self, items_to_tensors, num_samples):
"""Constructs the Data Provider.
Args:
items_to_tensors: a dictionary of names to tensors.
num_samples: the number of samples in the dataset being provided.
"""
self._items_to_tensors = items_to_tensors
self._num_samples = num_samples
|
[
"def",
"__init__",
"(",
"self",
",",
"items_to_tensors",
",",
"num_samples",
")",
":",
"self",
".",
"_items_to_tensors",
"=",
"items_to_tensors",
"self",
".",
"_num_samples",
"=",
"num_samples"
] |
https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/contrib/slim/python/slim/data/data_provider.py#L52-L60
|
||
IronLanguages/ironpython3
|
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
|
Src/Scripts/generate_tuples.py
|
python
|
main
|
()
|
return generate(
("Tuples", gen_tuples),
("Tuple Get From Size", gen_get_size),
)
|
[] |
def main():
return generate(
("Tuples", gen_tuples),
("Tuple Get From Size", gen_get_size),
)
|
[
"def",
"main",
"(",
")",
":",
"return",
"generate",
"(",
"(",
"\"Tuples\"",
",",
"gen_tuples",
")",
",",
"(",
"\"Tuple Get From Size\"",
",",
"gen_get_size",
")",
",",
")"
] |
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/Scripts/generate_tuples.py#L83-L87
|
|||
sympy/sympy
|
d822fcba181155b85ff2b29fe525adbafb22b448
|
sympy/physics/secondquant.py
|
python
|
SqOperator.doit
|
(self, **kw_args)
|
return self
|
FIXME: hack to prevent crash further up...
|
FIXME: hack to prevent crash further up...
|
[
"FIXME",
":",
"hack",
"to",
"prevent",
"crash",
"further",
"up",
"..."
] |
def doit(self, **kw_args):
"""
FIXME: hack to prevent crash further up...
"""
return self
|
[
"def",
"doit",
"(",
"self",
",",
"*",
"*",
"kw_args",
")",
":",
"return",
"self"
] |
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/physics/secondquant.py#L378-L382
|
|
lisa-lab/pylearn2
|
af81e5c362f0df4df85c3e54e23b2adeec026055
|
pylearn2/scripts/dbm/top_filters.py
|
python
|
get_elements_count
|
(N, N1, W2)
|
return count
|
Retrieve the number of elements to show.
Parameters
----------
N: int
Number of rows.
N1: int
Number of elements in the first layer.
W2: list
Second hidden layer.
|
Retrieve the number of elements to show.
|
[
"Retrieve",
"the",
"number",
"of",
"elements",
"to",
"show",
"."
] |
def get_elements_count(N, N1, W2):
"""
Retrieve the number of elements to show.
Parameters
----------
N: int
Number of rows.
N1: int
Number of elements in the first layer.
W2: list
Second hidden layer.
"""
thresh = .9
max_count = 0
total_counts = 0.
for i in xrange(N):
w = W2[:, i]
wa = np.abs(w)
total = wa.sum()
s = np.asarray(sorted(wa))
count = 1
while s[-count:].sum() < thresh * total:
count += 1
if count > max_count:
max_count = count
total_counts += count
ave = total_counts / float(N)
print('average needed filters', ave)
count = max_count
print('It takes', count, 'of', N1, 'elements to account for ',
(thresh*100.), '\% of the weight in at least one filter')
lim = 10
if count > lim:
count = lim
print('Only displaying ', count, ' elements though.')
if count > N1:
count = N1
return count
|
[
"def",
"get_elements_count",
"(",
"N",
",",
"N1",
",",
"W2",
")",
":",
"thresh",
"=",
".9",
"max_count",
"=",
"0",
"total_counts",
"=",
"0.",
"for",
"i",
"in",
"xrange",
"(",
"N",
")",
":",
"w",
"=",
"W2",
"[",
":",
",",
"i",
"]",
"wa",
"=",
"np",
".",
"abs",
"(",
"w",
")",
"total",
"=",
"wa",
".",
"sum",
"(",
")",
"s",
"=",
"np",
".",
"asarray",
"(",
"sorted",
"(",
"wa",
")",
")",
"count",
"=",
"1",
"while",
"s",
"[",
"-",
"count",
":",
"]",
".",
"sum",
"(",
")",
"<",
"thresh",
"*",
"total",
":",
"count",
"+=",
"1",
"if",
"count",
">",
"max_count",
":",
"max_count",
"=",
"count",
"total_counts",
"+=",
"count",
"ave",
"=",
"total_counts",
"/",
"float",
"(",
"N",
")",
"print",
"(",
"'average needed filters'",
",",
"ave",
")",
"count",
"=",
"max_count",
"print",
"(",
"'It takes'",
",",
"count",
",",
"'of'",
",",
"N1",
",",
"'elements to account for '",
",",
"(",
"thresh",
"*",
"100.",
")",
",",
"'\\% of the weight in at least one filter'",
")",
"lim",
"=",
"10",
"if",
"count",
">",
"lim",
":",
"count",
"=",
"lim",
"print",
"(",
"'Only displaying '",
",",
"count",
",",
"' elements though.'",
")",
"if",
"count",
">",
"N1",
":",
"count",
"=",
"N1",
"return",
"count"
] |
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/scripts/dbm/top_filters.py#L155-L206
|
|
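A small sketch of calling get_elements_count from the entry above with synthetic weights; the shapes are assumptions (W2 is indexed as W2[:, i] for i in range(N), so it needs N columns and, here, N1 rows), and the function itself is Python 2 code (xrange).

import numpy as np

N1, N = 100, 50                       # hypothetical layer sizes
W2 = np.random.randn(N1, N)           # synthetic second-layer weights
count = get_elements_count(N, N1, W2)
print(count)                          # number of filters to display, capped at 10
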
ronf/asyncssh
|
ee1714c598d8c2ea6f5484e465443f38b68714aa
|
asyncssh/sftp.py
|
python
|
SFTPClient.islink
|
(self, path: _SFTPPath)
|
return (await self._type(path, statfunc=self.lstat)) == \
FILEXFER_TYPE_SYMLINK
|
Return if the remote path refers to a symbolic link
:param path:
The remote path to check
:type path: :class:`PurePath <pathlib.PurePath>`, `str`, or `bytes`
:raises: :exc:`SFTPError` if the server returns an error
|
Return if the remote path refers to a symbolic link
|
[
"Return",
"if",
"the",
"remote",
"path",
"refers",
"to",
"a",
"symbolic",
"link"
] |
async def islink(self, path: _SFTPPath) -> bool:
"""Return if the remote path refers to a symbolic link
:param path:
The remote path to check
:type path: :class:`PurePath <pathlib.PurePath>`, `str`, or `bytes`
:raises: :exc:`SFTPError` if the server returns an error
"""
return (await self._type(path, statfunc=self.lstat)) == \
FILEXFER_TYPE_SYMLINK
|
[
"async",
"def",
"islink",
"(",
"self",
",",
"path",
":",
"_SFTPPath",
")",
"->",
"bool",
":",
"return",
"(",
"await",
"self",
".",
"_type",
"(",
"path",
",",
"statfunc",
"=",
"self",
".",
"lstat",
")",
")",
"==",
"FILEXFER_TYPE_SYMLINK"
] |
https://github.com/ronf/asyncssh/blob/ee1714c598d8c2ea6f5484e465443f38b68714aa/asyncssh/sftp.py#L4774-L4786
|
|
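Typical asyncssh usage of islink; the host and path are placeholders and authentication is assumed to come from the default agent or keys.

import asyncio, asyncssh

async def main():
    async with asyncssh.connect('example.com') as conn:
        async with conn.start_sftp_client() as sftp:
            if await sftp.islink('/tmp/somelink'):
                print('symbolic link')

asyncio.run(main())
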
haiwen/seahub
|
e92fcd44e3e46260597d8faa9347cb8222b8b10d
|
scripts/setup-seafile-mysql.py
|
python
|
CcnetConfigurator.ask_questions
|
(self)
|
[] |
def ask_questions(self):
if not self.server_name:
self.ask_server_name()
if not self.ip_or_domain:
self.ask_server_ip_or_domain()
|
[
"def",
"ask_questions",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"server_name",
":",
"self",
".",
"ask_server_name",
"(",
")",
"if",
"not",
"self",
".",
"ip_or_domain",
":",
"self",
".",
"ask_server_ip_or_domain",
"(",
")"
] |
https://github.com/haiwen/seahub/blob/e92fcd44e3e46260597d8faa9347cb8222b8b10d/scripts/setup-seafile-mysql.py#L801-L805
|
||||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/rings/derivation.py
|
python
|
RingDerivationModule.gens
|
(self)
|
return tuple(self._gens)
|
r"""
Return the generators of this module of derivations.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module(); M
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: M.gens()
(d/dx, d/dy)
We check that, for a nontrivial twist over a field, the module of
twisted derivation is a vector space of dimension 1 generated by
``twist - id``::
sage: K = R.fraction_field()
sage: theta = K.hom([K(y),K(x)])
sage: M = K.derivation_module(twist=theta); M
Module of twisted derivations over Fraction Field of Multivariate Polynomial
Ring in x, y over Integer Ring (twisting morphism: x |--> y, y |--> x)
sage: M.gens()
([x |--> y, y |--> x] - id,)
|
r"""
Return the generators of this module of derivations.
|
[
"r",
"Return",
"the",
"generators",
"of",
"this",
"module",
"of",
"derivations",
"."
] |
def gens(self):
r"""
Return the generators of this module of derivations.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module(); M
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: M.gens()
(d/dx, d/dy)
We check that, for a nontrivial twist over a field, the module of
twisted derivation is a vector space of dimension 1 generated by
``twist - id``::
sage: K = R.fraction_field()
sage: theta = K.hom([K(y),K(x)])
sage: M = K.derivation_module(twist=theta); M
Module of twisted derivations over Fraction Field of Multivariate Polynomial
Ring in x, y over Integer Ring (twisting morphism: x |--> y, y |--> x)
sage: M.gens()
([x |--> y, y |--> x] - id,)
"""
if self._gens is None:
raise NotImplementedError("generators are not implemented for this derivation module")
return tuple(self._gens)
|
[
"def",
"gens",
"(",
"self",
")",
":",
"if",
"self",
".",
"_gens",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"generators are not implemented for this derivation module\"",
")",
"return",
"tuple",
"(",
"self",
".",
"_gens",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/derivation.py#L614-L641
|
|
nicholastoddsmith/pythonml
|
e57a57307c2c954ade0c2f060a802f953e74ad74
|
TFANN/TFANN.py
|
python
|
ANN.fit
|
(self, A, Y, FD = None)
|
Fit the ANN to the data
A: numpy matrix where each row is a sample
Y: numpy matrix of target values
|
Fit the ANN to the data
A: numpy matrix where each row is a sample
Y: numpy matrix of target values
|
[
"Fit",
"the",
"ANN",
"to",
"the",
"data",
"A",
":",
"numpy",
"matrix",
"where",
"each",
"row",
"is",
"a",
"sample",
"Y",
":",
"numpy",
"matrix",
"of",
"target",
"values"
] |
def fit(self, A, Y, FD = None):
'''
Fit the ANN to the data
A: numpy matrix where each row is a sample
Y: numpy matrix of target values
'''
m = len(A)
FD = {self.X:A, self.Y:Y} if FD is None else FD #Feed dictionary
#Loop up to mIter times gradually scaling up the batch size (if range is provided)
for i, BSi in enumerate(_GetBatchRange(self.batSz, self.mIter)):
if BSi is None: #Compute loss and optimize simultaneously for all samples
err, _ = self.GetSes().run([self.loss, self.optmzr], feed_dict = FD)
else: #Train m samples using random batches of size self.bs
err = 0.0
for j in range(0, m, BSi): #Compute loss and optimize simultaneously for batch
bi = np.random.choice(m, BSi, False) #Randomly chosen batch indices
BFD = {k:v[bi] for k, v in FD.items()} #Feed dictionary for this batch
l, _ = self.GetSes().run([self.loss, self.optmzr], feed_dict = BFD)
err += l #Accumulate loss over all batches
err /= len(range(0, m, BSi)) #Average over all batches
if self.vrbse:
print("Iter {:5d}\t{:16.8f} (Batch Size: {:5d})".format(i + 1, err, -1 if BSi is None else BSi))
if err < self.tol or self.stopIter:
break
|
[
"def",
"fit",
"(",
"self",
",",
"A",
",",
"Y",
",",
"FD",
"=",
"None",
")",
":",
"m",
"=",
"len",
"(",
"A",
")",
"FD",
"=",
"{",
"self",
".",
"X",
":",
"A",
",",
"self",
".",
"Y",
":",
"Y",
"}",
"if",
"FD",
"is",
"None",
"else",
"FD",
"#Feed dictionary",
"#Loop up to mIter times gradually scaling up the batch size (if range is provided)",
"for",
"i",
",",
"BSi",
"in",
"enumerate",
"(",
"_GetBatchRange",
"(",
"self",
".",
"batSz",
",",
"self",
".",
"mIter",
")",
")",
":",
"if",
"BSi",
"is",
"None",
":",
"#Compute loss and optimize simultaneously for all samples",
"err",
",",
"_",
"=",
"self",
".",
"GetSes",
"(",
")",
".",
"run",
"(",
"[",
"self",
".",
"loss",
",",
"self",
".",
"optmzr",
"]",
",",
"feed_dict",
"=",
"FD",
")",
"else",
":",
"#Train m samples using random batches of size self.bs",
"err",
"=",
"0.0",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"m",
",",
"BSi",
")",
":",
"#Compute loss and optimize simultaneously for batch",
"bi",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"m",
",",
"BSi",
",",
"False",
")",
"#Randomly chosen batch indices",
"BFD",
"=",
"{",
"k",
":",
"v",
"[",
"bi",
"]",
"for",
"k",
",",
"v",
"in",
"FD",
".",
"items",
"(",
")",
"}",
"#Feed dictionary for this batch",
"l",
",",
"_",
"=",
"self",
".",
"GetSes",
"(",
")",
".",
"run",
"(",
"[",
"self",
".",
"loss",
",",
"self",
".",
"optmzr",
"]",
",",
"feed_dict",
"=",
"BFD",
")",
"err",
"+=",
"l",
"#Accumulate loss over all batches",
"err",
"/=",
"len",
"(",
"range",
"(",
"0",
",",
"m",
",",
"BSi",
")",
")",
"#Average over all batches",
"if",
"self",
".",
"vrbse",
":",
"print",
"(",
"\"Iter {:5d}\\t{:16.8f} (Batch Size: {:5d})\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"err",
",",
"-",
"1",
"if",
"BSi",
"is",
"None",
"else",
"BSi",
")",
")",
"if",
"err",
"<",
"self",
".",
"tol",
"or",
"self",
".",
"stopIter",
":",
"break"
] |
https://github.com/nicholastoddsmith/pythonml/blob/e57a57307c2c954ade0c2f060a802f953e74ad74/TFANN/TFANN.py#L234-L257
|
||
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/websocket/_utils.py
|
python
|
validate_utf8
|
(utfbytes)
|
return _validate_utf8(utfbytes)
|
validate utf8 byte string.
utfbytes: utf byte string to check.
return value: if valid utf8 string, return true. Otherwise, return false.
|
validate utf8 byte string.
utfbytes: utf byte string to check.
return value: if valid utf8 string, return true. Otherwise, return false.
|
[
"validate",
"utf8",
"byte",
"string",
".",
"utfbytes",
":",
"utf",
"byte",
"string",
"to",
"check",
".",
"return",
"value",
":",
"if",
"valid",
"utf8",
"string",
"return",
"true",
".",
"Otherwise",
"return",
"false",
"."
] |
def validate_utf8(utfbytes):
"""
validate utf8 byte string.
utfbytes: utf byte string to check.
return value: if valid utf8 string, return true. Otherwise, return false.
"""
return _validate_utf8(utfbytes)
|
[
"def",
"validate_utf8",
"(",
"utfbytes",
")",
":",
"return",
"_validate_utf8",
"(",
"utfbytes",
")"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/websocket/_utils.py#L86-L92
|
|
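A quick sketch of validate_utf8; the import path assumes the vendored lib/websocket package above is importable as websocket.

from websocket._utils import validate_utf8

print(validate_utf8(b"hello"))          # True: plain ASCII is valid utf8
print(validate_utf8(b"\xff\xfe ..."))   # False: 0xff/0xfe never appear in utf8
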
florath/rmtoo
|
6ffe08703451358dca24b232ee4380b1da23bcad
|
rmtoo/outputs/LatexJinja2.py
|
python
|
LatexJinja2.__output_latex_one_constraint
|
(self, cname, cnstrt)
|
Output one constraint.
|
Output one constraint.
|
[
"Output",
"one",
"constraint",
"."
] |
def __output_latex_one_constraint(self, cname, cnstrt):
'''Output one constraint.'''
cname = LatexJinja2.__strescape(cname)
tracer.debug("Output constraint [%s]." % cname)
self.__fd.write(u"%% CONSTRAINT '%s'\n" % cname)
self.__fd.write(u"\\%s{%s}\\label{CONSTRAINT%s}\n"
"\\textbf{Description:} %s\n"
% (self.level_names[1],
cnstrt.get_value("Name").get_content(),
cname, cnstrt.get_value(
"Description").get_content()))
if cnstrt.is_val_av_and_not_null("Rationale"):
self.__fd.write(u"\n\\textbf{Rationale:} %s\n"
% cnstrt.get_value("Rationale").get_content())
if cnstrt.is_val_av_and_not_null("Note"):
self.__fd.write(u"\n\\textbf{Note:} %s\n"
% cnstrt.get_value("Note").get_content())
# Write out the references to the requirements
reqs_refs = []
for req in self.__constraints_reqs_ref[cname]:
refid = LatexJinja2.__strescape(req)
refctr = "\\ref{%s} \\nameref{%s}" \
% (refid, refid)
reqs_refs.append(refctr)
self.__fd.write(u"\n\\textbf{Requirements:} %s\n" %
", ".join(reqs_refs))
tracer.debug("Finished.")
|
[
"def",
"__output_latex_one_constraint",
"(",
"self",
",",
"cname",
",",
"cnstrt",
")",
":",
"cname",
"=",
"LatexJinja2",
".",
"__strescape",
"(",
"cname",
")",
"tracer",
".",
"debug",
"(",
"\"Output constraint [%s].\"",
"%",
"cname",
")",
"self",
".",
"__fd",
".",
"write",
"(",
"u\"%% CONSTRAINT '%s'\\n\"",
"%",
"cname",
")",
"self",
".",
"__fd",
".",
"write",
"(",
"u\"\\\\%s{%s}\\\\label{CONSTRAINT%s}\\n\"",
"\"\\\\textbf{Description:} %s\\n\"",
"%",
"(",
"self",
".",
"level_names",
"[",
"1",
"]",
",",
"cnstrt",
".",
"get_value",
"(",
"\"Name\"",
")",
".",
"get_content",
"(",
")",
",",
"cname",
",",
"cnstrt",
".",
"get_value",
"(",
"\"Description\"",
")",
".",
"get_content",
"(",
")",
")",
")",
"if",
"cnstrt",
".",
"is_val_av_and_not_null",
"(",
"\"Rationale\"",
")",
":",
"self",
".",
"__fd",
".",
"write",
"(",
"u\"\\n\\\\textbf{Rationale:} %s\\n\"",
"%",
"cnstrt",
".",
"get_value",
"(",
"\"Rationale\"",
")",
".",
"get_content",
"(",
")",
")",
"if",
"cnstrt",
".",
"is_val_av_and_not_null",
"(",
"\"Note\"",
")",
":",
"self",
".",
"__fd",
".",
"write",
"(",
"u\"\\n\\\\textbf{Note:} %s\\n\"",
"%",
"cnstrt",
".",
"get_value",
"(",
"\"Note\"",
")",
".",
"get_content",
"(",
")",
")",
"# Write out the references to the requirements",
"reqs_refs",
"=",
"[",
"]",
"for",
"req",
"in",
"self",
".",
"__constraints_reqs_ref",
"[",
"cname",
"]",
":",
"refid",
"=",
"LatexJinja2",
".",
"__strescape",
"(",
"req",
")",
"refctr",
"=",
"\"\\\\ref{%s} \\\\nameref{%s}\"",
"%",
"(",
"refid",
",",
"refid",
")",
"reqs_refs",
".",
"append",
"(",
"refctr",
")",
"self",
".",
"__fd",
".",
"write",
"(",
"u\"\\n\\\\textbf{Requirements:} %s\\n\"",
"%",
"\", \"",
".",
"join",
"(",
"reqs_refs",
")",
")",
"tracer",
".",
"debug",
"(",
"\"Finished.\"",
")"
] |
https://github.com/florath/rmtoo/blob/6ffe08703451358dca24b232ee4380b1da23bcad/rmtoo/outputs/LatexJinja2.py#L83-L115
|
||
frappe/erpnext
|
9d36e30ef7043b391b5ed2523b8288bf46c45d18
|
erpnext/portal/doctype/products_settings/products_settings.py
|
python
|
home_page_is_products
|
(doc, method)
|
Called on saving Website Settings
|
Called on saving Website Settings
|
[
"Called",
"on",
"saving",
"Website",
"Settings"
] |
def home_page_is_products(doc, method):
'''Called on saving Website Settings'''
home_page_is_products = cint(frappe.db.get_single_value('Products Settings', 'home_page_is_products'))
if home_page_is_products:
doc.home_page = 'products'
|
[
"def",
"home_page_is_products",
"(",
"doc",
",",
"method",
")",
":",
"home_page_is_products",
"=",
"cint",
"(",
"frappe",
".",
"db",
".",
"get_single_value",
"(",
"'Products Settings'",
",",
"'home_page_is_products'",
")",
")",
"if",
"home_page_is_products",
":",
"doc",
".",
"home_page",
"=",
"'products'"
] |
https://github.com/frappe/erpnext/blob/9d36e30ef7043b391b5ed2523b8288bf46c45d18/erpnext/portal/doctype/products_settings/products_settings.py#L39-L43
|
||
andyzsf/TuShare
|
92787ad0cd492614bdb6389b71a19c80d1c8c9ae
|
tushare/stock/trading.py
|
python
|
get_hists
|
(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001)
|
Fetch historical quote data for multiple symbols in one batch; see the get_hist_data interface for the parameters and the type of data returned.
|
Fetch historical quote data for multiple symbols in one batch; see the get_hist_data interface for the parameters and the type of data returned.
|
[
"批量获取历史行情数据,具体参数和返回数据类型请参考get_hist_data接口"
] |
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Fetch historical quote data for multiple symbols in one batch; see the get_hist_data interface for the parameters and the type of data returned.
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
|
[
"def",
"get_hists",
"(",
"symbols",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"ktype",
"=",
"'D'",
",",
"retry_count",
"=",
"3",
",",
"pause",
"=",
"0.001",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"if",
"isinstance",
"(",
"symbols",
",",
"list",
")",
"or",
"isinstance",
"(",
"symbols",
",",
"set",
")",
"or",
"isinstance",
"(",
"symbols",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"symbols",
",",
"pd",
".",
"Series",
")",
":",
"for",
"symbol",
"in",
"symbols",
":",
"data",
"=",
"get_hist_data",
"(",
"symbol",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"ktype",
"=",
"ktype",
",",
"retry_count",
"=",
"retry_count",
",",
"pause",
"=",
"pause",
")",
"data",
"[",
"'code'",
"]",
"=",
"symbol",
"df",
"=",
"df",
".",
"append",
"(",
"data",
",",
"ignore_index",
"=",
"True",
")",
"return",
"df",
"else",
":",
"return",
"None"
] |
https://github.com/andyzsf/TuShare/blob/92787ad0cd492614bdb6389b71a19c80d1c8c9ae/tushare/stock/trading.py#L588-L604
|
||
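A usage sketch for get_hists; it needs network access and an older tushare release that still ships this free API, and the symbols and dates are arbitrary.

import tushare as ts

df = ts.get_hists(['600848', '000001'], start='2015-01-05', end='2015-06-30', ktype='D')
print(df.groupby('code').size())   # one block of rows per symbol, tagged by the 'code' column
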
readbeyond/aeneas
|
4d200a050690903b30b3d885b44714fecb23f18a
|
aeneas/validator.py
|
python
|
Validator.check_config_txt
|
(self, contents, is_config_string=False)
|
return self.result
|
Check whether the given TXT config file contents
(if ``is_config_string`` is ``False``) or
TXT config string (if ``is_config_string`` is ``True``)
is well-formed and it has all the required parameters.
:param string contents: the TXT config file contents or TXT config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
|
Check whether the given TXT config file contents
(if ``is_config_string`` is ``False``) or
TXT config string (if ``is_config_string`` is ``True``)
is well-formed and it has all the required parameters.
|
[
"Check",
"whether",
"the",
"given",
"TXT",
"config",
"file",
"contents",
"(",
"if",
"is_config_string",
"is",
"False",
")",
"or",
"TXT",
"config",
"string",
"(",
"if",
"is_config_string",
"is",
"True",
")",
"is",
"well",
"-",
"formed",
"and",
"it",
"has",
"all",
"the",
"required",
"parameters",
"."
] |
def check_config_txt(self, contents, is_config_string=False):
"""
Check whether the given TXT config file contents
(if ``is_config_string`` is ``False``) or
TXT config string (if ``is_config_string`` is ``True``)
is well-formed and it has all the required parameters.
:param string contents: the TXT config file contents or TXT config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log(u"Checking contents TXT config file")
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_config_txt"):
return self.result
is_bstring = gf.is_bytes(contents)
if is_bstring:
self.log(u"Checking that contents is well formed")
self.check_raw_string(contents, is_bstring=True)
if not self.result.passed:
return self.result
contents = gf.safe_unicode(contents)
if not is_config_string:
self.log(u"Converting file contents to config string")
contents = gf.config_txt_to_string(contents)
self.log(u"Checking required parameters")
required_parameters = self.TXT_REQUIRED_PARAMETERS
parameters = gf.config_string_to_dict(contents, self.result)
self._check_required_parameters(required_parameters, parameters)
self.log([u"Checking contents: returning %s", self.result.passed])
return self.result
|
[
"def",
"check_config_txt",
"(",
"self",
",",
"contents",
",",
"is_config_string",
"=",
"False",
")",
":",
"self",
".",
"log",
"(",
"u\"Checking contents TXT config file\"",
")",
"self",
".",
"result",
"=",
"ValidatorResult",
"(",
")",
"if",
"self",
".",
"_are_safety_checks_disabled",
"(",
"u\"check_config_txt\"",
")",
":",
"return",
"self",
".",
"result",
"is_bstring",
"=",
"gf",
".",
"is_bytes",
"(",
"contents",
")",
"if",
"is_bstring",
":",
"self",
".",
"log",
"(",
"u\"Checking that contents is well formed\"",
")",
"self",
".",
"check_raw_string",
"(",
"contents",
",",
"is_bstring",
"=",
"True",
")",
"if",
"not",
"self",
".",
"result",
".",
"passed",
":",
"return",
"self",
".",
"result",
"contents",
"=",
"gf",
".",
"safe_unicode",
"(",
"contents",
")",
"if",
"not",
"is_config_string",
":",
"self",
".",
"log",
"(",
"u\"Converting file contents to config string\"",
")",
"contents",
"=",
"gf",
".",
"config_txt_to_string",
"(",
"contents",
")",
"self",
".",
"log",
"(",
"u\"Checking required parameters\"",
")",
"required_parameters",
"=",
"self",
".",
"TXT_REQUIRED_PARAMETERS",
"parameters",
"=",
"gf",
".",
"config_string_to_dict",
"(",
"contents",
",",
"self",
".",
"result",
")",
"self",
".",
"_check_required_parameters",
"(",
"required_parameters",
",",
"parameters",
")",
"self",
".",
"log",
"(",
"[",
"u\"Checking contents: returning %s\"",
",",
"self",
".",
"result",
".",
"passed",
"]",
")",
"return",
"self",
".",
"result"
] |
https://github.com/readbeyond/aeneas/blob/4d200a050690903b30b3d885b44714fecb23f18a/aeneas/validator.py#L348-L378
|
|
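A hedged sketch of calling check_config_txt on a config string; the key=value pairs are illustrative only and not necessarily the exact required parameter set.

from aeneas.validator import Validator

contents = u"job_language=eng|os_job_file_name=output.zip|os_job_file_container=zip"
result = Validator().check_config_txt(contents, is_config_string=True)
print(result.passed)   # True only if all required TXT parameters are present
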
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/sgmllib.py
|
python
|
SGMLParser.close
|
(self)
|
Handle the remaining data.
|
Handle the remaining data.
|
[
"Handle",
"the",
"remaining",
"data",
"."
] |
def close(self):
"""Handle the remaining data."""
self.goahead(1)
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"goahead",
"(",
"1",
")"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/sgmllib.py#L106-L108
|
||
timkpaine/pyEX
|
254acd2b0cf7cb7183100106f4ecc11d1860c46a
|
pyEX/economic/economic.py
|
python
|
fedfundsAsync
|
(
token="", version="stable", filter="", format="json", **timeseries_kwargs
)
|
return await timeSeriesAsync(
id="ECONOMIC",
key="FEDFUNDS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
|
[] |
async def fedfundsAsync(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
_timeseriesWrapper(timeseries_kwargs)
return await timeSeriesAsync(
id="ECONOMIC",
key="FEDFUNDS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
|
[
"async",
"def",
"fedfundsAsync",
"(",
"token",
"=",
"\"\"",
",",
"version",
"=",
"\"stable\"",
",",
"filter",
"=",
"\"\"",
",",
"format",
"=",
"\"json\"",
",",
"*",
"*",
"timeseries_kwargs",
")",
":",
"_timeseriesWrapper",
"(",
"timeseries_kwargs",
")",
"return",
"await",
"timeSeriesAsync",
"(",
"id",
"=",
"\"ECONOMIC\"",
",",
"key",
"=",
"\"FEDFUNDS\"",
",",
"token",
"=",
"token",
",",
"version",
"=",
"version",
",",
"filter",
"=",
"filter",
",",
"format",
"=",
"format",
",",
"*",
"*",
"timeseries_kwargs",
")"
] |
https://github.com/timkpaine/pyEX/blob/254acd2b0cf7cb7183100106f4ecc11d1860c46a/pyEX/economic/economic.py#L103-L115
|
|||
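fedfundsAsync is a coroutine and must be awaited; a minimal sketch, assuming a valid IEX Cloud token and importing from the module path shown above.

import asyncio
from pyEX.economic.economic import fedfundsAsync

async def main():
    data = await fedfundsAsync(token="YOUR_IEX_TOKEN")   # token is a placeholder
    print(data)

asyncio.run(main())
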
volatilityfoundation/volatility
|
a438e768194a9e05eb4d9ee9338b881c0fa25937
|
volatility/plugins/vadinfo.py
|
python
|
VADInfo.write_vad_short
|
(self, outfd, vad)
|
Renders a text version of a Short Vad
|
Renders a text version of a Short Vad
|
[
"Renders",
"a",
"text",
"version",
"of",
"a",
"Short",
"Vad"
] |
def write_vad_short(self, outfd, vad):
"""Renders a text version of a Short Vad"""
self.table_header(None,
[("VAD node @", str(len("VAD node @"))),
("address", "[addrpad]"),
("Start", "5"),
("startaddr", "[addrpad]"),
("End", "3"),
("endaddr", "[addrpad]"),
("Tag", "3"),
("tagval", ""),
])
self.table_row(outfd, "VAD node @",
vad.obj_offset,
"Start",
vad.Start,
"End",
vad.End,
"Tag",
vad.Tag)
outfd.write("Flags: {0}\n".format(str(vad.VadFlags)))
# although the numeric value of Protection is printed above with VadFlags,
# let's show the user a human-readable translation of the protection
outfd.write("Protection: {0}\n".format(PROTECT_FLAGS.get(vad.VadFlags.Protection.v(), hex(vad.VadFlags.Protection))))
# translate the vad type if its available (> XP)
if hasattr(vad.VadFlags, "VadType"):
outfd.write("Vad Type: {0}\n".format(MI_VAD_TYPE.get(vad.VadFlags.VadType.v(), hex(vad.VadFlags.VadType))))
|
[
"def",
"write_vad_short",
"(",
"self",
",",
"outfd",
",",
"vad",
")",
":",
"self",
".",
"table_header",
"(",
"None",
",",
"[",
"(",
"\"VAD node @\"",
",",
"str",
"(",
"len",
"(",
"\"VAD node @\"",
")",
")",
")",
",",
"(",
"\"address\"",
",",
"\"[addrpad]\"",
")",
",",
"(",
"\"Start\"",
",",
"\"5\"",
")",
",",
"(",
"\"startaddr\"",
",",
"\"[addrpad]\"",
")",
",",
"(",
"\"End\"",
",",
"\"3\"",
")",
",",
"(",
"\"endaddr\"",
",",
"\"[addrpad]\"",
")",
",",
"(",
"\"Tag\"",
",",
"\"3\"",
")",
",",
"(",
"\"tagval\"",
",",
"\"\"",
")",
",",
"]",
")",
"self",
".",
"table_row",
"(",
"outfd",
",",
"\"VAD node @\"",
",",
"vad",
".",
"obj_offset",
",",
"\"Start\"",
",",
"vad",
".",
"Start",
",",
"\"End\"",
",",
"vad",
".",
"End",
",",
"\"Tag\"",
",",
"vad",
".",
"Tag",
")",
"outfd",
".",
"write",
"(",
"\"Flags: {0}\\n\"",
".",
"format",
"(",
"str",
"(",
"vad",
".",
"VadFlags",
")",
")",
")",
"# although the numeric value of Protection is printed above with VadFlags,",
"# let's show the user a human-readable translation of the protection ",
"outfd",
".",
"write",
"(",
"\"Protection: {0}\\n\"",
".",
"format",
"(",
"PROTECT_FLAGS",
".",
"get",
"(",
"vad",
".",
"VadFlags",
".",
"Protection",
".",
"v",
"(",
")",
",",
"hex",
"(",
"vad",
".",
"VadFlags",
".",
"Protection",
")",
")",
")",
")",
"# translate the vad type if its available (> XP)",
"if",
"hasattr",
"(",
"vad",
".",
"VadFlags",
",",
"\"VadType\"",
")",
":",
"outfd",
".",
"write",
"(",
"\"Vad Type: {0}\\n\"",
".",
"format",
"(",
"MI_VAD_TYPE",
".",
"get",
"(",
"vad",
".",
"VadFlags",
".",
"VadType",
".",
"v",
"(",
")",
",",
"hex",
"(",
"vad",
".",
"VadFlags",
".",
"VadType",
")",
")",
")",
")"
] |
https://github.com/volatilityfoundation/volatility/blob/a438e768194a9e05eb4d9ee9338b881c0fa25937/volatility/plugins/vadinfo.py#L224-L250
|
||
web2py/web2py
|
095905c4e010a1426c729483d912e270a51b7ba8
|
gluon/contrib/markmin/markmin2html.py
|
python
|
render
|
(text,
extra={},
allowed={},
sep='p',
URL=None,
environment=None,
latex='google',
autolinks='default',
protolinks='default',
class_prefix='',
id_prefix='markmin_',
pretty_print=False)
|
return text.translate(ttab_out)
|
Arguments:
- text is the text to be processed
- extra is a dict like extra=dict(custom=lambda value: value) that process custom code
as in " ``this is custom code``:custom "
- allowed is a dictionary of list of allowed classes like
allowed = dict(code=('python','cpp','java'))
- sep can be 'p' to separate text in <p>...</p>
or can be 'br' to separate text using <br />
- URL -
- environment is a dictionary of environment variables (can be accessed with @{variable}
- latex -
- autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
- protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
(default is protolinks(proto,url))
- class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
then for ``test``:cls class will be changed to "my_cls" (default value is '')
- id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
>>> render('this is\\n# a section\\n\\nparagraph')
'<p>this is</p><h1>a section</h1><p>paragraph</p>'
>>> render('this is\\n## a subsection\\n\\nparagraph')
'<p>this is</p><h2>a subsection</h2><p>paragraph</p>'
>>> render('this is\\n### a subsubsection\\n\\nparagraph')
'<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>'
>>> render('**hello world**')
'<p><strong>hello world</strong></p>'
>>> render('``hello world``')
'<code>hello world</code>'
>>> render('``hello world``:python')
'<code class="python">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python')
'<pre><code class="python">hello\\nworld</code></pre>'
>>> render('``hello world``:python[test_id]')
'<code class="python" id="markmin_test_id">hello world</code>'
>>> render('``hello world``:id[test_id]')
'<code id="markmin_test_id">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python[test_id]')
'<pre><code class="python" id="markmin_test_id">hello\\nworld</code></pre>'
>>> render('``\\nhello\\nworld\\n``:id[test_id]')
'<pre><code id="markmin_test_id">hello\\nworld</code></pre>'
>>> render("''hello world''")
'<p><em>hello world</em></p>'
>>> render('** hello** **world**')
'<p>** hello** <strong>world</strong></p>'
>>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another')
'<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>'
>>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another')
'<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>'
>>> render("----\\na | b\\nc | d\\n----\\n")
'<table><tbody><tr class="first"><td>a</td><td>b</td></tr><tr class="even"><td>c</td><td>d</td></tr></tbody></table>'
>>> render("----\\nhello world\\n----\\n")
'<blockquote><p>hello world</p></blockquote>'
>>> render('[[myanchor]]')
'<p><span class="anchor" id="markmin_myanchor"></span></p>'
>>> render('[[ http://example.com]]')
'<p><a href="http://example.com">http://example.com</a></p>'
>>> render('[[bookmark [http://example.com] ]]')
'<p><span class="anchor" id="markmin_bookmark"><a href="http://example.com">http://example.com</a></span></p>'
>>> render('[[this is a link http://example.com]]')
'<p><a href="http://example.com">this is a link</a></p>'
>>> render('[[this is an image http://example.com left]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left" /></p>'
>>> render('[[this is an image http://example.com left 200px]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left;width:200px" /></p>'
>>> render("[[Your browser doesn't support <video> HTML5 tag http://example.com video]]")
'<p><video controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <video> HTML5 tag</video></p>'
>>> render("[[Your browser doesn't support <audio> HTML5 tag http://example.com audio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render("[[Your\\nbrowser\\ndoesn't\\nsupport\\n<audio> HTML5 tag http://exam\\\\\\nple.com\\naudio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render('[[this is a **link** http://example.com]]')
'<p><a href="http://example.com">this is a <strong>link</strong></a></p>'
>>> render("``aaa``:custom", extra=dict(custom=lambda text: 'x'+text+'x'))
'xaaax'
>>> print(render(r"$$\int_a^b sin(x)dx$$"))
<img src="http://chart.apis.google.com/chart?cht=tx&chl=%5Cint_a%5Eb%20sin%28x%29dx" />
>>> markmin2html(r"use backslash: \[\[[[mess\[[ag\]]e link]]\]]")
'<p>use backslash: [[<a href="link">mess[[ag]]e</a>]]</p>'
>>> markmin2html("backslash instead of exclamation sign: \``probe``")
'<p>backslash instead of exclamation sign: ``probe``</p>'
>>> render(r"simple image: [[\[[this is an image\]] http://example.com IMG]]!!!")
'<p>simple image: <img src="http://example.com" alt="[[this is an image]]" />!!!</p>'
>>> render(r"simple link no anchor with popup: [[ http://example.com popup]]")
'<p>simple link no anchor with popup: <a href="http://example.com" target="_blank">http://example.com</a></p>'
>>> render("auto-url: http://example.com")
'<p>auto-url: <a href="http://example.com">http://example.com</a></p>'
>>> render("auto-image: (http://example.com/image.jpeg)")
'<p>auto-image: (<img src="http://example.com/image.jpeg" controls />)</p>'
>>> render("qr: (qr:http://example.com/image.jpeg)")
'<p>qr: (<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=http://example.com/image.jpeg&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />)</p>'
>>> render("embed: (embed:http://example.com/page)")
'<p>embed: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("iframe: (iframe:http://example.com/page)")
'<p>iframe: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("title1: [[test message [simple \[test\] title] http://example.com ]] test")
'<p>title1: <a href="http://example.com" title="simple [test] title">test message</a> test</p>'
>>> render("title2: \[\[[[test message [simple title] http://example.com popup]]\]]")
'<p>title2: [[<a href="http://example.com" title="simple title" target="_blank">test message</a>]]</p>'
>>> render("title3: [[ [link w/o anchor but with title] http://www.example.com ]]")
'<p>title3: <a href="http://www.example.com" title="link w/o anchor but with title">http://www.example.com</a></p>'
>>> render("title4: [[ [simple title] http://www.example.com popup]]")
'<p>title4: <a href="http://www.example.com" title="simple title" target="_blank">http://www.example.com</a></p>'
>>> render("title5: [[test message [simple title] http://example.com IMG]]")
'<p>title5: <img src="http://example.com" alt="test message" title="simple title" /></p>'
>>> render("title6: [[[test message w/o title] http://example.com IMG]]")
'<p>title6: <img src="http://example.com" alt="[test message w/o title]" /></p>'
>>> render("title7: [[[this is not a title] [this is a title] http://example.com IMG]]")
'<p>title7: <img src="http://example.com" alt="[this is not a title]" title="this is a title" /></p>'
>>> render("title8: [[test message [title] http://example.com center]]")
'<p>title8: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" /></p></p>'
>>> render("title9: [[test message [title] http://example.com left]]")
'<p>title9: <img src="http://example.com" alt="test message" title="title" style="float:left" /></p>'
>>> render("title10: [[test message [title] http://example.com right 100px]]")
'<p>title10: <img src="http://example.com" alt="test message" title="title" style="float:right;width:100px" /></p>'
>>> render("title11: [[test message [title] http://example.com center 200px]]")
'<p>title11: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" style="width:200px" /></p></p>'
>>> render(r"\\[[probe]]")
'<p>[[probe]]</p>'
>>> render(r"\\\\[[probe]]")
'<p>\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\[[probe]]")
'<p>\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\[[probe]]")
'<p>\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\\\\[[probe]]")
'<p>\\\\\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\\\\[[probe]]")
'<p>\\\\\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render("``[[ [\\[[probe\]\\]] URL\\[x\\]]]``:red[dummy_params]")
'<span style="color: red"><a href="URL[x]" title="[[probe]]">URL[x]</a></span>'
>>> render("the \\**text**")
'<p>the **text**</p>'
>>> render("the \\``text``")
'<p>the ``text``</p>'
>>> render("the \\\\''text''")
"<p>the ''text''</p>"
>>> render("the [[link [**with** ``<b>title</b>``:red] http://www.example.com]]")
'<p>the <a href="http://www.example.com" title="**with** ``<b>title</b>``:red">link</a></p>'
>>> render("the [[link \\[**without** ``<b>title</b>``:red\\] http://www.example.com]]")
'<p>the <a href="http://www.example.com">link [<strong>without</strong> <span style="color: red"><b>title</b></span>]</a></p>'
>>> render("aaa-META-``code``:text[]-LINK-[[link http://www.example.com]]-LINK-[[image http://www.picture.com img]]-end")
'<p>aaa-META-<code class="text">code</code>-LINK-<a href="http://www.example.com">link</a>-LINK-<img src="http://www.picture.com" alt="image" />-end</p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a>]]")
'<p><a href="<a>text3</a>" title="<a>test2</a>"><a>test</a></a></p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a> IMG]]")
'<p><img src="<a>text3</a>" alt="<a>test</a>" title="<a>test2</a>" /></p>'
>>> render("**bold** ''italic'' ~~strikeout~~")
'<p><strong>bold</strong> <em>italic</em> <del>strikeout</del></p>'
>>> render("this is ``a red on yellow text``:c[#FF0000:#FFFF00]")
'<p>this is <span style="color: #FF0000;background-color: #FFFF00;">a red on yellow text</span></p>'
>>> render("this is ``a text with yellow background``:c[:yellow]")
'<p>this is <span style="background-color: yellow;">a text with yellow background</span></p>'
>>> render("this is ``a colored text (RoyalBlue)``:color[rgb(65,105,225)]")
'<p>this is <span style="color: rgb(65,105,225);">a colored text (RoyalBlue)</span></p>'
>>> render("this is ``a green text``:color[green:]")
'<p>this is <span style="color: green;">a green text</span></p>'
>>> render("**@{probe:1}**", environment=dict(probe=lambda t:"test %s" % t))
'<p><strong>test 1</strong></p>'
>>> render("**@{probe:t=a}**", environment=dict(probe=lambda t:"test %s" % t, a=1))
'<p><strong>test 1</strong></p>'
>>> render('[[id1 [span **messag** in ''markmin''] ]] ... [[**link** to id [link\\\'s title] #mark1]]')
'<p><span class="anchor" id="markmin_id1">span <strong>messag</strong> in markmin</span> ... <a href="#markmin_mark1" title="link\\\'s title"><strong>link</strong> to id</a></p>'
>>> render('# Multiline[[NEWLINE]]\\n title\\nParagraph[[NEWLINE]]\\nwith breaks[[NEWLINE]]\\nin it')
'<h1>Multiline<br /> title</h1><p>Paragraph<br /> with breaks<br /> in it</p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [ ] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE"></span></p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [newline] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE">newline</span></p>'
|
Arguments:
- text is the text to be processed
- extra is a dict like extra=dict(custom=lambda value: value) that process custom code
as in " ``this is custom code``:custom "
- allowed is a dictionary of list of allowed classes like
allowed = dict(code=('python','cpp','java'))
- sep can be 'p' to separate text in <p>...</p>
or can be 'br' to separate text using <br />
- URL -
- environment is a dictionary of environment variables (can be accessed with @{variable}
- latex -
- autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
- protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
(default is protolinks(proto,url))
- class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
then for ``test``:cls class will be changed to "my_cls" (default value is '')
- id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
|
[
"Arguments",
":",
"-",
"text",
"is",
"the",
"text",
"to",
"be",
"processed",
"-",
"extra",
"is",
"a",
"dict",
"like",
"extra",
"=",
"dict",
"(",
"custom",
"=",
"lambda",
"value",
":",
"value",
")",
"that",
"process",
"custom",
"code",
"as",
"in",
"this",
"is",
"custom",
"code",
":",
"custom",
"-",
"allowed",
"is",
"a",
"dictionary",
"of",
"list",
"of",
"allowed",
"classes",
"like",
"allowed",
"=",
"dict",
"(",
"code",
"=",
"(",
"python",
"cpp",
"java",
"))",
"-",
"sep",
"can",
"be",
"p",
"to",
"separate",
"text",
"in",
"<p",
">",
"...",
"<",
"/",
"p",
">",
"or",
"can",
"be",
"br",
"to",
"separate",
"text",
"using",
"<br",
"/",
">",
"-",
"URL",
"-",
"-",
"environment",
"is",
"a",
"dictionary",
"of",
"environment",
"variables",
"(",
"can",
"be",
"accessed",
"with",
"@",
"{",
"variable",
"}",
"-",
"latex",
"-",
"-",
"autolinks",
"is",
"a",
"function",
"to",
"convert",
"auto",
"urls",
"to",
"html",
"-",
"code",
"(",
"default",
"is",
"autolinks",
"(",
"url",
")",
")",
"-",
"protolinks",
"is",
"a",
"function",
"to",
"convert",
"proto",
"-",
"urls",
"(",
"e",
".",
"g",
".",
"proto",
":",
"url",
")",
"to",
"html",
"-",
"code",
"(",
"default",
"is",
"protolinks",
"(",
"proto",
"url",
"))",
"-",
"class_prefix",
"is",
"a",
"prefix",
"for",
"ALL",
"classes",
"in",
"markmin",
"text",
".",
"E",
".",
"g",
".",
"if",
"class_prefix",
"=",
"my_",
"then",
"for",
"test",
":",
"cls",
"class",
"will",
"be",
"changed",
"to",
"my_cls",
"(",
"default",
"value",
"is",
")",
"-",
"id_prefix",
"is",
"prefix",
"for",
"ALL",
"ids",
"in",
"markmin",
"text",
"(",
"default",
"value",
"is",
"markmin_",
")",
".",
"E",
".",
"g",
".",
":",
"--",
"[[",
"id",
"]]",
"will",
"be",
"converted",
"to",
"<span",
"class",
"=",
"anchor",
"id",
"=",
"markmin_id",
">",
"<",
"/",
"span",
">",
"--",
"[[",
"link",
"#id",
"]]",
"will",
"be",
"converted",
"to",
"<a",
"href",
"=",
"#markmin_id",
">",
"link<",
"/",
"a",
">",
"--",
"test",
":",
"cls",
"[",
"id",
"]",
"will",
"be",
"converted",
"to",
"<code",
"class",
"=",
"cls",
"id",
"=",
"markmin_id",
">",
"test<",
"/",
"code",
">"
] |
def render(text,
extra={},
allowed={},
sep='p',
URL=None,
environment=None,
latex='google',
autolinks='default',
protolinks='default',
class_prefix='',
id_prefix='markmin_',
pretty_print=False):
"""
Arguments:
- text is the text to be processed
- extra is a dict like extra=dict(custom=lambda value: value) that process custom code
as in " ``this is custom code``:custom "
- allowed is a dictionary of list of allowed classes like
allowed = dict(code=('python','cpp','java'))
- sep can be 'p' to separate text in <p>...</p>
or can be 'br' to separate text using <br />
- URL - optional URL helper; when given, @-style url references in the text are expanded with it (default is None, i.e. no expansion)
- environment is a dictionary of environment variables (can be accessed with @{variable})
- latex - how $$...$$ formulas are rendered; with the default 'google' they become images served by the Google Charts API
- autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
- protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
(default is protolinks(proto,url))
- class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
then for ``test``:cls class will be changed to "my_cls" (default value is '')
- id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
>>> render('this is\\n# a section\\n\\nparagraph')
'<p>this is</p><h1>a section</h1><p>paragraph</p>'
>>> render('this is\\n## a subsection\\n\\nparagraph')
'<p>this is</p><h2>a subsection</h2><p>paragraph</p>'
>>> render('this is\\n### a subsubsection\\n\\nparagraph')
'<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>'
>>> render('**hello world**')
'<p><strong>hello world</strong></p>'
>>> render('``hello world``')
'<code>hello world</code>'
>>> render('``hello world``:python')
'<code class="python">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python')
'<pre><code class="python">hello\\nworld</code></pre>'
>>> render('``hello world``:python[test_id]')
'<code class="python" id="markmin_test_id">hello world</code>'
>>> render('``hello world``:id[test_id]')
'<code id="markmin_test_id">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python[test_id]')
'<pre><code class="python" id="markmin_test_id">hello\\nworld</code></pre>'
>>> render('``\\nhello\\nworld\\n``:id[test_id]')
'<pre><code id="markmin_test_id">hello\\nworld</code></pre>'
>>> render("''hello world''")
'<p><em>hello world</em></p>'
>>> render('** hello** **world**')
'<p>** hello** <strong>world</strong></p>'
>>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another')
'<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>'
>>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another')
'<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>'
>>> render("----\\na | b\\nc | d\\n----\\n")
'<table><tbody><tr class="first"><td>a</td><td>b</td></tr><tr class="even"><td>c</td><td>d</td></tr></tbody></table>'
>>> render("----\\nhello world\\n----\\n")
'<blockquote><p>hello world</p></blockquote>'
>>> render('[[myanchor]]')
'<p><span class="anchor" id="markmin_myanchor"></span></p>'
>>> render('[[ http://example.com]]')
'<p><a href="http://example.com">http://example.com</a></p>'
>>> render('[[bookmark [http://example.com] ]]')
'<p><span class="anchor" id="markmin_bookmark"><a href="http://example.com">http://example.com</a></span></p>'
>>> render('[[this is a link http://example.com]]')
'<p><a href="http://example.com">this is a link</a></p>'
>>> render('[[this is an image http://example.com left]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left" /></p>'
>>> render('[[this is an image http://example.com left 200px]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left;width:200px" /></p>'
>>> render("[[Your browser doesn't support <video> HTML5 tag http://example.com video]]")
'<p><video controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <video> HTML5 tag</video></p>'
>>> render("[[Your browser doesn't support <audio> HTML5 tag http://example.com audio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render("[[Your\\nbrowser\\ndoesn't\\nsupport\\n<audio> HTML5 tag http://exam\\\\\\nple.com\\naudio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render('[[this is a **link** http://example.com]]')
'<p><a href="http://example.com">this is a <strong>link</strong></a></p>'
>>> render("``aaa``:custom", extra=dict(custom=lambda text: 'x'+text+'x'))
'xaaax'
>>> print(render(r"$$\int_a^b sin(x)dx$$"))
<img src="http://chart.apis.google.com/chart?cht=tx&chl=%5Cint_a%5Eb%20sin%28x%29dx" />
>>> markmin2html(r"use backslash: \[\[[[mess\[[ag\]]e link]]\]]")
'<p>use backslash: [[<a href="link">mess[[ag]]e</a>]]</p>'
>>> markmin2html("backslash instead of exclamation sign: \``probe``")
'<p>backslash instead of exclamation sign: ``probe``</p>'
>>> render(r"simple image: [[\[[this is an image\]] http://example.com IMG]]!!!")
'<p>simple image: <img src="http://example.com" alt="[[this is an image]]" />!!!</p>'
>>> render(r"simple link no anchor with popup: [[ http://example.com popup]]")
'<p>simple link no anchor with popup: <a href="http://example.com" target="_blank">http://example.com</a></p>'
>>> render("auto-url: http://example.com")
'<p>auto-url: <a href="http://example.com">http://example.com</a></p>'
>>> render("auto-image: (http://example.com/image.jpeg)")
'<p>auto-image: (<img src="http://example.com/image.jpeg" controls />)</p>'
>>> render("qr: (qr:http://example.com/image.jpeg)")
'<p>qr: (<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=http://example.com/image.jpeg&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />)</p>'
>>> render("embed: (embed:http://example.com/page)")
'<p>embed: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("iframe: (iframe:http://example.com/page)")
'<p>iframe: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("title1: [[test message [simple \[test\] title] http://example.com ]] test")
'<p>title1: <a href="http://example.com" title="simple [test] title">test message</a> test</p>'
>>> render("title2: \[\[[[test message [simple title] http://example.com popup]]\]]")
'<p>title2: [[<a href="http://example.com" title="simple title" target="_blank">test message</a>]]</p>'
>>> render("title3: [[ [link w/o anchor but with title] http://www.example.com ]]")
'<p>title3: <a href="http://www.example.com" title="link w/o anchor but with title">http://www.example.com</a></p>'
>>> render("title4: [[ [simple title] http://www.example.com popup]]")
'<p>title4: <a href="http://www.example.com" title="simple title" target="_blank">http://www.example.com</a></p>'
>>> render("title5: [[test message [simple title] http://example.com IMG]]")
'<p>title5: <img src="http://example.com" alt="test message" title="simple title" /></p>'
>>> render("title6: [[[test message w/o title] http://example.com IMG]]")
'<p>title6: <img src="http://example.com" alt="[test message w/o title]" /></p>'
>>> render("title7: [[[this is not a title] [this is a title] http://example.com IMG]]")
'<p>title7: <img src="http://example.com" alt="[this is not a title]" title="this is a title" /></p>'
>>> render("title8: [[test message [title] http://example.com center]]")
'<p>title8: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" /></p></p>'
>>> render("title9: [[test message [title] http://example.com left]]")
'<p>title9: <img src="http://example.com" alt="test message" title="title" style="float:left" /></p>'
>>> render("title10: [[test message [title] http://example.com right 100px]]")
'<p>title10: <img src="http://example.com" alt="test message" title="title" style="float:right;width:100px" /></p>'
>>> render("title11: [[test message [title] http://example.com center 200px]]")
'<p>title11: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" style="width:200px" /></p></p>'
>>> render(r"\\[[probe]]")
'<p>[[probe]]</p>'
>>> render(r"\\\\[[probe]]")
'<p>\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\[[probe]]")
'<p>\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\[[probe]]")
'<p>\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\\\\[[probe]]")
'<p>\\\\\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\\\\[[probe]]")
'<p>\\\\\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render("``[[ [\\[[probe\]\\]] URL\\[x\\]]]``:red[dummy_params]")
'<span style="color: red"><a href="URL[x]" title="[[probe]]">URL[x]</a></span>'
>>> render("the \\**text**")
'<p>the **text**</p>'
>>> render("the \\``text``")
'<p>the ``text``</p>'
>>> render("the \\\\''text''")
"<p>the ''text''</p>"
>>> render("the [[link [**with** ``<b>title</b>``:red] http://www.example.com]]")
'<p>the <a href="http://www.example.com" title="**with** ``<b>title</b>``:red">link</a></p>'
>>> render("the [[link \\[**without** ``<b>title</b>``:red\\] http://www.example.com]]")
'<p>the <a href="http://www.example.com">link [<strong>without</strong> <span style="color: red"><b>title</b></span>]</a></p>'
>>> render("aaa-META-``code``:text[]-LINK-[[link http://www.example.com]]-LINK-[[image http://www.picture.com img]]-end")
'<p>aaa-META-<code class="text">code</code>-LINK-<a href="http://www.example.com">link</a>-LINK-<img src="http://www.picture.com" alt="image" />-end</p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a>]]")
'<p><a href="<a>text3</a>" title="<a>test2</a>"><a>test</a></a></p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a> IMG]]")
'<p><img src="<a>text3</a>" alt="<a>test</a>" title="<a>test2</a>" /></p>'
>>> render("**bold** ''italic'' ~~strikeout~~")
'<p><strong>bold</strong> <em>italic</em> <del>strikeout</del></p>'
>>> render("this is ``a red on yellow text``:c[#FF0000:#FFFF00]")
'<p>this is <span style="color: #FF0000;background-color: #FFFF00;">a red on yellow text</span></p>'
>>> render("this is ``a text with yellow background``:c[:yellow]")
'<p>this is <span style="background-color: yellow;">a text with yellow background</span></p>'
>>> render("this is ``a colored text (RoyalBlue)``:color[rgb(65,105,225)]")
'<p>this is <span style="color: rgb(65,105,225);">a colored text (RoyalBlue)</span></p>'
>>> render("this is ``a green text``:color[green:]")
'<p>this is <span style="color: green;">a green text</span></p>'
>>> render("**@{probe:1}**", environment=dict(probe=lambda t:"test %s" % t))
'<p><strong>test 1</strong></p>'
>>> render("**@{probe:t=a}**", environment=dict(probe=lambda t:"test %s" % t, a=1))
'<p><strong>test 1</strong></p>'
>>> render('[[id1 [span **messag** in ''markmin''] ]] ... [[**link** to id [link\\\'s title] #mark1]]')
'<p><span class="anchor" id="markmin_id1">span <strong>messag</strong> in markmin</span> ... <a href="#markmin_mark1" title="link\\\'s title"><strong>link</strong> to id</a></p>'
>>> render('# Multiline[[NEWLINE]]\\n title\\nParagraph[[NEWLINE]]\\nwith breaks[[NEWLINE]]\\nin it')
'<h1>Multiline<br /> title</h1><p>Paragraph<br /> with breaks<br /> in it</p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [ ] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE"></span></p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [newline] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE">newline</span></p>'
"""
if autolinks == "default":
autolinks = autolinks_simple
if protolinks == "default":
protolinks = protolinks_simple
pp = '\n' if pretty_print else ''
text = text if text is None or isinstance(text, str) else text.decode('utf8', 'strict')
if not (isinstance(text, str)):
text = str(text or '')
text = regex_backslash.sub(lambda m: m.group(1).translate(ttab_in), text)
text = text.replace('\x05', '').replace('\r\n', '\n')  # concatenate strings separated by \\n
if URL is not None:
text = replace_at_urls(text, URL)
if latex == 'google':
text = regex_dd.sub('``\g<latex>``:latex ', text)
#############################################################
# replace all blocks marked with ``...``:class[id] with META
# store them into segments they will be treated as code
#############################################################
segments = []
def mark_code(m):
g = m.group(0)
if g in (META, DISABLED_META):
segments.append((None, None, None, g))
return m.group()
elif g == '````':
segments.append((None, None, None, ''))
return m.group()
else:
c = m.group('c') or ''
p = m.group('p') or ''
if 'code' in allowed and c not in allowed['code']:
c = ''
code = m.group('t').replace('!`!', '`')
segments.append((code, c, p, m.group(0)))
return META
text = regex_code.sub(mark_code, text)
#############################################################
# replace all blocks marked with [[...]] with LINK
# store them into links they will be treated as link
#############################################################
links = []
def mark_link(m):
links.append(None if m.group() == LINK
else m.group('s'))
return LINK
text = regex_link.sub(mark_link, text)
text = local_html_escape(text)
if protolinks:
text = regex_proto.sub(lambda m: protolinks(*m.group('p', 'k')), text)
if autolinks:
text = replace_autolinks(text, autolinks)
#############################################################
# normalize spaces
#############################################################
strings = text.split('\n')
def parse_title(t, s): # out, lev, etags, tag, s):
hlevel = str(len(t))
out.extend(etags[::-1])
out.append("<h%s>%s" % (hlevel, s))
etags[:] = ["</h%s>%s" % (hlevel, pp)]
lev = 0
ltags[:] = []
tlev[:] = []
return (lev, 'h')
def parse_list(t, p, s, tag, lev, mtag, lineno):
lent = len(t)
if lent < lev: # current item level < previous item level
while ltags[-1] > lent:
ltags.pop()
out.append(etags.pop())
lev = lent
tlev[lev:] = []
if lent > lev: # current item level > previous item level
if lev == 0: # previous line is not a list (paragraph or title)
out.extend(etags[::-1])
ltags[:] = []
tlev[:] = []
etags[:] = []
if pend and mtag == '.': # paragraph in a list:
out.append(etags.pop())
ltags.pop()
for i in range(lent - lev):
out.append('<' + tag + '>' + pp)
etags.append('</' + tag + '>' + pp)
lev += 1
ltags.append(lev)
tlev.append(tag)
elif lent == lev:
if tlev[-1] != tag:
# type of list is changed (ul<=>ol):
for i in range(ltags.count(lent)):
ltags.pop()
out.append(etags.pop())
tlev[-1] = tag
out.append('<' + tag + '>' + pp)
etags.append('</' + tag + '>' + pp)
ltags.append(lev)
else:
if ltags.count(lev) > 1:
out.append(etags.pop())
ltags.pop()
mtag = 'l'
out.append('<li>')
etags.append('</li>' + pp)
ltags.append(lev)
if s[:1] == '-':
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
if p and mtag == 'l':
(lev, mtag, lineno) = parse_point(t, s, lev, '', lineno)
else:
out.append(s)
return (lev, mtag, lineno)
def parse_point(t, s, lev, mtag, lineno):
""" paragraphs in lists """
lent = len(t)
if lent > lev:
return parse_list(t, '.', s, 'ul', lev, mtag, lineno)
elif lent < lev:
while ltags[-1] > lent:
ltags.pop()
out.append(etags.pop())
lev = lent
tlev[lev:] = []
mtag = ''
elif lent == lev:
if pend and mtag == '.':
out.append(etags.pop())
ltags.pop()
if br and mtag in ('l', '.'):
out.append(br)
if s == META:
mtag = ''
else:
mtag = '.'
if s[:1] == '-':
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
if mtag == '.':
out.append(pbeg)
if pend:
etags.append(pend)
ltags.append(lev)
out.append(s)
return (lev, mtag, lineno)
def parse_table_or_blockquote(s, mtag, lineno):
# check next line. If next line :
# - is empty -> this is an <hr /> tag
# - consists '|' -> table
# - consists other characters -> blockquote
if (lineno + 1 >= strings_len or
not (s.count('-') == len(s) and len(s) > 3)):
return (s, mtag, lineno)
lineno += 1
s = strings[lineno].strip()
if s:
if '|' in s:
# table
tout = []
thead = []
tbody = []
rownum = 0
t_id = ''
t_cls = ''
# parse table:
while lineno < strings_len:
s = strings[lineno].strip()
if s[:1] == '=':
# header or footer
if s.count('=') == len(s) and len(s) > 3:
if not thead: # if thead list is empty:
thead = tout
else:
tbody.extend(tout)
tout = []
rownum = 0
lineno += 1
continue
m = regex_tq.match(s)
if m:
t_cls = m.group('c') or ''
t_id = m.group('p') or ''
break
if rownum % 2:
tr = '<tr class="even">'
else:
tr = '<tr class="first">' if rownum == 0 else '<tr>'
tout.append(tr + ''.join(['<td%s>%s</td>' % (
' class="num"'
if regex_num.match(f) else '',
f.strip()
) for f in s.split('|')]) + '</tr>' + pp)
rownum += 1
lineno += 1
t_cls = ' class="%s%s"' % (class_prefix, t_cls) \
if t_cls and t_cls != 'id' else ''
t_id = ' id="%s%s"' % (id_prefix, t_id) if t_id else ''
s = ''
if thead:
s += '<thead>' + pp + ''.join([l for l in thead]) + '</thead>' + pp
if not tbody: # tbody strings are in tout list
tbody = tout
tout = []
if tbody: # if tbody list is not empty:
s += '<tbody>' + pp + ''.join([l for l in tbody]) + '</tbody>' + pp
if tout: # tfoot is not empty:
s += '<tfoot>' + pp + ''.join([l for l in tout]) + '</tfoot>' + pp
s = '<table%s%s>%s%s</table>%s' % (t_cls, t_id, pp, s, pp)
mtag = 't'
else:
# parse blockquote:
bq_begin = lineno
t_mode = False # embedded table
t_cls = ''
t_id = ''
# search blockquote closing line:
while lineno < strings_len:
s = strings[lineno].strip()
if not t_mode:
m = regex_tq.match(s)
if m:
if (lineno + 1 == strings_len or
'|' not in strings[lineno + 1]):
t_cls = m.group('c') or ''
t_id = m.group('p') or ''
break
if regex_bq_headline.match(s):
if (lineno + 1 < strings_len and
strings[lineno + 1].strip()):
t_mode = True
lineno += 1
continue
elif regex_tq.match(s):
t_mode = False
lineno += 1
continue
lineno += 1
t_cls = ' class="%s%s"' % (class_prefix, t_cls) \
if t_cls and t_cls != 'id' else ''
t_id = ' id="%s%s"' % (id_prefix, t_id) \
if t_id else ''
s = '<blockquote%s%s>%s</blockquote>%s' \
% (t_cls,
t_id,
render('\n'.join(strings[bq_begin:lineno])), pp)
mtag = 'q'
else:
s = '<hr />'
lineno -= 1
mtag = 'q'
return (s, 'q', lineno)
if sep == 'p':
pbeg = "<p>"
pend = "</p>" + pp
br = ''
else:
pbeg = pend = ''
br = "<br />" + pp if sep == 'br' else ''
lev = 0 # nesting level of lists
c0 = '' # first character of current line
out = [] # list of processed lines
etags = [] # trailing tags
ltags = [] # level# correspondent to trailing tag
tlev = [] # list of tags for each level ('ul' or 'ol')
mtag = '' # marked tag (~last tag) ('l','.','h','p','t'). Used to set <br/>
# and to avoid <p></p> around tables and blockquotes
lineno = 0
strings_len = len(strings)
while lineno < strings_len:
s0 = strings[lineno][:1]
s = strings[lineno].strip()
""" # + - . ---------------------
## ++ -- .. ------- field | field | field <-title
### +++ --- ... quote =====================
#### ++++ ---- .... ------- field | field | field <-body
##### +++++ ----- ..... ---------------------:class[id]
"""
pc0 = c0 # first character of previous line
c0 = s[:1]
if c0: # for non empty strings
if c0 in "#+-.": # first character is one of: # + - .
(t1, t2, p, ss) = regex_list.findall(s)[0]
# t1 - tag ("###")
# t2 - tag ("+++", "---", "...")
# p - paragraph point ('.')->for "++." or "--."
# ss - other part of string
if t1 or t2:
# headers and lists:
if c0 == '#': # headers
(lev, mtag) = parse_title(t1, ss)
lineno += 1
continue
elif c0 == '+': # ordered list
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ol', lev, mtag, lineno)
lineno += 1
continue
elif c0 == '-': # unordered list, table or blockquote
if p or ss:
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ul', lev, mtag, lineno)
lineno += 1
continue
else:
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
elif lev > 0: # and c0 == '.' # paragraph in lists
(lev, mtag, lineno) = parse_point(t2, ss, lev, mtag, lineno)
lineno += 1
continue
if lev == 0 and (mtag == 'q' or s == META):
# new paragraph
pc0 = ''
if pc0 == '' or (mtag != 'p' and s0 not in (' ', '\t')):
# paragraph
out.extend(etags[::-1])
etags = []
ltags = []
tlev = []
lev = 0
if br and mtag == 'p':
out.append(br)
if mtag != 'q' and s != META:
if pend:
etags = [pend]
out.append(pbeg)
mtag = 'p'
else:
mtag = ''
out.append(s)
else:
if lev > 0 and mtag == '.' and s == META:
out.append(etags.pop())
ltags.pop()
out.append(s)
mtag = ''
else:
out.append(' ' + s)
lineno += 1
out.extend(etags[::-1])
text = ''.join(out)
#############################################################
# do strong,em,del
#############################################################
text = regex_strong.sub('<strong>\g<t></strong>', text)
text = regex_del.sub('<del>\g<t></del>', text)
text = regex_em.sub('<em>\g<t></em>', text)
#############################################################
# deal with images, videos, audios and links
#############################################################
def sub_media(m):
t, a, k, p, w = m.group('t', 'a', 'k', 'p', 'w')
if not k:
return m.group(0)
k = local_html_escape(k)
t = t or ''
style = 'width:%s' % w if w else ''
title = ' title="%s"' % local_html_escape(a).replace(META, DISABLED_META) if a else ''
p_begin = p_end = ''
if p == 'center':
p_begin = '<p style="text-align:center">'
p_end = '</p>' + pp
elif p == 'blockleft':
p_begin = '<p style="text-align:left">'
p_end = '</p>' + pp
elif p == 'blockright':
p_begin = '<p style="text-align:right">'
p_end = '</p>' + pp
elif p in ('left', 'right'):
style = ('float:%s' % p) + (';%s' % style if style else '')
if t and regex_auto.match(t):
p_begin = p_begin + '<a href="%s">' % t
p_end = '</a>' + p_end
t = ''
if style:
style = ' style="%s"' % style
if p in ('video', 'audio'):
t = render(t, {}, {}, 'br', URL, environment, latex,
autolinks, protolinks, class_prefix, id_prefix, pretty_print)
return '<%(p)s controls="controls"%(title)s%(style)s><source src="%(k)s" />%(t)s</%(p)s>' \
% dict(p=p, title=title, style=style, k=k, t=t)
alt = ' alt="%s"' % local_html_escape(t).replace(META, DISABLED_META) if t else ''
return '%(begin)s<img src="%(k)s"%(alt)s%(title)s%(style)s />%(end)s' \
% dict(begin=p_begin, k=k, alt=alt, title=title, style=style, end=p_end)
def sub_link(m):
t, a, k, p = m.group('t', 'a', 'k', 'p')
if not k and not t:
return m.group(0)
t = t or ''
a = local_html_escape(a) if a else ''
if k:
if '#' in k and ':' not in k.split('#')[0]:
# wikipage, not external url
k = k.replace('#', '#' + id_prefix)
k = local_html_escape(k)
title = ' title="%s"' % a.replace(META, DISABLED_META) if a else ''
target = ' target="_blank"' if p == 'popup' else ''
t = render(t, {}, {}, 'br', URL, environment, latex, None,
None, class_prefix, id_prefix, pretty_print) if t else k
return '<a href="%(k)s"%(title)s%(target)s>%(t)s</a>' \
% dict(k=k, title=title, target=target, t=t)
if t == 'NEWLINE' and not a:
return '<br />' + pp
return '<span class="anchor" id="%s">%s</span>' % (
local_html_escape(id_prefix + t),
render(a, {}, {}, 'br', URL,
environment, latex, autolinks,
protolinks, class_prefix,
id_prefix, pretty_print))
parts = text.split(LINK)
text = parts[0]
for i, s in enumerate(links):
if s is None:
html = LINK
else:
html = regex_media_level2.sub(sub_media, s)
if html == s:
html = regex_link_level2.sub(sub_link, html)
if html == s:
# return unprocessed string as a signal of an error
html = '[[%s]]' % s
text += html + parts[i + 1]
#############################################################
# process all code text
#############################################################
def expand_meta(m):
code, b, p, s = segments.pop(0)
if code is None or m.group() == DISABLED_META:
return local_html_escape(s)
if b in extra:
if code[:1] == '\n':
code = code[1:]
if code[-1:] == '\n':
code = code[:-1]
if p:
return str(extra[b](code, p))
else:
return str(extra[b](code))
elif b == 'cite':
return '[' + ','.join('<a href="#%s" class="%s">%s</a>' %
(id_prefix + d, b, d) for d in local_html_escape(code).split(',')) + ']'
elif b == 'latex':
return LATEX % urllib_quote(code)
elif b in html_colors:
return '<span style="color: %s">%s</span>' \
% (b, render(code, {}, {}, 'br', URL, environment, latex,
autolinks, protolinks, class_prefix, id_prefix, pretty_print))
elif b in ('c', 'color') and p:
c = p.split(':')
fg = 'color: %s;' % c[0] if c[0] else ''
bg = 'background-color: %s;' % c[1] if len(c) > 1 and c[1] else ''
return '<span style="%s%s">%s</span>' \
% (fg, bg, render(code, {}, {}, 'br', URL, environment, latex,
autolinks, protolinks, class_prefix, id_prefix, pretty_print))
cls = ' class="%s%s"' % (class_prefix, b) if b and b != 'id' else ''
id = ' id="%s%s"' % (id_prefix, local_html_escape(p)) if p else ''
beg = (code[:1] == '\n')
end = [None, -1][code[-1:] == '\n']
if beg and end:
return '<pre><code%s%s>%s</code></pre>%s' % (cls, id, local_html_escape(code[1:-1]), pp)
return '<code%s%s>%s</code>' % (cls, id, local_html_escape(code[beg:end]))
text = regex_expand_meta.sub(expand_meta, text)
if environment:
text = replace_components(text, environment)
return text.translate(ttab_out)
|
[
"def",
"render",
"(",
"text",
",",
"extra",
"=",
"{",
"}",
",",
"allowed",
"=",
"{",
"}",
",",
"sep",
"=",
"'p'",
",",
"URL",
"=",
"None",
",",
"environment",
"=",
"None",
",",
"latex",
"=",
"'google'",
",",
"autolinks",
"=",
"'default'",
",",
"protolinks",
"=",
"'default'",
",",
"class_prefix",
"=",
"''",
",",
"id_prefix",
"=",
"'markmin_'",
",",
"pretty_print",
"=",
"False",
")",
":",
"if",
"autolinks",
"==",
"\"default\"",
":",
"autolinks",
"=",
"autolinks_simple",
"if",
"protolinks",
"==",
"\"default\"",
":",
"protolinks",
"=",
"protolinks_simple",
"pp",
"=",
"'\\n'",
"if",
"pretty_print",
"else",
"''",
"text",
"=",
"text",
"if",
"text",
"is",
"None",
"or",
"isinstance",
"(",
"text",
",",
"str",
")",
"else",
"text",
".",
"decode",
"(",
"'utf8'",
",",
"'strict'",
")",
"if",
"not",
"(",
"isinstance",
"(",
"text",
",",
"str",
")",
")",
":",
"text",
"=",
"str",
"(",
"text",
"or",
"''",
")",
"text",
"=",
"regex_backslash",
".",
"sub",
"(",
"lambda",
"m",
":",
"m",
".",
"group",
"(",
"1",
")",
".",
"translate",
"(",
"ttab_in",
")",
",",
"text",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'\\x05'",
",",
"''",
")",
".",
"replace",
"(",
"'\\r\\n'",
",",
"'\\n'",
")",
"# concatenate strings separeted by \\\\n",
"if",
"URL",
"is",
"not",
"None",
":",
"text",
"=",
"replace_at_urls",
"(",
"text",
",",
"URL",
")",
"if",
"latex",
"==",
"'google'",
":",
"text",
"=",
"regex_dd",
".",
"sub",
"(",
"'``\\g<latex>``:latex '",
",",
"text",
")",
"#############################################################",
"# replace all blocks marked with ``...``:class[id] with META",
"# store them into segments they will be treated as code",
"#############################################################",
"segments",
"=",
"[",
"]",
"def",
"mark_code",
"(",
"m",
")",
":",
"g",
"=",
"m",
".",
"group",
"(",
"0",
")",
"if",
"g",
"in",
"(",
"META",
",",
"DISABLED_META",
")",
":",
"segments",
".",
"append",
"(",
"(",
"None",
",",
"None",
",",
"None",
",",
"g",
")",
")",
"return",
"m",
".",
"group",
"(",
")",
"elif",
"g",
"==",
"'````'",
":",
"segments",
".",
"append",
"(",
"(",
"None",
",",
"None",
",",
"None",
",",
"''",
")",
")",
"return",
"m",
".",
"group",
"(",
")",
"else",
":",
"c",
"=",
"m",
".",
"group",
"(",
"'c'",
")",
"or",
"''",
"p",
"=",
"m",
".",
"group",
"(",
"'p'",
")",
"or",
"''",
"if",
"'code'",
"in",
"allowed",
"and",
"c",
"not",
"in",
"allowed",
"[",
"'code'",
"]",
":",
"c",
"=",
"''",
"code",
"=",
"m",
".",
"group",
"(",
"'t'",
")",
".",
"replace",
"(",
"'!`!'",
",",
"'`'",
")",
"segments",
".",
"append",
"(",
"(",
"code",
",",
"c",
",",
"p",
",",
"m",
".",
"group",
"(",
"0",
")",
")",
")",
"return",
"META",
"text",
"=",
"regex_code",
".",
"sub",
"(",
"mark_code",
",",
"text",
")",
"#############################################################",
"# replace all blocks marked with [[...]] with LINK",
"# store them into links they will be treated as link",
"#############################################################",
"links",
"=",
"[",
"]",
"def",
"mark_link",
"(",
"m",
")",
":",
"links",
".",
"append",
"(",
"None",
"if",
"m",
".",
"group",
"(",
")",
"==",
"LINK",
"else",
"m",
".",
"group",
"(",
"'s'",
")",
")",
"return",
"LINK",
"text",
"=",
"regex_link",
".",
"sub",
"(",
"mark_link",
",",
"text",
")",
"text",
"=",
"local_html_escape",
"(",
"text",
")",
"if",
"protolinks",
":",
"text",
"=",
"regex_proto",
".",
"sub",
"(",
"lambda",
"m",
":",
"protolinks",
"(",
"*",
"m",
".",
"group",
"(",
"'p'",
",",
"'k'",
")",
")",
",",
"text",
")",
"if",
"autolinks",
":",
"text",
"=",
"replace_autolinks",
"(",
"text",
",",
"autolinks",
")",
"#############################################################",
"# normalize spaces",
"#############################################################",
"strings",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"def",
"parse_title",
"(",
"t",
",",
"s",
")",
":",
"# out, lev, etags, tag, s):",
"hlevel",
"=",
"str",
"(",
"len",
"(",
"t",
")",
")",
"out",
".",
"extend",
"(",
"etags",
"[",
":",
":",
"-",
"1",
"]",
")",
"out",
".",
"append",
"(",
"\"<h%s>%s\"",
"%",
"(",
"hlevel",
",",
"s",
")",
")",
"etags",
"[",
":",
"]",
"=",
"[",
"\"</h%s>%s\"",
"%",
"(",
"hlevel",
",",
"pp",
")",
"]",
"lev",
"=",
"0",
"ltags",
"[",
":",
"]",
"=",
"[",
"]",
"tlev",
"[",
":",
"]",
"=",
"[",
"]",
"return",
"(",
"lev",
",",
"'h'",
")",
"def",
"parse_list",
"(",
"t",
",",
"p",
",",
"s",
",",
"tag",
",",
"lev",
",",
"mtag",
",",
"lineno",
")",
":",
"lent",
"=",
"len",
"(",
"t",
")",
"if",
"lent",
"<",
"lev",
":",
"# current item level < previous item level",
"while",
"ltags",
"[",
"-",
"1",
"]",
">",
"lent",
":",
"ltags",
".",
"pop",
"(",
")",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"lev",
"=",
"lent",
"tlev",
"[",
"lev",
":",
"]",
"=",
"[",
"]",
"if",
"lent",
">",
"lev",
":",
"# current item level > previous item level",
"if",
"lev",
"==",
"0",
":",
"# previous line is not a list (paragraph or title)",
"out",
".",
"extend",
"(",
"etags",
"[",
":",
":",
"-",
"1",
"]",
")",
"ltags",
"[",
":",
"]",
"=",
"[",
"]",
"tlev",
"[",
":",
"]",
"=",
"[",
"]",
"etags",
"[",
":",
"]",
"=",
"[",
"]",
"if",
"pend",
"and",
"mtag",
"==",
"'.'",
":",
"# paragraph in a list:",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"ltags",
".",
"pop",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"lent",
"-",
"lev",
")",
":",
"out",
".",
"append",
"(",
"'<'",
"+",
"tag",
"+",
"'>'",
"+",
"pp",
")",
"etags",
".",
"append",
"(",
"'</'",
"+",
"tag",
"+",
"'>'",
"+",
"pp",
")",
"lev",
"+=",
"1",
"ltags",
".",
"append",
"(",
"lev",
")",
"tlev",
".",
"append",
"(",
"tag",
")",
"elif",
"lent",
"==",
"lev",
":",
"if",
"tlev",
"[",
"-",
"1",
"]",
"!=",
"tag",
":",
"# type of list is changed (ul<=>ol):",
"for",
"i",
"in",
"range",
"(",
"ltags",
".",
"count",
"(",
"lent",
")",
")",
":",
"ltags",
".",
"pop",
"(",
")",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"tlev",
"[",
"-",
"1",
"]",
"=",
"tag",
"out",
".",
"append",
"(",
"'<'",
"+",
"tag",
"+",
"'>'",
"+",
"pp",
")",
"etags",
".",
"append",
"(",
"'</'",
"+",
"tag",
"+",
"'>'",
"+",
"pp",
")",
"ltags",
".",
"append",
"(",
"lev",
")",
"else",
":",
"if",
"ltags",
".",
"count",
"(",
"lev",
")",
">",
"1",
":",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"ltags",
".",
"pop",
"(",
")",
"mtag",
"=",
"'l'",
"out",
".",
"append",
"(",
"'<li>'",
")",
"etags",
".",
"append",
"(",
"'</li>'",
"+",
"pp",
")",
"ltags",
".",
"append",
"(",
"lev",
")",
"if",
"s",
"[",
":",
"1",
"]",
"==",
"'-'",
":",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_table_or_blockquote",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"if",
"p",
"and",
"mtag",
"==",
"'l'",
":",
"(",
"lev",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_point",
"(",
"t",
",",
"s",
",",
"lev",
",",
"''",
",",
"lineno",
")",
"else",
":",
"out",
".",
"append",
"(",
"s",
")",
"return",
"(",
"lev",
",",
"mtag",
",",
"lineno",
")",
"def",
"parse_point",
"(",
"t",
",",
"s",
",",
"lev",
",",
"mtag",
",",
"lineno",
")",
":",
"\"\"\" paragraphs in lists \"\"\"",
"lent",
"=",
"len",
"(",
"t",
")",
"if",
"lent",
">",
"lev",
":",
"return",
"parse_list",
"(",
"t",
",",
"'.'",
",",
"s",
",",
"'ul'",
",",
"lev",
",",
"mtag",
",",
"lineno",
")",
"elif",
"lent",
"<",
"lev",
":",
"while",
"ltags",
"[",
"-",
"1",
"]",
">",
"lent",
":",
"ltags",
".",
"pop",
"(",
")",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"lev",
"=",
"lent",
"tlev",
"[",
"lev",
":",
"]",
"=",
"[",
"]",
"mtag",
"=",
"''",
"elif",
"lent",
"==",
"lev",
":",
"if",
"pend",
"and",
"mtag",
"==",
"'.'",
":",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"ltags",
".",
"pop",
"(",
")",
"if",
"br",
"and",
"mtag",
"in",
"(",
"'l'",
",",
"'.'",
")",
":",
"out",
".",
"append",
"(",
"br",
")",
"if",
"s",
"==",
"META",
":",
"mtag",
"=",
"''",
"else",
":",
"mtag",
"=",
"'.'",
"if",
"s",
"[",
":",
"1",
"]",
"==",
"'-'",
":",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_table_or_blockquote",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"if",
"mtag",
"==",
"'.'",
":",
"out",
".",
"append",
"(",
"pbeg",
")",
"if",
"pend",
":",
"etags",
".",
"append",
"(",
"pend",
")",
"ltags",
".",
"append",
"(",
"lev",
")",
"out",
".",
"append",
"(",
"s",
")",
"return",
"(",
"lev",
",",
"mtag",
",",
"lineno",
")",
"def",
"parse_table_or_blockquote",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
":",
"# check next line. If next line :",
"# - is empty -> this is an <hr /> tag",
"# - consists '|' -> table",
"# - consists other characters -> blockquote",
"if",
"(",
"lineno",
"+",
"1",
">=",
"strings_len",
"or",
"not",
"(",
"s",
".",
"count",
"(",
"'-'",
")",
"==",
"len",
"(",
"s",
")",
"and",
"len",
"(",
"s",
")",
">",
"3",
")",
")",
":",
"return",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"lineno",
"+=",
"1",
"s",
"=",
"strings",
"[",
"lineno",
"]",
".",
"strip",
"(",
")",
"if",
"s",
":",
"if",
"'|'",
"in",
"s",
":",
"# table",
"tout",
"=",
"[",
"]",
"thead",
"=",
"[",
"]",
"tbody",
"=",
"[",
"]",
"rownum",
"=",
"0",
"t_id",
"=",
"''",
"t_cls",
"=",
"''",
"# parse table:",
"while",
"lineno",
"<",
"strings_len",
":",
"s",
"=",
"strings",
"[",
"lineno",
"]",
".",
"strip",
"(",
")",
"if",
"s",
"[",
":",
"1",
"]",
"==",
"'='",
":",
"# header or footer",
"if",
"s",
".",
"count",
"(",
"'='",
")",
"==",
"len",
"(",
"s",
")",
"and",
"len",
"(",
"s",
")",
">",
"3",
":",
"if",
"not",
"thead",
":",
"# if thead list is empty:",
"thead",
"=",
"tout",
"else",
":",
"tbody",
".",
"extend",
"(",
"tout",
")",
"tout",
"=",
"[",
"]",
"rownum",
"=",
"0",
"lineno",
"+=",
"1",
"continue",
"m",
"=",
"regex_tq",
".",
"match",
"(",
"s",
")",
"if",
"m",
":",
"t_cls",
"=",
"m",
".",
"group",
"(",
"'c'",
")",
"or",
"''",
"t_id",
"=",
"m",
".",
"group",
"(",
"'p'",
")",
"or",
"''",
"break",
"if",
"rownum",
"%",
"2",
":",
"tr",
"=",
"'<tr class=\"even\">'",
"else",
":",
"tr",
"=",
"'<tr class=\"first\">'",
"if",
"rownum",
"==",
"0",
"else",
"'<tr>'",
"tout",
".",
"append",
"(",
"tr",
"+",
"''",
".",
"join",
"(",
"[",
"'<td%s>%s</td>'",
"%",
"(",
"' class=\"num\"'",
"if",
"regex_num",
".",
"match",
"(",
"f",
")",
"else",
"''",
",",
"f",
".",
"strip",
"(",
")",
")",
"for",
"f",
"in",
"s",
".",
"split",
"(",
"'|'",
")",
"]",
")",
"+",
"'</tr>'",
"+",
"pp",
")",
"rownum",
"+=",
"1",
"lineno",
"+=",
"1",
"t_cls",
"=",
"' class=\"%s%s\"'",
"%",
"(",
"class_prefix",
",",
"t_cls",
")",
"if",
"t_cls",
"and",
"t_cls",
"!=",
"'id'",
"else",
"''",
"t_id",
"=",
"' id=\"%s%s\"'",
"%",
"(",
"id_prefix",
",",
"t_id",
")",
"if",
"t_id",
"else",
"''",
"s",
"=",
"''",
"if",
"thead",
":",
"s",
"+=",
"'<thead>'",
"+",
"pp",
"+",
"''",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"thead",
"]",
")",
"+",
"'</thead>'",
"+",
"pp",
"if",
"not",
"tbody",
":",
"# tbody strings are in tout list",
"tbody",
"=",
"tout",
"tout",
"=",
"[",
"]",
"if",
"tbody",
":",
"# if tbody list is not empty:",
"s",
"+=",
"'<tbody>'",
"+",
"pp",
"+",
"''",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"tbody",
"]",
")",
"+",
"'</tbody>'",
"+",
"pp",
"if",
"tout",
":",
"# tfoot is not empty:",
"s",
"+=",
"'<tfoot>'",
"+",
"pp",
"+",
"''",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"tout",
"]",
")",
"+",
"'</tfoot>'",
"+",
"pp",
"s",
"=",
"'<table%s%s>%s%s</table>%s'",
"%",
"(",
"t_cls",
",",
"t_id",
",",
"pp",
",",
"s",
",",
"pp",
")",
"mtag",
"=",
"'t'",
"else",
":",
"# parse blockquote:",
"bq_begin",
"=",
"lineno",
"t_mode",
"=",
"False",
"# embedded table",
"t_cls",
"=",
"''",
"t_id",
"=",
"''",
"# search blockquote closing line:",
"while",
"lineno",
"<",
"strings_len",
":",
"s",
"=",
"strings",
"[",
"lineno",
"]",
".",
"strip",
"(",
")",
"if",
"not",
"t_mode",
":",
"m",
"=",
"regex_tq",
".",
"match",
"(",
"s",
")",
"if",
"m",
":",
"if",
"(",
"lineno",
"+",
"1",
"==",
"strings_len",
"or",
"'|'",
"not",
"in",
"strings",
"[",
"lineno",
"+",
"1",
"]",
")",
":",
"t_cls",
"=",
"m",
".",
"group",
"(",
"'c'",
")",
"or",
"''",
"t_id",
"=",
"m",
".",
"group",
"(",
"'p'",
")",
"or",
"''",
"break",
"if",
"regex_bq_headline",
".",
"match",
"(",
"s",
")",
":",
"if",
"(",
"lineno",
"+",
"1",
"<",
"strings_len",
"and",
"strings",
"[",
"lineno",
"+",
"1",
"]",
".",
"strip",
"(",
")",
")",
":",
"t_mode",
"=",
"True",
"lineno",
"+=",
"1",
"continue",
"elif",
"regex_tq",
".",
"match",
"(",
"s",
")",
":",
"t_mode",
"=",
"False",
"lineno",
"+=",
"1",
"continue",
"lineno",
"+=",
"1",
"t_cls",
"=",
"' class=\"%s%s\"'",
"%",
"(",
"class_prefix",
",",
"t_cls",
")",
"if",
"t_cls",
"and",
"t_cls",
"!=",
"'id'",
"else",
"''",
"t_id",
"=",
"' id=\"%s%s\"'",
"%",
"(",
"id_prefix",
",",
"t_id",
")",
"if",
"t_id",
"else",
"''",
"s",
"=",
"'<blockquote%s%s>%s</blockquote>%s'",
"%",
"(",
"t_cls",
",",
"t_id",
",",
"render",
"(",
"'\\n'",
".",
"join",
"(",
"strings",
"[",
"bq_begin",
":",
"lineno",
"]",
")",
")",
",",
"pp",
")",
"mtag",
"=",
"'q'",
"else",
":",
"s",
"=",
"'<hr />'",
"lineno",
"-=",
"1",
"mtag",
"=",
"'q'",
"return",
"(",
"s",
",",
"'q'",
",",
"lineno",
")",
"if",
"sep",
"==",
"'p'",
":",
"pbeg",
"=",
"\"<p>\"",
"pend",
"=",
"\"</p>\"",
"+",
"pp",
"br",
"=",
"''",
"else",
":",
"pbeg",
"=",
"pend",
"=",
"''",
"br",
"=",
"\"<br />\"",
"+",
"pp",
"if",
"sep",
"==",
"'br'",
"else",
"''",
"lev",
"=",
"0",
"# nesting level of lists",
"c0",
"=",
"''",
"# first character of current line",
"out",
"=",
"[",
"]",
"# list of processed lines",
"etags",
"=",
"[",
"]",
"# trailing tags",
"ltags",
"=",
"[",
"]",
"# level# correspondent to trailing tag",
"tlev",
"=",
"[",
"]",
"# list of tags for each level ('ul' or 'ol')",
"mtag",
"=",
"''",
"# marked tag (~last tag) ('l','.','h','p','t'). Used to set <br/>",
"# and to avoid <p></p> around tables and blockquotes",
"lineno",
"=",
"0",
"strings_len",
"=",
"len",
"(",
"strings",
")",
"while",
"lineno",
"<",
"strings_len",
":",
"s0",
"=",
"strings",
"[",
"lineno",
"]",
"[",
":",
"1",
"]",
"s",
"=",
"strings",
"[",
"lineno",
"]",
".",
"strip",
"(",
")",
"\"\"\" # + - . ---------------------\n ## ++ -- .. ------- field | field | field <-title\n ### +++ --- ... quote =====================\n #### ++++ ---- .... ------- field | field | field <-body\n ##### +++++ ----- ..... ---------------------:class[id]\n \"\"\"",
"pc0",
"=",
"c0",
"# first character of previous line",
"c0",
"=",
"s",
"[",
":",
"1",
"]",
"if",
"c0",
":",
"# for non empty strings",
"if",
"c0",
"in",
"\"#+-.\"",
":",
"# first character is one of: # + - .",
"(",
"t1",
",",
"t2",
",",
"p",
",",
"ss",
")",
"=",
"regex_list",
".",
"findall",
"(",
"s",
")",
"[",
"0",
"]",
"# t1 - tag (\"###\")",
"# t2 - tag (\"+++\", \"---\", \"...\")",
"# p - paragraph point ('.')->for \"++.\" or \"--.\"",
"# ss - other part of string",
"if",
"t1",
"or",
"t2",
":",
"# headers and lists:",
"if",
"c0",
"==",
"'#'",
":",
"# headers",
"(",
"lev",
",",
"mtag",
")",
"=",
"parse_title",
"(",
"t1",
",",
"ss",
")",
"lineno",
"+=",
"1",
"continue",
"elif",
"c0",
"==",
"'+'",
":",
"# ordered list",
"(",
"lev",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_list",
"(",
"t2",
",",
"p",
",",
"ss",
",",
"'ol'",
",",
"lev",
",",
"mtag",
",",
"lineno",
")",
"lineno",
"+=",
"1",
"continue",
"elif",
"c0",
"==",
"'-'",
":",
"# unordered list, table or blockquote",
"if",
"p",
"or",
"ss",
":",
"(",
"lev",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_list",
"(",
"t2",
",",
"p",
",",
"ss",
",",
"'ul'",
",",
"lev",
",",
"mtag",
",",
"lineno",
")",
"lineno",
"+=",
"1",
"continue",
"else",
":",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_table_or_blockquote",
"(",
"s",
",",
"mtag",
",",
"lineno",
")",
"elif",
"lev",
">",
"0",
":",
"# and c0 == '.' # paragraph in lists",
"(",
"lev",
",",
"mtag",
",",
"lineno",
")",
"=",
"parse_point",
"(",
"t2",
",",
"ss",
",",
"lev",
",",
"mtag",
",",
"lineno",
")",
"lineno",
"+=",
"1",
"continue",
"if",
"lev",
"==",
"0",
"and",
"(",
"mtag",
"==",
"'q'",
"or",
"s",
"==",
"META",
")",
":",
"# new paragraph",
"pc0",
"=",
"''",
"if",
"pc0",
"==",
"''",
"or",
"(",
"mtag",
"!=",
"'p'",
"and",
"s0",
"not",
"in",
"(",
"' '",
",",
"'\\t'",
")",
")",
":",
"# paragraph",
"out",
".",
"extend",
"(",
"etags",
"[",
":",
":",
"-",
"1",
"]",
")",
"etags",
"=",
"[",
"]",
"ltags",
"=",
"[",
"]",
"tlev",
"=",
"[",
"]",
"lev",
"=",
"0",
"if",
"br",
"and",
"mtag",
"==",
"'p'",
":",
"out",
".",
"append",
"(",
"br",
")",
"if",
"mtag",
"!=",
"'q'",
"and",
"s",
"!=",
"META",
":",
"if",
"pend",
":",
"etags",
"=",
"[",
"pend",
"]",
"out",
".",
"append",
"(",
"pbeg",
")",
"mtag",
"=",
"'p'",
"else",
":",
"mtag",
"=",
"''",
"out",
".",
"append",
"(",
"s",
")",
"else",
":",
"if",
"lev",
">",
"0",
"and",
"mtag",
"==",
"'.'",
"and",
"s",
"==",
"META",
":",
"out",
".",
"append",
"(",
"etags",
".",
"pop",
"(",
")",
")",
"ltags",
".",
"pop",
"(",
")",
"out",
".",
"append",
"(",
"s",
")",
"mtag",
"=",
"''",
"else",
":",
"out",
".",
"append",
"(",
"' '",
"+",
"s",
")",
"lineno",
"+=",
"1",
"out",
".",
"extend",
"(",
"etags",
"[",
":",
":",
"-",
"1",
"]",
")",
"text",
"=",
"''",
".",
"join",
"(",
"out",
")",
"#############################################################",
"# do strong,em,del",
"#############################################################",
"text",
"=",
"regex_strong",
".",
"sub",
"(",
"'<strong>\\g<t></strong>'",
",",
"text",
")",
"text",
"=",
"regex_del",
".",
"sub",
"(",
"'<del>\\g<t></del>'",
",",
"text",
")",
"text",
"=",
"regex_em",
".",
"sub",
"(",
"'<em>\\g<t></em>'",
",",
"text",
")",
"#############################################################",
"# deal with images, videos, audios and links",
"#############################################################",
"def",
"sub_media",
"(",
"m",
")",
":",
"t",
",",
"a",
",",
"k",
",",
"p",
",",
"w",
"=",
"m",
".",
"group",
"(",
"'t'",
",",
"'a'",
",",
"'k'",
",",
"'p'",
",",
"'w'",
")",
"if",
"not",
"k",
":",
"return",
"m",
".",
"group",
"(",
"0",
")",
"k",
"=",
"local_html_escape",
"(",
"k",
")",
"t",
"=",
"t",
"or",
"''",
"style",
"=",
"'width:%s'",
"%",
"w",
"if",
"w",
"else",
"''",
"title",
"=",
"' title=\"%s\"'",
"%",
"local_html_escape",
"(",
"a",
")",
".",
"replace",
"(",
"META",
",",
"DISABLED_META",
")",
"if",
"a",
"else",
"''",
"p_begin",
"=",
"p_end",
"=",
"''",
"if",
"p",
"==",
"'center'",
":",
"p_begin",
"=",
"'<p style=\"text-align:center\">'",
"p_end",
"=",
"'</p>'",
"+",
"pp",
"elif",
"p",
"==",
"'blockleft'",
":",
"p_begin",
"=",
"'<p style=\"text-align:left\">'",
"p_end",
"=",
"'</p>'",
"+",
"pp",
"elif",
"p",
"==",
"'blockright'",
":",
"p_begin",
"=",
"'<p style=\"text-align:right\">'",
"p_end",
"=",
"'</p>'",
"+",
"pp",
"elif",
"p",
"in",
"(",
"'left'",
",",
"'right'",
")",
":",
"style",
"=",
"(",
"'float:%s'",
"%",
"p",
")",
"+",
"(",
"';%s'",
"%",
"style",
"if",
"style",
"else",
"''",
")",
"if",
"t",
"and",
"regex_auto",
".",
"match",
"(",
"t",
")",
":",
"p_begin",
"=",
"p_begin",
"+",
"'<a href=\"%s\">'",
"%",
"t",
"p_end",
"=",
"'</a>'",
"+",
"p_end",
"t",
"=",
"''",
"if",
"style",
":",
"style",
"=",
"' style=\"%s\"'",
"%",
"style",
"if",
"p",
"in",
"(",
"'video'",
",",
"'audio'",
")",
":",
"t",
"=",
"render",
"(",
"t",
",",
"{",
"}",
",",
"{",
"}",
",",
"'br'",
",",
"URL",
",",
"environment",
",",
"latex",
",",
"autolinks",
",",
"protolinks",
",",
"class_prefix",
",",
"id_prefix",
",",
"pretty_print",
")",
"return",
"'<%(p)s controls=\"controls\"%(title)s%(style)s><source src=\"%(k)s\" />%(t)s</%(p)s>'",
"%",
"dict",
"(",
"p",
"=",
"p",
",",
"title",
"=",
"title",
",",
"style",
"=",
"style",
",",
"k",
"=",
"k",
",",
"t",
"=",
"t",
")",
"alt",
"=",
"' alt=\"%s\"'",
"%",
"local_html_escape",
"(",
"t",
")",
".",
"replace",
"(",
"META",
",",
"DISABLED_META",
")",
"if",
"t",
"else",
"''",
"return",
"'%(begin)s<img src=\"%(k)s\"%(alt)s%(title)s%(style)s />%(end)s'",
"%",
"dict",
"(",
"begin",
"=",
"p_begin",
",",
"k",
"=",
"k",
",",
"alt",
"=",
"alt",
",",
"title",
"=",
"title",
",",
"style",
"=",
"style",
",",
"end",
"=",
"p_end",
")",
"def",
"sub_link",
"(",
"m",
")",
":",
"t",
",",
"a",
",",
"k",
",",
"p",
"=",
"m",
".",
"group",
"(",
"'t'",
",",
"'a'",
",",
"'k'",
",",
"'p'",
")",
"if",
"not",
"k",
"and",
"not",
"t",
":",
"return",
"m",
".",
"group",
"(",
"0",
")",
"t",
"=",
"t",
"or",
"''",
"a",
"=",
"local_html_escape",
"(",
"a",
")",
"if",
"a",
"else",
"''",
"if",
"k",
":",
"if",
"'#'",
"in",
"k",
"and",
"':'",
"not",
"in",
"k",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
":",
"# wikipage, not external url",
"k",
"=",
"k",
".",
"replace",
"(",
"'#'",
",",
"'#'",
"+",
"id_prefix",
")",
"k",
"=",
"local_html_escape",
"(",
"k",
")",
"title",
"=",
"' title=\"%s\"'",
"%",
"a",
".",
"replace",
"(",
"META",
",",
"DISABLED_META",
")",
"if",
"a",
"else",
"''",
"target",
"=",
"' target=\"_blank\"'",
"if",
"p",
"==",
"'popup'",
"else",
"''",
"t",
"=",
"render",
"(",
"t",
",",
"{",
"}",
",",
"{",
"}",
",",
"'br'",
",",
"URL",
",",
"environment",
",",
"latex",
",",
"None",
",",
"None",
",",
"class_prefix",
",",
"id_prefix",
",",
"pretty_print",
")",
"if",
"t",
"else",
"k",
"return",
"'<a href=\"%(k)s\"%(title)s%(target)s>%(t)s</a>'",
"%",
"dict",
"(",
"k",
"=",
"k",
",",
"title",
"=",
"title",
",",
"target",
"=",
"target",
",",
"t",
"=",
"t",
")",
"if",
"t",
"==",
"'NEWLINE'",
"and",
"not",
"a",
":",
"return",
"'<br />'",
"+",
"pp",
"return",
"'<span class=\"anchor\" id=\"%s\">%s</span>'",
"%",
"(",
"local_html_escape",
"(",
"id_prefix",
"+",
"t",
")",
",",
"render",
"(",
"a",
",",
"{",
"}",
",",
"{",
"}",
",",
"'br'",
",",
"URL",
",",
"environment",
",",
"latex",
",",
"autolinks",
",",
"protolinks",
",",
"class_prefix",
",",
"id_prefix",
",",
"pretty_print",
")",
")",
"parts",
"=",
"text",
".",
"split",
"(",
"LINK",
")",
"text",
"=",
"parts",
"[",
"0",
"]",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"links",
")",
":",
"if",
"s",
"is",
"None",
":",
"html",
"=",
"LINK",
"else",
":",
"html",
"=",
"regex_media_level2",
".",
"sub",
"(",
"sub_media",
",",
"s",
")",
"if",
"html",
"==",
"s",
":",
"html",
"=",
"regex_link_level2",
".",
"sub",
"(",
"sub_link",
",",
"html",
")",
"if",
"html",
"==",
"s",
":",
"# return unprocessed string as a signal of an error",
"html",
"=",
"'[[%s]]'",
"%",
"s",
"text",
"+=",
"html",
"+",
"parts",
"[",
"i",
"+",
"1",
"]",
"#############################################################",
"# process all code text",
"#############################################################",
"def",
"expand_meta",
"(",
"m",
")",
":",
"code",
",",
"b",
",",
"p",
",",
"s",
"=",
"segments",
".",
"pop",
"(",
"0",
")",
"if",
"code",
"is",
"None",
"or",
"m",
".",
"group",
"(",
")",
"==",
"DISABLED_META",
":",
"return",
"local_html_escape",
"(",
"s",
")",
"if",
"b",
"in",
"extra",
":",
"if",
"code",
"[",
":",
"1",
"]",
"==",
"'\\n'",
":",
"code",
"=",
"code",
"[",
"1",
":",
"]",
"if",
"code",
"[",
"-",
"1",
":",
"]",
"==",
"'\\n'",
":",
"code",
"=",
"code",
"[",
":",
"-",
"1",
"]",
"if",
"p",
":",
"return",
"str",
"(",
"extra",
"[",
"b",
"]",
"(",
"code",
",",
"p",
")",
")",
"else",
":",
"return",
"str",
"(",
"extra",
"[",
"b",
"]",
"(",
"code",
")",
")",
"elif",
"b",
"==",
"'cite'",
":",
"return",
"'['",
"+",
"','",
".",
"join",
"(",
"'<a href=\"#%s\" class=\"%s\">%s</a>'",
"%",
"(",
"id_prefix",
"+",
"d",
",",
"b",
",",
"d",
")",
"for",
"d",
"in",
"local_html_escape",
"(",
"code",
")",
".",
"split",
"(",
"','",
")",
")",
"+",
"']'",
"elif",
"b",
"==",
"'latex'",
":",
"return",
"LATEX",
"%",
"urllib_quote",
"(",
"code",
")",
"elif",
"b",
"in",
"html_colors",
":",
"return",
"'<span style=\"color: %s\">%s</span>'",
"%",
"(",
"b",
",",
"render",
"(",
"code",
",",
"{",
"}",
",",
"{",
"}",
",",
"'br'",
",",
"URL",
",",
"environment",
",",
"latex",
",",
"autolinks",
",",
"protolinks",
",",
"class_prefix",
",",
"id_prefix",
",",
"pretty_print",
")",
")",
"elif",
"b",
"in",
"(",
"'c'",
",",
"'color'",
")",
"and",
"p",
":",
"c",
"=",
"p",
".",
"split",
"(",
"':'",
")",
"fg",
"=",
"'color: %s;'",
"%",
"c",
"[",
"0",
"]",
"if",
"c",
"[",
"0",
"]",
"else",
"''",
"bg",
"=",
"'background-color: %s;'",
"%",
"c",
"[",
"1",
"]",
"if",
"len",
"(",
"c",
")",
">",
"1",
"and",
"c",
"[",
"1",
"]",
"else",
"''",
"return",
"'<span style=\"%s%s\">%s</span>'",
"%",
"(",
"fg",
",",
"bg",
",",
"render",
"(",
"code",
",",
"{",
"}",
",",
"{",
"}",
",",
"'br'",
",",
"URL",
",",
"environment",
",",
"latex",
",",
"autolinks",
",",
"protolinks",
",",
"class_prefix",
",",
"id_prefix",
",",
"pretty_print",
")",
")",
"cls",
"=",
"' class=\"%s%s\"'",
"%",
"(",
"class_prefix",
",",
"b",
")",
"if",
"b",
"and",
"b",
"!=",
"'id'",
"else",
"''",
"id",
"=",
"' id=\"%s%s\"'",
"%",
"(",
"id_prefix",
",",
"local_html_escape",
"(",
"p",
")",
")",
"if",
"p",
"else",
"''",
"beg",
"=",
"(",
"code",
"[",
":",
"1",
"]",
"==",
"'\\n'",
")",
"end",
"=",
"[",
"None",
",",
"-",
"1",
"]",
"[",
"code",
"[",
"-",
"1",
":",
"]",
"==",
"'\\n'",
"]",
"if",
"beg",
"and",
"end",
":",
"return",
"'<pre><code%s%s>%s</code></pre>%s'",
"%",
"(",
"cls",
",",
"id",
",",
"local_html_escape",
"(",
"code",
"[",
"1",
":",
"-",
"1",
"]",
")",
",",
"pp",
")",
"return",
"'<code%s%s>%s</code>'",
"%",
"(",
"cls",
",",
"id",
",",
"local_html_escape",
"(",
"code",
"[",
"beg",
":",
"end",
"]",
")",
")",
"text",
"=",
"regex_expand_meta",
".",
"sub",
"(",
"expand_meta",
",",
"text",
")",
"if",
"environment",
":",
"text",
"=",
"replace_components",
"(",
"text",
",",
"environment",
")",
"return",
"text",
".",
"translate",
"(",
"ttab_out",
")"
] |
https://github.com/web2py/web2py/blob/095905c4e010a1426c729483d912e270a51b7ba8/gluon/contrib/markmin/markmin2html.py#L727-L1472
|
|
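A short orientation note on the render() record above: the function works in two passes, first swapping ``...``:class[id] code blocks and [[...]] links for META/LINK placeholders, then expanding those placeholders after the surrounding text has been escaped and marked up. Below is a minimal usage sketch of the documented arguments; it assumes the module is importable from a web2py checkout as gluon.contrib.markmin.markmin2html, and the 'wiki' handler name plus the sample text are illustrative only, not taken from the original source.

# Minimal sketch of calling render() with the arguments documented above.
# The import path assumes a web2py source tree; the 'wiki' handler and the
# sample text are illustrative, not part of the original source.
from gluon.contrib.markmin.markmin2html import render

sample = ("# Title\n\n"
          "Some **bold** text, ``print('hi')``:python, ``note``:wiki "
          "and a [[link http://example.com]]")

html = render(
    sample,
    extra=dict(wiki=lambda code: '<div class="wiki">%s</div>' % code),  # handler for ``...``:wiki blocks
    allowed=dict(code=('python', 'cpp', 'wiki')),  # classes outside this tuple are stripped
    sep='p',             # wrap text blocks in <p>...</p>
    class_prefix='doc_', # emitted classes become doc_<name>
    id_prefix='doc_',    # emitted ids become doc_<name>
)
print(html)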
joeferraro/MavensMate-SublimeText
|
af36de7ffaa9b0541f446145736a48b1c66ac0cf
|
mavensmate.py
|
python
|
DeployResourceBundleCommand.panel_done
|
(self, picked)
|
[] |
def panel_done(self, picked):
if 0 > picked < len(self.results):
return
bundle_path = os.path.join(util.mm_project_directory(),"resource-bundles",self.results[picked])
mm.call('deploy-resource-bundle', True, body={ "paths": [ bundle_path ] }, context=self, message="Deploying resource bundle...")
|
[
"def",
"panel_done",
"(",
"self",
",",
"picked",
")",
":",
"if",
"0",
">",
"picked",
"<",
"len",
"(",
"self",
".",
"results",
")",
":",
"return",
"bundle_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"util",
".",
"mm_project_directory",
"(",
")",
",",
"\"resource-bundles\"",
",",
"self",
".",
"results",
"[",
"picked",
"]",
")",
"mm",
".",
"call",
"(",
"'deploy-resource-bundle'",
",",
"True",
",",
"body",
"=",
"{",
"\"paths\"",
":",
"[",
"bundle_path",
"]",
"}",
",",
"context",
"=",
"self",
",",
"message",
"=",
"\"Deploying resource bundle...\"",
")"
] |
https://github.com/joeferraro/MavensMate-SublimeText/blob/af36de7ffaa9b0541f446145736a48b1c66ac0cf/mavensmate.py#L964-L968
|
||||
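One note on the guard in panel_done above: the chained comparison 0 > picked < len(self.results) only short-circuits when picked is negative (the Sublime Text cancel case, picked == -1); an index at or past the end of the list would fall through. A hedged sketch of a stricter bounds check follows, using a hypothetical results list rather than the command's real state.

# Sketch of a strict bounds check for a quick-panel callback; 'results' is a
# hypothetical stand-in for self.results, not part of the original command.
def pick_from_panel(picked, results):
    # Reject both the cancel sentinel (-1) and any out-of-range index.
    if not (0 <= picked < len(results)):
        return None
    return results[picked]

assert pick_from_panel(-1, ["a", "b"]) is None  # panel cancelled
assert pick_from_panel(5, ["a", "b"]) is None   # index out of range
assert pick_from_panel(1, ["a", "b"]) == "b"    # valid selection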
BigBrotherBot/big-brother-bot
|
848823c71413c86e7f1ff9584f43e08d40a7f2c0
|
b3/parsers/frostbite2/abstractParser.py
|
python
|
AbstractParser.close_frostbite_connection
|
(self)
|
Close the connection with the Frostbite2 server.
|
Close the connection with the Frostbite2 server.
|
[
"Close",
"the",
"connection",
"with",
"the",
"Frostbite2",
"server",
"."
] |
def close_frostbite_connection(self):
"""
Close the connection with the Frostbite2 server.
"""
try:
self._serverConnection.stop()
except Exception:
pass
self._serverConnection = None
|
[
"def",
"close_frostbite_connection",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_serverConnection",
".",
"stop",
"(",
")",
"except",
"Exception",
":",
"pass",
"self",
".",
"_serverConnection",
"=",
"None"
] |
https://github.com/BigBrotherBot/big-brother-bot/blob/848823c71413c86e7f1ff9584f43e08d40a7f2c0/b3/parsers/frostbite2/abstractParser.py#L327-L335
|
||
MDAnalysis/mdanalysis
|
3488df3cdb0c29ed41c4fb94efe334b541e31b21
|
package/MDAnalysis/coordinates/CRD.py
|
python
|
CRDWriter.write
|
(self, selection, frame=None)
|
Write selection at current trajectory frame to file.
Parameters
----------
selection : AtomGroup
group of atoms to be written
frame : int (optional)
Move the trajectory to frame `frame`; by default, write
the current frame.
|
Write selection at current trajectory frame to file.
|
[
"Write",
"selection",
"at",
"current",
"trajectory",
"frame",
"to",
"file",
"."
] |
def write(self, selection, frame=None):
"""Write selection at current trajectory frame to file.
Parameters
----------
selection : AtomGroup
group of atoms to be written
frame : int (optional)
Move the trajectory to frame `frame`; by default, write
the current frame.
"""
try:
u = selection.universe
except AttributeError:
errmsg = "Input obj is neither an AtomGroup or Universe"
raise TypeError(errmsg) from None
if frame is not None:
u.trajectory[frame] # advance to frame
else:
try:
frame = u.trajectory.ts.frame
except AttributeError:
frame = 0 # should catch cases when we are analyzing a single PDB (?)
atoms = selection.atoms # make sure to use atoms (Issue 46)
coor = atoms.positions # can write from selection == Universe (Issue 49)
n_atoms = len(atoms)
# Detect which format string we're using to output (EXT or not)
# *len refers to how to truncate various things,
# depending on output format!
if n_atoms > 99999:
at_fmt = self.fmt['ATOM_EXT']
serial_len = 10
resid_len = 8
totres_len = 10
else:
at_fmt = self.fmt['ATOM']
serial_len = 5
resid_len = 4
totres_len = 5
# Check for attributes, use defaults for missing ones
attrs = {}
missing_topology = []
for attr, default in (
('resnames', itertools.cycle(('UNK',))),
# Resids *must* be an array because we index it later
('resids', np.ones(n_atoms, dtype=int)),
('names', itertools.cycle(('X',))),
('tempfactors', itertools.cycle((0.0,))),
):
try:
attrs[attr] = getattr(atoms, attr)
except (NoDataError, AttributeError):
attrs[attr] = default
missing_topology.append(attr)
# ChainIDs - Try ChainIDs first, fall back to Segids
try:
attrs['chainIDs'] = atoms.chainIDs
except (NoDataError, AttributeError):
# try looking for segids instead
try:
attrs['chainIDs'] = atoms.segids
except (NoDataError, AttributeError):
attrs['chainIDs'] = itertools.cycle(('',))
missing_topology.append(attr)
if missing_topology:
warnings.warn(
"Supplied AtomGroup was missing the following attributes: "
"{miss}. These will be written with default values. "
"".format(miss=', '.join(missing_topology)))
with util.openany(self.filename, 'w') as crd:
# Write Title
crd.write(self.fmt['TITLE'].format(
frame=frame, where=u.trajectory.filename))
crd.write("*\n")
# Write NUMATOMS
if n_atoms > 99999:
crd.write(self.fmt['NUMATOMS_EXT'].format(n_atoms))
else:
crd.write(self.fmt['NUMATOMS'].format(n_atoms))
# Write all atoms
current_resid = 1
resids = attrs['resids']
for i, pos, resname, name, chainID, resid, tempfactor in zip(
range(n_atoms), coor, attrs['resnames'], attrs['names'],
attrs['chainIDs'], attrs['resids'], attrs['tempfactors']):
if not i == 0 and resids[i] != resids[i-1]:
current_resid += 1
# Truncate numbers
serial = util.ltruncate_int(i + 1, serial_len)
resid = util.ltruncate_int(resid, resid_len)
current_resid = util.ltruncate_int(current_resid, totres_len)
crd.write(at_fmt.format(
serial=serial, totRes=current_resid, resname=resname,
name=name, pos=pos, chainID=chainID,
resSeq=resid, tempfactor=tempfactor))
|
[
"def",
"write",
"(",
"self",
",",
"selection",
",",
"frame",
"=",
"None",
")",
":",
"try",
":",
"u",
"=",
"selection",
".",
"universe",
"except",
"AttributeError",
":",
"errmsg",
"=",
"\"Input obj is neither an AtomGroup or Universe\"",
"raise",
"TypeError",
"(",
"errmsg",
")",
"from",
"None",
"if",
"frame",
"is",
"not",
"None",
":",
"u",
".",
"trajectory",
"[",
"frame",
"]",
"# advance to frame",
"else",
":",
"try",
":",
"frame",
"=",
"u",
".",
"trajectory",
".",
"ts",
".",
"frame",
"except",
"AttributeError",
":",
"frame",
"=",
"0",
"# should catch cases when we are analyzing a single PDB (?)",
"atoms",
"=",
"selection",
".",
"atoms",
"# make sure to use atoms (Issue 46)",
"coor",
"=",
"atoms",
".",
"positions",
"# can write from selection == Universe (Issue 49)",
"n_atoms",
"=",
"len",
"(",
"atoms",
")",
"# Detect which format string we're using to output (EXT or not)",
"# *len refers to how to truncate various things,",
"# depending on output format!",
"if",
"n_atoms",
">",
"99999",
":",
"at_fmt",
"=",
"self",
".",
"fmt",
"[",
"'ATOM_EXT'",
"]",
"serial_len",
"=",
"10",
"resid_len",
"=",
"8",
"totres_len",
"=",
"10",
"else",
":",
"at_fmt",
"=",
"self",
".",
"fmt",
"[",
"'ATOM'",
"]",
"serial_len",
"=",
"5",
"resid_len",
"=",
"4",
"totres_len",
"=",
"5",
"# Check for attributes, use defaults for missing ones",
"attrs",
"=",
"{",
"}",
"missing_topology",
"=",
"[",
"]",
"for",
"attr",
",",
"default",
"in",
"(",
"(",
"'resnames'",
",",
"itertools",
".",
"cycle",
"(",
"(",
"'UNK'",
",",
")",
")",
")",
",",
"# Resids *must* be an array because we index it later",
"(",
"'resids'",
",",
"np",
".",
"ones",
"(",
"n_atoms",
",",
"dtype",
"=",
"int",
")",
")",
",",
"(",
"'names'",
",",
"itertools",
".",
"cycle",
"(",
"(",
"'X'",
",",
")",
")",
")",
",",
"(",
"'tempfactors'",
",",
"itertools",
".",
"cycle",
"(",
"(",
"0.0",
",",
")",
")",
")",
",",
")",
":",
"try",
":",
"attrs",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"atoms",
",",
"attr",
")",
"except",
"(",
"NoDataError",
",",
"AttributeError",
")",
":",
"attrs",
"[",
"attr",
"]",
"=",
"default",
"missing_topology",
".",
"append",
"(",
"attr",
")",
"# ChainIDs - Try ChainIDs first, fall back to Segids",
"try",
":",
"attrs",
"[",
"'chainIDs'",
"]",
"=",
"atoms",
".",
"chainIDs",
"except",
"(",
"NoDataError",
",",
"AttributeError",
")",
":",
"# try looking for segids instead",
"try",
":",
"attrs",
"[",
"'chainIDs'",
"]",
"=",
"atoms",
".",
"segids",
"except",
"(",
"NoDataError",
",",
"AttributeError",
")",
":",
"attrs",
"[",
"'chainIDs'",
"]",
"=",
"itertools",
".",
"cycle",
"(",
"(",
"''",
",",
")",
")",
"missing_topology",
".",
"append",
"(",
"attr",
")",
"if",
"missing_topology",
":",
"warnings",
".",
"warn",
"(",
"\"Supplied AtomGroup was missing the following attributes: \"",
"\"{miss}. These will be written with default values. \"",
"\"\"",
".",
"format",
"(",
"miss",
"=",
"', '",
".",
"join",
"(",
"missing_topology",
")",
")",
")",
"with",
"util",
".",
"openany",
"(",
"self",
".",
"filename",
",",
"'w'",
")",
"as",
"crd",
":",
"# Write Title",
"crd",
".",
"write",
"(",
"self",
".",
"fmt",
"[",
"'TITLE'",
"]",
".",
"format",
"(",
"frame",
"=",
"frame",
",",
"where",
"=",
"u",
".",
"trajectory",
".",
"filename",
")",
")",
"crd",
".",
"write",
"(",
"\"*\\n\"",
")",
"# Write NUMATOMS",
"if",
"n_atoms",
">",
"99999",
":",
"crd",
".",
"write",
"(",
"self",
".",
"fmt",
"[",
"'NUMATOMS_EXT'",
"]",
".",
"format",
"(",
"n_atoms",
")",
")",
"else",
":",
"crd",
".",
"write",
"(",
"self",
".",
"fmt",
"[",
"'NUMATOMS'",
"]",
".",
"format",
"(",
"n_atoms",
")",
")",
"# Write all atoms",
"current_resid",
"=",
"1",
"resids",
"=",
"attrs",
"[",
"'resids'",
"]",
"for",
"i",
",",
"pos",
",",
"resname",
",",
"name",
",",
"chainID",
",",
"resid",
",",
"tempfactor",
"in",
"zip",
"(",
"range",
"(",
"n_atoms",
")",
",",
"coor",
",",
"attrs",
"[",
"'resnames'",
"]",
",",
"attrs",
"[",
"'names'",
"]",
",",
"attrs",
"[",
"'chainIDs'",
"]",
",",
"attrs",
"[",
"'resids'",
"]",
",",
"attrs",
"[",
"'tempfactors'",
"]",
")",
":",
"if",
"not",
"i",
"==",
"0",
"and",
"resids",
"[",
"i",
"]",
"!=",
"resids",
"[",
"i",
"-",
"1",
"]",
":",
"current_resid",
"+=",
"1",
"# Truncate numbers",
"serial",
"=",
"util",
".",
"ltruncate_int",
"(",
"i",
"+",
"1",
",",
"serial_len",
")",
"resid",
"=",
"util",
".",
"ltruncate_int",
"(",
"resid",
",",
"resid_len",
")",
"current_resid",
"=",
"util",
".",
"ltruncate_int",
"(",
"current_resid",
",",
"totres_len",
")",
"crd",
".",
"write",
"(",
"at_fmt",
".",
"format",
"(",
"serial",
"=",
"serial",
",",
"totRes",
"=",
"current_resid",
",",
"resname",
"=",
"resname",
",",
"name",
"=",
"name",
",",
"pos",
"=",
"pos",
",",
"chainID",
"=",
"chainID",
",",
"resSeq",
"=",
"resid",
",",
"tempfactor",
"=",
"tempfactor",
")",
")"
] |
https://github.com/MDAnalysis/mdanalysis/blob/3488df3cdb0c29ed41c4fb94efe334b541e31b21/package/MDAnalysis/coordinates/CRD.py#L159-L264
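The writer above is normally reached through MDAnalysis' high-level I/O rather than called directly. A minimal usage sketch, assuming MDAnalysis is installed and "input.pdb" stands in for any structure file it can read:

import MDAnalysis as mda

# Load any supported structure; the .crd extension selects the CRD writer shown above.
u = mda.Universe("input.pdb")
u.atoms.write("output.crd")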
|
||
nneonneo/2048-ai
|
5b892173be44f482e521fd028799a3a605b68404
|
chromectrl.py
|
python
|
ChromeDebuggerControl._send_cmd_noresult
|
(self, method, **params)
|
Send a command and ignore the result.
|
Send a command and ignore the result.
|
[
"Send",
"a",
"command",
"and",
"ignore",
"the",
"result",
"."
] |
def _send_cmd_noresult(self, method, **params):
''' Send a command and ignore the result. '''
id = next(self.req_counter)
out = {'id': id, 'method': method}
if params:
out['params'] = params
self.ws.send(json.dumps(out))
|
[
"def",
"_send_cmd_noresult",
"(",
"self",
",",
"method",
",",
"*",
"*",
"params",
")",
":",
"id",
"=",
"next",
"(",
"self",
".",
"req_counter",
")",
"out",
"=",
"{",
"'id'",
":",
"id",
",",
"'method'",
":",
"method",
"}",
"if",
"params",
":",
"out",
"[",
"'params'",
"]",
"=",
"params",
"self",
".",
"ws",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"out",
")",
")"
] |
https://github.com/nneonneo/2048-ai/blob/5b892173be44f482e521fd028799a3a605b68404/chromectrl.py#L78-L84
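The helper above just frames a Chrome DevTools Protocol request for the debugger websocket. A standalone sketch of the same framing using only the standard library; the method name and URL are illustrative:

import itertools
import json

req_counter = itertools.count(1)

def build_cmd(method, **params):
    # Same wire format the method above sends: {"id": ..., "method": ..., "params": {...}}
    msg = {'id': next(req_counter), 'method': method}
    if params:
        msg['params'] = params
    return json.dumps(msg)

print(build_cmd('Page.navigate', url='https://example.com'))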
|
||
keiffster/program-y
|
8c99b56f8c32f01a7b9887b5daae9465619d0385
|
src/programy/dialog/convo_mgr.py
|
python
|
ConversationManager.empty
|
(self)
|
[] |
def empty(self):
self._conversations.clear()
|
[
"def",
"empty",
"(",
"self",
")",
":",
"self",
".",
"_conversations",
".",
"clear",
"(",
")"
] |
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/dialog/convo_mgr.py#L46-L47
|
||||
cocos2d/cocos2d-x-samples
|
9f1472d9083a18853bb1fe97a337292f42abe44a
|
samples/GAFExamples/Example.android/build_native.py
|
python
|
get_num_of_cpu
|
()
|
The build process can be accelerated by running multiple concurrent job processes using the -j-option.
|
The build process can be accelerated by running multiple concurrent job processes using the -j-option.
|
[
"The",
"build",
"process",
"can",
"be",
"accelerated",
"by",
"running",
"multiple",
"concurrent",
"job",
"processes",
"using",
"the",
"-",
"j",
"-",
"option",
"."
] |
def get_num_of_cpu():
''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.
'''
try:
platform = sys.platform
if platform == 'win32':
if 'NUMBER_OF_PROCESSORS' in os.environ:
return int(os.environ['NUMBER_OF_PROCESSORS'])
else:
return 1
else:
from numpy.distutils import cpuinfo
return cpuinfo.cpu._getNCPUs()
except Exception:
print "Can't know cpuinfo, use default 1 cpu"
return 1
|
[
"def",
"get_num_of_cpu",
"(",
")",
":",
"try",
":",
"platform",
"=",
"sys",
".",
"platform",
"if",
"platform",
"==",
"'win32'",
":",
"if",
"'NUMBER_OF_PROCESSORS'",
"in",
"os",
".",
"environ",
":",
"return",
"int",
"(",
"os",
".",
"environ",
"[",
"'NUMBER_OF_PROCESSORS'",
"]",
")",
"else",
":",
"return",
"1",
"else",
":",
"from",
"numpy",
".",
"distutils",
"import",
"cpuinfo",
"return",
"cpuinfo",
".",
"cpu",
".",
"_getNCPUs",
"(",
")",
"except",
"Exception",
":",
"print",
"\"Can't know cpuinfo, use default 1 cpu\"",
"return",
"1"
] |
https://github.com/cocos2d/cocos2d-x-samples/blob/9f1472d9083a18853bb1fe97a337292f42abe44a/samples/GAFExamples/Example.android/build_native.py#L11-L26
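The helper above predates Python 3 and falls back to numpy's cpuinfo off Windows. On any recent Python the standard library covers the same need; a minimal sketch:

import os

def get_num_of_cpu():
    # os.cpu_count() works across Windows, Linux and macOS; default to 1 when unknown.
    return os.cpu_count() or 1

print("-j%d" % get_num_of_cpu())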
|
||
glinscott/fishtest
|
8d2b823a63fbe7be169a2177a130018c389d7aea
|
worker/packages/requests/adapters.py
|
python
|
HTTPAdapter.send
|
(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None)
|
return self.build_response(request, resp)
|
Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
|
Sends PreparedRequest object. Returns Response object.
|
[
"Sends",
"PreparedRequest",
"object",
".",
"Returns",
"Response",
"object",
"."
] |
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
try:
conn = self.get_connection(request.url, proxies)
except LocationValueError as e:
raise InvalidURL(e, request=request)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7, use buffering of HTTP responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 3.3+
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
|
[
"def",
"send",
"(",
"self",
",",
"request",
",",
"stream",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"cert",
"=",
"None",
",",
"proxies",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"self",
".",
"get_connection",
"(",
"request",
".",
"url",
",",
"proxies",
")",
"except",
"LocationValueError",
"as",
"e",
":",
"raise",
"InvalidURL",
"(",
"e",
",",
"request",
"=",
"request",
")",
"self",
".",
"cert_verify",
"(",
"conn",
",",
"request",
".",
"url",
",",
"verify",
",",
"cert",
")",
"url",
"=",
"self",
".",
"request_url",
"(",
"request",
",",
"proxies",
")",
"self",
".",
"add_headers",
"(",
"request",
",",
"stream",
"=",
"stream",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"cert",
"=",
"cert",
",",
"proxies",
"=",
"proxies",
")",
"chunked",
"=",
"not",
"(",
"request",
".",
"body",
"is",
"None",
"or",
"'Content-Length'",
"in",
"request",
".",
"headers",
")",
"if",
"isinstance",
"(",
"timeout",
",",
"tuple",
")",
":",
"try",
":",
"connect",
",",
"read",
"=",
"timeout",
"timeout",
"=",
"TimeoutSauce",
"(",
"connect",
"=",
"connect",
",",
"read",
"=",
"read",
")",
"except",
"ValueError",
"as",
"e",
":",
"# this may raise a string formatting error.",
"err",
"=",
"(",
"\"Invalid timeout {}. Pass a (connect, read) \"",
"\"timeout tuple, or a single float to set \"",
"\"both timeouts to the same value\"",
".",
"format",
"(",
"timeout",
")",
")",
"raise",
"ValueError",
"(",
"err",
")",
"elif",
"isinstance",
"(",
"timeout",
",",
"TimeoutSauce",
")",
":",
"pass",
"else",
":",
"timeout",
"=",
"TimeoutSauce",
"(",
"connect",
"=",
"timeout",
",",
"read",
"=",
"timeout",
")",
"try",
":",
"if",
"not",
"chunked",
":",
"resp",
"=",
"conn",
".",
"urlopen",
"(",
"method",
"=",
"request",
".",
"method",
",",
"url",
"=",
"url",
",",
"body",
"=",
"request",
".",
"body",
",",
"headers",
"=",
"request",
".",
"headers",
",",
"redirect",
"=",
"False",
",",
"assert_same_host",
"=",
"False",
",",
"preload_content",
"=",
"False",
",",
"decode_content",
"=",
"False",
",",
"retries",
"=",
"self",
".",
"max_retries",
",",
"timeout",
"=",
"timeout",
")",
"# Send the request.",
"else",
":",
"if",
"hasattr",
"(",
"conn",
",",
"'proxy_pool'",
")",
":",
"conn",
"=",
"conn",
".",
"proxy_pool",
"low_conn",
"=",
"conn",
".",
"_get_conn",
"(",
"timeout",
"=",
"DEFAULT_POOL_TIMEOUT",
")",
"try",
":",
"low_conn",
".",
"putrequest",
"(",
"request",
".",
"method",
",",
"url",
",",
"skip_accept_encoding",
"=",
"True",
")",
"for",
"header",
",",
"value",
"in",
"request",
".",
"headers",
".",
"items",
"(",
")",
":",
"low_conn",
".",
"putheader",
"(",
"header",
",",
"value",
")",
"low_conn",
".",
"endheaders",
"(",
")",
"for",
"i",
"in",
"request",
".",
"body",
":",
"low_conn",
".",
"send",
"(",
"hex",
"(",
"len",
"(",
"i",
")",
")",
"[",
"2",
":",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"low_conn",
".",
"send",
"(",
"b'\\r\\n'",
")",
"low_conn",
".",
"send",
"(",
"i",
")",
"low_conn",
".",
"send",
"(",
"b'\\r\\n'",
")",
"low_conn",
".",
"send",
"(",
"b'0\\r\\n\\r\\n'",
")",
"# Receive the response from the server",
"try",
":",
"# For Python 2.7, use buffering of HTTP responses",
"r",
"=",
"low_conn",
".",
"getresponse",
"(",
"buffering",
"=",
"True",
")",
"except",
"TypeError",
":",
"# For compatibility with Python 3.3+",
"r",
"=",
"low_conn",
".",
"getresponse",
"(",
")",
"resp",
"=",
"HTTPResponse",
".",
"from_httplib",
"(",
"r",
",",
"pool",
"=",
"conn",
",",
"connection",
"=",
"low_conn",
",",
"preload_content",
"=",
"False",
",",
"decode_content",
"=",
"False",
")",
"except",
":",
"# If we hit any problems here, clean up the connection.",
"# Then, reraise so that we can handle the actual exception.",
"low_conn",
".",
"close",
"(",
")",
"raise",
"except",
"(",
"ProtocolError",
",",
"socket",
".",
"error",
")",
"as",
"err",
":",
"raise",
"ConnectionError",
"(",
"err",
",",
"request",
"=",
"request",
")",
"except",
"MaxRetryError",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
".",
"reason",
",",
"ConnectTimeoutError",
")",
":",
"# TODO: Remove this in 3.0.0: see #2811",
"if",
"not",
"isinstance",
"(",
"e",
".",
"reason",
",",
"NewConnectionError",
")",
":",
"raise",
"ConnectTimeout",
"(",
"e",
",",
"request",
"=",
"request",
")",
"if",
"isinstance",
"(",
"e",
".",
"reason",
",",
"ResponseError",
")",
":",
"raise",
"RetryError",
"(",
"e",
",",
"request",
"=",
"request",
")",
"if",
"isinstance",
"(",
"e",
".",
"reason",
",",
"_ProxyError",
")",
":",
"raise",
"ProxyError",
"(",
"e",
",",
"request",
"=",
"request",
")",
"if",
"isinstance",
"(",
"e",
".",
"reason",
",",
"_SSLError",
")",
":",
"# This branch is for urllib3 v1.22 and later.",
"raise",
"SSLError",
"(",
"e",
",",
"request",
"=",
"request",
")",
"raise",
"ConnectionError",
"(",
"e",
",",
"request",
"=",
"request",
")",
"except",
"ClosedPoolError",
"as",
"e",
":",
"raise",
"ConnectionError",
"(",
"e",
",",
"request",
"=",
"request",
")",
"except",
"_ProxyError",
"as",
"e",
":",
"raise",
"ProxyError",
"(",
"e",
")",
"except",
"(",
"_SSLError",
",",
"_HTTPError",
")",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"_SSLError",
")",
":",
"# This branch is for urllib3 versions earlier than v1.22",
"raise",
"SSLError",
"(",
"e",
",",
"request",
"=",
"request",
")",
"elif",
"isinstance",
"(",
"e",
",",
"ReadTimeoutError",
")",
":",
"raise",
"ReadTimeout",
"(",
"e",
",",
"request",
"=",
"request",
")",
"else",
":",
"raise",
"return",
"self",
".",
"build_response",
"(",
"request",
",",
"resp",
")"
] |
https://github.com/glinscott/fishtest/blob/8d2b823a63fbe7be169a2177a130018c389d7aea/worker/packages/requests/adapters.py#L394-L533
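Callers rarely invoke send() directly; it is driven by a requests.Session through a mounted adapter. A short usage sketch; the retry count, URL and timeouts are illustrative:

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
# Requests whose URL starts with this prefix are routed through the adapter,
# which ultimately calls HTTPAdapter.send() shown above.
session.mount('https://', HTTPAdapter(max_retries=3))
# The (connect, read) tuple matches the timeout handling inside send().
response = session.get('https://example.com', timeout=(3.05, 27))
print(response.status_code)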
|
|
miraclewkf/ResNeXt-PyTorch
|
0b1ad0252aba44b070a3d191d209443fb08f9f9c
|
resnext.py
|
python
|
resnext101
|
(**kwargs)
|
return model
|
Constructs a ResNeXt-101 model.
|
Constructs a ResNeXt-101 model.
|
[
"Constructs",
"a",
"ResNeXt",
"-",
"101",
"model",
"."
] |
def resnext101(**kwargs):
"""Constructs a ResNeXt-101 model.
"""
model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
|
[
"def",
"resnext101",
"(",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"ResNeXt",
"(",
"Bottleneck",
",",
"[",
"3",
",",
"4",
",",
"23",
",",
"3",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"model"
] |
https://github.com/miraclewkf/ResNeXt-PyTorch/blob/0b1ad0252aba44b070a3d191d209443fb08f9f9c/resnext.py#L171-L175
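A quick smoke test of the constructor above, assuming the file is importable as resnext, PyTorch is installed, and the ResNeXt constructor's remaining arguments have usable defaults; the input shape is the usual 224x224 ImageNet crop:

import torch
from resnext import resnext101  # assumed import path for the file above

model = resnext101()
model.eval()
with torch.no_grad():
    # One 3-channel 224x224 image; the output is a (1, num_classes) score tensor.
    scores = model(torch.randn(1, 3, 224, 224))
print(scores.shape)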
|
|
grnet/synnefo
|
d06ec8c7871092131cdaabf6b03ed0b504c93e43
|
contrib/snf-pithos-tools/setup.py
|
python
|
find_package_data
|
(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False)
|
return out
|
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
|
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
|
[
"Return",
"a",
"dictionary",
"suitable",
"for",
"use",
"in",
"package_data",
"in",
"a",
"distutils",
"setup",
".",
"py",
"file",
"."
] |
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append(
(fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
|
[
"def",
"find_package_data",
"(",
"where",
"=",
"\".\"",
",",
"package",
"=",
"\"\"",
",",
"exclude",
"=",
"standard_exclude",
",",
"exclude_directories",
"=",
"standard_exclude_directories",
",",
"only_in_packages",
"=",
"True",
",",
"show_ignored",
"=",
"False",
")",
":",
"out",
"=",
"{",
"}",
"stack",
"=",
"[",
"(",
"convert_path",
"(",
"where",
")",
",",
"\"\"",
",",
"package",
",",
"only_in_packages",
")",
"]",
"while",
"stack",
":",
"where",
",",
"prefix",
",",
"package",
",",
"only_in_packages",
"=",
"stack",
".",
"pop",
"(",
"0",
")",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"where",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"where",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"fn",
")",
":",
"bad_name",
"=",
"False",
"for",
"pattern",
"in",
"exclude_directories",
":",
"if",
"(",
"fnmatchcase",
"(",
"name",
",",
"pattern",
")",
"or",
"fn",
".",
"lower",
"(",
")",
"==",
"pattern",
".",
"lower",
"(",
")",
")",
":",
"bad_name",
"=",
"True",
"if",
"show_ignored",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"Directory %s ignored by pattern %s\"",
"%",
"(",
"fn",
",",
"pattern",
")",
")",
"break",
"if",
"bad_name",
":",
"continue",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"fn",
",",
"\"__init__.py\"",
")",
")",
"and",
"not",
"prefix",
")",
":",
"if",
"not",
"package",
":",
"new_package",
"=",
"name",
"else",
":",
"new_package",
"=",
"package",
"+",
"\".\"",
"+",
"name",
"stack",
".",
"append",
"(",
"(",
"fn",
",",
"\"\"",
",",
"new_package",
",",
"False",
")",
")",
"else",
":",
"stack",
".",
"append",
"(",
"(",
"fn",
",",
"prefix",
"+",
"name",
"+",
"\"/\"",
",",
"package",
",",
"only_in_packages",
")",
")",
"elif",
"package",
"or",
"not",
"only_in_packages",
":",
"# is a file",
"bad_name",
"=",
"False",
"for",
"pattern",
"in",
"exclude",
":",
"if",
"(",
"fnmatchcase",
"(",
"name",
",",
"pattern",
")",
"or",
"fn",
".",
"lower",
"(",
")",
"==",
"pattern",
".",
"lower",
"(",
")",
")",
":",
"bad_name",
"=",
"True",
"if",
"show_ignored",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"File %s ignored by pattern %s\"",
"%",
"(",
"fn",
",",
"pattern",
")",
")",
"break",
"if",
"bad_name",
":",
"continue",
"out",
".",
"setdefault",
"(",
"package",
",",
"[",
"]",
")",
".",
"append",
"(",
"prefix",
"+",
"name",
")",
"return",
"out"
] |
https://github.com/grnet/synnefo/blob/d06ec8c7871092131cdaabf6b03ed0b504c93e43/contrib/snf-pithos-tools/setup.py#L69-L146
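In the original setup.py this helper feeds package_data. A hedged sketch of that wiring; the distribution name and layout are placeholders, and find_package_data is assumed to be defined earlier in the same setup.py as shown above:

from setuptools import setup, find_packages

setup(
    name='example-dist',   # placeholder distribution name
    version='0.0.0',
    packages=find_packages(),
    # Collect non-Python files that live inside packages; the excludes come from
    # the standard_exclude defaults used by find_package_data above.
    package_data=find_package_data(where='.', only_in_packages=True),
)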
|
|
Galvant/InstrumentKit
|
6d216bd7f8e9ec7918762fe5fb7a306d5bd0eb1f
|
instruments/qubitekk/cc1.py
|
python
|
CC1.subtract
|
(self)
|
return self.query("SUBT?").strip() == self._bool[0]
|
Gets/sets the subtract enable status
:type: `bool`
|
Gets/sets the subtract enable status
|
[
"Gets",
"/",
"sets",
"the",
"subtract",
"enable",
"status"
] |
def subtract(self):
"""
Gets/sets the subtract enable status
:type: `bool`
"""
return self.query("SUBT?").strip() == self._bool[0]
|
[
"def",
"subtract",
"(",
"self",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"SUBT?\"",
")",
".",
"strip",
"(",
")",
"==",
"self",
".",
"_bool",
"[",
"0",
"]"
] |
https://github.com/Galvant/InstrumentKit/blob/6d216bd7f8e9ec7918762fe5fb7a306d5bd0eb1f/instruments/qubitekk/cc1.py#L186-L192
|
|
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/orm/session.py
|
python
|
sessionmaker.__call__
|
(self, **local_kw)
|
return self.class_(**local_kw)
|
Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
|
Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
|
[
"Produce",
"a",
"new",
":",
"class",
":",
".",
"Session",
"object",
"using",
"the",
"configuration",
"established",
"in",
"this",
":",
"class",
":",
".",
"sessionmaker",
"."
] |
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == 'info' and 'info' in local_kw:
d = v.copy()
d.update(local_kw['info'])
local_kw['info'] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
|
[
"def",
"__call__",
"(",
"self",
",",
"*",
"*",
"local_kw",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"kw",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'info'",
"and",
"'info'",
"in",
"local_kw",
":",
"d",
"=",
"v",
".",
"copy",
"(",
")",
"d",
".",
"update",
"(",
"local_kw",
"[",
"'info'",
"]",
")",
"local_kw",
"[",
"'info'",
"]",
"=",
"d",
"else",
":",
"local_kw",
".",
"setdefault",
"(",
"k",
",",
"v",
")",
"return",
"self",
".",
"class_",
"(",
"*",
"*",
"local_kw",
")"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/orm/session.py#L2342-L2360
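The docstring above already names the calling pattern; spelled out against a throwaway SQLite engine, with an illustrative engine URL and info payload:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:')
Session = sessionmaker(bind=engine)
# Invokes sessionmaker.__call__(); per-call keyword arguments override the
# defaults captured when the factory was configured, with 'info' merged.
session = Session(info={'request_id': 42})
print(session.info)
session.close()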
|
|
xyuanmu/XX-Mini
|
5daf3a3d741566bfd18655ea9eb94f546bd0c70a
|
lib/pyasn1/type/univ.py
|
python
|
ObjectIdentifier.prettyIn
|
(self, value)
|
return value
|
Dotted -> tuple of numerics OID converter
|
Dotted -> tuple of numerics OID converter
|
[
"Dotted",
"-",
">",
"tuple",
"of",
"numerics",
"OID",
"converter"
] |
def prettyIn(self, value):
"""Dotted -> tuple of numerics OID converter"""
if isinstance(value, tuple):
pass
elif isinstance(value, ObjectIdentifier):
return tuple(value)
elif isinstance(value, str):
r = []
for element in [ x for x in value.split('.') if x != '' ]:
try:
r.append(int(element, 0))
except ValueError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__, sys.exc_info()[1])
)
value = tuple(r)
else:
try:
value = tuple(value)
except TypeError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__,sys.exc_info()[1])
)
for x in value:
if not isinstance(x, intTypes) or x < 0:
raise error.PyAsn1Error(
'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
)
return value
|
[
"def",
"prettyIn",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"value",
",",
"ObjectIdentifier",
")",
":",
"return",
"tuple",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"r",
"=",
"[",
"]",
"for",
"element",
"in",
"[",
"x",
"for",
"x",
"in",
"value",
".",
"split",
"(",
"'.'",
")",
"if",
"x",
"!=",
"''",
"]",
":",
"try",
":",
"r",
".",
"append",
"(",
"int",
"(",
"element",
",",
"0",
")",
")",
"except",
"ValueError",
":",
"raise",
"error",
".",
"PyAsn1Error",
"(",
"'Malformed Object ID %s at %s: %s'",
"%",
"(",
"str",
"(",
"value",
")",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
")",
"value",
"=",
"tuple",
"(",
"r",
")",
"else",
":",
"try",
":",
"value",
"=",
"tuple",
"(",
"value",
")",
"except",
"TypeError",
":",
"raise",
"error",
".",
"PyAsn1Error",
"(",
"'Malformed Object ID %s at %s: %s'",
"%",
"(",
"str",
"(",
"value",
")",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
")",
"for",
"x",
"in",
"value",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"intTypes",
")",
"or",
"x",
"<",
"0",
":",
"raise",
"error",
".",
"PyAsn1Error",
"(",
"'Invalid sub-ID in %s at %s'",
"%",
"(",
"value",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"value"
] |
https://github.com/xyuanmu/XX-Mini/blob/5daf3a3d741566bfd18655ea9eb94f546bd0c70a/lib/pyasn1/type/univ.py#L470-L502
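prettyIn is what lets the dotted-string form be passed straight to the constructor. A short pyasn1 usage sketch; the OID value is illustrative:

from pyasn1.type import univ

# The dotted string goes through prettyIn() and is stored as a tuple of ints.
oid = univ.ObjectIdentifier('1.3.6.1.2.1.1.1.0')
print(tuple(oid))
print(str(oid))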
|
|
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/sets/set.py
|
python
|
Set_object_union.__richcmp__
|
(self, right, op)
|
return rich_to_bool(op, -1)
|
r"""
Try to compare ``self`` and ``right``.
.. NOTE::
Comparison is basically not implemented, or rather it could
say sets are not equal even though they are. I don't know
how one could implement this for a generic union of sets in
a meaningful manner. So be careful when using this.
EXAMPLES::
sage: Y = Set(ZZ^2).union(Set(ZZ^3))
sage: X = Set(ZZ^3).union(Set(ZZ^2))
sage: X == Y
True
sage: Y == X
True
This illustrates that equality testing for formal unions
can be misleading in general.
::
sage: Set(ZZ).union(Set(QQ)) == Set(QQ)
False
|
r"""
Try to compare ``self`` and ``right``.
|
[
"r",
"Try",
"to",
"compare",
"self",
"and",
"right",
"."
] |
def __richcmp__(self, right, op):
r"""
Try to compare ``self`` and ``right``.
.. NOTE::
Comparison is basically not implemented, or rather it could
say sets are not equal even though they are. I don't know
how one could implement this for a generic union of sets in
a meaningful manner. So be careful when using this.
EXAMPLES::
sage: Y = Set(ZZ^2).union(Set(ZZ^3))
sage: X = Set(ZZ^3).union(Set(ZZ^2))
sage: X == Y
True
sage: Y == X
True
This illustrates that equality testing for formal unions
can be misleading in general.
::
sage: Set(ZZ).union(Set(QQ)) == Set(QQ)
False
"""
if not isinstance(right, Set_generic):
return rich_to_bool(op, -1)
if not isinstance(right, Set_object_union):
return rich_to_bool(op, -1)
if self._X == right._X and self._Y == right._Y or \
self._X == right._Y and self._Y == right._X:
return rich_to_bool(op, 0)
return rich_to_bool(op, -1)
|
[
"def",
"__richcmp__",
"(",
"self",
",",
"right",
",",
"op",
")",
":",
"if",
"not",
"isinstance",
"(",
"right",
",",
"Set_generic",
")",
":",
"return",
"rich_to_bool",
"(",
"op",
",",
"-",
"1",
")",
"if",
"not",
"isinstance",
"(",
"right",
",",
"Set_object_union",
")",
":",
"return",
"rich_to_bool",
"(",
"op",
",",
"-",
"1",
")",
"if",
"self",
".",
"_X",
"==",
"right",
".",
"_X",
"and",
"self",
".",
"_Y",
"==",
"right",
".",
"_Y",
"or",
"self",
".",
"_X",
"==",
"right",
".",
"_Y",
"and",
"self",
".",
"_Y",
"==",
"right",
".",
"_X",
":",
"return",
"rich_to_bool",
"(",
"op",
",",
"0",
")",
"return",
"rich_to_bool",
"(",
"op",
",",
"-",
"1",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/sets/set.py#L1375-L1410
|
|
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/CPython/27/Lib/inspect.py
|
python
|
getmodulename
|
(path)
|
Return the module name for a given file, or None.
|
Return the module name for a given file, or None.
|
[
"Return",
"the",
"module",
"name",
"for",
"a",
"given",
"file",
"or",
"None",
"."
] |
def getmodulename(path):
"""Return the module name for a given file, or None."""
info = getmoduleinfo(path)
if info: return info[0]
|
[
"def",
"getmodulename",
"(",
"path",
")",
":",
"info",
"=",
"getmoduleinfo",
"(",
"path",
")",
"if",
"info",
":",
"return",
"info",
"[",
"0",
"]"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/CPython/27/Lib/inspect.py#L437-L440
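A quick illustration of the behaviour described above, using the same function from the standard library inspect module; the paths are illustrative:

import inspect

print(inspect.getmodulename('/tmp/project/utils.py'))   # 'utils'
print(inspect.getmodulename('/tmp/project/notes.txt'))  # None: not a module suffix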
|
||
kovidgoyal/calibre
|
2b41671370f2a9eb1109b9ae901ccf915f1bd0c8
|
src/odf/odf2xhtml.py
|
python
|
ODF2XHTML.s_text_s
|
(self, tag, attrs)
|
Generate a number of spaces. We use the non breaking space for
the text:s ODF element.
|
Generate a number of spaces. We use the non breaking space for
the text:s ODF element.
|
[
"Generate",
"a",
"number",
"of",
"spaces",
".",
"We",
"use",
"the",
"non",
"breaking",
"space",
"for",
"the",
"text",
":",
"s",
"ODF",
"element",
"."
] |
def s_text_s(self, tag, attrs):
# Changed by Kovid to fix non breaking spaces being prepended to
# element instead of being part of the text flow.
# We don't use an entity for the nbsp as the contents of self.data will
# be escaped on writeout.
""" Generate a number of spaces. We use the non breaking space for
the text:s ODF element.
"""
try:
c = int(attrs.get((TEXTNS, 'c'), 1))
except:
c = 0
if c > 0:
self.data.append('\u00a0'*c)
|
[
"def",
"s_text_s",
"(",
"self",
",",
"tag",
",",
"attrs",
")",
":",
"# Changed by Kovid to fix non breaking spaces being prepended to",
"# element instead of being part of the text flow.",
"# We don't use an entity for the nbsp as the contents of self.data will",
"# be escaped on writeout.",
"try",
":",
"c",
"=",
"int",
"(",
"attrs",
".",
"get",
"(",
"(",
"TEXTNS",
",",
"'c'",
")",
",",
"1",
")",
")",
"except",
":",
"c",
"=",
"0",
"if",
"c",
">",
"0",
":",
"self",
".",
"data",
".",
"append",
"(",
"'\\u00a0'",
"*",
"c",
")"
] |
https://github.com/kovidgoyal/calibre/blob/2b41671370f2a9eb1109b9ae901ccf915f1bd0c8/src/odf/odf2xhtml.py#L1500-L1513
|
||
hellohaptik/chatbot_ner
|
742104790170ae5b73c583c94db6786549337dc4
|
ner_v2/detectors/temporal/date/standard_date_regex.py
|
python
|
BaseRegexDate._detect_date_ref_month_3
|
(self, date_list, original_list)
|
return date_list, original_list
|
Parser to detect date containing reference date and month like '2 tarikh ko'(hindi)
Args:
Returns:
date_list (list): list of dict containing day, month, year from detected text
original_list (list): list of original text corresponding to values detected
|
Parser to detect date containing reference date and month like '2 tarikh ko'(hindi)
Args:
|
[
"Parser",
"to",
"detect",
"date",
"containing",
"reference",
"date",
"and",
"month",
"like",
"2",
"tarikh",
"ko",
"(",
"hindi",
")",
"Args",
":"
] |
def _detect_date_ref_month_3(self, date_list, original_list):
"""
Parser to detect date containing reference date and month like '2 tarikh ko'(hindi)
Args:
Returns:
date_list (list): list of dict containing day, month, year from detected text
original_list (list): list of original text corresponding to values detected
"""
date_list = date_list or []
original_list = original_list or []
date_ref_month_match = self.regex_date_ref_month_3.findall(self.processed_text)
for date_match in date_ref_month_match:
original = date_match[0]
dd = self._get_int_from_numeral(date_match[1])
if (self.now_date.day > dd and self.past_date_referenced) or \
(self.now_date.day <= dd and not self.past_date_referenced):
mm = self.now_date.month
yy = self.now_date.year
elif self.now_date.day <= dd and self.past_date_referenced:
req_date = self.now_date - relativedelta(months=1)
mm = req_date.month
yy = req_date.year
else:
req_date = self.now_date + relativedelta(months=1)
mm = req_date.month
yy = req_date.year
date = {
'dd': int(dd),
'mm': int(mm),
'yy': int(yy),
'type': TYPE_EXACT
}
date_list.append(date)
original_list.append(original)
return date_list, original_list
|
[
"def",
"_detect_date_ref_month_3",
"(",
"self",
",",
"date_list",
",",
"original_list",
")",
":",
"date_list",
"=",
"date_list",
"or",
"[",
"]",
"original_list",
"=",
"original_list",
"or",
"[",
"]",
"date_ref_month_match",
"=",
"self",
".",
"regex_date_ref_month_3",
".",
"findall",
"(",
"self",
".",
"processed_text",
")",
"for",
"date_match",
"in",
"date_ref_month_match",
":",
"original",
"=",
"date_match",
"[",
"0",
"]",
"dd",
"=",
"self",
".",
"_get_int_from_numeral",
"(",
"date_match",
"[",
"1",
"]",
")",
"if",
"(",
"self",
".",
"now_date",
".",
"day",
">",
"dd",
"and",
"self",
".",
"past_date_referenced",
")",
"or",
"(",
"self",
".",
"now_date",
".",
"day",
"<=",
"dd",
"and",
"not",
"self",
".",
"past_date_referenced",
")",
":",
"mm",
"=",
"self",
".",
"now_date",
".",
"month",
"yy",
"=",
"self",
".",
"now_date",
".",
"year",
"elif",
"self",
".",
"now_date",
".",
"day",
"<=",
"dd",
"and",
"self",
".",
"past_date_referenced",
":",
"req_date",
"=",
"self",
".",
"now_date",
"-",
"relativedelta",
"(",
"months",
"=",
"1",
")",
"mm",
"=",
"req_date",
".",
"month",
"yy",
"=",
"req_date",
".",
"year",
"else",
":",
"req_date",
"=",
"self",
".",
"now_date",
"+",
"relativedelta",
"(",
"months",
"=",
"1",
")",
"mm",
"=",
"req_date",
".",
"month",
"yy",
"=",
"req_date",
".",
"year",
"date",
"=",
"{",
"'dd'",
":",
"int",
"(",
"dd",
")",
",",
"'mm'",
":",
"int",
"(",
"mm",
")",
",",
"'yy'",
":",
"int",
"(",
"yy",
")",
",",
"'type'",
":",
"TYPE_EXACT",
"}",
"date_list",
".",
"append",
"(",
"date",
")",
"original_list",
".",
"append",
"(",
"original",
")",
"return",
"date_list",
",",
"original_list"
] |
https://github.com/hellohaptik/chatbot_ner/blob/742104790170ae5b73c583c94db6786549337dc4/ner_v2/detectors/temporal/date/standard_date_regex.py#L330-L367
|
|
quic/aimet
|
dae9bae9a77ca719aa7553fefde4768270fc3518
|
TrainingExtensions/torch/src/python/aimet_torch/meta/connectedgraph.py
|
python
|
ConnectedGraph.__del__
|
(self)
|
Destructor of ConnectedGraph class
break the dependencies of Ops with Product
|
Destructor of ConnectedGraph class
break the dependencies of Ops with Product
|
[
"Destructor",
"of",
"ConnectedGraph",
"class",
"break",
"the",
"dependencies",
"of",
"Ops",
"with",
"Product"
] |
def __del__(self):
"""
Destructor of ConnectedGraph class
break the dependencies of Ops with Product
"""
for product in self._products.values():
product.producer = None
product.set_consumers_to_null()
|
[
"def",
"__del__",
"(",
"self",
")",
":",
"for",
"product",
"in",
"self",
".",
"_products",
".",
"values",
"(",
")",
":",
"product",
".",
"producer",
"=",
"None",
"product",
".",
"set_consumers_to_null",
"(",
")"
] |
https://github.com/quic/aimet/blob/dae9bae9a77ca719aa7553fefde4768270fc3518/TrainingExtensions/torch/src/python/aimet_torch/meta/connectedgraph.py#L171-L178
|
||
ibis-project/ibis
|
e1ef8b6870ac53de9d1fe5c52851fa41872109c4
|
ibis/expr/lineage.py
|
python
|
_get_args
|
(op, name)
|
Hack to get relevant arguments for lineage computation.
We need a better way to determine the relevant arguments of an expression.
|
Hack to get relevant arguments for lineage computation.
|
[
"Hack",
"to",
"get",
"relevant",
"arguments",
"for",
"lineage",
"computation",
"."
] |
def _get_args(op, name):
"""Hack to get relevant arguments for lineage computation.
We need a better way to determine the relevant arguments of an expression.
"""
# Could use multipledispatch here to avoid the pasta
if isinstance(op, ops.Selection):
assert name is not None, 'name is None'
result = op.selections
# if Selection.selections is always columnar, could use an
# OrderedDict to prevent scanning the whole thing
return [col for col in result if col._name == name]
elif isinstance(op, ops.Aggregation):
assert name is not None, 'name is None'
return [
col
for col in itertools.chain(op.by, op.metrics)
if col._name == name
]
else:
return op.args
|
[
"def",
"_get_args",
"(",
"op",
",",
"name",
")",
":",
"# Could use multipledispatch here to avoid the pasta",
"if",
"isinstance",
"(",
"op",
",",
"ops",
".",
"Selection",
")",
":",
"assert",
"name",
"is",
"not",
"None",
",",
"'name is None'",
"result",
"=",
"op",
".",
"selections",
"# if Selection.selections is always columnar, could use an",
"# OrderedDict to prevent scanning the whole thing",
"return",
"[",
"col",
"for",
"col",
"in",
"result",
"if",
"col",
".",
"_name",
"==",
"name",
"]",
"elif",
"isinstance",
"(",
"op",
",",
"ops",
".",
"Aggregation",
")",
":",
"assert",
"name",
"is",
"not",
"None",
",",
"'name is None'",
"return",
"[",
"col",
"for",
"col",
"in",
"itertools",
".",
"chain",
"(",
"op",
".",
"by",
",",
"op",
".",
"metrics",
")",
"if",
"col",
".",
"_name",
"==",
"name",
"]",
"else",
":",
"return",
"op",
".",
"args"
] |
https://github.com/ibis-project/ibis/blob/e1ef8b6870ac53de9d1fe5c52851fa41872109c4/ibis/expr/lineage.py#L141-L162
|
||
Azure/azure-cli
|
6c1b085a0910c6c2139006fcbd8ade44006eb6dd
|
src/azure-cli/azure/cli/command_modules/storage/url_quote_util.py
|
python
|
make_encoded_file_url_and_params
|
(file_service, share, file_dir, file_name, sas_token, safe=SAFE_CHARS)
|
return encode_url_path(file_url, safe), file_dir, file_name
|
Makes the file url using the service. Converts the file directory and name into byte-strings if needed and returns
(url, dir, file) as a tuple. This is needed to account for string encoding differences between python 2 and 3.
|
Makes the file url using the service. Converts the file directory and name into byte-strings if needed and returns
(url, dir, file) as a tuple. This is needed to account for string encoding differences between python 2 and 3.
|
[
"Makes",
"the",
"file",
"url",
"using",
"the",
"service",
".",
"Converts",
"the",
"file",
"directory",
"and",
"name",
"into",
"byte",
"-",
"strings",
"if",
"needed",
"and",
"returns",
"(",
"url",
"dir",
"file",
")",
"as",
"a",
"tuple",
".",
"This",
"is",
"needed",
"to",
"account",
"for",
"string",
"encoding",
"differences",
"between",
"python",
"2",
"and",
"3",
"."
] |
def make_encoded_file_url_and_params(file_service, share, file_dir, file_name, sas_token, safe=SAFE_CHARS):
"""
Makes the file url using the service. Converts the file directory and name into byte-strings if needed and returns
(url, dir, file) as a tuple. This is needed to account for string encoding differences between python 2 and 3.
"""
try:
file_url = file_service.make_file_url(share, file_dir, file_name, sas_token=sas_token)
except UnicodeEncodeError:
file_dir = file_dir.encode('utf-8')
file_name = file_name.encode('utf-8')
file_url = file_service.make_file_url(share, file_dir, file_name, sas_token=sas_token)
if not file_dir:
sep = file_url.find('://')
file_url = file_url[:sep + 3] + file_url[sep + 3:].replace('//', '/')
return encode_url_path(file_url, safe), file_dir, file_name
|
[
"def",
"make_encoded_file_url_and_params",
"(",
"file_service",
",",
"share",
",",
"file_dir",
",",
"file_name",
",",
"sas_token",
",",
"safe",
"=",
"SAFE_CHARS",
")",
":",
"try",
":",
"file_url",
"=",
"file_service",
".",
"make_file_url",
"(",
"share",
",",
"file_dir",
",",
"file_name",
",",
"sas_token",
"=",
"sas_token",
")",
"except",
"UnicodeEncodeError",
":",
"file_dir",
"=",
"file_dir",
".",
"encode",
"(",
"'utf-8'",
")",
"file_name",
"=",
"file_name",
".",
"encode",
"(",
"'utf-8'",
")",
"file_url",
"=",
"file_service",
".",
"make_file_url",
"(",
"share",
",",
"file_dir",
",",
"file_name",
",",
"sas_token",
"=",
"sas_token",
")",
"if",
"not",
"file_dir",
":",
"sep",
"=",
"file_url",
".",
"find",
"(",
"'://'",
")",
"file_url",
"=",
"file_url",
"[",
":",
"sep",
"+",
"3",
"]",
"+",
"file_url",
"[",
"sep",
"+",
"3",
":",
"]",
".",
"replace",
"(",
"'//'",
",",
"'/'",
")",
"return",
"encode_url_path",
"(",
"file_url",
",",
"safe",
")",
",",
"file_dir",
",",
"file_name"
] |
https://github.com/Azure/azure-cli/blob/6c1b085a0910c6c2139006fcbd8ade44006eb6dd/src/azure-cli/azure/cli/command_modules/storage/url_quote_util.py#L27-L42
|
|
stopstalk/stopstalk-deployment
|
10c3ab44c4ece33ae515f6888c15033db2004bb1
|
aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/ipaddress.py
|
python
|
_BaseNetwork.compare_networks
|
(self, other)
|
return 0
|
Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
|
Compare two IP objects.
|
[
"Compare",
"two",
"IP",
"objects",
"."
] |
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
|
[
"def",
"compare_networks",
"(",
"self",
",",
"other",
")",
":",
"# does this need to raise a ValueError?",
"if",
"self",
".",
"_version",
"!=",
"other",
".",
"_version",
":",
"raise",
"TypeError",
"(",
"'%s and %s are not of the same type'",
"%",
"(",
"self",
",",
"other",
")",
")",
"# self._version == other._version below here:",
"if",
"self",
".",
"network_address",
"<",
"other",
".",
"network_address",
":",
"return",
"-",
"1",
"if",
"self",
".",
"network_address",
">",
"other",
".",
"network_address",
":",
"return",
"1",
"# self.network_address == other.network_address below here:",
"if",
"self",
".",
"netmask",
"<",
"other",
".",
"netmask",
":",
"return",
"-",
"1",
"if",
"self",
".",
"netmask",
">",
"other",
".",
"netmask",
":",
"return",
"1",
"return",
"0"
] |
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/ipaddress.py#L938-L984
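The same three-way result can be reproduced with the standard library ipaddress module, which this vendored copy mirrors; the addresses are illustrative, and newer Pythons prefer the ordering operators <, ==, > over compare_networks:

import ipaddress

a = ipaddress.ip_network('192.0.2.0/25')
b = ipaddress.ip_network('192.0.2.128/25')
# -1: a sorts before b; 0: same network and netmask; 1: a sorts after b.
print(a.compare_networks(b))
print(a.compare_networks(a))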
|
|
vprusso/youtube_tutorials
|
a3f2fadfc408aafbf4505bcd5f2f4a41d670b44d
|
data_structures/linked_list/singularly_linked_list/linked_list_tail_to_head.py
|
python
|
LinkedList.insert_after_node
|
(self, prev_node, data)
|
[] |
def insert_after_node(self, prev_node, data):
if not prev_node:
print("Previous node is not in the list")
return
new_node = Node(data)
new_node.next = prev_node.next
prev_node.next = new_node
|
[
"def",
"insert_after_node",
"(",
"self",
",",
"prev_node",
",",
"data",
")",
":",
"if",
"not",
"prev_node",
":",
"print",
"(",
"\"Previous node is not in the list\"",
")",
"return",
"new_node",
"=",
"Node",
"(",
"data",
")",
"new_node",
".",
"next",
"=",
"prev_node",
".",
"next",
"prev_node",
".",
"next",
"=",
"new_node"
] |
https://github.com/vprusso/youtube_tutorials/blob/a3f2fadfc408aafbf4505bcd5f2f4a41d670b44d/data_structures/linked_list/singularly_linked_list/linked_list_tail_to_head.py#L36-L45
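A self-contained sketch of the insert-after pattern above; the Node and LinkedList shells are minimal stand-ins for the tutorial's classes:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def insert_after_node(self, prev_node, data):
        if not prev_node:
            print("Previous node is not in the list")
            return
        new_node = Node(data)
        new_node.next = prev_node.next
        prev_node.next = new_node


# Build A -> C, then splice B in after A to get A -> B -> C.
llist = LinkedList()
llist.head = Node("A")
llist.head.next = Node("C")
llist.insert_after_node(llist.head, "B")
node = llist.head
while node:
    print(node.data)
    node = node.next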
|
||||
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
Lib/logging/__init__.py
|
python
|
Handler.flush
|
(self)
|
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
|
Ensure all logging output has been flushed.
|
[
"Ensure",
"all",
"logging",
"output",
"has",
"been",
"flushed",
"."
] |
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
|
[
"def",
"flush",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/logging/__init__.py#L846-L853
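A subclass overrides flush() when it buffers output; a minimal sketch on top of the standard logging machinery, with illustrative handler and logger names:

import logging

class ListHandler(logging.Handler):
    """Buffer formatted records in memory until flush() is called."""
    def __init__(self):
        super().__init__()
        self.records = []

    def emit(self, record):
        self.records.append(self.format(record))

    def flush(self):
        # Here "flushing" means printing and clearing the buffer.
        for line in self.records:
            print(line)
        self.records.clear()

log = logging.getLogger("demo")
log.addHandler(ListHandler())
log.warning("buffered until flush")
logging.shutdown()  # calls flush() on every handler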
|
||
urschrei/pyzotero
|
ed4175e0f1a62f10984b311864c13ac453fa9ebe
|
pyzotero/zotero.py
|
python
|
Zotero._build_query
|
(self, query_string, no_params=False)
|
return query
|
Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method
|
Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method
|
[
"Set",
"request",
"parameters",
".",
"Will",
"always",
"add",
"the",
"user",
"ID",
"if",
"it",
"hasn",
"t",
"been",
"specifically",
"set",
"by",
"an",
"API",
"method"
] |
def _build_query(self, query_string, no_params=False):
"""
Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method
"""
try:
query = quote(query_string.format(u=self.library_id, t=self.library_type))
except KeyError as err:
raise ze.ParamNotPassed("There's a request parameter missing: %s" % err)
# Add the URL parameters and the user key, if necessary
if no_params is False:
if not self.url_params:
self.add_parameters()
query = "%s?%s" % (query, self.url_params)
return query
|
[
"def",
"_build_query",
"(",
"self",
",",
"query_string",
",",
"no_params",
"=",
"False",
")",
":",
"try",
":",
"query",
"=",
"quote",
"(",
"query_string",
".",
"format",
"(",
"u",
"=",
"self",
".",
"library_id",
",",
"t",
"=",
"self",
".",
"library_type",
")",
")",
"except",
"KeyError",
"as",
"err",
":",
"raise",
"ze",
".",
"ParamNotPassed",
"(",
"\"There's a request parameter missing: %s\"",
"%",
"err",
")",
"# Add the URL parameters and the user key, if necessary",
"if",
"no_params",
"is",
"False",
":",
"if",
"not",
"self",
".",
"url_params",
":",
"self",
".",
"add_parameters",
"(",
")",
"query",
"=",
"\"%s?%s\"",
"%",
"(",
"query",
",",
"self",
".",
"url_params",
")",
"return",
"query"
] |
https://github.com/urschrei/pyzotero/blob/ed4175e0f1a62f10984b311864c13ac453fa9ebe/pyzotero/zotero.py#L514-L528
|
|
ahmetcemturan/SFACT
|
7576e29ba72b33e5058049b77b7b558875542747
|
fabmetheus_utilities/geometry/geometry_utilities/evaluate.py
|
python
|
convertToPaths
|
(dictionary)
|
Recursively convert any ElementNodes to paths.
|
Recursively convert any ElementNodes to paths.
|
[
"Recursively",
"convert",
"any",
"ElementNodes",
"to",
"paths",
"."
] |
def convertToPaths(dictionary):
'Recursively convert any ElementNodes to paths.'
if dictionary.__class__ == Vector3 or dictionary.__class__.__name__ == 'Vector3Index':
return
keys = getKeys(dictionary)
if keys == None:
return
for key in keys:
value = dictionary[key]
if value.__class__.__name__ == 'ElementNode':
if value.xmlObject != None:
dictionary[key] = getFloatListListsByPaths(value.xmlObject.getPaths())
else:
convertToPaths(dictionary[key])
|
[
"def",
"convertToPaths",
"(",
"dictionary",
")",
":",
"if",
"dictionary",
".",
"__class__",
"==",
"Vector3",
"or",
"dictionary",
".",
"__class__",
".",
"__name__",
"==",
"'Vector3Index'",
":",
"return",
"keys",
"=",
"getKeys",
"(",
"dictionary",
")",
"if",
"keys",
"==",
"None",
":",
"return",
"for",
"key",
"in",
"keys",
":",
"value",
"=",
"dictionary",
"[",
"key",
"]",
"if",
"value",
".",
"__class__",
".",
"__name__",
"==",
"'ElementNode'",
":",
"if",
"value",
".",
"xmlObject",
"!=",
"None",
":",
"dictionary",
"[",
"key",
"]",
"=",
"getFloatListListsByPaths",
"(",
"value",
".",
"xmlObject",
".",
"getPaths",
"(",
")",
")",
"else",
":",
"convertToPaths",
"(",
"dictionary",
"[",
"key",
"]",
")"
] |
https://github.com/ahmetcemturan/SFACT/blob/7576e29ba72b33e5058049b77b7b558875542747/fabmetheus_utilities/geometry/geometry_utilities/evaluate.py#L98-L111
|
||
libtcod/python-tcod
|
e12c4172baa9efdfd74aff6ee9bab8454a835248
|
tcod/random.py
|
python
|
Random.__getstate__
|
(self)
|
return state
|
Pack the self.random_c attribute into a portable state.
|
Pack the self.random_c attribute into a portable state.
|
[
"Pack",
"the",
"self",
".",
"random_c",
"attribute",
"into",
"a",
"portable",
"state",
"."
] |
def __getstate__(self) -> Any:
"""Pack the self.random_c attribute into a portable state."""
state = self.__dict__.copy()
state["random_c"] = {
"mt_cmwc": {
"algorithm": self.random_c.mt_cmwc.algorithm,
"distribution": self.random_c.mt_cmwc.distribution,
"mt": list(self.random_c.mt_cmwc.mt),
"cur_mt": self.random_c.mt_cmwc.cur_mt,
"Q": list(self.random_c.mt_cmwc.Q),
"c": self.random_c.mt_cmwc.c,
"cur": self.random_c.mt_cmwc.cur,
}
}
return state
|
[
"def",
"__getstate__",
"(",
"self",
")",
"->",
"Any",
":",
"state",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"state",
"[",
"\"random_c\"",
"]",
"=",
"{",
"\"mt_cmwc\"",
":",
"{",
"\"algorithm\"",
":",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"algorithm",
",",
"\"distribution\"",
":",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"distribution",
",",
"\"mt\"",
":",
"list",
"(",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"mt",
")",
",",
"\"cur_mt\"",
":",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"cur_mt",
",",
"\"Q\"",
":",
"list",
"(",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"Q",
")",
",",
"\"c\"",
":",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"c",
",",
"\"cur\"",
":",
"self",
".",
"random_c",
".",
"mt_cmwc",
".",
"cur",
",",
"}",
"}",
"return",
"state"
] |
https://github.com/libtcod/python-tcod/blob/e12c4172baa9efdfd74aff6ee9bab8454a835248/tcod/random.py#L137-L151
|
|
lxy5513/videopose
|
6da0415183c5befd233ad85ff3aefce3179d8c44
|
joints_detectors/Alphapose/yolo/video_demo.py
|
python
|
arg_parse
|
()
|
return parser.parse_args()
|
Parse arguments to the detect module
|
Parse arguments to the detect module
|
[
"Parse",
"arguments",
"to",
"the",
"detect",
"module"
] |
def arg_parse():
"""
Parse arguments to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v3 Video Detection Module')
parser.add_argument("--video", dest = 'video', help =
"Video to run detection upon",
default = "video.avi", type = str)
parser.add_argument("--dataset", dest = "dataset", help = "Dataset on which the network has been trained", default = "pascal")
parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.5)
parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
parser.add_argument("--cfg", dest = 'cfgfile', help =
"Config file",
default = "cfg/yolov3-spp.cfg", type = str)
parser.add_argument("--weights", dest = 'weightsfile', help =
"weightsfile",
default = "yolov3-spp.weights", type = str)
parser.add_argument("--reso", dest = 'reso', help =
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default = "416", type = str)
return parser.parse_args()
|
[
"def",
"arg_parse",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'YOLO v3 Video Detection Module'",
")",
"parser",
".",
"add_argument",
"(",
"\"--video\"",
",",
"dest",
"=",
"'video'",
",",
"help",
"=",
"\"Video to run detection upon\"",
",",
"default",
"=",
"\"video.avi\"",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"\"--dataset\"",
",",
"dest",
"=",
"\"dataset\"",
",",
"help",
"=",
"\"Dataset on which the network has been trained\"",
",",
"default",
"=",
"\"pascal\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--confidence\"",
",",
"dest",
"=",
"\"confidence\"",
",",
"help",
"=",
"\"Object Confidence to filter predictions\"",
",",
"default",
"=",
"0.5",
")",
"parser",
".",
"add_argument",
"(",
"\"--nms_thresh\"",
",",
"dest",
"=",
"\"nms_thresh\"",
",",
"help",
"=",
"\"NMS Threshhold\"",
",",
"default",
"=",
"0.4",
")",
"parser",
".",
"add_argument",
"(",
"\"--cfg\"",
",",
"dest",
"=",
"'cfgfile'",
",",
"help",
"=",
"\"Config file\"",
",",
"default",
"=",
"\"cfg/yolov3-spp.cfg\"",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"\"--weights\"",
",",
"dest",
"=",
"'weightsfile'",
",",
"help",
"=",
"\"weightsfile\"",
",",
"default",
"=",
"\"yolov3-spp.weights\"",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"\"--reso\"",
",",
"dest",
"=",
"'reso'",
",",
"help",
"=",
"\"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed\"",
",",
"default",
"=",
"\"416\"",
",",
"type",
"=",
"str",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] |
https://github.com/lxy5513/videopose/blob/6da0415183c5befd233ad85ff3aefce3179d8c44/joints_detectors/Alphapose/yolo/video_demo.py#L57-L81
|
|
ctxis/canape
|
5f0e03424577296bcc60c2008a60a98ec5307e4b
|
CANAPE.Scripting/Lib/distutils/filelist.py
|
python
|
glob_to_re
|
(pattern)
|
return pattern_re
|
Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
|
Translate a shell-like glob pattern to a regular expression.
|
[
"Translate",
"a",
"shell",
"-",
"like",
"glob",
"pattern",
"to",
"a",
"regular",
"expression",
"."
] |
def glob_to_re(pattern):
"""Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters.
# XXX currently the "special characters" are just slash -- i.e. this is
# Unix-only.
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', pattern_re)
return pattern_re
|
[
"def",
"glob_to_re",
"(",
"pattern",
")",
":",
"pattern_re",
"=",
"fnmatch",
".",
"translate",
"(",
"pattern",
")",
"# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which",
"# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,",
"# and by extension they shouldn't match such \"special characters\" under",
"# any OS. So change all non-escaped dots in the RE to match any",
"# character except the special characters.",
"# XXX currently the \"special characters\" are just slash -- i.e. this is",
"# Unix-only.",
"pattern_re",
"=",
"re",
".",
"sub",
"(",
"r'((?<!\\\\)(\\\\\\\\)*)\\.'",
",",
"r'\\1[^/]'",
",",
"pattern_re",
")",
"return",
"pattern_re"
] |
https://github.com/ctxis/canape/blob/5f0e03424577296bcc60c2008a60a98ec5307e4b/CANAPE.Scripting/Lib/distutils/filelist.py#L287-L305
|
|
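A short usage sketch for the glob_to_re record above; the function body is copied verbatim so the snippet is self-contained, and the example patterns are illustrative only.

import fnmatch
import re

def glob_to_re(pattern):
    pattern_re = fnmatch.translate(pattern)
    pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', pattern_re)
    return pattern_re

print(bool(re.match(glob_to_re('*.py'), 'setup.py')))      # True
print(bool(re.match(glob_to_re('*.py'), 'src/setup.py')))  # False: '*' no longer matches across '/'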
cuthbertLab/music21
|
bd30d4663e52955ed922c10fdf541419d8c67671
|
music21/mei/base.py
|
python
|
_barlineFromAttr
|
(attr)
|
Use :func:`_attrTranslator` to convert the value of a "left" or "right" attribute to a
:class:`Barline` or :class:`Repeat` or occasionally a list of :class:`Repeat`. The only time a
list is returned is when "attr" is ``'rptboth'``, in which case the end and start barlines are
both returned.
:param str attr: The MEI @left or @right attribute to convert to a barline.
:returns: The barline.
:rtype: :class:`music21.bar.Barline` or :class:`~music21.bar.Repeat` or list of them
|
Use :func:`_attrTranslator` to convert the value of a "left" or "right" attribute to a
:class:`Barline` or :class:`Repeat` or occasionally a list of :class:`Repeat`. The only time a
list is returned is when "attr" is ``'rptboth'``, in which case the end and start barlines are
both returned.
|
[
"Use",
":",
"func",
":",
"_attrTranslator",
"to",
"convert",
"the",
"value",
"of",
"a",
"left",
"or",
"right",
"attribute",
"to",
"a",
":",
"class",
":",
"Barline",
"or",
":",
"class",
":",
"Repeat",
"or",
"occasionally",
"a",
"list",
"of",
":",
"class",
":",
"Repeat",
".",
"The",
"only",
"time",
"a",
"list",
"is",
"returned",
"is",
"when",
"attr",
"is",
"rptboth",
"in",
"which",
"case",
"the",
"end",
"and",
"start",
"barlines",
"are",
"both",
"returned",
"."
] |
def _barlineFromAttr(attr):
'''
Use :func:`_attrTranslator` to convert the value of a "left" or "right" attribute to a
:class:`Barline` or :class:`Repeat` or occasionally a list of :class:`Repeat`. The only time a
list is returned is when "attr" is ``'rptboth'``, in which case the end and start barlines are
both returned.
:param str attr: The MEI @left or @right attribute to convert to a barline.
:returns: The barline.
:rtype: :class:`music21.bar.Barline` or :class:`~music21.bar.Repeat` or list of them
'''
# NB: the MEI Specification says @left is used only for legacy-format conversions, so we'll
# just assume it's a @right attribute. Not a huge deal if we get this wrong (I hope).
if attr.startswith('rpt'):
if 'rptboth' == attr:
return _barlineFromAttr('rptend'), _barlineFromAttr('rptstart')
elif 'rptend' == attr:
return bar.Repeat('end', times=2)
else:
return bar.Repeat('start')
else:
return bar.Barline(_attrTranslator(attr, 'right', _BAR_ATTR_DICT))
|
[
"def",
"_barlineFromAttr",
"(",
"attr",
")",
":",
"# NB: the MEI Specification says @left is used only for legacy-format conversions, so we'll",
"# just assume it's a @right attribute. Not a huge deal if we get this wrong (I hope).",
"if",
"attr",
".",
"startswith",
"(",
"'rpt'",
")",
":",
"if",
"'rptboth'",
"==",
"attr",
":",
"return",
"_barlineFromAttr",
"(",
"'rptend'",
")",
",",
"_barlineFromAttr",
"(",
"'rptstart'",
")",
"elif",
"'rptend'",
"==",
"attr",
":",
"return",
"bar",
".",
"Repeat",
"(",
"'end'",
",",
"times",
"=",
"2",
")",
"else",
":",
"return",
"bar",
".",
"Repeat",
"(",
"'start'",
")",
"else",
":",
"return",
"bar",
".",
"Barline",
"(",
"_attrTranslator",
"(",
"attr",
",",
"'right'",
",",
"_BAR_ATTR_DICT",
")",
")"
] |
https://github.com/cuthbertLab/music21/blob/bd30d4663e52955ed922c10fdf541419d8c67671/music21/mei/base.py#L1099-L1120
|
||
deadfoxygrandpa/Elm.tmLanguage
|
155ce91c81a3b98fdf2785fe69b5460a3075d9f0
|
elm_generate.py
|
python
|
tokenize
|
(t)
|
return [v.strip() for v in t.split('->')]
|
[] |
def tokenize(t):
return [v.strip() for v in t.split('->')]
|
[
"def",
"tokenize",
"(",
"t",
")",
":",
"return",
"[",
"v",
".",
"strip",
"(",
")",
"for",
"v",
"in",
"t",
".",
"split",
"(",
"'->'",
")",
"]"
] |
https://github.com/deadfoxygrandpa/Elm.tmLanguage/blob/155ce91c81a3b98fdf2785fe69b5460a3075d9f0/elm_generate.py#L68-L69
|
|||
nelson-liu/paraphrase-id-tensorflow
|
108e461dea0dd148464e985e47ac5c6c11818fcb
|
duplicate_questions/data/dataset.py
|
python
|
IndexedDataset.sort
|
(self, reverse=True)
|
Sorts the list of IndexedInstances, in either ascending or descending order,
if the instances are IndexedSTSInstances
Parameters
----------
reverse: boolean, optional (default=True)
Boolean which detrmines what reverse parameter is used in the
sorting function.
|
Sorts the list of IndexedInstances, in either ascending or descending order,
if the instances are IndexedSTSInstances
|
[
"Sorts",
"the",
"list",
"of",
"IndexedInstances",
"in",
"either",
"ascending",
"or",
"descending",
"order",
"if",
"the",
"instances",
"are",
"IndexedSTSInstances"
] |
def sort(self, reverse=True):
"""
Sorts the list of IndexedInstances, in either ascending or descending order,
if the instances are IndexedSTSInstances
Parameters
----------
reverse: boolean, optional (default=True)
Boolean which detrmines what reverse parameter is used in the
sorting function.
"""
self.instances.sort(reverse=reverse)
|
[
"def",
"sort",
"(",
"self",
",",
"reverse",
"=",
"True",
")",
":",
"self",
".",
"instances",
".",
"sort",
"(",
"reverse",
"=",
"reverse",
")"
] |
https://github.com/nelson-liu/paraphrase-id-tensorflow/blob/108e461dea0dd148464e985e47ac5c6c11818fcb/duplicate_questions/data/dataset.py#L290-L301
|
||
mps-youtube/mps-youtube
|
4c6ee0f8f4643fc1308e637b622d0337bf9bce1b
|
mps_youtube/mpris.py
|
python
|
Mpris2MediaPlayer.GetAll
|
(self, interface_name)
|
getter for org.freedesktop.DBus.Properties on this object
|
getter for org.freedesktop.DBus.Properties on this object
|
[
"getter",
"for",
"org",
".",
"freedesktop",
".",
"DBus",
".",
"Properties",
"on",
"this",
"object"
] |
def GetAll(self, interface_name):
"""
getter for org.freedesktop.DBus.Properties on this object
"""
if interface_name in self.properties:
t = copy.copy(self.properties[interface_name]['read_only'])
t.update(self.properties[interface_name]['read_write'])
return t
else:
raise dbus.exceptions.DBusException(
'com.example.UnknownInterface',
'This object does not implement the %s interface'
% interface_name)
|
[
"def",
"GetAll",
"(",
"self",
",",
"interface_name",
")",
":",
"if",
"interface_name",
"in",
"self",
".",
"properties",
":",
"t",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"properties",
"[",
"interface_name",
"]",
"[",
"'read_only'",
"]",
")",
"t",
".",
"update",
"(",
"self",
".",
"properties",
"[",
"interface_name",
"]",
"[",
"'read_write'",
"]",
")",
"return",
"t",
"else",
":",
"raise",
"dbus",
".",
"exceptions",
".",
"DBusException",
"(",
"'com.example.UnknownInterface'",
",",
"'This object does not implement the %s interface'",
"%",
"interface_name",
")"
] |
https://github.com/mps-youtube/mps-youtube/blob/4c6ee0f8f4643fc1308e637b622d0337bf9bce1b/mps_youtube/mpris.py#L479-L492
|
||
jython/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
lib-python/3.5.1/collections/__init__.py
|
python
|
ChainMap.__init__
|
(self, *maps)
|
Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
|
Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
|
[
"Initialize",
"a",
"ChainMap",
"by",
"setting",
"*",
"maps",
"*",
"to",
"the",
"given",
"mappings",
".",
"If",
"no",
"mappings",
"are",
"provided",
"a",
"single",
"empty",
"dictionary",
"is",
"used",
"."
] |
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}]
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"maps",
")",
":",
"self",
".",
"maps",
"=",
"list",
"(",
"maps",
")",
"or",
"[",
"{",
"}",
"]"
] |
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/collections/__init__.py#L845-L850
|
||
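Since the ChainMap record above documents standard-library behaviour, a short usage sketch can illustrate it directly.

from collections import ChainMap

defaults = {'color': 'red', 'user': 'guest'}
overrides = {'user': 'admin'}

cm = ChainMap(overrides, defaults)
print(cm['user'])       # 'admin' -- the first mapping wins on lookup
print(cm['color'])      # 'red'   -- missing keys fall through to later mappings

print(ChainMap().maps)  # [{}] -- a single empty dict when no mappings are given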
PokemonGoF/PokemonGo-Bot-Desktop
|
4bfa94f0183406c6a86f93645eff7abd3ad4ced8
|
build/pywin/Lib/locale.py
|
python
|
_parse_localename
|
(localename)
|
Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
|
Parses the locale code for localename and returns the
result as tuple (language code, encoding).
|
[
"Parses",
"the",
"locale",
"code",
"for",
"localename",
"and",
"returns",
"the",
"result",
"as",
"tuple",
"(",
"language",
"code",
"encoding",
")",
"."
] |
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError, 'unknown locale: %s' % localename
|
[
"def",
"_parse_localename",
"(",
"localename",
")",
":",
"code",
"=",
"normalize",
"(",
"localename",
")",
"if",
"'@'",
"in",
"code",
":",
"# Deal with locale modifiers",
"code",
",",
"modifier",
"=",
"code",
".",
"split",
"(",
"'@'",
",",
"1",
")",
"if",
"modifier",
"==",
"'euro'",
"and",
"'.'",
"not",
"in",
"code",
":",
"# Assume Latin-9 for @euro locales. This is bogus,",
"# since some systems may use other encodings for these",
"# locales. Also, we ignore other modifiers.",
"return",
"code",
",",
"'iso-8859-15'",
"if",
"'.'",
"in",
"code",
":",
"return",
"tuple",
"(",
"code",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"2",
"]",
")",
"elif",
"code",
"==",
"'C'",
":",
"return",
"None",
",",
"None",
"raise",
"ValueError",
",",
"'unknown locale: %s'",
"%",
"localename"
] |
https://github.com/PokemonGoF/PokemonGo-Bot-Desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/locale.py#L449-L477
|
||
RiotGames/cloud-inquisitor
|
29a26c705381fdba3538b4efedb25b9e09b387ed
|
backend/cloud_inquisitor/plugins/types/issues.py
|
python
|
BaseIssue.update
|
(self, data)
|
Updates the issue object with the information from `data`. The changes will be added to the current
db.session but will not be commited. The user will need to perform the commit explicitly to save the changes
Returns:
True if issue object was updated, else False
|
Updates the issue object with the information from `data`. The changes will be added to the current
db.session but will not be commited. The user will need to perform the commit explicitly to save the changes
|
[
"Updates",
"the",
"issue",
"object",
"with",
"the",
"information",
"from",
"data",
".",
"The",
"changes",
"will",
"be",
"added",
"to",
"the",
"current",
"db",
".",
"session",
"but",
"will",
"not",
"be",
"commited",
".",
"The",
"user",
"will",
"need",
"to",
"perform",
"the",
"commit",
"explicitly",
"to",
"save",
"the",
"changes"
] |
def update(self, data):
"""Updates the issue object with the information from `data`. The changes will be added to the current
db.session but will not be commited. The user will need to perform the commit explicitly to save the changes
Returns:
True if issue object was updated, else False
"""
|
[
"def",
"update",
"(",
"self",
",",
"data",
")",
":"
] |
https://github.com/RiotGames/cloud-inquisitor/blob/29a26c705381fdba3538b4efedb25b9e09b387ed/backend/cloud_inquisitor/plugins/types/issues.py#L115-L121
|
||
seantis/suitable
|
047d8634c74498850911e2b14425fee6713adcca
|
suitable/module_runner.py
|
python
|
ModuleRunner.evaluate_results
|
(self, callback)
|
return RunnerResults({
'contacted': {
server: answer['result']
for server, answer in callback.contacted.items()
},
'unreachable': {
server: result
for server, result in callback.unreachable.items()
}
})
|
prepare the result of runner call for use with RunnerResults.
|
prepare the result of runner call for use with RunnerResults.
|
[
"prepare",
"the",
"result",
"of",
"runner",
"call",
"for",
"use",
"with",
"RunnerResults",
"."
] |
def evaluate_results(self, callback):
""" prepare the result of runner call for use with RunnerResults. """
for server, result in callback.unreachable.items():
log.error(u'{} could not be reached'.format(server))
log.debug(u'ansible-output =>\n{}'.format(pformat(result)))
if self.api.ignore_unreachable:
continue
self.trigger_event(server, 'on_unreachable_host', (
self, server
))
for server, answer in callback.contacted.items():
success = answer['success']
result = answer['result']
# none of the modules in our tests hit the 'failed' result
# codepath (which seems to not be implemented by all modules)
# seo we ignore this branch since it's rather trivial
if result.get('failed'): # pragma: no cover
success = False
if 'rc' in result:
if self.api.is_valid_return_code(result['rc']):
success = True
# Add success to result
result['success'] = success
if not success:
log.error(u'{} failed on {}'.format(self, server))
log.debug(u'ansible-output =>\n{}'.format(pformat(result)))
if self.api.ignore_errors:
continue
self.trigger_event(server, 'on_module_error', (
self, server, result
))
# XXX this is a weird structure because RunnerResults still works
# like it did with Ansible 1.x, where the results where structured
# like this
return RunnerResults({
'contacted': {
server: answer['result']
for server, answer in callback.contacted.items()
},
'unreachable': {
server: result
for server, result in callback.unreachable.items()
}
})
|
[
"def",
"evaluate_results",
"(",
"self",
",",
"callback",
")",
":",
"for",
"server",
",",
"result",
"in",
"callback",
".",
"unreachable",
".",
"items",
"(",
")",
":",
"log",
".",
"error",
"(",
"u'{} could not be reached'",
".",
"format",
"(",
"server",
")",
")",
"log",
".",
"debug",
"(",
"u'ansible-output =>\\n{}'",
".",
"format",
"(",
"pformat",
"(",
"result",
")",
")",
")",
"if",
"self",
".",
"api",
".",
"ignore_unreachable",
":",
"continue",
"self",
".",
"trigger_event",
"(",
"server",
",",
"'on_unreachable_host'",
",",
"(",
"self",
",",
"server",
")",
")",
"for",
"server",
",",
"answer",
"in",
"callback",
".",
"contacted",
".",
"items",
"(",
")",
":",
"success",
"=",
"answer",
"[",
"'success'",
"]",
"result",
"=",
"answer",
"[",
"'result'",
"]",
"# none of the modules in our tests hit the 'failed' result",
"# codepath (which seems to not be implemented by all modules)",
"# seo we ignore this branch since it's rather trivial",
"if",
"result",
".",
"get",
"(",
"'failed'",
")",
":",
"# pragma: no cover",
"success",
"=",
"False",
"if",
"'rc'",
"in",
"result",
":",
"if",
"self",
".",
"api",
".",
"is_valid_return_code",
"(",
"result",
"[",
"'rc'",
"]",
")",
":",
"success",
"=",
"True",
"# Add success to result",
"result",
"[",
"'success'",
"]",
"=",
"success",
"if",
"not",
"success",
":",
"log",
".",
"error",
"(",
"u'{} failed on {}'",
".",
"format",
"(",
"self",
",",
"server",
")",
")",
"log",
".",
"debug",
"(",
"u'ansible-output =>\\n{}'",
".",
"format",
"(",
"pformat",
"(",
"result",
")",
")",
")",
"if",
"self",
".",
"api",
".",
"ignore_errors",
":",
"continue",
"self",
".",
"trigger_event",
"(",
"server",
",",
"'on_module_error'",
",",
"(",
"self",
",",
"server",
",",
"result",
")",
")",
"# XXX this is a weird structure because RunnerResults still works",
"# like it did with Ansible 1.x, where the results where structured",
"# like this",
"return",
"RunnerResults",
"(",
"{",
"'contacted'",
":",
"{",
"server",
":",
"answer",
"[",
"'result'",
"]",
"for",
"server",
",",
"answer",
"in",
"callback",
".",
"contacted",
".",
"items",
"(",
")",
"}",
",",
"'unreachable'",
":",
"{",
"server",
":",
"result",
"for",
"server",
",",
"result",
"in",
"callback",
".",
"unreachable",
".",
"items",
"(",
")",
"}",
"}",
")"
] |
https://github.com/seantis/suitable/blob/047d8634c74498850911e2b14425fee6713adcca/suitable/module_runner.py#L287-L342
|
|
nortikin/sverchok
|
7b460f01317c15f2681bfa3e337c5e7346f3711b
|
utils/surface/nurbs.py
|
python
|
SvNurbsSurface.get_min_v_continuity
|
(self)
|
return sv_knotvector.get_min_continuity(kv, degree)
|
Return minimum continuity degree of the surface in the V direction (guaranteed by knotvector):
0 - point-wise continuity only (C0),
1 - tangent continuity (C1),
2 - 2nd derivative continuity (C2), and so on.
|
Return minimum continuity degree of the surface in the V direction (guaranteed by knotvector):
0 - point-wise continuity only (C0),
1 - tangent continuity (C1),
2 - 2nd derivative continuity (C2), and so on.
|
[
"Return",
"minimum",
"continuity",
"degree",
"of",
"the",
"surface",
"in",
"the",
"V",
"direction",
"(",
"guaranteed",
"by",
"knotvector",
")",
":",
"0",
"-",
"point",
"-",
"wise",
"continuity",
"only",
"(",
"C0",
")",
"1",
"-",
"tangent",
"continuity",
"(",
"C1",
")",
"2",
"-",
"2nd",
"derivative",
"continuity",
"(",
"C2",
")",
"and",
"so",
"on",
"."
] |
def get_min_v_continuity(self):
"""
Return minimum continuity degree of the surface in the V direction (guaranteed by knotvector):
0 - point-wise continuity only (C0),
1 - tangent continuity (C1),
2 - 2nd derivative continuity (C2), and so on.
"""
kv = self.get_knotvector_v()
degree = self.get_degree_v()
return sv_knotvector.get_min_continuity(kv, degree)
|
[
"def",
"get_min_v_continuity",
"(",
"self",
")",
":",
"kv",
"=",
"self",
".",
"get_knotvector_v",
"(",
")",
"degree",
"=",
"self",
".",
"get_degree_v",
"(",
")",
"return",
"sv_knotvector",
".",
"get_min_continuity",
"(",
"kv",
",",
"degree",
")"
] |
https://github.com/nortikin/sverchok/blob/7b460f01317c15f2681bfa3e337c5e7346f3711b/utils/surface/nurbs.py#L227-L236
|
|
kobayashi/s3monkey
|
cd59e6328fef94cac14f255a9755e2dd456ac302
|
s3monkey/pyfakefs/fake_filesystem.py
|
python
|
FakeFile.__getattr__
|
(self, item)
|
return getattr(self.stat_result, item)
|
Forward some properties to stat_result.
|
Forward some properties to stat_result.
|
[
"Forward",
"some",
"properties",
"to",
"stat_result",
"."
] |
def __getattr__(self, item):
"""Forward some properties to stat_result."""
return getattr(self.stat_result, item)
|
[
"def",
"__getattr__",
"(",
"self",
",",
"item",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"stat_result",
",",
"item",
")"
] |
https://github.com/kobayashi/s3monkey/blob/cd59e6328fef94cac14f255a9755e2dd456ac302/s3monkey/pyfakefs/fake_filesystem.py#L448-L450
|
|
fluentpython/example-code-2e
|
80f7f84274a47579e59c29a4657691525152c9d5
|
21-async/domains/asyncio/domaincheck.py
|
python
|
main
|
(tld: str)
|
[] |
async def main(tld: str) -> None:
tld = tld.strip('.')
names = (kw for kw in kwlist if len(kw) <= 4) # <1>
domains = (f'{name}.{tld}'.lower() for name in names) # <2>
print('FOUND\t\tNOT FOUND') # <3>
print('=====\t\t=========')
async for domain, found in multi_probe(domains): # <4>
indent = '' if found else '\t\t' # <5>
print(f'{indent}{domain}')
|
[
"async",
"def",
"main",
"(",
"tld",
":",
"str",
")",
"->",
"None",
":",
"tld",
"=",
"tld",
".",
"strip",
"(",
"'.'",
")",
"names",
"=",
"(",
"kw",
"for",
"kw",
"in",
"kwlist",
"if",
"len",
"(",
"kw",
")",
"<=",
"4",
")",
"# <1>",
"domains",
"=",
"(",
"f'{name}.{tld}'",
".",
"lower",
"(",
")",
"for",
"name",
"in",
"names",
")",
"# <2>",
"print",
"(",
"'FOUND\\t\\tNOT FOUND'",
")",
"# <3>",
"print",
"(",
"'=====\\t\\t========='",
")",
"async",
"for",
"domain",
",",
"found",
"in",
"multi_probe",
"(",
"domains",
")",
":",
"# <4>",
"indent",
"=",
"''",
"if",
"found",
"else",
"'\\t\\t'",
"# <5>",
"print",
"(",
"f'{indent}{domain}'",
")"
] |
https://github.com/fluentpython/example-code-2e/blob/80f7f84274a47579e59c29a4657691525152c9d5/21-async/domains/asyncio/domaincheck.py#L9-L17
|
||||
mdiazcl/fuzzbunch-debian
|
2b76c2249ade83a389ae3badb12a1bd09901fd2c
|
windows/Resources/Python/Core/Lib/mailbox.py
|
python
|
MH.__len__
|
(self)
|
return len(list(self.iterkeys()))
|
Return a count of messages in the mailbox.
|
Return a count of messages in the mailbox.
|
[
"Return",
"a",
"count",
"of",
"messages",
"in",
"the",
"mailbox",
"."
] |
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
|
[
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"len",
"(",
"list",
"(",
"self",
".",
"iterkeys",
"(",
")",
")",
")"
] |
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/mailbox.py#L1005-L1007
|
|
Mellcap/MellPlayer
|
90b3210eaaed675552fd69717b6b953fd0e0b07d
|
mellplayer/ui.py
|
python
|
UI.gen_color
|
(self, data, color='default')
|
return data
|
参考地址:http://blog.csdn.net/gatieme/article/details/45439671
但是目前用不到这么多类型,目前只用前景色
|
参考地址:http://blog.csdn.net/gatieme/article/details/45439671
但是目前用不到这么多类型,目前只用前景色
|
[
"参考地址",
":",
"http",
":",
"//",
"blog",
".",
"csdn",
".",
"net",
"/",
"gatieme",
"/",
"article",
"/",
"details",
"/",
"45439671",
"但是目前用不到这么多类型,目前只用前景色"
] |
def gen_color(self, data, color='default'):
'''
参考地址:http://blog.csdn.net/gatieme/article/details/45439671
但是目前用不到这么多类型,目前只用前景色
'''
color_code = FOREGROUND_COLOR.get(color, 246)
data = "\001\033[38;5;%sm\002%s\001\033[0m\002" % (color_code, data)
return data
|
[
"def",
"gen_color",
"(",
"self",
",",
"data",
",",
"color",
"=",
"'default'",
")",
":",
"color_code",
"=",
"FOREGROUND_COLOR",
".",
"get",
"(",
"color",
",",
"246",
")",
"data",
"=",
"\"\\001\\033[38;5;%sm\\002%s\\001\\033[0m\\002\"",
"%",
"(",
"color_code",
",",
"data",
")",
"return",
"data"
] |
https://github.com/Mellcap/MellPlayer/blob/90b3210eaaed675552fd69717b6b953fd0e0b07d/mellplayer/ui.py#L194-L201
|
|
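A self-contained sketch of the gen_color record above. FOREGROUND_COLOR is a module-level dict in MellPlayer that is not shown in the record, so a small stand-in is assumed here, and the self parameter is dropped to keep the sketch standalone.

FOREGROUND_COLOR = {'red': 196, 'green': 46, 'default': 246}  # assumed stand-in mapping

def gen_color(data, color='default'):
    color_code = FOREGROUND_COLOR.get(color, 246)
    # \001/\002 are readline's non-printing markers; \033[38;5;Nm selects a
    # 256-color foreground and \033[0m resets it.
    return "\001\033[38;5;%sm\002%s\001\033[0m\002" % (color_code, data)

print(gen_color('hello', 'green'))  # renders 'hello' in green on a 256-color terminal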
nttcslab-sp/kaldiio
|
60c3c928e4fb499d8adbf08fa7297c55551277d7
|
kaldiio/matio.py
|
python
|
write_array_ascii
|
(fd, array, digit=".12g")
|
return size
|
write_array_ascii
Args:
fd (file): binary mode
array (np.ndarray):
digit (str):
Returns:
size (int):
|
write_array_ascii
|
[
"write_array_ascii"
] |
def write_array_ascii(fd, array, digit=".12g"):
"""write_array_ascii
Args:
fd (file): binary mode
array (np.ndarray):
digit (str):
Returns:
size (int):
"""
assert isinstance(array, np.ndarray), type(array)
assert array.ndim in (1, 2), array.ndim
size = 0
fd.write(b" [")
size += 2
if array.ndim == 2:
for row in array:
fd.write(b"\n ")
size += 3
for i in row:
string = format(i, digit)
fd.write(string.encode(encoding=default_encoding))
fd.write(b" ")
size += len(string) + 1
fd.write(b"]\n")
size += 2
elif array.ndim == 1:
fd.write(b" ")
size += 1
for i in array:
string = format(i, digit)
fd.write(string.encode(encoding=default_encoding))
fd.write(b" ")
size += len(string) + 1
fd.write(b"]\n")
size += 2
return size
|
[
"def",
"write_array_ascii",
"(",
"fd",
",",
"array",
",",
"digit",
"=",
"\".12g\"",
")",
":",
"assert",
"isinstance",
"(",
"array",
",",
"np",
".",
"ndarray",
")",
",",
"type",
"(",
"array",
")",
"assert",
"array",
".",
"ndim",
"in",
"(",
"1",
",",
"2",
")",
",",
"array",
".",
"ndim",
"size",
"=",
"0",
"fd",
".",
"write",
"(",
"b\" [\"",
")",
"size",
"+=",
"2",
"if",
"array",
".",
"ndim",
"==",
"2",
":",
"for",
"row",
"in",
"array",
":",
"fd",
".",
"write",
"(",
"b\"\\n \"",
")",
"size",
"+=",
"3",
"for",
"i",
"in",
"row",
":",
"string",
"=",
"format",
"(",
"i",
",",
"digit",
")",
"fd",
".",
"write",
"(",
"string",
".",
"encode",
"(",
"encoding",
"=",
"default_encoding",
")",
")",
"fd",
".",
"write",
"(",
"b\" \"",
")",
"size",
"+=",
"len",
"(",
"string",
")",
"+",
"1",
"fd",
".",
"write",
"(",
"b\"]\\n\"",
")",
"size",
"+=",
"2",
"elif",
"array",
".",
"ndim",
"==",
"1",
":",
"fd",
".",
"write",
"(",
"b\" \"",
")",
"size",
"+=",
"1",
"for",
"i",
"in",
"array",
":",
"string",
"=",
"format",
"(",
"i",
",",
"digit",
")",
"fd",
".",
"write",
"(",
"string",
".",
"encode",
"(",
"encoding",
"=",
"default_encoding",
")",
")",
"fd",
".",
"write",
"(",
"b\" \"",
")",
"size",
"+=",
"len",
"(",
"string",
")",
"+",
"1",
"fd",
".",
"write",
"(",
"b\"]\\n\"",
")",
"size",
"+=",
"2",
"return",
"size"
] |
https://github.com/nttcslab-sp/kaldiio/blob/60c3c928e4fb499d8adbf08fa7297c55551277d7/kaldiio/matio.py#L906-L942
|
|
cykl/hprof2flamegraph
|
671941301380a831647b3fac36265ae42bf078c5
|
stackcollapse_hprof.py
|
python
|
get_stacks
|
(content, discard_lineno=False, discard_thread=False, shorten_pkgs=False)
|
return stacks
|
Get the stack traces from an hprof file. Return a dict indexed by trace ID.
|
Get the stack traces from an hprof file. Return a dict indexed by trace ID.
|
[
"Get",
"the",
"stack",
"traces",
"from",
"an",
"hprof",
"file",
".",
"Return",
"a",
"dict",
"indexed",
"by",
"trace",
"ID",
"."
] |
def get_stacks(content, discard_lineno=False, discard_thread=False, shorten_pkgs=False):
""" Get the stack traces from an hprof file. Return a dict indexed by trace ID. """
stacks = {}
pattern = r'TRACE (?P<trace_id>[0-9]+):( \(thread=(?P<thread_id>[0-9]+)\))?\n(?P<stack>(\t.+\n)+)'
match_objects = re.finditer(pattern, content, re.M)
for match_object in match_objects:
trace_id = match_object.group('trace_id')
if "<empty>" in match_object.group('stack'):
continue
stack = _process_stack(match_object.group('stack'), discard_lineno, shorten_pkgs)
thread_id = match_object.group('thread_id')
if thread_id and not discard_thread:
stack.append("Thread {0}".format(thread_id))
stacks[trace_id] = stack
return stacks
|
[
"def",
"get_stacks",
"(",
"content",
",",
"discard_lineno",
"=",
"False",
",",
"discard_thread",
"=",
"False",
",",
"shorten_pkgs",
"=",
"False",
")",
":",
"stacks",
"=",
"{",
"}",
"pattern",
"=",
"r'TRACE (?P<trace_id>[0-9]+):( \\(thread=(?P<thread_id>[0-9]+)\\))?\\n(?P<stack>(\\t.+\\n)+)'",
"match_objects",
"=",
"re",
".",
"finditer",
"(",
"pattern",
",",
"content",
",",
"re",
".",
"M",
")",
"for",
"match_object",
"in",
"match_objects",
":",
"trace_id",
"=",
"match_object",
".",
"group",
"(",
"'trace_id'",
")",
"if",
"\"<empty>\"",
"in",
"match_object",
".",
"group",
"(",
"'stack'",
")",
":",
"continue",
"stack",
"=",
"_process_stack",
"(",
"match_object",
".",
"group",
"(",
"'stack'",
")",
",",
"discard_lineno",
",",
"shorten_pkgs",
")",
"thread_id",
"=",
"match_object",
".",
"group",
"(",
"'thread_id'",
")",
"if",
"thread_id",
"and",
"not",
"discard_thread",
":",
"stack",
".",
"append",
"(",
"\"Thread {0}\"",
".",
"format",
"(",
"thread_id",
")",
")",
"stacks",
"[",
"trace_id",
"]",
"=",
"stack",
"return",
"stacks"
] |
https://github.com/cykl/hprof2flamegraph/blob/671941301380a831647b3fac36265ae42bf078c5/stackcollapse_hprof.py#L111-L128
|
|
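The get_stacks record above is driven by a TRACE-block regex; a tiny sketch with a hand-written hprof fragment shows what the pattern captures. _process_stack is a helper in the same module and is not reproduced here.

import re

content = (
    "TRACE 301001: (thread=200001)\n"
    "\tjava.lang.Object.wait(Object.java:-1)\n"
    "\tjava.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:135)\n"
)
pattern = r'TRACE (?P<trace_id>[0-9]+):( \(thread=(?P<thread_id>[0-9]+)\))?\n(?P<stack>(\t.+\n)+)'
match = next(re.finditer(pattern, content, re.M))
print(match.group('trace_id'))            # 301001
print(match.group('thread_id'))           # 200001
print(match.group('stack').count('\n'))   # 2 stack frames captured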
econ-ark/HARK
|
9562cafef854d9c3d6b4aba2540e3e442ba6ec6c
|
HARK/utilities.py
|
python
|
in_ipynb
|
()
|
If the ipython process contains 'terminal' assume not in a notebook.
Returns
--------
bool: Boolean
True if called from a jupyter notebook, else False
|
If the ipython process contains 'terminal' assume not in a notebook.
|
[
"If",
"the",
"ipython",
"process",
"contains",
"terminal",
"assume",
"not",
"in",
"a",
"notebook",
"."
] |
def in_ipynb():
""" If the ipython process contains 'terminal' assume not in a notebook.
Returns
--------
bool: Boolean
True if called from a jupyter notebook, else False
"""
try:
if "terminal" in str(type(get_ipython())):
return False
else:
return True
except NameError:
return False
|
[
"def",
"in_ipynb",
"(",
")",
":",
"try",
":",
"if",
"\"terminal\"",
"in",
"str",
"(",
"type",
"(",
"get_ipython",
"(",
")",
")",
")",
":",
"return",
"False",
"else",
":",
"return",
"True",
"except",
"NameError",
":",
"return",
"False"
] |
https://github.com/econ-ark/HARK/blob/9562cafef854d9c3d6b4aba2540e3e442ba6ec6c/HARK/utilities.py#L985-L999
|
||
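The in_ipynb record above works by probing for IPython's get_ipython builtin; run outside IPython, the NameError branch fires. A minimal sketch, with the function body copied verbatim:

def in_ipynb():
    try:
        if "terminal" in str(type(get_ipython())):
            return False
        else:
            return True
    except NameError:
        return False

print(in_ipynb())  # False when run as a plain script: get_ipython is undefined there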
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/sympy/sympy/printing/latex.py
|
python
|
LatexPrinter._print_Sum
|
(self, expr)
|
return tex
|
[] |
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
|
[
"def",
"_print_Sum",
"(",
"self",
",",
"expr",
")",
":",
"if",
"len",
"(",
"expr",
".",
"limits",
")",
"==",
"1",
":",
"tex",
"=",
"r\"\\sum_{%s=%s}^{%s} \"",
"%",
"tuple",
"(",
"[",
"self",
".",
"_print",
"(",
"i",
")",
"for",
"i",
"in",
"expr",
".",
"limits",
"[",
"0",
"]",
"]",
")",
"else",
":",
"def",
"_format_ineq",
"(",
"l",
")",
":",
"return",
"r\"%s \\leq %s \\leq %s\"",
"%",
"tuple",
"(",
"[",
"self",
".",
"_print",
"(",
"s",
")",
"for",
"s",
"in",
"(",
"l",
"[",
"1",
"]",
",",
"l",
"[",
"0",
"]",
",",
"l",
"[",
"2",
"]",
")",
"]",
")",
"tex",
"=",
"r\"\\sum_{\\substack{%s}} \"",
"%",
"str",
".",
"join",
"(",
"'\\\\\\\\'",
",",
"[",
"_format_ineq",
"(",
"l",
")",
"for",
"l",
"in",
"expr",
".",
"limits",
"]",
")",
"if",
"isinstance",
"(",
"expr",
".",
"function",
",",
"Add",
")",
":",
"tex",
"+=",
"r\"\\left(%s\\right)\"",
"%",
"self",
".",
"_print",
"(",
"expr",
".",
"function",
")",
"else",
":",
"tex",
"+=",
"self",
".",
"_print",
"(",
"expr",
".",
"function",
")",
"return",
"tex"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/printing/latex.py#L408-L425
|
|||
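The _print_Sum record above is the LaTeX printer path exercised by sympy.latex on Sum expressions. Assuming sympy is installed, a short check looks like this; exact spacing may vary slightly across sympy versions.

from sympy import Sum, latex, symbols

i, n = symbols('i n')
print(latex(Sum(i, (i, 1, n))))      # \sum_{i=1}^{n} i
print(latex(Sum(i + 1, (i, 1, n))))  # \sum_{i=1}^{n} \left(i + 1\right) -- Add gets parenthesized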
huggingface/hmtl
|
d4f48b88249345c3273e27c4d69b6f225506d8a9
|
hmtl/training/multi_task_trainer.py
|
python
|
MultiTaskTrainer.find_latest_checkpoint
|
(self)
|
return (model_path, training_state_path)
|
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
|
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
|
[
"Return",
"the",
"location",
"of",
"the",
"latest",
"model",
"and",
"training",
"state",
"files",
".",
"If",
"there",
"isn",
"t",
"a",
"valid",
"checkpoint",
"then",
"return",
"None",
"."
] |
def find_latest_checkpoint(self) -> Tuple[str, str]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (
self._serialization_dir is not None
and any("model_state" in x for x in os.listdir(self._serialization_dir))
and any("training_state" in x for x in os.listdir(self._serialization_dir))
)
if not have_checkpoint:
return None
model_path = os.path.join(self._serialization_dir, "model_state.th")
training_state_path = os.path.join(self._serialization_dir, "training_state.th")
return (model_path, training_state_path)
|
[
"def",
"find_latest_checkpoint",
"(",
"self",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"have_checkpoint",
"=",
"(",
"self",
".",
"_serialization_dir",
"is",
"not",
"None",
"and",
"any",
"(",
"\"model_state\"",
"in",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"_serialization_dir",
")",
")",
"and",
"any",
"(",
"\"training_state\"",
"in",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"_serialization_dir",
")",
")",
")",
"if",
"not",
"have_checkpoint",
":",
"return",
"None",
"model_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_serialization_dir",
",",
"\"model_state.th\"",
")",
"training_state_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_serialization_dir",
",",
"\"training_state.th\"",
")",
"return",
"(",
"model_path",
",",
"training_state_path",
")"
] |
https://github.com/huggingface/hmtl/blob/d4f48b88249345c3273e27c4d69b6f225506d8a9/hmtl/training/multi_task_trainer.py#L277-L294
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/redis/connection.py
|
python
|
HiredisParser.read_response
|
(self)
|
return response
|
[] |
def read_response(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
socket_read_size = self.socket_read_size
while response is False:
try:
if HIREDIS_USE_BYTE_BUFFER:
bufflen = recv_into(self._sock, self._buffer)
if bufflen == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
else:
buffer = recv(self._sock, socket_read_size)
# an empty string indicates the server shutdown the socket
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
if HIREDIS_USE_BYTE_BUFFER:
self._reader.feed(self._buffer, 0, bufflen)
else:
self._reader.feed(buffer)
response = self._reader.gets()
# if an older version of hiredis is installed, we need to attempt
# to convert ResponseErrors to their appropriate types.
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
if isinstance(response, ResponseError):
response = self.parse_error(response.args[0])
elif isinstance(response, list) and response and \
isinstance(response[0], ResponseError):
response[0] = self.parse_error(response[0].args[0])
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif isinstance(response, list) and response and \
isinstance(response[0], ConnectionError):
raise response[0]
return response
|
[
"def",
"read_response",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_reader",
":",
"raise",
"ConnectionError",
"(",
"SERVER_CLOSED_CONNECTION_ERROR",
")",
"# _next_response might be cached from a can_read() call",
"if",
"self",
".",
"_next_response",
"is",
"not",
"False",
":",
"response",
"=",
"self",
".",
"_next_response",
"self",
".",
"_next_response",
"=",
"False",
"return",
"response",
"response",
"=",
"self",
".",
"_reader",
".",
"gets",
"(",
")",
"socket_read_size",
"=",
"self",
".",
"socket_read_size",
"while",
"response",
"is",
"False",
":",
"try",
":",
"if",
"HIREDIS_USE_BYTE_BUFFER",
":",
"bufflen",
"=",
"recv_into",
"(",
"self",
".",
"_sock",
",",
"self",
".",
"_buffer",
")",
"if",
"bufflen",
"==",
"0",
":",
"raise",
"socket",
".",
"error",
"(",
"SERVER_CLOSED_CONNECTION_ERROR",
")",
"else",
":",
"buffer",
"=",
"recv",
"(",
"self",
".",
"_sock",
",",
"socket_read_size",
")",
"# an empty string indicates the server shutdown the socket",
"if",
"not",
"isinstance",
"(",
"buffer",
",",
"bytes",
")",
"or",
"len",
"(",
"buffer",
")",
"==",
"0",
":",
"raise",
"socket",
".",
"error",
"(",
"SERVER_CLOSED_CONNECTION_ERROR",
")",
"except",
"socket",
".",
"timeout",
":",
"raise",
"TimeoutError",
"(",
"\"Timeout reading from socket\"",
")",
"except",
"socket",
".",
"error",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"raise",
"ConnectionError",
"(",
"\"Error while reading from socket: %s\"",
"%",
"(",
"e",
".",
"args",
",",
")",
")",
"if",
"HIREDIS_USE_BYTE_BUFFER",
":",
"self",
".",
"_reader",
".",
"feed",
"(",
"self",
".",
"_buffer",
",",
"0",
",",
"bufflen",
")",
"else",
":",
"self",
".",
"_reader",
".",
"feed",
"(",
"buffer",
")",
"response",
"=",
"self",
".",
"_reader",
".",
"gets",
"(",
")",
"# if an older version of hiredis is installed, we need to attempt",
"# to convert ResponseErrors to their appropriate types.",
"if",
"not",
"HIREDIS_SUPPORTS_CALLABLE_ERRORS",
":",
"if",
"isinstance",
"(",
"response",
",",
"ResponseError",
")",
":",
"response",
"=",
"self",
".",
"parse_error",
"(",
"response",
".",
"args",
"[",
"0",
"]",
")",
"elif",
"isinstance",
"(",
"response",
",",
"list",
")",
"and",
"response",
"and",
"isinstance",
"(",
"response",
"[",
"0",
"]",
",",
"ResponseError",
")",
":",
"response",
"[",
"0",
"]",
"=",
"self",
".",
"parse_error",
"(",
"response",
"[",
"0",
"]",
".",
"args",
"[",
"0",
"]",
")",
"# if the response is a ConnectionError or the response is a list and",
"# the first item is a ConnectionError, raise it as something bad",
"# happened",
"if",
"isinstance",
"(",
"response",
",",
"ConnectionError",
")",
":",
"raise",
"response",
"elif",
"isinstance",
"(",
"response",
",",
"list",
")",
"and",
"response",
"and",
"isinstance",
"(",
"response",
"[",
"0",
"]",
",",
"ConnectionError",
")",
":",
"raise",
"response",
"[",
"0",
"]",
"return",
"response"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/redis/connection.py#L375-L425
|