identifier | parameters | docstring | docstring_summary | function | function_tokens | start_point | end_point | language | docstring_language | docstring_language_predictions | is_langid_reliable
---|---|---|---|---|---|---|---|---|---|---|---|
Environment.compile_expression | (self, source, undefined_to_none=True) | A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
By default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
| A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression. | def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
By default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none) | [
"def",
"compile_expression",
"(",
"self",
",",
"source",
",",
"undefined_to_none",
"=",
"True",
")",
":",
"parser",
"=",
"Parser",
"(",
"self",
",",
"source",
",",
"state",
"=",
"'variable'",
")",
"exc_info",
"=",
"None",
"try",
":",
"expr",
"=",
"parser",
".",
"parse_expression",
"(",
")",
"if",
"not",
"parser",
".",
"stream",
".",
"eos",
":",
"raise",
"TemplateSyntaxError",
"(",
"'chunk after expression'",
",",
"parser",
".",
"stream",
".",
"current",
".",
"lineno",
",",
"None",
",",
"None",
")",
"expr",
".",
"set_environment",
"(",
"self",
")",
"except",
"TemplateSyntaxError",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"if",
"exc_info",
"is",
"not",
"None",
":",
"self",
".",
"handle_exception",
"(",
"exc_info",
",",
"source_hint",
"=",
"source",
")",
"body",
"=",
"[",
"nodes",
".",
"Assign",
"(",
"nodes",
".",
"Name",
"(",
"'result'",
",",
"'store'",
")",
",",
"expr",
",",
"lineno",
"=",
"1",
")",
"]",
"template",
"=",
"self",
".",
"from_string",
"(",
"nodes",
".",
"Template",
"(",
"body",
",",
"lineno",
"=",
"1",
")",
")",
"return",
"TemplateExpression",
"(",
"template",
",",
"undefined_to_none",
")"
] | [
592,
4
] | [
635,
62
] | python | en | ['en', 'en', 'en'] | True |
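A minimal usage sketch of `compile_expression`, assuming rule strings come from application-defined configuration; the variable names `price`, `limit`, and `currency` are illustrative:
```python
from jinja2 import Environment

env = Environment()
# One compiled rule, evaluated against different keyword arguments.
rule = env.compile_expression('price > limit and currency == "EUR"')
print(rule(price=120, limit=100, currency='EUR'))  # True
print(rule(price=80, limit=100, currency='EUR'))   # False
```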
Environment.compile_templates | (self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False) | Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used. To switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True`, .pyc files will be written to the
target instead of standard .py files. This flag does nothing on PyPy
and Python 3, where the pyc files are not picked up automatically and
provide little benefit.
.. versionadded:: 2.4
| Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used. To switch to
the stored algorithm, `zip` can be set to ``'stored'``. | def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used. To switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True`, .pyc files will be written to the
target instead of standard .py files. This flag does nothing on PyPy
and Python 3, where the pyc files are not picked up automatically and
provide little benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp
import marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates') | [
"def",
"compile_templates",
"(",
"self",
",",
"target",
",",
"extensions",
"=",
"None",
",",
"filter_func",
"=",
"None",
",",
"zip",
"=",
"'deflated'",
",",
"log_function",
"=",
"None",
",",
"ignore_errors",
"=",
"True",
",",
"py_compile",
"=",
"False",
")",
":",
"from",
"jinja2",
".",
"loaders",
"import",
"ModuleLoader",
"if",
"log_function",
"is",
"None",
":",
"log_function",
"=",
"lambda",
"x",
":",
"None",
"if",
"py_compile",
":",
"if",
"not",
"PY2",
"or",
"PYPY",
":",
"from",
"warnings",
"import",
"warn",
"warn",
"(",
"Warning",
"(",
"'py_compile has no effect on pypy or Python 3'",
")",
")",
"py_compile",
"=",
"False",
"else",
":",
"import",
"imp",
"import",
"marshal",
"py_header",
"=",
"imp",
".",
"get_magic",
"(",
")",
"+",
"u'\\xff\\xff\\xff\\xff'",
".",
"encode",
"(",
"'iso-8859-15'",
")",
"# Python 3.3 added a source filesize to the header",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"3",
")",
":",
"py_header",
"+=",
"u'\\x00\\x00\\x00\\x00'",
".",
"encode",
"(",
"'iso-8859-15'",
")",
"def",
"write_file",
"(",
"filename",
",",
"data",
",",
"mode",
")",
":",
"if",
"zip",
":",
"info",
"=",
"ZipInfo",
"(",
"filename",
")",
"info",
".",
"external_attr",
"=",
"0o755",
"<<",
"16",
"zip_file",
".",
"writestr",
"(",
"info",
",",
"data",
")",
"else",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"filename",
")",
",",
"mode",
")",
"try",
":",
"f",
".",
"write",
"(",
"data",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"if",
"zip",
"is",
"not",
"None",
":",
"from",
"zipfile",
"import",
"ZipFile",
",",
"ZipInfo",
",",
"ZIP_DEFLATED",
",",
"ZIP_STORED",
"zip_file",
"=",
"ZipFile",
"(",
"target",
",",
"'w'",
",",
"dict",
"(",
"deflated",
"=",
"ZIP_DEFLATED",
",",
"stored",
"=",
"ZIP_STORED",
")",
"[",
"zip",
"]",
")",
"log_function",
"(",
"'Compiling into Zip archive \"%s\"'",
"%",
"target",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"target",
")",
"log_function",
"(",
"'Compiling into folder \"%s\"'",
"%",
"target",
")",
"try",
":",
"for",
"name",
"in",
"self",
".",
"list_templates",
"(",
"extensions",
",",
"filter_func",
")",
":",
"source",
",",
"filename",
",",
"_",
"=",
"self",
".",
"loader",
".",
"get_source",
"(",
"self",
",",
"name",
")",
"try",
":",
"code",
"=",
"self",
".",
"compile",
"(",
"source",
",",
"name",
",",
"filename",
",",
"True",
",",
"True",
")",
"except",
"TemplateSyntaxError",
"as",
"e",
":",
"if",
"not",
"ignore_errors",
":",
"raise",
"log_function",
"(",
"'Could not compile \"%s\": %s'",
"%",
"(",
"name",
",",
"e",
")",
")",
"continue",
"filename",
"=",
"ModuleLoader",
".",
"get_module_filename",
"(",
"name",
")",
"if",
"py_compile",
":",
"c",
"=",
"self",
".",
"_compile",
"(",
"code",
",",
"encode_filename",
"(",
"filename",
")",
")",
"write_file",
"(",
"filename",
"+",
"'c'",
",",
"py_header",
"+",
"marshal",
".",
"dumps",
"(",
"c",
")",
",",
"'wb'",
")",
"log_function",
"(",
"'Byte-compiled \"%s\" as %s'",
"%",
"(",
"name",
",",
"filename",
"+",
"'c'",
")",
")",
"else",
":",
"write_file",
"(",
"filename",
",",
"code",
",",
"'w'",
")",
"log_function",
"(",
"'Compiled \"%s\" as %s'",
"%",
"(",
"name",
",",
"filename",
")",
")",
"finally",
":",
"if",
"zip",
":",
"zip_file",
".",
"close",
"(",
")",
"log_function",
"(",
"'Finished compiling templates'",
")"
] | [
637,
4
] | [
730,
52
] | python | en | ['en', 'en', 'en'] | True |
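A hedged sketch of the round trip this method enables: precompile a template directory into a zip archive, then serve it with `ModuleLoader`. The `templates/` directory, `compiled.zip` target, and `index.html` name are assumptions for illustration:
```python
from jinja2 import Environment, FileSystemLoader, ModuleLoader

# Build step: compile everything the loader can see into one archive.
src_env = Environment(loader=FileSystemLoader('templates'))
src_env.compile_templates('compiled.zip', zip='deflated',
                          log_function=print, ignore_errors=False)

# Run step: load precompiled code without parsing template source again.
run_env = Environment(loader=ModuleLoader('compiled.zip'))
page = run_env.get_template('index.html')
```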
Environment.list_templates | (self, extensions=None, filter_func=None) | Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
| Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method. | def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = list(ifilter(filter_func, x))
return x | [
"def",
"list_templates",
"(",
"self",
",",
"extensions",
"=",
"None",
",",
"filter_func",
"=",
"None",
")",
":",
"x",
"=",
"self",
".",
"loader",
".",
"list_templates",
"(",
")",
"if",
"extensions",
"is",
"not",
"None",
":",
"if",
"filter_func",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'either extensions or filter_func '",
"'can be passed, but not both'",
")",
"filter_func",
"=",
"lambda",
"x",
":",
"'.'",
"in",
"x",
"and",
"x",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"1",
"]",
"in",
"extensions",
"if",
"filter_func",
"is",
"not",
"None",
":",
"x",
"=",
"list",
"(",
"ifilter",
"(",
"filter_func",
",",
"x",
")",
")",
"return",
"x"
] | [
732,
4
] | [
757,
16
] | python | en | ['en', 'en', 'en'] | True |
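A small sketch of the two filtering styles; they are mutually exclusive, and passing both raises the `TypeError` seen above. The paths are illustrative:
```python
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))

# Filter by file extension ...
html_only = env.list_templates(extensions=['html', 'htm'])

# ... or by an arbitrary predicate on the template name, but not both.
no_partials = env.list_templates(
    filter_func=lambda name: not name.rsplit('/', 1)[-1].startswith('_'))
```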
Environment.handle_exception | (self, exc_info=None, rendered=False, source_hint=None) | Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
| Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
| def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb) | [
"def",
"handle_exception",
"(",
"self",
",",
"exc_info",
"=",
"None",
",",
"rendered",
"=",
"False",
",",
"source_hint",
"=",
"None",
")",
":",
"global",
"_make_traceback",
"if",
"exc_info",
"is",
"None",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"# the debugging module is imported when it's used for the first time.",
"# we're doing a lot of stuff there and for applications that do not",
"# get any exceptions in template rendering there is no need to load",
"# all of that.",
"if",
"_make_traceback",
"is",
"None",
":",
"from",
"jinja2",
".",
"debug",
"import",
"make_traceback",
"as",
"_make_traceback",
"traceback",
"=",
"_make_traceback",
"(",
"exc_info",
",",
"source_hint",
")",
"if",
"rendered",
"and",
"self",
".",
"exception_formatter",
"is",
"not",
"None",
":",
"return",
"self",
".",
"exception_formatter",
"(",
"traceback",
")",
"if",
"self",
".",
"exception_handler",
"is",
"not",
"None",
":",
"self",
".",
"exception_handler",
"(",
"traceback",
")",
"exc_type",
",",
"exc_value",
",",
"tb",
"=",
"traceback",
".",
"standard_exc_info",
"reraise",
"(",
"exc_type",
",",
"exc_value",
",",
"tb",
")"
] | [
759,
4
] | [
779,
40
] | python | en | ['en', 'en', 'en'] | True |
Environment.join_path | (self, template, parent) | Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
| Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name. | def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template | [
"def",
"join_path",
"(",
"self",
",",
"template",
",",
"parent",
")",
":",
"return",
"template"
] | [
781,
4
] | [
791,
23
] | python | en | ['en', 'en', 'en'] | True |
Environment.get_template | (self, name, parent=None, globals=None) | Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
| Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading. | def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals)) | [
"def",
"get_template",
"(",
"self",
",",
"name",
",",
"parent",
"=",
"None",
",",
"globals",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"Template",
")",
":",
"return",
"name",
"if",
"parent",
"is",
"not",
"None",
":",
"name",
"=",
"self",
".",
"join_path",
"(",
"name",
",",
"parent",
")",
"return",
"self",
".",
"_load_template",
"(",
"name",
",",
"self",
".",
"make_globals",
"(",
"globals",
")",
")"
] | [
809,
4
] | [
829,
68
] | python | en | ['en', 'en', 'en'] | True |
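A sketch of the `globals` parameter, assuming a hypothetical `page.html` that references `site_name`:
```python
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
# 'site_name' becomes available inside page.html without being passed
# explicitly to every render() call.
tmpl = env.get_template('page.html', globals={'site_name': 'Example'})
html = tmpl.render(title='Home')
```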
Environment.select_template | (self, names, parent=None, globals=None) | Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
| Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception. | def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names) | [
"def",
"select_template",
"(",
"self",
",",
"names",
",",
"parent",
"=",
"None",
",",
"globals",
"=",
"None",
")",
":",
"if",
"not",
"names",
":",
"raise",
"TemplatesNotFound",
"(",
"message",
"=",
"u'Tried to select from an empty list '",
"u'of templates.'",
")",
"globals",
"=",
"self",
".",
"make_globals",
"(",
"globals",
")",
"for",
"name",
"in",
"names",
":",
"if",
"isinstance",
"(",
"name",
",",
"Template",
")",
":",
"return",
"name",
"if",
"parent",
"is",
"not",
"None",
":",
"name",
"=",
"self",
".",
"join_path",
"(",
"name",
",",
"parent",
")",
"try",
":",
"return",
"self",
".",
"_load_template",
"(",
"name",
",",
"globals",
")",
"except",
"TemplateNotFound",
":",
"pass",
"raise",
"TemplatesNotFound",
"(",
"names",
")"
] | [
832,
4
] | [
856,
38
] | python | en | ['en', 'en', 'en'] | True |
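A sketch of fallback lookup with `select_template`; the theme paths are assumptions. The first name that loads wins, and `TemplatesNotFound` is raised only if every candidate is missing:
```python
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
tmpl = env.select_template([
    'themes/dark/page.html',  # preferred override, may not exist
    'page.html',              # stock fallback
])
```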
Environment.get_or_select_template | (self, template_name_or_list,
parent=None, globals=None) | Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
| Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`. | def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals) | [
"def",
"get_or_select_template",
"(",
"self",
",",
"template_name_or_list",
",",
"parent",
"=",
"None",
",",
"globals",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"template_name_or_list",
",",
"string_types",
")",
":",
"return",
"self",
".",
"get_template",
"(",
"template_name_or_list",
",",
"parent",
",",
"globals",
")",
"elif",
"isinstance",
"(",
"template_name_or_list",
",",
"Template",
")",
":",
"return",
"template_name_or_list",
"return",
"self",
".",
"select_template",
"(",
"template_name_or_list",
",",
"parent",
",",
"globals",
")"
] | [
859,
4
] | [
871,
75
] | python | en | ['en', 'en', 'en'] | True |
Environment.from_string | (self, source, globals=None, template_class=None) | Load a template from a string. This parses the source given and
returns a :class:`Template` object.
| Load a template from a string. This parses the source given and
returns a :class:`Template` object.
| def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None) | [
"def",
"from_string",
"(",
"self",
",",
"source",
",",
"globals",
"=",
"None",
",",
"template_class",
"=",
"None",
")",
":",
"globals",
"=",
"self",
".",
"make_globals",
"(",
"globals",
")",
"cls",
"=",
"template_class",
"or",
"self",
".",
"template_class",
"return",
"cls",
".",
"from_code",
"(",
"self",
",",
"self",
".",
"compile",
"(",
"source",
")",
",",
"globals",
",",
"None",
")"
] | [
873,
4
] | [
879,
71
] | python | en | ['en', 'en', 'en'] | True |
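A short sketch of `from_string` for ad-hoc templates built from literal source:
```python
from jinja2 import Environment

env = Environment()
tmpl = env.from_string('Hello {{ name }}!')
assert tmpl.render(name='World') == 'Hello World!'
```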
Environment.make_globals | (self, d) | Return a dict for the globals. | Return a dict for the globals. | def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d) | [
"def",
"make_globals",
"(",
"self",
",",
"d",
")",
":",
"if",
"not",
"d",
":",
"return",
"self",
".",
"globals",
"return",
"dict",
"(",
"self",
".",
"globals",
",",
"*",
"*",
"d",
")"
] | [
881,
4
] | [
885,
38
] | python | en | ['en', 'en', 'en'] | True |
Template.from_code | (cls, environment, code, globals, uptodate=None) | Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
| Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
| def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv | [
"def",
"from_code",
"(",
"cls",
",",
"environment",
",",
"code",
",",
"globals",
",",
"uptodate",
"=",
"None",
")",
":",
"namespace",
"=",
"{",
"'environment'",
":",
"environment",
",",
"'__file__'",
":",
"code",
".",
"co_filename",
"}",
"exec",
"(",
"code",
",",
"namespace",
")",
"rv",
"=",
"cls",
".",
"_from_namespace",
"(",
"environment",
",",
"namespace",
",",
"globals",
")",
"rv",
".",
"_uptodate",
"=",
"uptodate",
"return",
"rv"
] | [
947,
4
] | [
958,
17
] | python | en | ['en', 'en', 'en'] | True |
Template.from_module_dict | (cls, environment, module_dict, globals) | Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
| Creates a template object from a module. This is used by the
module loader to create a template object. | def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals) | [
"def",
"from_module_dict",
"(",
"cls",
",",
"environment",
",",
"module_dict",
",",
"globals",
")",
":",
"return",
"cls",
".",
"_from_namespace",
"(",
"environment",
",",
"module_dict",
",",
"globals",
")"
] | [
961,
4
] | [
967,
69
] | python | en | ['en', 'en', 'en'] | True |
Template.render | (self, *args, **kwargs) | This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
| This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same:: | def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True) | [
"def",
"render",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"vars",
"=",
"dict",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"return",
"concat",
"(",
"self",
".",
"root_render_func",
"(",
"self",
".",
"new_context",
"(",
"vars",
")",
")",
")",
"except",
"Exception",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"return",
"self",
".",
"environment",
".",
"handle_exception",
"(",
"exc_info",
",",
"True",
")"
] | [
992,
4
] | [
1007,
64
] | python | en | ['en', 'en', 'en'] | True |
Template.render_async | (self, *args, **kwargs) | This works similarly to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled.
Example usage::
await template.render_async(knights='that say nih; asynchronously')
| This works similarly to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled. | def render_async(self, *args, **kwargs):
"""This works similar to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled.
Example usage::
await template.render_async(knights='that say nih; asynchronously')
"""
# see asyncsupport for the actual implementation
raise NotImplementedError('This feature is not available for this '
'version of Python') | [
"def",
"render_async",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# see asyncsupport for the actual implementation",
"raise",
"NotImplementedError",
"(",
"'This feature is not available for this '",
"'version of Python'",
")"
] | [
1009,
4
] | [
1020,
54
] | python | en | ['en', 'en', 'en'] | True |
Template.stream | (self, *args, **kwargs) | Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
| Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
| def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs)) | [
"def",
"stream",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"TemplateStream",
"(",
"self",
".",
"generate",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | [
1022,
4
] | [
1026,
61
] | python | en | ['en', 'en', 'en'] | True |
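A sketch of streaming a large render to a file through the returned `TemplateStream`; `enable_buffering` and `dump` are part of the stream API, and the buffer size of 5 events is an arbitrary choice:
```python
from jinja2 import Environment

env = Environment()
tmpl = env.from_string('{% for i in rows %}{{ i }}\n{% endfor %}')

stream = tmpl.stream(rows=range(100000))
stream.enable_buffering(size=5)       # batch events to cut down write calls
with open('out.txt', 'w') as fp:
    stream.dump(fp)
```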
Template.generate | (self, *args, **kwargs) | For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
| For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings. | def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True) | [
"def",
"generate",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"vars",
"=",
"dict",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"for",
"event",
"in",
"self",
".",
"root_render_func",
"(",
"self",
".",
"new_context",
"(",
"vars",
")",
")",
":",
"yield",
"event",
"except",
"Exception",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"else",
":",
"return",
"yield",
"self",
".",
"environment",
".",
"handle_exception",
"(",
"exc_info",
",",
"True",
")"
] | [
1028,
4
] | [
1044,
63
] | python | en | ['en', 'en', 'en'] | True |
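A sketch of piecewise rendering with `generate`, e.g. for a chunked response; writing to stdout stands in for any writable sink:
```python
import sys

from jinja2 import Environment

env = Environment()
tmpl = env.from_string('{% for i in rows %}row {{ i }}\n{% endfor %}')

# Each yielded event is a unicode fragment; nothing is concatenated upfront.
for chunk in tmpl.generate(rows=range(3)):
    sys.stdout.write(chunk)
```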
Template.generate_async | (self, *args, **kwargs) | An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
| An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
| def generate_async(self, *args, **kwargs):
"""An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
"""
# see asyncsupport for the actual implementation
raise NotImplementedError('This feature is not available for this '
'version of Python') | [
"def",
"generate_async",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# see asyncsupport for the actual implementation",
"raise",
"NotImplementedError",
"(",
"'This feature is not available for this '",
"'version of Python'",
")"
] | [
1046,
4
] | [
1052,
54
] | python | en | ['en', 'en', 'en'] | True |
Template.new_context | (self, vars=None, shared=False, locals=None) | Create a new :class:`Context` for this template. The vars
provided will be passed to the template. By default the globals
are added to the context. If shared is set to `True` the data
is passed as-is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
| Create a new :class:`Context` for this template. The vars
provided will be passed to the template. By default the globals
are added to the context. If shared is set to `True` the data
is passed as-is to the context without adding the globals. | def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. By default the globals
are added to the context. If shared is set to `True` the data
is passed as-is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals) | [
"def",
"new_context",
"(",
"self",
",",
"vars",
"=",
"None",
",",
"shared",
"=",
"False",
",",
"locals",
"=",
"None",
")",
":",
"return",
"new_context",
"(",
"self",
".",
"environment",
",",
"self",
".",
"name",
",",
"self",
".",
"blocks",
",",
"vars",
",",
"shared",
",",
"self",
".",
"globals",
",",
"locals",
")"
] | [
1054,
4
] | [
1063,
62
] | python | en | ['en', 'en', 'en'] | True |
Template.make_module | (self, vars=None, shared=False, locals=None) | This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
| This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
| def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals)) | [
"def",
"make_module",
"(",
"self",
",",
"vars",
"=",
"None",
",",
"shared",
"=",
"False",
",",
"locals",
"=",
"None",
")",
":",
"return",
"TemplateModule",
"(",
"self",
",",
"self",
".",
"new_context",
"(",
"vars",
",",
"shared",
",",
"locals",
")",
")"
] | [
1065,
4
] | [
1072,
75
] | python | en | ['en', 'en', 'en'] | True |
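A sketch of `make_module` with a per-call context, in contrast to the cached `module` attribute; the macro and the `team` variable are illustrative assumptions:
```python
from jinja2 import Environment

env = Environment()
t = env.from_string(
    '{% macro greet(n) %}Hi {{ n }} from {{ team }}{% endmacro %}')

# The module is evaluated now, with these vars, not cached like t.module.
mod = t.make_module(vars={'team': 'docs'})
assert str(mod.greet('Ana')) == 'Hi Ana from docs'
```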
Template.make_module_async | (self, vars=None, shared=False, locals=None) | As template module creation can invoke template code for
asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
| As template module creation can invoke template code for
asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
| def make_module_async(self, vars=None, shared=False, locals=None):
"""As template module creation can invoke template code for
asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
"""
# see asyncsupport for the actual implementation
raise NotImplementedError('This feature is not available for this '
'version of Python') | [
"def",
"make_module_async",
"(",
"self",
",",
"vars",
"=",
"None",
",",
"shared",
"=",
"False",
",",
"locals",
"=",
"None",
")",
":",
"# see asyncsupport for the actual implementation",
"raise",
"NotImplementedError",
"(",
"'This feature is not available for this '",
"'version of Python'",
")"
] | [
1074,
4
] | [
1082,
54
] | python | en | ['ro', 'en', 'en'] | True |
Template.module | (self) | The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> str(t.module)
'23'
>>> t.module.foo() == u'42'
True
This attribute is not available if async mode is enabled.
| The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer: | def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> str(t.module)
'23'
>>> t.module.foo() == u'42'
True
This attribute is not available if async mode is enabled.
"""
return self._get_default_module() | [
"def",
"module",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_default_module",
"(",
")"
] | [
1092,
4
] | [
1105,
41
] | python | en | ['en', 'en', 'en'] | True |
Template.get_corresponding_lineno | (self, lineno) | Return the source line number of a line number in the
generated bytecode as they are not in sync.
| Return the source line number of a line number in the
generated bytecode as they are not in sync.
| def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1 | [
"def",
"get_corresponding_lineno",
"(",
"self",
",",
"lineno",
")",
":",
"for",
"template_line",
",",
"code_line",
"in",
"reversed",
"(",
"self",
".",
"debug_info",
")",
":",
"if",
"code_line",
"<=",
"lineno",
":",
"return",
"template_line",
"return",
"1"
] | [
1107,
4
] | [
1114,
16
] | python | en | ['en', 'en', 'en'] | True |
Template.is_up_to_date | (self) | If this variable is `False` there is a newer version available. | If this variable is `False` there is a newer version available. | def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate() | [
"def",
"is_up_to_date",
"(",
"self",
")",
":",
"if",
"self",
".",
"_uptodate",
"is",
"None",
":",
"return",
"True",
"return",
"self",
".",
"_uptodate",
"(",
")"
] | [
1117,
4
] | [
1121,
31
] | python | en | ['en', 'en', 'en'] | True |
Template.debug_info | (self) | The debug info mapping. | The debug info mapping. | def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')] | [
"def",
"debug_info",
"(",
"self",
")",
":",
"return",
"[",
"tuple",
"(",
"imap",
"(",
"int",
",",
"x",
".",
"split",
"(",
"'='",
")",
")",
")",
"for",
"x",
"in",
"self",
".",
"_debug_info",
".",
"split",
"(",
"'&'",
")",
"]"
] | [
1124,
4
] | [
1127,
44
] | python | en | ['en', 'en', 'en'] | True |
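A sketch of the `'&'`-separated encoding this property parses; the sample value is hypothetical, and each `template_line=code_line` pair maps generated code back to template source:
```python
# Hypothetical value of self._debug_info for a compiled template.
_debug_info = '1=8&3=11&6=15'
pairs = [tuple(map(int, x.split('='))) for x in _debug_info.split('&')]
assert pairs == [(1, 8), (3, 11), (6, 15)]
```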
Index.clone | (self) | Create a copy of this Index. | Create a copy of this Index. | def clone(self):
"""Create a copy of this Index."""
_, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs) | [
"def",
"clone",
"(",
"self",
")",
":",
"_",
",",
"args",
",",
"kwargs",
"=",
"self",
".",
"deconstruct",
"(",
")",
"return",
"self",
".",
"__class__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
136,
4
] | [
139,
46
] | python | en | ['en', 'en', 'en'] | True |
Index.set_name_with_model | (self, model) |
Generate a unique name for the index.
The name is divided into 3 parts - table name (12 chars), field name
(8 chars) and unique hash + suffix (10 chars). Each part is made to
fit its size by truncating the excess length.
|
Generate a unique name for the index. | def set_name_with_model(self, model):
"""
Generate a unique name for the index.
The name is divided into 3 parts - table name (12 chars), field name
(8 chars) and unique hash + suffix (10 chars). Each part is made to
fit its size by truncating the excess length.
"""
_, table_name = split_identifier(model._meta.db_table)
column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
column_names_with_order = [
(('-%s' if order else '%s') % column_name)
for column_name, (field_name, order) in zip(column_names, self.fields_orders)
]
# The length of the parts of the name is based on the default max
# length of 30 characters.
hash_data = [table_name] + column_names_with_order + [self.suffix]
self.name = '%s_%s_%s' % (
table_name[:11],
column_names[0][:7],
'%s_%s' % (names_digest(*hash_data, length=6), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
'Index too long for multiple database support. Is self.suffix '
'longer than 3 characters?'
)
if self.name[0] == '_' or self.name[0].isdigit():
self.name = 'D%s' % self.name[1:] | [
"def",
"set_name_with_model",
"(",
"self",
",",
"model",
")",
":",
"_",
",",
"table_name",
"=",
"split_identifier",
"(",
"model",
".",
"_meta",
".",
"db_table",
")",
"column_names",
"=",
"[",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
".",
"column",
"for",
"field_name",
",",
"order",
"in",
"self",
".",
"fields_orders",
"]",
"column_names_with_order",
"=",
"[",
"(",
"(",
"'-%s'",
"if",
"order",
"else",
"'%s'",
")",
"%",
"column_name",
")",
"for",
"column_name",
",",
"(",
"field_name",
",",
"order",
")",
"in",
"zip",
"(",
"column_names",
",",
"self",
".",
"fields_orders",
")",
"]",
"# The length of the parts of the name is based on the default max",
"# length of 30 characters.",
"hash_data",
"=",
"[",
"table_name",
"]",
"+",
"column_names_with_order",
"+",
"[",
"self",
".",
"suffix",
"]",
"self",
".",
"name",
"=",
"'%s_%s_%s'",
"%",
"(",
"table_name",
"[",
":",
"11",
"]",
",",
"column_names",
"[",
"0",
"]",
"[",
":",
"7",
"]",
",",
"'%s_%s'",
"%",
"(",
"names_digest",
"(",
"*",
"hash_data",
",",
"length",
"=",
"6",
")",
",",
"self",
".",
"suffix",
")",
",",
")",
"assert",
"len",
"(",
"self",
".",
"name",
")",
"<=",
"self",
".",
"max_name_length",
",",
"(",
"'Index too long for multiple database support. Is self.suffix '",
"'longer than 3 characters?'",
")",
"if",
"self",
".",
"name",
"[",
"0",
"]",
"==",
"'_'",
"or",
"self",
".",
"name",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"name",
"=",
"'D%s'",
"%",
"self",
".",
"name",
"[",
"1",
":",
"]"
] | [
141,
4
] | [
168,
45
] | python | en | ['en', 'error', 'th'] | False |
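A sketch of the resulting name layout only, not of Django internals: 11 characters of table name, 7 of the first column, then a 6-character digest plus the suffix, joined by underscores to fit the 30-character cap. The table, column, and digest values are made-up placeholders:
```python
table_name = 'shop_productcategory'   # hypothetical db_table
column = 'created'
digest = 'ab12cd'                     # stands in for names_digest(...)

name = '%s_%s_%s' % (table_name[:11], column[:7], '%s_%s' % (digest, 'idx'))
assert name == 'shop_produc_created_ab12cd_idx'
assert len(name) <= 30                # fits the default max name length
```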
inject_into_urllib3 | () |
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
|
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
| def inject_into_urllib3():
"""
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
"""
util.SSLContext = SecureTransportContext
util.ssl_.SSLContext = SecureTransportContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_SECURETRANSPORT = True
util.ssl_.IS_SECURETRANSPORT = True | [
"def",
"inject_into_urllib3",
"(",
")",
":",
"util",
".",
"SSLContext",
"=",
"SecureTransportContext",
"util",
".",
"ssl_",
".",
"SSLContext",
"=",
"SecureTransportContext",
"util",
".",
"HAS_SNI",
"=",
"HAS_SNI",
"util",
".",
"ssl_",
".",
"HAS_SNI",
"=",
"HAS_SNI",
"util",
".",
"IS_SECURETRANSPORT",
"=",
"True",
"util",
".",
"ssl_",
".",
"IS_SECURETRANSPORT",
"=",
"True"
] | [
188,
0
] | [
197,
39
] | python | en | ['en', 'error', 'th'] | False |
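A sketch of the intended call pattern, patching before any connection pools exist and unpatching afterwards; the module path matches urllib3's contrib package:
```python
import urllib3
from urllib3.contrib import securetransport

securetransport.inject_into_urllib3()   # new pools use SecureTransport
http = urllib3.PoolManager()
resp = http.request('GET', 'https://example.com/')

securetransport.extract_from_urllib3()  # restore the stock SSL backend
```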
extract_from_urllib3 | () |
Undo monkey-patching by :func:`inject_into_urllib3`.
|
Undo monkey-patching by :func:`inject_into_urllib3`.
| def extract_from_urllib3():
"""
Undo monkey-patching by :func:`inject_into_urllib3`.
"""
util.SSLContext = orig_util_SSLContext
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_SECURETRANSPORT = False
util.ssl_.IS_SECURETRANSPORT = False | [
"def",
"extract_from_urllib3",
"(",
")",
":",
"util",
".",
"SSLContext",
"=",
"orig_util_SSLContext",
"util",
".",
"ssl_",
".",
"SSLContext",
"=",
"orig_util_SSLContext",
"util",
".",
"HAS_SNI",
"=",
"orig_util_HAS_SNI",
"util",
".",
"ssl_",
".",
"HAS_SNI",
"=",
"orig_util_HAS_SNI",
"util",
".",
"IS_SECURETRANSPORT",
"=",
"False",
"util",
".",
"ssl_",
".",
"IS_SECURETRANSPORT",
"=",
"False"
] | [
200,
0
] | [
209,
40
] | python | en | ['en', 'error', 'th'] | False |
_read_callback | (connection_id, data_buffer, data_length_pointer) |
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
|
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
| def _read_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
requested_length = data_length_pointer[0]
timeout = wrapped_socket.gettimeout()
error = None
read_count = 0
try:
while read_count < requested_length:
if timeout is None or timeout >= 0:
if not util.wait_for_read(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
remaining = requested_length - read_count
buffer = (ctypes.c_char * remaining).from_address(
data_buffer + read_count
)
chunk_size = base_socket.recv_into(buffer, remaining)
read_count += chunk_size
if not chunk_size:
if not read_count:
return SecurityConst.errSSLClosedGraceful
break
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = read_count
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = read_count
if read_count != requested_length:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal | [
"def",
"_read_callback",
"(",
"connection_id",
",",
"data_buffer",
",",
"data_length_pointer",
")",
":",
"wrapped_socket",
"=",
"None",
"try",
":",
"wrapped_socket",
"=",
"_connection_refs",
".",
"get",
"(",
"connection_id",
")",
"if",
"wrapped_socket",
"is",
"None",
":",
"return",
"SecurityConst",
".",
"errSSLInternal",
"base_socket",
"=",
"wrapped_socket",
".",
"socket",
"requested_length",
"=",
"data_length_pointer",
"[",
"0",
"]",
"timeout",
"=",
"wrapped_socket",
".",
"gettimeout",
"(",
")",
"error",
"=",
"None",
"read_count",
"=",
"0",
"try",
":",
"while",
"read_count",
"<",
"requested_length",
":",
"if",
"timeout",
"is",
"None",
"or",
"timeout",
">=",
"0",
":",
"if",
"not",
"util",
".",
"wait_for_read",
"(",
"base_socket",
",",
"timeout",
")",
":",
"raise",
"socket",
".",
"error",
"(",
"errno",
".",
"EAGAIN",
",",
"\"timed out\"",
")",
"remaining",
"=",
"requested_length",
"-",
"read_count",
"buffer",
"=",
"(",
"ctypes",
".",
"c_char",
"*",
"remaining",
")",
".",
"from_address",
"(",
"data_buffer",
"+",
"read_count",
")",
"chunk_size",
"=",
"base_socket",
".",
"recv_into",
"(",
"buffer",
",",
"remaining",
")",
"read_count",
"+=",
"chunk_size",
"if",
"not",
"chunk_size",
":",
"if",
"not",
"read_count",
":",
"return",
"SecurityConst",
".",
"errSSLClosedGraceful",
"break",
"except",
"(",
"socket",
".",
"error",
")",
"as",
"e",
":",
"error",
"=",
"e",
".",
"errno",
"if",
"error",
"is",
"not",
"None",
"and",
"error",
"!=",
"errno",
".",
"EAGAIN",
":",
"data_length_pointer",
"[",
"0",
"]",
"=",
"read_count",
"if",
"error",
"==",
"errno",
".",
"ECONNRESET",
"or",
"error",
"==",
"errno",
".",
"EPIPE",
":",
"return",
"SecurityConst",
".",
"errSSLClosedAbort",
"raise",
"data_length_pointer",
"[",
"0",
"]",
"=",
"read_count",
"if",
"read_count",
"!=",
"requested_length",
":",
"return",
"SecurityConst",
".",
"errSSLWouldBlock",
"return",
"0",
"except",
"Exception",
"as",
"e",
":",
"if",
"wrapped_socket",
"is",
"not",
"None",
":",
"wrapped_socket",
".",
"_exception",
"=",
"e",
"return",
"SecurityConst",
".",
"errSSLInternal"
] | [
212,
0
] | [
264,
43
] | python | en | ['en', 'error', 'th'] | False |
_write_callback | (connection_id, data_buffer, data_length_pointer) |
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
|
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
| def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
bytes_to_write = data_length_pointer[0]
data = ctypes.string_at(data_buffer, bytes_to_write)
timeout = wrapped_socket.gettimeout()
error = None
sent = 0
try:
while sent < bytes_to_write:
if timeout is None or timeout >= 0:
if not util.wait_for_write(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
chunk_sent = base_socket.send(data)
sent += chunk_sent
# This has some needless copying here, but I'm not sure there's
# much value in optimising this data path.
data = data[chunk_sent:]
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = sent
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = sent
if sent != bytes_to_write:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal | [
"def",
"_write_callback",
"(",
"connection_id",
",",
"data_buffer",
",",
"data_length_pointer",
")",
":",
"wrapped_socket",
"=",
"None",
"try",
":",
"wrapped_socket",
"=",
"_connection_refs",
".",
"get",
"(",
"connection_id",
")",
"if",
"wrapped_socket",
"is",
"None",
":",
"return",
"SecurityConst",
".",
"errSSLInternal",
"base_socket",
"=",
"wrapped_socket",
".",
"socket",
"bytes_to_write",
"=",
"data_length_pointer",
"[",
"0",
"]",
"data",
"=",
"ctypes",
".",
"string_at",
"(",
"data_buffer",
",",
"bytes_to_write",
")",
"timeout",
"=",
"wrapped_socket",
".",
"gettimeout",
"(",
")",
"error",
"=",
"None",
"sent",
"=",
"0",
"try",
":",
"while",
"sent",
"<",
"bytes_to_write",
":",
"if",
"timeout",
"is",
"None",
"or",
"timeout",
">=",
"0",
":",
"if",
"not",
"util",
".",
"wait_for_write",
"(",
"base_socket",
",",
"timeout",
")",
":",
"raise",
"socket",
".",
"error",
"(",
"errno",
".",
"EAGAIN",
",",
"\"timed out\"",
")",
"chunk_sent",
"=",
"base_socket",
".",
"send",
"(",
"data",
")",
"sent",
"+=",
"chunk_sent",
"# This has some needless copying here, but I'm not sure there's",
"# much value in optimising this data path.",
"data",
"=",
"data",
"[",
"chunk_sent",
":",
"]",
"except",
"(",
"socket",
".",
"error",
")",
"as",
"e",
":",
"error",
"=",
"e",
".",
"errno",
"if",
"error",
"is",
"not",
"None",
"and",
"error",
"!=",
"errno",
".",
"EAGAIN",
":",
"data_length_pointer",
"[",
"0",
"]",
"=",
"sent",
"if",
"error",
"==",
"errno",
".",
"ECONNRESET",
"or",
"error",
"==",
"errno",
".",
"EPIPE",
":",
"return",
"SecurityConst",
".",
"errSSLClosedAbort",
"raise",
"data_length_pointer",
"[",
"0",
"]",
"=",
"sent",
"if",
"sent",
"!=",
"bytes_to_write",
":",
"return",
"SecurityConst",
".",
"errSSLWouldBlock",
"return",
"0",
"except",
"Exception",
"as",
"e",
":",
"if",
"wrapped_socket",
"is",
"not",
"None",
":",
"wrapped_socket",
".",
"_exception",
"=",
"e",
"return",
"SecurityConst",
".",
"errSSLInternal"
] | [
267,
0
] | [
315,
43
] | python | en | ['en', 'error', 'th'] | False |
WrappedSocket._raise_on_error | (self) |
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions.
It also correctly forces the socket closed.
|
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions. | def _raise_on_error(self):
"""
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions.
It also correctly forces the socket closed.
"""
self._exception = None
# We explicitly don't catch around this yield because in the unlikely
# event that an exception was hit in the block we don't want to swallow
# it.
yield
if self._exception is not None:
exception, self._exception = self._exception, None
self.close()
raise exception | [
"def",
"_raise_on_error",
"(",
"self",
")",
":",
"self",
".",
"_exception",
"=",
"None",
"# We explicitly don't catch around this yield because in the unlikely",
"# event that an exception was hit in the block we don't want to swallow",
"# it.",
"yield",
"if",
"self",
".",
"_exception",
"is",
"not",
"None",
":",
"exception",
",",
"self",
".",
"_exception",
"=",
"self",
".",
"_exception",
",",
"None",
"self",
".",
"close",
"(",
")",
"raise",
"exception"
] | [
352,
4
] | [
370,
27
] | python | en | ['en', 'error', 'th'] | False |
WrappedSocket._set_ciphers | (self) |
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
|
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
| def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
"""
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result) | [
"def",
"_set_ciphers",
"(",
"self",
")",
":",
"ciphers",
"=",
"(",
"Security",
".",
"SSLCipherSuite",
"*",
"len",
"(",
"CIPHER_SUITES",
")",
")",
"(",
"*",
"CIPHER_SUITES",
")",
"result",
"=",
"Security",
".",
"SSLSetEnabledCiphers",
"(",
"self",
".",
"context",
",",
"ciphers",
",",
"len",
"(",
"CIPHER_SUITES",
")",
")",
"_assert_no_error",
"(",
"result",
")"
] | [
372,
4
] | [
383,
32
] | python | en | ['en', 'error', 'th'] | False |
WrappedSocket._set_alpn_protocols | (self, protocols) |
Sets up the ALPN protocols on the context.
|
Sets up the ALPN protocols on the context.
| def _set_alpn_protocols(self, protocols):
"""
Sets up the ALPN protocols on the context.
"""
if not protocols:
return
protocols_arr = _create_cfstring_array(protocols)
try:
result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
_assert_no_error(result)
finally:
CoreFoundation.CFRelease(protocols_arr) | [
"def",
"_set_alpn_protocols",
"(",
"self",
",",
"protocols",
")",
":",
"if",
"not",
"protocols",
":",
"return",
"protocols_arr",
"=",
"_create_cfstring_array",
"(",
"protocols",
")",
"try",
":",
"result",
"=",
"Security",
".",
"SSLSetALPNProtocols",
"(",
"self",
".",
"context",
",",
"protocols_arr",
")",
"_assert_no_error",
"(",
"result",
")",
"finally",
":",
"CoreFoundation",
".",
"CFRelease",
"(",
"protocols_arr",
")"
] | [
385,
4
] | [
396,
51
] | python | en | ['en', 'error', 'th'] | False |
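The try/finally around CFRelease reflects CoreFoundation's ownership rule: any object obtained from a Create or Copy function must be released exactly once, even when the call that consumes it raises. A generic sketch of that balance (the three callables are placeholders, not a real API):

def with_cf_object(create, use, release):
    obj = create()
    try:
        return use(obj)   # may raise; the release below still runs
    finally:
        release(obj)      # always balance a Create with a Release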
WrappedSocket._custom_validate | (self, verify, trust_bundle) |
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
Raises an SSLError if the connection is not trusted.
|
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
Raises an SSLError if the connection is not trusted.
| def _custom_validate(self, verify, trust_bundle):
"""
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
Raises an SSLError if the connection is not trusted.
"""
# If we disabled cert validation, just say: cool.
if not verify:
return
successes = (
SecurityConst.kSecTrustResultUnspecified,
SecurityConst.kSecTrustResultProceed,
)
try:
trust_result = self._evaluate_trust(trust_bundle)
if trust_result in successes:
return
reason = "error code: %d" % (trust_result,)
except Exception as e:
# Do not trust on error
reason = "exception: %r" % (e,)
# SecureTransport does not send an alert, nor does it shut down the connection.
rec = _build_tls_unknown_ca_alert(self.version())
self.socket.sendall(rec)
# close the connection immediately
# l_onoff = 1, activate linger
# l_linger = 0, linger for 0 seconds
opts = struct.pack("ii", 1, 0)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
self.close()
raise ssl.SSLError("certificate verify failed, %s" % reason) | [
"def",
"_custom_validate",
"(",
"self",
",",
"verify",
",",
"trust_bundle",
")",
":",
"# If we disabled cert validation, just say: cool.",
"if",
"not",
"verify",
":",
"return",
"successes",
"=",
"(",
"SecurityConst",
".",
"kSecTrustResultUnspecified",
",",
"SecurityConst",
".",
"kSecTrustResultProceed",
",",
")",
"try",
":",
"trust_result",
"=",
"self",
".",
"_evaluate_trust",
"(",
"trust_bundle",
")",
"if",
"trust_result",
"in",
"successes",
":",
"return",
"reason",
"=",
"\"error code: %d\"",
"%",
"(",
"trust_result",
",",
")",
"except",
"Exception",
"as",
"e",
":",
"# Do not trust on error",
"reason",
"=",
"\"exception: %r\"",
"%",
"(",
"e",
",",
")",
"# SecureTransport does not send an alert nor shuts down the connection.",
"rec",
"=",
"_build_tls_unknown_ca_alert",
"(",
"self",
".",
"version",
"(",
")",
")",
"self",
".",
"socket",
".",
"sendall",
"(",
"rec",
")",
"# close the connection immediately",
"# l_onoff = 1, activate linger",
"# l_linger = 0, linger for 0 seoncds",
"opts",
"=",
"struct",
".",
"pack",
"(",
"\"ii\"",
",",
"1",
",",
"0",
")",
"self",
".",
"socket",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_LINGER",
",",
"opts",
")",
"self",
".",
"close",
"(",
")",
"raise",
"ssl",
".",
"SSLError",
"(",
"\"certificate verify failed, %s\"",
"%",
"reason",
")"
] | [
398,
4
] | [
431,
68
] | python | en | ['en', 'error', 'th'] | False |
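The SO_LINGER trick at the end of _custom_validate deserves a note: packing l_onoff=1 and l_linger=0 makes close() abort the connection with a TCP RST instead of the usual FIN handshake, which is the desired behavior after a failed certificate check. A standalone sketch (the two-int "ii" layout matches struct linger on common platforms, but it is platform-dependent):

import socket
import struct

def abortive_close(sock):
    # l_onoff = 1 enables linger; l_linger = 0 sets the timeout to zero,
    # so close() sends RST immediately instead of lingering on a FIN.
    opts = struct.pack("ii", 1, 0)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
    sock.close()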
WrappedSocket.handshake | (
self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase,
alpn_protocols,
) |
Actually performs the TLS handshake. This is run automatically by
the wrapped socket, and shouldn't be needed in user code.
|
Actually performs the TLS handshake. This is run automatically by
the wrapped socket, and shouldn't be needed in user code.
| def handshake(
self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase,
alpn_protocols,
):
"""
Actually performs the TLS handshake. This is run automatically by
the wrapped socket, and shouldn't be needed in user code.
"""
# First, we do the initial bits of connection setup. We need to create
# a context, set its I/O funcs, and set the connection reference.
self.context = Security.SSLCreateContext(
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
self.context, _read_callback_pointer, _write_callback_pointer
)
_assert_no_error(result)
# Here we need to compute the handle to use. We do this by taking the
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
# just keep incrementing by one until we find a free space.
with _connection_ref_lock:
handle = id(self) % 2147483647
while handle in _connection_refs:
handle = (handle + 1) % 2147483647
_connection_refs[handle] = self
result = Security.SSLSetConnection(self.context, handle)
_assert_no_error(result)
# If we have a server hostname, we should set that too.
if server_hostname:
if not isinstance(server_hostname, bytes):
server_hostname = server_hostname.encode("utf-8")
result = Security.SSLSetPeerDomainName(
self.context, server_hostname, len(server_hostname)
)
_assert_no_error(result)
# Setup the ciphers.
self._set_ciphers()
# Setup the ALPN protocols.
self._set_alpn_protocols(alpn_protocols)
# Set the minimum and maximum TLS versions.
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
_assert_no_error(result)
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
_assert_no_error(result)
# If there's a trust DB, we need to use it. We do that by telling
# SecureTransport to break on server auth. We also do that if we don't
# want to validate the certs at all: we just won't actually do any
# authing in that case.
if not verify or trust_bundle is not None:
result = Security.SSLSetSessionOption(
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
)
_assert_no_error(result)
# If there's a client cert, we need to use it.
if client_cert:
self._keychain, self._keychain_dir = _temporary_keychain()
self._client_cert_chain = _load_client_cert_chain(
self._keychain, client_cert, client_key
)
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
_assert_no_error(result)
while True:
with self._raise_on_error():
result = Security.SSLHandshake(self.context)
if result == SecurityConst.errSSLWouldBlock:
raise socket.timeout("handshake timed out")
elif result == SecurityConst.errSSLServerAuthCompleted:
self._custom_validate(verify, trust_bundle)
continue
else:
_assert_no_error(result)
break | [
"def",
"handshake",
"(",
"self",
",",
"server_hostname",
",",
"verify",
",",
"trust_bundle",
",",
"min_version",
",",
"max_version",
",",
"client_cert",
",",
"client_key",
",",
"client_key_passphrase",
",",
"alpn_protocols",
",",
")",
":",
"# First, we do the initial bits of connection setup. We need to create",
"# a context, set its I/O funcs, and set the connection reference.",
"self",
".",
"context",
"=",
"Security",
".",
"SSLCreateContext",
"(",
"None",
",",
"SecurityConst",
".",
"kSSLClientSide",
",",
"SecurityConst",
".",
"kSSLStreamType",
")",
"result",
"=",
"Security",
".",
"SSLSetIOFuncs",
"(",
"self",
".",
"context",
",",
"_read_callback_pointer",
",",
"_write_callback_pointer",
")",
"_assert_no_error",
"(",
"result",
")",
"# Here we need to compute the handle to use. We do this by taking the",
"# id of self modulo 2**31 - 1. If this is already in the dictionary, we",
"# just keep incrementing by one until we find a free space.",
"with",
"_connection_ref_lock",
":",
"handle",
"=",
"id",
"(",
"self",
")",
"%",
"2147483647",
"while",
"handle",
"in",
"_connection_refs",
":",
"handle",
"=",
"(",
"handle",
"+",
"1",
")",
"%",
"2147483647",
"_connection_refs",
"[",
"handle",
"]",
"=",
"self",
"result",
"=",
"Security",
".",
"SSLSetConnection",
"(",
"self",
".",
"context",
",",
"handle",
")",
"_assert_no_error",
"(",
"result",
")",
"# If we have a server hostname, we should set that too.",
"if",
"server_hostname",
":",
"if",
"not",
"isinstance",
"(",
"server_hostname",
",",
"bytes",
")",
":",
"server_hostname",
"=",
"server_hostname",
".",
"encode",
"(",
"\"utf-8\"",
")",
"result",
"=",
"Security",
".",
"SSLSetPeerDomainName",
"(",
"self",
".",
"context",
",",
"server_hostname",
",",
"len",
"(",
"server_hostname",
")",
")",
"_assert_no_error",
"(",
"result",
")",
"# Setup the ciphers.",
"self",
".",
"_set_ciphers",
"(",
")",
"# Setup the ALPN protocols.",
"self",
".",
"_set_alpn_protocols",
"(",
"alpn_protocols",
")",
"# Set the minimum and maximum TLS versions.",
"result",
"=",
"Security",
".",
"SSLSetProtocolVersionMin",
"(",
"self",
".",
"context",
",",
"min_version",
")",
"_assert_no_error",
"(",
"result",
")",
"result",
"=",
"Security",
".",
"SSLSetProtocolVersionMax",
"(",
"self",
".",
"context",
",",
"max_version",
")",
"_assert_no_error",
"(",
"result",
")",
"# If there's a trust DB, we need to use it. We do that by telling",
"# SecureTransport to break on server auth. We also do that if we don't",
"# want to validate the certs at all: we just won't actually do any",
"# authing in that case.",
"if",
"not",
"verify",
"or",
"trust_bundle",
"is",
"not",
"None",
":",
"result",
"=",
"Security",
".",
"SSLSetSessionOption",
"(",
"self",
".",
"context",
",",
"SecurityConst",
".",
"kSSLSessionOptionBreakOnServerAuth",
",",
"True",
")",
"_assert_no_error",
"(",
"result",
")",
"# If there's a client cert, we need to use it.",
"if",
"client_cert",
":",
"self",
".",
"_keychain",
",",
"self",
".",
"_keychain_dir",
"=",
"_temporary_keychain",
"(",
")",
"self",
".",
"_client_cert_chain",
"=",
"_load_client_cert_chain",
"(",
"self",
".",
"_keychain",
",",
"client_cert",
",",
"client_key",
")",
"result",
"=",
"Security",
".",
"SSLSetCertificate",
"(",
"self",
".",
"context",
",",
"self",
".",
"_client_cert_chain",
")",
"_assert_no_error",
"(",
"result",
")",
"while",
"True",
":",
"with",
"self",
".",
"_raise_on_error",
"(",
")",
":",
"result",
"=",
"Security",
".",
"SSLHandshake",
"(",
"self",
".",
"context",
")",
"if",
"result",
"==",
"SecurityConst",
".",
"errSSLWouldBlock",
":",
"raise",
"socket",
".",
"timeout",
"(",
"\"handshake timed out\"",
")",
"elif",
"result",
"==",
"SecurityConst",
".",
"errSSLServerAuthCompleted",
":",
"self",
".",
"_custom_validate",
"(",
"verify",
",",
"trust_bundle",
")",
"continue",
"else",
":",
"_assert_no_error",
"(",
"result",
")",
"break"
] | [
473,
4
] | [
564,
25
] | python | en | ['en', 'error', 'th'] | False |
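The handle computation in handshake() -- id(self) modulo 2**31 - 1, probing linearly on collision -- is a simple way to hand a C API an integer token that can be mapped back to a Python object from a callback. A sketch of that registry, assuming a module-level dict and lock like the ones referenced above:

import threading

_connection_refs = {}
_connection_ref_lock = threading.Lock()

def register_connection(obj):
    # Derive a 31-bit handle from the object's id; probe forward if taken.
    with _connection_ref_lock:
        handle = id(obj) % 2147483647
        while handle in _connection_refs:
            handle = (handle + 1) % 2147483647
        _connection_refs[handle] = obj
    return handle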
SecureTransportContext.check_hostname | (self) |
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
|
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
| def check_hostname(self):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
return True | [
"def",
"check_hostname",
"(",
"self",
")",
":",
"return",
"True"
] | [
803,
4
] | [
808,
19
] | python | en | ['en', 'error', 'th'] | False |
SecureTransportContext.check_hostname | (self, value) |
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
|
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
| def check_hostname(self, value):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
pass | [
"def",
"check_hostname",
"(",
"self",
",",
"value",
")",
":",
"pass"
] | [
811,
4
] | [
816,
12
] | python | en | ['en', 'error', 'th'] | False |
SecureTransportContext.set_alpn_protocols | (self, protocols) |
Sets the ALPN protocols that will later be set on the context.
Raises a NotImplementedError if ALPN is not supported.
|
Sets the ALPN protocols that will later be set on the context. | def set_alpn_protocols(self, protocols):
"""
Sets the ALPN protocols that will later be set on the context.
Raises a NotImplementedError if ALPN is not supported.
"""
if not hasattr(Security, "SSLSetALPNProtocols"):
raise NotImplementedError(
"SecureTransport supports ALPN only in macOS 10.12+"
)
self._alpn_protocols = [six.ensure_binary(p) for p in protocols] | [
"def",
"set_alpn_protocols",
"(",
"self",
",",
"protocols",
")",
":",
"if",
"not",
"hasattr",
"(",
"Security",
",",
"\"SSLSetALPNProtocols\"",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"SecureTransport supports ALPN only in macOS 10.12+\"",
")",
"self",
".",
"_alpn_protocols",
"=",
"[",
"six",
".",
"ensure_binary",
"(",
"p",
")",
"for",
"p",
"in",
"protocols",
"]"
] | [
878,
4
] | [
888,
72
] | python | en | ['en', 'error', 'th'] | False |
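six.ensure_binary simply guarantees a bytes result; a rough Python 3 stand-in for readers without six at hand (an approximation, not six's exact code):

def ensure_binary(s, encoding="utf-8"):
    # Pass bytes through unchanged, encode str, reject anything else.
    if isinstance(s, bytes):
        return s
    if isinstance(s, str):
        return s.encode(encoding)
    raise TypeError("not expecting type %r" % type(s))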
i16le | (c, o=0) |
Converts a 2-byte (16-bit) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
|
Converts a 2-byte (16-bit) string to an unsigned integer. | def i16le(c, o=0):
"""
Converts a 2-byte (16-bit) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<H", c, o)[0] | [
"def",
"i16le",
"(",
"c",
",",
"o",
"=",
"0",
")",
":",
"return",
"unpack_from",
"(",
"\"<H\"",
",",
"c",
",",
"o",
")",
"[",
"0",
"]"
] | [
29,
0
] | [
36,
37
] | python | en | ['en', 'error', 'th'] | False |
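A quick check of the little-endian unpacking, exercising i16le's struct format directly:

from struct import unpack_from

buf = b"\x01\x02\x03\x04"
assert unpack_from("<H", buf, 0)[0] == 0x0201  # i16le(buf) -> 513
assert unpack_from("<H", buf, 2)[0] == 0x0403  # i16le(buf, 2) -> 1027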
si16le | (c, o=0) |
Converts a 2-byte (16-bit) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
|
Converts a 2-byte (16-bit) string to a signed integer. | def si16le(c, o=0):
"""
Converts a 2-byte (16-bit) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<h", c, o)[0] | [
"def",
"si16le",
"(",
"c",
",",
"o",
"=",
"0",
")",
":",
"return",
"unpack_from",
"(",
"\"<h\"",
",",
"c",
",",
"o",
")",
"[",
"0",
"]"
] | [
39,
0
] | [
46,
37
] | python | en | ['en', 'error', 'th'] | False |
i32le | (c, o=0) |
Converts a 4-byte (32-bit) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
|
Converts a 4-byte (32-bit) string to an unsigned integer. | def i32le(c, o=0):
"""
Converts a 4-byte (32-bit) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<I", c, o)[0] | [
"def",
"i32le",
"(",
"c",
",",
"o",
"=",
"0",
")",
":",
"return",
"unpack_from",
"(",
"\"<I\"",
",",
"c",
",",
"o",
")",
"[",
"0",
"]"
] | [
49,
0
] | [
56,
37
] | python | en | ['en', 'error', 'th'] | False |
si32le | (c, o=0) |
Converts a 4-byte (32-bit) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
|
Converts a 4-byte (32-bit) string to a signed integer. | def si32le(c, o=0):
"""
Converts a 4-byte (32-bit) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<i", c, o)[0] | [
"def",
"si32le",
"(",
"c",
",",
"o",
"=",
"0",
")",
":",
"return",
"unpack_from",
"(",
"\"<i\"",
",",
"c",
",",
"o",
")",
"[",
"0",
"]"
] | [
59,
0
] | [
66,
37
] | python | en | ['en', 'error', 'th'] | False |
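The four helpers differ only in the width and signedness of the struct format; the same buffer illustrates all of them:

from struct import unpack_from

buf = b"\xff\xff\xff\xff"
assert unpack_from("<H", buf)[0] == 65535        # i16le: unsigned 16-bit
assert unpack_from("<h", buf)[0] == -1           # si16le: signed 16-bit
assert unpack_from("<I", buf)[0] == 4294967295   # i32le: unsigned 32-bit
assert unpack_from("<i", buf)[0] == -1           # si32le: signed 32-bit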
anomaly_detection | (features, labels, mode, params) | Custom Estimator model function for anomaly detection.
Given dictionary of feature tensors, labels tensor, Estimator mode, and
dictionary for parameters, return EstimatorSpec object for custom Estimator.
Args:
features: Dictionary of feature tensors.
labels: Labels tensor or None.
mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.
params: Dictionary of parameters.
Returns:
EstimatorSpec object.
| Custom Estimator model function for anomaly detection. | def anomaly_detection(features, labels, mode, params):
"""Custom Estimator model function for anomaly detection.
Given dictionary of feature tensors, labels tensor, Estimator mode, and
dictionary for parameters, return EstimatorSpec object for custom Estimator.
Args:
features: Dictionary of feature tensors.
labels: Labels tensor or None.
mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.
params: Dictionary of parameters.
Returns:
EstimatorSpec object.
"""
print("\nanomaly_detection: features = \n{}".format(features))
print("anomaly_detection: labels = \n{}".format(labels))
print("anomaly_detection: mode = \n{}".format(mode))
print("anomaly_detection: params = \n{}".format(params))
# Get input sequence tensor into correct shape
# Get dynamic batch size in case there was a partially filled batch
cur_batch_size = tf.shape(
input=features[params["feat_names"][0]], out_type=tf.int64)[0]
# Stack all of the features into a 3-D tensor
# shape = (cur_batch_size, seq_len, num_feat)
X = tf.stack(
values=[features[key] for key in params["feat_names"]], axis=2)
##############################################################################
# Important to note that flags determining which variables should be created
# need to remain the same through all stages or else they won't be in the
# checkpoint.
# Variables for calculating error distribution statistics
(abs_err_count_time_var,
abs_err_mean_time_var,
abs_err_cov_time_var,
abs_err_inv_cov_time_var,
abs_err_count_feat_var,
abs_err_mean_feat_var,
abs_err_cov_feat_var,
abs_err_inv_cov_feat_var) = create_both_mahalanobis_dist_vars(
seq_len=params["seq_len"], num_feat=params["num_feat"])
# Variables for automatically tuning anomaly thresh
if params["labeled_tune_thresh"]:
(tp_thresh_time_var,
fn_thresh_time_var,
fp_thresh_time_var,
tn_thresh_time_var,
tp_thresh_feat_var,
fn_thresh_feat_var,
fp_thresh_feat_var,
tn_thresh_feat_var) = create_both_confusion_matrix_thresh_vars(
scope="mahalanobis_dist_thresh_vars",
time_thresh_size=[params["num_time_anom_thresh"]],
feat_thresh_size=[params["num_feat_anom_thresh"]])
else:
(count_thresh_time_var,
mean_thresh_time_var,
var_thresh_time_var,
count_thresh_feat_var,
mean_thresh_feat_var,
var_thresh_feat_var) = create_both_mahalanobis_unsupervised_thresh_vars(
scope="mahalanobis_dist_thresh_vars")
with tf.variable_scope(
name_or_scope="mahalanobis_dist_thresh_vars", reuse=tf.AUTO_REUSE):
time_anom_thresh_var = tf.get_variable(
name="time_anom_thresh_var",
dtype=tf.float64,
initializer=tf.zeros(shape=[], dtype=tf.float64),
trainable=False)
feat_anom_thresh_var = tf.get_variable(
name="feat_anom_thresh_var",
dtype=tf.float64,
initializer=tf.zeros(shape=[], dtype=tf.float64),
trainable=False)
# Variables for tuning anomaly thresh evaluation
if params["labeled_tune_thresh"]:
(tp_thresh_eval_time_var,
fn_thresh_eval_time_var,
fp_thresh_eval_time_var,
tn_thresh_eval_time_var,
tp_thresh_eval_feat_var,
fn_thresh_eval_feat_var,
fp_thresh_eval_feat_var,
tn_thresh_eval_feat_var) = create_both_confusion_matrix_thresh_vars(
scope="anom_thresh_eval_vars",
time_thresh_size=[],
feat_thresh_size=[])
# Create dummy variable for graph dependency requiring a gradient for TRAIN
dummy_var = tf.get_variable(
name="dummy_var",
dtype=tf.float64,
initializer=tf.zeros(shape=[], dtype=tf.float64),
trainable=True)
################################################################################
predictions_dict = None
loss = None
train_op = None
eval_metric_ops = None
export_outputs = None
# Now branch off based on which mode we are in
# Call specific model
model_functions = {
"dense_autoencoder": dense_autoencoder_model,
"lstm_enc_dec_autoencoder": lstm_enc_dec_autoencoder_model,
"pca": pca_model}
# Get function pointer for selected model type
model_function = model_functions[params["model_type"]]
# Build selected model
loss, train_op, X_time_orig, X_time_recon, X_feat_orig, X_feat_recon = \
model_function(X, mode, params, cur_batch_size, dummy_var)
if not (mode == tf.estimator.ModeKeys.TRAIN and
params["training_mode"] == "reconstruction"):
# shape = (cur_batch_size * seq_len, num_feat)
X_time_abs_recon_err = tf.abs(
x=X_time_orig - X_time_recon)
# Features based
# shape = (cur_batch_size * num_feat, seq_len)
X_feat_abs_recon_err = tf.abs(
x=X_feat_orig - X_feat_recon)
if (mode == tf.estimator.ModeKeys.TRAIN and
params["training_mode"] == "calculate_error_distribution_statistics"):
loss, train_op = calculate_error_distribution_statistics_training(
cur_batch_size,
X_time_abs_recon_err,
abs_err_count_time_var,
abs_err_mean_time_var,
abs_err_cov_time_var,
abs_err_inv_cov_time_var,
X_feat_abs_recon_err,
abs_err_count_feat_var,
abs_err_mean_feat_var,
abs_err_cov_feat_var,
abs_err_inv_cov_feat_var,
params,
dummy_var)
elif (mode == tf.estimator.ModeKeys.EVAL and
params["training_mode"] != "tune_anomaly_thresholds"):
loss, eval_metric_ops = reconstruction_evaluation(
X_time_orig, X_time_recon, params["training_mode"])
elif (mode == tf.estimator.ModeKeys.PREDICT or
((mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL) and
params["training_mode"] == "tune_anomaly_thresholds")):
with tf.variable_scope(
name_or_scope="mahalanobis_dist_vars", reuse=tf.AUTO_REUSE):
# Time based
# shape = (cur_batch_size, seq_len)
mahalanobis_dist_time = mahalanobis_dist(
err_vec=X_time_abs_recon_err,
mean_vec=abs_err_mean_time_var,
inv_cov=abs_err_inv_cov_time_var,
final_shape=params["seq_len"])
# Features based
# shape = (cur_batch_size, num_feat)
mahalanobis_dist_feat = mahalanobis_dist(
err_vec=X_feat_abs_recon_err,
mean_vec=abs_err_mean_feat_var,
inv_cov=abs_err_inv_cov_feat_var,
final_shape=params["num_feat"])
if mode != tf.estimator.ModeKeys.PREDICT:
if params["labeled_tune_thresh"]:
labels_norm_mask = tf.equal(x=labels, y=0)
labels_anom_mask = tf.equal(x=labels, y=1)
if mode == tf.estimator.ModeKeys.TRAIN:
loss, train_op = tune_anomaly_thresholds_supervised_training(
labels_norm_mask,
labels_anom_mask,
mahalanobis_dist_time,
tp_thresh_time_var,
fn_thresh_time_var,
fp_thresh_time_var,
tn_thresh_time_var,
time_anom_thresh_var,
mahalanobis_dist_feat,
tp_thresh_feat_var,
fn_thresh_feat_var,
fp_thresh_feat_var,
tn_thresh_feat_var,
feat_anom_thresh_var,
params,
mode,
dummy_var)
elif mode == tf.estimator.ModeKeys.EVAL:
loss, eval_metric_ops = tune_anomaly_thresholds_supervised_eval(
labels_norm_mask,
labels_anom_mask,
time_anom_thresh_var,
mahalanobis_dist_time,
tp_thresh_eval_time_var,
fn_thresh_eval_time_var,
fp_thresh_eval_time_var,
tn_thresh_eval_time_var,
feat_anom_thresh_var,
mahalanobis_dist_feat,
tp_thresh_eval_feat_var,
fn_thresh_eval_feat_var,
fp_thresh_eval_feat_var,
tn_thresh_eval_feat_var,
params,
mode)
else: # not params["labeled_tune_thresh"]
if mode == tf.estimator.ModeKeys.TRAIN:
loss, train_op = tune_anomaly_thresholds_unsupervised_training(
cur_batch_size,
time_anom_thresh_var,
mahalanobis_dist_time,
count_thresh_time_var,
mean_thresh_time_var,
var_thresh_time_var,
feat_anom_thresh_var,
mahalanobis_dist_feat,
count_thresh_feat_var,
mean_thresh_feat_var,
var_thresh_feat_var,
params,
dummy_var)
elif mode == tf.estimator.ModeKeys.EVAL:
loss, eval_metric_ops = tune_anomaly_thresholds_unsupervised_eval(
cur_batch_size,
time_anom_thresh_var,
mahalanobis_dist_time,
feat_anom_thresh_var,
mahalanobis_dist_feat)
else: # mode == tf.estimator.ModeKeys.PREDICT
predictions_dict, export_outputs = anomaly_detection_predictions(
cur_batch_size,
params["seq_len"],
params["num_feat"],
mahalanobis_dist_time,
mahalanobis_dist_feat,
time_anom_thresh_var,
feat_anom_thresh_var,
X_time_abs_recon_err,
X_feat_abs_recon_err)
# Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs) | [
"def",
"anomaly_detection",
"(",
"features",
",",
"labels",
",",
"mode",
",",
"params",
")",
":",
"print",
"(",
"\"\\nanomaly_detection: features = \\n{}\"",
".",
"format",
"(",
"features",
")",
")",
"print",
"(",
"\"anomaly_detection: labels = \\n{}\"",
".",
"format",
"(",
"labels",
")",
")",
"print",
"(",
"\"anomaly_detection: mode = \\n{}\"",
".",
"format",
"(",
"mode",
")",
")",
"print",
"(",
"\"anomaly_detection: params = \\n{}\"",
".",
"format",
"(",
"params",
")",
")",
"# Get input sequence tensor into correct shape",
"# Get dynamic batch size in case there was a partially filled batch",
"cur_batch_size",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"features",
"[",
"params",
"[",
"\"feat_names\"",
"]",
"[",
"0",
"]",
"]",
",",
"out_type",
"=",
"tf",
".",
"int64",
")",
"[",
"0",
"]",
"# Stack all of the features into a 3-D tensor",
"# shape = (cur_batch_size, seq_len, num_feat)",
"X",
"=",
"tf",
".",
"stack",
"(",
"values",
"=",
"[",
"features",
"[",
"key",
"]",
"for",
"key",
"in",
"params",
"[",
"\"feat_names\"",
"]",
"]",
",",
"axis",
"=",
"2",
")",
"##############################################################################",
"# Important to note that flags determining which variables should be created ",
"# need to remain the same through all stages or else they won't be in the",
"# checkpoint.",
"# Variables for calculating error distribution statistics",
"(",
"abs_err_count_time_var",
",",
"abs_err_mean_time_var",
",",
"abs_err_cov_time_var",
",",
"abs_err_inv_cov_time_var",
",",
"abs_err_count_feat_var",
",",
"abs_err_mean_feat_var",
",",
"abs_err_cov_feat_var",
",",
"abs_err_inv_cov_feat_var",
")",
"=",
"create_both_mahalanobis_dist_vars",
"(",
"seq_len",
"=",
"params",
"[",
"\"seq_len\"",
"]",
",",
"num_feat",
"=",
"params",
"[",
"\"num_feat\"",
"]",
")",
"# Variables for automatically tuning anomaly thresh",
"if",
"params",
"[",
"\"labeled_tune_thresh\"",
"]",
":",
"(",
"tp_thresh_time_var",
",",
"fn_thresh_time_var",
",",
"fp_thresh_time_var",
",",
"tn_thresh_time_var",
",",
"tp_thresh_feat_var",
",",
"fn_thresh_feat_var",
",",
"fp_thresh_feat_var",
",",
"tn_thresh_feat_var",
")",
"=",
"create_both_confusion_matrix_thresh_vars",
"(",
"scope",
"=",
"\"mahalanobis_dist_thresh_vars\"",
",",
"time_thresh_size",
"=",
"[",
"params",
"[",
"\"num_time_anom_thresh\"",
"]",
"]",
",",
"feat_thresh_size",
"=",
"[",
"params",
"[",
"\"num_feat_anom_thresh\"",
"]",
"]",
")",
"else",
":",
"(",
"count_thresh_time_var",
",",
"mean_thresh_time_var",
",",
"var_thresh_time_var",
",",
"count_thresh_feat_var",
",",
"mean_thresh_feat_var",
",",
"var_thresh_feat_var",
")",
"=",
"create_both_mahalanobis_unsupervised_thresh_vars",
"(",
"scope",
"=",
"\"mahalanobis_dist_thresh_vars\"",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name_or_scope",
"=",
"\"mahalanobis_dist_thresh_vars\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"time_anom_thresh_var",
"=",
"tf",
".",
"get_variable",
"(",
"name",
"=",
"\"time_anom_thresh_var\"",
",",
"dtype",
"=",
"tf",
".",
"float64",
",",
"initializer",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
",",
"trainable",
"=",
"False",
")",
"feat_anom_thresh_var",
"=",
"tf",
".",
"get_variable",
"(",
"name",
"=",
"\"feat_anom_thresh_var\"",
",",
"dtype",
"=",
"tf",
".",
"float64",
",",
"initializer",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
",",
"trainable",
"=",
"False",
")",
"# Variables for tuning anomaly thresh evaluation",
"if",
"params",
"[",
"\"labeled_tune_thresh\"",
"]",
":",
"(",
"tp_thresh_eval_time_var",
",",
"fn_thresh_eval_time_var",
",",
"fp_thresh_eval_time_var",
",",
"tn_thresh_eval_time_var",
",",
"tp_thresh_eval_feat_var",
",",
"fn_thresh_eval_feat_var",
",",
"fp_thresh_eval_feat_var",
",",
"tn_thresh_eval_feat_var",
")",
"=",
"create_both_confusion_matrix_thresh_vars",
"(",
"scope",
"=",
"\"anom_thresh_eval_vars\"",
",",
"time_thresh_size",
"=",
"[",
"]",
",",
"feat_thresh_size",
"=",
"[",
"]",
")",
"# Create dummy variable for graph dependency requiring a gradient for TRAIN",
"dummy_var",
"=",
"tf",
".",
"get_variable",
"(",
"name",
"=",
"\"dummy_var\"",
",",
"dtype",
"=",
"tf",
".",
"float64",
",",
"initializer",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
",",
"trainable",
"=",
"True",
")",
"################################################################################",
"predictions_dict",
"=",
"None",
"loss",
"=",
"None",
"train_op",
"=",
"None",
"eval_metric_ops",
"=",
"None",
"export_outputs",
"=",
"None",
"# Now branch off based on which mode we are in",
"# Call specific model",
"model_functions",
"=",
"{",
"\"dense_autoencoder\"",
":",
"dense_autoencoder_model",
",",
"\"lstm_enc_dec_autoencoder\"",
":",
"lstm_enc_dec_autoencoder_model",
",",
"\"pca\"",
":",
"pca_model",
"}",
"# Get function pointer for selected model type",
"model_function",
"=",
"model_functions",
"[",
"params",
"[",
"\"model_type\"",
"]",
"]",
"# Build selected model",
"loss",
",",
"train_op",
",",
"X_time_orig",
",",
"X_time_recon",
",",
"X_feat_orig",
",",
"X_feat_recon",
"=",
"model_function",
"(",
"X",
",",
"mode",
",",
"params",
",",
"cur_batch_size",
",",
"dummy_var",
")",
"if",
"not",
"(",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
"and",
"params",
"[",
"\"training_mode\"",
"]",
"==",
"\"reconstruction\"",
")",
":",
"# shape = (cur_batch_size * seq_len, num_feat)",
"X_time_abs_recon_err",
"=",
"tf",
".",
"abs",
"(",
"x",
"=",
"X_time_orig",
"-",
"X_time_recon",
")",
"# Features based",
"# shape = (cur_batch_size * num_feat, seq_len)",
"X_feat_abs_recon_err",
"=",
"tf",
".",
"abs",
"(",
"x",
"=",
"X_feat_orig",
"-",
"X_feat_recon",
")",
"if",
"(",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
"and",
"params",
"[",
"\"training_mode\"",
"]",
"==",
"\"calculate_error_distribution_statistics\"",
")",
":",
"loss",
",",
"train_op",
"=",
"calculate_error_distribution_statistics_training",
"(",
"cur_batch_size",
",",
"X_time_abs_recon_err",
",",
"abs_err_count_time_var",
",",
"abs_err_mean_time_var",
",",
"abs_err_cov_time_var",
",",
"abs_err_inv_cov_time_var",
",",
"X_feat_abs_recon_err",
",",
"abs_err_count_feat_var",
",",
"abs_err_mean_feat_var",
",",
"abs_err_cov_feat_var",
",",
"abs_err_inv_cov_feat_var",
",",
"params",
",",
"dummy_var",
")",
"elif",
"(",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
"and",
"params",
"[",
"\"training_mode\"",
"]",
"!=",
"\"tune_anomaly_thresholds\"",
")",
":",
"loss",
",",
"eval_metric_ops",
"=",
"reconstruction_evaluation",
"(",
"X_time_orig",
",",
"X_time_recon",
",",
"params",
"[",
"\"training_mode\"",
"]",
")",
"elif",
"(",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"PREDICT",
"or",
"(",
"(",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
"or",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
")",
"and",
"params",
"[",
"\"training_mode\"",
"]",
"==",
"\"tune_anomaly_thresholds\"",
")",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name_or_scope",
"=",
"\"mahalanobis_dist_vars\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"# Time based",
"# shape = (cur_batch_size, seq_len)",
"mahalanobis_dist_time",
"=",
"mahalanobis_dist",
"(",
"err_vec",
"=",
"X_time_abs_recon_err",
",",
"mean_vec",
"=",
"abs_err_mean_time_var",
",",
"inv_cov",
"=",
"abs_err_inv_cov_time_var",
",",
"final_shape",
"=",
"params",
"[",
"\"seq_len\"",
"]",
")",
"# Features based",
"# shape = (cur_batch_size, num_feat)",
"mahalanobis_dist_feat",
"=",
"mahalanobis_dist",
"(",
"err_vec",
"=",
"X_feat_abs_recon_err",
",",
"mean_vec",
"=",
"abs_err_mean_feat_var",
",",
"inv_cov",
"=",
"abs_err_inv_cov_feat_var",
",",
"final_shape",
"=",
"params",
"[",
"\"num_feat\"",
"]",
")",
"if",
"mode",
"!=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"PREDICT",
":",
"if",
"params",
"[",
"\"labeled_tune_thresh\"",
"]",
":",
"labels_norm_mask",
"=",
"tf",
".",
"equal",
"(",
"x",
"=",
"labels",
",",
"y",
"=",
"0",
")",
"labels_anom_mask",
"=",
"tf",
".",
"equal",
"(",
"x",
"=",
"labels",
",",
"y",
"=",
"1",
")",
"if",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"loss",
",",
"train_op",
"=",
"tune_anomaly_thresholds_supervised_training",
"(",
"labels_norm_mask",
",",
"labels_anom_mask",
",",
"mahalanobis_dist_time",
",",
"tp_thresh_time_var",
",",
"fn_thresh_time_var",
",",
"fp_thresh_time_var",
",",
"tn_thresh_time_var",
",",
"time_anom_thresh_var",
",",
"mahalanobis_dist_feat",
",",
"tp_thresh_feat_var",
",",
"fn_thresh_feat_var",
",",
"fp_thresh_feat_var",
",",
"tn_thresh_feat_var",
",",
"feat_anom_thresh_var",
",",
"params",
",",
"mode",
",",
"dummy_var",
")",
"elif",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
":",
"loss",
",",
"eval_metric_ops",
"=",
"tune_anomaly_thresholds_supervised_eval",
"(",
"labels_norm_mask",
",",
"labels_anom_mask",
",",
"time_anom_thresh_var",
",",
"mahalanobis_dist_time",
",",
"tp_thresh_eval_time_var",
",",
"fn_thresh_eval_time_var",
",",
"fp_thresh_eval_time_var",
",",
"tn_thresh_eval_time_var",
",",
"feat_anom_thresh_var",
",",
"mahalanobis_dist_feat",
",",
"tp_thresh_eval_feat_var",
",",
"fn_thresh_eval_feat_var",
",",
"fp_thresh_eval_feat_var",
",",
"tn_thresh_eval_feat_var",
",",
"params",
",",
"mode",
")",
"else",
":",
"# not params[\"labeled_tune_thresh\"]",
"if",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"loss",
",",
"train_op",
"=",
"tune_anomaly_thresholds_unsupervised_training",
"(",
"cur_batch_size",
",",
"time_anom_thresh_var",
",",
"mahalanobis_dist_time",
",",
"count_thresh_time_var",
",",
"mean_thresh_time_var",
",",
"var_thresh_time_var",
",",
"feat_anom_thresh_var",
",",
"mahalanobis_dist_feat",
",",
"count_thresh_feat_var",
",",
"mean_thresh_feat_var",
",",
"var_thresh_feat_var",
",",
"params",
",",
"dummy_var",
")",
"elif",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
":",
"loss",
",",
"eval_metric_ops",
"=",
"tune_anomaly_thresholds_unsupervised_eval",
"(",
"cur_batch_size",
",",
"time_anom_thresh_var",
",",
"mahalanobis_dist_time",
",",
"feat_anom_thresh_var",
",",
"mahalanobis_dist_feat",
")",
"else",
":",
"# mode == tf.estimator.ModeKeys.PREDICT",
"predictions_dict",
",",
"export_outputs",
"=",
"anomaly_detection_predictions",
"(",
"cur_batch_size",
",",
"params",
"[",
"\"seq_len\"",
"]",
",",
"params",
"[",
"\"num_feat\"",
"]",
",",
"mahalanobis_dist_time",
",",
"mahalanobis_dist_feat",
",",
"time_anom_thresh_var",
",",
"feat_anom_thresh_var",
",",
"X_time_abs_recon_err",
",",
"X_feat_abs_recon_err",
")",
"# Return EstimatorSpec",
"return",
"tf",
".",
"estimator",
".",
"EstimatorSpec",
"(",
"mode",
"=",
"mode",
",",
"predictions",
"=",
"predictions_dict",
",",
"loss",
"=",
"loss",
",",
"train_op",
"=",
"train_op",
",",
"eval_metric_ops",
"=",
"eval_metric_ops",
",",
"export_outputs",
"=",
"export_outputs",
")"
] | [
19,
0
] | [
283,
36
] | python | en | ['es', 'no', 'en'] | False |
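The mahalanobis_dist helper called in the tune/predict branches is not shown in this excerpt; the core computation it performs -- the distance of each reconstruction-error row from the running error mean, scaled by the inverse covariance -- can be sketched in plain NumPy (the signature and the reshape to final_shape are assumptions about the helper, not its exact code):

import numpy as np

def mahalanobis_dist(err_vec, mean_vec, inv_cov, final_shape):
    # err_vec: (rows, dims); mean_vec: (dims,); inv_cov: (dims, dims).
    centered = err_vec - mean_vec
    # Row-wise sqrt(c @ inv_cov @ c^T), then regroup rows per example.
    dist = np.sqrt(np.einsum("ri,ij,rj->r", centered, inv_cov, centered))
    return dist.reshape(-1, final_shape)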
BprMF.__init__ | (self, train_file=None, test_file=None, output_file=None, factors=10, learn_rate=0.05, epochs=30,
batch_size=0, rank_length=10, init_mean=0, init_stdev=0.1, reg_u=0.0025, reg_i=0.0025,
reg_j=0.00025, reg_bias=0, sep='\t', output_sep='\t', random_seed=None, items_test=False) |
BPRMF for Item Recommendation
BPR reduces ranking to pairwise classification. The different variants (settings) of this recommender
roughly optimize the area under the ROC curve (AUC).
Usage::
>> BprMF(train, test).compute()
>> BprMF(train, test, batch_size=30).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: Path of the file to write the final predictions
:type output_file: str, default None
:param factors: Number of latent factors per user/item
:type factors: int, default 10
:param learn_rate: Learning rate (alpha)
:type learn_rate: float, default 0.05
:param epochs: Number of epochs over the training data
:type epochs: int, default 30
:param batch_size: Reduce number of interactions in each epoch; if 0, use the number of positive
interactions in the train set
:type batch_size: int, default 0
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
:param init_mean: Mean of the normal distribution used to initialize the latent factors
:type init_mean: float, default 0
:param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
:type init_stdev: float, default 0.1
:param reg_u: Regularization parameter for user factors
:type reg_u: float, default 0.0025
:param reg_i: Regularization parameter for positive item factors
:type reg_i: float, default 0.0025
:param reg_j: Regularization parameter for negative item factors
:type reg_j: float, default 0.00025
:param reg_bias: Regularization parameter for the bias term
:type reg_bias: default 0
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Seed number; locks random numbers for reproducibility of experiments.
:type random_seed: int, default None
:param items_test: If True, update unobserved set of each user with samples in the test set
:type items_test: bool, default False
|
BPRMF for Item Recommendation | def __init__(self, train_file=None, test_file=None, output_file=None, factors=10, learn_rate=0.05, epochs=30,
batch_size=0, rank_length=10, init_mean=0, init_stdev=0.1, reg_u=0.0025, reg_i=0.0025,
reg_j=0.00025, reg_bias=0, sep='\t', output_sep='\t', random_seed=None, items_test=False):
"""
BPRMF for Item Recommendation
BPR reduces ranking to pairwise classification. The different variants (settings) of this recommender
roughly optimize the area under the ROC curve (AUC).
Usage::
>> BprMF(train, test).compute()
>> BprMF(train, test, batch_size=30).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: Path of the file to write the final predictions
:type output_file: str, default None
:param factors: Number of latent factors per user/item
:type factors: int, default 10
:param learn_rate: Learning rate (alpha)
:type learn_rate: float, default 0.05
:param epochs: Number of epochs over the training data
:type epochs: int, default 30
:param batch_size: Reduce number of interactions in each epoch; if 0, use the number of positive
interactions in the train set
:type batch_size: int, default 0
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
:param init_mean: Mean of the normal distribution used to initialize the latent factors
:type init_mean: float, default 0
:param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
:type init_stdev: float, default 0.1
:param reg_u: Regularization parameter for user factors
:type reg_u: float, default 0.0025
:param reg_i: Regularization parameter for positive item factors
:type reg_i: float, default 0.0025
:param reg_j: Regularization parameter for negative item factors
:type reg_j: float, default 0.00025
:param reg_bias: Regularization parameter for the bias term
:type reg_bias: default 0
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Seed number; locks random numbers for reproducibility of experiments.
:type random_seed: int, default None
:param items_test: If True, update unobserved set of each user with samples in the test set
:type items_test: bool, default False
"""
super(BprMF, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
rank_length=rank_length, sep=sep, output_sep=output_sep)
self.recommender_name = 'BPRMF'
self.factors = factors
self.learn_rate = learn_rate
self.epochs = epochs
self.batch_size = batch_size
self.init_mean = init_mean
self.init_stdev = init_stdev
self.reg_bias = reg_bias
self.reg_u = reg_u
self.reg_i = reg_i
self.reg_j = reg_j
self.items_test = items_test
if random_seed is not None:
np.random.seed(random_seed)
random.seed(random_seed)
# internal vars
self.p = None
self.q = None
self.bias = None
self.num_interactions = None | [
"def",
"__init__",
"(",
"self",
",",
"train_file",
"=",
"None",
",",
"test_file",
"=",
"None",
",",
"output_file",
"=",
"None",
",",
"factors",
"=",
"10",
",",
"learn_rate",
"=",
"0.05",
",",
"epochs",
"=",
"30",
",",
"batch_size",
"=",
"0",
",",
"rank_length",
"=",
"10",
",",
"init_mean",
"=",
"0",
",",
"init_stdev",
"=",
"0.1",
",",
"reg_u",
"=",
"0.0025",
",",
"reg_i",
"=",
"0.0025",
",",
"reg_j",
"=",
"0.00025",
",",
"reg_bias",
"=",
"0",
",",
"sep",
"=",
"'\\t'",
",",
"output_sep",
"=",
"'\\t'",
",",
"random_seed",
"=",
"None",
",",
"items_test",
"=",
"False",
")",
":",
"super",
"(",
"BprMF",
",",
"self",
")",
".",
"__init__",
"(",
"train_file",
"=",
"train_file",
",",
"test_file",
"=",
"test_file",
",",
"output_file",
"=",
"output_file",
",",
"rank_length",
"=",
"rank_length",
",",
"sep",
"=",
"sep",
",",
"output_sep",
"=",
"output_sep",
")",
"self",
".",
"recommender_name",
"=",
"'BPRMF'",
"self",
".",
"factors",
"=",
"factors",
"self",
".",
"learn_rate",
"=",
"learn_rate",
"self",
".",
"epochs",
"=",
"epochs",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"init_mean",
"=",
"init_mean",
"self",
".",
"init_stdev",
"=",
"init_stdev",
"self",
".",
"reg_bias",
"=",
"reg_bias",
"self",
".",
"reg_u",
"=",
"reg_u",
"self",
".",
"reg_i",
"=",
"reg_i",
"self",
".",
"reg_j",
"=",
"reg_j",
"self",
".",
"items_test",
"=",
"items_test",
"if",
"random_seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
"random_seed",
")",
"random",
".",
"seed",
"(",
"random_seed",
")",
"# internal vars",
"self",
".",
"p",
"=",
"None",
"self",
".",
"q",
"=",
"None",
"self",
".",
"bias",
"=",
"None",
"self",
".",
"num_interactions",
"=",
"None"
] | [
25,
4
] | [
123,
36
] | python | en | ['en', 'error', 'th'] | False |
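For reference, the criterion these hyperparameters tune is BPR-Opt (Rendle et al.), the regularized log-likelihood of ranking each observed item i above each unobserved item j; the lambda terms correspond to reg_u, reg_i, reg_j, and reg_bias:

\max_{\Theta} \sum_{(u,i,j)} \ln \sigma(\hat{x}_{ui} - \hat{x}_{uj}) - \lambda_{\Theta} \lVert \Theta \rVert^2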
BprMF.init_model | (self) |
Method to prepare and initialize the model
|
Method to prepare and initialize the model | def init_model(self):
"""
Method to prepare and initialize the model
"""
# Update unobserved items with test set samples
if self.items_test:
for u, user in enumerate(self.users):
self.train_set['items_unobserved'][user] = list(set(self.items) -
set(self.train_set['items_seen_by_user'][user]))
# Initialize factors
self.create_factors()
# Define number of interactions in each epoch
if self.batch_size <= 0:
self.num_interactions = self.train_set['number_interactions']
else:
self.num_interactions = int(self.train_set['number_interactions'] / self.batch_size) + 1 | [
"def",
"init_model",
"(",
"self",
")",
":",
"# Upgrade unobserved items with test set samples",
"if",
"self",
".",
"items_test",
":",
"for",
"u",
",",
"user",
"in",
"enumerate",
"(",
"self",
".",
"users",
")",
":",
"self",
".",
"train_set",
"[",
"'items_unobserved'",
"]",
"[",
"user",
"]",
"=",
"list",
"(",
"set",
"(",
"self",
".",
"items",
")",
"-",
"set",
"(",
"self",
".",
"train_set",
"[",
"'items_seen_by_user'",
"]",
"[",
"user",
"]",
")",
")",
"# Initialize factors",
"self",
".",
"create_factors",
"(",
")",
"# Define number of interactions in each epoch",
"if",
"self",
".",
"batch_size",
"<=",
"0",
":",
"self",
".",
"num_interactions",
"=",
"self",
".",
"train_set",
"[",
"'number_interactions'",
"]",
"else",
":",
"self",
".",
"num_interactions",
"=",
"int",
"(",
"self",
".",
"train_set",
"[",
"'number_interactions'",
"]",
"/",
"self",
".",
"batch_size",
")",
"+",
"1"
] | [
125,
4
] | [
144,
100
] | python | en | ['en', 'error', 'th'] | False |
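The unobserved-item bookkeeping above is a plain per-user set difference; a tiny illustration with made-up data:

items = {"i1", "i2", "i3", "i4"}
items_seen_by_user = {"u1": {"i1", "i3"}}
items_unobserved = {u: list(items - seen)
                    for u, seen in items_seen_by_user.items()}
# items_unobserved == {'u1': ['i2', 'i4']} (list order is unspecified)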
BprMF.fit | (self) |
This method performs iterations of stochastic gradient ascent over the training data. One iteration samples
as many (user, item) pairs as there are positive entries in the training matrix when batch size is 0;
otherwise the number of positive entries is divided by the batch size (see init_model).
|
This method performs iterations of stochastic gradient ascent over the training data. One iteration samples
as many (user, item) pairs as there are positive entries in the training matrix when batch size is 0;
otherwise the number of positive entries is divided by the batch size (see init_model). | def fit(self):
"""
This method performs iterations of stochastic gradient ascent over the training data. One iteration samples
as many (user, item) pairs as there are positive entries in the training matrix when batch size is 0;
otherwise the number of positive entries is divided by the batch size (see init_model).
"""
for n in range(self.epochs):
random_users = random.choices(self.train_set['users'], k=self.num_interactions)
for user in random_users:
i, j = self.sample_pair(user)
self.update_factors(self.user_to_user_id[user], self.item_to_item_id[i], self.item_to_item_id[j]) | [
"def",
"fit",
"(",
"self",
")",
":",
"for",
"n",
"in",
"range",
"(",
"self",
".",
"epochs",
")",
":",
"random_users",
"=",
"random",
".",
"choices",
"(",
"self",
".",
"train_set",
"[",
"'users'",
"]",
",",
"k",
"=",
"self",
".",
"num_interactions",
")",
"for",
"user",
"in",
"random_users",
":",
"i",
",",
"j",
"=",
"self",
".",
"sample_pair",
"(",
"user",
")",
"self",
".",
"update_factors",
"(",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
",",
"self",
".",
"item_to_item_id",
"[",
"i",
"]",
",",
"self",
".",
"item_to_item_id",
"[",
"j",
"]",
")"
] | [
146,
4
] | [
158,
113
] | python | en | ['en', 'error', 'th'] | False |
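Note that random.choices (Python 3.6+) samples WITH replacement, so the same user can be drawn several times within one epoch:

import random

users = ["u1", "u2", "u3"]
batch = random.choices(users, k=5)  # sampling with replacement
# len(batch) == 5, and a user may repeat within batch.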
BprMF.create_factors | (self) |
This method creates the factors for users, items, and bias
|
This method creates the factors for users, items, and bias | def create_factors(self):
"""
This method creates the factors for users, items, and bias
"""
self.p = np.random.normal(self.init_mean, self.init_stdev, (len(self.users), self.factors))
self.q = np.random.normal(self.init_mean, self.init_stdev, (len(self.items), self.factors))
self.bias = np.zeros(len(self.items), np.double) | [
"def",
"create_factors",
"(",
"self",
")",
":",
"self",
".",
"p",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"self",
".",
"init_mean",
",",
"self",
".",
"init_stdev",
",",
"(",
"len",
"(",
"self",
".",
"users",
")",
",",
"self",
".",
"factors",
")",
")",
"self",
".",
"q",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"self",
".",
"init_mean",
",",
"self",
".",
"init_stdev",
",",
"(",
"len",
"(",
"self",
".",
"items",
")",
",",
"self",
".",
"factors",
")",
")",
"self",
".",
"bias",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"self",
".",
"items",
")",
",",
"np",
".",
"double",
")"
] | [
160,
4
] | [
168,
56
] | python | en | ['en', 'error', 'th'] | False |
BprMF.sample_pair | (self, user) |
Randomly selects a known and an unknown item for a particular user.
:param user: User to generate pairs
:type user: int
:return: known item, unknown item
|
Randomly selects a known and an unknown item for a particular user. | def sample_pair(self, user):
"""
Randomly selects a known and an unknown item for a particular user.
:param user: User to generate pairs
:type user: int
:return: known item, unknown item
"""
return random.choice(list(self.train_set['items_seen_by_user'][user])), random.choice(
self.train_set['items_unobserved'][user]) | [
"def",
"sample_pair",
"(",
"self",
",",
"user",
")",
":",
"return",
"random",
".",
"choice",
"(",
"list",
"(",
"self",
".",
"train_set",
"[",
"'items_seen_by_user'",
"]",
"[",
"user",
"]",
")",
")",
",",
"random",
".",
"choice",
"(",
"self",
".",
"train_set",
"[",
"'items_unobserved'",
"]",
"[",
"user",
"]",
")"
] | [
170,
4
] | [
181,
53
] | python | en | ['en', 'error', 'th'] | False |
BprMF.predict_score | (self, user, item) |
Method to predict a single score for a pair (user, item)
:param user: User ID
:type user: int
:param item: Item ID
:type item: int
:return: Score generated for the pair (user, item)
:rtype: float
|
Method to predict a single score for a pair (user, item) | def predict_score(self, user, item):
"""
Method to predict a single score for a pair (user, item)
:param user: User ID
:type user: int
:param item: Item ID
:type item: int
:return: Score generated for the pair (user, item)
:rtype: float
"""
return np.dot(self.p[user], self.q[item]) | [
"def",
"predict_score",
"(",
"self",
",",
"user",
",",
"item",
")",
":",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"p",
"[",
"user",
"]",
",",
"self",
".",
"q",
"[",
"item",
"]",
")"
] | [
183,
4
] | [
198,
49
] | python | en | ['en', 'error', 'th'] | False |
BprMF.update_factors | (self, u, i, j) |
Update latent factors according to the stochastic gradient descent update rule
:param u: User ID for update
:type u: int
:param i: Known Item ID
:type i: int
:param j: Unknown Item ID
:type j: int
|
Update latent factors according to the stochastic gradient descent update rule | def update_factors(self, u, i, j):
"""
Update latent factors according to the stochastic gradient descent update rule
:param u: User ID for update
:type u: int
:param i: Known Item ID
:type i: int
:param j: Unknown Item ID
:type j: int
"""
# Compute Difference
x_uij = self.bias[i] - self.bias[j] + (self.predict_score(u, i) - self.predict_score(u, j))
eps = 1 / (1 + np.exp(x_uij))
self.bias[i] += self.learn_rate * (eps - self.reg_bias * self.bias[i])
self.bias[j] += self.learn_rate * (-eps - self.reg_bias * self.bias[j])
# Adjust the factors
u_f = self.p[u]
i_f = self.q[i]
j_f = self.q[j]
# Compute and apply factor updates
self.p[u] += self.learn_rate * ((i_f - j_f) * eps - self.reg_u * u_f)
self.q[i] += self.learn_rate * (u_f * eps - self.reg_i * i_f)
self.q[j] += self.learn_rate * (-u_f * eps - self.reg_j * j_f) | [
"def",
"update_factors",
"(",
"self",
",",
"u",
",",
"i",
",",
"j",
")",
":",
"# Compute Difference",
"x_uij",
"=",
"self",
".",
"bias",
"[",
"i",
"]",
"-",
"self",
".",
"bias",
"[",
"j",
"]",
"+",
"(",
"self",
".",
"predict_score",
"(",
"u",
",",
"i",
")",
"-",
"self",
".",
"predict_score",
"(",
"u",
",",
"j",
")",
")",
"eps",
"=",
"1",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"x_uij",
")",
")",
"self",
".",
"bias",
"[",
"i",
"]",
"+=",
"self",
".",
"learn_rate",
"*",
"(",
"eps",
"-",
"self",
".",
"reg_bias",
"*",
"self",
".",
"bias",
"[",
"i",
"]",
")",
"self",
".",
"bias",
"[",
"j",
"]",
"+=",
"self",
".",
"learn_rate",
"*",
"(",
"-",
"eps",
"-",
"self",
".",
"reg_bias",
"*",
"self",
".",
"bias",
"[",
"j",
"]",
")",
"# Adjust the factors",
"u_f",
"=",
"self",
".",
"p",
"[",
"u",
"]",
"i_f",
"=",
"self",
".",
"q",
"[",
"i",
"]",
"j_f",
"=",
"self",
".",
"q",
"[",
"j",
"]",
"# Compute and apply factor updates",
"self",
".",
"p",
"[",
"u",
"]",
"+=",
"self",
".",
"learn_rate",
"*",
"(",
"(",
"i_f",
"-",
"j_f",
")",
"*",
"eps",
"-",
"self",
".",
"reg_u",
"*",
"u_f",
")",
"self",
".",
"q",
"[",
"i",
"]",
"+=",
"self",
".",
"learn_rate",
"*",
"(",
"u_f",
"*",
"eps",
"-",
"self",
".",
"reg_i",
"*",
"i_f",
")",
"self",
".",
"q",
"[",
"j",
"]",
"+=",
"self",
".",
"learn_rate",
"*",
"(",
"-",
"u_f",
"*",
"eps",
"-",
"self",
".",
"reg_j",
"*",
"j_f",
")"
] | [
200,
4
] | [
229,
70
] | python | en | ['en', 'error', 'th'] | False |
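The update above is one step of stochastic gradient ascent on the BPR criterion; eps is the derivative of ln sigmoid(x) evaluated at x_uij. A self-contained NumPy sketch of a single step (unlike the in-place NumPy views in the class, this version snapshots the old factors explicitly, matching the written intent of the comments):

import numpy as np

def bpr_step(p_u, q_i, q_j, b_i, b_j, lr, reg_u, reg_i, reg_j, reg_b):
    x_uij = b_i - b_j + p_u @ (q_i - q_j)  # margin of seen i over unseen j
    eps = 1.0 / (1.0 + np.exp(x_uij))      # d/dx ln sigmoid(x) at x_uij
    b_i += lr * (eps - reg_b * b_i)
    b_j += lr * (-eps - reg_b * b_j)
    u_f, i_f, j_f = p_u.copy(), q_i.copy(), q_j.copy()
    p_u = p_u + lr * ((i_f - j_f) * eps - reg_u * u_f)
    q_i = q_i + lr * (u_f * eps - reg_i * i_f)
    q_j = q_j + lr * (-u_f * eps - reg_j * j_f)
    return p_u, q_i, q_j, b_i, b_j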
BprMF.predict | (self) |
This method predicts the final result, building a rank for each user in the train set.
|
This method predicts the final result, building a rank for each user in the train set. | def predict(self):
"""
This method predicts the final result, building a rank for each user in the train set.
"""
w = self.bias.T + np.dot(self.p, self.q.T)
for u, user in enumerate(self.users):
partial_ranking = list()
candidate_items = sorted(range(len(w[u])), key=lambda k: w[u][k], reverse=True)
for i in candidate_items:
item = self.item_id_to_item[i]
if item not in self.train_set['items_seen_by_user'].get(user, self.items):
partial_ranking.append((user, item, w[u][i]))
if len(partial_ranking) == self.rank_length:
break
self.ranking += partial_ranking | [
"def",
"predict",
"(",
"self",
")",
":",
"w",
"=",
"self",
".",
"bias",
".",
"T",
"+",
"np",
".",
"dot",
"(",
"self",
".",
"p",
",",
"self",
".",
"q",
".",
"T",
")",
"for",
"u",
",",
"user",
"in",
"enumerate",
"(",
"self",
".",
"users",
")",
":",
"partial_ranking",
"=",
"list",
"(",
")",
"candidate_items",
"=",
"sorted",
"(",
"range",
"(",
"len",
"(",
"w",
"[",
"u",
"]",
")",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"w",
"[",
"u",
"]",
"[",
"k",
"]",
",",
"reverse",
"=",
"True",
")",
"for",
"i",
"in",
"candidate_items",
":",
"item",
"=",
"self",
".",
"item_id_to_item",
"[",
"i",
"]",
"if",
"item",
"not",
"in",
"self",
".",
"train_set",
"[",
"'items_seen_by_user'",
"]",
".",
"get",
"(",
"user",
",",
"self",
".",
"items",
")",
":",
"partial_ranking",
".",
"append",
"(",
"(",
"user",
",",
"item",
",",
"w",
"[",
"u",
"]",
"[",
"i",
"]",
")",
")",
"if",
"len",
"(",
"partial_ranking",
")",
"==",
"self",
".",
"rank_length",
":",
"break",
"self",
".",
"ranking",
"+=",
"partial_ranking"
] | [
231,
4
] | [
252,
43
] | python | en | ['en', 'error', 'th'] | False |
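The prediction step is a dense score matrix followed by a filtered argsort per user; a compact sketch for a single user (the helper name and arguments are illustrative):

import numpy as np

def top_n_unseen(scores_u, seen_items, id_to_item, n=10):
    # scores_u: 1-D array of scores over all internal item ids for one user.
    for i in np.argsort(-scores_u):      # best score first
        item = id_to_item[i]
        if item not in seen_items:       # skip items seen in training
            yield item, scores_u[i]
            n -= 1
            if n == 0:
                return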
BprMF.compute | (self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t') |
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for printed results (only works with verbose=True and as_table=True)
:type table_sep: str, default '\t'
|
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm | def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
"""
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for printed results (only works with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
super(BprMF, self).compute(verbose=verbose)
if verbose:
self.init_model()
print("training_time:: %4f sec" % timed(self.fit))
if self.extra_info_header is not None:
print(self.extra_info_header)
print("prediction_time:: %4f sec" % timed(self.predict))
print('\n')
else:
# Execute all in silence without prints
self.init_model()
self.fit()
self.predict()
self.write_ranking()
if self.test_file is not None:
self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep) | [
"def",
"compute",
"(",
"self",
",",
"verbose",
"=",
"True",
",",
"metrics",
"=",
"None",
",",
"verbose_evaluation",
"=",
"True",
",",
"as_table",
"=",
"False",
",",
"table_sep",
"=",
"'\\t'",
")",
":",
"super",
"(",
"BprMF",
",",
"self",
")",
".",
"compute",
"(",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"self",
".",
"init_model",
"(",
")",
"print",
"(",
"\"training_time:: %4f sec\"",
"%",
"timed",
"(",
"self",
".",
"fit",
")",
")",
"if",
"self",
".",
"extra_info_header",
"is",
"not",
"None",
":",
"print",
"(",
"self",
".",
"extra_info_header",
")",
"print",
"(",
"\"prediction_time:: %4f sec\"",
"%",
"timed",
"(",
"self",
".",
"predict",
")",
")",
"print",
"(",
"'\\n'",
")",
"else",
":",
"# Execute all in silence without prints",
"self",
".",
"init_model",
"(",
")",
"self",
".",
"fit",
"(",
")",
"self",
".",
"predict",
"(",
")",
"self",
".",
"write_ranking",
"(",
")",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"self",
".",
"evaluate",
"(",
"metrics",
",",
"verbose_evaluation",
",",
"as_table",
"=",
"as_table",
",",
"table_sep",
"=",
"table_sep",
")"
] | [
254,
4
] | [
296,
94
] | python | en | ['en', 'error', 'th'] | False |
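A minimal usage sketch for the compute entry point above. It assumes the CaseRecommender package layout and hypothetical train/test file paths; neither is stated in the record itself.

# Hedged sketch: assumes CaseRecommender's module layout and that BprMF
# accepts train/test file paths; 'u1.base' and 'u1.test' are placeholder
# file names, not files shipped with the library.
from caserec.recommenders.item_recommendation.bprmf import BprMF

model = BprMF(train_file='u1.base', test_file='u1.test', rank_length=10)
model.compute(verbose=True, metrics=['PREC', 'RECALL'], as_table=True)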
BaseReporter.starting | (self) | Called before the resolution actually starts. | Called before the resolution actually starts. | def starting(self):
"""Called before the resolution actually starts.""" | [
"def",
"starting",
"(",
"self",
")",
":"
] | [
3,
4
] | [
4,
59
] | python | en | ['en', 'en', 'en'] | True |
BaseReporter.starting_round | (self, index) | Called before each round of resolution starts.
The index is zero-based.
| Called before each round of resolution starts. | def starting_round(self, index):
"""Called before each round of resolution starts.
The index is zero-based.
""" | [
"def",
"starting_round",
"(",
"self",
",",
"index",
")",
":"
] | [
6,
4
] | [
10,
11
] | python | en | ['en', 'en', 'en'] | True |
BaseReporter.ending_round | (self, index, state) | Called before each round of resolution ends.
This is NOT called if the resolution ends at this round. Use `ending`
if you want to report finalization. The index is zero-based.
| Called before each round of resolution ends. | def ending_round(self, index, state):
"""Called before each round of resolution ends.
This is NOT called if the resolution ends at this round. Use `ending`
if you want to report finalization. The index is zero-based.
""" | [
"def",
"ending_round",
"(",
"self",
",",
"index",
",",
"state",
")",
":"
] | [
12,
4
] | [
17,
11
] | python | en | ['en', 'en', 'en'] | True |
BaseReporter.ending | (self, state) | Called before the resolution ends successfully. | Called before the resolution ends successfully. | def ending(self, state):
"""Called before the resolution ends successfully.""" | [
"def",
"ending",
"(",
"self",
",",
"state",
")",
":"
] | [
19,
4
] | [
20,
61
] | python | en | ['en', 'en', 'en'] | True |
BaseReporter.adding_requirement | (self, requirement, parent) | Called when adding a new requirement into the resolve criteria.
:param requirement: The additional requirement to be applied to filter
the available candidates.
:param parent: The candidate that requires ``requirement`` as a
dependency, or None if ``requirement`` is one of the root
requirements passed in from ``Resolver.resolve()``.
| Called when adding a new requirement into the resolve criteria. | def adding_requirement(self, requirement, parent):
"""Called when adding a new requirement into the resolve criteria.
:param requirement: The additional requirement to be applied to filter
the available candidates.
:param parent: The candidate that requires ``requirement`` as a
dependency, or None if ``requirement`` is one of the root
requirements passed in from ``Resolver.resolve()``.
""" | [
"def",
"adding_requirement",
"(",
"self",
",",
"requirement",
",",
"parent",
")",
":"
] | [
22,
4
] | [
30,
11
] | python | en | ['en', 'en', 'en'] | True |
BaseReporter.backtracking | (self, candidate) | Called when rejecting a candidate during backtracking. | Called when rejecting a candidate during backtracking. | def backtracking(self, candidate):
"""Called when rejecting a candidate during backtracking.""" | [
"def",
"backtracking",
"(",
"self",
",",
"candidate",
")",
":"
] | [
32,
4
] | [
33,
68
] | python | en | ['en', 'en', 'en'] | True |
BaseReporter.pinning | (self, candidate) | Called when adding a candidate to the potential solution. | Called when adding a candidate to the potential solution. | def pinning(self, candidate):
"""Called when adding a candidate to the potential solution.""" | [
"def",
"pinning",
"(",
"self",
",",
"candidate",
")",
":"
] | [
35,
4
] | [
36,
71
] | python | en | ['en', 'en', 'en'] | True |
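Taken together, the seven BaseReporter callbacks above define the resolver's progress-reporting interface. A minimal sketch of a concrete reporter, assuming the resolvelib package this class appears to come from:

# Sketch of a concrete reporter built on the callback interface documented
# above. Assumes resolvelib, which exports BaseReporter; adjust the import
# if the class lives elsewhere in a vendored copy.
from resolvelib import BaseReporter

class PrintingReporter(BaseReporter):
    def starting_round(self, index):
        print("starting round %d" % index)

    def pinning(self, candidate):
        print("pinned:", candidate)

    def backtracking(self, candidate):
        print("rejected while backtracking:", candidate)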
Clawler.save_img | (self, rtn_folpath=False) |
Create a folder and collect images into it.
:return: relative path of the created folder
|
Create a folder and collect images into it.
:return: relative path of the created folder
| def save_img(self, rtn_folpath=False):
"""
Create a folder and collect images into it.
:return: relative path of the created folder
"""
if self.urls:
super().save_img(self.urls)
rtn = self.made_imgdir if rtn_folpath else None
return rtn
else:
raise ex.CrawlingError | [
"def",
"save_img",
"(",
"self",
",",
"rtn_folpath",
"=",
"False",
")",
":",
"if",
"self",
".",
"urls",
":",
"super",
"(",
")",
".",
"save_img",
"(",
"self",
".",
"urls",
")",
"rtn",
"=",
"self",
".",
"made_imgdir",
"if",
"rtn_folpath",
"else",
"None",
"return",
"rtn",
"else",
":",
"raise",
"ex",
".",
"CrawlingError"
] | [
23,
4
] | [
33,
34
] | python | en | ['en', 'error', 'th'] | False |
Clawler.delete_datas_dir | (self) |
Delete everything under data/img.
:return: None
|
Delete everything under data/img.
:return: None
| def delete_datas_dir(self):
"""
Delete everything under data/img.
:return: None
"""
data_path = os.path.join('/'.join(inspect.stack()[0][1].split('/')[:-2]), 'data', 'img')
if os.path.exists(data_path):
shutil.rmtree(data_path)
os.makedirs(data_path, exist_ok=True)
print('init data stacks.') | [
"def",
"delete_datas_dir",
"(",
"self",
")",
":",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/'",
".",
"join",
"(",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"2",
"]",
")",
",",
"'data'",
",",
"'img'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"data_path",
")",
":",
"shutil",
".",
"rmtree",
"(",
"data_path",
")",
"os",
".",
"makedirs",
"(",
"data_path",
",",
"exist_ok",
"=",
"True",
")",
"print",
"(",
"'init data stacks.'",
")"
] | [
35,
4
] | [
44,
38
] | python | en | ['en', 'error', 'th'] | False |
Clawler.write_crawl_stat | (self, del_mode=False) |
Keep the most recent crawl count and write it to a file.
del_mode(bool)
True refreshes the log.
:return: None
|
Keep the most recent crawl count and write it to a file.
del_mode(bool)
True refreshes the log.
:return: None
| def write_crawl_stat(self, del_mode=False):
"""
Keep the most recent crawl count and write it to a file.
del_mode(bool)
True refreshes the log.
:return: None
"""
here = os.path.join('/'.join(inspect.stack()[0][1].split('/')[:-1]))
crawler_logs_path = os.path.join(here, '.crawler_logs')
mode = 'a' if del_mode == False else 'w'
with open(crawler_logs_path, mode=mode) as log:
status = [self.keyword, str(self.num)]
log.write(':'.join(status) + '\n') | [
"def",
"write_crawl_stat",
"(",
"self",
",",
"del_mode",
"=",
"False",
")",
":",
"here",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/'",
".",
"join",
"(",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
")",
")",
"crawler_logs_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"here",
",",
"'.crawler_logs'",
")",
"mode",
"=",
"'a'",
"if",
"del_mode",
"==",
"False",
"else",
"'w'",
"with",
"open",
"(",
"crawler_logs_path",
",",
"mode",
"=",
"mode",
")",
"as",
"log",
":",
"status",
"=",
"[",
"self",
".",
"keyword",
",",
"str",
"(",
"self",
".",
"num",
")",
"]",
"log",
".",
"write",
"(",
"':'",
".",
"join",
"(",
"status",
")",
"+",
"'\\n'",
")"
] | [
46,
4
] | [
58,
46
] | python | en | ['en', 'error', 'th'] | False |
Clawler.read_crawl_stat | (self, target) |
Read the log written by write_crawl_stat.
:return: crawl count (int)
|
Read the log written by write_crawl_stat.
:return: crawl count (int)
| def read_crawl_stat(self, target):
"""
Read the log written by write_crawl_stat.
:return: crawl count (int)
"""
import re
here = os.path.join('/'.join(inspect.stack()[0][1].split('/')[:-1]))
crawler_logs_path = os.path.join(here, '.crawler_logs')
with open(crawler_logs_path, 'r') as log:
logs = log.readlines()
num = [keyword.split(':')[0] for keyword in logs if re.match(r'^{}:.+'.format(target), keyword)]
return num | [
"def",
"read_crawl_stat",
"(",
"self",
",",
"target",
")",
":",
"import",
"re",
"here",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/'",
".",
"join",
"(",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
")",
")",
"crawler_logs_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"here",
",",
"'.crawler_logs'",
")",
"with",
"open",
"(",
"crawler_logs_path",
",",
"'r'",
")",
"as",
"log",
":",
"logs",
"=",
"log",
".",
"readlines",
"(",
")",
"num",
"=",
"[",
"keyword",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"for",
"keyword",
"in",
"logs",
"if",
"re",
".",
"match",
"(",
"r'^{}:.+'",
".",
"format",
"(",
"target",
")",
")",
"]",
"return",
"num"
] | [
60,
4
] | [
71,
18
] | python | en | ['en', 'error', 'th'] | False |
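The two crawl-stat methods above share a plain-text log format: one keyword:count pair per line. A standalone sketch of reading a count back from that format, with path handling simplified relative to the inspect-based lookup in the original:

# Standalone sketch of the .crawler_logs format used above; each line is
# "<keyword>:<count>". Returns the count for `target`, or None if absent.
def read_crawl_count(log_path, target):
    with open(log_path) as log:
        for line in log:
            keyword, sep, count = line.rstrip('\n').partition(':')
            if sep and keyword == target:
                return int(count)
    return None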
get | (http, path, root=METADATA_ROOT, recursive=None) | Fetch a resource from the metadata server.
Args:
http: an object to be used to make HTTP requests.
path: A string indicating the resource to retrieve. For example,
'instance/service-accounts/default'
root: A string indicating the full path to the metadata server root.
recursive: A boolean indicating whether to do a recursive query of
metadata. See
https://cloud.google.com/compute/docs/metadata#aggcontents
Returns:
A dictionary if the metadata server returns JSON, otherwise a string.
Raises:
http_client.HTTPException if an error occurred while
retrieving metadata.
| Fetch a resource from the metadata server. | def get(http, path, root=METADATA_ROOT, recursive=None):
"""Fetch a resource from the metadata server.
Args:
http: an object to be used to make HTTP requests.
path: A string indicating the resource to retrieve. For example,
'instance/service-accounts/default'
root: A string indicating the full path to the metadata server root.
recursive: A boolean indicating whether to do a recursive query of
metadata. See
https://cloud.google.com/compute/docs/metadata#aggcontents
Returns:
A dictionary if the metadata server returns JSON, otherwise a string.
Raises:
http_client.HTTPException if an error occurred while
retrieving metadata.
"""
url = urlparse.urljoin(root, path)
url = _helpers._add_query_parameter(url, 'recursive', recursive)
response, content = transport.request(
http, url, headers=METADATA_HEADERS)
if response.status == http_client.OK:
decoded = _helpers._from_bytes(content)
if response['content-type'] == 'application/json':
return json.loads(decoded)
else:
return decoded
else:
raise http_client.HTTPException(
'Failed to retrieve {0} from the Google Compute Engine '
'metadata service. Response:\n{1}'.format(url, response)) | [
"def",
"get",
"(",
"http",
",",
"path",
",",
"root",
"=",
"METADATA_ROOT",
",",
"recursive",
"=",
"None",
")",
":",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"root",
",",
"path",
")",
"url",
"=",
"_helpers",
".",
"_add_query_parameter",
"(",
"url",
",",
"'recursive'",
",",
"recursive",
")",
"response",
",",
"content",
"=",
"transport",
".",
"request",
"(",
"http",
",",
"url",
",",
"headers",
"=",
"METADATA_HEADERS",
")",
"if",
"response",
".",
"status",
"==",
"http_client",
".",
"OK",
":",
"decoded",
"=",
"_helpers",
".",
"_from_bytes",
"(",
"content",
")",
"if",
"response",
"[",
"'content-type'",
"]",
"==",
"'application/json'",
":",
"return",
"json",
".",
"loads",
"(",
"decoded",
")",
"else",
":",
"return",
"decoded",
"else",
":",
"raise",
"http_client",
".",
"HTTPException",
"(",
"'Failed to retrieve {0} from the Google Compute Engine'",
"'metadata service. Response:\\n{1}'",
".",
"format",
"(",
"url",
",",
"response",
")",
")"
] | [
36,
0
] | [
70,
69
] | python | en | ['en', 'en', 'en'] | True |
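A usage sketch for the helper above. It can only succeed from inside a Compute Engine instance, where the metadata server is reachable, and it assumes an httplib2.Http object, which is what oauth2client's transport layer wraps:

# Sketch: fetch the default service account's metadata recursively.
# Only works on a GCE instance; raises HTTPException elsewhere. Assumes
# the get() helper above is in scope (in oauth2client it lives in
# oauth2client.contrib._metadata).
import httplib2

http = httplib2.Http()
info = get(http, 'instance/service-accounts/default/', recursive=True)
print(info['email'], info['scopes'])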
get_service_account_info | (http, service_account='default') | Get information about a service account from the metadata server.
Args:
http: an object to be used to make HTTP requests.
service_account: An email specifying the service account for which to
look up information. Default will be information for the "default"
service account of the current compute engine instance.
Returns:
A dictionary with information about the specified service account,
for example:
{
'email': '...',
'scopes': ['scope', ...],
'aliases': ['default', '...']
}
| Get information about a service account from the metadata server. | def get_service_account_info(http, service_account='default'):
"""Get information about a service account from the metadata server.
Args:
http: an object to be used to make HTTP requests.
service_account: An email specifying the service account for which to
look up information. Default will be information for the "default"
service account of the current compute engine instance.
Returns:
A dictionary with information about the specified service account,
for example:
{
'email': '...',
'scopes': ['scope', ...],
'aliases': ['default', '...']
}
"""
return get(
http,
'instance/service-accounts/{0}/'.format(service_account),
recursive=True) | [
"def",
"get_service_account_info",
"(",
"http",
",",
"service_account",
"=",
"'default'",
")",
":",
"return",
"get",
"(",
"http",
",",
"'instance/service-accounts/{0}/'",
".",
"format",
"(",
"service_account",
")",
",",
"recursive",
"=",
"True",
")"
] | [
73,
0
] | [
95,
23
] | python | en | ['en', 'en', 'en'] | True |
get_token | (http, service_account='default') | Fetch an OAuth token for the given service account.
Args:
http: an object to be used to make HTTP requests.
service_account: An email specifying the service account this token
should represent. Default will be a token for the "default" service
account of the current compute engine instance.
Returns:
A tuple of (access token, token expiration), where access token is the
access token as a string and token expiration is a datetime object
that indicates when the access token will expire.
| Fetch an OAuth token for the given service account. | def get_token(http, service_account='default'):
"""Fetch an oauth token for the
Args:
http: an object to be used to make HTTP requests.
service_account: An email specifying the service account this token
should represent. Default will be a token for the "default" service
account of the current compute engine instance.
Returns:
A tuple of (access token, token expiration), where access token is the
access token as a string and token expiration is a datetime object
that indicates when the access token will expire.
"""
token_json = get(
http,
'instance/service-accounts/{0}/token'.format(service_account))
token_expiry = client._UTCNOW() + datetime.timedelta(
seconds=token_json['expires_in'])
return token_json['access_token'], token_expiry | [
"def",
"get_token",
"(",
"http",
",",
"service_account",
"=",
"'default'",
")",
":",
"token_json",
"=",
"get",
"(",
"http",
",",
"'instance/service-accounts/{0}/token'",
".",
"format",
"(",
"service_account",
")",
")",
"token_expiry",
"=",
"client",
".",
"_UTCNOW",
"(",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"token_json",
"[",
"'expires_in'",
"]",
")",
"return",
"token_json",
"[",
"'access_token'",
"]",
",",
"token_expiry"
] | [
98,
0
] | [
117,
51
] | python | en | ['en', 'en', 'en'] | True |
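A companion sketch for get_token, under the same GCE-only and httplib2 assumptions:

# Sketch: fetch an access token and its expiry for the default account.
import httplib2

access_token, expires_at = get_token(httplib2.Http())
print('token expires at', expires_at)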
url_parse | (url, scheme=None, allow_fragments=True) | Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`.
The inverse of this function is :func:`url_unparse`.
:param url: the URL to parse.
:param scheme: the default schema to use if the URL is schemaless.
:param allow_fragments: if set to `False` a fragment will be removed
from the URL.
| Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`. | def url_parse(url, scheme=None, allow_fragments=True):
"""Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`.
The inverse of this function is :func:`url_unparse`.
:param url: the URL to parse.
:param scheme: the default schema to use if the URL is schemaless.
:param allow_fragments: if set to `False` a fragment will be removed
from the URL.
"""
s = make_literal_wrapper(url)
is_text_based = isinstance(url, text_type)
if scheme is None:
scheme = s("")
netloc = query = fragment = s("")
i = url.find(s(":"))
if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
# make sure "iri" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i + 1 :]
if not rest or any(c not in s("0123456789") for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == s("//"):
delim = len(url)
for c in s("/?#"):
wdelim = url.find(c, 2)
if wdelim >= 0:
delim = min(delim, wdelim)
netloc, url = url[2:delim], url[delim:]
if (s("[") in netloc and s("]") not in netloc) or (
s("]") in netloc and s("[") not in netloc
):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and s("#") in url:
url, fragment = url.split(s("#"), 1)
if s("?") in url:
url, query = url.split(s("?"), 1)
result_type = URL if is_text_based else BytesURL
return result_type(scheme, netloc, url, query, fragment) | [
"def",
"url_parse",
"(",
"url",
",",
"scheme",
"=",
"None",
",",
"allow_fragments",
"=",
"True",
")",
":",
"s",
"=",
"make_literal_wrapper",
"(",
"url",
")",
"is_text_based",
"=",
"isinstance",
"(",
"url",
",",
"text_type",
")",
"if",
"scheme",
"is",
"None",
":",
"scheme",
"=",
"s",
"(",
"\"\"",
")",
"netloc",
"=",
"query",
"=",
"fragment",
"=",
"s",
"(",
"\"\"",
")",
"i",
"=",
"url",
".",
"find",
"(",
"s",
"(",
"\":\"",
")",
")",
"if",
"i",
">",
"0",
"and",
"_scheme_re",
".",
"match",
"(",
"to_native",
"(",
"url",
"[",
":",
"i",
"]",
",",
"errors",
"=",
"\"replace\"",
")",
")",
":",
"# make sure \"iri\" is not actually a port number (in which case",
"# \"scheme\" is really part of the path)",
"rest",
"=",
"url",
"[",
"i",
"+",
"1",
":",
"]",
"if",
"not",
"rest",
"or",
"any",
"(",
"c",
"not",
"in",
"s",
"(",
"\"0123456789\"",
")",
"for",
"c",
"in",
"rest",
")",
":",
"# not a port number",
"scheme",
",",
"url",
"=",
"url",
"[",
":",
"i",
"]",
".",
"lower",
"(",
")",
",",
"rest",
"if",
"url",
"[",
":",
"2",
"]",
"==",
"s",
"(",
"\"//\"",
")",
":",
"delim",
"=",
"len",
"(",
"url",
")",
"for",
"c",
"in",
"s",
"(",
"\"/?#\"",
")",
":",
"wdelim",
"=",
"url",
".",
"find",
"(",
"c",
",",
"2",
")",
"if",
"wdelim",
">=",
"0",
":",
"delim",
"=",
"min",
"(",
"delim",
",",
"wdelim",
")",
"netloc",
",",
"url",
"=",
"url",
"[",
"2",
":",
"delim",
"]",
",",
"url",
"[",
"delim",
":",
"]",
"if",
"(",
"s",
"(",
"\"[\"",
")",
"in",
"netloc",
"and",
"s",
"(",
"\"]\"",
")",
"not",
"in",
"netloc",
")",
"or",
"(",
"s",
"(",
"\"]\"",
")",
"in",
"netloc",
"and",
"s",
"(",
"\"[\"",
")",
"not",
"in",
"netloc",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid IPv6 URL\"",
")",
"if",
"allow_fragments",
"and",
"s",
"(",
"\"#\"",
")",
"in",
"url",
":",
"url",
",",
"fragment",
"=",
"url",
".",
"split",
"(",
"s",
"(",
"\"#\"",
")",
",",
"1",
")",
"if",
"s",
"(",
"\"?\"",
")",
"in",
"url",
":",
"url",
",",
"query",
"=",
"url",
".",
"split",
"(",
"s",
"(",
"\"?\"",
")",
",",
"1",
")",
"result_type",
"=",
"URL",
"if",
"is_text_based",
"else",
"BytesURL",
"return",
"result_type",
"(",
"scheme",
",",
"netloc",
",",
"url",
",",
"query",
",",
"fragment",
")"
] | [
437,
0
] | [
483,
60
] | python | en | ['en', 'en', 'en'] | True |
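A short illustration of the URL tuple url_parse returns and of the scheme fallback, assuming a werkzeug version that still ships these helpers (they were deprecated and later removed upstream):

from werkzeug.urls import url_parse

u = url_parse('http://example.com/path?x=1#frag')
print(u.scheme, u.netloc, u.path, u.query, u.fragment)
# -> http example.com /path x=1 frag

# Schemeless input falls back to the second argument:
print(url_parse('example.com/path', scheme='https').scheme)  # https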
_make_fast_url_quote | (charset="utf-8", errors="strict", safe="/:", unsafe="") | Precompile the translation table for a URL encoding function.
Unlike :func:`url_quote`, the generated function only takes the
string to quote.
:param charset: The charset to encode the result with.
:param errors: How to handle encoding errors.
:param safe: An optional sequence of safe characters to never encode.
:param unsafe: An optional sequence of unsafe characters to always encode.
| Precompile the translation table for a URL encoding function. | def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
"""Precompile the translation table for a URL encoding function.
Unlike :func:`url_quote`, the generated function only takes the
string to quote.
:param charset: The charset to encode the result with.
:param errors: How to handle encoding errors.
:param safe: An optional sequence of safe characters to never encode.
:param unsafe: An optional sequence of unsafe characters to always encode.
"""
if isinstance(safe, text_type):
safe = safe.encode(charset, errors)
if isinstance(unsafe, text_type):
unsafe = unsafe.encode(charset, errors)
safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]
if not PY2:
def quote(string):
return "".join([table[c] for c in string])
else:
def quote(string):
return "".join([table[c] for c in bytearray(string)])
return quote | [
"def",
"_make_fast_url_quote",
"(",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"strict\"",
",",
"safe",
"=",
"\"/:\"",
",",
"unsafe",
"=",
"\"\"",
")",
":",
"if",
"isinstance",
"(",
"safe",
",",
"text_type",
")",
":",
"safe",
"=",
"safe",
".",
"encode",
"(",
"charset",
",",
"errors",
")",
"if",
"isinstance",
"(",
"unsafe",
",",
"text_type",
")",
":",
"unsafe",
"=",
"unsafe",
".",
"encode",
"(",
"charset",
",",
"errors",
")",
"safe",
"=",
"(",
"frozenset",
"(",
"bytearray",
"(",
"safe",
")",
")",
"|",
"_always_safe",
")",
"-",
"frozenset",
"(",
"bytearray",
"(",
"unsafe",
")",
")",
"table",
"=",
"[",
"chr",
"(",
"c",
")",
"if",
"c",
"in",
"safe",
"else",
"\"%%%02X\"",
"%",
"c",
"for",
"c",
"in",
"range",
"(",
"256",
")",
"]",
"if",
"not",
"PY2",
":",
"def",
"quote",
"(",
"string",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"table",
"[",
"c",
"]",
"for",
"c",
"in",
"string",
"]",
")",
"else",
":",
"def",
"quote",
"(",
"string",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"table",
"[",
"c",
"]",
"for",
"c",
"in",
"bytearray",
"(",
"string",
")",
"]",
")",
"return",
"quote"
] | [
486,
0
] | [
516,
16
] | python | en | ['en', 'en', 'en'] | True |
url_quote | (string, charset="utf-8", errors="strict", safe="/:", unsafe="") | URL encode a single string with a given encoding.
:param string: the string to quote.
:param charset: the charset to be used.
:param safe: an optional sequence of safe characters.
:param unsafe: an optional sequence of unsafe characters.
.. versionadded:: 0.9.2
The `unsafe` parameter was added.
| URL encode a single string with a given encoding. | def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
"""URL encode a single string with a given encoding.
:param string: the string to quote.
:param charset: the charset to be used.
:param safe: an optional sequence of safe characters.
:param unsafe: an optional sequence of unsafe characters.
.. versionadded:: 0.9.2
The `unsafe` parameter was added.
"""
if not isinstance(string, (text_type, bytes, bytearray)):
string = text_type(string)
if isinstance(string, text_type):
string = string.encode(charset, errors)
if isinstance(safe, text_type):
safe = safe.encode(charset, errors)
if isinstance(unsafe, text_type):
unsafe = unsafe.encode(charset, errors)
safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
rv = bytearray()
for char in bytearray(string):
if char in safe:
rv.append(char)
else:
rv.extend(_bytetohex[char])
return to_native(bytes(rv)) | [
"def",
"url_quote",
"(",
"string",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"strict\"",
",",
"safe",
"=",
"\"/:\"",
",",
"unsafe",
"=",
"\"\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"(",
"text_type",
",",
"bytes",
",",
"bytearray",
")",
")",
":",
"string",
"=",
"text_type",
"(",
"string",
")",
"if",
"isinstance",
"(",
"string",
",",
"text_type",
")",
":",
"string",
"=",
"string",
".",
"encode",
"(",
"charset",
",",
"errors",
")",
"if",
"isinstance",
"(",
"safe",
",",
"text_type",
")",
":",
"safe",
"=",
"safe",
".",
"encode",
"(",
"charset",
",",
"errors",
")",
"if",
"isinstance",
"(",
"unsafe",
",",
"text_type",
")",
":",
"unsafe",
"=",
"unsafe",
".",
"encode",
"(",
"charset",
",",
"errors",
")",
"safe",
"=",
"(",
"frozenset",
"(",
"bytearray",
"(",
"safe",
")",
")",
"|",
"_always_safe",
")",
"-",
"frozenset",
"(",
"bytearray",
"(",
"unsafe",
")",
")",
"rv",
"=",
"bytearray",
"(",
")",
"for",
"char",
"in",
"bytearray",
"(",
"string",
")",
":",
"if",
"char",
"in",
"safe",
":",
"rv",
".",
"append",
"(",
"char",
")",
"else",
":",
"rv",
".",
"extend",
"(",
"_bytetohex",
"[",
"char",
"]",
")",
"return",
"to_native",
"(",
"bytes",
"(",
"rv",
")",
")"
] | [
527,
0
] | [
553,
31
] | python | en | ['en', 'en', 'en'] | True |
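To make the safe/unsafe interplay above concrete (same werkzeug-version caveat as before):

from werkzeug.urls import url_quote

print(url_quote(u'hello wörld'))       # hello%20w%C3%B6rld
# ':' is safe by default, but `unsafe` overrides that:
print(url_quote('a/b:c', unsafe=':'))  # a/b%3Ac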
url_quote_plus | (string, charset="utf-8", errors="strict", safe="") | URL encode a single string with the given encoding and convert
whitespace to "+".
:param string: The string to quote.
:param charset: The charset to be used.
:param safe: An optional sequence of safe characters.
| URL encode a single string with the given encoding and convert
whitespace to "+". | def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
"""URL encode a single string with the given encoding and convert
whitespace to "+".
:param string: The string to quote.
:param charset: The charset to be used.
:param safe: An optional sequence of safe characters.
"""
return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+") | [
"def",
"url_quote_plus",
"(",
"string",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"strict\"",
",",
"safe",
"=",
"\"\"",
")",
":",
"return",
"url_quote",
"(",
"string",
",",
"charset",
",",
"errors",
",",
"safe",
"+",
"\" \"",
",",
"\"+\"",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"+\"",
")"
] | [
556,
0
] | [
564,
80
] | python | en | ['en', 'en', 'en'] | True |
url_unparse | (components) | The reverse operation to :meth:`url_parse`. This accepts arbitrary tuples
as well as :class:`URL` tuples and returns a URL as a string.
:param components: the parsed URL as tuple which should be converted
into a URL string.
The reverse operation to :meth:`url_parse`. This accepts arbitrary tuples
as well as :class:`URL` tuples and returns a URL as a string. | def url_unparse(components):
"""The reverse operation to :meth:`url_parse`. This accepts arbitrary
as well as :class:`URL` tuples and returns a URL as a string.
:param components: the parsed URL as tuple which should be converted
into a URL string.
"""
scheme, netloc, path, query, fragment = normalize_string_tuple(components)
s = make_literal_wrapper(scheme)
url = s("")
# We generally treat file:///x and file:/x the same which is also
# what browsers seem to do. This also allows us to ignore a schema
# register for netloc utilization or having to differentiate between
# empty and missing netloc.
if netloc or (scheme and path.startswith(s("/"))):
if path and path[:1] != s("/"):
path = s("/") + path
url = s("//") + (netloc or s("")) + path
elif path:
url += path
if scheme:
url = scheme + s(":") + url
if query:
url = url + s("?") + query
if fragment:
url = url + s("#") + fragment
return url | [
"def",
"url_unparse",
"(",
"components",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"normalize_string_tuple",
"(",
"components",
")",
"s",
"=",
"make_literal_wrapper",
"(",
"scheme",
")",
"url",
"=",
"s",
"(",
"\"\"",
")",
"# We generally treat file:///x and file:/x the same which is also",
"# what browsers seem to do. This also allows us to ignore a schema",
"# register for netloc utilization or having to differenciate between",
"# empty and missing netloc.",
"if",
"netloc",
"or",
"(",
"scheme",
"and",
"path",
".",
"startswith",
"(",
"s",
"(",
"\"/\"",
")",
")",
")",
":",
"if",
"path",
"and",
"path",
"[",
":",
"1",
"]",
"!=",
"s",
"(",
"\"/\"",
")",
":",
"path",
"=",
"s",
"(",
"\"/\"",
")",
"+",
"path",
"url",
"=",
"s",
"(",
"\"//\"",
")",
"+",
"(",
"netloc",
"or",
"s",
"(",
"\"\"",
")",
")",
"+",
"path",
"elif",
"path",
":",
"url",
"+=",
"path",
"if",
"scheme",
":",
"url",
"=",
"scheme",
"+",
"s",
"(",
"\":\"",
")",
"+",
"url",
"if",
"query",
":",
"url",
"=",
"url",
"+",
"s",
"(",
"\"?\"",
")",
"+",
"query",
"if",
"fragment",
":",
"url",
"=",
"url",
"+",
"s",
"(",
"\"#\"",
")",
"+",
"fragment",
"return",
"url"
] | [
567,
0
] | [
594,
14
] | python | en | ['en', 'en', 'en'] | True |
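A round-trip sketch showing url_unparse as the inverse of url_parse, plus the plain 5-tuple form it also accepts:

from werkzeug.urls import url_parse, url_unparse

original = 'http://example.com/a?b=1#c'
assert url_unparse(url_parse(original)) == original

# A plain (scheme, netloc, path, query, fragment) tuple works too:
print(url_unparse(('https', 'example.com', '/x', 'q=1', '')))
# -> https://example.com/x?q=1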
url_unquote | (string, charset="utf-8", errors="replace", unsafe="") | URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
:param string: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
| URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned. | def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
"""URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
:param string: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
"""
rv = _unquote_to_bytes(string, unsafe)
if charset is not None:
rv = rv.decode(charset, errors)
return rv | [
"def",
"url_unquote",
"(",
"string",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"replace\"",
",",
"unsafe",
"=",
"\"\"",
")",
":",
"rv",
"=",
"_unquote_to_bytes",
"(",
"string",
",",
"unsafe",
")",
"if",
"charset",
"is",
"not",
"None",
":",
"rv",
"=",
"rv",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
"return",
"rv"
] | [
597,
0
] | [
610,
13
] | python | en | ['en', 'en', 'en'] | True |
url_unquote_plus | (s, charset="utf-8", errors="replace") | URL decode a single string with the given `charset` and decode "+" to
whitespace.
By default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
:param s: The string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: The error handling for the `charset` decoding.
| URL decode a single string with the given `charset` and decode "+" to
whitespace. | def url_unquote_plus(s, charset="utf-8", errors="replace"):
"""URL decode a single string with the given `charset` and decode "+" to
whitespace.
By default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
:param s: The string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: The error handling for the `charset` decoding.
"""
if isinstance(s, text_type):
s = s.replace(u"+", u" ")
else:
s = s.replace(b"+", b" ")
return url_unquote(s, charset, errors) | [
"def",
"url_unquote_plus",
"(",
"s",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"replace\"",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"text_type",
")",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"u\"+\"",
",",
"u\" \"",
")",
"else",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"b\"+\"",
",",
"b\" \"",
")",
"return",
"url_unquote",
"(",
"s",
",",
"charset",
",",
"errors",
")"
] | [
613,
0
] | [
630,
42
] | python | en | ['en', 'en', 'en'] | True |
url_fix | (s, charset="utf-8") | r"""Sometimes you get a URL from a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
| r"""Sometimes you get an URL by a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user: | def url_fix(s, charset="utf-8"):
r"""Sometimes you get an URL by a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
# First step is to switch to unicode processing and to convert
# backslashes (which are invalid in URLs anyways) to slashes. This is
# consistent with what Chrome does.
s = to_unicode(s, charset, "replace").replace("\\", "/")
# For the specific case that we look like a malformed windows URL
# we want to fix this up manually:
if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
s = "file:///" + s[7:]
url = url_parse(s)
path = url_quote(url.path, charset, safe="/%+$!*'(),")
qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))) | [
"def",
"url_fix",
"(",
"s",
",",
"charset",
"=",
"\"utf-8\"",
")",
":",
"# First step is to switch to unicode processing and to convert",
"# backslashes (which are invalid in URLs anyways) to slashes. This is",
"# consistent with what Chrome does.",
"s",
"=",
"to_unicode",
"(",
"s",
",",
"charset",
",",
"\"replace\"",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"# For the specific case that we look like a malformed windows URL",
"# we want to fix this up manually:",
"if",
"s",
".",
"startswith",
"(",
"\"file://\"",
")",
"and",
"s",
"[",
"7",
":",
"8",
"]",
".",
"isalpha",
"(",
")",
"and",
"s",
"[",
"8",
":",
"10",
"]",
"in",
"(",
"\":/\"",
",",
"\"|/\"",
")",
":",
"s",
"=",
"\"file:///\"",
"+",
"s",
"[",
"7",
":",
"]",
"url",
"=",
"url_parse",
"(",
"s",
")",
"path",
"=",
"url_quote",
"(",
"url",
".",
"path",
",",
"charset",
",",
"safe",
"=",
"\"/%+$!*'(),\"",
")",
"qs",
"=",
"url_quote_plus",
"(",
"url",
".",
"query",
",",
"charset",
",",
"safe",
"=",
"\":&%=+$!*'(),\"",
")",
"anchor",
"=",
"url_quote_plus",
"(",
"url",
".",
"fragment",
",",
"charset",
",",
"safe",
"=",
"\":&%=+$!*'(),\"",
")",
"return",
"to_native",
"(",
"url_unparse",
"(",
"(",
"url",
".",
"scheme",
",",
"url",
".",
"encode_netloc",
"(",
")",
",",
"path",
",",
"qs",
",",
"anchor",
")",
")",
")"
] | [
633,
0
] | [
660,
86
] | python | en | ['en', 'en', 'en'] | True |
_codec_error_url_quote | (e) | Used in :func:`uri_to_iri` after unquoting to re-quote any
invalid bytes.
| Used in :func:`uri_to_iri` after unquoting to re-quote any
invalid bytes.
| def _codec_error_url_quote(e):
"""Used in :func:`uri_to_iri` after unquoting to re-quote any
invalid bytes.
"""
out = _fast_url_quote(e.object[e.start : e.end])
if PY2:
out = out.decode("utf-8")
return out, e.end | [
"def",
"_codec_error_url_quote",
"(",
"e",
")",
":",
"out",
"=",
"_fast_url_quote",
"(",
"e",
".",
"object",
"[",
"e",
".",
"start",
":",
"e",
".",
"end",
"]",
")",
"if",
"PY2",
":",
"out",
"=",
"out",
".",
"decode",
"(",
"\"utf-8\"",
")",
"return",
"out",
",",
"e",
".",
"end"
] | [
667,
0
] | [
676,
21
] | python | en | ['en', 'en', 'en'] | True |
uri_to_iri | (uri, charset="utf-8", errors="werkzeug.url_quote") | Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
leaving all reserved and invalid characters quoted. If the URL has
a domain, it is decoded from Punycode.
>>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'
:param uri: The URI to convert.
:param charset: The encoding to encode unquoted bytes with.
:param errors: Error handler to use during ``bytes.encode``. By
default, invalid bytes are left quoted.
.. versionchanged:: 0.15
All reserved and invalid characters remain quoted. Previously,
only some reserved characters were preserved, and invalid bytes
were replaced instead of left quoted.
.. versionadded:: 0.6
| Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
leaving all reserved and invalid characters quoted. If the URL has
a domain, it is decoded from Punycode. | def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
"""Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
leaving all reserved and invalid characters quoted. If the URL has
a domain, it is decoded from Punycode.
>>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'
:param uri: The URI to convert.
:param charset: The encoding to encode unquoted bytes with.
:param errors: Error handler to use during ``bytes.encode``. By
default, invalid bytes are left quoted.
.. versionchanged:: 0.15
All reserved and invalid characters remain quoted. Previously,
only some reserved characters were preserved, and invalid bytes
were replaced instead of left quoted.
.. versionadded:: 0.6
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment)) | [
"def",
"uri_to_iri",
"(",
"uri",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"werkzeug.url_quote\"",
")",
":",
"if",
"isinstance",
"(",
"uri",
",",
"tuple",
")",
":",
"uri",
"=",
"url_unparse",
"(",
"uri",
")",
"uri",
"=",
"url_parse",
"(",
"to_unicode",
"(",
"uri",
",",
"charset",
")",
")",
"path",
"=",
"url_unquote",
"(",
"uri",
".",
"path",
",",
"charset",
",",
"errors",
",",
"_to_iri_unsafe",
")",
"query",
"=",
"url_unquote",
"(",
"uri",
".",
"query",
",",
"charset",
",",
"errors",
",",
"_to_iri_unsafe",
")",
"fragment",
"=",
"url_unquote",
"(",
"uri",
".",
"fragment",
",",
"charset",
",",
"errors",
",",
"_to_iri_unsafe",
")",
"return",
"url_unparse",
"(",
"(",
"uri",
".",
"scheme",
",",
"uri",
".",
"decode_netloc",
"(",
")",
",",
"path",
",",
"query",
",",
"fragment",
")",
")"
] | [
682,
0
] | [
709,
80
] | python | en | ['en', 'en', 'en'] | True |
iri_to_uri | (iri, charset="utf-8", errors="strict", safe_conversion=False) | Convert an IRI to a URI. All non-ASCII and unsafe characters are
quoted. If the URL has a domain, it is encoded to Punycode.
>>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'
:param iri: The IRI to convert.
:param charset: The encoding of the IRI.
:param errors: Error handler to use during ``bytes.encode``.
:param safe_conversion: Return the URL unchanged if it only contains
ASCII characters and no whitespace. See the explanation below.
There is a general problem with IRI conversion with some protocols
that are in violation of the URI specification. Consider the
following two IRIs::
magnet:?xt=uri:whatever
itms-services://?action=download-manifest
After parsing, we don't know if the scheme requires the ``//``,
which is dropped if empty, but conveys different meanings in the
final URL if it's present or not. In this case, you can use
``safe_conversion``, which will return the URL unchanged if it only
contains ASCII characters and no whitespace. This can result in a
URI with unquoted characters if it was not already quoted correctly,
but preserves the URL's semantics. Werkzeug uses this for the
``Location`` header for redirects.
.. versionchanged:: 0.15
All reserved characters remain unquoted. Previously, only some
reserved characters were left unquoted.
.. versionchanged:: 0.9.6
The ``safe_conversion`` parameter was added.
.. versionadded:: 0.6
| Convert an IRI to a URI. All non-ASCII and unsafe characters are
quoted. If the URL has a domain, it is encoded to Punycode. | def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
"""Convert an IRI to a URI. All non-ASCII and unsafe characters are
quoted. If the URL has a domain, it is encoded to Punycode.
>>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'
:param iri: The IRI to convert.
:param charset: The encoding of the IRI.
:param errors: Error handler to use during ``bytes.encode``.
:param safe_conversion: Return the URL unchanged if it only contains
ASCII characters and no whitespace. See the explanation below.
There is a general problem with IRI conversion with some protocols
that are in violation of the URI specification. Consider the
following two IRIs::
magnet:?xt=uri:whatever
itms-services://?action=download-manifest
After parsing, we don't know if the scheme requires the ``//``,
which is dropped if empty, but conveys different meanings in the
final URL if it's present or not. In this case, you can use
``safe_conversion``, which will return the URL unchanged if it only
contains ASCII characters and no whitespace. This can result in a
URI with unquoted characters if it was not already quoted correctly,
but preserves the URL's semantics. Werkzeug uses this for the
``Location`` header for redirects.
.. versionchanged:: 0.15
All reserved characters remain unquoted. Previously, only some
reserved characters were left unquoted.
.. versionchanged:: 0.9.6
The ``safe_conversion`` parameter was added.
.. versionadded:: 0.6
"""
if isinstance(iri, tuple):
iri = url_unparse(iri)
if safe_conversion:
# If we're not sure if it's safe to convert the URL, and it only
# contains ASCII characters, return it unconverted.
try:
native_iri = to_native(iri)
ascii_iri = native_iri.encode("ascii")
# Only return if it doesn't have whitespace. (Why?)
if len(ascii_iri.split()) == 1:
return native_iri
except UnicodeError:
pass
iri = url_parse(to_unicode(iri, charset, errors))
path = url_quote(iri.path, charset, errors, _to_uri_safe)
query = url_quote(iri.query, charset, errors, _to_uri_safe)
fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
return to_native(
url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
) | [
"def",
"iri_to_uri",
"(",
"iri",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"strict\"",
",",
"safe_conversion",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"iri",
",",
"tuple",
")",
":",
"iri",
"=",
"url_unparse",
"(",
"iri",
")",
"if",
"safe_conversion",
":",
"# If we're not sure if it's safe to convert the URL, and it only",
"# contains ASCII characters, return it unconverted.",
"try",
":",
"native_iri",
"=",
"to_native",
"(",
"iri",
")",
"ascii_iri",
"=",
"native_iri",
".",
"encode",
"(",
"\"ascii\"",
")",
"# Only return if it doesn't have whitespace. (Why?)",
"if",
"len",
"(",
"ascii_iri",
".",
"split",
"(",
")",
")",
"==",
"1",
":",
"return",
"native_iri",
"except",
"UnicodeError",
":",
"pass",
"iri",
"=",
"url_parse",
"(",
"to_unicode",
"(",
"iri",
",",
"charset",
",",
"errors",
")",
")",
"path",
"=",
"url_quote",
"(",
"iri",
".",
"path",
",",
"charset",
",",
"errors",
",",
"_to_uri_safe",
")",
"query",
"=",
"url_quote",
"(",
"iri",
".",
"query",
",",
"charset",
",",
"errors",
",",
"_to_uri_safe",
")",
"fragment",
"=",
"url_quote",
"(",
"iri",
".",
"fragment",
",",
"charset",
",",
"errors",
",",
"_to_uri_safe",
")",
"return",
"to_native",
"(",
"url_unparse",
"(",
"(",
"iri",
".",
"scheme",
",",
"iri",
".",
"encode_netloc",
"(",
")",
",",
"path",
",",
"query",
",",
"fragment",
")",
")",
")"
] | [
716,
0
] | [
776,
5
] | python | en | ['en', 'en', 'en'] | True |
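The safe_conversion discussion above is easiest to see on one of the problem schemes it mentions; the outputs follow from the parse/unparse rules earlier in this file:

from werkzeug.urls import iri_to_uri

# Without safe_conversion the empty-netloc '//' is dropped on reassembly:
print(iri_to_uri('itms-services://?action=download-manifest'))
# -> itms-services:?action=download-manifest

# With safe_conversion the all-ASCII URL is returned untouched:
print(iri_to_uri('itms-services://?action=download-manifest',
                 safe_conversion=True))
# -> itms-services://?action=download-manifest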
url_decode | (
s,
charset="utf-8",
decode_keys=False,
include_empty=True,
errors="replace",
separator="&",
cls=None,
) |
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`.
If the charset is set to `None` no unicode decoding will happen and
raw bytes will be returned.
By default a missing value for a key will default to an empty string. If
you don't want that behavior you can set `include_empty` to `False`.
By default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`
then keys will be unicode in all cases. Otherwise,
they remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
|
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`. | def url_decode(
s,
charset="utf-8",
decode_keys=False,
include_empty=True,
errors="replace",
separator="&",
cls=None,
):
"""
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`.
If the charset is set to `None` no unicode decoding will happen and
raw bytes will be returned.
By default a missing value for a key will default to an empty string. If
you don't want that behavior you can set `include_empty` to `False`.
By default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`
then keys will be unicode in all cases. Otherwise,
they remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
from .datastructures import MultiDict
cls = MultiDict
if isinstance(s, text_type) and not isinstance(separator, text_type):
separator = separator.decode(charset or "ascii")
elif isinstance(s, bytes) and not isinstance(separator, bytes):
separator = separator.encode(charset or "ascii")
return cls(
_url_decode_impl(
s.split(separator), charset, decode_keys, include_empty, errors
)
) | [
"def",
"url_decode",
"(",
"s",
",",
"charset",
"=",
"\"utf-8\"",
",",
"decode_keys",
"=",
"False",
",",
"include_empty",
"=",
"True",
",",
"errors",
"=",
"\"replace\"",
",",
"separator",
"=",
"\"&\"",
",",
"cls",
"=",
"None",
",",
")",
":",
"if",
"cls",
"is",
"None",
":",
"from",
".",
"datastructures",
"import",
"MultiDict",
"cls",
"=",
"MultiDict",
"if",
"isinstance",
"(",
"s",
",",
"text_type",
")",
"and",
"not",
"isinstance",
"(",
"separator",
",",
"text_type",
")",
":",
"separator",
"=",
"separator",
".",
"decode",
"(",
"charset",
"or",
"\"ascii\"",
")",
"elif",
"isinstance",
"(",
"s",
",",
"bytes",
")",
"and",
"not",
"isinstance",
"(",
"separator",
",",
"bytes",
")",
":",
"separator",
"=",
"separator",
".",
"encode",
"(",
"charset",
"or",
"\"ascii\"",
")",
"return",
"cls",
"(",
"_url_decode_impl",
"(",
"s",
".",
"split",
"(",
"separator",
")",
",",
"charset",
",",
"decode_keys",
",",
"include_empty",
",",
"errors",
")",
")"
] | [
779,
0
] | [
838,
5
] | python | en | ['en', 'error', 'th'] | False |
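A small sketch of the MultiDict that url_decode builds, including repeated keys and the empty-value default described above:

from werkzeug.urls import url_decode

d = url_decode('a=1&a=2&b=3&flag')
print(d.getlist('a'))  # ['1', '2']
print(d['a'])          # '1' (item access returns the first value)
print(d['flag'])       # '' (kept because include_empty=True)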
url_decode_stream | (
stream,
charset="utf-8",
decode_keys=False,
include_empty=True,
errors="replace",
separator="&",
cls=None,
limit=None,
return_iterator=False,
) | Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed.
.. versionadded:: 0.8
:param stream: a stream with the encoded querystring
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`,
keys will be unicode in all cases. Otherwise, they
remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param limit: the content length of the URL data. Not necessary if
a limited stream is provided.
:param return_iterator: if set to `True` the `cls` argument is ignored
and an iterator over all decoded pairs is
returned
| Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed. | def url_decode_stream(
stream,
charset="utf-8",
decode_keys=False,
include_empty=True,
errors="replace",
separator="&",
cls=None,
limit=None,
return_iterator=False,
):
"""Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed.
.. versionadded:: 0.8
:param stream: a stream with the encoded querystring
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`,
keys will be unicode in all cases. Otherwise, they
remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param limit: the content length of the URL data. Not necessary if
a limited stream is provided.
:param return_iterator: if set to `True` the `cls` argument is ignored
and an iterator over all decoded pairs is
returned
"""
from .wsgi import make_chunk_iter
pair_iter = make_chunk_iter(stream, separator, limit)
decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors)
if return_iterator:
return decoder
if cls is None:
from .datastructures import MultiDict
cls = MultiDict
return cls(decoder) | [
"def",
"url_decode_stream",
"(",
"stream",
",",
"charset",
"=",
"\"utf-8\"",
",",
"decode_keys",
"=",
"False",
",",
"include_empty",
"=",
"True",
",",
"errors",
"=",
"\"replace\"",
",",
"separator",
"=",
"\"&\"",
",",
"cls",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"return_iterator",
"=",
"False",
",",
")",
":",
"from",
".",
"wsgi",
"import",
"make_chunk_iter",
"pair_iter",
"=",
"make_chunk_iter",
"(",
"stream",
",",
"separator",
",",
"limit",
")",
"decoder",
"=",
"_url_decode_impl",
"(",
"pair_iter",
",",
"charset",
",",
"decode_keys",
",",
"include_empty",
",",
"errors",
")",
"if",
"return_iterator",
":",
"return",
"decoder",
"if",
"cls",
"is",
"None",
":",
"from",
".",
"datastructures",
"import",
"MultiDict",
"cls",
"=",
"MultiDict",
"return",
"cls",
"(",
"decoder",
")"
] | [
841,
0
] | [
892,
23
] | python | en | ['en', 'en', 'en'] | True |
url_encode | (
obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
) | URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. By default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
| URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. By default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too. | def url_encode(
obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. By default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, "ascii")
return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key)) | [
"def",
"url_encode",
"(",
"obj",
",",
"charset",
"=",
"\"utf-8\"",
",",
"encode_keys",
"=",
"False",
",",
"sort",
"=",
"False",
",",
"key",
"=",
"None",
",",
"separator",
"=",
"b\"&\"",
")",
":",
"separator",
"=",
"to_native",
"(",
"separator",
",",
"\"ascii\"",
")",
"return",
"separator",
".",
"join",
"(",
"_url_encode_impl",
"(",
"obj",
",",
"charset",
",",
"encode_keys",
",",
"sort",
",",
"key",
")",
")"
] | [
914,
0
] | [
938,
81
] | python | en | ['en', 'en', 'en'] | True |
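And the encoding direction; values go through url_quote_plus, so spaces become '+', and sort=True orders the pairs:

from werkzeug.urls import url_encode

# Pair order follows dict iteration order unless sort=True is given.
print(url_encode({'a': 1, 'b': 'x y'}))             # a=1&b=x+y
print(url_encode([('b', 2), ('a', 1)], sort=True))  # a=1&b=2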
url_encode_stream | (
obj,
stream=None,
charset="utf-8",
encode_keys=False,
sort=False,
key=None,
separator=b"&",
) | Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned.
.. versionadded:: 0.8
:param obj: the object to encode into a query string.
:param stream: a stream to write the encoded object into or `None` if
an iterator over the encoded pairs should be returned. In
that case the separator argument is ignored.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
| Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned. | def url_encode_stream(
obj,
stream=None,
charset="utf-8",
encode_keys=False,
sort=False,
key=None,
separator=b"&",
):
"""Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned.
.. versionadded:: 0.8
:param obj: the object to encode into a query string.
:param stream: a stream to write the encoded object into or `None` if
an iterator over the encoded pairs should be returned. In
that case the separator argument is ignored.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, "ascii")
gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
if stream is None:
return gen
for idx, chunk in enumerate(gen):
if idx:
stream.write(separator)
stream.write(chunk) | [
"def",
"url_encode_stream",
"(",
"obj",
",",
"stream",
"=",
"None",
",",
"charset",
"=",
"\"utf-8\"",
",",
"encode_keys",
"=",
"False",
",",
"sort",
"=",
"False",
",",
"key",
"=",
"None",
",",
"separator",
"=",
"b\"&\"",
",",
")",
":",
"separator",
"=",
"to_native",
"(",
"separator",
",",
"\"ascii\"",
")",
"gen",
"=",
"_url_encode_impl",
"(",
"obj",
",",
"charset",
",",
"encode_keys",
",",
"sort",
",",
"key",
")",
"if",
"stream",
"is",
"None",
":",
"return",
"gen",
"for",
"idx",
",",
"chunk",
"in",
"enumerate",
"(",
"gen",
")",
":",
"if",
"idx",
":",
"stream",
".",
"write",
"(",
"separator",
")",
"stream",
".",
"write",
"(",
"chunk",
")"
] | [
941,
0
] | [
975,
27
] | python | en | ['en', 'en', 'en'] | True |
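A hedged sketch of the streaming variant, assuming Python 3 (where the encoded chunks are native strings) and the same `werkzeug.urls` import path; with `stream=None` the generator form is returned instead:

>>> from io import StringIO
>>> from werkzeug.urls import url_encode_stream
>>> buf = StringIO()
>>> url_encode_stream({'a': 1, 'b': 2}, buf)
>>> buf.getvalue()
'a=1&b=2'
>>> list(url_encode_stream({'a': 1, 'b': 2}))
['a=1', 'b=2']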
url_join | (base, url, allow_fragments=True) | Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
| Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter. | def url_join(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
"""
if isinstance(base, tuple):
base = url_unparse(base)
if isinstance(url, tuple):
url = url_unparse(url)
base, url = normalize_string_tuple((base, url))
s = make_literal_wrapper(base)
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
base, allow_fragments=allow_fragments
)
scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return url_unparse((scheme, netloc, path, query, fragment))
netloc = bnetloc
if path[:1] == s("/"):
segments = path.split(s("/"))
elif not path:
segments = bpath.split(s("/"))
if not query:
query = bquery
else:
segments = bpath.split(s("/"))[:-1] + path.split(s("/"))
# If the rightmost part is "./" we want to keep the slash but
# remove the dot.
if segments[-1] == s("."):
segments[-1] = s("")
# Resolve ".." and "."
segments = [segment for segment in segments if segment != s(".")]
while 1:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
del segments[i - 1 : i + 1]
break
i += 1
else:
break
# Remove leading ".." if the URL is absolute
unwanted_marker = [s(""), s("..")]
while segments[:2] == unwanted_marker:
del segments[1]
path = s("/").join(segments)
return url_unparse((scheme, netloc, path, query, fragment)) | [
"def",
"url_join",
"(",
"base",
",",
"url",
",",
"allow_fragments",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"base",
",",
"tuple",
")",
":",
"base",
"=",
"url_unparse",
"(",
"base",
")",
"if",
"isinstance",
"(",
"url",
",",
"tuple",
")",
":",
"url",
"=",
"url_unparse",
"(",
"url",
")",
"base",
",",
"url",
"=",
"normalize_string_tuple",
"(",
"(",
"base",
",",
"url",
")",
")",
"s",
"=",
"make_literal_wrapper",
"(",
"base",
")",
"if",
"not",
"base",
":",
"return",
"url",
"if",
"not",
"url",
":",
"return",
"base",
"bscheme",
",",
"bnetloc",
",",
"bpath",
",",
"bquery",
",",
"bfragment",
"=",
"url_parse",
"(",
"base",
",",
"allow_fragments",
"=",
"allow_fragments",
")",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"url_parse",
"(",
"url",
",",
"bscheme",
",",
"allow_fragments",
")",
"if",
"scheme",
"!=",
"bscheme",
":",
"return",
"url",
"if",
"netloc",
":",
"return",
"url_unparse",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
")",
"netloc",
"=",
"bnetloc",
"if",
"path",
"[",
":",
"1",
"]",
"==",
"s",
"(",
"\"/\"",
")",
":",
"segments",
"=",
"path",
".",
"split",
"(",
"s",
"(",
"\"/\"",
")",
")",
"elif",
"not",
"path",
":",
"segments",
"=",
"bpath",
".",
"split",
"(",
"s",
"(",
"\"/\"",
")",
")",
"if",
"not",
"query",
":",
"query",
"=",
"bquery",
"else",
":",
"segments",
"=",
"bpath",
".",
"split",
"(",
"s",
"(",
"\"/\"",
")",
")",
"[",
":",
"-",
"1",
"]",
"+",
"path",
".",
"split",
"(",
"s",
"(",
"\"/\"",
")",
")",
"# If the rightmost part is \"./\" we want to keep the slash but",
"# remove the dot.",
"if",
"segments",
"[",
"-",
"1",
"]",
"==",
"s",
"(",
"\".\"",
")",
":",
"segments",
"[",
"-",
"1",
"]",
"=",
"s",
"(",
"\"\"",
")",
"# Resolve \"..\" and \".\"",
"segments",
"=",
"[",
"segment",
"for",
"segment",
"in",
"segments",
"if",
"segment",
"!=",
"s",
"(",
"\".\"",
")",
"]",
"while",
"1",
":",
"i",
"=",
"1",
"n",
"=",
"len",
"(",
"segments",
")",
"-",
"1",
"while",
"i",
"<",
"n",
":",
"if",
"segments",
"[",
"i",
"]",
"==",
"s",
"(",
"\"..\"",
")",
"and",
"segments",
"[",
"i",
"-",
"1",
"]",
"not",
"in",
"(",
"s",
"(",
"\"\"",
")",
",",
"s",
"(",
"\"..\"",
")",
")",
":",
"del",
"segments",
"[",
"i",
"-",
"1",
":",
"i",
"+",
"1",
"]",
"break",
"i",
"+=",
"1",
"else",
":",
"break",
"# Remove trailing \"..\" if the URL is absolute",
"unwanted_marker",
"=",
"[",
"s",
"(",
"\"\"",
")",
",",
"s",
"(",
"\"..\"",
")",
"]",
"while",
"segments",
"[",
":",
"2",
"]",
"==",
"unwanted_marker",
":",
"del",
"segments",
"[",
"1",
"]",
"path",
"=",
"s",
"(",
"\"/\"",
")",
".",
"join",
"(",
"segments",
")",
"return",
"url_unparse",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
")"
] | [
978,
0
] | [
1042,
63
] | python | en | ['en', 'en', 'en'] | True |
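A short sketch of `url_join` resolving relative references, including the ".." collapsing implemented above (import path assumed to be `werkzeug.urls`):

>>> from werkzeug.urls import url_join
>>> url_join('http://example.com/a/b.html', 'c.html')
'http://example.com/a/c.html'
>>> url_join('http://example.com/a/b/', '../index.html')
'http://example.com/a/index.html'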
BaseURL.replace | (self, **kwargs) | Return a URL with the same values, except for those parameters
given new values by whichever keyword arguments are specified. | Return a URL with the same values, except for those parameters
given new values by whichever keyword arguments are specified. | def replace(self, **kwargs):
"""Return a URL with the same values, except for those parameters
given new values by whichever keyword arguments are specified."""
return self._replace(**kwargs) | [
"def",
"replace",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_replace",
"(",
"*",
"*",
"kwargs",
")"
] | [
64,
4
] | [
67,
38
] | python | en | ['en', 'en', 'en'] | True |
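A quick sketch of `replace`, which simply delegates to the namedtuple's `_replace`; `url_parse` is assumed to come from the same `werkzeug.urls` module:

>>> from werkzeug.urls import url_parse
>>> url = url_parse('http://example.com/path')
>>> url.replace(scheme='https').to_url()
'https://example.com/path'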
BaseURL.host | (self) | The host part of the URL if available, otherwise `None`. The
host is either the hostname or the IP address mentioned in the
URL. It will not contain the port.
| The host part of the URL if available, otherwise `None`. The
host is either the hostname or the IP address mentioned in the
URL. It will not contain the port.
| def host(self):
"""The host part of the URL if available, otherwise `None`. The
host is either the hostname or the IP address mentioned in the
URL. It will not contain the port.
"""
return self._split_host()[0] | [
"def",
"host",
"(",
"self",
")",
":",
"return",
"self",
".",
"_split_host",
"(",
")",
"[",
"0",
"]"
] | [
70,
4
] | [
75,
36
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.ascii_host | (self) | Works exactly like :attr:`host` but will return a result that
is restricted to ASCII. If it finds a netloc that is not ASCII
it will attempt to IDNA-encode it. This is useful for socket
operations when the URL might include internationalized characters.
| Works exactly like :attr:`host` but will return a result that
is restricted to ASCII. If it finds a netloc that is not ASCII
it will attempt to IDNA-encode it. This is useful for socket
operations when the URL might include internationalized characters.
| def ascii_host(self):
"""Works exactly like :attr:`host` but will return a result that
is restricted to ASCII. If it finds a netloc that is not ASCII
it will attempt to IDNA-encode it. This is useful for socket
operations when the URL might include internationalized characters.
"""
rv = self.host
if rv is not None and isinstance(rv, text_type):
try:
rv = _encode_idna(rv)
except UnicodeError:
rv = rv.encode("ascii", "ignore")
return to_native(rv, "ascii", "ignore") | [
"def",
"ascii_host",
"(",
"self",
")",
":",
"rv",
"=",
"self",
".",
"host",
"if",
"rv",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"rv",
",",
"text_type",
")",
":",
"try",
":",
"rv",
"=",
"_encode_idna",
"(",
"rv",
")",
"except",
"UnicodeError",
":",
"rv",
"=",
"rv",
".",
"encode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"return",
"to_native",
"(",
"rv",
",",
"\"ascii\"",
",",
"\"ignore\"",
")"
] | [
78,
4
] | [
90,
47
] | python | en | ['en', 'en', 'en'] | True |
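A hedged example contrasting `host` and `ascii_host` on an internationalized domain; ``xn--n3h`` is the standard IDNA form of the snowman character, assuming the usual codec behavior:

>>> from werkzeug.urls import url_parse
>>> url = url_parse(u'http://\u2603.example/')
>>> url.host
'☃.example'
>>> url.ascii_host
'xn--n3h.example'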
BaseURL.port | (self) | The port in the URL as an integer if it was present, `None`
otherwise. This does not fill in default ports.
| The port in the URL as an integer if it was present, `None`
otherwise. This does not fill in default ports.
| def port(self):
"""The port in the URL as an integer if it was present, `None`
otherwise. This does not fill in default ports.
"""
try:
rv = int(to_native(self._split_host()[1]))
if 0 <= rv <= 65535:
return rv
except (ValueError, TypeError):
pass | [
"def",
"port",
"(",
"self",
")",
":",
"try",
":",
"rv",
"=",
"int",
"(",
"to_native",
"(",
"self",
".",
"_split_host",
"(",
")",
"[",
"1",
"]",
")",
")",
"if",
"0",
"<=",
"rv",
"<=",
"65535",
":",
"return",
"rv",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass"
] | [
93,
4
] | [
102,
16
] | python | en | ['en', 'en', 'en'] | True |
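A sketch showing that `port` returns the parsed integer and falls back to `None` when absent (default ports are not filled in):

>>> from werkzeug.urls import url_parse
>>> url_parse('http://example.com:8080/').port
8080
>>> url_parse('http://example.com/').port is None
True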
BaseURL.auth | (self) | The authentication part in the URL if available, `None`
otherwise.
| The authentication part in the URL if available, `None`
otherwise.
| def auth(self):
"""The authentication part in the URL if available, `None`
otherwise.
"""
return self._split_netloc()[0] | [
"def",
"auth",
"(",
"self",
")",
":",
"return",
"self",
".",
"_split_netloc",
"(",
")",
"[",
"0",
"]"
] | [
105,
4
] | [
109,
38
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.username | (self) | The username if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
| The username if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
| def username(self):
"""The username if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[0]
if rv is not None:
return _url_unquote_legacy(rv) | [
"def",
"username",
"(",
"self",
")",
":",
"rv",
"=",
"self",
".",
"_split_auth",
"(",
")",
"[",
"0",
"]",
"if",
"rv",
"is",
"not",
"None",
":",
"return",
"_url_unquote_legacy",
"(",
"rv",
")"
] | [
112,
4
] | [
118,
42
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.raw_username | (self) | The username if it was part of the URL, `None` otherwise.
Unlike :attr:`username` this one is not being decoded.
| The username if it was part of the URL, `None` otherwise.
Unlike :attr:`username` this one is not being decoded.
| def raw_username(self):
"""The username if it was part of the URL, `None` otherwise.
Unlike :attr:`username` this one is not being decoded.
"""
return self._split_auth()[0] | [
"def",
"raw_username",
"(",
"self",
")",
":",
"return",
"self",
".",
"_split_auth",
"(",
")",
"[",
"0",
"]"
] | [
121,
4
] | [
125,
36
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.password | (self) | The password if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
| The password if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
| def password(self):
"""The password if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[1]
if rv is not None:
return _url_unquote_legacy(rv) | [
"def",
"password",
"(",
"self",
")",
":",
"rv",
"=",
"self",
".",
"_split_auth",
"(",
")",
"[",
"1",
"]",
"if",
"rv",
"is",
"not",
"None",
":",
"return",
"_url_unquote_legacy",
"(",
"rv",
")"
] | [
128,
4
] | [
134,
42
] | python | en | ['en', 'en', 'en'] | True |
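A sketch of the credential accessors on a URL carrying userinfo; `auth` is the raw `user:password` pair, while `username` and `password` are the URL-decoded forms:

>>> from werkzeug.urls import url_parse
>>> url = url_parse('http://user:secr%40t@example.com/')
>>> url.auth
'user:secr%40t'
>>> url.username
'user'
>>> url.password
'secr@t'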
BaseURL.raw_password | (self) | The password if it was part of the URL, `None` otherwise.
Unlike :attr:`password` this one is not being decoded.
| The password if it was part of the URL, `None` otherwise.
Unlike :attr:`password` this one is not being decoded.
| def raw_password(self):
"""The password if it was part of the URL, `None` otherwise.
Unlike :attr:`password` this one is not being decoded.
"""
return self._split_auth()[1] | [
"def",
"raw_password",
"(",
"self",
")",
":",
"return",
"self",
".",
"_split_auth",
"(",
")",
"[",
"1",
"]"
] | [
137,
4
] | [
141,
36
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.decode_query | (self, *args, **kwargs) | Decodes the query part of the URL. This is a shortcut for
calling :func:`url_decode` on the query argument. The arguments and
keyword arguments are forwarded to :func:`url_decode` unchanged.
| Decodes the query part of the URL. This is a shortcut for
calling :func:`url_decode` on the query argument. The arguments and
keyword arguments are forwarded to :func:`url_decode` unchanged.
| def decode_query(self, *args, **kwargs):
"""Decodes the query part of the URL. Ths is a shortcut for
calling :func:`url_decode` on the query argument. The arguments and
keyword arguments are forwarded to :func:`url_decode` unchanged.
"""
return url_decode(self.query, *args, **kwargs) | [
"def",
"decode_query",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"url_decode",
"(",
"self",
".",
"query",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
143,
4
] | [
148,
54
] | python | en | ['en', 'en', 'en'] | True |
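A sketch of `decode_query`; `url_decode` returns a `MultiDict` by default, so repeated keys are preserved:

>>> from werkzeug.urls import url_parse
>>> url_parse('http://example.com/?a=1&a=2&b=3').decode_query().getlist('a')
['1', '2']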
BaseURL.join | (self, *args, **kwargs) | Joins this URL with another one. This is just a convenience
function for calling into :meth:`url_join` and then parsing the
return value again.
| Joins this URL with another one. This is just a convenience
function for calling into :meth:`url_join` and then parsing the
return value again.
| def join(self, *args, **kwargs):
"""Joins this URL with another one. This is just a convenience
function for calling into :meth:`url_join` and then parsing the
return value again.
"""
return url_parse(url_join(self, *args, **kwargs)) | [
"def",
"join",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"url_parse",
"(",
"url_join",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | [
150,
4
] | [
155,
57
] | python | en | ['en', 'en', 'en'] | True |
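A sketch of the `join` convenience method; the result is itself a parsed URL tuple, so the accessors above keep working:

>>> from werkzeug.urls import url_parse
>>> url_parse('http://example.com/a/b').join('c').to_url()
'http://example.com/a/c'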
BaseURL.to_url | (self) | Returns a URL string or bytes depending on the type of the
information stored. This is just a convenience function
for calling :meth:`url_unparse` for this URL.
| Returns a URL string or bytes depending on the type of the
information stored. This is just a convenience function
for calling :meth:`url_unparse` for this URL.
| def to_url(self):
"""Returns a URL string or bytes depending on the type of the
information stored. This is just a convenience function
for calling :meth:`url_unparse` for this URL.
"""
return url_unparse(self) | [
"def",
"to_url",
"(",
"self",
")",
":",
"return",
"url_unparse",
"(",
"self",
")"
] | [
157,
4
] | [
162,
32
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.decode_netloc | (self) | Decodes the netloc part into a string. | Decodes the netloc part into a string. | def decode_netloc(self):
"""Decodes the netloc part into a string."""
rv = _decode_idna(self.host or "")
if ":" in rv:
rv = "[%s]" % rv
port = self.port
if port is not None:
rv = "%s:%d" % (rv, port)
auth = ":".join(
filter(
None,
[
_url_unquote_legacy(self.raw_username or "", "/:%@"),
_url_unquote_legacy(self.raw_password or "", "/:%@"),
],
)
)
if auth:
rv = "%s@%s" % (auth, rv)
return rv | [
"def",
"decode_netloc",
"(",
"self",
")",
":",
"rv",
"=",
"_decode_idna",
"(",
"self",
".",
"host",
"or",
"\"\"",
")",
"if",
"\":\"",
"in",
"rv",
":",
"rv",
"=",
"\"[%s]\"",
"%",
"rv",
"port",
"=",
"self",
".",
"port",
"if",
"port",
"is",
"not",
"None",
":",
"rv",
"=",
"\"%s:%d\"",
"%",
"(",
"rv",
",",
"port",
")",
"auth",
"=",
"\":\"",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"[",
"_url_unquote_legacy",
"(",
"self",
".",
"raw_username",
"or",
"\"\"",
",",
"\"/:%@\"",
")",
",",
"_url_unquote_legacy",
"(",
"self",
".",
"raw_password",
"or",
"\"\"",
",",
"\"/:%@\"",
")",
",",
"]",
",",
")",
")",
"if",
"auth",
":",
"rv",
"=",
"\"%s@%s\"",
"%",
"(",
"auth",
",",
"rv",
")",
"return",
"rv"
] | [
164,
4
] | [
184,
17
] | python | en | ['en', 'en', 'en'] | True |
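A hedged sketch of `decode_netloc` reversing the IDNA encoding of the host while keeping the port:

>>> from werkzeug.urls import url_parse
>>> url_parse('http://xn--n3h.example:8080/').decode_netloc()
'☃.example:8080'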
BaseURL.to_uri_tuple | (self) | Returns a :class:`BytesURL` tuple that holds a URI. This will
encode all the information in the URL properly to ASCII using the
rules a web browser would follow.
It's usually more interesting to directly call :meth:`iri_to_uri` which
will return a string.
| Returns a :class:`BytesURL` tuple that holds a URI. This will
encode all the information in the URL properly to ASCII using the
rules a web browser would follow. | def to_uri_tuple(self):
"""Returns a :class:`BytesURL` tuple that holds a URI. This will
encode all the information in the URL properly to ASCII using the
rules a web browser would follow.
It's usually more interesting to directly call :meth:`iri_to_uri` which
will return a string.
"""
return url_parse(iri_to_uri(self).encode("ascii")) | [
"def",
"to_uri_tuple",
"(",
"self",
")",
":",
"return",
"url_parse",
"(",
"iri_to_uri",
"(",
"self",
")",
".",
"encode",
"(",
"\"ascii\"",
")",
")"
] | [
186,
4
] | [
194,
58
] | python | en | ['en', 'en', 'en'] | True |
BaseURL.to_iri_tuple | (self) | Returns a :class:`URL` tuple that holds an IRI. This will try
to decode as much information as possible in the URL without
losing information similar to how a web browser does it for the
URL bar.
It's usually more interesting to directly call :meth:`uri_to_iri` which
will return a string.
| Returns a :class:`URL` tuple that holds an IRI. This will try
to decode as much information as possible in the URL without
losing information similar to how a web browser does it for the
URL bar. | def to_iri_tuple(self):
"""Returns a :class:`URL` tuple that holds a IRI. This will try
to decode as much information as possible in the URL without
losing information similar to how a web browser does it for the
URL bar.
It's usually more interesting to directly call :meth:`uri_to_iri` which
will return a string.
"""
return url_parse(uri_to_iri(self)) | [
"def",
"to_iri_tuple",
"(",
"self",
")",
":",
"return",
"url_parse",
"(",
"uri_to_iri",
"(",
"self",
")",
")"
] | [
196,
4
] | [
205,
42
] | python | en | ['en', 'en', 'en'] | True |
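A sketch of the IRI/URI round trip under the usual werkzeug IDNA handling: `to_iri_tuple` decodes punycode and percent-escapes, and `to_uri_tuple` goes back to an ASCII `BytesURL` (so its fields are bytes):

>>> from werkzeug.urls import url_parse
>>> iri = url_parse('http://xn--n3h.example/%E2%98%83').to_iri_tuple()
>>> iri.host
'☃.example'
>>> iri.to_uri_tuple().host
b'xn--n3h.example'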
BaseURL.get_file_location | (self, pathformat=None) | Returns a tuple with the location of the file in the form
``(server, location)``. If the netloc is empty in the URL or
points to localhost, it's represented as ``None``.
The `pathformat` is autodetected by default but needs to be set
when working with URLs of a specific system. The supported values
are ``'windows'`` when working with Windows or DOS paths and
``'posix'`` when working with posix paths.
If the URL does not point to a local file, the server and location
are both represented as ``None``.
:param pathformat: The expected format of the path component.
Currently ``'windows'`` and ``'posix'`` are
supported. Defaults to ``None``, which means
autodetect.
| Returns a tuple with the location of the file in the form
``(server, location)``. If the netloc is empty in the URL or
points to localhost, it's represented as ``None``. | def get_file_location(self, pathformat=None):
"""Returns a tuple with the location of the file in the form
``(server, location)``. If the netloc is empty in the URL or
points to localhost, it's represented as ``None``.
The `pathformat` is autodetected by default but needs to be set
when working with URLs of a specific system. The supported values
are ``'windows'`` when working with Windows or DOS paths and
``'posix'`` when working with posix paths.
If the URL does not point to a local file, the server and location
are both represented as ``None``.
:param pathformat: The expected format of the path component.
Currently ``'windows'`` and ``'posix'`` are
supported. Defaults to ``None``, which means
autodetect.
"""
if self.scheme != "file":
return None, None
path = url_unquote(self.path)
host = self.netloc or None
if pathformat is None:
if os.name == "nt":
pathformat = "windows"
else:
pathformat = "posix"
if pathformat == "windows":
if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
path = path[1:2] + ":" + path[3:]
windows_share = path[:3] in ("\\" * 3, "/" * 3)
import ntpath
path = ntpath.normpath(path)
# Windows shared drives are represented as ``\\host\\directory``.
# That results in a URL like ``file://///host/directory``, and a
# path like ``///host/directory``. We need to special-case this
# because the path contains the hostname.
if windows_share and host is None:
parts = path.lstrip("\\").split("\\", 1)
if len(parts) == 2:
host, path = parts
else:
host = parts[0]
path = ""
elif pathformat == "posix":
import posixpath
path = posixpath.normpath(path)
else:
raise TypeError("Invalid path format %s" % repr(pathformat))
if host in ("127.0.0.1", "::1", "localhost"):
host = None
return host, path | [
"def",
"get_file_location",
"(",
"self",
",",
"pathformat",
"=",
"None",
")",
":",
"if",
"self",
".",
"scheme",
"!=",
"\"file\"",
":",
"return",
"None",
",",
"None",
"path",
"=",
"url_unquote",
"(",
"self",
".",
"path",
")",
"host",
"=",
"self",
".",
"netloc",
"or",
"None",
"if",
"pathformat",
"is",
"None",
":",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"pathformat",
"=",
"\"windows\"",
"else",
":",
"pathformat",
"=",
"\"posix\"",
"if",
"pathformat",
"==",
"\"windows\"",
":",
"if",
"path",
"[",
":",
"1",
"]",
"==",
"\"/\"",
"and",
"path",
"[",
"1",
":",
"2",
"]",
".",
"isalpha",
"(",
")",
"and",
"path",
"[",
"2",
":",
"3",
"]",
"in",
"\"|:\"",
":",
"path",
"=",
"path",
"[",
"1",
":",
"2",
"]",
"+",
"\":\"",
"+",
"path",
"[",
"3",
":",
"]",
"windows_share",
"=",
"path",
"[",
":",
"3",
"]",
"in",
"(",
"\"\\\\\"",
"*",
"3",
",",
"\"/\"",
"*",
"3",
")",
"import",
"ntpath",
"path",
"=",
"ntpath",
".",
"normpath",
"(",
"path",
")",
"# Windows shared drives are represented as ``\\\\host\\\\directory``.",
"# That results in a URL like ``file://///host/directory``, and a",
"# path like ``///host/directory``. We need to special-case this",
"# because the path contains the hostname.",
"if",
"windows_share",
"and",
"host",
"is",
"None",
":",
"parts",
"=",
"path",
".",
"lstrip",
"(",
"\"\\\\\"",
")",
".",
"split",
"(",
"\"\\\\\"",
",",
"1",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"host",
",",
"path",
"=",
"parts",
"else",
":",
"host",
"=",
"parts",
"[",
"0",
"]",
"path",
"=",
"\"\"",
"elif",
"pathformat",
"==",
"\"posix\"",
":",
"import",
"posixpath",
"path",
"=",
"posixpath",
".",
"normpath",
"(",
"path",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Invalid path format %s\"",
"%",
"repr",
"(",
"pathformat",
")",
")",
"if",
"host",
"in",
"(",
"\"127.0.0.1\"",
",",
"\"::1\"",
",",
"\"localhost\"",
")",
":",
"host",
"=",
"None",
"return",
"host",
",",
"path"
] | [
207,
4
] | [
265,
25
] | python | en | ['en', 'en', 'en'] | True |
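A sketch of `get_file_location` for both path formats; the drive-letter rewrite and the `None` host for local files shown in the code are visible here:

>>> from werkzeug.urls import url_parse
>>> url_parse('file:///var/log/app.log').get_file_location('posix')
(None, '/var/log/app.log')
>>> url_parse('file:///C:/Temp/notes.txt').get_file_location('windows')
(None, 'C:\\Temp\\notes.txt')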
BytesURL.encode_netloc | (self) | Returns the netloc unchanged as bytes. | Returns the netloc unchanged as bytes. | def encode_netloc(self):
"""Returns the netloc unchanged as bytes."""
return self.netloc | [
"def",
"encode_netloc",
"(",
"self",
")",
":",
"return",
"self",
".",
"netloc"
] | [
363,
4
] | [
365,
26
] | python | en | ['en', 'xh', 'en'] | True |
BytesURL.decode | (self, charset="utf-8", errors="replace") | Decodes the URL to a tuple made out of strings. The charset is
only used for the path, query and fragment.
| Decodes the URL to a tuple made out of strings. The charset is
only used for the path, query and fragment.
| def decode(self, charset="utf-8", errors="replace"):
"""Decodes the URL to a tuple made out of strings. The charset is
only used for the path, query and fragment.
"""
return URL(
self.scheme.decode("ascii"),
self.decode_netloc(),
self.path.decode(charset, errors),
self.query.decode(charset, errors),
self.fragment.decode(charset, errors),
) | [
"def",
"decode",
"(",
"self",
",",
"charset",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"replace\"",
")",
":",
"return",
"URL",
"(",
"self",
".",
"scheme",
".",
"decode",
"(",
"\"ascii\"",
")",
",",
"self",
".",
"decode_netloc",
"(",
")",
",",
"self",
".",
"path",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
",",
"self",
".",
"query",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
",",
"self",
".",
"fragment",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
",",
")"
] | [
367,
4
] | [
377,
9
] | python | en | ['en', 'en', 'en'] | True |
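A sketch of the bytes-to-text round trip: parsing a bytes URL yields a `BytesURL`, and `decode` produces the text `URL` tuple (percent-escapes are left intact):

>>> from werkzeug.urls import url_parse
>>> raw = url_parse(b'http://example.com/caf%C3%A9?q=1')
>>> raw.path
b'/caf%C3%A9'
>>> raw.decode().path
'/caf%C3%A9'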