Search is not available for this dataset
identifier
stringlengths
1
155
parameters
stringlengths
2
6.09k
docstring
stringlengths
11
63.4k
docstring_summary
stringlengths
0
63.4k
function
stringlengths
29
99.8k
function_tokens
sequence
start_point
sequence
end_point
sequence
language
stringclasses
1 value
docstring_language
stringlengths
2
7
docstring_language_predictions
stringlengths
18
23
is_langid_reliable
stringclasses
2 values
direct_url_as_pep440_direct_reference
(direct_url: DirectUrl, name: str)
Convert a DirectUrl to a pip requirement string.
Convert a DirectUrl to a pip requirement string.
def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> str:
    """Convert a DirectUrl to a pip requirement string."""
    direct_url.validate()  # if invalid, this is a pip bug
    info = direct_url.info
    frag_parts = []
    # Build the URL portion according to the kind of direct URL.
    if isinstance(info, VcsInfo):
        url_part = "{}+{}@{}".format(info.vcs, direct_url.url, info.commit_id)
    elif isinstance(info, ArchiveInfo):
        url_part = direct_url.url
        if info.hash:
            frag_parts.append(info.hash)
    else:
        assert isinstance(info, DirInfo)
        url_part = direct_url.url
    if direct_url.subdirectory:
        frag_parts.append("subdirectory=" + direct_url.subdirectory)
    requirement = name + " @ " + url_part
    if frag_parts:
        requirement += "#" + "&".join(frag_parts)
    return requirement
[ "def", "direct_url_as_pep440_direct_reference", "(", "direct_url", ":", "DirectUrl", ",", "name", ":", "str", ")", "->", "str", ":", "direct_url", ".", "validate", "(", ")", "# if invalid, this is a pip bug", "requirement", "=", "name", "+", "\" @ \"", "fragments", "=", "[", "]", "if", "isinstance", "(", "direct_url", ".", "info", ",", "VcsInfo", ")", ":", "requirement", "+=", "\"{}+{}@{}\"", ".", "format", "(", "direct_url", ".", "info", ".", "vcs", ",", "direct_url", ".", "url", ",", "direct_url", ".", "info", ".", "commit_id", ")", "elif", "isinstance", "(", "direct_url", ".", "info", ",", "ArchiveInfo", ")", ":", "requirement", "+=", "direct_url", ".", "url", "if", "direct_url", ".", "info", ".", "hash", ":", "fragments", ".", "append", "(", "direct_url", ".", "info", ".", "hash", ")", "else", ":", "assert", "isinstance", "(", "direct_url", ".", "info", ",", "DirInfo", ")", "requirement", "+=", "direct_url", ".", "url", "if", "direct_url", ".", "subdirectory", ":", "fragments", ".", "append", "(", "\"subdirectory=\"", "+", "direct_url", ".", "subdirectory", ")", "if", "fragments", ":", "requirement", "+=", "\"#\"", "+", "\"&\"", ".", "join", "(", "fragments", ")", "return", "requirement" ]
[ 7, 0 ]
[ 27, 22 ]
python
en
['en', 'en', 'en']
True
colorize
(text='', opts=(), **kwargs)
Return your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Return the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print(colorize('first line', fg='red', opts=('noreset',))) print('this should be red too') print(colorize('and so should this')) print('this should not be red')
Return your text, enclosed in ANSI graphics codes.
def colorize(text='', opts=(), **kwargs):
    """
    Return your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.  Return just the RESET code if no parameters
    are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal',
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    # Special case: colorize('', opts=('reset',)) emits a bare RESET code.
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    codes = []
    # Keyword order is preserved, so fg/bg codes appear as passed.
    for key, value in kwargs.items():
        if key == 'fg':
            codes.append(foreground[value])
        elif key == 'bg':
            codes.append(background[value])
    for option in opts:
        if option in opt_dict:
            codes.append(opt_dict[option])
    if 'noreset' not in opts:
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % ('\x1b[%sm' % ';'.join(codes), text or '')
[ "def", "colorize", "(", "text", "=", "''", ",", "opts", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "code_list", "=", "[", "]", "if", "text", "==", "''", "and", "len", "(", "opts", ")", "==", "1", "and", "opts", "[", "0", "]", "==", "'reset'", ":", "return", "'\\x1b[%sm'", "%", "RESET", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "==", "'fg'", ":", "code_list", ".", "append", "(", "foreground", "[", "v", "]", ")", "elif", "k", "==", "'bg'", ":", "code_list", ".", "append", "(", "background", "[", "v", "]", ")", "for", "o", "in", "opts", ":", "if", "o", "in", "opt_dict", ":", "code_list", ".", "append", "(", "opt_dict", "[", "o", "]", ")", "if", "'noreset'", "not", "in", "opts", ":", "text", "=", "'%s\\x1b[%sm'", "%", "(", "text", "or", "''", ",", "RESET", ")", "return", "'%s%s'", "%", "(", "(", "'\\x1b[%sm'", "%", "';'", ".", "join", "(", "code_list", ")", ")", ",", "text", "or", "''", ")" ]
[ 12, 0 ]
[ 54, 68 ]
python
en
['en', 'error', 'th']
False
make_style
(opts=(), **kwargs)
Return a function with default parameters for colorize() Example: bold_red = make_style(opts=('bold',), fg='red') print(bold_red('hello')) KEYWORD = make_style(fg='yellow') COMMENT = make_style(fg='blue', opts=('bold',))
Return a function with default parameters for colorize()
def make_style(opts=(), **kwargs):
    """
    Return a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        # Forward the captured defaults to colorize for every call.
        return colorize(text, opts, **kwargs)
    return style_func
[ "def", "make_style", "(", "opts", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "return", "lambda", "text", ":", "colorize", "(", "text", ",", "opts", ",", "*", "*", "kwargs", ")" ]
[ 57, 0 ]
[ 67, 54 ]
python
en
['en', 'error', 'th']
False
parse_color_setting
(config_string)
Parse a DJANGO_COLORS environment variable to produce the system palette The general form of a palette definition is: "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option" where: palette is a named palette; one of 'light', 'dark', or 'nocolor'. role is a named style used by Django fg is a foreground color. bg is a background color. option is a display options. Specifying a named palette is the same as manually specifying the individual definitions for each role. Any individual definitions following the palette definition will augment the base palette definition. Valid roles: 'error', 'success', 'warning', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table', 'http_info', 'http_success', 'http_redirect', 'http_not_modified', 'http_bad_request', 'http_not_found', 'http_server_error', 'migrate_heading', 'migrate_label' Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold', 'underscore', 'blink', 'reverse', 'conceal', 'noreset'
Parse a DJANGO_COLORS environment variable to produce the system palette
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the
    individual definitions for each role.  Any individual definitions
    following the palette definition will augment the base palette
    definition.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal', 'noreset'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Break the configuration into semicolon-separated chunks.
    chunks = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for chunk in chunks:
        if chunk in PALETTES:
            # A named base palette; merge it in wholesale.
            palette.update(PALETTES[chunk])
        elif '=' in chunk:
            # A single role definition: role=fg[/bg][,option,...]
            definition = {}
            role, instructions = chunk.split('=')
            role = role.upper()
            styles = instructions.split(',')
            styles.reverse()

            # The first instruction may carry a fg/bg pair split by '/'.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # Whatever instructions remain are display options.
            opts = tuple(s for s in styles if s in opt_dict)
            if opts:
                definition['opts'] = opts

            # The nocolor palette lists every available role; use it to
            # decide whether the role name is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # An all-empty palette means "no colors"; signal that with None.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
[ "def", "parse_color_setting", "(", "config_string", ")", ":", "if", "not", "config_string", ":", "return", "PALETTES", "[", "DEFAULT_PALETTE", "]", "# Split the color configuration into parts", "parts", "=", "config_string", ".", "lower", "(", ")", ".", "split", "(", "';'", ")", "palette", "=", "PALETTES", "[", "NOCOLOR_PALETTE", "]", ".", "copy", "(", ")", "for", "part", "in", "parts", ":", "if", "part", "in", "PALETTES", ":", "# A default palette has been specified", "palette", ".", "update", "(", "PALETTES", "[", "part", "]", ")", "elif", "'='", "in", "part", ":", "# Process a palette defining string", "definition", "=", "{", "}", "# Break the definition into the role,", "# plus the list of specific instructions.", "# The role must be in upper case", "role", ",", "instructions", "=", "part", ".", "split", "(", "'='", ")", "role", "=", "role", ".", "upper", "(", ")", "styles", "=", "instructions", ".", "split", "(", "','", ")", "styles", ".", "reverse", "(", ")", "# The first instruction can contain a slash", "# to break apart fg/bg.", "colors", "=", "styles", ".", "pop", "(", ")", ".", "split", "(", "'/'", ")", "colors", ".", "reverse", "(", ")", "fg", "=", "colors", ".", "pop", "(", ")", "if", "fg", "in", "color_names", ":", "definition", "[", "'fg'", "]", "=", "fg", "if", "colors", "and", "colors", "[", "-", "1", "]", "in", "color_names", ":", "definition", "[", "'bg'", "]", "=", "colors", "[", "-", "1", "]", "# All remaining instructions are options", "opts", "=", "tuple", "(", "s", "for", "s", "in", "styles", "if", "s", "in", "opt_dict", ")", "if", "opts", ":", "definition", "[", "'opts'", "]", "=", "opts", "# The nocolor palette has all available roles.", "# Use that palette as the basis for determining", "# if the role is valid.", "if", "role", "in", "PALETTES", "[", "NOCOLOR_PALETTE", "]", "and", "definition", ":", "palette", "[", "role", "]", "=", "definition", "# If there are no colors specified, return the empty palette.", "if", "palette", "==", 
"PALETTES", "[", "NOCOLOR_PALETTE", "]", ":", "return", "None", "return", "palette" ]
[ 136, 0 ]
[ 214, 18 ]
python
en
['en', 'en', 'en']
True
safe_range
(*args)
A range that can't generate ranges with a length of more than MAX_RANGE items.
A range that can't generate ranges with a length of more than MAX_RANGE items.
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    result = range_type(*args)
    # range objects know their length cheaply, so this check is O(1).
    if len(result) > MAX_RANGE:
        raise OverflowError(
            "Range too big. The sandbox blocks ranges larger than"
            " MAX_RANGE (%d)." % MAX_RANGE
        )
    return result
[ "def", "safe_range", "(", "*", "args", ")", ":", "rng", "=", "range_type", "(", "*", "args", ")", "if", "len", "(", "rng", ")", ">", "MAX_RANGE", ":", "raise", "OverflowError", "(", "\"Range too big. The sandbox blocks ranges larger than\"", "\" MAX_RANGE (%d).\"", "%", "MAX_RANGE", ")", "return", "rng" ]
[ 144, 0 ]
[ 156, 14 ]
python
en
['en', 'en', 'en']
True
unsafe
(f)
Marks a function or method as unsafe. :: @unsafe def delete(self): pass
Marks a function or method as unsafe.
def unsafe(f):
    """Marks a function or method as unsafe.

    ::

        @unsafe
        def delete(self):
            pass
    """
    # The sandbox checks this flag before allowing a call.
    setattr(f, 'unsafe_callable', True)
    return f
[ "def", "unsafe", "(", "f", ")", ":", "f", ".", "unsafe_callable", "=", "True", "return", "f" ]
[ 159, 0 ]
[ 169, 12 ]
python
en
['en', 'en', 'en']
True
is_internal_attribute
(obj, attr)
Test if the attribute given is an internal python attribute. For example this function returns `True` for the `func_code` attribute of python objects. This is useful if the environment method :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden. >>> from jinja2.sandbox import is_internal_attribute >>> is_internal_attribute(str, "mro") True >>> is_internal_attribute(str, "upper") False
Test if the attribute given is an internal python attribute. For example this function returns `True` for the `func_code` attribute of python objects. This is useful if the environment method :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # Guard clauses per object category; the categories are mutually
    # exclusive, so early returns mirror the original elif chain.
    if isinstance(obj, types.FunctionType) and attr in UNSAFE_FUNCTION_ATTRIBUTES:
        return True
    if isinstance(obj, types.MethodType) and (
            attr in UNSAFE_FUNCTION_ATTRIBUTES or
            attr in UNSAFE_METHOD_ATTRIBUTES):
        return True
    if isinstance(obj, type) and attr == 'mro':
        return True
    if isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Code/traceback/frame objects are always internal.
        return True
    if isinstance(obj, types.GeneratorType) and attr in UNSAFE_GENERATOR_ATTRIBUTES:
        return True
    if hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType) \
            and attr in UNSAFE_COROUTINE_ATTRIBUTES:
        return True
    if hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType) \
            and attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
        return True
    # Dunder attributes are always considered internal.
    return attr.startswith('__')
[ "def", "is_internal_attribute", "(", "obj", ",", "attr", ")", ":", "if", "isinstance", "(", "obj", ",", "types", ".", "FunctionType", ")", ":", "if", "attr", "in", "UNSAFE_FUNCTION_ATTRIBUTES", ":", "return", "True", "elif", "isinstance", "(", "obj", ",", "types", ".", "MethodType", ")", ":", "if", "attr", "in", "UNSAFE_FUNCTION_ATTRIBUTES", "or", "attr", "in", "UNSAFE_METHOD_ATTRIBUTES", ":", "return", "True", "elif", "isinstance", "(", "obj", ",", "type", ")", ":", "if", "attr", "==", "'mro'", ":", "return", "True", "elif", "isinstance", "(", "obj", ",", "(", "types", ".", "CodeType", ",", "types", ".", "TracebackType", ",", "types", ".", "FrameType", ")", ")", ":", "return", "True", "elif", "isinstance", "(", "obj", ",", "types", ".", "GeneratorType", ")", ":", "if", "attr", "in", "UNSAFE_GENERATOR_ATTRIBUTES", ":", "return", "True", "elif", "hasattr", "(", "types", ",", "'CoroutineType'", ")", "and", "isinstance", "(", "obj", ",", "types", ".", "CoroutineType", ")", ":", "if", "attr", "in", "UNSAFE_COROUTINE_ATTRIBUTES", ":", "return", "True", "elif", "hasattr", "(", "types", ",", "'AsyncGeneratorType'", ")", "and", "isinstance", "(", "obj", ",", "types", ".", "AsyncGeneratorType", ")", ":", "if", "attr", "in", "UNSAFE_ASYNC_GENERATOR_ATTRIBUTES", ":", "return", "True", "return", "attr", ".", "startswith", "(", "'__'", ")" ]
[ 172, 0 ]
[ 205, 32 ]
python
en
['en', 'en', 'en']
True
modifies_known_mutable
(obj, attr)
This function checks if an attribute on a builtin mutable object (list, dict, set or deque) would modify it if called. It also supports the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and with Python 2.6 onwards the abstract base classes `MutableSet`, `MutableMapping`, and `MutableSequence`. >>> modifies_known_mutable({}, "clear") True >>> modifies_known_mutable({}, "keys") False >>> modifies_known_mutable([], "append") True >>> modifies_known_mutable([], "index") False If called with an unsupported object (such as unicode) `False` is returned. >>> modifies_known_mutable("foo", "upper") False
This function checks if an attribute on a builtin mutable object (list, dict, set or deque) would modify it if called. It also supports the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and with Python 2.6 onwards the abstract base classes `MutableSet`, `MutableMapping`, and `MutableSequence`.
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called.  It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # NOTE: loop variable renamed from `unsafe` to avoid shadowing the
    # sibling `unsafe` decorator defined in this module.
    for typespec, mutating_methods in _mutable_spec:
        if isinstance(obj, typespec):
            return attr in mutating_methods
    return False
[ "def", "modifies_known_mutable", "(", "obj", ",", "attr", ")", ":", "for", "typespec", ",", "unsafe", "in", "_mutable_spec", ":", "if", "isinstance", "(", "obj", ",", "typespec", ")", ":", "return", "attr", "in", "unsafe", "return", "False" ]
[ 208, 0 ]
[ 233, 16 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.intercept_unop
(self, operator)
Called during template compilation with the name of a unary operator to check if it should be intercepted at runtime. If this method returns `True`, :meth:`call_unop` is executed for this unary operator. The default implementation of :meth:`call_unop` will use the :attr:`unop_table` dictionary to perform the operator with the same logic as the builtin one. The following unary operators are interceptable: ``+`` and ``-`` Intercepted calls are always slower than the native operator call, so make sure only to intercept the ones you are interested in. .. versionadded:: 2.6
Called during template compilation with the name of a unary operator to check if it should be intercepted at runtime. If this method returns `True`, :meth:`call_unop` is executed for this unary operator. The default implementation of :meth:`call_unop` will use the :attr:`unop_table` dictionary to perform the operator with the same logic as the builtin one.
def intercept_unop(self, operator):
    """Called during template compilation with the name of a
    unary operator to check if it should be intercepted at runtime.
    If this method returns `True`, :meth:`call_unop` is executed for this
    unary operator.  The default implementation of :meth:`call_unop`
    will use the :attr:`unop_table` dictionary to perform the operator
    with the same logic as the builtin one.

    The following unary operators are interceptable: ``+`` and ``-``

    Intercepted calls are always slower than the native operator call,
    so make sure only to intercept the ones you are interested in.

    .. versionadded:: 2.6
    """
    # No unary operators are intercepted by default.
    return False
[ "def", "intercept_unop", "(", "self", ",", "operator", ")", ":", "return", "False" ]
[ 300, 4 ]
[ 315, 20 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.is_safe_attribute
(self, obj, attr, value)
The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function.
The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function.
def is_safe_attribute(self, obj, attr, value):
    """The sandboxed environment will call this method to check if the
    attribute of an object is safe to access.  Per default all attributes
    starting with an underscore are considered private as well as the
    special attributes of internal python objects as returned by the
    :func:`is_internal_attribute` function.
    """
    # Underscore-prefixed names are rejected outright; everything else
    # is vetted against the internal-attribute blacklist.
    if attr.startswith('_'):
        return False
    return not is_internal_attribute(obj, attr)
[ "def", "is_safe_attribute", "(", "self", ",", "obj", ",", "attr", ",", "value", ")", ":", "return", "not", "(", "attr", ".", "startswith", "(", "'_'", ")", "or", "is_internal_attribute", "(", "obj", ",", "attr", ")", ")" ]
[ 324, 4 ]
[ 331, 77 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.is_safe_callable
(self, obj)
Check if an object is safely callable. Per default a function is considered safe unless the `unsafe_callable` attribute exists and is True. Override this method to alter the behavior, but this won't affect the `unsafe` decorator from this module.
Check if an object is safely callable. Per default a function is considered safe unless the `unsafe_callable` attribute exists and is True. Override this method to alter the behavior, but this won't affect the `unsafe` decorator from this module.
def is_safe_callable(self, obj):
    """Check if an object is safely callable.  Per default a function is
    considered safe unless the `unsafe_callable` attribute exists and is
    True.  Override this method to alter the behavior, but this won't
    affect the `unsafe` decorator from this module.
    """
    # Either marker flag makes the object unsafe to call.
    marked_unsafe = getattr(obj, 'unsafe_callable', False)
    alters_data = getattr(obj, 'alters_data', False)
    return not (marked_unsafe or alters_data)
[ "def", "is_safe_callable", "(", "self", ",", "obj", ")", ":", "return", "not", "(", "getattr", "(", "obj", ",", "'unsafe_callable'", ",", "False", ")", "or", "getattr", "(", "obj", ",", "'alters_data'", ",", "False", ")", ")" ]
[ 333, 4 ]
[ 340, 55 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.call_binop
(self, context, operator, left, right)
For intercepted binary operator calls (:meth:`intercepted_binops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6
For intercepted binary operator calls (:meth:`intercepted_binops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators.
def call_binop(self, context, operator, left, right):
    """For intercepted binary operator calls (:meth:`intercepted_binops`)
    this function is executed instead of the builtin operator.  This can
    be used to fine tune the behavior of certain operators.

    .. versionadded:: 2.6
    """
    # Look up the handler for this operator and apply it.
    handler = self.binop_table[operator]
    return handler(left, right)
[ "def", "call_binop", "(", "self", ",", "context", ",", "operator", ",", "left", ",", "right", ")", ":", "return", "self", ".", "binop_table", "[", "operator", "]", "(", "left", ",", "right", ")" ]
[ 342, 4 ]
[ 349, 54 ]
python
en
['en', 'ig', 'en']
True
SandboxedEnvironment.call_unop
(self, context, operator, arg)
For intercepted unary operator calls (:meth:`intercepted_unops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6
For intercepted unary operator calls (:meth:`intercepted_unops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators.
def call_unop(self, context, operator, arg):
    """For intercepted unary operator calls (:meth:`intercepted_unops`)
    this function is executed instead of the builtin operator.  This can
    be used to fine tune the behavior of certain operators.

    .. versionadded:: 2.6
    """
    # Look up the handler for this operator and apply it.
    handler = self.unop_table[operator]
    return handler(arg)
[ "def", "call_unop", "(", "self", ",", "context", ",", "operator", ",", "arg", ")", ":", "return", "self", ".", "unop_table", "[", "operator", "]", "(", "arg", ")" ]
[ 351, 4 ]
[ 358, 45 ]
python
en
['en', 'en', 'it']
True
SandboxedEnvironment.getitem
(self, obj, argument)
Subscribe an object from sandboxed code.
Subscribe an object from sandboxed code.
def getitem(self, obj, argument):
    """Subscribe an object from sandboxed code."""
    try:
        return obj[argument]
    except (TypeError, LookupError):
        # Item lookup failed; fall back to attribute access for
        # string-like arguments.
        if isinstance(argument, string_types):
            try:
                attr_name = str(argument)
            except Exception:
                pass
            else:
                try:
                    attr_value = getattr(obj, attr_name)
                except AttributeError:
                    pass
                else:
                    # The attribute exists; hand back either the value or
                    # an "unsafe" undefined depending on the safety check.
                    if self.is_safe_attribute(obj, argument, attr_value):
                        return attr_value
                    return self.unsafe_undefined(obj, argument)
    return self.undefined(obj=obj, name=argument)
[ "def", "getitem", "(", "self", ",", "obj", ",", "argument", ")", ":", "try", ":", "return", "obj", "[", "argument", "]", "except", "(", "TypeError", ",", "LookupError", ")", ":", "if", "isinstance", "(", "argument", ",", "string_types", ")", ":", "try", ":", "attr", "=", "str", "(", "argument", ")", "except", "Exception", ":", "pass", "else", ":", "try", ":", "value", "=", "getattr", "(", "obj", ",", "attr", ")", "except", "AttributeError", ":", "pass", "else", ":", "if", "self", ".", "is_safe_attribute", "(", "obj", ",", "argument", ",", "value", ")", ":", "return", "value", "return", "self", ".", "unsafe_undefined", "(", "obj", ",", "argument", ")", "return", "self", ".", "undefined", "(", "obj", "=", "obj", ",", "name", "=", "argument", ")" ]
[ 360, 4 ]
[ 379, 53 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.getattr
(self, obj, attribute)
Subscribe an object from sandboxed code and prefer the attribute. The attribute passed *must* be a bytestring.
Subscribe an object from sandboxed code and prefer the attribute. The attribute passed *must* be a bytestring.
def getattr(self, obj, attribute):
    """Subscribe an object from sandboxed code and prefer the
    attribute.  The attribute passed *must* be a bytestring.
    """
    try:
        attr_value = getattr(obj, attribute)
    except AttributeError:
        # No such attribute; fall back to item lookup instead.
        try:
            return obj[attribute]
        except (TypeError, LookupError):
            pass
    else:
        # The attribute exists; hand back either the value or an
        # "unsafe" undefined depending on the safety check.
        if self.is_safe_attribute(obj, attribute, attr_value):
            return attr_value
        return self.unsafe_undefined(obj, attribute)
    return self.undefined(obj=obj, name=attribute)
[ "def", "getattr", "(", "self", ",", "obj", ",", "attribute", ")", ":", "try", ":", "value", "=", "getattr", "(", "obj", ",", "attribute", ")", "except", "AttributeError", ":", "try", ":", "return", "obj", "[", "attribute", "]", "except", "(", "TypeError", ",", "LookupError", ")", ":", "pass", "else", ":", "if", "self", ".", "is_safe_attribute", "(", "obj", ",", "attribute", ",", "value", ")", ":", "return", "value", "return", "self", ".", "unsafe_undefined", "(", "obj", ",", "attribute", ")", "return", "self", ".", "undefined", "(", "obj", "=", "obj", ",", "name", "=", "attribute", ")" ]
[ 381, 4 ]
[ 396, 54 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.unsafe_undefined
(self, obj, attribute)
Return an undefined object for unsafe attributes.
Return an undefined object for unsafe attributes.
def unsafe_undefined(self, obj, attribute):
    """Return an undefined object for unsafe attributes."""
    # Build the message first, then wrap it in an undefined object that
    # raises SecurityError when touched.
    message = 'access to attribute %r of %r object is unsafe.' % (
        attribute,
        obj.__class__.__name__,
    )
    return self.undefined(message, name=attribute, obj=obj, exc=SecurityError)
[ "def", "unsafe_undefined", "(", "self", ",", "obj", ",", "attribute", ")", ":", "return", "self", ".", "undefined", "(", "'access to attribute %r of %r '", "'object is unsafe.'", "%", "(", "attribute", ",", "obj", ".", "__class__", ".", "__name__", ")", ",", "name", "=", "attribute", ",", "obj", "=", "obj", ",", "exc", "=", "SecurityError", ")" ]
[ 398, 4 ]
[ 404, 54 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.format_string
(self, s, args, kwargs, format_func=None)
If a format call is detected, then this is routed through this method so that our safety sandbox can be used for it.
If a format call is detected, then this is routed through this method so that our safety sandbox can be used for it.
def format_string(self, s, args, kwargs, format_func=None):
    """If a format call is detected, then this is routed through this
    method so that our safety sandbox can be used for it.
    """
    # Markup input keeps autoescaping; plain strings use the basic
    # sandboxed formatter.
    if isinstance(s, Markup):
        fmt = SandboxedEscapeFormatter(self, s.escape)
    else:
        fmt = SandboxedFormatter(self)

    # format_map() accepts exactly one mapping argument; emulate its
    # signature checking before delegating to vformat.
    if format_func is not None and format_func.__name__ == 'format_map':
        if len(args) != 1 or kwargs:
            raise TypeError(
                'format_map() takes exactly one argument %d given'
                % (len(args) + (kwargs is not None))
            )
        kwargs = args[0]
        args = None

    kwargs = _MagicFormatMapping(args, kwargs)
    rv = fmt.vformat(s, args, kwargs)
    # Preserve the input type (e.g. Markup stays Markup).
    return type(s)(rv)
[ "def", "format_string", "(", "self", ",", "s", ",", "args", ",", "kwargs", ",", "format_func", "=", "None", ")", ":", "if", "isinstance", "(", "s", ",", "Markup", ")", ":", "formatter", "=", "SandboxedEscapeFormatter", "(", "self", ",", "s", ".", "escape", ")", "else", ":", "formatter", "=", "SandboxedFormatter", "(", "self", ")", "if", "format_func", "is", "not", "None", "and", "format_func", ".", "__name__", "==", "'format_map'", ":", "if", "len", "(", "args", ")", "!=", "1", "or", "kwargs", ":", "raise", "TypeError", "(", "'format_map() takes exactly one argument %d given'", "%", "(", "len", "(", "args", ")", "+", "(", "kwargs", "is", "not", "None", ")", ")", ")", "kwargs", "=", "args", "[", "0", "]", "args", "=", "None", "kwargs", "=", "_MagicFormatMapping", "(", "args", ",", "kwargs", ")", "rv", "=", "formatter", ".", "vformat", "(", "s", ",", "args", ",", "kwargs", ")", "return", "type", "(", "s", ")", "(", "rv", ")" ]
[ 406, 4 ]
[ 427, 26 ]
python
en
['en', 'en', 'en']
True
SandboxedEnvironment.call
(__self, __context, __obj, *args, **kwargs)
Call an object from sandboxed code.
Call an object from sandboxed code.
def call(__self, __context, __obj, *args, **kwargs):
    """Call an object from sandboxed code."""
    # The double underscore prefixes avoid duplicate keyword argument
    # errors when proxying the call.
    fmt_spec = inspect_format_method(__obj)
    if fmt_spec is not None:
        # str.format / str.format_map are routed through the sandboxed
        # formatter instead of being called directly.
        return __self.format_string(fmt_spec, args, kwargs, __obj)
    if not __self.is_safe_callable(__obj):
        raise SecurityError('%r is not safely callable' % (__obj,))
    return __context.call(__obj, *args, **kwargs)
[ "def", "call", "(", "__self", ",", "__context", ",", "__obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "fmt", "=", "inspect_format_method", "(", "__obj", ")", "if", "fmt", "is", "not", "None", ":", "return", "__self", ".", "format_string", "(", "fmt", ",", "args", ",", "kwargs", ",", "__obj", ")", "# the double prefixes are to avoid double keyword argument", "# errors when proxying the call.", "if", "not", "__self", ".", "is_safe_callable", "(", "__obj", ")", ":", "raise", "SecurityError", "(", "'%r is not safely callable'", "%", "(", "__obj", ",", ")", ")", "return", "__context", ".", "call", "(", "__obj", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
[ 429, 4 ]
[ 439, 53 ]
python
en
['en', 'en', 'en']
True
get_admin_log
(parser, token)
Populate a template variable with the admin log for the given criteria. Usage:: {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %} Examples:: {% get_admin_log 10 as admin_log for_user 23 %} {% get_admin_log 10 as admin_log for_user user %} {% get_admin_log 10 as admin_log %} Note that ``context_var_containing_user_obj`` can be a hard-coded integer (user ID) or the name of a template context variable containing the user object whose ID you want.
Populate a template variable with the admin log for the given criteria.
def get_admin_log(parser, token): """ Populate a template variable with the admin log for the given criteria. Usage:: {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %} Examples:: {% get_admin_log 10 as admin_log for_user 23 %} {% get_admin_log 10 as admin_log for_user user %} {% get_admin_log 10 as admin_log %} Note that ``context_var_containing_user_obj`` can be a hard-coded integer (user ID) or the name of a template context variable containing the user object whose ID you want. """ tokens = token.contents.split() if len(tokens) < 4: raise template.TemplateSyntaxError( "'get_admin_log' statements require two arguments") if not tokens[1].isdigit(): raise template.TemplateSyntaxError( "First argument to 'get_admin_log' must be an integer") if tokens[2] != 'as': raise template.TemplateSyntaxError( "Second argument to 'get_admin_log' must be 'as'") if len(tokens) > 4: if tokens[4] != 'for_user': raise template.TemplateSyntaxError( "Fourth argument to 'get_admin_log' must be 'for_user'") return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
[ "def", "get_admin_log", "(", "parser", ",", "token", ")", ":", "tokens", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", "<", "4", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"'get_admin_log' statements require two arguments\"", ")", "if", "not", "tokens", "[", "1", "]", ".", "isdigit", "(", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"First argument to 'get_admin_log' must be an integer\"", ")", "if", "tokens", "[", "2", "]", "!=", "'as'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"Second argument to 'get_admin_log' must be 'as'\"", ")", "if", "len", "(", "tokens", ")", ">", "4", ":", "if", "tokens", "[", "4", "]", "!=", "'for_user'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"Fourth argument to 'get_admin_log' must be 'for_user'\"", ")", "return", "AdminLogNode", "(", "limit", "=", "tokens", "[", "1", "]", ",", "varname", "=", "tokens", "[", "3", "]", ",", "user", "=", "(", "tokens", "[", "5", "]", "if", "len", "(", "tokens", ")", ">", "5", "else", "None", ")", ")" ]
[ 26, 0 ]
[ 58, 106 ]
python
en
['en', 'error', 'th']
False
BaseExpression.as_sql
(self, compiler, connection)
Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`.
Responsible for returning a (sql, [params]) tuple to be included in the current query.
def as_sql(self, compiler, connection): """ Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`. """ raise NotImplementedError("Subclasses must implement as_sql()")
[ "def", "as_sql", "(", "self", ",", "compiler", ",", "connection", ")", ":", "raise", "NotImplementedError", "(", "\"Subclasses must implement as_sql()\"", ")" ]
[ 189, 4 ]
[ 215, 71 ]
python
en
['en', 'error', 'th']
False
BaseExpression.resolve_expression
(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False)
Provide the chance to do any preprocessing or validation before being added to the query. Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query.
Provide the chance to do any preprocessing or validation before being added to the query.
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """ Provide the chance to do any preprocessing or validation before being added to the query. Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query. """ c = self.copy() c.is_summary = summarize c.set_source_expressions([ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ]) return c
[ "def", "resolve_expression", "(", "self", ",", "query", "=", "None", ",", "allow_joins", "=", "True", ",", "reuse", "=", "None", ",", "summarize", "=", "False", ",", "for_save", "=", "False", ")", ":", "c", "=", "self", ".", "copy", "(", ")", "c", ".", "is_summary", "=", "summarize", "c", ".", "set_source_expressions", "(", "[", "expr", ".", "resolve_expression", "(", "query", ",", "allow_joins", ",", "reuse", ",", "summarize", ")", "if", "expr", "else", "None", "for", "expr", "in", "c", ".", "get_source_expressions", "(", ")", "]", ")", "return", "c" ]
[ 229, 4 ]
[ 251, 16 ]
python
en
['en', 'error', 'th']
False
BaseExpression.output_field
(self)
Return the output type of this expressions.
Return the output type of this expressions.
def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError('Cannot resolve expression type, unknown output_field') return output_field
[ "def", "output_field", "(", "self", ")", ":", "output_field", "=", "self", ".", "_resolve_output_field", "(", ")", "if", "output_field", "is", "None", ":", "self", ".", "_output_field_resolved_to_none", "=", "True", "raise", "FieldError", "(", "'Cannot resolve expression type, unknown output_field'", ")", "return", "output_field" ]
[ 262, 4 ]
[ 268, 27 ]
python
en
['en', 'en', 'en']
True
BaseExpression._output_field_or_none
(self)
Return the output field of this expression, or None if _resolve_output_field() didn't return an output type.
Return the output field of this expression, or None if _resolve_output_field() didn't return an output type.
def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise
[ "def", "_output_field_or_none", "(", "self", ")", ":", "try", ":", "return", "self", ".", "output_field", "except", "FieldError", ":", "if", "not", "self", ".", "_output_field_resolved_to_none", ":", "raise" ]
[ 271, 4 ]
[ 280, 21 ]
python
en
['en', 'error', 'th']
False
BaseExpression._resolve_output_field
(self)
Attempt to infer the output type of the expression. If the output fields of all source fields match then, simply infer the same type here. This isn't always correct, but it makes sense most of the time. Consider the difference between `2 + 2` and `2 / 3`. Inferring the type here is a convenience for the common case. The user should supply their own output_field with more complex computations. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property.
Attempt to infer the output type of the expression. If the output fields of all source fields match then, simply infer the same type here. This isn't always correct, but it makes sense most of the time.
def _resolve_output_field(self): """ Attempt to infer the output type of the expression. If the output fields of all source fields match then, simply infer the same type here. This isn't always correct, but it makes sense most of the time. Consider the difference between `2 + 2` and `2 / 3`. Inferring the type here is a convenience for the common case. The user should supply their own output_field with more complex computations. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ sources_iter = (source for source in self.get_source_fields() if source is not None) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( 'Expression contains mixed types: %s, %s. You must ' 'set output_field.' % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field
[ "def", "_resolve_output_field", "(", "self", ")", ":", "sources_iter", "=", "(", "source", "for", "source", "in", "self", ".", "get_source_fields", "(", ")", "if", "source", "is", "not", "None", ")", "for", "output_field", "in", "sources_iter", ":", "for", "source", "in", "sources_iter", ":", "if", "not", "isinstance", "(", "output_field", ",", "source", ".", "__class__", ")", ":", "raise", "FieldError", "(", "'Expression contains mixed types: %s, %s. You must '", "'set output_field.'", "%", "(", "output_field", ".", "__class__", ".", "__name__", ",", "source", ".", "__class__", ".", "__name__", ",", ")", ")", "return", "output_field" ]
[ 282, 4 ]
[ 307, 31 ]
python
en
['en', 'error', 'th']
False
BaseExpression.convert_value
(self)
Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns.
Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns.
def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. """ field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop
[ "def", "convert_value", "(", "self", ")", ":", "field", "=", "self", ".", "output_field", "internal_type", "=", "field", ".", "get_internal_type", "(", ")", "if", "internal_type", "==", "'FloatField'", ":", "return", "lambda", "value", ",", "expression", ",", "connection", ":", "None", "if", "value", "is", "None", "else", "float", "(", "value", ")", "elif", "internal_type", ".", "endswith", "(", "'IntegerField'", ")", ":", "return", "lambda", "value", ",", "expression", ",", "connection", ":", "None", "if", "value", "is", "None", "else", "int", "(", "value", ")", "elif", "internal_type", "==", "'DecimalField'", ":", "return", "lambda", "value", ",", "expression", ",", "connection", ":", "None", "if", "value", "is", "None", "else", "Decimal", "(", "value", ")", "return", "self", ".", "_convert_value_noop" ]
[ 314, 4 ]
[ 328, 39 ]
python
en
['en', 'error', 'th']
False
BaseExpression.get_source_fields
(self)
Return the underlying field types used by this aggregate.
Return the underlying field types used by this aggregate.
def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()]
[ "def", "get_source_fields", "(", "self", ")", ":", "return", "[", "e", ".", "_output_field_or_none", "for", "e", "in", "self", ".", "get_source_expressions", "(", ")", "]" ]
[ 355, 4 ]
[ 357, 79 ]
python
en
['en', 'en', 'en']
True
BaseExpression.flatten
(self)
Recursively yield this expression and all subexpressions, in depth-first order.
Recursively yield this expression and all subexpressions, in depth-first order.
def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, 'flatten'): yield from expr.flatten() else: yield expr
[ "def", "flatten", "(", "self", ")", ":", "yield", "self", "for", "expr", "in", "self", ".", "get_source_expressions", "(", ")", ":", "if", "expr", ":", "if", "hasattr", "(", "expr", ",", "'flatten'", ")", ":", "yield", "from", "expr", ".", "flatten", "(", ")", "else", ":", "yield", "expr" ]
[ 368, 4 ]
[ 379, 30 ]
python
en
['en', 'error', 'th']
False
BaseExpression.select_format
(self, compiler, sql, params)
Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle.
Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle.
def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, 'select_format'): return self.output_field.select_format(compiler, sql, params) return sql, params
[ "def", "select_format", "(", "self", ",", "compiler", ",", "sql", ",", "params", ")", ":", "if", "hasattr", "(", "self", ".", "output_field", ",", "'select_format'", ")", ":", "return", "self", ".", "output_field", ".", "select_format", "(", "compiler", ",", "sql", ",", "params", ")", "return", "sql", ",", "params" ]
[ 381, 4 ]
[ 388, 26 ]
python
en
['en', 'error', 'th']
False
Func._get_repr_options
(self)
Return a dict of extra __init__() options to include in the repr.
Return a dict of extra __init__() options to include in the repr.
def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {}
[ "def", "_get_repr_options", "(", "self", ")", ":", "return", "{", "}" ]
[ 663, 4 ]
[ 665, 17 ]
python
en
['en', 'en', 'en']
True
Value.__init__
(self, value, output_field=None)
Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField().
Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted.
def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). """ super().__init__(output_field=output_field) self.value = value
[ "def", "__init__", "(", "self", ",", "value", ",", "output_field", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", "output_field", "=", "output_field", ")", "self", ".", "value", "=", "value" ]
[ 714, 4 ]
[ 724, 26 ]
python
en
['en', 'error', 'th']
False
get_normalization_parameters
(traindf, features)
Get the normalization parameters (E.g., mean, std) for traindf for features. We will use these parameters for training, eval, and serving.
Get the normalization parameters (E.g., mean, std) for traindf for features. We will use these parameters for training, eval, and serving.
def get_normalization_parameters(traindf, features): """Get the normalization parameters (E.g., mean, std) for traindf for features. We will use these parameters for training, eval, and serving.""" def _z_score_params(column): mean = traindf[column].mean() std = traindf[column].std() return {'mean': mean, 'std': std} normalization_parameters = {} for column in features: normalization_parameters[column] = _z_score_params(column) return normalization_parameters
[ "def", "get_normalization_parameters", "(", "traindf", ",", "features", ")", ":", "def", "_z_score_params", "(", "column", ")", ":", "mean", "=", "traindf", "[", "column", "]", ".", "mean", "(", ")", "std", "=", "traindf", "[", "column", "]", ".", "std", "(", ")", "return", "{", "'mean'", ":", "mean", ",", "'std'", ":", "std", "}", "normalization_parameters", "=", "{", "}", "for", "column", "in", "features", ":", "normalization_parameters", "[", "column", "]", "=", "_z_score_params", "(", "column", ")", "return", "normalization_parameters" ]
[ 29, 0 ]
[ 41, 33 ]
python
en
['en', 'en', 'en']
True
create_feature_cols
(features, use_normalization)
Create our feature columns using tf.feature_column. This function will get executed during training, evaluation, and serving.
Create our feature columns using tf.feature_column. This function will get executed during training, evaluation, and serving.
def create_feature_cols(features, use_normalization): """Create our feature columns using tf.feature_column. This function will get executed during training, evaluation, and serving.""" normalized_feature_columns = [] for column_name in features: if use_normalization: column_params = normalization_parameters[column_name] mean = column_params['mean'] std = column_params['std'] def normalize_column(col): # Use mean, std defined above. return (col - mean)/std normalizer_fn = normalize_column else: normalizer_fn = None normalized_feature_columns.append(_numeric_column_normalized(column_name, normalizer_fn)) return normalized_feature_columns
[ "def", "create_feature_cols", "(", "features", ",", "use_normalization", ")", ":", "normalized_feature_columns", "=", "[", "]", "for", "column_name", "in", "features", ":", "if", "use_normalization", ":", "column_params", "=", "normalization_parameters", "[", "column_name", "]", "mean", "=", "column_params", "[", "'mean'", "]", "std", "=", "column_params", "[", "'std'", "]", "def", "normalize_column", "(", "col", ")", ":", "# Use mean, std defined above.", "return", "(", "col", "-", "mean", ")", "/", "std", "normalizer_fn", "=", "normalize_column", "else", ":", "normalizer_fn", "=", "None", "normalized_feature_columns", ".", "append", "(", "_numeric_column_normalized", "(", "column_name", ",", "normalizer_fn", ")", ")", "return", "normalized_feature_columns" ]
[ 54, 0 ]
[ 70, 35 ]
python
en
['en', 'en', 'en']
True
input_fn
(df, shuffle=True)
For training and evaluation inputs.
For training and evaluation inputs.
def input_fn(df, shuffle=True): """For training and evaluation inputs.""" return tf.estimator.inputs.pandas_input_fn( x = df, y = df["median_house_value"]/100000, # Scale target. shuffle = shuffle)
[ "def", "input_fn", "(", "df", ",", "shuffle", "=", "True", ")", ":", "return", "tf", ".", "estimator", ".", "inputs", ".", "pandas_input_fn", "(", "x", "=", "df", ",", "y", "=", "df", "[", "\"median_house_value\"", "]", "/", "100000", ",", "# Scale target.", "shuffle", "=", "shuffle", ")" ]
[ 72, 0 ]
[ 77, 22 ]
python
en
['en', 'en', 'en']
True
response_chunks
( response: Response, chunk_size: int = CONTENT_CHUNK_SIZE )
Given a requests Response, provide the data chunks.
Given a requests Response, provide the data chunks.
def response_chunks( response: Response, chunk_size: int = CONTENT_CHUNK_SIZE ) -> Iterator[bytes]: """Given a requests Response, provide the data chunks.""" try: # Special case for urllib3. for chunk in response.raw.stream( chunk_size, # We use decode_content=False here because we don't # want urllib3 to mess with the raw bytes we get # from the server. If we decompress inside of # urllib3 then we cannot verify the checksum # because the checksum will be of the compressed # file. This breakage will only occur if the # server adds a Content-Encoding header, which # depends on how the server was configured: # - Some servers will notice that the file isn't a # compressible file and will leave the file alone # and with an empty Content-Encoding # - Some servers will notice that the file is # already compressed and will leave the file # alone and will add a Content-Encoding: gzip # header # - Some servers won't notice anything at all and # will take a file that's already been compressed # and compress it again and set the # Content-Encoding: gzip header # # By setting this not to decode automatically we # hope to eliminate problems with the second case. decode_content=False, ): yield chunk except AttributeError: # Standard file-like object. while True: chunk = response.raw.read(chunk_size) if not chunk: break yield chunk
[ "def", "response_chunks", "(", "response", ":", "Response", ",", "chunk_size", ":", "int", "=", "CONTENT_CHUNK_SIZE", ")", "->", "Iterator", "[", "bytes", "]", ":", "try", ":", "# Special case for urllib3.", "for", "chunk", "in", "response", ".", "raw", ".", "stream", "(", "chunk_size", ",", "# We use decode_content=False here because we don't", "# want urllib3 to mess with the raw bytes we get", "# from the server. If we decompress inside of", "# urllib3 then we cannot verify the checksum", "# because the checksum will be of the compressed", "# file. This breakage will only occur if the", "# server adds a Content-Encoding header, which", "# depends on how the server was configured:", "# - Some servers will notice that the file isn't a", "# compressible file and will leave the file alone", "# and with an empty Content-Encoding", "# - Some servers will notice that the file is", "# already compressed and will leave the file", "# alone and will add a Content-Encoding: gzip", "# header", "# - Some servers won't notice anything at all and", "# will take a file that's already been compressed", "# and compress it again and set the", "# Content-Encoding: gzip header", "#", "# By setting this not to decode automatically we", "# hope to eliminate problems with the second case.", "decode_content", "=", "False", ",", ")", ":", "yield", "chunk", "except", "AttributeError", ":", "# Standard file-like object.", "while", "True", ":", "chunk", "=", "response", ".", "raw", ".", "read", "(", "chunk_size", ")", "if", "not", "chunk", ":", "break", "yield", "chunk" ]
[ 56, 0 ]
[ 95, 23 ]
python
en
['en', 'en', 'en']
True
lovasz_grad
(gt_sorted)
Computes gradient of the Lovasz extension w.r.t sorted errors See Alg. 1 in paper
Computes gradient of the Lovasz extension w.r.t sorted errors See Alg. 1 in paper
def lovasz_grad(gt_sorted): """ Computes gradient of the Lovasz extension w.r.t sorted errors See Alg. 1 in paper """ gts = tf.reduce_sum(gt_sorted) intersection = gts - tf.cumsum(gt_sorted) union = gts + tf.cumsum(1. - gt_sorted) jaccard = 1. - intersection / union jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0) return jaccard
[ "def", "lovasz_grad", "(", "gt_sorted", ")", ":", "gts", "=", "tf", ".", "reduce_sum", "(", "gt_sorted", ")", "intersection", "=", "gts", "-", "tf", ".", "cumsum", "(", "gt_sorted", ")", "union", "=", "gts", "+", "tf", ".", "cumsum", "(", "1.", "-", "gt_sorted", ")", "jaccard", "=", "1.", "-", "intersection", "/", "union", "jaccard", "=", "tf", ".", "concat", "(", "(", "jaccard", "[", "0", ":", "1", "]", ",", "jaccard", "[", "1", ":", "]", "-", "jaccard", "[", ":", "-", "1", "]", ")", ",", "0", ")", "return", "jaccard" ]
[ 14, 0 ]
[ 24, 18 ]
python
en
['en', 'error', 'th']
False
lovasz_hinge
(logits, labels, per_image=True, ignore=None)
Binary Lovasz hinge loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) per_image: compute the loss per image instead of per batch ignore: void class id
Binary Lovasz hinge loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) per_image: compute the loss per image instead of per batch ignore: void class id
def lovasz_hinge(logits, labels, per_image=True, ignore=None): """ Binary Lovasz hinge loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) per_image: compute the loss per image instead of per batch ignore: void class id """ if per_image: def treat_image(log_lab): log, lab = log_lab log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0) log, lab = flatten_binary_scores(log, lab, ignore) return lovasz_hinge_flat(log, lab) losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32) loss = tf.reduce_mean(losses) else: loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) return loss
[ "def", "lovasz_hinge", "(", "logits", ",", "labels", ",", "per_image", "=", "True", ",", "ignore", "=", "None", ")", ":", "if", "per_image", ":", "def", "treat_image", "(", "log_lab", ")", ":", "log", ",", "lab", "=", "log_lab", "log", ",", "lab", "=", "tf", ".", "expand_dims", "(", "log", ",", "0", ")", ",", "tf", ".", "expand_dims", "(", "lab", ",", "0", ")", "log", ",", "lab", "=", "flatten_binary_scores", "(", "log", ",", "lab", ",", "ignore", ")", "return", "lovasz_hinge_flat", "(", "log", ",", "lab", ")", "losses", "=", "tf", ".", "map_fn", "(", "treat_image", ",", "(", "logits", ",", "labels", ")", ",", "dtype", "=", "tf", ".", "float32", ")", "loss", "=", "tf", ".", "reduce_mean", "(", "losses", ")", "else", ":", "loss", "=", "lovasz_hinge_flat", "(", "*", "flatten_binary_scores", "(", "logits", ",", "labels", ",", "ignore", ")", ")", "return", "loss" ]
[ 30, 0 ]
[ 48, 15 ]
python
en
['en', 'error', 'th']
False
lovasz_hinge_flat
(logits, labels)
Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\infty and +\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore
Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\infty and +\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore
def lovasz_hinge_flat(logits, labels): """ Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\infty and +\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ def compute_loss(): labelsf = tf.cast(labels, logits.dtype) signs = 2. * labelsf - 1. errors = 1. - logits * tf.stop_gradient(signs) errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort") gt_sorted = tf.gather(labelsf, perm) grad = lovasz_grad(gt_sorted) loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void") return loss # deal with the void prediction case (only void pixels) loss = tf.cond(tf.equal(tf.shape(logits)[0], 0), lambda: tf.reduce_sum(logits) * 0., compute_loss, #strict=True, # for compatibility with TF2 TODO investigate name="loss" ) return loss
[ "def", "lovasz_hinge_flat", "(", "logits", ",", "labels", ")", ":", "def", "compute_loss", "(", ")", ":", "labelsf", "=", "tf", ".", "cast", "(", "labels", ",", "logits", ".", "dtype", ")", "signs", "=", "2.", "*", "labelsf", "-", "1.", "errors", "=", "1.", "-", "logits", "*", "tf", ".", "stop_gradient", "(", "signs", ")", "errors_sorted", ",", "perm", "=", "tf", ".", "nn", ".", "top_k", "(", "errors", ",", "k", "=", "tf", ".", "shape", "(", "errors", ")", "[", "0", "]", ",", "name", "=", "\"descending_sort\"", ")", "gt_sorted", "=", "tf", ".", "gather", "(", "labelsf", ",", "perm", ")", "grad", "=", "lovasz_grad", "(", "gt_sorted", ")", "loss", "=", "tf", ".", "tensordot", "(", "tf", ".", "nn", ".", "relu", "(", "errors_sorted", ")", ",", "tf", ".", "stop_gradient", "(", "grad", ")", ",", "1", ",", "name", "=", "\"loss_non_void\"", ")", "return", "loss", "# deal with the void prediction case (only void pixels)", "loss", "=", "tf", ".", "cond", "(", "tf", ".", "equal", "(", "tf", ".", "shape", "(", "logits", ")", "[", "0", "]", ",", "0", ")", ",", "lambda", ":", "tf", ".", "reduce_sum", "(", "logits", ")", "*", "0.", ",", "compute_loss", ",", "#strict=True, # for compatibility with TF2 TODO investigate", "name", "=", "\"loss\"", ")", "return", "loss" ]
[ 51, 0 ]
[ 76, 15 ]
python
en
['en', 'error', 'th']
False
flatten_binary_scores
(scores, labels, ignore=None)
Flattens predictions in the batch (binary case) Remove labels equal to 'ignore'
Flattens predictions in the batch (binary case) Remove labels equal to 'ignore'
def flatten_binary_scores(scores, labels, ignore=None): """ Flattens predictions in the batch (binary case) Remove labels equal to 'ignore' """ scores = tf.reshape(scores, (-1,)) labels = tf.reshape(labels, (-1,)) if ignore is None: return scores, labels valid = tf.not_equal(labels, ignore) vscores = tf.boolean_mask(scores, valid, name='valid_scores') vlabels = tf.boolean_mask(labels, valid, name='valid_labels') return vscores, vlabels
[ "def", "flatten_binary_scores", "(", "scores", ",", "labels", ",", "ignore", "=", "None", ")", ":", "scores", "=", "tf", ".", "reshape", "(", "scores", ",", "(", "-", "1", ",", ")", ")", "labels", "=", "tf", ".", "reshape", "(", "labels", ",", "(", "-", "1", ",", ")", ")", "if", "ignore", "is", "None", ":", "return", "scores", ",", "labels", "valid", "=", "tf", ".", "not_equal", "(", "labels", ",", "ignore", ")", "vscores", "=", "tf", ".", "boolean_mask", "(", "scores", ",", "valid", ",", "name", "=", "'valid_scores'", ")", "vlabels", "=", "tf", ".", "boolean_mask", "(", "labels", ",", "valid", ",", "name", "=", "'valid_labels'", ")", "return", "vscores", ",", "vlabels" ]
[ 79, 0 ]
[ 91, 27 ]
python
en
['en', 'error', 'th']
False
lovasz_softmax
(probas, labels, classes='present', per_image=False, ignore=None, order='BHWC')
Multi-class Lovasz-Softmax loss probas: [B, H, W, C] or [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1) Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. per_image: compute the loss per image instead of per batch ignore: void class labels order: use BHWC or BCHW
Multi-class Lovasz-Softmax loss probas: [B, H, W, C] or [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1) Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. per_image: compute the loss per image instead of per batch ignore: void class labels order: use BHWC or BCHW
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None, order='BHWC'): """ Multi-class Lovasz-Softmax loss probas: [B, H, W, C] or [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1) Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. per_image: compute the loss per image instead of per batch ignore: void class labels order: use BHWC or BCHW """ if per_image: def treat_image(prob_lab): prob, lab = prob_lab prob, lab = tf.expand_dims(prob, 0), tf.expand_dims(lab, 0) prob, lab = flatten_probas(prob, lab, ignore, order) return lovasz_softmax_flat(prob, lab, classes=classes) losses = tf.map_fn(treat_image, (probas, labels), dtype=tf.float32) loss = tf.reduce_mean(losses) else: loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore, order), classes=classes) return loss
[ "def", "lovasz_softmax", "(", "probas", ",", "labels", ",", "classes", "=", "'present'", ",", "per_image", "=", "False", ",", "ignore", "=", "None", ",", "order", "=", "'BHWC'", ")", ":", "if", "per_image", ":", "def", "treat_image", "(", "prob_lab", ")", ":", "prob", ",", "lab", "=", "prob_lab", "prob", ",", "lab", "=", "tf", ".", "expand_dims", "(", "prob", ",", "0", ")", ",", "tf", ".", "expand_dims", "(", "lab", ",", "0", ")", "prob", ",", "lab", "=", "flatten_probas", "(", "prob", ",", "lab", ",", "ignore", ",", "order", ")", "return", "lovasz_softmax_flat", "(", "prob", ",", "lab", ",", "classes", "=", "classes", ")", "losses", "=", "tf", ".", "map_fn", "(", "treat_image", ",", "(", "probas", ",", "labels", ")", ",", "dtype", "=", "tf", ".", "float32", ")", "loss", "=", "tf", ".", "reduce_mean", "(", "losses", ")", "else", ":", "loss", "=", "lovasz_softmax_flat", "(", "*", "flatten_probas", "(", "probas", ",", "labels", ",", "ignore", ",", "order", ")", ",", "classes", "=", "classes", ")", "return", "loss" ]
[ 97, 0 ]
[ 118, 15 ]
python
en
['en', 'error', 'th']
False
lovasz_softmax_flat
(probas, labels, classes='present')
Multi-class Lovasz-Softmax loss probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) labels: [P] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
Multi-class Lovasz-Softmax loss probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) labels: [P] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
def lovasz_softmax_flat(probas, labels, classes='present'): """ Multi-class Lovasz-Softmax loss probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) labels: [P] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. """ C = probas.shape[1] losses = [] present = [] class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes for c in class_to_sum: fg = tf.cast(tf.equal(labels, c), probas.dtype) # foreground for class c if classes == 'present': present.append(tf.reduce_sum(fg) > 0) if C == 1: if len(classes) > 1: raise ValueError('Sigmoid output possible only with 1 class') class_pred = probas[:, 0] else: class_pred = probas[:, c] errors = tf.abs(fg - class_pred) errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort_{}".format(c)) fg_sorted = tf.gather(fg, perm) grad = lovasz_grad(fg_sorted) losses.append( tf.tensordot(errors_sorted, tf.stop_gradient(grad), 1, name="loss_class_{}".format(c)) ) if len(class_to_sum) == 1: # short-circuit mean when only one class return losses[0] losses_tensor = tf.stack(losses) if classes == 'present': present = tf.stack(present) losses_tensor = tf.boolean_mask(losses_tensor, present) loss = tf.reduce_mean(losses_tensor) return loss
[ "def", "lovasz_softmax_flat", "(", "probas", ",", "labels", ",", "classes", "=", "'present'", ")", ":", "C", "=", "probas", ".", "shape", "[", "1", "]", "losses", "=", "[", "]", "present", "=", "[", "]", "class_to_sum", "=", "list", "(", "range", "(", "C", ")", ")", "if", "classes", "in", "[", "'all'", ",", "'present'", "]", "else", "classes", "for", "c", "in", "class_to_sum", ":", "fg", "=", "tf", ".", "cast", "(", "tf", ".", "equal", "(", "labels", ",", "c", ")", ",", "probas", ".", "dtype", ")", "# foreground for class c", "if", "classes", "==", "'present'", ":", "present", ".", "append", "(", "tf", ".", "reduce_sum", "(", "fg", ")", ">", "0", ")", "if", "C", "==", "1", ":", "if", "len", "(", "classes", ")", ">", "1", ":", "raise", "ValueError", "(", "'Sigmoid output possible only with 1 class'", ")", "class_pred", "=", "probas", "[", ":", ",", "0", "]", "else", ":", "class_pred", "=", "probas", "[", ":", ",", "c", "]", "errors", "=", "tf", ".", "abs", "(", "fg", "-", "class_pred", ")", "errors_sorted", ",", "perm", "=", "tf", ".", "nn", ".", "top_k", "(", "errors", ",", "k", "=", "tf", ".", "shape", "(", "errors", ")", "[", "0", "]", ",", "name", "=", "\"descending_sort_{}\"", ".", "format", "(", "c", ")", ")", "fg_sorted", "=", "tf", ".", "gather", "(", "fg", ",", "perm", ")", "grad", "=", "lovasz_grad", "(", "fg_sorted", ")", "losses", ".", "append", "(", "tf", ".", "tensordot", "(", "errors_sorted", ",", "tf", ".", "stop_gradient", "(", "grad", ")", ",", "1", ",", "name", "=", "\"loss_class_{}\"", ".", "format", "(", "c", ")", ")", ")", "if", "len", "(", "class_to_sum", ")", "==", "1", ":", "# short-circuit mean when only one class", "return", "losses", "[", "0", "]", "losses_tensor", "=", "tf", ".", "stack", "(", "losses", ")", "if", "classes", "==", "'present'", ":", "present", "=", "tf", ".", "stack", "(", "present", ")", "losses_tensor", "=", "tf", ".", "boolean_mask", "(", "losses_tensor", ",", "present", ")", "loss", "=", "tf", ".", "reduce_mean", 
"(", "losses_tensor", ")", "return", "loss" ]
[ 121, 0 ]
[ 156, 15 ]
python
en
['en', 'error', 'th']
False
flatten_probas
(probas, labels, ignore=None, order='BHWC')
Flattens predictions in the batch
Flattens predictions in the batch
def flatten_probas(probas, labels, ignore=None, order='BHWC'): """ Flattens predictions in the batch """ if len(probas.shape) == 3: probas, order = tf.expand_dims(probas, 3), 'BHWC' if order == 'BCHW': probas = tf.transpose(probas, (0, 2, 3, 1), name="BCHW_to_BHWC") order = 'BHWC' if order != 'BHWC': raise NotImplementedError('Order {} unknown'.format(order)) C = probas.shape[3] probas = tf.reshape(probas, (-1, C)) labels = tf.reshape(labels, (-1,)) if ignore is None: return probas, labels valid = tf.not_equal(labels, ignore) vprobas = tf.boolean_mask(probas, valid, name='valid_probas') vlabels = tf.boolean_mask(labels, valid, name='valid_labels') return vprobas, vlabels
[ "def", "flatten_probas", "(", "probas", ",", "labels", ",", "ignore", "=", "None", ",", "order", "=", "'BHWC'", ")", ":", "if", "len", "(", "probas", ".", "shape", ")", "==", "3", ":", "probas", ",", "order", "=", "tf", ".", "expand_dims", "(", "probas", ",", "3", ")", ",", "'BHWC'", "if", "order", "==", "'BCHW'", ":", "probas", "=", "tf", ".", "transpose", "(", "probas", ",", "(", "0", ",", "2", ",", "3", ",", "1", ")", ",", "name", "=", "\"BCHW_to_BHWC\"", ")", "order", "=", "'BHWC'", "if", "order", "!=", "'BHWC'", ":", "raise", "NotImplementedError", "(", "'Order {} unknown'", ".", "format", "(", "order", ")", ")", "C", "=", "probas", ".", "shape", "[", "3", "]", "probas", "=", "tf", ".", "reshape", "(", "probas", ",", "(", "-", "1", ",", "C", ")", ")", "labels", "=", "tf", ".", "reshape", "(", "labels", ",", "(", "-", "1", ",", ")", ")", "if", "ignore", "is", "None", ":", "return", "probas", ",", "labels", "valid", "=", "tf", ".", "not_equal", "(", "labels", ",", "ignore", ")", "vprobas", "=", "tf", ".", "boolean_mask", "(", "probas", ",", "valid", ",", "name", "=", "'valid_probas'", ")", "vlabels", "=", "tf", ".", "boolean_mask", "(", "labels", ",", "valid", ",", "name", "=", "'valid_labels'", ")", "return", "vprobas", ",", "vlabels" ]
[ 159, 0 ]
[ 178, 27 ]
python
en
['en', 'error', 'th']
False
User._encrypt_pw
(self, password)
Encrypt the password with the username and return the sha digest
Encrypt the password with the username and return the sha digest
def _encrypt_pw(self, password): """Encrypt the password with the username and return the sha digest""" hash_str = (self.name + password) hash_str = hash_str.encode("utf8") return hashlib.sha256(hash_str).hexdigest()
[ "def", "_encrypt_pw", "(", "self", ",", "password", ")", ":", "hash_str", "=", "(", "self", ".", "name", "+", "password", ")", "hash_str", "=", "hash_str", ".", "encode", "(", "\"utf8\"", ")", "return", "hashlib", ".", "sha256", "(", "hash_str", ")", ".", "hexdigest", "(", ")" ]
[ 47, 4 ]
[ 52, 51 ]
python
en
['en', 'en', 'en']
True
User.check_password
(self, password)
Return True if the password is valid for this user, otherwise false
Return True if the password is valid for this user, otherwise false
def check_password(self, password): """Return True if the password is valid for this user, otherwise false""" encrypted = self._encrypt_pw(password) return encrypted == self.password
[ "def", "check_password", "(", "self", ",", "password", ")", ":", "encrypted", "=", "self", ".", "_encrypt_pw", "(", "password", ")", "return", "encrypted", "==", "self", ".", "password" ]
[ 54, 4 ]
[ 58, 41 ]
python
en
['en', 'en', 'en']
True
Authenticator.__init__
(self)
Construct an authenticator to manage users logging in and out.
Construct an authenticator to manage users logging in and out.
def __init__(self): "Construct an authenticator to manage users logging in and out." self.users = {}
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "users", "=", "{", "}" ]
[ 63, 4 ]
[ 65, 23 ]
python
en
['en', 'en', 'en']
True
Authorizor.add_permission
(self, perm_name)
Create a new permission that users can be added to.
Create a new permission that users can be added to.
def add_permission(self, perm_name): "Create a new permission that users can be added to." try: perm_set = self.permissions[perm_name] except KeyError: self.permissions[perm_name] = set() else: raise PermissionError("Permission Exists")
[ "def", "add_permission", "(", "self", ",", "perm_name", ")", ":", "try", ":", "perm_set", "=", "self", ".", "permissions", "[", "perm_name", "]", "except", "KeyError", ":", "self", ".", "permissions", "[", "perm_name", "]", "=", "set", "(", ")", "else", ":", "raise", "PermissionError", "(", "\"Permission Exists\"", ")" ]
[ 98, 4 ]
[ 105, 54 ]
python
en
['en', 'en', 'en']
True
Authorizor.permit_user
(self, perm_name, username)
Grant the given permission to the user
Grant the given permission to the user
def permit_user(self, perm_name, username): "Grant the given permission to the user" try: perm_set = self.permissions[perm_name] except KeyError: raise PermissionError("Permission does not exist") else: if username not in self.authenticator.users: raise InvalidUsername(username) perm_set.add(username)
[ "def", "permit_user", "(", "self", ",", "perm_name", ",", "username", ")", ":", "try", ":", "perm_set", "=", "self", ".", "permissions", "[", "perm_name", "]", "except", "KeyError", ":", "raise", "PermissionError", "(", "\"Permission does not exist\"", ")", "else", ":", "if", "username", "not", "in", "self", ".", "authenticator", ".", "users", ":", "raise", "InvalidUsername", "(", "username", ")", "perm_set", ".", "add", "(", "username", ")" ]
[ 107, 4 ]
[ 116, 34 ]
python
en
['en', 'en', 'en']
True
GhostNet._make_divisible
(self, v, divisor, min_value=None)
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
def _make_divisible(self, v, divisor, min_value=None): """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v
[ "def", "_make_divisible", "(", "self", ",", "v", ",", "divisor", ",", "min_value", "=", "None", ")", ":", "if", "min_value", "is", "None", ":", "min_value", "=", "divisor", "new_v", "=", "max", "(", "min_value", ",", "int", "(", "v", "+", "divisor", "/", "2", ")", "//", "divisor", "*", "divisor", ")", "# Make sure that round down does not go down by more than 10%.", "if", "new_v", "<", "0.9", "*", "v", ":", "new_v", "+=", "divisor", "return", "new_v" ]
[ 302, 4 ]
[ 315, 20 ]
python
en
['en', 'error', 'th']
False
cloudinary_direct_upload
(callback_url, **options)
Deprecated - please use cloudinary_direct_upload_field, or a proper form
Deprecated - please use cloudinary_direct_upload_field, or a proper form
def cloudinary_direct_upload(callback_url, **options): """Deprecated - please use cloudinary_direct_upload_field, or a proper form""" params = utils.build_upload_params(callback=callback_url, **options) params = utils.sign_request(params, options) api_url = utils.cloudinary_api_url("upload", resource_type=options.get("resource_type", "image"), upload_prefix=options.get("upload_prefix")) return {"params": params, "url": api_url}
[ "def", "cloudinary_direct_upload", "(", "callback_url", ",", "*", "*", "options", ")", ":", "params", "=", "utils", ".", "build_upload_params", "(", "callback", "=", "callback_url", ",", "*", "*", "options", ")", "params", "=", "utils", ".", "sign_request", "(", "params", ",", "options", ")", "api_url", "=", "utils", ".", "cloudinary_api_url", "(", "\"upload\"", ",", "resource_type", "=", "options", ".", "get", "(", "\"resource_type\"", ",", "\"image\"", ")", ",", "upload_prefix", "=", "options", ".", "get", "(", "\"upload_prefix\"", ")", ")", "return", "{", "\"params\"", ":", "params", ",", "\"url\"", ":", "api_url", "}" ]
[ 59, 0 ]
[ 67, 45 ]
python
en
['en', 'en', 'en']
True
make_graph
(dists, scheme='default')
Makes a dependency graph from the given distributions. :parameter dists: a list of distributions :type dists: list of :class:`distutils2.database.InstalledDistribution` and :class:`distutils2.database.EggInfoDistribution` instances :rtype: a :class:`DependencyGraph` instance
Makes a dependency graph from the given distributions.
def make_graph(dists, scheme='default'): """Makes a dependency graph from the given distributions. :parameter dists: a list of distributions :type dists: list of :class:`distutils2.database.InstalledDistribution` and :class:`distutils2.database.EggInfoDistribution` instances :rtype: a :class:`DependencyGraph` instance """ scheme = get_scheme(scheme) graph = DependencyGraph() provided = {} # maps names to lists of (version, dist) tuples # first, build the graph and find out what's provided for dist in dists: graph.add_distribution(dist) for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) provided.setdefault(name, []).append((version, dist)) # now make the edges for dist in dists: requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires) for req in requires: try: matcher = scheme.matcher(req) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive matched = False if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: graph.add_edge(dist, provider, req) matched = True break if not matched: graph.add_missing(dist, req) return graph
[ "def", "make_graph", "(", "dists", ",", "scheme", "=", "'default'", ")", ":", "scheme", "=", "get_scheme", "(", "scheme", ")", "graph", "=", "DependencyGraph", "(", ")", "provided", "=", "{", "}", "# maps names to lists of (version, dist) tuples", "# first, build the graph and find out what's provided", "for", "dist", "in", "dists", ":", "graph", ".", "add_distribution", "(", "dist", ")", "for", "p", "in", "dist", ".", "provides", ":", "name", ",", "version", "=", "parse_name_and_version", "(", "p", ")", "logger", ".", "debug", "(", "'Add to provided: %s, %s, %s'", ",", "name", ",", "version", ",", "dist", ")", "provided", ".", "setdefault", "(", "name", ",", "[", "]", ")", ".", "append", "(", "(", "version", ",", "dist", ")", ")", "# now make the edges", "for", "dist", "in", "dists", ":", "requires", "=", "(", "dist", ".", "run_requires", "|", "dist", ".", "meta_requires", "|", "dist", ".", "build_requires", "|", "dist", ".", "dev_requires", ")", "for", "req", "in", "requires", ":", "try", ":", "matcher", "=", "scheme", ".", "matcher", "(", "req", ")", "except", "UnsupportedVersionError", ":", "# XXX compat-mode if cannot read the version", "logger", ".", "warning", "(", "'could not read version %r - using name only'", ",", "req", ")", "name", "=", "req", ".", "split", "(", ")", "[", "0", "]", "matcher", "=", "scheme", ".", "matcher", "(", "name", ")", "name", "=", "matcher", ".", "key", "# case-insensitive", "matched", "=", "False", "if", "name", "in", "provided", ":", "for", "version", ",", "provider", "in", "provided", "[", "name", "]", ":", "try", ":", "match", "=", "matcher", ".", "match", "(", "version", ")", "except", "UnsupportedVersionError", ":", "match", "=", "False", "if", "match", ":", "graph", ".", "add_edge", "(", "dist", ",", "provider", ",", "req", ")", "matched", "=", "True", "break", "if", "not", "matched", ":", "graph", ".", "add_missing", "(", "dist", ",", "req", ")", "return", "graph" ]
[ 1224, 0 ]
[ 1275, 16 ]
python
en
['en', 'en', 'en']
True
get_dependent_dists
(dists, dist)
Recursively generate a list of distributions from *dists* that are dependent on *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested
Recursively generate a list of distributions from *dists* that are dependent on *dist*.
def get_dependent_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are dependent on *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) dep = [dist] # dependent distributions todo = graph.reverse_list[dist] # list of nodes we should inspect while todo: d = todo.pop() dep.append(d) for succ in graph.reverse_list[d]: if succ not in dep: todo.append(succ) dep.pop(0) # remove dist from dep, was there to prevent infinite loops return dep
[ "def", "get_dependent_dists", "(", "dists", ",", "dist", ")", ":", "if", "dist", "not", "in", "dists", ":", "raise", "DistlibException", "(", "'given distribution %r is not a member '", "'of the list'", "%", "dist", ".", "name", ")", "graph", "=", "make_graph", "(", "dists", ")", "dep", "=", "[", "dist", "]", "# dependent distributions", "todo", "=", "graph", ".", "reverse_list", "[", "dist", "]", "# list of nodes we should inspect", "while", "todo", ":", "d", "=", "todo", ".", "pop", "(", ")", "dep", ".", "append", "(", "d", ")", "for", "succ", "in", "graph", ".", "reverse_list", "[", "d", "]", ":", "if", "succ", "not", "in", "dep", ":", "todo", ".", "append", "(", "succ", ")", "dep", ".", "pop", "(", "0", ")", "# remove dist from dep, was there to prevent infinite loops", "return", "dep" ]
[ 1278, 0 ]
[ 1301, 14 ]
python
en
['en', 'en', 'en']
True
get_required_dists
(dists, dist)
Recursively generate a list of distributions from *dists* that are required by *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested
Recursively generate a list of distributions from *dists* that are required by *dist*.
def get_required_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are required by *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) req = [] # required distributions todo = graph.adjacency_list[dist] # list of nodes we should inspect while todo: d = todo.pop()[0] req.append(d) for pred in graph.adjacency_list[d]: if pred not in req: todo.append(pred) return req
[ "def", "get_required_dists", "(", "dists", ",", "dist", ")", ":", "if", "dist", "not", "in", "dists", ":", "raise", "DistlibException", "(", "'given distribution %r is not a member '", "'of the list'", "%", "dist", ".", "name", ")", "graph", "=", "make_graph", "(", "dists", ")", "req", "=", "[", "]", "# required distributions", "todo", "=", "graph", ".", "adjacency_list", "[", "dist", "]", "# list of nodes we should inspect", "while", "todo", ":", "d", "=", "todo", ".", "pop", "(", ")", "[", "0", "]", "req", ".", "append", "(", "d", ")", "for", "pred", "in", "graph", ".", "adjacency_list", "[", "d", "]", ":", "if", "pred", "not", "in", "req", ":", "todo", ".", "append", "(", "pred", ")", "return", "req" ]
[ 1304, 0 ]
[ 1326, 14 ]
python
en
['en', 'en', 'en']
True
make_dist
(name, version, **kwargs)
A convenience method for making a dist given just a name and version.
A convenience method for making a dist given just a name and version.
def make_dist(name, version, **kwargs): """ A convenience method for making a dist given just a name and version. """ summary = kwargs.pop('summary', 'Placeholder for summary') md = Metadata(**kwargs) md.name = name md.version = version md.summary = summary or 'Placeholder for summary' return Distribution(md)
[ "def", "make_dist", "(", "name", ",", "version", ",", "*", "*", "kwargs", ")", ":", "summary", "=", "kwargs", ".", "pop", "(", "'summary'", ",", "'Placeholder for summary'", ")", "md", "=", "Metadata", "(", "*", "*", "kwargs", ")", "md", ".", "name", "=", "name", "md", ".", "version", "=", "version", "md", ".", "summary", "=", "summary", "or", "'Placeholder for summary'", "return", "Distribution", "(", "md", ")" ]
[ 1329, 0 ]
[ 1338, 27 ]
python
en
['en', 'error', 'th']
False
_Cache.__init__
(self)
Initialise an instance. There is normally one for each DistributionPath.
Initialise an instance. There is normally one for each DistributionPath.
def __init__(self): """ Initialise an instance. There is normally one for each DistributionPath. """ self.name = {} self.path = {} self.generated = False
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "name", "=", "{", "}", "self", ".", "path", "=", "{", "}", "self", ".", "generated", "=", "False" ]
[ 48, 4 ]
[ 54, 30 ]
python
en
['en', 'error', 'th']
False
_Cache.clear
(self)
Clear the cache, setting it to its initial state.
Clear the cache, setting it to its initial state.
def clear(self): """ Clear the cache, setting it to its initial state. """ self.name.clear() self.path.clear() self.generated = False
[ "def", "clear", "(", "self", ")", ":", "self", ".", "name", ".", "clear", "(", ")", "self", ".", "path", ".", "clear", "(", ")", "self", ".", "generated", "=", "False" ]
[ 56, 4 ]
[ 62, 30 ]
python
en
['en', 'error', 'th']
False
_Cache.add
(self, dist)
Add a distribution to the cache. :param dist: The distribution to add.
Add a distribution to the cache. :param dist: The distribution to add.
def add(self, dist): """ Add a distribution to the cache. :param dist: The distribution to add. """ if dist.path not in self.path: self.path[dist.path] = dist self.name.setdefault(dist.key, []).append(dist)
[ "def", "add", "(", "self", ",", "dist", ")", ":", "if", "dist", ".", "path", "not", "in", "self", ".", "path", ":", "self", ".", "path", "[", "dist", ".", "path", "]", "=", "dist", "self", ".", "name", ".", "setdefault", "(", "dist", ".", "key", ",", "[", "]", ")", ".", "append", "(", "dist", ")" ]
[ 64, 4 ]
[ 71, 59 ]
python
en
['en', 'error', 'th']
False
DistributionPath.__init__
(self, path=None, include_egg=False)
Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. :param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376.
Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. :param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376.
def __init__(self, path=None, include_egg=False): """ Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. :param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376. """ if path is None: path = sys.path self.path = path self._include_dist = True self._include_egg = include_egg self._cache = _Cache() self._cache_egg = _Cache() self._cache_enabled = True self._scheme = get_scheme('default')
[ "def", "__init__", "(", "self", ",", "path", "=", "None", ",", "include_egg", "=", "False", ")", ":", "if", "path", "is", "None", ":", "path", "=", "sys", ".", "path", "self", ".", "path", "=", "path", "self", ".", "_include_dist", "=", "True", "self", ".", "_include_egg", "=", "include_egg", "self", ".", "_cache", "=", "_Cache", "(", ")", "self", ".", "_cache_egg", "=", "_Cache", "(", ")", "self", ".", "_cache_enabled", "=", "True", "self", ".", "_scheme", "=", "get_scheme", "(", "'default'", ")" ]
[ 78, 4 ]
[ 96, 44 ]
python
en
['en', 'error', 'th']
False
DistributionPath.clear_cache
(self)
Clears the internal cache.
Clears the internal cache.
def clear_cache(self): """ Clears the internal cache. """ self._cache.clear() self._cache_egg.clear()
[ "def", "clear_cache", "(", "self", ")", ":", "self", ".", "_cache", ".", "clear", "(", ")", "self", ".", "_cache_egg", ".", "clear", "(", ")" ]
[ 106, 4 ]
[ 111, 31 ]
python
en
['en', 'error', 'th']
False
DistributionPath._yield_distributions
(self)
Yield .dist-info and/or .egg(-info) distributions.
Yield .dist-info and/or .egg(-info) distributions.
def _yield_distributions(self): """ Yield .dist-info and/or .egg(-info) distributions. """ # We need to check if we've seen some resources already, because on # some Linux systems (e.g. some Debian/Ubuntu variants) there are # symlinks which alias other files in the environment. seen = set() for path in self.path: finder = resources.finder_for_path(path) if finder is None: continue r = finder.find('') if not r or not r.is_container: continue rset = sorted(r.resources) for entry in rset: r = finder.find(entry) if not r or r.path in seen: continue if self._include_dist and entry.endswith(DISTINFO_EXT): possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] for metadata_filename in possible_filenames: metadata_path = posixpath.join(entry, metadata_filename) pydist = finder.find(metadata_path) if pydist: break else: continue with contextlib.closing(pydist.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') logger.debug('Found %s', r.path) seen.add(r.path) yield new_dist_class(r.path, metadata=metadata, env=self) elif self._include_egg and entry.endswith(('.egg-info', '.egg')): logger.debug('Found %s', r.path) seen.add(r.path) yield old_dist_class(r.path, self)
[ "def", "_yield_distributions", "(", "self", ")", ":", "# We need to check if we've seen some resources already, because on", "# some Linux systems (e.g. some Debian/Ubuntu variants) there are", "# symlinks which alias other files in the environment.", "seen", "=", "set", "(", ")", "for", "path", "in", "self", ".", "path", ":", "finder", "=", "resources", ".", "finder_for_path", "(", "path", ")", "if", "finder", "is", "None", ":", "continue", "r", "=", "finder", ".", "find", "(", "''", ")", "if", "not", "r", "or", "not", "r", ".", "is_container", ":", "continue", "rset", "=", "sorted", "(", "r", ".", "resources", ")", "for", "entry", "in", "rset", ":", "r", "=", "finder", ".", "find", "(", "entry", ")", "if", "not", "r", "or", "r", ".", "path", "in", "seen", ":", "continue", "if", "self", ".", "_include_dist", "and", "entry", ".", "endswith", "(", "DISTINFO_EXT", ")", ":", "possible_filenames", "=", "[", "METADATA_FILENAME", ",", "WHEEL_METADATA_FILENAME", ",", "LEGACY_METADATA_FILENAME", "]", "for", "metadata_filename", "in", "possible_filenames", ":", "metadata_path", "=", "posixpath", ".", "join", "(", "entry", ",", "metadata_filename", ")", "pydist", "=", "finder", ".", "find", "(", "metadata_path", ")", "if", "pydist", ":", "break", "else", ":", "continue", "with", "contextlib", ".", "closing", "(", "pydist", ".", "as_stream", "(", ")", ")", "as", "stream", ":", "metadata", "=", "Metadata", "(", "fileobj", "=", "stream", ",", "scheme", "=", "'legacy'", ")", "logger", ".", "debug", "(", "'Found %s'", ",", "r", ".", "path", ")", "seen", ".", "add", "(", "r", ".", "path", ")", "yield", "new_dist_class", "(", "r", ".", "path", ",", "metadata", "=", "metadata", ",", "env", "=", "self", ")", "elif", "self", ".", "_include_egg", "and", "entry", ".", "endswith", "(", "(", "'.egg-info'", ",", "'.egg'", ")", ")", ":", "logger", ".", "debug", "(", "'Found %s'", ",", "r", ".", "path", ")", "seen", ".", "add", "(", "r", ".", "path", ")", "yield", "old_dist_class", "(", 
"r", ".", "path", ",", "self", ")" ]
[ 114, 4 ]
[ 156, 54 ]
python
en
['en', 'error', 'th']
False
DistributionPath._generate_cache
(self)
Scan the path for distributions and populate the cache with those that are found.
Scan the path for distributions and populate the cache with those that are found.
def _generate_cache(self): """ Scan the path for distributions and populate the cache with those that are found. """ gen_dist = not self._cache.generated gen_egg = self._include_egg and not self._cache_egg.generated if gen_dist or gen_egg: for dist in self._yield_distributions(): if isinstance(dist, InstalledDistribution): self._cache.add(dist) else: self._cache_egg.add(dist) if gen_dist: self._cache.generated = True if gen_egg: self._cache_egg.generated = True
[ "def", "_generate_cache", "(", "self", ")", ":", "gen_dist", "=", "not", "self", ".", "_cache", ".", "generated", "gen_egg", "=", "self", ".", "_include_egg", "and", "not", "self", ".", "_cache_egg", ".", "generated", "if", "gen_dist", "or", "gen_egg", ":", "for", "dist", "in", "self", ".", "_yield_distributions", "(", ")", ":", "if", "isinstance", "(", "dist", ",", "InstalledDistribution", ")", ":", "self", ".", "_cache", ".", "add", "(", "dist", ")", "else", ":", "self", ".", "_cache_egg", ".", "add", "(", "dist", ")", "if", "gen_dist", ":", "self", ".", "_cache", ".", "generated", "=", "True", "if", "gen_egg", ":", "self", ".", "_cache_egg", ".", "generated", "=", "True" ]
[ 158, 4 ]
[ 175, 48 ]
python
en
['en', 'error', 'th']
False
DistributionPath.distinfo_dirname
(cls, name, version)
The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. :type version: string :returns: directory name :rtype: string
The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number.
def distinfo_dirname(cls, name, version): """ The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. :type version: string :returns: directory name :rtype: string""" name = name.replace('-', '_') return '-'.join([name, version]) + DISTINFO_EXT
[ "def", "distinfo_dirname", "(", "cls", ",", "name", ",", "version", ")", ":", "name", "=", "name", ".", "replace", "(", "'-'", ",", "'_'", ")", "return", "'-'", ".", "join", "(", "[", "name", ",", "version", "]", ")", "+", "DISTINFO_EXT" ]
[ 178, 4 ]
[ 197, 55 ]
python
en
['en', 'error', 'th']
False
DistributionPath.get_distributions
(self)
Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. :rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances
Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them.
def get_distributions(self): """ Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. :rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances """ if not self._cache_enabled: for dist in self._yield_distributions(): yield dist else: self._generate_cache() for dist in self._cache.path.values(): yield dist if self._include_egg: for dist in self._cache_egg.path.values(): yield dist
[ "def", "get_distributions", "(", "self", ")", ":", "if", "not", "self", ".", "_cache_enabled", ":", "for", "dist", "in", "self", ".", "_yield_distributions", "(", ")", ":", "yield", "dist", "else", ":", "self", ".", "_generate_cache", "(", ")", "for", "dist", "in", "self", ".", "_cache", ".", "path", ".", "values", "(", ")", ":", "yield", "dist", "if", "self", ".", "_include_egg", ":", "for", "dist", "in", "self", ".", "_cache_egg", ".", "path", ".", "values", "(", ")", ":", "yield", "dist" ]
[ 199, 4 ]
[ 219, 30 ]
python
en
['en', 'error', 'th']
False
DistributionPath.get_distribution
(self, name)
Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None``
Looks for a named distribution on the path.
def get_distribution(self, name): """ Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` """ result = None name = name.lower() if not self._cache_enabled: for dist in self._yield_distributions(): if dist.key == name: result = dist break else: self._generate_cache() if name in self._cache.name: result = self._cache.name[name][0] elif self._include_egg and name in self._cache_egg.name: result = self._cache_egg.name[name][0] return result
[ "def", "get_distribution", "(", "self", ",", "name", ")", ":", "result", "=", "None", "name", "=", "name", ".", "lower", "(", ")", "if", "not", "self", ".", "_cache_enabled", ":", "for", "dist", "in", "self", ".", "_yield_distributions", "(", ")", ":", "if", "dist", ".", "key", "==", "name", ":", "result", "=", "dist", "break", "else", ":", "self", ".", "_generate_cache", "(", ")", "if", "name", "in", "self", ".", "_cache", ".", "name", ":", "result", "=", "self", ".", "_cache", ".", "name", "[", "name", "]", "[", "0", "]", "elif", "self", ".", "_include_egg", "and", "name", "in", "self", ".", "_cache_egg", ".", "name", ":", "result", "=", "self", ".", "_cache_egg", ".", "name", "[", "name", "]", "[", "0", "]", "return", "result" ]
[ 221, 4 ]
[ 245, 21 ]
python
en
['en', 'error', 'th']
False
DistributionPath.provides_distribution
(self, name, version=None)
Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string
Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results.
def provides_distribution(self, name, version=None): """ Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string """ matcher = None if version is not None: try: matcher = self._scheme.matcher('%s (%s)' % (name, version)) except ValueError: raise DistlibException('invalid name or version: %r, %r' % (name, version)) for dist in self.get_distributions(): # We hit a problem on Travis where enum34 was installed and doesn't # have a provides attribute ... if not hasattr(dist, 'provides'): logger.debug('No "provides": %s', dist) else: provided = dist.provides for p in provided: p_name, p_ver = parse_name_and_version(p) if matcher is None: if p_name == name: yield dist break else: if p_name == name and matcher.match(p_ver): yield dist break
[ "def", "provides_distribution", "(", "self", ",", "name", ",", "version", "=", "None", ")", ":", "matcher", "=", "None", "if", "version", "is", "not", "None", ":", "try", ":", "matcher", "=", "self", ".", "_scheme", ".", "matcher", "(", "'%s (%s)'", "%", "(", "name", ",", "version", ")", ")", "except", "ValueError", ":", "raise", "DistlibException", "(", "'invalid name or version: %r, %r'", "%", "(", "name", ",", "version", ")", ")", "for", "dist", "in", "self", ".", "get_distributions", "(", ")", ":", "# We hit a problem on Travis where enum34 was installed and doesn't", "# have a provides attribute ...", "if", "not", "hasattr", "(", "dist", ",", "'provides'", ")", ":", "logger", ".", "debug", "(", "'No \"provides\": %s'", ",", "dist", ")", "else", ":", "provided", "=", "dist", ".", "provides", "for", "p", "in", "provided", ":", "p_name", ",", "p_ver", "=", "parse_name_and_version", "(", "p", ")", "if", "matcher", "is", "None", ":", "if", "p_name", "==", "name", ":", "yield", "dist", "break", "else", ":", "if", "p_name", "==", "name", "and", "matcher", ".", "match", "(", "p_ver", ")", ":", "yield", "dist", "break" ]
[ 247, 4 ]
[ 286, 33 ]
python
en
['en', 'error', 'th']
False
DistributionPath.get_file_path
(self, name, relative_path)
Return the path to a resource file.
Return the path to a resource file.
def get_file_path(self, name, relative_path): """ Return the path to a resource file. """ dist = self.get_distribution(name) if dist is None: raise LookupError('no distribution named %r found' % name) return dist.get_resource_path(relative_path)
[ "def", "get_file_path", "(", "self", ",", "name", ",", "relative_path", ")", ":", "dist", "=", "self", ".", "get_distribution", "(", "name", ")", "if", "dist", "is", "None", ":", "raise", "LookupError", "(", "'no distribution named %r found'", "%", "name", ")", "return", "dist", ".", "get_resource_path", "(", "relative_path", ")" ]
[ 288, 4 ]
[ 295, 52 ]
python
en
['en', 'error', 'th']
False
DistributionPath.get_exported_entries
(self, category, name=None)
Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned.
Return all of the exported entries in a particular category.
def get_exported_entries(self, category, name=None): """ Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned. """ for dist in self.get_distributions(): r = dist.exports if category in r: d = r[category] if name is not None: if name in d: yield d[name] else: for v in d.values(): yield v
[ "def", "get_exported_entries", "(", "self", ",", "category", ",", "name", "=", "None", ")", ":", "for", "dist", "in", "self", ".", "get_distributions", "(", ")", ":", "r", "=", "dist", ".", "exports", "if", "category", "in", "r", ":", "d", "=", "r", "[", "category", "]", "if", "name", "is", "not", "None", ":", "if", "name", "in", "d", ":", "yield", "d", "[", "name", "]", "else", ":", "for", "v", "in", "d", ".", "values", "(", ")", ":", "yield", "v" ]
[ 297, 4 ]
[ 313, 31 ]
python
en
['en', 'error', 'th']
False
Distribution.__init__
(self, metadata)
Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution.
Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution.
def __init__(self, metadata): """ Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution. """ self.metadata = metadata self.name = metadata.name self.key = self.name.lower() # for case-insensitive comparisons self.version = metadata.version self.locator = None self.digest = None self.extras = None # additional features requested self.context = None # environment marker overrides self.download_urls = set() self.digests = {}
[ "def", "__init__", "(", "self", ",", "metadata", ")", ":", "self", ".", "metadata", "=", "metadata", "self", ".", "name", "=", "metadata", ".", "name", "self", ".", "key", "=", "self", ".", "name", ".", "lower", "(", ")", "# for case-insensitive comparisons", "self", ".", "version", "=", "metadata", ".", "version", "self", ".", "locator", "=", "None", "self", ".", "digest", "=", "None", "self", ".", "extras", "=", "None", "# additional features requested", "self", ".", "context", "=", "None", "# environment marker overrides", "self", ".", "download_urls", "=", "set", "(", ")", "self", ".", "digests", "=", "{", "}" ]
[ 334, 4 ]
[ 349, 25 ]
python
en
['en', 'error', 'th']
False
Distribution.source_url
(self)
The source archive download URL for this distribution.
The source archive download URL for this distribution.
def source_url(self): """ The source archive download URL for this distribution. """ return self.metadata.source_url
[ "def", "source_url", "(", "self", ")", ":", "return", "self", ".", "metadata", ".", "source_url" ]
[ 352, 4 ]
[ 356, 39 ]
python
en
['en', 'error', 'th']
False
Distribution.name_and_version
(self)
A utility property which displays the name and version in parentheses.
A utility property which displays the name and version in parentheses.
def name_and_version(self): """ A utility property which displays the name and version in parentheses. """ return '%s (%s)' % (self.name, self.version)
[ "def", "name_and_version", "(", "self", ")", ":", "return", "'%s (%s)'", "%", "(", "self", ".", "name", ",", "self", ".", "version", ")" ]
[ 361, 4 ]
[ 365, 52 ]
python
en
['en', 'error', 'th']
False
Distribution.provides
(self)
A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings.
A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings.
def provides(self): """ A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings. """ plist = self.metadata.provides s = '%s (%s)' % (self.name, self.version) if s not in plist: plist.append(s) return plist
[ "def", "provides", "(", "self", ")", ":", "plist", "=", "self", ".", "metadata", ".", "provides", "s", "=", "'%s (%s)'", "%", "(", "self", ".", "name", ",", "self", ".", "version", ")", "if", "s", "not", "in", "plist", ":", "plist", ".", "append", "(", "s", ")", "return", "plist" ]
[ 368, 4 ]
[ 377, 20 ]
python
en
['en', 'error', 'th']
False
Distribution.matches_requirement
(self, req)
Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False.
Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False.
def matches_requirement(self, req): """ Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False. """ # Requirement may contain extras - parse to lose those # from what's passed to the matcher r = parse_requirement(req) scheme = get_scheme(self.metadata.scheme) try: matcher = scheme.matcher(r.requirement) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive result = False for p in self.provides: p_name, p_ver = parse_name_and_version(p) if p_name != name: continue try: result = matcher.match(p_ver) break except UnsupportedVersionError: pass return result
[ "def", "matches_requirement", "(", "self", ",", "req", ")", ":", "# Requirement may contain extras - parse to lose those", "# from what's passed to the matcher", "r", "=", "parse_requirement", "(", "req", ")", "scheme", "=", "get_scheme", "(", "self", ".", "metadata", ".", "scheme", ")", "try", ":", "matcher", "=", "scheme", ".", "matcher", "(", "r", ".", "requirement", ")", "except", "UnsupportedVersionError", ":", "# XXX compat-mode if cannot read the version", "logger", ".", "warning", "(", "'could not read version %r - using name only'", ",", "req", ")", "name", "=", "req", ".", "split", "(", ")", "[", "0", "]", "matcher", "=", "scheme", ".", "matcher", "(", "name", ")", "name", "=", "matcher", ".", "key", "# case-insensitive", "result", "=", "False", "for", "p", "in", "self", ".", "provides", ":", "p_name", ",", "p_ver", "=", "parse_name_and_version", "(", "p", ")", "if", "p_name", "!=", "name", ":", "continue", "try", ":", "result", "=", "matcher", ".", "match", "(", "p_ver", ")", "break", "except", "UnsupportedVersionError", ":", "pass", "return", "result" ]
[ 406, 4 ]
[ 438, 21 ]
python
en
['en', 'error', 'th']
False
Distribution.__repr__
(self)
Return a textual representation of this instance,
Return a textual representation of this instance,
def __repr__(self): """ Return a textual representation of this instance, """ if self.source_url: suffix = ' [%s]' % self.source_url else: suffix = '' return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
[ "def", "__repr__", "(", "self", ")", ":", "if", "self", ".", "source_url", ":", "suffix", "=", "' [%s]'", "%", "self", ".", "source_url", "else", ":", "suffix", "=", "''", "return", "'<Distribution %s (%s)%s>'", "%", "(", "self", ".", "name", ",", "self", ".", "version", ",", "suffix", ")" ]
[ 440, 4 ]
[ 448, 77 ]
python
en
['en', 'error', 'th']
False
Distribution.__eq__
(self, other)
See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. :return: True if it is the same, else False.
See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. :return: True if it is the same, else False.
def __eq__(self, other): """ See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. :return: True if it is the same, else False. """ if type(other) is not type(self): result = False else: result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url) return result
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "type", "(", "other", ")", "is", "not", "type", "(", "self", ")", ":", "result", "=", "False", "else", ":", "result", "=", "(", "self", ".", "name", "==", "other", ".", "name", "and", "self", ".", "version", "==", "other", ".", "version", "and", "self", ".", "source_url", "==", "other", ".", "source_url", ")", "return", "result" ]
[ 450, 4 ]
[ 464, 21 ]
python
en
['en', 'error', 'th']
False
Distribution.__hash__
(self)
Compute hash in a way which matches the equality test.
Compute hash in a way which matches the equality test.
def __hash__(self): """ Compute hash in a way which matches the equality test. """ return hash(self.name) + hash(self.version) + hash(self.source_url)
[ "def", "__hash__", "(", "self", ")", ":", "return", "hash", "(", "self", ".", "name", ")", "+", "hash", "(", "self", ".", "version", ")", "+", "hash", "(", "self", ".", "source_url", ")" ]
[ 466, 4 ]
[ 470, 75 ]
python
en
['en', 'error', 'th']
False
BaseInstalledDistribution.__init__
(self, metadata, path, env=None)
Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found.
Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found.
def __init__(self, metadata, path, env=None): """ Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. """ super(BaseInstalledDistribution, self).__init__(metadata) self.path = path self.dist_path = env
[ "def", "__init__", "(", "self", ",", "metadata", ",", "path", ",", "env", "=", "None", ")", ":", "super", "(", "BaseInstalledDistribution", ",", "self", ")", ".", "__init__", "(", "metadata", ")", "self", ".", "path", "=", "path", "self", ".", "dist_path", "=", "env" ]
[ 481, 4 ]
[ 494, 28 ]
python
en
['en', 'error', 'th']
False
BaseInstalledDistribution.get_hash
(self, data, hasher=None)
Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str
Get the hash of some data, using a particular hash algorithm, if specified.
def get_hash(self, data, hasher=None): """ Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str """ if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest)
[ "def", "get_hash", "(", "self", ",", "data", ",", "hasher", "=", "None", ")", ":", "if", "hasher", "is", "None", ":", "hasher", "=", "self", ".", "hasher", "if", "hasher", "is", "None", ":", "hasher", "=", "hashlib", ".", "md5", "prefix", "=", "''", "else", ":", "hasher", "=", "getattr", "(", "hashlib", ",", "hasher", ")", "prefix", "=", "'%s='", "%", "self", ".", "hasher", "digest", "=", "hasher", "(", "data", ")", ".", "digest", "(", ")", "digest", "=", "base64", ".", "urlsafe_b64encode", "(", "digest", ")", ".", "rstrip", "(", "b'='", ")", ".", "decode", "(", "'ascii'", ")", "return", "'%s%s'", "%", "(", "prefix", ",", "digest", ")" ]
[ 496, 4 ]
[ 525, 40 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution._get_records
(self)
Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376).
Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376).
def _get_records(self): """ Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376). """ results = [] r = self.get_distinfo_resource('RECORD') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as record_reader: # Base location is parent dir of .dist-info dir #base_location = os.path.dirname(self.path) #base_location = os.path.abspath(base_location) for row in record_reader: missing = [None for i in range(len(row), 3)] path, checksum, size = row + missing #if not os.path.isabs(path): # path = path.replace('/', os.sep) # path = os.path.join(base_location, path) results.append((path, checksum, size)) return results
[ "def", "_get_records", "(", "self", ")", ":", "results", "=", "[", "]", "r", "=", "self", ".", "get_distinfo_resource", "(", "'RECORD'", ")", "with", "contextlib", ".", "closing", "(", "r", ".", "as_stream", "(", ")", ")", "as", "stream", ":", "with", "CSVReader", "(", "stream", "=", "stream", ")", "as", "record_reader", ":", "# Base location is parent dir of .dist-info dir", "#base_location = os.path.dirname(self.path)", "#base_location = os.path.abspath(base_location)", "for", "row", "in", "record_reader", ":", "missing", "=", "[", "None", "for", "i", "in", "range", "(", "len", "(", "row", ")", ",", "3", ")", "]", "path", ",", "checksum", ",", "size", "=", "row", "+", "missing", "#if not os.path.isabs(path):", "# path = path.replace('/', os.sep)", "# path = os.path.join(base_location, path)", "results", ".", "append", "(", "(", "path", ",", "checksum", ",", "size", ")", ")", "return", "results" ]
[ 579, 4 ]
[ 600, 22 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.exports
(self)
Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name.
Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name.
def exports(self): """ Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: result = self.read_exports() return result
[ "def", "exports", "(", "self", ")", ":", "result", "=", "{", "}", "r", "=", "self", ".", "get_distinfo_resource", "(", "EXPORTS_FILENAME", ")", "if", "r", ":", "result", "=", "self", ".", "read_exports", "(", ")", "return", "result" ]
[ 603, 4 ]
[ 614, 21 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.read_exports
(self)
Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries.
Read exports data from a file in .ini format.
def read_exports(self): """ Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: with contextlib.closing(r.as_stream()) as stream: result = read_exports(stream) return result
[ "def", "read_exports", "(", "self", ")", ":", "result", "=", "{", "}", "r", "=", "self", ".", "get_distinfo_resource", "(", "EXPORTS_FILENAME", ")", "if", "r", ":", "with", "contextlib", ".", "closing", "(", "r", ".", "as_stream", "(", ")", ")", "as", "stream", ":", "result", "=", "read_exports", "(", "stream", ")", "return", "result" ]
[ 616, 4 ]
[ 629, 21 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.write_exports
(self, exports)
Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries.
Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries.
def write_exports(self, exports): """ Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f)
[ "def", "write_exports", "(", "self", ",", "exports", ")", ":", "rf", "=", "self", ".", "get_distinfo_file", "(", "EXPORTS_FILENAME", ")", "with", "open", "(", "rf", ",", "'w'", ")", "as", "f", ":", "write_exports", "(", "exports", ",", "f", ")" ]
[ 631, 4 ]
[ 640, 37 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.get_resource_path
(self, relative_path)
NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found.
NOTE: This API may change in the future.
def get_resource_path(self, relative_path): """ NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found. """ r = self.get_distinfo_resource('RESOURCES') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as resources_reader: for relative, destination in resources_reader: if relative == relative_path: return destination raise KeyError('no resource file with relative path %r ' 'is installed' % relative_path)
[ "def", "get_resource_path", "(", "self", ",", "relative_path", ")", ":", "r", "=", "self", ".", "get_distinfo_resource", "(", "'RESOURCES'", ")", "with", "contextlib", ".", "closing", "(", "r", ".", "as_stream", "(", ")", ")", "as", "stream", ":", "with", "CSVReader", "(", "stream", "=", "stream", ")", "as", "resources_reader", ":", "for", "relative", ",", "destination", "in", "resources_reader", ":", "if", "relative", "==", "relative_path", ":", "return", "destination", "raise", "KeyError", "(", "'no resource file with relative path %r '", "'is installed'", "%", "relative_path", ")" ]
[ 642, 4 ]
[ 660, 54 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.list_installed_files
(self)
Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size)
Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line.
def list_installed_files(self): """ Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size) """ for result in self._get_records(): yield result
[ "def", "list_installed_files", "(", "self", ")", ":", "for", "result", "in", "self", ".", "_get_records", "(", ")", ":", "yield", "result" ]
[ 662, 4 ]
[ 670, 24 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.write_installed_files
(self, paths, prefix, dry_run=False)
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths.
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten.
def write_installed_files(self, paths, prefix, dry_run=False): """ Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths. """ prefix = os.path.join(prefix, '') base = os.path.dirname(self.path) base_under_prefix = base.startswith(prefix) base = os.path.join(base, '') record_path = self.get_distinfo_file('RECORD') logger.info('creating %s', record_path) if dry_run: return None with CSVWriter(record_path) as writer: for path in paths: if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): # do not put size and hash, as in PEP-376 hash_value = size = '' else: size = '%d' % os.path.getsize(path) with open(path, 'rb') as fp: hash_value = self.get_hash(fp.read()) if path.startswith(base) or (base_under_prefix and path.startswith(prefix)): path = os.path.relpath(path, base) writer.writerow((path, hash_value, size)) # add the RECORD file itself if record_path.startswith(base): record_path = os.path.relpath(record_path, base) writer.writerow((record_path, '', '')) return record_path
[ "def", "write_installed_files", "(", "self", ",", "paths", ",", "prefix", ",", "dry_run", "=", "False", ")", ":", "prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "''", ")", "base", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "path", ")", "base_under_prefix", "=", "base", ".", "startswith", "(", "prefix", ")", "base", "=", "os", ".", "path", ".", "join", "(", "base", ",", "''", ")", "record_path", "=", "self", ".", "get_distinfo_file", "(", "'RECORD'", ")", "logger", ".", "info", "(", "'creating %s'", ",", "record_path", ")", "if", "dry_run", ":", "return", "None", "with", "CSVWriter", "(", "record_path", ")", "as", "writer", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "or", "path", ".", "endswith", "(", "(", "'.pyc'", ",", "'.pyo'", ")", ")", ":", "# do not put size and hash, as in PEP-376", "hash_value", "=", "size", "=", "''", "else", ":", "size", "=", "'%d'", "%", "os", ".", "path", ".", "getsize", "(", "path", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "fp", ":", "hash_value", "=", "self", ".", "get_hash", "(", "fp", ".", "read", "(", ")", ")", "if", "path", ".", "startswith", "(", "base", ")", "or", "(", "base_under_prefix", "and", "path", ".", "startswith", "(", "prefix", ")", ")", ":", "path", "=", "os", ".", "path", ".", "relpath", "(", "path", ",", "base", ")", "writer", ".", "writerow", "(", "(", "path", ",", "hash_value", ",", "size", ")", ")", "# add the RECORD file itself", "if", "record_path", ".", "startswith", "(", "base", ")", ":", "record_path", "=", "os", ".", "path", ".", "relpath", "(", "record_path", ",", "base", ")", "writer", ".", "writerow", "(", "(", "record_path", ",", "''", ",", "''", ")", ")", "return", "record_path" ]
[ 672, 4 ]
[ 705, 26 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.check_installed_files
(self)
Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value.
Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value.
def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. """ mismatches = [] base = os.path.dirname(self.path) record_path = self.get_distinfo_file('RECORD') for path, hash_value, size in self.list_installed_files(): if not os.path.isabs(path): path = os.path.join(base, path) if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) elif os.path.isfile(path): actual_size = str(os.path.getsize(path)) if size and actual_size != size: mismatches.append((path, 'size', size, actual_size)) elif hash_value: if '=' in hash_value: hasher = hash_value.split('=', 1)[0] else: hasher = None with open(path, 'rb') as f: actual_hash = self.get_hash(f.read(), hasher) if actual_hash != hash_value: mismatches.append((path, 'hash', hash_value, actual_hash)) return mismatches
[ "def", "check_installed_files", "(", "self", ")", ":", "mismatches", "=", "[", "]", "base", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "path", ")", "record_path", "=", "self", ".", "get_distinfo_file", "(", "'RECORD'", ")", "for", "path", ",", "hash_value", ",", "size", "in", "self", ".", "list_installed_files", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "path", ")", "if", "path", "==", "record_path", ":", "continue", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "mismatches", ".", "append", "(", "(", "path", ",", "'exists'", ",", "True", ",", "False", ")", ")", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "actual_size", "=", "str", "(", "os", ".", "path", ".", "getsize", "(", "path", ")", ")", "if", "size", "and", "actual_size", "!=", "size", ":", "mismatches", ".", "append", "(", "(", "path", ",", "'size'", ",", "size", ",", "actual_size", ")", ")", "elif", "hash_value", ":", "if", "'='", "in", "hash_value", ":", "hasher", "=", "hash_value", ".", "split", "(", "'='", ",", "1", ")", "[", "0", "]", "else", ":", "hasher", "=", "None", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "actual_hash", "=", "self", ".", "get_hash", "(", "f", ".", "read", "(", ")", ",", "hasher", ")", "if", "actual_hash", "!=", "hash_value", ":", "mismatches", ".", "append", "(", "(", "path", ",", "'hash'", ",", "hash_value", ",", "actual_hash", ")", ")", "return", "mismatches" ]
[ 707, 4 ]
[ 740, 25 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.shared_locations
(self)
A dictionary of shared locations whose keys are in the set 'prefix', 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. The corresponding value is the absolute path of that category for this distribution, and takes into account any paths selected by the user at installation time (e.g. via command-line arguments). In the case of the 'namespace' key, this would be a list of absolute paths for the roots of namespace packages in this distribution. The first time this property is accessed, the relevant information is read from the SHARED file in the .dist-info directory.
A dictionary of shared locations whose keys are in the set 'prefix', 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. The corresponding value is the absolute path of that category for this distribution, and takes into account any paths selected by the user at installation time (e.g. via command-line arguments). In the case of the 'namespace' key, this would be a list of absolute paths for the roots of namespace packages in this distribution.
def shared_locations(self): """ A dictionary of shared locations whose keys are in the set 'prefix', 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. The corresponding value is the absolute path of that category for this distribution, and takes into account any paths selected by the user at installation time (e.g. via command-line arguments). In the case of the 'namespace' key, this would be a list of absolute paths for the roots of namespace packages in this distribution. The first time this property is accessed, the relevant information is read from the SHARED file in the .dist-info directory. """ result = {} shared_path = os.path.join(self.path, 'SHARED') if os.path.isfile(shared_path): with codecs.open(shared_path, 'r', encoding='utf-8') as f: lines = f.read().splitlines() for line in lines: key, value = line.split('=', 1) if key == 'namespace': result.setdefault(key, []).append(value) else: result[key] = value return result
[ "def", "shared_locations", "(", "self", ")", ":", "result", "=", "{", "}", "shared_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'SHARED'", ")", "if", "os", ".", "path", ".", "isfile", "(", "shared_path", ")", ":", "with", "codecs", ".", "open", "(", "shared_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "lines", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "key", ",", "value", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "key", "==", "'namespace'", ":", "result", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "append", "(", "value", ")", "else", ":", "result", "[", "key", "]", "=", "value", "return", "result" ]
[ 743, 4 ]
[ 767, 21 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.write_shared_locations
(self, paths, dry_run=False)
Write shared location information to the SHARED file in .dist-info. :param paths: A dictionary as described in the documentation for :meth:`shared_locations`. :param dry_run: If True, the action is logged but no file is actually written. :return: The path of the file written to.
Write shared location information to the SHARED file in .dist-info. :param paths: A dictionary as described in the documentation for :meth:`shared_locations`. :param dry_run: If True, the action is logged but no file is actually written. :return: The path of the file written to.
def write_shared_locations(self, paths, dry_run=False): """ Write shared location information to the SHARED file in .dist-info. :param paths: A dictionary as described in the documentation for :meth:`shared_locations`. :param dry_run: If True, the action is logged but no file is actually written. :return: The path of the file written to. """ shared_path = os.path.join(self.path, 'SHARED') logger.info('creating %s', shared_path) if dry_run: return None lines = [] for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): path = paths[key] if os.path.isdir(paths[key]): lines.append('%s=%s' % (key, path)) for ns in paths.get('namespace', ()): lines.append('namespace=%s' % ns) with codecs.open(shared_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) return shared_path
[ "def", "write_shared_locations", "(", "self", ",", "paths", ",", "dry_run", "=", "False", ")", ":", "shared_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'SHARED'", ")", "logger", ".", "info", "(", "'creating %s'", ",", "shared_path", ")", "if", "dry_run", ":", "return", "None", "lines", "=", "[", "]", "for", "key", "in", "(", "'prefix'", ",", "'lib'", ",", "'headers'", ",", "'scripts'", ",", "'data'", ")", ":", "path", "=", "paths", "[", "key", "]", "if", "os", ".", "path", ".", "isdir", "(", "paths", "[", "key", "]", ")", ":", "lines", ".", "append", "(", "'%s=%s'", "%", "(", "key", ",", "path", ")", ")", "for", "ns", "in", "paths", ".", "get", "(", "'namespace'", ",", "(", ")", ")", ":", "lines", ".", "append", "(", "'namespace=%s'", "%", "ns", ")", "with", "codecs", ".", "open", "(", "shared_path", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "lines", ")", ")", "return", "shared_path" ]
[ 769, 4 ]
[ 792, 26 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.get_distinfo_file
(self, path)
Returns a path located under the ``.dist-info`` directory. Returns a string representing the path. :parameter path: a ``'/'``-separated path relative to the ``.dist-info`` directory or an absolute path; If *path* is an absolute path and doesn't start with the ``.dist-info`` directory path, a :class:`DistlibException` is raised :type path: str :rtype: str
Returns a path located under the ``.dist-info`` directory. Returns a string representing the path.
def get_distinfo_file(self, path): """ Returns a path located under the ``.dist-info`` directory. Returns a string representing the path. :parameter path: a ``'/'``-separated path relative to the ``.dist-info`` directory or an absolute path; If *path* is an absolute path and doesn't start with the ``.dist-info`` directory path, a :class:`DistlibException` is raised :type path: str :rtype: str """ # Check if it is an absolute path # XXX use relpath, add tests if path.find(os.sep) >= 0: # it's an absolute path? distinfo_dirname, path = path.split(os.sep)[-2:] if distinfo_dirname != self.path.split(os.sep)[-1]: raise DistlibException( 'dist-info file %r does not belong to the %r %s ' 'distribution' % (path, self.name, self.version)) # The file must be relative if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) return os.path.join(self.path, path)
[ "def", "get_distinfo_file", "(", "self", ",", "path", ")", ":", "# Check if it is an absolute path # XXX use relpath, add tests", "if", "path", ".", "find", "(", "os", ".", "sep", ")", ">=", "0", ":", "# it's an absolute path?", "distinfo_dirname", ",", "path", "=", "path", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "2", ":", "]", "if", "distinfo_dirname", "!=", "self", ".", "path", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", ":", "raise", "DistlibException", "(", "'dist-info file %r does not belong to the %r %s '", "'distribution'", "%", "(", "path", ",", "self", ".", "name", ",", "self", ".", "version", ")", ")", "# The file must be relative", "if", "path", "not", "in", "DIST_FILES", ":", "raise", "DistlibException", "(", "'invalid path for a dist-info file: '", "'%r at %r'", "%", "(", "path", ",", "self", ".", "path", ")", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "path", ")" ]
[ 803, 4 ]
[ 830, 44 ]
python
en
['en', 'error', 'th']
False
InstalledDistribution.list_distinfo_files
(self)
Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories. :returns: iterator of paths
Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories.
def list_distinfo_files(self): """ Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories. :returns: iterator of paths """ base = os.path.dirname(self.path) for path, checksum, size in self._get_records(): # XXX add separator or use real relpath algo if not os.path.isabs(path): path = os.path.join(base, path) if path.startswith(self.path): yield path
[ "def", "list_distinfo_files", "(", "self", ")", ":", "base", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "path", ")", "for", "path", ",", "checksum", ",", "size", "in", "self", ".", "_get_records", "(", ")", ":", "# XXX add separator or use real relpath algo", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "path", ")", "if", "path", ".", "startswith", "(", "self", ".", "path", ")", ":", "yield", "path" ]
[ 832, 4 ]
[ 846, 26 ]
python
en
['en', 'error', 'th']
False
EggInfoDistribution.check_installed_files
(self)
Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value.
Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value.
def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. """ mismatches = [] record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): for path, _, _ in self.list_installed_files(): if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) return mismatches
[ "def", "check_installed_files", "(", "self", ")", ":", "mismatches", "=", "[", "]", "record_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'installed-files.txt'", ")", "if", "os", ".", "path", ".", "exists", "(", "record_path", ")", ":", "for", "path", ",", "_", ",", "_", "in", "self", ".", "list_installed_files", "(", ")", ":", "if", "path", "==", "record_path", ":", "continue", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "mismatches", ".", "append", "(", "(", "path", ",", "'exists'", ",", "True", ",", "False", ")", ")", "return", "mismatches" ]
[ 983, 4 ]
[ 1000, 25 ]
python
en
['en', 'error', 'th']
False
EggInfoDistribution.list_installed_files
(self)
Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: a list of (path, hash, size)
Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line.
def list_installed_files(self): """ Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: a list of (path, hash, size) """ def _md5(path): f = open(path, 'rb') try: content = f.read() finally: f.close() return hashlib.md5(content).hexdigest() def _size(path): return os.stat(path).st_size record_path = os.path.join(self.path, 'installed-files.txt') result = [] if os.path.exists(record_path): with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() p = os.path.normpath(os.path.join(self.path, line)) # "./" is present as a marker between installed files # and installation metadata files if not os.path.exists(p): logger.warning('Non-existent file: %s', p) if p.endswith(('.pyc', '.pyo')): continue #otherwise fall through and fail if not os.path.isdir(p): result.append((p, _md5(p), _size(p))) result.append((record_path, None, None)) return result
[ "def", "list_installed_files", "(", "self", ")", ":", "def", "_md5", "(", "path", ")", ":", "f", "=", "open", "(", "path", ",", "'rb'", ")", "try", ":", "content", "=", "f", ".", "read", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "return", "hashlib", ".", "md5", "(", "content", ")", ".", "hexdigest", "(", ")", "def", "_size", "(", "path", ")", ":", "return", "os", ".", "stat", "(", "path", ")", ".", "st_size", "record_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'installed-files.txt'", ")", "result", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "record_path", ")", ":", "with", "codecs", ".", "open", "(", "record_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "p", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "line", ")", ")", "# \"./\" is present as a marker between installed files", "# and installation metadata files", "if", "not", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "logger", ".", "warning", "(", "'Non-existent file: %s'", ",", "p", ")", "if", "p", ".", "endswith", "(", "(", "'.pyc'", ",", "'.pyo'", ")", ")", ":", "continue", "#otherwise fall through and fail", "if", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "result", ".", "append", "(", "(", "p", ",", "_md5", "(", "p", ")", ",", "_size", "(", "p", ")", ")", ")", "result", ".", "append", "(", "(", "record_path", ",", "None", ",", "None", ")", ")", "return", "result" ]
[ 1002, 4 ]
[ 1038, 21 ]
python
en
['en', 'error', 'th']
False
EggInfoDistribution.list_distinfo_files
(self, absolute=False)
Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths
Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories.
def list_distinfo_files(self, absolute=False): """ Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths """ record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line
[ "def", "list_distinfo_files", "(", "self", ",", "absolute", "=", "False", ")", ":", "record_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'installed-files.txt'", ")", "if", "os", ".", "path", ".", "exists", "(", "record_path", ")", ":", "skip", "=", "True", "with", "codecs", ".", "open", "(", "record_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "==", "'./'", ":", "skip", "=", "False", "continue", "if", "not", "skip", ":", "p", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "line", ")", ")", "if", "p", ".", "startswith", "(", "self", ".", "path", ")", ":", "if", "absolute", ":", "yield", "p", "else", ":", "yield", "line" ]
[ 1040, 4 ]
[ 1067, 42 ]
python
en
['en', 'error', 'th']
False
DependencyGraph.add_distribution
(self, distribution)
Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution`
Add the *distribution* to the graph.
def add_distribution(self, distribution): """Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` """ self.adjacency_list[distribution] = [] self.reverse_list[distribution] = []
[ "def", "add_distribution", "(", "self", ",", "distribution", ")", ":", "self", ".", "adjacency_list", "[", "distribution", "]", "=", "[", "]", "self", ".", "reverse_list", "[", "distribution", "]", "=", "[", "]" ]
[ 1101, 4 ]
[ 1108, 44 ]
python
en
['en', 'en', 'en']
True
DependencyGraph.add_edge
(self, x, y, label=None)
Add an edge from distribution *x* to distribution *y* with the given *label*. :type x: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type y: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type label: ``str`` or ``None``
Add an edge from distribution *x* to distribution *y* with the given *label*.
def add_edge(self, x, y, label=None): """Add an edge from distribution *x* to distribution *y* with the given *label*. :type x: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type y: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type label: ``str`` or ``None`` """ self.adjacency_list[x].append((y, label)) # multiple edges are allowed, so be careful if x not in self.reverse_list[y]: self.reverse_list[y].append(x)
[ "def", "add_edge", "(", "self", ",", "x", ",", "y", ",", "label", "=", "None", ")", ":", "self", ".", "adjacency_list", "[", "x", "]", ".", "append", "(", "(", "y", ",", "label", ")", ")", "# multiple edges are allowed, so be careful", "if", "x", "not", "in", "self", ".", "reverse_list", "[", "y", "]", ":", "self", ".", "reverse_list", "[", "y", "]", ".", "append", "(", "x", ")" ]
[ 1111, 4 ]
[ 1124, 42 ]
python
en
['en', 'en', 'en']
True
DependencyGraph.add_missing
(self, distribution, requirement)
Add a missing *requirement* for the given *distribution*. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type requirement: ``str``
Add a missing *requirement* for the given *distribution*.
def add_missing(self, distribution, requirement): """ Add a missing *requirement* for the given *distribution*. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type requirement: ``str`` """ logger.debug('%s missing %r', distribution, requirement) self.missing.setdefault(distribution, []).append(requirement)
[ "def", "add_missing", "(", "self", ",", "distribution", ",", "requirement", ")", ":", "logger", ".", "debug", "(", "'%s missing %r'", ",", "distribution", ",", "requirement", ")", "self", ".", "missing", ".", "setdefault", "(", "distribution", ",", "[", "]", ")", ".", "append", "(", "requirement", ")" ]
[ 1126, 4 ]
[ 1135, 69 ]
python
en
['en', 'error', 'th']
False
DependencyGraph.repr_node
(self, dist, level=1)
Prints only a subgraph
Prints only a subgraph
def repr_node(self, dist, level=1): """Prints only a subgraph""" output = [self._repr_dist(dist)] for other, label in self.adjacency_list[dist]: dist = self._repr_dist(other) if label is not None: dist = '%s [%s]' % (dist, label) output.append(' ' * level + str(dist)) suboutput = self.repr_node(other, level + 1) subs = suboutput.split('\n') output.extend(subs[1:]) return '\n'.join(output)
[ "def", "repr_node", "(", "self", ",", "dist", ",", "level", "=", "1", ")", ":", "output", "=", "[", "self", ".", "_repr_dist", "(", "dist", ")", "]", "for", "other", ",", "label", "in", "self", ".", "adjacency_list", "[", "dist", "]", ":", "dist", "=", "self", ".", "_repr_dist", "(", "other", ")", "if", "label", "is", "not", "None", ":", "dist", "=", "'%s [%s]'", "%", "(", "dist", ",", "label", ")", "output", ".", "append", "(", "' '", "*", "level", "+", "str", "(", "dist", ")", ")", "suboutput", "=", "self", ".", "repr_node", "(", "other", ",", "level", "+", "1", ")", "subs", "=", "suboutput", ".", "split", "(", "'\\n'", ")", "output", ".", "extend", "(", "subs", "[", "1", ":", "]", ")", "return", "'\\n'", ".", "join", "(", "output", ")" ]
[ 1140, 4 ]
[ 1151, 32 ]
python
en
['en', 'en', 'en']
True
DependencyGraph.to_dot
(self, f, skip_disconnected=True)
Writes a DOT output for the graph to the provided file *f*. If *skip_disconnected* is set to ``True``, then all distributions that are not dependent on any other distribution are skipped. :type f: has to support ``file``-like operations :type skip_disconnected: ``bool``
Writes a DOT output for the graph to the provided file *f*.
def to_dot(self, f, skip_disconnected=True): """Writes a DOT output for the graph to the provided file *f*. If *skip_disconnected* is set to ``True``, then all distributions that are not dependent on any other distribution are skipped. :type f: has to support ``file``-like operations :type skip_disconnected: ``bool`` """ disconnected = [] f.write("digraph dependencies {\n") for dist, adjs in self.adjacency_list.items(): if len(adjs) == 0 and not skip_disconnected: disconnected.append(dist) for other, label in adjs: if not label is None: f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label)) else: f.write('"%s" -> "%s"\n' % (dist.name, other.name)) if not skip_disconnected and len(disconnected) > 0: f.write('subgraph disconnected {\n') f.write('label = "Disconnected"\n') f.write('bgcolor = red\n') for dist in disconnected: f.write('"%s"' % dist.name) f.write('\n') f.write('}\n') f.write('}\n')
[ "def", "to_dot", "(", "self", ",", "f", ",", "skip_disconnected", "=", "True", ")", ":", "disconnected", "=", "[", "]", "f", ".", "write", "(", "\"digraph dependencies {\\n\"", ")", "for", "dist", ",", "adjs", "in", "self", ".", "adjacency_list", ".", "items", "(", ")", ":", "if", "len", "(", "adjs", ")", "==", "0", "and", "not", "skip_disconnected", ":", "disconnected", ".", "append", "(", "dist", ")", "for", "other", ",", "label", "in", "adjs", ":", "if", "not", "label", "is", "None", ":", "f", ".", "write", "(", "'\"%s\" -> \"%s\" [label=\"%s\"]\\n'", "%", "(", "dist", ".", "name", ",", "other", ".", "name", ",", "label", ")", ")", "else", ":", "f", ".", "write", "(", "'\"%s\" -> \"%s\"\\n'", "%", "(", "dist", ".", "name", ",", "other", ".", "name", ")", ")", "if", "not", "skip_disconnected", "and", "len", "(", "disconnected", ")", ">", "0", ":", "f", ".", "write", "(", "'subgraph disconnected {\\n'", ")", "f", ".", "write", "(", "'label = \"Disconnected\"\\n'", ")", "f", ".", "write", "(", "'bgcolor = red\\n'", ")", "for", "dist", "in", "disconnected", ":", "f", ".", "write", "(", "'\"%s\"'", "%", "dist", ".", "name", ")", "f", ".", "write", "(", "'\\n'", ")", "f", ".", "write", "(", "'}\\n'", ")", "f", ".", "write", "(", "'}\\n'", ")" ]
[ 1153, 4 ]
[ 1183, 22 ]
python
en
['en', 'en', 'en']
True
DependencyGraph.topological_sort
(self)
Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle.
Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle.
def topological_sort(self): """ Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle. """ result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys())
[ "def", "topological_sort", "(", "self", ")", ":", "result", "=", "[", "]", "# Make a shallow copy of the adjacency list", "alist", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "adjacency_list", ".", "items", "(", ")", ":", "alist", "[", "k", "]", "=", "v", "[", ":", "]", "while", "True", ":", "# See what we can remove in this run", "to_remove", "=", "[", "]", "for", "k", ",", "v", "in", "list", "(", "alist", ".", "items", "(", ")", ")", "[", ":", "]", ":", "if", "not", "v", ":", "to_remove", ".", "append", "(", "k", ")", "del", "alist", "[", "k", "]", "if", "not", "to_remove", ":", "# What's left in alist (if anything) is a cycle.", "break", "# Remove from the adjacency list of others", "for", "k", ",", "v", "in", "alist", ".", "items", "(", ")", ":", "alist", "[", "k", "]", "=", "[", "(", "d", ",", "r", ")", "for", "d", ",", "r", "in", "v", "if", "d", "not", "in", "to_remove", "]", "logger", ".", "debug", "(", "'Moving to result: %s'", ",", "[", "'%s (%s)'", "%", "(", "d", ".", "name", ",", "d", ".", "version", ")", "for", "d", "in", "to_remove", "]", ")", "result", ".", "extend", "(", "to_remove", ")", "return", "result", ",", "list", "(", "alist", ".", "keys", "(", ")", ")" ]
[ 1185, 4 ]
[ 1214, 41 ]
python
en
['en', 'error', 'th']
False
DependencyGraph.__repr__
(self)
Representation of the graph
Representation of the graph
def __repr__(self): """Representation of the graph""" output = [] for dist, adjs in self.adjacency_list.items(): output.append(self.repr_node(dist)) return '\n'.join(output)
[ "def", "__repr__", "(", "self", ")", ":", "output", "=", "[", "]", "for", "dist", ",", "adjs", "in", "self", ".", "adjacency_list", ".", "items", "(", ")", ":", "output", ".", "append", "(", "self", ".", "repr_node", "(", "dist", ")", ")", "return", "'\\n'", ".", "join", "(", "output", ")" ]
[ 1216, 4 ]
[ 1221, 32 ]
python
en
['en', 'en', 'en']
True
iri2uri
(uri)
Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.
Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.
def iri2uri(uri): """Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.""" if isinstance(uri, unicode): (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) authority = authority.encode("idna") # For each character in 'ucschar' or 'iprivate' # 1. encode as utf-8 # 2. then %-encode each octet of that utf-8 uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) uri = "".join([encode(c) for c in uri]) return uri
[ "def", "iri2uri", "(", "uri", ")", ":", "if", "isinstance", "(", "uri", ",", "unicode", ")", ":", "(", "scheme", ",", "authority", ",", "path", ",", "query", ",", "fragment", ")", "=", "urlparse", ".", "urlsplit", "(", "uri", ")", "authority", "=", "authority", ".", "encode", "(", "\"idna\"", ")", "# For each character in 'ucschar' or 'iprivate'", "# 1. encode as utf-8", "# 2. then %-encode each octet of that utf-8", "uri", "=", "urlparse", ".", "urlunsplit", "(", "(", "scheme", ",", "authority", ",", "path", ",", "query", ",", "fragment", ")", ")", "uri", "=", "\"\"", ".", "join", "(", "[", "encode", "(", "c", ")", "for", "c", "in", "uri", "]", ")", "return", "uri" ]
[ 58, 0 ]
[ 70, 14 ]
python
en
['en', 'lb', 'en']
True
setup_module_logger
(name)
Set up module level logging with formatting
Set up module level logging with formatting
def setup_module_logger(name): """Set up module level logging with formatting""" logger = logging.getLogger(name) ch = logging.StreamHandler() # Really should not be configuring formats in a library, see # https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s' ) ch.setFormatter(formatter) logger.addHandler(ch)
[ "def", "setup_module_logger", "(", "name", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "# Really should not be configuring formats in a library, see", "# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s'", ")", "ch", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "ch", ")" ]
[ 19, 0 ]
[ 29, 25 ]
python
en
['en', 'en', 'en']
True
get_mx_ip
(hostname)
Get MX record by hostname.
Get MX record by hostname.
async def get_mx_ip(hostname): '''Get MX record by hostname. ''' if hostname not in MX_DNS_CACHE: try: resolver = aiodns.DNSResolver() MX_DNS_CACHE[hostname] = await resolver.query(hostname, 'MX') except aiodns.error.DNSError as e: MX_DNS_CACHE[hostname] = None return MX_DNS_CACHE[hostname]
[ "async", "def", "get_mx_ip", "(", "hostname", ")", ":", "if", "hostname", "not", "in", "MX_DNS_CACHE", ":", "try", ":", "resolver", "=", "aiodns", ".", "DNSResolver", "(", ")", "MX_DNS_CACHE", "[", "hostname", "]", "=", "await", "resolver", ".", "query", "(", "hostname", ",", "'MX'", ")", "except", "aiodns", ".", "error", ".", "DNSError", "as", "e", ":", "MX_DNS_CACHE", "[", "hostname", "]", "=", "None", "return", "MX_DNS_CACHE", "[", "hostname", "]" ]
[ 38, 0 ]
[ 47, 33 ]
python
en
['en', 'en', 'en']
True