Dataset schema:

  column                          type            values
  identifier                      string          lengths 1 to 155
  parameters                      string          lengths 2 to 6.09k
  docstring                       string          lengths 11 to 63.4k
  docstring_summary               string          lengths 0 to 63.4k
  function                        string          lengths 29 to 99.8k
  function_tokens                 sequence
  start_point                     sequence
  end_point                       sequence
  language                        string classes  1 value
  docstring_language              string          lengths 2 to 7
  docstring_language_predictions  string          lengths 18 to 23
  is_langid_reliable              string classes  2 values
Client.head
(self, path, data=None, follow=False, secure=False, **extra)
Request a response from the server using HEAD.
Request a response from the server using HEAD.
def head(self, path, data=None, follow=False, secure=False, **extra):
    """Request a response from the server using HEAD."""
    self.extra = extra
    response = super().head(path, data=data, secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, data=data, **extra)
    return response
[ "def", "head", "(", "self", ",", "path", ",", "data", "=", "None", ",", "follow", "=", "False", ",", "secure", "=", "False", ",", "*", "*", "extra", ")", ":", "self", ".", "extra", "=", "extra", "response", "=", "super", "(", ")", ".", "head", "(", "path", ",", "data", "=", "data", ",", "secure", "=", "secure", ",", "*", "*", "extra", ")", "if", "follow", ":", "response", "=", "self", ".", "_handle_redirects", "(", "response", ",", "data", "=", "data", ",", "*", "*", "extra", ")", "return", "response" ]
[ 755, 4 ]
[ 761, 23 ]
python
en
['en', 'en', 'en']
True
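A minimal usage sketch of the method above, assuming a configured Django project; the URL is hypothetical:

from django.test import Client

client = Client()
# HEAD fetches the same headers a GET would return, without a body.
response = client.head('/', follow=True)
print(response.status_code)
# redirect_chain is populated only when follow=True.
print(response.redirect_chain)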
Client.options
(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra)
Request a response from the server using OPTIONS.
Request a response from the server using OPTIONS.
def options(self, path, data='', content_type='application/octet-stream',
            follow=False, secure=False, **extra):
    """Request a response from the server using OPTIONS."""
    self.extra = extra
    response = super().options(path, data=data, content_type=content_type,
                               secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, data=data,
                                          content_type=content_type, **extra)
    return response
[ "def", "options", "(", "self", ",", "path", ",", "data", "=", "''", ",", "content_type", "=", "'application/octet-stream'", ",", "follow", "=", "False", ",", "secure", "=", "False", ",", "*", "*", "extra", ")", ":", "self", ".", "extra", "=", "extra", "response", "=", "super", "(", ")", ".", "options", "(", "path", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "secure", "=", "secure", ",", "*", "*", "extra", ")", "if", "follow", ":", "response", "=", "self", ".", "_handle_redirects", "(", "response", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "*", "*", "extra", ")", "return", "response" ]
[ 763, 4 ]
[ 770, 23 ]
python
en
['en', 'en', 'en']
True
Client.put
(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra)
Send a resource to the server using PUT.
Send a resource to the server using PUT.
def put(self, path, data='', content_type='application/octet-stream',
        follow=False, secure=False, **extra):
    """Send a resource to the server using PUT."""
    self.extra = extra
    response = super().put(path, data=data, content_type=content_type,
                           secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, data=data,
                                          content_type=content_type, **extra)
    return response
[ "def", "put", "(", "self", ",", "path", ",", "data", "=", "''", ",", "content_type", "=", "'application/octet-stream'", ",", "follow", "=", "False", ",", "secure", "=", "False", ",", "*", "*", "extra", ")", ":", "self", ".", "extra", "=", "extra", "response", "=", "super", "(", ")", ".", "put", "(", "path", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "secure", "=", "secure", ",", "*", "*", "extra", ")", "if", "follow", ":", "response", "=", "self", ".", "_handle_redirects", "(", "response", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "*", "*", "extra", ")", "return", "response" ]
[ 772, 4 ]
[ 779, 23 ]
python
en
['en', 'en', 'en']
True
Client.patch
(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra)
Send a resource to the server using PATCH.
Send a resource to the server using PATCH.
def patch(self, path, data='', content_type='application/octet-stream',
          follow=False, secure=False, **extra):
    """Send a resource to the server using PATCH."""
    self.extra = extra
    response = super().patch(path, data=data, content_type=content_type,
                             secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, data=data,
                                          content_type=content_type, **extra)
    return response
[ "def", "patch", "(", "self", ",", "path", ",", "data", "=", "''", ",", "content_type", "=", "'application/octet-stream'", ",", "follow", "=", "False", ",", "secure", "=", "False", ",", "*", "*", "extra", ")", ":", "self", ".", "extra", "=", "extra", "response", "=", "super", "(", ")", ".", "patch", "(", "path", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "secure", "=", "secure", ",", "*", "*", "extra", ")", "if", "follow", ":", "response", "=", "self", ".", "_handle_redirects", "(", "response", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "*", "*", "extra", ")", "return", "response" ]
[ 781, 4 ]
[ 788, 23 ]
python
en
['en', 'en', 'en']
True
Client.delete
(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra)
Send a DELETE request to the server.
Send a DELETE request to the server.
def delete(self, path, data='', content_type='application/octet-stream',
           follow=False, secure=False, **extra):
    """Send a DELETE request to the server."""
    self.extra = extra
    response = super().delete(path, data=data, content_type=content_type,
                              secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, data=data,
                                          content_type=content_type, **extra)
    return response
[ "def", "delete", "(", "self", ",", "path", ",", "data", "=", "''", ",", "content_type", "=", "'application/octet-stream'", ",", "follow", "=", "False", ",", "secure", "=", "False", ",", "*", "*", "extra", ")", ":", "self", ".", "extra", "=", "extra", "response", "=", "super", "(", ")", ".", "delete", "(", "path", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "secure", "=", "secure", ",", "*", "*", "extra", ")", "if", "follow", ":", "response", "=", "self", ".", "_handle_redirects", "(", "response", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "*", "*", "extra", ")", "return", "response" ]
[ 790, 4 ]
[ 797, 23 ]
python
en
['en', 'it', 'en']
True
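The options, put, patch, and delete rows above share one calling convention: a raw request body plus an explicit content type. A hedged sketch; the endpoint is hypothetical:

from django.test import Client

client = Client()
# The body is passed through verbatim; content_type sets the Content-Type header.
response = client.put(
    '/api/items/1/',
    data='{"name": "widget"}',
    content_type='application/json',
)
response = client.delete('/api/items/1/')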
Client.trace
(self, path, data='', follow=False, secure=False, **extra)
Send a TRACE request to the server.
Send a TRACE request to the server.
def trace(self, path, data='', follow=False, secure=False, **extra):
    """Send a TRACE request to the server."""
    self.extra = extra
    response = super().trace(path, data=data, secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, data=data, **extra)
    return response
[ "def", "trace", "(", "self", ",", "path", ",", "data", "=", "''", ",", "follow", "=", "False", ",", "secure", "=", "False", ",", "*", "*", "extra", ")", ":", "self", ".", "extra", "=", "extra", "response", "=", "super", "(", ")", ".", "trace", "(", "path", ",", "data", "=", "data", ",", "secure", "=", "secure", ",", "*", "*", "extra", ")", "if", "follow", ":", "response", "=", "self", ".", "_handle_redirects", "(", "response", ",", "data", "=", "data", ",", "*", "*", "extra", ")", "return", "response" ]
[ 799, 4 ]
[ 805, 23 ]
python
en
['en', 'en', 'en']
True
Client._handle_redirects
(self, response, data='', content_type='', **extra)
Follow any redirects by requesting responses from the server using GET.
Follow any redirects by requesting responses from the server using GET.
def _handle_redirects(self, response, data='', content_type='', **extra):
    """
    Follow any redirects by requesting responses from the server using GET.
    """
    response.redirect_chain = []
    redirect_status_codes = (
        HTTPStatus.MOVED_PERMANENTLY,
        HTTPStatus.FOUND,
        HTTPStatus.SEE_OTHER,
        HTTPStatus.TEMPORARY_REDIRECT,
        HTTPStatus.PERMANENT_REDIRECT,
    )
    while response.status_code in redirect_status_codes:
        response_url = response.url
        redirect_chain = response.redirect_chain
        redirect_chain.append((response_url, response.status_code))

        url = urlsplit(response_url)
        if url.scheme:
            extra['wsgi.url_scheme'] = url.scheme
        if url.hostname:
            extra['SERVER_NAME'] = url.hostname
        if url.port:
            extra['SERVER_PORT'] = str(url.port)

        # Prepend the request path to handle relative path redirects
        path = url.path
        if not path.startswith('/'):
            path = urljoin(response.request['PATH_INFO'], path)

        if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT):
            # Preserve request method and query string (if needed)
            # post-redirect for 307/308 responses.
            request_method = response.request['REQUEST_METHOD'].lower()
            if request_method not in ('get', 'head'):
                extra['QUERY_STRING'] = url.query
            request_method = getattr(self, request_method)
        else:
            request_method = self.get
            data = QueryDict(url.query)
            content_type = None

        response = request_method(path, data=data, content_type=content_type, follow=False, **extra)
        response.redirect_chain = redirect_chain

        if redirect_chain[-1] in redirect_chain[:-1]:
            # Check that we're not redirecting to somewhere we've already
            # been to, to prevent loops.
            raise RedirectCycleError("Redirect loop detected.", last_response=response)
        if len(redirect_chain) > 20:
            # Such a lengthy chain likely also means a loop, but one with
            # a growing path, changing view, or changing query argument;
            # 20 is the value of "network.http.redirection-limit" from Firefox.
            raise RedirectCycleError("Too many redirects.", last_response=response)

    return response
[ "def", "_handle_redirects", "(", "self", ",", "response", ",", "data", "=", "''", ",", "content_type", "=", "''", ",", "*", "*", "extra", ")", ":", "response", ".", "redirect_chain", "=", "[", "]", "redirect_status_codes", "=", "(", "HTTPStatus", ".", "MOVED_PERMANENTLY", ",", "HTTPStatus", ".", "FOUND", ",", "HTTPStatus", ".", "SEE_OTHER", ",", "HTTPStatus", ".", "TEMPORARY_REDIRECT", ",", "HTTPStatus", ".", "PERMANENT_REDIRECT", ",", ")", "while", "response", ".", "status_code", "in", "redirect_status_codes", ":", "response_url", "=", "response", ".", "url", "redirect_chain", "=", "response", ".", "redirect_chain", "redirect_chain", ".", "append", "(", "(", "response_url", ",", "response", ".", "status_code", ")", ")", "url", "=", "urlsplit", "(", "response_url", ")", "if", "url", ".", "scheme", ":", "extra", "[", "'wsgi.url_scheme'", "]", "=", "url", ".", "scheme", "if", "url", ".", "hostname", ":", "extra", "[", "'SERVER_NAME'", "]", "=", "url", ".", "hostname", "if", "url", ".", "port", ":", "extra", "[", "'SERVER_PORT'", "]", "=", "str", "(", "url", ".", "port", ")", "# Prepend the request path to handle relative path redirects", "path", "=", "url", ".", "path", "if", "not", "path", ".", "startswith", "(", "'/'", ")", ":", "path", "=", "urljoin", "(", "response", ".", "request", "[", "'PATH_INFO'", "]", ",", "path", ")", "if", "response", ".", "status_code", "in", "(", "HTTPStatus", ".", "TEMPORARY_REDIRECT", ",", "HTTPStatus", ".", "PERMANENT_REDIRECT", ")", ":", "# Preserve request method and query string (if needed)", "# post-redirect for 307/308 responses.", "request_method", "=", "response", ".", "request", "[", "'REQUEST_METHOD'", "]", ".", "lower", "(", ")", "if", "request_method", "not", "in", "(", "'get'", ",", "'head'", ")", ":", "extra", "[", "'QUERY_STRING'", "]", "=", "url", ".", "query", "request_method", "=", "getattr", "(", "self", ",", "request_method", ")", "else", ":", "request_method", "=", "self", ".", "get", "data", "=", "QueryDict", "(", "url", ".", "query", ")", "content_type", "=", "None", "response", "=", "request_method", "(", "path", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ",", "follow", "=", "False", ",", "*", "*", "extra", ")", "response", ".", "redirect_chain", "=", "redirect_chain", "if", "redirect_chain", "[", "-", "1", "]", "in", "redirect_chain", "[", ":", "-", "1", "]", ":", "# Check that we're not redirecting to somewhere we've already", "# been to, to prevent loops.", "raise", "RedirectCycleError", "(", "\"Redirect loop detected.\"", ",", "last_response", "=", "response", ")", "if", "len", "(", "redirect_chain", ")", ">", "20", ":", "# Such a lengthy chain likely also means a loop, but one with", "# a growing path, changing view, or changing query argument;", "# 20 is the value of \"network.http.redirection-limit\" from Firefox.", "raise", "RedirectCycleError", "(", "\"Too many redirects.\"", ",", "last_response", "=", "response", ")", "return", "response" ]
[ 807, 4 ]
[ 862, 23 ]
python
en
['en', 'error', 'th']
False
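The redirect handling above is what follow=True triggers: each hop is re-requested with GET (except for 307/308, which preserve the method), and every hop is recorded. A hedged sketch of observing it; the URLs and status code are hypothetical:

from django.test import Client

client = Client()
# Suppose '/old/' issues a 301 to '/new/'.
response = client.get('/old/', follow=True)
# Each (url, status_code) hop is recorded on the final response.
assert response.redirect_chain == [('/new/', 301)]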
AsyncClient.request
(self, **request)
The master request method. Compose the scope dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request.
The master request method. Compose the scope dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request.
async def request(self, **request):
    """
    The master request method. Compose the scope dictionary and pass to the
    handler, return the result of the handler. Assume defaults for the query
    environment, which can be overridden using the arguments to the request.
    """
    if 'follow' in request:
        raise NotImplementedError(
            'AsyncClient request methods do not accept the follow '
            'parameter.'
        )
    scope = self._base_scope(**request)
    # Curry a data dictionary into an instance of the template renderer
    # callback function.
    data = {}
    on_template_render = partial(store_rendered_templates, data)
    signal_uid = 'template-render-%s' % id(request)
    signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
    # Capture exceptions created by the handler.
    exception_uid = 'request-exception-%s' % id(request)
    got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
    try:
        response = await self.handler(scope)
    finally:
        signals.template_rendered.disconnect(dispatch_uid=signal_uid)
        got_request_exception.disconnect(dispatch_uid=exception_uid)
    # Check for signaled exceptions.
    self.check_exception(response)
    # Save the client and request that stimulated the response.
    response.client = self
    response.request = request
    # Add any rendered template detail to the response.
    response.templates = data.get('templates', [])
    response.context = data.get('context')
    response.json = partial(self._parse_json, response)
    # Attach the ResolverMatch instance to the response.
    response.resolver_match = SimpleLazyObject(lambda: resolve(request['path']))
    # Flatten a single context. Not really necessary anymore thanks to the
    # __getattr__ flattening in ContextList, but has some edge case
    # backwards compatibility implications.
    if response.context and len(response.context) == 1:
        response.context = response.context[0]
    # Update persistent cookie data.
    if response.cookies:
        self.cookies.update(response.cookies)
    return response
[ "async", "def", "request", "(", "self", ",", "*", "*", "request", ")", ":", "if", "'follow'", "in", "request", ":", "raise", "NotImplementedError", "(", "'AsyncClient request methods do not accept the follow '", "'parameter.'", ")", "scope", "=", "self", ".", "_base_scope", "(", "*", "*", "request", ")", "# Curry a data dictionary into an instance of the template renderer", "# callback function.", "data", "=", "{", "}", "on_template_render", "=", "partial", "(", "store_rendered_templates", ",", "data", ")", "signal_uid", "=", "'template-render-%s'", "%", "id", "(", "request", ")", "signals", ".", "template_rendered", ".", "connect", "(", "on_template_render", ",", "dispatch_uid", "=", "signal_uid", ")", "# Capture exceptions created by the handler.", "exception_uid", "=", "'request-exception-%s'", "%", "id", "(", "request", ")", "got_request_exception", ".", "connect", "(", "self", ".", "store_exc_info", ",", "dispatch_uid", "=", "exception_uid", ")", "try", ":", "response", "=", "await", "self", ".", "handler", "(", "scope", ")", "finally", ":", "signals", ".", "template_rendered", ".", "disconnect", "(", "dispatch_uid", "=", "signal_uid", ")", "got_request_exception", ".", "disconnect", "(", "dispatch_uid", "=", "exception_uid", ")", "# Check for signaled exceptions.", "self", ".", "check_exception", "(", "response", ")", "# Save the client and request that stimulated the response.", "response", ".", "client", "=", "self", "response", ".", "request", "=", "request", "# Add any rendered template detail to the response.", "response", ".", "templates", "=", "data", ".", "get", "(", "'templates'", ",", "[", "]", ")", "response", ".", "context", "=", "data", ".", "get", "(", "'context'", ")", "response", ".", "json", "=", "partial", "(", "self", ".", "_parse_json", ",", "response", ")", "# Attach the ResolverMatch instance to the response.", "response", ".", "resolver_match", "=", "SimpleLazyObject", "(", "lambda", ":", "resolve", "(", "request", "[", "'path'", "]", ")", ")", "# Flatten a single context. Not really necessary anymore thanks to the", "# __getattr__ flattening in ContextList, but has some edge case", "# backwards compatibility implications.", "if", "response", ".", "context", "and", "len", "(", "response", ".", "context", ")", "==", "1", ":", "response", ".", "context", "=", "response", ".", "context", "[", "0", "]", "# Update persistent cookie data.", "if", "response", ".", "cookies", ":", "self", ".", "cookies", ".", "update", "(", "response", ".", "cookies", ")", "return", "response" ]
[ 879, 4 ]
[ 925, 23 ]
python
en
['en', 'error', 'th']
False
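Unlike the synchronous Client, the async request method above rejects follow, so redirects are the caller's problem. A minimal sketch, assuming configured Django settings; the URL is hypothetical:

import asyncio
from django.test import AsyncClient

async def main():
    client = AsyncClient()
    # Passing follow=... here would raise NotImplementedError.
    response = await client.get('/')
    print(response.status_code)

asyncio.run(main())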
suppressed_cache_errors
()
If we can't access the cache then we can just skip caching and process requests as if caching wasn't enabled.
If we can't access the cache then we can just skip caching and process requests as if caching wasn't enabled.
def suppressed_cache_errors() -> Iterator[None]:
    """If we can't access the cache then we can just skip caching and process
    requests as if caching wasn't enabled.
    """
    try:
        yield
    except OSError:
        pass
[ "def", "suppressed_cache_errors", "(", ")", "->", "Iterator", "[", "None", "]", ":", "try", ":", "yield", "except", "OSError", ":", "pass" ]
[ 20, 0 ]
[ 27, 12 ]
python
en
['en', 'en', 'en']
True
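In pip's source this generator is wrapped with contextlib.contextmanager (the decorator is not captured in this row). A standalone sketch of the same pattern:

import contextlib
from typing import Iterator

@contextlib.contextmanager
def suppressed_cache_errors() -> Iterator[None]:
    # Skip caching when the cache directory is unusable.
    try:
        yield
    except OSError:
        pass

# Any OSError raised inside the block is silently swallowed.
with suppressed_cache_errors():
    raise OSError("cache directory is not writable")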
_default_template_ctx_processor
()
Default template context processor. Injects `request`, `session` and `g`.
Default template context processor. Injects `request`, `session` and `g`.
def _default_template_ctx_processor():
    """Default template context processor.  Injects `request`,
    `session` and `g`.
    """
    reqctx = _request_ctx_stack.top
    appctx = _app_ctx_stack.top
    rv = {}
    if appctx is not None:
        rv['g'] = appctx.g
    if reqctx is not None:
        rv['request'] = reqctx.request
        rv['session'] = reqctx.session
    return rv
[ "def", "_default_template_ctx_processor", "(", ")", ":", "reqctx", "=", "_request_ctx_stack", ".", "top", "appctx", "=", "_app_ctx_stack", ".", "top", "rv", "=", "{", "}", "if", "appctx", "is", "not", "None", ":", "rv", "[", "'g'", "]", "=", "appctx", ".", "g", "if", "reqctx", "is", "not", "None", ":", "rv", "[", "'request'", "]", "=", "reqctx", ".", "request", "rv", "[", "'session'", "]", "=", "reqctx", ".", "session", "return", "rv" ]
[ 17, 0 ]
[ 29, 13 ]
python
en
['en', 'en', 'pt']
True
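With this processor installed, every Flask template can read g, request, and session without them being passed explicitly. A small sketch; the route is hypothetical:

from flask import Flask, g, render_template_string

app = Flask(__name__)

@app.route('/whoami')
def whoami():
    g.user = 'alice'
    # `g` and `request` come from the default context processor.
    return render_template_string('{{ g.user }} via {{ request.path }}')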
_render
(template, context, app)
Renders the template and fires the signal
Renders the template and fires the signal
def _render(template, context, app):
    """Renders the template and fires the signal"""
    before_render_template.send(app, template=template, context=context)
    rv = template.render(context)
    template_rendered.send(app, template=template, context=context)
    return rv
[ "def", "_render", "(", "template", ",", "context", ",", "app", ")", ":", "before_render_template", ".", "send", "(", "app", ",", "template", "=", "template", ",", "context", "=", "context", ")", "rv", "=", "template", ".", "render", "(", "context", ")", "template_rendered", ".", "send", "(", "app", ",", "template", "=", "template", ",", "context", "=", "context", ")", "return", "rv" ]
[ 111, 0 ]
[ 117, 13 ]
python
en
['en', 'en', 'en']
True
render_template
(template_name_or_list, **context)
Renders a template from the template folder with the given context.

:param template_name_or_list: the name of the template to be rendered, or an iterable with template names the first one existing will be rendered
:param context: the variables that should be available in the context of the template.
Renders a template from the template folder with the given context.
def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)
[ "def", "render_template", "(", "template_name_or_list", ",", "*", "*", "context", ")", ":", "ctx", "=", "_app_ctx_stack", ".", "top", "ctx", ".", "app", ".", "update_template_context", "(", "context", ")", "return", "_render", "(", "ctx", ".", "app", ".", "jinja_env", ".", "get_or_select_template", "(", "template_name_or_list", ")", ",", "context", ",", "ctx", ".", "app", ")" ]
[ 120, 0 ]
[ 133, 36 ]
python
en
['en', 'en', 'en']
True
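A typical call site for render_template; both the route and the templates/hello.html file are hypothetical:

from flask import Flask, render_template

app = Flask(__name__)

@app.route('/hello/<name>')
def hello(name):
    # Looks up templates/hello.html and renders it with `name` in the context.
    return render_template('hello.html', name=name)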
render_template_string
(source, **context)
Renders a template from the given template source string with the given context. Template variables will be autoescaped.

:param source: the source code of the template to be rendered
:param context: the variables that should be available in the context of the template.
Renders a template from the given template source string with the given context. Template variables will be autoescaped.
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context. Template variables will be autoescaped.

    :param source: the source code of the template to be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.from_string(source), context, ctx.app)
[ "def", "render_template_string", "(", "source", ",", "*", "*", "context", ")", ":", "ctx", "=", "_app_ctx_stack", ".", "top", "ctx", ".", "app", ".", "update_template_context", "(", "context", ")", "return", "_render", "(", "ctx", ".", "app", ".", "jinja_env", ".", "from_string", "(", "source", ")", ",", "context", ",", "ctx", ".", "app", ")" ]
[ 136, 0 ]
[ 148, 36 ]
python
en
['en', 'en', 'en']
True
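Since the function above only reads the application context stack, it also works outside a request. A runnable sketch:

from flask import Flask, render_template_string

app = Flask(__name__)

with app.app_context():
    # Variables are autoescaped: markup inside `name` would be rendered inert.
    print(render_template_string('Hello, {{ name }}!', name='world'))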
_check_link_requires_python
( link: Link, version_info: Tuple[int, int, int], ignore_requires_python: bool = False, )
Return whether the given Python version is compatible with a link's "Requires-Python" value.

:param version_info: A 3-tuple of ints representing the Python major-minor-micro version to check.
:param ignore_requires_python: Whether to ignore the "Requires-Python" value if the given Python version isn't compatible.
Return whether the given Python version is compatible with a link's "Requires-Python" value.
def _check_link_requires_python(
    link: Link,
    version_info: Tuple[int, int, int],
    ignore_requires_python: bool = False,
) -> bool:
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python, version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python, link,
        )
    else:
        if not is_compatible:
            version = '.'.join(map(str, version_info))
            if not ignore_requires_python:
                logger.verbose(
                    'Link requires a different Python (%s not in: %r): %s',
                    version, link.requires_python, link,
                )
                return False
            logger.debug(
                'Ignoring failed Requires-Python check (%s not in: %r) '
                'for link: %s',
                version, link.requires_python, link,
            )
    return True
[ "def", "_check_link_requires_python", "(", "link", ":", "Link", ",", "version_info", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]", ",", "ignore_requires_python", ":", "bool", "=", "False", ",", ")", "->", "bool", ":", "try", ":", "is_compatible", "=", "check_requires_python", "(", "link", ".", "requires_python", ",", "version_info", "=", "version_info", ",", ")", "except", "specifiers", ".", "InvalidSpecifier", ":", "logger", ".", "debug", "(", "\"Ignoring invalid Requires-Python (%r) for link: %s\"", ",", "link", ".", "requires_python", ",", "link", ",", ")", "else", ":", "if", "not", "is_compatible", ":", "version", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "version_info", ")", ")", "if", "not", "ignore_requires_python", ":", "logger", ".", "verbose", "(", "'Link requires a different Python (%s not in: %r): %s'", ",", "version", ",", "link", ".", "requires_python", ",", "link", ",", ")", "return", "False", "logger", ".", "debug", "(", "'Ignoring failed Requires-Python check (%s not in: %r) '", "'for link: %s'", ",", "version", ",", "link", ".", "requires_python", ",", "link", ",", ")", "return", "True" ]
[ 52, 0 ]
[ 91, 15 ]
python
en
['en', 'error', 'th']
False
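check_requires_python is a pip helper, but the core of the check can be reproduced with packaging directly. A hedged standalone sketch:

from packaging.specifiers import InvalidSpecifier, SpecifierSet

version_info = (3, 8, 0)
requires_python = '>=3.7'
try:
    # Equivalent in spirit to check_requires_python(...) above.
    is_compatible = '.'.join(map(str, version_info)) in SpecifierSet(requires_python)
except InvalidSpecifier:
    is_compatible = True  # invalid values are ignored, as in the function above
print(is_compatible)  # True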
filter_unallowed_hashes
( candidates: List[InstallationCandidate], hashes: Hashes, project_name: str, )
Filter out candidates whose hashes aren't allowed, and return a new list of candidates.

If at least one candidate has an allowed hash, then all candidates with either an allowed hash or no hash specified are returned. Otherwise, the given candidates are returned.

Including the candidates with no hash specified when there is a match allows a warning to be logged if there is a more preferred candidate with no hash specified. Returning all candidates in the case of no matches lets pip report the hash of the candidate that would otherwise have been installed (e.g. permitting the user to more easily update their requirements file with the desired hash).
Filter out candidates whose hashes aren't allowed, and return a new list of candidates.
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Hashes,
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Filter out candidates whose hashes aren't allowed, and return a new
    list of candidates.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned.  Otherwise,
    the given candidates are returned.

    Including the candidates with no hash specified when there is a match
    allows a warning to be logged if there is a more preferred candidate
    with no hash specified.  Returning all candidates in the case of no
    matches lets pip report the hash of the candidate that would otherwise
    have been installed (e.g. permitting the user to more easily update
    their requirements file with the desired hash).
    """
    if not hashes:
        logger.debug(
            'Given no hashes to check %s links for project %r: '
            'discarding no candidates',
            len(candidates), project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)

    matches_or_no_digest = []
    # Collect the non-matches for logging purposes.
    non_matches = []
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if not link.has_hash:
            pass
        elif link.is_hash_allowed(hashes=hashes):
            match_count += 1
        else:
            non_matches.append(candidate)
            continue

        matches_or_no_digest.append(candidate)

    if match_count:
        filtered = matches_or_no_digest
    else:
        # Make sure we're not returning back the given value.
        filtered = list(candidates)

    if len(filtered) == len(candidates):
        discard_message = 'discarding no candidates'
    else:
        discard_message = 'discarding {} non-matches:\n  {}'.format(
            len(non_matches),
            '\n  '.join(str(candidate.link) for candidate in non_matches)
        )

    logger.debug(
        'Checked %s links for project %r against %s hashes '
        '(%s matches, %s no digest): %s',
        len(candidates), project_name, hashes.digest_count, match_count,
        len(matches_or_no_digest) - match_count, discard_message
    )

    return filtered
[ "def", "filter_unallowed_hashes", "(", "candidates", ":", "List", "[", "InstallationCandidate", "]", ",", "hashes", ":", "Hashes", ",", "project_name", ":", "str", ",", ")", "->", "List", "[", "InstallationCandidate", "]", ":", "if", "not", "hashes", ":", "logger", ".", "debug", "(", "'Given no hashes to check %s links for project %r: '", "'discarding no candidates'", ",", "len", "(", "candidates", ")", ",", "project_name", ",", ")", "# Make sure we're not returning back the given value.", "return", "list", "(", "candidates", ")", "matches_or_no_digest", "=", "[", "]", "# Collect the non-matches for logging purposes.", "non_matches", "=", "[", "]", "match_count", "=", "0", "for", "candidate", "in", "candidates", ":", "link", "=", "candidate", ".", "link", "if", "not", "link", ".", "has_hash", ":", "pass", "elif", "link", ".", "is_hash_allowed", "(", "hashes", "=", "hashes", ")", ":", "match_count", "+=", "1", "else", ":", "non_matches", ".", "append", "(", "candidate", ")", "continue", "matches_or_no_digest", ".", "append", "(", "candidate", ")", "if", "match_count", ":", "filtered", "=", "matches_or_no_digest", "else", ":", "# Make sure we're not returning back the given value.", "filtered", "=", "list", "(", "candidates", ")", "if", "len", "(", "filtered", ")", "==", "len", "(", "candidates", ")", ":", "discard_message", "=", "'discarding no candidates'", "else", ":", "discard_message", "=", "'discarding {} non-matches:\\n {}'", ".", "format", "(", "len", "(", "non_matches", ")", ",", "'\\n '", ".", "join", "(", "str", "(", "candidate", ".", "link", ")", "for", "candidate", "in", "non_matches", ")", ")", "logger", ".", "debug", "(", "'Checked %s links for project %r against %s hashes '", "'(%s matches, %s no digest): %s'", ",", "len", "(", "candidates", ")", ",", "project_name", ",", "hashes", ".", "digest_count", ",", "match_count", ",", "len", "(", "matches_or_no_digest", ")", "-", "match_count", ",", "discard_message", ")", "return", "filtered" ]
[ 231, 0 ]
[ 302, 19 ]
python
en
['en', 'error', 'th']
False
_find_name_version_sep
(fragment: str, canonical_name: str)
Find the separator's index based on the package's canonical name.

:param fragment: A <package>+<version> filename "fragment" (stem) or egg fragment.
:param canonical_name: The package's canonical name.

This function is needed since the canonicalized name does not necessarily have the same length as the egg info's name part. An example::

    >>> fragment = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(fragment, canonical_name)
    8
Find the separator's index based on the package's canonical name.
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Find the separator's index based on the package's canonical name.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

    >>> fragment = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(fragment, canonical_name)
    8
    """
    # Project name and version must be separated by one single dash. Find all
    # occurrences of dashes; if the string in front of it matches the canonical
    # name, this is the one separating the name and version parts.
    for i, c in enumerate(fragment):
        if c != "-":
            continue
        if canonicalize_name(fragment[:i]) == canonical_name:
            return i
    raise ValueError(f"{fragment} does not match {canonical_name}")
[ "def", "_find_name_version_sep", "(", "fragment", ":", "str", ",", "canonical_name", ":", "str", ")", "->", "int", ":", "# Project name and version must be separated by one single dash. Find all", "# occurrences of dashes; if the string in front of it matches the canonical", "# name, this is the one separating the name and version parts.", "for", "i", ",", "c", "in", "enumerate", "(", "fragment", ")", ":", "if", "c", "!=", "\"-\"", ":", "continue", "if", "canonicalize_name", "(", "fragment", "[", ":", "i", "]", ")", "==", "canonical_name", ":", "return", "i", "raise", "ValueError", "(", "f\"{fragment} does not match {canonical_name}\"", ")" ]
[ 940, 0 ]
[ 963, 67 ]
python
en
['en', 'en', 'en']
True
_extract_version_from_fragment
(fragment: str, canonical_name: str)
Parse the version string from a <package>+<version> filename "fragment" (stem) or egg fragment.

:param fragment: The string to parse. E.g. foo-2.1
:param canonical_name: The canonicalized name of the package this belongs to.
Parse the version string from a <package>+<version> filename "fragment" (stem) or egg fragment.
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version string from a <package>+<version> filename
    "fragment" (stem) or egg fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    """
    try:
        version_start = _find_name_version_sep(fragment, canonical_name) + 1
    except ValueError:
        return None
    version = fragment[version_start:]
    if not version:
        return None
    return version
[ "def", "_extract_version_from_fragment", "(", "fragment", ":", "str", ",", "canonical_name", ":", "str", ")", "->", "Optional", "[", "str", "]", ":", "try", ":", "version_start", "=", "_find_name_version_sep", "(", "fragment", ",", "canonical_name", ")", "+", "1", "except", "ValueError", ":", "return", "None", "version", "=", "fragment", "[", "version_start", ":", "]", "if", "not", "version", ":", "return", "None", "return", "version" ]
[ 966, 0 ]
[ 981, 18 ]
python
en
['en', 'en', 'en']
True
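A quick check of the two fragment helpers above, reusing the docstring's own example; it assumes both functions and packaging's canonicalize_name are in scope:

# canonicalize_name('foo__bar') == 'foo-bar', so the separator is the
# dash at index 8 and everything after it is the version.
assert _find_name_version_sep('foo__bar-1.0', 'foo-bar') == 8
assert _extract_version_from_fragment('foo__bar-1.0', 'foo-bar') == '1.0'
# No parsable version part -> None rather than an exception.
assert _extract_version_from_fragment('foo-bar', 'foo-bar') is None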
LinkEvaluator.__init__
( self, project_name: str, canonical_name: str, formats: FrozenSet[str], target_python: TargetPython, allow_yanked: bool, ignore_requires_python: Optional[bool] = None, )
:param project_name: The user supplied package name.
:param canonical_name: The canonical package name.
:param formats: The formats allowed for this package. Should be a set with 'binary' or 'source' or both in it.
:param target_python: The target Python interpreter to use when evaluating link compatibility. This is used, for example, to check wheel compatibility, as well as when checking the Python version, e.g. the Python version embedded in a link filename (or egg fragment) and against an HTML link's optional PEP 503 "data-requires-python" attribute.
:param allow_yanked: Whether files marked as yanked (in the sense of PEP 592) are permitted to be candidates for install.
:param ignore_requires_python: Whether to ignore incompatible PEP 503 "data-requires-python" values in HTML links. Defaults to False.
:param project_name: The user supplied package name.
:param canonical_name: The canonical package name.
:param formats: The formats allowed for this package. Should be a set with 'binary' or 'source' or both in it.
:param target_python: The target Python interpreter to use when evaluating link compatibility. This is used, for example, to check wheel compatibility, as well as when checking the Python version, e.g. the Python version embedded in a link filename (or egg fragment) and against an HTML link's optional PEP 503 "data-requires-python" attribute.
:param allow_yanked: Whether files marked as yanked (in the sense of PEP 592) are permitted to be candidates for install.
:param ignore_requires_python: Whether to ignore incompatible PEP 503 "data-requires-python" values in HTML links. Defaults to False.
def __init__(
    self,
    project_name: str,
    canonical_name: str,
    formats: FrozenSet[str],
    target_python: TargetPython,
    allow_yanked: bool,
    ignore_requires_python: Optional[bool] = None,
) -> None:
    """
    :param project_name: The user supplied package name.
    :param canonical_name: The canonical package name.
    :param formats: The formats allowed for this package. Should be a set
        with 'binary' or 'source' or both in it.
    :param target_python: The target Python interpreter to use when
        evaluating link compatibility. This is used, for example, to
        check wheel compatibility, as well as when checking the Python
        version, e.g. the Python version embedded in a link filename
        (or egg fragment) and against an HTML link's optional PEP 503
        "data-requires-python" attribute.
    :param allow_yanked: Whether files marked as yanked (in the sense
        of PEP 592) are permitted to be candidates for install.
    :param ignore_requires_python: Whether to ignore incompatible
        PEP 503 "data-requires-python" values in HTML links. Defaults
        to False.
    """
    if ignore_requires_python is None:
        ignore_requires_python = False

    self._allow_yanked = allow_yanked
    self._canonical_name = canonical_name
    self._ignore_requires_python = ignore_requires_python
    self._formats = formats
    self._target_python = target_python

    self.project_name = project_name
[ "def", "__init__", "(", "self", ",", "project_name", ":", "str", ",", "canonical_name", ":", "str", ",", "formats", ":", "FrozenSet", "[", "str", "]", ",", "target_python", ":", "TargetPython", ",", "allow_yanked", ":", "bool", ",", "ignore_requires_python", ":", "Optional", "[", "bool", "]", "=", "None", ",", ")", "->", "None", ":", "if", "ignore_requires_python", "is", "None", ":", "ignore_requires_python", "=", "False", "self", ".", "_allow_yanked", "=", "allow_yanked", "self", ".", "_canonical_name", "=", "canonical_name", "self", ".", "_ignore_requires_python", "=", "ignore_requires_python", "self", ".", "_formats", "=", "formats", "self", ".", "_target_python", "=", "target_python", "self", ".", "project_name", "=", "project_name" ]
[ 106, 4 ]
[ 141, 40 ]
python
en
['en', 'error', 'th']
False
LinkEvaluator.evaluate_link
(self, link: Link)
Determine whether a link is a candidate for installation.

:return: A tuple (is_candidate, result), where `result` is (1) a version string if `is_candidate` is True, and (2) if `is_candidate` is False, an optional string to log the reason the link fails to qualify.
Determine whether a link is a candidate for installation.
def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]:
    """
    Determine whether a link is a candidate for installation.

    :return: A tuple (is_candidate, result), where `result` is (1) a
        version string if `is_candidate` is True, and (2) if
        `is_candidate` is False, an optional string to log the reason
        the link fails to qualify.
    """
    version = None
    if link.is_yanked and not self._allow_yanked:
        reason = link.yanked_reason or '<none given>'
        return (False, f'yanked for reason: {reason}')

    if link.egg_fragment:
        egg_info = link.egg_fragment
        ext = link.ext
    else:
        egg_info, ext = link.splitext()
        if not ext:
            return (False, 'not a file')
        if ext not in SUPPORTED_EXTENSIONS:
            return (False, f'unsupported archive format: {ext}')
    if "binary" not in self._formats and ext == WHEEL_EXTENSION:
        reason = 'No binaries permitted for {}'.format(
            self.project_name)
        return (False, reason)
    if "macosx10" in link.path and ext == '.zip':
        return (False, 'macosx10 one')
    if ext == WHEEL_EXTENSION:
        try:
            wheel = Wheel(link.filename)
        except InvalidWheelFilename:
            return (False, 'invalid wheel filename')
        if canonicalize_name(wheel.name) != self._canonical_name:
            reason = 'wrong project name (not {})'.format(
                self.project_name)
            return (False, reason)

        supported_tags = self._target_python.get_tags()
        if not wheel.supported(supported_tags):
            # Include the wheel's tags in the reason string to
            # simplify troubleshooting compatibility issues.
            file_tags = wheel.get_formatted_file_tags()
            reason = (
                "none of the wheel's tags ({}) are compatible "
                "(run pip debug --verbose to show compatible tags)".format(
                    ', '.join(file_tags)
                )
            )
            return (False, reason)

        version = wheel.version

    # This should be up by the self.ok_binary check, but see issue 2700.
    if "source" not in self._formats and ext != WHEEL_EXTENSION:
        reason = f'No sources permitted for {self.project_name}'
        return (False, reason)

    if not version:
        version = _extract_version_from_fragment(
            egg_info, self._canonical_name,
        )
    if not version:
        reason = f'Missing project version for {self.project_name}'
        return (False, reason)

    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != self._target_python.py_version:
            return (False, 'Python version is incorrect')

    supports_python = _check_link_requires_python(
        link, version_info=self._target_python.py_version_info,
        ignore_requires_python=self._ignore_requires_python,
    )
    if not supports_python:
        # Return None for the reason text to suppress calling
        # _log_skipped_link().
        return (False, None)

    logger.debug('Found link %s, version: %s', link, version)

    return (True, version)
[ "def", "evaluate_link", "(", "self", ",", "link", ":", "Link", ")", "->", "Tuple", "[", "bool", ",", "Optional", "[", "str", "]", "]", ":", "version", "=", "None", "if", "link", ".", "is_yanked", "and", "not", "self", ".", "_allow_yanked", ":", "reason", "=", "link", ".", "yanked_reason", "or", "'<none given>'", "return", "(", "False", ",", "f'yanked for reason: {reason}'", ")", "if", "link", ".", "egg_fragment", ":", "egg_info", "=", "link", ".", "egg_fragment", "ext", "=", "link", ".", "ext", "else", ":", "egg_info", ",", "ext", "=", "link", ".", "splitext", "(", ")", "if", "not", "ext", ":", "return", "(", "False", ",", "'not a file'", ")", "if", "ext", "not", "in", "SUPPORTED_EXTENSIONS", ":", "return", "(", "False", ",", "f'unsupported archive format: {ext}'", ")", "if", "\"binary\"", "not", "in", "self", ".", "_formats", "and", "ext", "==", "WHEEL_EXTENSION", ":", "reason", "=", "'No binaries permitted for {}'", ".", "format", "(", "self", ".", "project_name", ")", "return", "(", "False", ",", "reason", ")", "if", "\"macosx10\"", "in", "link", ".", "path", "and", "ext", "==", "'.zip'", ":", "return", "(", "False", ",", "'macosx10 one'", ")", "if", "ext", "==", "WHEEL_EXTENSION", ":", "try", ":", "wheel", "=", "Wheel", "(", "link", ".", "filename", ")", "except", "InvalidWheelFilename", ":", "return", "(", "False", ",", "'invalid wheel filename'", ")", "if", "canonicalize_name", "(", "wheel", ".", "name", ")", "!=", "self", ".", "_canonical_name", ":", "reason", "=", "'wrong project name (not {})'", ".", "format", "(", "self", ".", "project_name", ")", "return", "(", "False", ",", "reason", ")", "supported_tags", "=", "self", ".", "_target_python", ".", "get_tags", "(", ")", "if", "not", "wheel", ".", "supported", "(", "supported_tags", ")", ":", "# Include the wheel's tags in the reason string to", "# simplify troubleshooting compatibility issues.", "file_tags", "=", "wheel", ".", "get_formatted_file_tags", "(", ")", "reason", "=", "(", "\"none of the wheel's tags ({}) are compatible \"", "\"(run pip debug --verbose to show compatible tags)\"", ".", "format", "(", "', '", ".", "join", "(", "file_tags", ")", ")", ")", "return", "(", "False", ",", "reason", ")", "version", "=", "wheel", ".", "version", "# This should be up by the self.ok_binary check, but see issue 2700.", "if", "\"source\"", "not", "in", "self", ".", "_formats", "and", "ext", "!=", "WHEEL_EXTENSION", ":", "reason", "=", "f'No sources permitted for {self.project_name}'", "return", "(", "False", ",", "reason", ")", "if", "not", "version", ":", "version", "=", "_extract_version_from_fragment", "(", "egg_info", ",", "self", ".", "_canonical_name", ",", ")", "if", "not", "version", ":", "reason", "=", "f'Missing project version for {self.project_name}'", "return", "(", "False", ",", "reason", ")", "match", "=", "self", ".", "_py_version_re", ".", "search", "(", "version", ")", "if", "match", ":", "version", "=", "version", "[", ":", "match", ".", "start", "(", ")", "]", "py_version", "=", "match", ".", "group", "(", "1", ")", "if", "py_version", "!=", "self", ".", "_target_python", ".", "py_version", ":", "return", "(", "False", ",", "'Python version is incorrect'", ")", "supports_python", "=", "_check_link_requires_python", "(", "link", ",", "version_info", "=", "self", ".", "_target_python", ".", "py_version_info", ",", "ignore_requires_python", "=", "self", ".", "_ignore_requires_python", ",", ")", "if", "not", "supports_python", ":", "# Return None for the reason text to suppress calling", "# _log_skipped_link().", 
"return", "(", "False", ",", "None", ")", "logger", ".", "debug", "(", "'Found link %s, version: %s'", ",", "link", ",", "version", ")", "return", "(", "True", ",", "version", ")" ]
[ 143, 4 ]
[ 228, 30 ]
python
en
['en', 'error', 'th']
False
CandidatePreferences.__init__
( self, prefer_binary: bool = False, allow_all_prereleases: bool = False, )
:param allow_all_prereleases: Whether to allow all pre-releases.
:param allow_all_prereleases: Whether to allow all pre-releases.
def __init__(
    self,
    prefer_binary: bool = False,
    allow_all_prereleases: bool = False,
) -> None:
    """
    :param allow_all_prereleases: Whether to allow all pre-releases.
    """
    self.allow_all_prereleases = allow_all_prereleases
    self.prefer_binary = prefer_binary
[ "def", "__init__", "(", "self", ",", "prefer_binary", ":", "bool", "=", "False", ",", "allow_all_prereleases", ":", "bool", "=", "False", ",", ")", "->", "None", ":", "self", ".", "allow_all_prereleases", "=", "allow_all_prereleases", "self", ".", "prefer_binary", "=", "prefer_binary" ]
[ 312, 4 ]
[ 321, 42 ]
python
en
['en', 'error', 'th']
False
BestCandidateResult.__init__
( self, candidates: List[InstallationCandidate], applicable_candidates: List[InstallationCandidate], best_candidate: Optional[InstallationCandidate], )
:param candidates: A sequence of all available candidates found.
:param applicable_candidates: The applicable candidates.
:param best_candidate: The most preferred candidate found, or None if no applicable candidates were found.
:param candidates: A sequence of all available candidates found.
:param applicable_candidates: The applicable candidates.
:param best_candidate: The most preferred candidate found, or None if no applicable candidates were found.
def __init__(
    self,
    candidates: List[InstallationCandidate],
    applicable_candidates: List[InstallationCandidate],
    best_candidate: Optional[InstallationCandidate],
) -> None:
    """
    :param candidates: A sequence of all available candidates found.
    :param applicable_candidates: The applicable candidates.
    :param best_candidate: The most preferred candidate found, or None
        if no applicable candidates were found.
    """
    assert set(applicable_candidates) <= set(candidates)

    if best_candidate is None:
        assert not applicable_candidates
    else:
        assert best_candidate in applicable_candidates

    self._applicable_candidates = applicable_candidates
    self._candidates = candidates

    self.best_candidate = best_candidate
[ "def", "__init__", "(", "self", ",", "candidates", ":", "List", "[", "InstallationCandidate", "]", ",", "applicable_candidates", ":", "List", "[", "InstallationCandidate", "]", ",", "best_candidate", ":", "Optional", "[", "InstallationCandidate", "]", ",", ")", "->", "None", ":", "assert", "set", "(", "applicable_candidates", ")", "<=", "set", "(", "candidates", ")", "if", "best_candidate", "is", "None", ":", "assert", "not", "applicable_candidates", "else", ":", "assert", "best_candidate", "in", "applicable_candidates", "self", ".", "_applicable_candidates", "=", "applicable_candidates", "self", ".", "_candidates", "=", "candidates", "self", ".", "best_candidate", "=", "best_candidate" ]
[ 331, 4 ]
[ 353, 44 ]
python
en
['en', 'error', 'th']
False
BestCandidateResult.iter_all
(self)
Iterate through all candidates.
Iterate through all candidates.
def iter_all(self) -> Iterable[InstallationCandidate]:
    """Iterate through all candidates.
    """
    return iter(self._candidates)
[ "def", "iter_all", "(", "self", ")", "->", "Iterable", "[", "InstallationCandidate", "]", ":", "return", "iter", "(", "self", ".", "_candidates", ")" ]
[ 355, 4 ]
[ 358, 37 ]
python
en
['en', 'en', 'en']
True
BestCandidateResult.iter_applicable
(self)
Iterate through the applicable candidates.
Iterate through the applicable candidates.
def iter_applicable(self) -> Iterable[InstallationCandidate]:
    """Iterate through the applicable candidates.
    """
    return iter(self._applicable_candidates)
[ "def", "iter_applicable", "(", "self", ")", "->", "Iterable", "[", "InstallationCandidate", "]", ":", "return", "iter", "(", "self", ".", "_applicable_candidates", ")" ]
[ 360, 4 ]
[ 363, 48 ]
python
en
['en', 'en', 'en']
True
CandidateEvaluator.create
( cls, project_name: str, target_python: Optional[TargetPython] = None, prefer_binary: bool = False, allow_all_prereleases: bool = False, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, )
Create a CandidateEvaluator object.

:param target_python: The target Python interpreter to use when checking compatibility. If None (the default), a TargetPython object will be constructed from the running Python.
:param specifier: An optional object implementing `filter` (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable versions.
:param hashes: An optional collection of allowed hashes.
Create a CandidateEvaluator object.
def create(
    cls,
    project_name: str,
    target_python: Optional[TargetPython] = None,
    prefer_binary: bool = False,
    allow_all_prereleases: bool = False,
    specifier: Optional[specifiers.BaseSpecifier] = None,
    hashes: Optional[Hashes] = None,
) -> "CandidateEvaluator":
    """Create a CandidateEvaluator object.

    :param target_python: The target Python interpreter to use when
        checking compatibility. If None (the default), a TargetPython
        object will be constructed from the running Python.
    :param specifier: An optional object implementing `filter`
        (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
        versions.
    :param hashes: An optional collection of allowed hashes.
    """
    if target_python is None:
        target_python = TargetPython()
    if specifier is None:
        specifier = specifiers.SpecifierSet()

    supported_tags = target_python.get_tags()

    return cls(
        project_name=project_name,
        supported_tags=supported_tags,
        specifier=specifier,
        prefer_binary=prefer_binary,
        allow_all_prereleases=allow_all_prereleases,
        hashes=hashes,
    )
[ "def", "create", "(", "cls", ",", "project_name", ":", "str", ",", "target_python", ":", "Optional", "[", "TargetPython", "]", "=", "None", ",", "prefer_binary", ":", "bool", "=", "False", ",", "allow_all_prereleases", ":", "bool", "=", "False", ",", "specifier", ":", "Optional", "[", "specifiers", ".", "BaseSpecifier", "]", "=", "None", ",", "hashes", ":", "Optional", "[", "Hashes", "]", "=", "None", ",", ")", "->", "\"CandidateEvaluator\"", ":", "if", "target_python", "is", "None", ":", "target_python", "=", "TargetPython", "(", ")", "if", "specifier", "is", "None", ":", "specifier", "=", "specifiers", ".", "SpecifierSet", "(", ")", "supported_tags", "=", "target_python", ".", "get_tags", "(", ")", "return", "cls", "(", "project_name", "=", "project_name", ",", "supported_tags", "=", "supported_tags", ",", "specifier", "=", "specifier", ",", "prefer_binary", "=", "prefer_binary", ",", "allow_all_prereleases", "=", "allow_all_prereleases", ",", "hashes", "=", "hashes", ",", ")" ]
[ 374, 4 ]
[ 407, 9 ]
python
en
['pt', 'en', 'en']
True
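A hedged construction sketch; CandidateEvaluator lives in pip's internal package_finder module, and internal APIs can move between pip releases:

from packaging.specifiers import SpecifierSet
from pip._internal.index.package_finder import CandidateEvaluator

# Tags default to those of the running interpreter via TargetPython().
evaluator = CandidateEvaluator.create(
    project_name='requests',
    specifier=SpecifierSet('>=2.0'),
)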
CandidateEvaluator.__init__
( self, project_name: str, supported_tags: List[Tag], specifier: specifiers.BaseSpecifier, prefer_binary: bool = False, allow_all_prereleases: bool = False, hashes: Optional[Hashes] = None, )
:param supported_tags: The PEP 425 tags supported by the target Python in order of preference (most preferred first).
:param supported_tags: The PEP 425 tags supported by the target Python in order of preference (most preferred first).
def __init__(
    self,
    project_name: str,
    supported_tags: List[Tag],
    specifier: specifiers.BaseSpecifier,
    prefer_binary: bool = False,
    allow_all_prereleases: bool = False,
    hashes: Optional[Hashes] = None,
) -> None:
    """
    :param supported_tags: The PEP 425 tags supported by the target
        Python in order of preference (most preferred first).
    """
    self._allow_all_prereleases = allow_all_prereleases
    self._hashes = hashes
    self._prefer_binary = prefer_binary
    self._project_name = project_name
    self._specifier = specifier
    self._supported_tags = supported_tags
    # Since the index of the tag in the _supported_tags list is used
    # as a priority, precompute a map from tag to index/priority to be
    # used in wheel.find_most_preferred_tag.
    self._wheel_tag_preferences = {
        tag: idx for idx, tag in enumerate(supported_tags)
    }
[ "def", "__init__", "(", "self", ",", "project_name", ":", "str", ",", "supported_tags", ":", "List", "[", "Tag", "]", ",", "specifier", ":", "specifiers", ".", "BaseSpecifier", ",", "prefer_binary", ":", "bool", "=", "False", ",", "allow_all_prereleases", ":", "bool", "=", "False", ",", "hashes", ":", "Optional", "[", "Hashes", "]", "=", "None", ",", ")", "->", "None", ":", "self", ".", "_allow_all_prereleases", "=", "allow_all_prereleases", "self", ".", "_hashes", "=", "hashes", "self", ".", "_prefer_binary", "=", "prefer_binary", "self", ".", "_project_name", "=", "project_name", "self", ".", "_specifier", "=", "specifier", "self", ".", "_supported_tags", "=", "supported_tags", "# Since the index of the tag in the _supported_tags list is used", "# as a priority, precompute a map from tag to index/priority to be", "# used in wheel.find_most_preferred_tag.", "self", ".", "_wheel_tag_preferences", "=", "{", "tag", ":", "idx", "for", "idx", ",", "tag", "in", "enumerate", "(", "supported_tags", ")", "}" ]
[ 409, 4 ]
[ 433, 9 ]
python
en
['en', 'error', 'th']
False
CandidateEvaluator.get_applicable_candidates
( self, candidates: List[InstallationCandidate], )
Return the applicable candidates from a list of candidates.
Return the applicable candidates from a list of candidates.
def get_applicable_candidates(
    self,
    candidates: List[InstallationCandidate],
) -> List[InstallationCandidate]:
    """
    Return the applicable candidates from a list of candidates.
    """
    # Using None infers from the specifier instead.
    allow_prereleases = self._allow_all_prereleases or None
    specifier = self._specifier
    versions = {
        str(v) for v in specifier.filter(
            # We turn the version object into a str here because otherwise
            # when we're debundled but setuptools isn't, Python will see
            # packaging.version.Version and
            # pkg_resources._vendor.packaging.version.Version as different
            # types. This way we'll use a str as a common data interchange
            # format. If we stop using the pkg_resources provided specifier
            # and start using our own, we can drop the cast to str().
            (str(c.version) for c in candidates),
            prereleases=allow_prereleases,
        )
    }

    # Again, converting version to str to deal with debundling.
    applicable_candidates = [
        c for c in candidates if str(c.version) in versions
    ]

    filtered_applicable_candidates = filter_unallowed_hashes(
        candidates=applicable_candidates,
        hashes=self._hashes,
        project_name=self._project_name,
    )

    return sorted(filtered_applicable_candidates, key=self._sort_key)
[ "def", "get_applicable_candidates", "(", "self", ",", "candidates", ":", "List", "[", "InstallationCandidate", "]", ",", ")", "->", "List", "[", "InstallationCandidate", "]", ":", "# Using None infers from the specifier instead.", "allow_prereleases", "=", "self", ".", "_allow_all_prereleases", "or", "None", "specifier", "=", "self", ".", "_specifier", "versions", "=", "{", "str", "(", "v", ")", "for", "v", "in", "specifier", ".", "filter", "(", "# We turn the version object into a str here because otherwise", "# when we're debundled but setuptools isn't, Python will see", "# packaging.version.Version and", "# pkg_resources._vendor.packaging.version.Version as different", "# types. This way we'll use a str as a common data interchange", "# format. If we stop using the pkg_resources provided specifier", "# and start using our own, we can drop the cast to str().", "(", "str", "(", "c", ".", "version", ")", "for", "c", "in", "candidates", ")", ",", "prereleases", "=", "allow_prereleases", ",", ")", "}", "# Again, converting version to str to deal with debundling.", "applicable_candidates", "=", "[", "c", "for", "c", "in", "candidates", "if", "str", "(", "c", ".", "version", ")", "in", "versions", "]", "filtered_applicable_candidates", "=", "filter_unallowed_hashes", "(", "candidates", "=", "applicable_candidates", ",", "hashes", "=", "self", ".", "_hashes", ",", "project_name", "=", "self", ".", "_project_name", ",", ")", "return", "sorted", "(", "filtered_applicable_candidates", ",", "key", "=", "self", ".", "_sort_key", ")" ]
[ 435, 4 ]
[ 470, 73 ]
python
en
['en', 'error', 'th']
False
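A note on the filter step above: it delegates to the specifier's filter() method from packaging (which pip vendors), and passing prereleases=None makes packaging infer the pre-release rule from the specifier itself. A minimal, self-contained sketch with made-up version strings:

from packaging.specifiers import SpecifierSet

specifier = SpecifierSet(">=1.0,<2.0")
candidates = ["0.9", "1.0", "1.5", "1.6rc1", "2.0"]

# Pre-releases are dropped by default, matching allow_prereleases=None above.
print(list(specifier.filter(candidates)))                    # ['1.0', '1.5']
print(list(specifier.filter(candidates, prereleases=True)))  # ['1.0', '1.5', '1.6rc1']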
CandidateEvaluator._sort_key
(self, candidate: InstallationCandidate)
Function to pass as the `key` argument to a call to sorted() to sort InstallationCandidates by preference. Returns a tuple such that tuples sorting as greater using Python's default comparison operator are more preferred. The preference is as follows: First and foremost, candidates with allowed (matching) hashes are always preferred over candidates without matching hashes. This is because e.g. if the only candidate with an allowed hash is yanked, we still want to use that candidate. Second, excepting hash considerations, candidates that have been yanked (in the sense of PEP 592) are always less preferred than candidates that haven't been yanked. Then: If not finding wheels, they are sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs 2. wheels ordered via Wheel.support_index_min(self._supported_tags) 3. source archives If prefer_binary was set, then all wheels are sorted above sources. Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version would have to be considered equal.
Function to pass as the `key` argument to a call to sorted() to sort InstallationCandidates by preference.
def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey: """ Function to pass as the `key` argument to a call to sorted() to sort InstallationCandidates by preference. Returns a tuple such that tuples sorting as greater using Python's default comparison operator are more preferred. The preference is as follows: First and foremost, candidates with allowed (matching) hashes are always preferred over candidates without matching hashes. This is because e.g. if the only candidate with an allowed hash is yanked, we still want to use that candidate. Second, excepting hash considerations, candidates that have been yanked (in the sense of PEP 592) are always less preferred than candidates that haven't been yanked. Then: If not finding wheels, they are sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs 2. wheels ordered via Wheel.support_index_min(self._supported_tags) 3. source archives If prefer_binary was set, then all wheels are sorted above sources. Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ valid_tags = self._supported_tags support_num = len(valid_tags) build_tag: BuildTag = () binary_preference = 0 link = candidate.link if link.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(link.filename) try: pri = -(wheel.find_most_preferred_tag( valid_tags, self._wheel_tag_preferences )) except ValueError: raise UnsupportedWheel( "{} is not a supported wheel for this platform. It " "can't be sorted.".format(wheel.filename) ) if self._prefer_binary: binary_preference = 1 if wheel.build_tag is not None: match = re.match(r'^(\d+)(.*)$', wheel.build_tag) build_tag_groups = match.groups() build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: # sdist pri = -(support_num) has_allowed_hash = int(link.is_hash_allowed(self._hashes)) yank_value = -1 * int(link.is_yanked) # -1 for yanked. return ( has_allowed_hash, yank_value, binary_preference, candidate.version, pri, build_tag, )
[ "def", "_sort_key", "(", "self", ",", "candidate", ":", "InstallationCandidate", ")", "->", "CandidateSortingKey", ":", "valid_tags", "=", "self", ".", "_supported_tags", "support_num", "=", "len", "(", "valid_tags", ")", "build_tag", ":", "BuildTag", "=", "(", ")", "binary_preference", "=", "0", "link", "=", "candidate", ".", "link", "if", "link", ".", "is_wheel", ":", "# can raise InvalidWheelFilename", "wheel", "=", "Wheel", "(", "link", ".", "filename", ")", "try", ":", "pri", "=", "-", "(", "wheel", ".", "find_most_preferred_tag", "(", "valid_tags", ",", "self", ".", "_wheel_tag_preferences", ")", ")", "except", "ValueError", ":", "raise", "UnsupportedWheel", "(", "\"{} is not a supported wheel for this platform. It \"", "\"can't be sorted.\"", ".", "format", "(", "wheel", ".", "filename", ")", ")", "if", "self", ".", "_prefer_binary", ":", "binary_preference", "=", "1", "if", "wheel", ".", "build_tag", "is", "not", "None", ":", "match", "=", "re", ".", "match", "(", "r'^(\\d+)(.*)$'", ",", "wheel", ".", "build_tag", ")", "build_tag_groups", "=", "match", ".", "groups", "(", ")", "build_tag", "=", "(", "int", "(", "build_tag_groups", "[", "0", "]", ")", ",", "build_tag_groups", "[", "1", "]", ")", "else", ":", "# sdist", "pri", "=", "-", "(", "support_num", ")", "has_allowed_hash", "=", "int", "(", "link", ".", "is_hash_allowed", "(", "self", ".", "_hashes", ")", ")", "yank_value", "=", "-", "1", "*", "int", "(", "link", ".", "is_yanked", ")", "# -1 for yanked.", "return", "(", "has_allowed_hash", ",", "yank_value", ",", "binary_preference", ",", "candidate", ".", "version", ",", "pri", ",", "build_tag", ",", ")" ]
[ 472, 4 ]
[ 532, 9 ]
python
en
['en', 'error', 'th']
False
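The returned tuple works because Python compares tuples lexicographically, so earlier fields dominate later ones: an allowed hash outranks everything, then yank status, and so on. A tiny illustration with hand-written keys (the field values are illustrative, not real pip output; plain tuples stand in for version objects):

# Hypothetical sort keys in the same field order as above:
# (has_allowed_hash, yank_value, binary_preference, version, pri, build_tag).
# Tuples compare field by field, so a hash match beats any later field.
keys = [
    (0, 0, 0, (2, 0), -1, ()),   # newer version, but no allowed hash
    (1, -1, 0, (1, 0), -5, ()),  # allowed hash, yanked, older version
]
print(max(keys))  # (1, -1, 0, (1, 0), -5, ()) -- the hash match still wins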
CandidateEvaluator.sort_best_candidate
( self, candidates: List[InstallationCandidate], )
Return the best candidate per the instance's sort order, or None if no candidate is acceptable.
Return the best candidate per the instance's sort order, or None if no candidate is acceptable.
def sort_best_candidate( self, candidates: List[InstallationCandidate], ) -> Optional[InstallationCandidate]: """ Return the best candidate per the instance's sort order, or None if no candidate is acceptable. """ if not candidates: return None best_candidate = max(candidates, key=self._sort_key) return best_candidate
[ "def", "sort_best_candidate", "(", "self", ",", "candidates", ":", "List", "[", "InstallationCandidate", "]", ",", ")", "->", "Optional", "[", "InstallationCandidate", "]", ":", "if", "not", "candidates", ":", "return", "None", "best_candidate", "=", "max", "(", "candidates", ",", "key", "=", "self", ".", "_sort_key", ")", "return", "best_candidate" ]
[ 534, 4 ]
[ 545, 29 ]
python
en
['en', 'error', 'th']
False
CandidateEvaluator.compute_best_candidate
( self, candidates: List[InstallationCandidate], )
Compute and return a `BestCandidateResult` instance.
Compute and return a `BestCandidateResult` instance.
def compute_best_candidate( self, candidates: List[InstallationCandidate], ) -> BestCandidateResult: """ Compute and return a `BestCandidateResult` instance. """ applicable_candidates = self.get_applicable_candidates(candidates) best_candidate = self.sort_best_candidate(applicable_candidates) return BestCandidateResult( candidates, applicable_candidates=applicable_candidates, best_candidate=best_candidate, )
[ "def", "compute_best_candidate", "(", "self", ",", "candidates", ":", "List", "[", "InstallationCandidate", "]", ",", ")", "->", "BestCandidateResult", ":", "applicable_candidates", "=", "self", ".", "get_applicable_candidates", "(", "candidates", ")", "best_candidate", "=", "self", ".", "sort_best_candidate", "(", "applicable_candidates", ")", "return", "BestCandidateResult", "(", "candidates", ",", "applicable_candidates", "=", "applicable_candidates", ",", "best_candidate", "=", "best_candidate", ",", ")" ]
[ 547, 4 ]
[ 562, 9 ]
python
en
['en', 'error', 'th']
False
PackageFinder.__init__
( self, link_collector: LinkCollector, target_python: TargetPython, allow_yanked: bool, format_control: Optional[FormatControl] = None, candidate_prefs: Optional[CandidatePreferences] = None, ignore_requires_python: Optional[bool] = None, )
This constructor is primarily meant to be used by the create() class method and from tests. :param format_control: A FormatControl object, used to control the selection of source packages / binary packages when consulting the index and links. :param candidate_prefs: Options to use when creating a CandidateEvaluator object.
This constructor is primarily meant to be used by the create() class method and from tests.
def __init__( self, link_collector: LinkCollector, target_python: TargetPython, allow_yanked: bool, format_control: Optional[FormatControl] = None, candidate_prefs: Optional[CandidatePreferences] = None, ignore_requires_python: Optional[bool] = None, ) -> None: """ This constructor is primarily meant to be used by the create() class method and from tests. :param format_control: A FormatControl object, used to control the selection of source packages / binary packages when consulting the index and links. :param candidate_prefs: Options to use when creating a CandidateEvaluator object. """ if candidate_prefs is None: candidate_prefs = CandidatePreferences() format_control = format_control or FormatControl(set(), set()) self._allow_yanked = allow_yanked self._candidate_prefs = candidate_prefs self._ignore_requires_python = ignore_requires_python self._link_collector = link_collector self._target_python = target_python self.format_control = format_control # These are boring links that have already been logged somehow. self._logged_links: Set[Link] = set()
[ "def", "__init__", "(", "self", ",", "link_collector", ":", "LinkCollector", ",", "target_python", ":", "TargetPython", ",", "allow_yanked", ":", "bool", ",", "format_control", ":", "Optional", "[", "FormatControl", "]", "=", "None", ",", "candidate_prefs", ":", "Optional", "[", "CandidatePreferences", "]", "=", "None", ",", "ignore_requires_python", ":", "Optional", "[", "bool", "]", "=", "None", ",", ")", "->", "None", ":", "if", "candidate_prefs", "is", "None", ":", "candidate_prefs", "=", "CandidatePreferences", "(", ")", "format_control", "=", "format_control", "or", "FormatControl", "(", "set", "(", ")", ",", "set", "(", ")", ")", "self", ".", "_allow_yanked", "=", "allow_yanked", "self", ".", "_candidate_prefs", "=", "candidate_prefs", "self", ".", "_ignore_requires_python", "=", "ignore_requires_python", "self", ".", "_link_collector", "=", "link_collector", "self", ".", "_target_python", "=", "target_python", "self", ".", "format_control", "=", "format_control", "# These are boring links that have already been logged somehow.", "self", ".", "_logged_links", ":", "Set", "[", "Link", "]", "=", "set", "(", ")" ]
[ 572, 4 ]
[ 605, 45 ]
python
en
['en', 'error', 'th']
False
PackageFinder.create
( cls, link_collector: LinkCollector, selection_prefs: SelectionPreferences, target_python: Optional[TargetPython] = None, )
Create a PackageFinder. :param selection_prefs: The candidate selection preferences, as a SelectionPreferences object. :param target_python: The target Python interpreter to use when checking compatibility. If None (the default), a TargetPython object will be constructed from the running Python.
Create a PackageFinder.
def create( cls, link_collector: LinkCollector, selection_prefs: SelectionPreferences, target_python: Optional[TargetPython] = None, ) -> "PackageFinder": """Create a PackageFinder. :param selection_prefs: The candidate selection preferences, as a SelectionPreferences object. :param target_python: The target Python interpreter to use when checking compatibility. If None (the default), a TargetPython object will be constructed from the running Python. """ if target_python is None: target_python = TargetPython() candidate_prefs = CandidatePreferences( prefer_binary=selection_prefs.prefer_binary, allow_all_prereleases=selection_prefs.allow_all_prereleases, ) return cls( candidate_prefs=candidate_prefs, link_collector=link_collector, target_python=target_python, allow_yanked=selection_prefs.allow_yanked, format_control=selection_prefs.format_control, ignore_requires_python=selection_prefs.ignore_requires_python, )
[ "def", "create", "(", "cls", ",", "link_collector", ":", "LinkCollector", ",", "selection_prefs", ":", "SelectionPreferences", ",", "target_python", ":", "Optional", "[", "TargetPython", "]", "=", "None", ",", ")", "->", "\"PackageFinder\"", ":", "if", "target_python", "is", "None", ":", "target_python", "=", "TargetPython", "(", ")", "candidate_prefs", "=", "CandidatePreferences", "(", "prefer_binary", "=", "selection_prefs", ".", "prefer_binary", ",", "allow_all_prereleases", "=", "selection_prefs", ".", "allow_all_prereleases", ",", ")", "return", "cls", "(", "candidate_prefs", "=", "candidate_prefs", ",", "link_collector", "=", "link_collector", ",", "target_python", "=", "target_python", ",", "allow_yanked", "=", "selection_prefs", ".", "allow_yanked", ",", "format_control", "=", "selection_prefs", ".", "format_control", ",", "ignore_requires_python", "=", "selection_prefs", ".", "ignore_requires_python", ",", ")" ]
[ 612, 4 ]
[ 641, 9 ]
python
en
['en', 'af', 'en']
True
PackageFinder._sort_links
(self, links: Iterable[Link])
Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates
Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates
def _sort_links(self, links: Iterable[Link]) -> List[Link]: """ Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates """ eggs, no_eggs = [], [] seen: Set[Link] = set() for link in links: if link not in seen: seen.add(link) if link.egg_fragment: eggs.append(link) else: no_eggs.append(link) return no_eggs + eggs
[ "def", "_sort_links", "(", "self", ",", "links", ":", "Iterable", "[", "Link", "]", ")", "->", "List", "[", "Link", "]", ":", "eggs", ",", "no_eggs", "=", "[", "]", ",", "[", "]", "seen", ":", "Set", "[", "Link", "]", "=", "set", "(", ")", "for", "link", "in", "links", ":", "if", "link", "not", "in", "seen", ":", "seen", ".", "add", "(", "link", ")", "if", "link", ".", "egg_fragment", ":", "eggs", ".", "append", "(", "link", ")", "else", ":", "no_eggs", ".", "append", "(", "link", ")", "return", "no_eggs", "+", "eggs" ]
[ 695, 4 ]
[ 709, 29 ]
python
en
['en', 'error', 'th']
False
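The ordering is a stable partition with de-duplication. A self-contained sketch with strings standing in for Link objects and a hypothetical predicate in place of link.egg_fragment:

def sort_links_sketch(links, is_egg):
    # Stable partition: keep first occurrence only, eggs pushed to the back.
    eggs, no_eggs, seen = [], [], set()
    for link in links:
        if link not in seen:
            seen.add(link)
            (eggs if is_egg(link) else no_eggs).append(link)
    return no_eggs + eggs

links = ["pkg-1.0.tar.gz", "pkg#egg=pkg", "pkg-1.0.tar.gz"]
print(sort_links_sketch(links, lambda link: "#egg=" in link))
# ['pkg-1.0.tar.gz', 'pkg#egg=pkg']  (duplicate dropped, egg link last)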
PackageFinder.get_install_candidate
( self, link_evaluator: LinkEvaluator, link: Link )
If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None.
If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None.
def get_install_candidate( self, link_evaluator: LinkEvaluator, link: Link ) -> Optional[InstallationCandidate]: """ If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None. """ is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, )
[ "def", "get_install_candidate", "(", "self", ",", "link_evaluator", ":", "LinkEvaluator", ",", "link", ":", "Link", ")", "->", "Optional", "[", "InstallationCandidate", "]", ":", "is_candidate", ",", "result", "=", "link_evaluator", ".", "evaluate_link", "(", "link", ")", "if", "not", "is_candidate", ":", "if", "result", ":", "self", ".", "_log_skipped_link", "(", "link", ",", "reason", "=", "result", ")", "return", "None", "return", "InstallationCandidate", "(", "name", "=", "link_evaluator", ".", "project_name", ",", "link", "=", "link", ",", "version", "=", "result", ",", ")" ]
[ 718, 4 ]
[ 735, 9 ]
python
en
['en', 'error', 'th']
False
PackageFinder.evaluate_links
( self, link_evaluator: LinkEvaluator, links: Iterable[Link] )
Convert links that are candidates to InstallationCandidate objects.
Convert links that are candidates to InstallationCandidate objects.
def evaluate_links( self, link_evaluator: LinkEvaluator, links: Iterable[Link] ) -> List[InstallationCandidate]: """ Convert links that are candidates to InstallationCandidate objects. """ candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates
[ "def", "evaluate_links", "(", "self", ",", "link_evaluator", ":", "LinkEvaluator", ",", "links", ":", "Iterable", "[", "Link", "]", ")", "->", "List", "[", "InstallationCandidate", "]", ":", "candidates", "=", "[", "]", "for", "link", "in", "self", ".", "_sort_links", "(", "links", ")", ":", "candidate", "=", "self", ".", "get_install_candidate", "(", "link_evaluator", ",", "link", ")", "if", "candidate", "is", "not", "None", ":", "candidates", ".", "append", "(", "candidate", ")", "return", "candidates" ]
[ 737, 4 ]
[ 749, 25 ]
python
en
['en', 'error', 'th']
False
PackageFinder.find_all_candidates
(self, project_name: str)
Find all available InstallationCandidates for project_name. This checks index_urls and find_links. All versions found are returned as an InstallationCandidate list. See LinkEvaluator.evaluate_link() for details on which files are accepted.
Find all available InstallationCandidates for project_name.
def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]: """Find all available InstallationCandidate for project_name This checks index_urls and find_links. All versions found are returned as an InstallationCandidate list. See LinkEvaluator.evaluate_link() for details on which files are accepted. """ link_evaluator = self.make_link_evaluator(project_name) collected_sources = self._link_collector.collect_sources( project_name=project_name, candidates_from_page=functools.partial( self.process_project_url, link_evaluator=link_evaluator, ), ) page_candidates_it = itertools.chain.from_iterable( source.page_candidates() for sources in collected_sources for source in sources if source is not None ) page_candidates = list(page_candidates_it) file_links_it = itertools.chain.from_iterable( source.file_links() for sources in collected_sources for source in sources if source is not None ) file_candidates = self.evaluate_links( link_evaluator, sorted(file_links_it, reverse=True), ) if logger.isEnabledFor(logging.DEBUG) and file_candidates: paths = [url_to_path(c.link.url) for c in file_candidates] logger.debug("Local files found: %s", ", ".join(paths)) # This is an intentional priority ordering return file_candidates + page_candidates
[ "def", "find_all_candidates", "(", "self", ",", "project_name", ":", "str", ")", "->", "List", "[", "InstallationCandidate", "]", ":", "link_evaluator", "=", "self", ".", "make_link_evaluator", "(", "project_name", ")", "collected_sources", "=", "self", ".", "_link_collector", ".", "collect_sources", "(", "project_name", "=", "project_name", ",", "candidates_from_page", "=", "functools", ".", "partial", "(", "self", ".", "process_project_url", ",", "link_evaluator", "=", "link_evaluator", ",", ")", ",", ")", "page_candidates_it", "=", "itertools", ".", "chain", ".", "from_iterable", "(", "source", ".", "page_candidates", "(", ")", "for", "sources", "in", "collected_sources", "for", "source", "in", "sources", "if", "source", "is", "not", "None", ")", "page_candidates", "=", "list", "(", "page_candidates_it", ")", "file_links_it", "=", "itertools", ".", "chain", ".", "from_iterable", "(", "source", ".", "file_links", "(", ")", "for", "sources", "in", "collected_sources", "for", "source", "in", "sources", "if", "source", "is", "not", "None", ")", "file_candidates", "=", "self", ".", "evaluate_links", "(", "link_evaluator", ",", "sorted", "(", "file_links_it", ",", "reverse", "=", "True", ")", ",", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", "and", "file_candidates", ":", "paths", "=", "[", "url_to_path", "(", "c", ".", "link", ".", "url", ")", "for", "c", "in", "file_candidates", "]", "logger", ".", "debug", "(", "\"Local files found: %s\"", ",", "\", \"", ".", "join", "(", "paths", ")", ")", "# This is an intentional priority ordering", "return", "file_candidates", "+", "page_candidates" ]
[ 772, 4 ]
[ 815, 48 ]
python
en
['en', 'en', 'en']
True
PackageFinder.make_candidate_evaluator
( self, project_name: str, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, )
Create a CandidateEvaluator object to use.
Create a CandidateEvaluator object to use.
def make_candidate_evaluator( self, project_name: str, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, ) -> CandidateEvaluator: """Create a CandidateEvaluator object to use. """ candidate_prefs = self._candidate_prefs return CandidateEvaluator.create( project_name=project_name, target_python=self._target_python, prefer_binary=candidate_prefs.prefer_binary, allow_all_prereleases=candidate_prefs.allow_all_prereleases, specifier=specifier, hashes=hashes, )
[ "def", "make_candidate_evaluator", "(", "self", ",", "project_name", ":", "str", ",", "specifier", ":", "Optional", "[", "specifiers", ".", "BaseSpecifier", "]", "=", "None", ",", "hashes", ":", "Optional", "[", "Hashes", "]", "=", "None", ",", ")", "->", "CandidateEvaluator", ":", "candidate_prefs", "=", "self", ".", "_candidate_prefs", "return", "CandidateEvaluator", ".", "create", "(", "project_name", "=", "project_name", ",", "target_python", "=", "self", ".", "_target_python", ",", "prefer_binary", "=", "candidate_prefs", ".", "prefer_binary", ",", "allow_all_prereleases", "=", "candidate_prefs", ".", "allow_all_prereleases", ",", "specifier", "=", "specifier", ",", "hashes", "=", "hashes", ",", ")" ]
[ 817, 4 ]
[ 833, 9 ]
python
en
['en', 'en', 'en']
True
PackageFinder.find_best_candidate
( self, project_name: str, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, )
Find matches for the given project and specifier. :param specifier: An optional object implementing `filter` (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable versions. :return: A `BestCandidateResult` instance.
Find matches for the given project and specifier.
def find_best_candidate( self, project_name: str, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, ) -> BestCandidateResult: """Find matches for the given project and specifier. :param specifier: An optional object implementing `filter` (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable versions. :return: A `BestCandidateResult` instance. """ candidates = self.find_all_candidates(project_name) candidate_evaluator = self.make_candidate_evaluator( project_name=project_name, specifier=specifier, hashes=hashes, ) return candidate_evaluator.compute_best_candidate(candidates)
[ "def", "find_best_candidate", "(", "self", ",", "project_name", ":", "str", ",", "specifier", ":", "Optional", "[", "specifiers", ".", "BaseSpecifier", "]", "=", "None", ",", "hashes", ":", "Optional", "[", "Hashes", "]", "=", "None", ",", ")", "->", "BestCandidateResult", ":", "candidates", "=", "self", ".", "find_all_candidates", "(", "project_name", ")", "candidate_evaluator", "=", "self", ".", "make_candidate_evaluator", "(", "project_name", "=", "project_name", ",", "specifier", "=", "specifier", ",", "hashes", "=", "hashes", ",", ")", "return", "candidate_evaluator", ".", "compute_best_candidate", "(", "candidates", ")" ]
[ 836, 4 ]
[ 856, 69 ]
python
en
['en', 'en', 'en']
True
PackageFinder.find_requirement
( self, req: InstallRequirement, upgrade: bool )
Try to find a Link matching req. Expects req, an InstallRequirement, and upgrade, a boolean. Returns an InstallationCandidate if found; raises DistributionNotFound or BestVersionAlreadyInstalled otherwise.
Try to find a Link matching req
def find_requirement( self, req: InstallRequirement, upgrade: bool ) -> Optional[InstallationCandidate]: """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a InstallationCandidate if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ hashes = req.hashes(trust_internet=False) best_candidate_result = self.find_best_candidate( req.name, specifier=req.specifier, hashes=hashes, ) best_candidate = best_candidate_result.best_candidate installed_version: Optional[_BaseVersion] = None if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str: # This repeated parse_version and str() conversion is needed to # handle different vendoring sources from pip and pkg_resources. # If we stop using the pkg_resources provided specifier and start # using our own, we can drop the cast to str(). return ", ".join(sorted( {str(c.version) for c in cand_iter}, key=parse_version, )) or "none" if installed_version is None and best_candidate is None: logger.critical( 'Could not find a version that satisfies the requirement %s ' '(from versions: %s)', req, _format_versions(best_candidate_result.iter_all()), ) raise DistributionNotFound( 'No matching distribution found for {}'.format( req) ) best_installed = False if installed_version and ( best_candidate is None or best_candidate.version <= installed_version): best_installed = True if not upgrade and installed_version is not None: if best_installed: logger.debug( 'Existing installed version (%s) is most up-to-date and ' 'satisfies requirement', installed_version, ) else: logger.debug( 'Existing installed version (%s) satisfies requirement ' '(most up-to-date version is %s)', installed_version, best_candidate.version, ) return None if best_installed: # We have an existing version, and its the best version logger.debug( 'Installed version (%s) is most up-to-date (past versions: ' '%s)', installed_version, _format_versions(best_candidate_result.iter_applicable()), ) raise BestVersionAlreadyInstalled logger.debug( 'Using version %s (newest of versions: %s)', best_candidate.version, _format_versions(best_candidate_result.iter_applicable()), ) return best_candidate
[ "def", "find_requirement", "(", "self", ",", "req", ":", "InstallRequirement", ",", "upgrade", ":", "bool", ")", "->", "Optional", "[", "InstallationCandidate", "]", ":", "hashes", "=", "req", ".", "hashes", "(", "trust_internet", "=", "False", ")", "best_candidate_result", "=", "self", ".", "find_best_candidate", "(", "req", ".", "name", ",", "specifier", "=", "req", ".", "specifier", ",", "hashes", "=", "hashes", ",", ")", "best_candidate", "=", "best_candidate_result", ".", "best_candidate", "installed_version", ":", "Optional", "[", "_BaseVersion", "]", "=", "None", "if", "req", ".", "satisfied_by", "is", "not", "None", ":", "installed_version", "=", "parse_version", "(", "req", ".", "satisfied_by", ".", "version", ")", "def", "_format_versions", "(", "cand_iter", ":", "Iterable", "[", "InstallationCandidate", "]", ")", "->", "str", ":", "# This repeated parse_version and str() conversion is needed to", "# handle different vendoring sources from pip and pkg_resources.", "# If we stop using the pkg_resources provided specifier and start", "# using our own, we can drop the cast to str().", "return", "\", \"", ".", "join", "(", "sorted", "(", "{", "str", "(", "c", ".", "version", ")", "for", "c", "in", "cand_iter", "}", ",", "key", "=", "parse_version", ",", ")", ")", "or", "\"none\"", "if", "installed_version", "is", "None", "and", "best_candidate", "is", "None", ":", "logger", ".", "critical", "(", "'Could not find a version that satisfies the requirement %s '", "'(from versions: %s)'", ",", "req", ",", "_format_versions", "(", "best_candidate_result", ".", "iter_all", "(", ")", ")", ",", ")", "raise", "DistributionNotFound", "(", "'No matching distribution found for {}'", ".", "format", "(", "req", ")", ")", "best_installed", "=", "False", "if", "installed_version", "and", "(", "best_candidate", "is", "None", "or", "best_candidate", ".", "version", "<=", "installed_version", ")", ":", "best_installed", "=", "True", "if", "not", "upgrade", "and", "installed_version", "is", "not", "None", ":", "if", "best_installed", ":", "logger", ".", "debug", "(", "'Existing installed version (%s) is most up-to-date and '", "'satisfies requirement'", ",", "installed_version", ",", ")", "else", ":", "logger", ".", "debug", "(", "'Existing installed version (%s) satisfies requirement '", "'(most up-to-date version is %s)'", ",", "installed_version", ",", "best_candidate", ".", "version", ",", ")", "return", "None", "if", "best_installed", ":", "# We have an existing version, and its the best version", "logger", ".", "debug", "(", "'Installed version (%s) is most up-to-date (past versions: '", "'%s)'", ",", "installed_version", ",", "_format_versions", "(", "best_candidate_result", ".", "iter_applicable", "(", ")", ")", ",", ")", "raise", "BestVersionAlreadyInstalled", "logger", ".", "debug", "(", "'Using version %s (newest of versions: %s)'", ",", "best_candidate", ".", "version", ",", "_format_versions", "(", "best_candidate_result", ".", "iter_applicable", "(", ")", ")", ",", ")", "return", "best_candidate" ]
[ 858, 4 ]
[ 937, 29 ]
python
en
['en', 'en', 'en']
True
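The upgrade decision above ultimately reduces to one version comparison between the installed distribution and the best applicable remote candidate. A sketch using packaging.version (version strings invented for illustration):

from packaging.version import parse

installed = parse("1.4")
best_remote = parse("1.4")  # hypothetical best applicable candidate

# Mirrors the best_installed test: nothing strictly newer is available.
best_installed = best_remote is None or best_remote <= installed
print(best_installed)  # True -> with an upgrade requested, BestVersionAlreadyInstalled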
_markers
(pem_marker)
Returns the start and end PEM markers, as bytes.
Returns the start and end PEM markers, as bytes.
def _markers(pem_marker): """ Returns the start and end PEM markers, as bytes. """ if not is_bytes(pem_marker): pem_marker = pem_marker.encode('ascii') return (b'-----BEGIN ' + pem_marker + b'-----', b'-----END ' + pem_marker + b'-----')
[ "def", "_markers", "(", "pem_marker", ")", ":", "if", "not", "is_bytes", "(", "pem_marker", ")", ":", "pem_marker", "=", "pem_marker", ".", "encode", "(", "'ascii'", ")", "return", "(", "b'-----BEGIN '", "+", "pem_marker", "+", "b'-----'", ",", "b'-----END '", "+", "pem_marker", "+", "b'-----'", ")" ]
[ 23, 0 ]
[ 32, 49 ]
python
en
['en', 'error', 'th']
False
load_pem
(contents, pem_marker)
Loads a PEM file. :param contents: the contents of the file to interpret :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY' when your file has '-----BEGIN RSA PRIVATE KEY-----' and '-----END RSA PRIVATE KEY-----' markers. :return: the base64-decoded content between the start and end markers. @raise ValueError: when the content is invalid, for example when the start marker cannot be found.
Loads a PEM file.
def load_pem(contents, pem_marker): """Loads a PEM file. :param contents: the contents of the file to interpret :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY' when your file has '-----BEGIN RSA PRIVATE KEY-----' and '-----END RSA PRIVATE KEY-----' markers. :return: the base64-decoded content between the start and end markers. @raise ValueError: when the content is invalid, for example when the start marker cannot be found. """ # We want bytes, not text. If it's text, it can be converted to ASCII bytes. if not is_bytes(contents): contents = contents.encode('ascii') (pem_start, pem_end) = _markers(pem_marker) pem_lines = [] in_pem_part = False for line in contents.splitlines(): line = line.strip() # Skip empty lines if not line: continue # Handle start marker if line == pem_start: if in_pem_part: raise ValueError('Seen start marker "%s" twice' % pem_start) in_pem_part = True continue # Skip stuff before first marker if not in_pem_part: continue # Handle end marker if in_pem_part and line == pem_end: in_pem_part = False break # Load fields if b':' in line: continue pem_lines.append(line) # Do some sanity checks if not pem_lines: raise ValueError('No PEM start marker "%s" found' % pem_start) if in_pem_part: raise ValueError('No PEM end marker "%s" found' % pem_end) # Base64-decode the contents pem = b''.join(pem_lines) return base64.standard_b64decode(pem)
[ "def", "load_pem", "(", "contents", ",", "pem_marker", ")", ":", "# We want bytes, not text. If it's text, it can be converted to ASCII bytes.", "if", "not", "is_bytes", "(", "contents", ")", ":", "contents", "=", "contents", ".", "encode", "(", "'ascii'", ")", "(", "pem_start", ",", "pem_end", ")", "=", "_markers", "(", "pem_marker", ")", "pem_lines", "=", "[", "]", "in_pem_part", "=", "False", "for", "line", "in", "contents", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "# Skip empty lines", "if", "not", "line", ":", "continue", "# Handle start marker", "if", "line", "==", "pem_start", ":", "if", "in_pem_part", ":", "raise", "ValueError", "(", "'Seen start marker \"%s\" twice'", "%", "pem_start", ")", "in_pem_part", "=", "True", "continue", "# Skip stuff before first marker", "if", "not", "in_pem_part", ":", "continue", "# Handle end marker", "if", "in_pem_part", "and", "line", "==", "pem_end", ":", "in_pem_part", "=", "False", "break", "# Load fields", "if", "b':'", "in", "line", ":", "continue", "pem_lines", ".", "append", "(", "line", ")", "# Do some sanity checks", "if", "not", "pem_lines", ":", "raise", "ValueError", "(", "'No PEM start marker \"%s\" found'", "%", "pem_start", ")", "if", "in_pem_part", ":", "raise", "ValueError", "(", "'No PEM end marker \"%s\" found'", "%", "pem_end", ")", "# Base64-decode the contents", "pem", "=", "b''", ".", "join", "(", "pem_lines", ")", "return", "base64", ".", "standard_b64decode", "(", "pem", ")" ]
[ 35, 0 ]
[ 98, 41 ]
python
en
['en', 'bg', 'en']
True
save_pem
(contents, pem_marker)
Saves a PEM file. :param contents: the contents to encode in PEM format :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY' when your file has '-----BEGIN RSA PRIVATE KEY-----' and '-----END RSA PRIVATE KEY-----' markers. :return: the base64-encoded content between the start and end markers, as bytes.
Saves a PEM file.
def save_pem(contents, pem_marker): """Saves a PEM file. :param contents: the contents to encode in PEM format :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY' when your file has '-----BEGIN RSA PRIVATE KEY-----' and '-----END RSA PRIVATE KEY-----' markers. :return: the base64-encoded content between the start and end markers, as bytes. """ (pem_start, pem_end) = _markers(pem_marker) b64 = base64.standard_b64encode(contents).replace(b'\n', b'') pem_lines = [pem_start] for block_start in range(0, len(b64), 64): block = b64[block_start:block_start + 64] pem_lines.append(block) pem_lines.append(pem_end) pem_lines.append(b'') return b'\n'.join(pem_lines)
[ "def", "save_pem", "(", "contents", ",", "pem_marker", ")", ":", "(", "pem_start", ",", "pem_end", ")", "=", "_markers", "(", "pem_marker", ")", "b64", "=", "base64", ".", "standard_b64encode", "(", "contents", ")", ".", "replace", "(", "b'\\n'", ",", "b''", ")", "pem_lines", "=", "[", "pem_start", "]", "for", "block_start", "in", "range", "(", "0", ",", "len", "(", "b64", ")", ",", "64", ")", ":", "block", "=", "b64", "[", "block_start", ":", "block_start", "+", "64", "]", "pem_lines", ".", "append", "(", "block", ")", "pem_lines", ".", "append", "(", "pem_end", ")", "pem_lines", ".", "append", "(", "b''", ")", "return", "b'\\n'", ".", "join", "(", "pem_lines", ")" ]
[ 101, 0 ]
[ 125, 32 ]
python
en
['en', 'hi-Latn', 'en']
True
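Taken together, save_pem and load_pem are near-inverses: save wraps the base64 payload in 64-character lines between BEGIN/END markers, and load strips the markers (plus any 'name: value' header lines) and decodes. A standard-library round-trip sketch that mirrors that logic without importing the module itself:

import base64

payload = b"\x00\x01demo key material\xff"
b64 = base64.standard_b64encode(payload)
lines = [b"-----BEGIN DEMO-----"]
lines += [b64[i:i + 64] for i in range(0, len(b64), 64)]
lines += [b"-----END DEMO-----", b""]
pem = b"\n".join(lines)

body = pem.splitlines()[1:-1]  # strip the BEGIN/END marker lines
assert base64.standard_b64decode(b"".join(body)) == payload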
BaseItemRecommendation.__init__
(self, train_file, test_file, output_file=None, as_binary=False, rank_length=10, similarity_metric="cosine", sep='\t', output_sep='\t')
This class is the base for all item recommendation algorithms. Inherits the class Recommender and implements / adds common methods and attributes for rank approaches. :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File (with directory) to write the final predictions :type output_file: str, default None :param similarity_metric: :type similarity_metric: str, default cosine :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm :type rank_length: int, default 10 :param as_binary: If True, the explicit feedback will be transformed to binary :type as_binary: bool, default False :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t'
This class is the base for all item recommendation algorithms. Inherits the class Recommender and implements / adds common methods and attributes for rank approaches.
def __init__(self, train_file, test_file, output_file=None, as_binary=False, rank_length=10, similarity_metric="cosine", sep='\t', output_sep='\t'): """ This class is base for all item recommendation algorithms. Inherits the class Recommender and implements / adds common methods and attributes for rank approaches. :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File with dir to write the final predictions :type output_file: str, default None :param similarity_metric: :type similarity_metric: str, default cosine :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm :type rank_length: int, default 10 :param as_binary: If True, the explicit feedback will be transform to binary :type as_binary: bool, default False :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t' """ self.train_file = train_file self.test_file = test_file self.as_binary = as_binary self.similarity_metric = similarity_metric self.output_file = output_file self.rank_length = rank_length self.sep = sep self.output_sep = output_sep # internal vars self.item_to_item_id = {} self.item_id_to_item = {} self.user_to_user_id = {} self.user_id_to_user = {} self.train_set = None self.test_set = None self.users = None self.items = None self.matrix = None self.evaluation_results = None self.recommender_name = None self.extra_info_header = None self.ranking = []
[ "def", "__init__", "(", "self", ",", "train_file", ",", "test_file", ",", "output_file", "=", "None", ",", "as_binary", "=", "False", ",", "rank_length", "=", "10", ",", "similarity_metric", "=", "\"cosine\"", ",", "sep", "=", "'\\t'", ",", "output_sep", "=", "'\\t'", ")", ":", "self", ".", "train_file", "=", "train_file", "self", ".", "test_file", "=", "test_file", "self", ".", "as_binary", "=", "as_binary", "self", ".", "similarity_metric", "=", "similarity_metric", "self", ".", "output_file", "=", "output_file", "self", ".", "rank_length", "=", "rank_length", "self", ".", "sep", "=", "sep", "self", ".", "output_sep", "=", "output_sep", "# internal vars", "self", ".", "item_to_item_id", "=", "{", "}", "self", ".", "item_id_to_item", "=", "{", "}", "self", ".", "user_to_user_id", "=", "{", "}", "self", ".", "user_id_to_user", "=", "{", "}", "self", ".", "train_set", "=", "None", "self", ".", "test_set", "=", "None", "self", ".", "users", "=", "None", "self", ".", "items", "=", "None", "self", ".", "matrix", "=", "None", "self", ".", "evaluation_results", "=", "None", "self", ".", "recommender_name", "=", "None", "self", ".", "extra_info_header", "=", "None", "self", ".", "ranking", "=", "[", "]" ]
[ 19, 4 ]
[ 75, 25 ]
python
en
['en', 'error', 'th']
False
BaseItemRecommendation.read_files
(self)
Method to initialize the recommender algorithm.
Method to initialize the recommender algorithm.
def read_files(self): """ Method to initialize recommender algorithm. """ self.train_set = ReadFile(self.train_file, sep=self.sep, as_binary=self.as_binary).read() if self.test_file is not None: self.test_set = ReadFile(self.test_file).read() self.users = sorted(set(list(self.train_set['users']) + list(self.test_set['users']))) self.items = sorted(set(list(self.train_set['items']) + list(self.test_set['items']))) else: self.users = self.train_set['users'] self.items = self.train_set['items'] for i, item in enumerate(self.items): self.item_to_item_id.update({item: i}) self.item_id_to_item.update({i: item}) for u, user in enumerate(self.users): self.user_to_user_id.update({user: u}) self.user_id_to_user.update({u: user})
[ "def", "read_files", "(", "self", ")", ":", "self", ".", "train_set", "=", "ReadFile", "(", "self", ".", "train_file", ",", "sep", "=", "self", ".", "sep", ",", "as_binary", "=", "self", ".", "as_binary", ")", ".", "read", "(", ")", "if", "self", ".", "test_file", "is", "not", "None", ":", "self", ".", "test_set", "=", "ReadFile", "(", "self", ".", "test_file", ")", ".", "read", "(", ")", "self", ".", "users", "=", "sorted", "(", "set", "(", "list", "(", "self", ".", "train_set", "[", "'users'", "]", ")", "+", "list", "(", "self", ".", "test_set", "[", "'users'", "]", ")", ")", ")", "self", ".", "items", "=", "sorted", "(", "set", "(", "list", "(", "self", ".", "train_set", "[", "'items'", "]", ")", "+", "list", "(", "self", ".", "test_set", "[", "'items'", "]", ")", ")", ")", "else", ":", "self", ".", "users", "=", "self", ".", "train_set", "[", "'users'", "]", "self", ".", "items", "=", "self", ".", "train_set", "[", "'items'", "]", "for", "i", ",", "item", "in", "enumerate", "(", "self", ".", "items", ")", ":", "self", ".", "item_to_item_id", ".", "update", "(", "{", "item", ":", "i", "}", ")", "self", ".", "item_id_to_item", ".", "update", "(", "{", "i", ":", "item", "}", ")", "for", "u", ",", "user", "in", "enumerate", "(", "self", ".", "users", ")", ":", "self", ".", "user_to_user_id", ".", "update", "(", "{", "user", ":", "u", "}", ")", "self", ".", "user_id_to_user", ".", "update", "(", "{", "u", ":", "user", "}", ")" ]
[ 77, 4 ]
[ 97, 50 ]
python
en
['en', 'error', 'th']
False
BaseItemRecommendation.create_matrix
(self)
Method to create a feedback matrix
Method to create a feedback matrix
def create_matrix(self): """ Method to create a feedback matrix """ self.matrix = np.zeros((len(self.users), len(self.items))) for user in self.train_set['users']: for item in self.train_set['feedback'][user]: self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \ self.train_set['feedback'][user][item]
[ "def", "create_matrix", "(", "self", ")", ":", "self", ".", "matrix", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "users", ")", ",", "len", "(", "self", ".", "items", ")", ")", ")", "for", "user", "in", "self", ".", "train_set", "[", "'users'", "]", ":", "for", "item", "in", "self", ".", "train_set", "[", "'feedback'", "]", "[", "user", "]", ":", "self", ".", "matrix", "[", "self", ".", "user_to_user_id", "[", "user", "]", "]", "[", "self", ".", "item_to_item_id", "[", "item", "]", "]", "=", "self", ".", "train_set", "[", "'feedback'", "]", "[", "user", "]", "[", "item", "]" ]
[ 99, 4 ]
[ 110, 58 ]
python
en
['en', 'error', 'th']
False
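The matrix construction is a straightforward scatter of the nested feedback dict into a dense users x items array. A toy, self-contained version of the same idea (user/item names invented):

import numpy as np

feedback = {"u1": {"i1": 5.0, "i2": 3.0}, "u2": {"i2": 1.0}}
users = sorted(feedback)
items = sorted({item for ratings in feedback.values() for item in ratings})
user_id = {u: k for k, u in enumerate(users)}
item_id = {i: k for k, i in enumerate(items)}

matrix = np.zeros((len(users), len(items)))
for user, ratings in feedback.items():
    for item, value in ratings.items():
        matrix[user_id[user]][item_id[item]] = value
print(matrix)  # [[5. 3.]
               #  [0. 1.]]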
BaseItemRecommendation.compute_similarity
(self, transpose=False)
Method to compute a similarity matrix from the original df_matrix :param transpose: If True, calculate the similarity on the transposed matrix :type transpose: bool, default False
Method to compute a similarity matrix from the original df_matrix
def compute_similarity(self, transpose=False): """ Method to compute a similarity matrix from original df_matrix :param transpose: If True, calculate the similarity in a transpose matrix :type transpose: bool, default False """ # Calculate distance matrix if transpose: similarity_matrix = np.float32(squareform(pdist(self.matrix.T, self.similarity_metric))) else: similarity_matrix = np.float32(squareform(pdist(self.matrix, self.similarity_metric))) # Remove NaNs similarity_matrix[np.isnan(similarity_matrix)] = 1.0 # transform distances in similarities. Values in matrix range from 0-1 similarity_matrix = (similarity_matrix.max() - similarity_matrix) / similarity_matrix.max() return similarity_matrix
[ "def", "compute_similarity", "(", "self", ",", "transpose", "=", "False", ")", ":", "# Calculate distance matrix", "if", "transpose", ":", "similarity_matrix", "=", "np", ".", "float32", "(", "squareform", "(", "pdist", "(", "self", ".", "matrix", ".", "T", ",", "self", ".", "similarity_metric", ")", ")", ")", "else", ":", "similarity_matrix", "=", "np", ".", "float32", "(", "squareform", "(", "pdist", "(", "self", ".", "matrix", ",", "self", ".", "similarity_metric", ")", ")", ")", "# Remove NaNs", "similarity_matrix", "[", "np", ".", "isnan", "(", "similarity_matrix", ")", "]", "=", "1.0", "# transform distances in similarities. Values in matrix range from 0-1", "similarity_matrix", "=", "(", "similarity_matrix", ".", "max", "(", ")", "-", "similarity_matrix", ")", "/", "similarity_matrix", ".", "max", "(", ")", "return", "similarity_matrix" ]
[ 112, 4 ]
[ 132, 32 ]
python
en
['en', 'error', 'th']
False
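The distance-to-similarity conversion is a max-normalisation: after it, values lie in [0, 1] and the diagonal (distance 0) becomes 1. A small runnable sketch of the same steps on a made-up matrix:

import numpy as np
from scipy.spatial.distance import pdist, squareform

matrix = np.array([[1.0, 0.0, 1.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
distance = np.float32(squareform(pdist(matrix, "cosine")))
distance[np.isnan(distance)] = 1.0  # NaN appears for all-zero rows
similarity = (distance.max() - distance) / distance.max()
print(np.round(similarity, 2))  # diagonal is 1.0; orthogonal rows score 0.0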
BaseItemRecommendation.evaluate
(self, metrics, verbose=True, as_table=False, table_sep='\t', n_ranks=None)
Method to evaluate the final ranking :param metrics: List of evaluation metrics :type metrics: list, default ('Prec', 'Recall', 'MAP', 'NDCG') :param verbose: Print the evaluation results :type verbose: bool, default True :param as_table: Print the evaluation results as a table :type as_table: bool, default False :param table_sep: Delimiter for print results (only works with verbose=True and as_table=True) :type table_sep: str, default '\t' :param n_ranks: List of positions to evaluate the ranking :type n_ranks: list, None
Method to evaluate the final ranking
def evaluate(self, metrics, verbose=True, as_table=False, table_sep='\t', n_ranks=None): """ Method to evaluate the final ranking :param metrics: List of evaluation metrics :type metrics: list, default ('Prec', 'Recall', 'MAP, 'NDCG') :param verbose: Print the evaluation results :type verbose: bool, default True :param as_table: Print the evaluation results as table :type as_table: bool, default False :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True) :type table_sep: str, default '\t' :param n_ranks: List of positions to evaluate the ranking :type n_ranks: list, None """ self.evaluation_results = {} if metrics is None: metrics = list(['PREC', 'RECALL', 'MAP', 'NDCG']) results = ItemRecommendationEvaluation(verbose=verbose, as_table=as_table, table_sep=table_sep, metrics=metrics, n_ranks=n_ranks) results.evaluate_recommender(predictions=self.ranking, test_set=self.test_set)
[ "def", "evaluate", "(", "self", ",", "metrics", ",", "verbose", "=", "True", ",", "as_table", "=", "False", ",", "table_sep", "=", "'\\t'", ",", "n_ranks", "=", "None", ")", ":", "self", ".", "evaluation_results", "=", "{", "}", "if", "metrics", "is", "None", ":", "metrics", "=", "list", "(", "[", "'PREC'", ",", "'RECALL'", ",", "'MAP'", ",", "'NDCG'", "]", ")", "results", "=", "ItemRecommendationEvaluation", "(", "verbose", "=", "verbose", ",", "as_table", "=", "as_table", ",", "table_sep", "=", "table_sep", ",", "metrics", "=", "metrics", ",", "n_ranks", "=", "n_ranks", ")", "results", ".", "evaluate_recommender", "(", "predictions", "=", "self", ".", "ranking", ",", "test_set", "=", "self", ".", "test_set", ")" ]
[ 134, 4 ]
[ 162, 86 ]
python
en
['en', 'error', 'th']
False
BaseItemRecommendation.write_ranking
(self)
Method to write the final ranking
Method to write the final ranking
def write_ranking(self): """ Method to write final ranking """ if self.output_file is not None: WriteFile(self.output_file, data=self.ranking, sep=self.sep).write()
[ "def", "write_ranking", "(", "self", ")", ":", "if", "self", ".", "output_file", "is", "not", "None", ":", "WriteFile", "(", "self", ".", "output_file", ",", "data", "=", "self", ".", "ranking", ",", "sep", "=", "self", ".", "sep", ")", ".", "write", "(", ")" ]
[ 164, 4 ]
[ 171, 80 ]
python
en
['en', 'error', 'th']
False
BaseItemRecommendation.compute
(self, verbose=True)
Method to run the recommender algorithm :param verbose: Print the information about the recommender :type verbose: bool, default True
Method to run the recommender algorithm
def compute(self, verbose=True): """ Method to run the recommender algorithm :param verbose: Print the information about recommender :type verbose: bool, default True """ # read files self.read_files() # initialize empty ranking (Don't remove: important to Cross Validation) self.ranking = [] if verbose: test_info = None main_info = { 'title': 'Item Recommendation > ' + self.recommender_name, 'n_users': len(self.train_set['users']), 'n_items': len(self.train_set['items']), 'n_interactions': self.train_set['number_interactions'], 'sparsity': self.train_set['sparsity'] } if self.test_file is not None: test_info = { 'n_users': len(self.test_set['users']), 'n_items': len(self.test_set['items']), 'n_interactions': self.test_set['number_interactions'], 'sparsity': self.test_set['sparsity'] } print_header(main_info, test_info)
[ "def", "compute", "(", "self", ",", "verbose", "=", "True", ")", ":", "# read files", "self", ".", "read_files", "(", ")", "# initialize empty ranking (Don't remove: important to Cross Validation)", "self", ".", "ranking", "=", "[", "]", "if", "verbose", ":", "test_info", "=", "None", "main_info", "=", "{", "'title'", ":", "'Item Recommendation > '", "+", "self", ".", "recommender_name", ",", "'n_users'", ":", "len", "(", "self", ".", "train_set", "[", "'users'", "]", ")", ",", "'n_items'", ":", "len", "(", "self", ".", "train_set", "[", "'items'", "]", ")", ",", "'n_interactions'", ":", "self", ".", "train_set", "[", "'number_interactions'", "]", ",", "'sparsity'", ":", "self", ".", "train_set", "[", "'sparsity'", "]", "}", "if", "self", ".", "test_file", "is", "not", "None", ":", "test_info", "=", "{", "'n_users'", ":", "len", "(", "self", ".", "test_set", "[", "'users'", "]", ")", ",", "'n_items'", ":", "len", "(", "self", ".", "test_set", "[", "'items'", "]", ")", ",", "'n_interactions'", ":", "self", ".", "test_set", "[", "'number_interactions'", "]", ",", "'sparsity'", ":", "self", ".", "test_set", "[", "'sparsity'", "]", "}", "print_header", "(", "main_info", ",", "test_info", ")" ]
[ 173, 4 ]
[ 207, 46 ]
python
en
['en', 'error', 'th']
False
Loader.get_template_sources
(self, template_name)
Return an Origin object pointing to an absolute path in each directory in template_dirs. For security reasons, if a path doesn't lie inside one of the template_dirs it is excluded from the result set.
Return an Origin object pointing to an absolute path in each directory in template_dirs. For security reasons, if a path doesn't lie inside one of the template_dirs it is excluded from the result set.
def get_template_sources(self, template_name): """ Return an Origin object pointing to an absolute path in each directory in template_dirs. For security reasons, if a path doesn't lie inside one of the template_dirs it is excluded from the result set. """ for template_dir in self.get_dirs(): try: name = safe_join(template_dir, template_name) except SuspiciousFileOperation: # The joined path was located outside of this template_dir # (it might be inside another one, so this isn't fatal). continue yield Origin( name=name, template_name=template_name, loader=self, )
[ "def", "get_template_sources", "(", "self", ",", "template_name", ")", ":", "for", "template_dir", "in", "self", ".", "get_dirs", "(", ")", ":", "try", ":", "name", "=", "safe_join", "(", "template_dir", ",", "template_name", ")", "except", "SuspiciousFileOperation", ":", "# The joined path was located outside of this template_dir", "# (it might be inside another one, so this isn't fatal).", "continue", "yield", "Origin", "(", "name", "=", "name", ",", "template_name", "=", "template_name", ",", "loader", "=", "self", ",", ")" ]
[ 27, 4 ]
[ 45, 13 ]
python
en
['en', 'error', 'th']
False
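safe_join is what stops a crafted template name such as '../../etc/passwd' from escaping the template directory. This is not Django's implementation, just a sketch of the same containment check using pathlib:

from pathlib import Path

def safe_join_sketch(base, *paths):
    base = Path(base).resolve()
    candidate = base.joinpath(*paths).resolve()
    if candidate != base and base not in candidate.parents:
        raise ValueError("path escapes the base directory")
    return candidate

print(safe_join_sketch("/srv/templates", "app/index.html"))
# safe_join_sketch("/srv/templates", "../../etc/passwd") raises ValueError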
HDFStorage.download_files
(self, urls, count_callback=None)
Downloads all necessary files that are not yet stored on the disk using multiple processes. download_finished_callback and error_callback cannot be bound or unbound methods on Windows, so pass functions instead.
Downloads all necessary files that are not yet stored on the disk using multiple processes. download_finished_callback and error_callback cannot be bound or unbound methods on Windows, so pass functions instead.
def download_files(self, urls, count_callback=None): """Downloads all necessary files that are not yet stored on the disk using multiple processes. download_finished_callback and error_callback cannot be bound or unbound methods in Windows, so pass functions instead. """ urls = list(self.filter_files(urls, self._storage_directory)) if count_callback is not None: count_callback(len(urls)) process_args = [(url, self._storage_directory, self._username, self._password) for url in urls] with Pool(processes=5) as pool: pool.starmap_async(perform_download, process_args, callback=lambda x: None, error_callback=lambda x: None) pool.close() pool.join() # check if any files failed to download, and return false if so urls = list(self.filter_files(urls, self._storage_directory)) return len(urls) == 0
[ "def", "download_files", "(", "self", ",", "urls", ",", "count_callback", "=", "None", ")", ":", "urls", "=", "list", "(", "self", ".", "filter_files", "(", "urls", ",", "self", ".", "_storage_directory", ")", ")", "if", "count_callback", "is", "not", "None", ":", "count_callback", "(", "len", "(", "urls", ")", ")", "process_args", "=", "[", "(", "url", ",", "self", ".", "_storage_directory", ",", "self", ".", "_username", ",", "self", ".", "_password", ")", "for", "url", "in", "urls", "]", "with", "Pool", "(", "processes", "=", "5", ")", "as", "pool", ":", "pool", ".", "starmap_async", "(", "perform_download", ",", "process_args", ",", "callback", "=", "lambda", "x", ":", "None", ",", "error_callback", "=", "lambda", "x", ":", "None", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "# check if any files failed to download, and return false if so", "urls", "=", "list", "(", "self", ".", "filter_files", "(", "urls", ",", "self", ".", "_storage_directory", ")", ")", "return", "len", "(", "urls", ")", "==", "0" ]
[ 127, 4 ]
[ 146, 29 ]
python
en
['en', 'en', 'en']
True
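The pool pattern used here (starmap_async, then close/join, then re-filter to detect anything that failed to download) can be exercised in isolation. A minimal sketch; perform_download, the URLs, and the directory are stand-ins, and the worker must be a module-level function so it can be pickled:

from multiprocessing import Pool

def perform_download(url, directory):  # module-level so it can be pickled
    print("downloading", url, "->", directory)

if __name__ == "__main__":
    args = [(url, "/tmp/storage") for url in ["http://x/a", "http://x/b"]]
    with Pool(processes=2) as pool:
        pool.starmap_async(perform_download, args,
                           error_callback=lambda exc: print("failed:", exc))
        pool.close()  # no more tasks will be submitted
        pool.join()   # block until the async batch has finished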
_int64_feature
(value)
Wrapper for inserting int64 features into Example proto.
Wrapper for inserting int64 features into Example proto.
def _int64_feature(value): """Wrapper for inserting int64 features into Example proto.""" if not isinstance(value, list): value = [value] return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
[ "def", "_int64_feature", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "[", "value", "]", "return", "tf", ".", "train", ".", "Feature", "(", "int64_list", "=", "tf", ".", "train", ".", "Int64List", "(", "value", "=", "value", ")", ")" ]
[ 41, 0 ]
[ 45, 69 ]
python
en
['en', 'en', 'en']
True
_bytes_feature
(value)
Wrapper for inserting bytes features into Example proto.
Wrapper for inserting bytes features into Example proto.
def _bytes_feature(value): """Wrapper for inserting bytes features into Example proto.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
[ "def", "_bytes_feature", "(", "value", ")", ":", "return", "tf", ".", "train", ".", "Feature", "(", "bytes_list", "=", "tf", ".", "train", ".", "BytesList", "(", "value", "=", "[", "value", "]", ")", ")" ]
[ 48, 0 ]
[ 50, 71 ]
python
en
['en', 'en', 'en']
True
_convert_to_example
(filename, image_buffer, label_int, label_str, height, width)
Build an Example proto for an example. Args: filename: string, path to an image file, e.g., '/path/to/example.JPG' image_buffer: string, JPEG encoding of RGB image label_int: integer, identifier for ground truth (0-based) label_str: string, identifier for ground truth, e.g., 'daisy' height: integer, image height in pixels width: integer, image width in pixels Returns: Example proto
Build an Example proto for an example.
def _convert_to_example(filename, image_buffer, label_int, label_str, height, width): """Build an Example proto for an example. Args: filename: string, path to an image file, e.g., '/path/to/example.JPG' image_buffer: string, JPEG encoding of RGB image label_int: integer, identifier for ground truth (0-based) label_str: string, identifier for ground truth, e.g., 'daisy' height: integer, image height in pixels width: integer, image width in pixels Returns: Example proto """ colorspace = 'RGB' channels = 3 image_format = 'JPEG' example = tf.train.Example( features=tf.train.Features( feature={ 'image/height': _int64_feature(height), 'image/width': _int64_feature(width), 'image/colorspace': _bytes_feature(colorspace), 'image/channels': _int64_feature(channels), 'image/class/label': _int64_feature(label_int + 1), # model expects 1-based 'image/class/synset': _bytes_feature(label_str), 'image/format': _bytes_feature(image_format), 'image/filename': _bytes_feature(os.path.basename(filename)), 'image/encoded': _bytes_feature(image_buffer) })) return example
[ "def", "_convert_to_example", "(", "filename", ",", "image_buffer", ",", "label_int", ",", "label_str", ",", "height", ",", "width", ")", ":", "colorspace", "=", "'RGB'", "channels", "=", "3", "image_format", "=", "'JPEG'", "example", "=", "tf", ".", "train", ".", "Example", "(", "features", "=", "tf", ".", "train", ".", "Features", "(", "feature", "=", "{", "'image/height'", ":", "_int64_feature", "(", "height", ")", ",", "'image/width'", ":", "_int64_feature", "(", "width", ")", ",", "'image/colorspace'", ":", "_bytes_feature", "(", "colorspace", ")", ",", "'image/channels'", ":", "_int64_feature", "(", "channels", ")", ",", "'image/class/label'", ":", "_int64_feature", "(", "label_int", "+", "1", ")", ",", "# model expects 1-based", "'image/class/synset'", ":", "_bytes_feature", "(", "label_str", ")", ",", "'image/format'", ":", "_bytes_feature", "(", "image_format", ")", ",", "'image/filename'", ":", "_bytes_feature", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", ",", "'image/encoded'", ":", "_bytes_feature", "(", "image_buffer", ")", "}", ")", ")", "return", "example" ]
[ 53, 0 ]
[ 85, 16 ]
python
en
['en', 'en', 'en']
True
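A hedged usage sketch for _convert_to_example: serialize the returned proto into a TFRecord file. The paths and label pair are hypothetical, and under Python 3 the bytes-encoding guard from the previous sketch is assumed for the internal _bytes_feature calls. Note 'image/class/label' stores label_int + 1 because the consuming model expects 1-based labels.

import tensorflow as tf

image_buffer = open('/tmp/daisy1.jpg', 'rb').read()  # hypothetical JPEG on disk
example = _convert_to_example('/tmp/daisy1.jpg', image_buffer,
                              label_int=0, label_str=b'daisy',
                              height=224, width=224)
with tf.io.TFRecordWriter('/tmp/flowers.tfrecord') as writer:
    writer.write(example.SerializeToString())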
_get_image_data
(filename, coder)
Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels.
Process a single image file.
def _get_image_data(filename, coder): """Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels. """ # Read the image file. with tf.gfile.FastGFile(filename, 'rb') as ifp: image_data = ifp.read() # Decode the RGB JPEG. image = coder.decode_jpeg(image_data) # Check that image converted to RGB assert len(image.shape) == 3 height = image.shape[0] width = image.shape[1] assert image.shape[2] == 3 return image_data, height, width
[ "def", "_get_image_data", "(", "filename", ",", "coder", ")", ":", "# Read the image file.", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "filename", ",", "'rb'", ")", "as", "ifp", ":", "image_data", "=", "ifp", ".", "read", "(", ")", "# Decode the RGB JPEG.", "image", "=", "coder", ".", "decode_jpeg", "(", "image_data", ")", "# Check that image converted to RGB", "assert", "len", "(", "image", ".", "shape", ")", "==", "3", "height", "=", "image", ".", "shape", "[", "0", "]", "width", "=", "image", ".", "shape", "[", "1", "]", "assert", "image", ".", "shape", "[", "2", "]", "==", "3", "return", "image_data", ",", "height", ",", "width" ]
[ 110, 0 ]
[ 134, 34 ]
python
en
['en', 'ny', 'en']
True
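tf.gfile.FastGFile is TF 1.x API, and decode_jpeg needs raw bytes, hence the binary 'rb' read. A sketch of the equivalent read under TF 2.x eager mode, which sidesteps the session-backed ImageCoder entirely; the function and variable names here are ours:

import tensorflow as tf

def get_image_data_tf2(filename):
    # tf.io.gfile.GFile is the TF 2.x replacement for tf.gfile.FastGFile.
    with tf.io.gfile.GFile(filename, 'rb') as ifp:
        image_data = ifp.read()
    # Eager decode; no Session/ImageCoder machinery needed in TF 2.x.
    image = tf.io.decode_jpeg(image_data, channels=3)
    assert image.shape[2] == 3
    return image_data, int(image.shape[0]), int(image.shape[1])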
convert_to_example
(csvline, categories)
Parse a line of CSV file and convert to TF Record. Args: csvline: line from input CSV file categories: list of labels Yields: serialized TF example if the label is in categories
Parse a line of CSV file and convert to TF Record.
def convert_to_example(csvline, categories): """Parse a line of CSV file and convert to TF Record. Args: csvline: line from input CSV file categories: list of labels Yields: serialized TF example if the label is in categories """ filename, label = csvline.encode('ascii', 'ignore').split(',') if label in categories: # ignore labels not in categories list coder = ImageCoder() image_buffer, height, width = _get_image_data(filename, coder) del coder example = _convert_to_example(filename, image_buffer, categories.index(label), label, height, width) yield example.SerializeToString()
[ "def", "convert_to_example", "(", "csvline", ",", "categories", ")", ":", "filename", ",", "label", "=", "csvline", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "split", "(", "','", ")", "if", "label", "in", "categories", ":", "# ignore labels not in categories list", "coder", "=", "ImageCoder", "(", ")", "image_buffer", ",", "height", ",", "width", "=", "_get_image_data", "(", "filename", ",", "coder", ")", "del", "coder", "example", "=", "_convert_to_example", "(", "filename", ",", "image_buffer", ",", "categories", ".", "index", "(", "label", ")", ",", "label", ",", "height", ",", "width", ")", "yield", "example", ".", "SerializeToString", "(", ")" ]
[ 137, 0 ]
[ 154, 37 ]
python
en
['en', 'en', 'en']
True
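convert_to_example is a generator that yields at most one serialized example per CSV line (nothing when the label is filtered out). Beware that csvline.encode('ascii', 'ignore').split(',') is a Python 2 idiom; under Python 3 the encoded bytes would need a .decode('ascii') before splitting. A hedged driving loop, with hypothetical paths and category names:

import tensorflow as tf

categories = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
with tf.io.TFRecordWriter('/tmp/train.tfrecord') as writer:
    with open('/tmp/train.csv') as csvfile:
        for line in csvfile:
            # Zero or one serialized protos per line.
            for serialized in convert_to_example(line.strip(), categories):
                writer.write(serialized)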
distutils_scheme
( dist_name: str, user: bool = False, home: str = None, root: str = None, isolated: bool = False, prefix: str = None, *, ignore_config_files: bool = False, )
Return a distutils install scheme
Return a distutils install scheme
def distutils_scheme( dist_name: str, user: bool = False, home: str = None, root: str = None, isolated: bool = False, prefix: str = None, *, ignore_config_files: bool = False, ) -> Dict[str, str]: """ Return a distutils install scheme """ from distutils.dist import Distribution dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name} if isolated: dist_args["script_args"] = ["--no-user-cfg"] d = Distribution(dist_args) if not ignore_config_files: try: d.parse_config_files() except UnicodeDecodeError: # Typeshed does not include find_config_files() for some reason. paths = d.find_config_files() # type: ignore logger.warning( "Ignore distutils configs in %s due to encoding errors.", ", ".join(os.path.basename(p) for p in paths), ) obj: Optional[DistutilsCommand] = None obj = d.get_command_obj("install", create=True) assert obj is not None i = cast(distutils_install_command, obj) # NOTE: setting user or home has the side-effect of creating the home dir # or user base for installations during finalize_options() # ideally, we'd prefer a scheme class that has no side-effects. assert not (user and prefix), f"user={user} prefix={prefix}" assert not (home and prefix), f"home={home} prefix={prefix}" i.user = user or i.user if user or home: i.prefix = "" i.prefix = prefix or i.prefix i.home = home or i.home i.root = root or i.root i.finalize_options() scheme = {} for key in SCHEME_KEYS: scheme[key] = getattr(i, "install_" + key) # install_lib specified in setup.cfg should install *everything* # into there (i.e. it takes precedence over both purelib and # platlib). Note, i.install_lib is *always* set after # finalize_options(); we only want to override here if the user # has explicitly requested it hence going back to the config if "install_lib" in d.get_option_dict("install"): scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) if running_under_virtualenv(): if home: prefix = home elif user: prefix = i.install_userbase # type: ignore else: prefix = i.prefix scheme["headers"] = os.path.join( prefix, "include", "site", f"python{get_major_minor_version()}", dist_name, ) if root is not None: path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1] scheme["headers"] = os.path.join(root, path_no_drive[1:]) return scheme
[ "def", "distutils_scheme", "(", "dist_name", ":", "str", ",", "user", ":", "bool", "=", "False", ",", "home", ":", "str", "=", "None", ",", "root", ":", "str", "=", "None", ",", "isolated", ":", "bool", "=", "False", ",", "prefix", ":", "str", "=", "None", ",", "*", ",", "ignore_config_files", ":", "bool", "=", "False", ",", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "from", "distutils", ".", "dist", "import", "Distribution", "dist_args", ":", "Dict", "[", "str", ",", "Union", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "{", "\"name\"", ":", "dist_name", "}", "if", "isolated", ":", "dist_args", "[", "\"script_args\"", "]", "=", "[", "\"--no-user-cfg\"", "]", "d", "=", "Distribution", "(", "dist_args", ")", "if", "not", "ignore_config_files", ":", "try", ":", "d", ".", "parse_config_files", "(", ")", "except", "UnicodeDecodeError", ":", "# Typeshed does not include find_config_files() for some reason.", "paths", "=", "d", ".", "find_config_files", "(", ")", "# type: ignore", "logger", ".", "warning", "(", "\"Ignore distutils configs in %s due to encoding errors.\"", ",", "\", \"", ".", "join", "(", "os", ".", "path", ".", "basename", "(", "p", ")", "for", "p", "in", "paths", ")", ",", ")", "obj", ":", "Optional", "[", "DistutilsCommand", "]", "=", "None", "obj", "=", "d", ".", "get_command_obj", "(", "\"install\"", ",", "create", "=", "True", ")", "assert", "obj", "is", "not", "None", "i", "=", "cast", "(", "distutils_install_command", ",", "obj", ")", "# NOTE: setting user or home has the side-effect of creating the home dir", "# or user base for installations during finalize_options()", "# ideally, we'd prefer a scheme class that has no side-effects.", "assert", "not", "(", "user", "and", "prefix", ")", ",", "f\"user={user} prefix={prefix}\"", "assert", "not", "(", "home", "and", "prefix", ")", ",", "f\"home={home} prefix={prefix}\"", "i", ".", "user", "=", "user", "or", "i", ".", "user", "if", "user", "or", "home", ":", "i", ".", "prefix", "=", "\"\"", "i", ".", "prefix", "=", "prefix", "or", "i", ".", "prefix", "i", ".", "home", "=", "home", "or", "i", ".", "home", "i", ".", "root", "=", "root", "or", "i", ".", "root", "i", ".", "finalize_options", "(", ")", "scheme", "=", "{", "}", "for", "key", "in", "SCHEME_KEYS", ":", "scheme", "[", "key", "]", "=", "getattr", "(", "i", ",", "\"install_\"", "+", "key", ")", "# install_lib specified in setup.cfg should install *everything*", "# into there (i.e. it takes precedence over both purelib and", "# platlib). 
Note, i.install_lib is *always* set after", "# finalize_options(); we only want to override here if the user", "# has explicitly requested it hence going back to the config", "if", "\"install_lib\"", "in", "d", ".", "get_option_dict", "(", "\"install\"", ")", ":", "scheme", ".", "update", "(", "dict", "(", "purelib", "=", "i", ".", "install_lib", ",", "platlib", "=", "i", ".", "install_lib", ")", ")", "if", "running_under_virtualenv", "(", ")", ":", "if", "home", ":", "prefix", "=", "home", "elif", "user", ":", "prefix", "=", "i", ".", "install_userbase", "# type: ignore", "else", ":", "prefix", "=", "i", ".", "prefix", "scheme", "[", "\"headers\"", "]", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "\"include\"", ",", "\"site\"", ",", "f\"python{get_major_minor_version()}\"", ",", "dist_name", ",", ")", "if", "root", "is", "not", "None", ":", "path_no_drive", "=", "os", ".", "path", ".", "splitdrive", "(", "os", ".", "path", ".", "abspath", "(", "scheme", "[", "\"headers\"", "]", ")", ")", "[", "1", "]", "scheme", "[", "\"headers\"", "]", "=", "os", ".", "path", ".", "join", "(", "root", ",", "path_no_drive", "[", "1", ":", "]", ")", "return", "scheme" ]
[ 23, 0 ]
[ 101, 17 ]
python
en
['en', 'error', 'th']
False
get_scheme
( dist_name: str, user: bool = False, home: Optional[str] = None, root: Optional[str] = None, isolated: bool = False, prefix: Optional[str] = None, )
Get the "scheme" corresponding to the input parameters. The distutils documentation provides the context for the available schemes: https://docs.python.org/3/install/index.html#alternate-installation :param dist_name: the name of the package to retrieve the scheme for, used in the headers scheme path :param user: indicates to use the "user" scheme :param home: indicates to use the "home" scheme and provides the base directory for the same :param root: root under which other directories are re-based :param isolated: equivalent to --no-user-cfg, i.e. do not consider ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for scheme paths :param prefix: indicates to use the "prefix" scheme and provides the base directory for the same
Get the "scheme" corresponding to the input parameters. The distutils documentation provides the context for the available schemes: https://docs.python.org/3/install/index.html#alternate-installation
def get_scheme( dist_name: str, user: bool = False, home: Optional[str] = None, root: Optional[str] = None, isolated: bool = False, prefix: Optional[str] = None, ) -> Scheme: """ Get the "scheme" corresponding to the input parameters. The distutils documentation provides the context for the available schemes: https://docs.python.org/3/install/index.html#alternate-installation :param dist_name: the name of the package to retrieve the scheme for, used in the headers scheme path :param user: indicates to use the "user" scheme :param home: indicates to use the "home" scheme and provides the base directory for the same :param root: root under which other directories are re-based :param isolated: equivalent to --no-user-cfg, i.e. do not consider ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for scheme paths :param prefix: indicates to use the "prefix" scheme and provides the base directory for the same """ scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix) return Scheme( platlib=scheme["platlib"], purelib=scheme["purelib"], headers=scheme["headers"], scripts=scheme["scripts"], data=scheme["data"], )
[ "def", "get_scheme", "(", "dist_name", ":", "str", ",", "user", ":", "bool", "=", "False", ",", "home", ":", "Optional", "[", "str", "]", "=", "None", ",", "root", ":", "Optional", "[", "str", "]", "=", "None", ",", "isolated", ":", "bool", "=", "False", ",", "prefix", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "Scheme", ":", "scheme", "=", "distutils_scheme", "(", "dist_name", ",", "user", ",", "home", ",", "root", ",", "isolated", ",", "prefix", ")", "return", "Scheme", "(", "platlib", "=", "scheme", "[", "\"platlib\"", "]", ",", "purelib", "=", "scheme", "[", "\"purelib\"", "]", ",", "headers", "=", "scheme", "[", "\"headers\"", "]", ",", "scripts", "=", "scheme", "[", "\"scripts\"", "]", ",", "data", "=", "scheme", "[", "\"data\"", "]", ",", ")" ]
[ 104, 0 ]
[ 136, 5 ]
python
en
['en', 'error', 'th']
False
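Both distutils_scheme and get_scheme appear to be pip internals (pip._internal.locations in versions of this vintage), so they are not a stable public API. A hedged illustration of what a call yields:

# Illustration only: pip internals can change between releases.
scheme = get_scheme('example-package', user=True)
print(scheme.purelib)  # e.g. ~/.local/lib/pythonX.Y/site-packages
print(scheme.headers)  # headers dir that embeds dist_name, per the code above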
parse_bits
(parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, name)
Parse bits for template tag helpers simple_tag and inclusion_tag, in particular by detecting syntax errors and by extracting positional and keyword arguments.
Parse bits for template tag helpers simple_tag and inclusion_tag, in particular by detecting syntax errors and by extracting positional and keyword arguments.
def parse_bits(parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, name): """ Parse bits for template tag helpers simple_tag and inclusion_tag, in particular by detecting syntax errors and by extracting positional and keyword arguments. """ if takes_context: if params[0] == 'context': params = params[1:] else: raise TemplateSyntaxError( "'%s' is decorated with takes_context=True so it must " "have a first argument of 'context'" % name) args = [] kwargs = {} unhandled_params = list(params) unhandled_kwargs = [ kwarg for kwarg in kwonly if not kwonly_defaults or kwarg not in kwonly_defaults ] for bit in bits: # First we try to extract a potential kwarg from the bit kwarg = token_kwargs([bit], parser) if kwarg: # The kwarg was successfully extracted param, value = kwarg.popitem() if param not in params and param not in kwonly and varkw is None: # An unexpected keyword argument was supplied raise TemplateSyntaxError( "'%s' received unexpected keyword argument '%s'" % (name, param)) elif param in kwargs: # The keyword argument has already been supplied once raise TemplateSyntaxError( "'%s' received multiple values for keyword argument '%s'" % (name, param)) else: # All good, record the keyword argument kwargs[str(param)] = value if param in unhandled_params: # If using the keyword syntax for a positional arg, then # consume it. unhandled_params.remove(param) elif param in unhandled_kwargs: # Same for keyword-only arguments unhandled_kwargs.remove(param) else: if kwargs: raise TemplateSyntaxError( "'%s' received some positional argument(s) after some " "keyword argument(s)" % name) else: # Record the positional argument args.append(parser.compile_filter(bit)) try: # Consume from the list of expected positional arguments unhandled_params.pop(0) except IndexError: if varargs is None: raise TemplateSyntaxError( "'%s' received too many positional arguments" % name) if defaults is not None: # Consider the last n params handled, where n is the # number of defaults. unhandled_params = unhandled_params[:-len(defaults)] if unhandled_params or unhandled_kwargs: # Some positional arguments were not supplied raise TemplateSyntaxError( "'%s' did not receive value(s) for the argument(s): %s" % (name, ", ".join("'%s'" % p for p in unhandled_params + unhandled_kwargs))) return args, kwargs
[ "def", "parse_bits", "(", "parser", ",", "bits", ",", "params", ",", "varargs", ",", "varkw", ",", "defaults", ",", "kwonly", ",", "kwonly_defaults", ",", "takes_context", ",", "name", ")", ":", "if", "takes_context", ":", "if", "params", "[", "0", "]", "==", "'context'", ":", "params", "=", "params", "[", "1", ":", "]", "else", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' is decorated with takes_context=True so it must \"", "\"have a first argument of 'context'\"", "%", "name", ")", "args", "=", "[", "]", "kwargs", "=", "{", "}", "unhandled_params", "=", "list", "(", "params", ")", "unhandled_kwargs", "=", "[", "kwarg", "for", "kwarg", "in", "kwonly", "if", "not", "kwonly_defaults", "or", "kwarg", "not", "in", "kwonly_defaults", "]", "for", "bit", "in", "bits", ":", "# First we try to extract a potential kwarg from the bit", "kwarg", "=", "token_kwargs", "(", "[", "bit", "]", ",", "parser", ")", "if", "kwarg", ":", "# The kwarg was successfully extracted", "param", ",", "value", "=", "kwarg", ".", "popitem", "(", ")", "if", "param", "not", "in", "params", "and", "param", "not", "in", "kwonly", "and", "varkw", "is", "None", ":", "# An unexpected keyword argument was supplied", "raise", "TemplateSyntaxError", "(", "\"'%s' received unexpected keyword argument '%s'\"", "%", "(", "name", ",", "param", ")", ")", "elif", "param", "in", "kwargs", ":", "# The keyword argument has already been supplied once", "raise", "TemplateSyntaxError", "(", "\"'%s' received multiple values for keyword argument '%s'\"", "%", "(", "name", ",", "param", ")", ")", "else", ":", "# All good, record the keyword argument", "kwargs", "[", "str", "(", "param", ")", "]", "=", "value", "if", "param", "in", "unhandled_params", ":", "# If using the keyword syntax for a positional arg, then", "# consume it.", "unhandled_params", ".", "remove", "(", "param", ")", "elif", "param", "in", "unhandled_kwargs", ":", "# Same for keyword-only arguments", "unhandled_kwargs", ".", "remove", "(", "param", ")", "else", ":", "if", "kwargs", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' received some positional argument(s) after some \"", "\"keyword argument(s)\"", "%", "name", ")", "else", ":", "# Record the positional argument", "args", ".", "append", "(", "parser", ".", "compile_filter", "(", "bit", ")", ")", "try", ":", "# Consume from the list of expected positional arguments", "unhandled_params", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "if", "varargs", "is", "None", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' received too many positional arguments\"", "%", "name", ")", "if", "defaults", "is", "not", "None", ":", "# Consider the last n params handled, where n is the", "# number of defaults.", "unhandled_params", "=", "unhandled_params", "[", ":", "-", "len", "(", "defaults", ")", "]", "if", "unhandled_params", "or", "unhandled_kwargs", ":", "# Some positional arguments were not supplied", "raise", "TemplateSyntaxError", "(", "\"'%s' did not receive value(s) for the argument(s): %s\"", "%", "(", "name", ",", "\", \"", ".", "join", "(", "\"'%s'\"", "%", "p", "for", "p", "in", "unhandled_params", "+", "unhandled_kwargs", ")", ")", ")", "return", "args", ",", "kwargs" ]
[ 236, 0 ]
[ 308, 23 ]
python
en
['en', 'error', 'th']
False
import_library
(name)
Load a Library object from a template tag module.
Load a Library object from a template tag module.
def import_library(name): """ Load a Library object from a template tag module. """ try: module = import_module(name) except ImportError as e: raise InvalidTemplateLibrary( "Invalid template library specified. ImportError raised when " "trying to load '%s': %s" % (name, e) ) try: return module.register except AttributeError: raise InvalidTemplateLibrary( "Module %s does not have a variable named 'register'" % name, )
[ "def", "import_library", "(", "name", ")", ":", "try", ":", "module", "=", "import_module", "(", "name", ")", "except", "ImportError", "as", "e", ":", "raise", "InvalidTemplateLibrary", "(", "\"Invalid template library specified. ImportError raised when \"", "\"trying to load '%s': %s\"", "%", "(", "name", ",", "e", ")", ")", "try", ":", "return", "module", ".", "register", "except", "AttributeError", ":", "raise", "InvalidTemplateLibrary", "(", "\"Module %s does not have a variable named 'register'\"", "%", "name", ",", ")" ]
[ 311, 0 ]
[ 327, 9 ]
python
en
['en', 'error', 'th']
False
Library.filter
(self, name=None, filter_func=None, **flags)
Register a callable as a template filter. Example: @register.filter def lower(value): return value.lower()
Register a callable as a template filter. Example:
def filter(self, name=None, filter_func=None, **flags): """ Register a callable as a template filter. Example: @register.filter def lower(value): return value.lower() """ if name is None and filter_func is None: # @register.filter() def dec(func): return self.filter_function(func, **flags) return dec elif name is not None and filter_func is None: if callable(name): # @register.filter return self.filter_function(name, **flags) else: # @register.filter('somename') or @register.filter(name='somename') def dec(func): return self.filter(name, func, **flags) return dec elif name is not None and filter_func is not None: # register.filter('somename', somefunc) self.filters[name] = filter_func for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'): if attr in flags: value = flags[attr] # set the flag on the filter for FilterExpression.resolve setattr(filter_func, attr, value) # set the flag on the innermost decorated function # for decorators that need it, e.g. stringfilter if hasattr(filter_func, "_decorated_function"): setattr(filter_func._decorated_function, attr, value) filter_func._filter_name = name return filter_func else: raise ValueError( "Unsupported arguments to Library.filter: (%r, %r)" % (name, filter_func), )
[ "def", "filter", "(", "self", ",", "name", "=", "None", ",", "filter_func", "=", "None", ",", "*", "*", "flags", ")", ":", "if", "name", "is", "None", "and", "filter_func", "is", "None", ":", "# @register.filter()", "def", "dec", "(", "func", ")", ":", "return", "self", ".", "filter_function", "(", "func", ",", "*", "*", "flags", ")", "return", "dec", "elif", "name", "is", "not", "None", "and", "filter_func", "is", "None", ":", "if", "callable", "(", "name", ")", ":", "# @register.filter", "return", "self", ".", "filter_function", "(", "name", ",", "*", "*", "flags", ")", "else", ":", "# @register.filter('somename') or @register.filter(name='somename')", "def", "dec", "(", "func", ")", ":", "return", "self", ".", "filter", "(", "name", ",", "func", ",", "*", "*", "flags", ")", "return", "dec", "elif", "name", "is", "not", "None", "and", "filter_func", "is", "not", "None", ":", "# register.filter('somename', somefunc)", "self", ".", "filters", "[", "name", "]", "=", "filter_func", "for", "attr", "in", "(", "'expects_localtime'", ",", "'is_safe'", ",", "'needs_autoescape'", ")", ":", "if", "attr", "in", "flags", ":", "value", "=", "flags", "[", "attr", "]", "# set the flag on the filter for FilterExpression.resolve", "setattr", "(", "filter_func", ",", "attr", ",", "value", ")", "# set the flag on the innermost decorated function", "# for decorators that need it, e.g. stringfilter", "if", "hasattr", "(", "filter_func", ",", "\"_decorated_function\"", ")", ":", "setattr", "(", "filter_func", ".", "_decorated_function", ",", "attr", ",", "value", ")", "filter_func", ".", "_filter_name", "=", "name", "return", "filter_func", "else", ":", "raise", "ValueError", "(", "\"Unsupported arguments to Library.filter: (%r, %r)\"", "%", "(", "name", ",", "filter_func", ")", ",", ")" ]
[ 53, 4 ]
[ 93, 13 ]
python
en
['en', 'error', 'th']
False
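A sketch of the three registration styles that Library.filter's branches handle, in a Django templatetags module; the module and filter names are ours:

from django import template

register = template.Library()

@register.filter                          # bare decorator: name is the callable
def lower(value):
    return value.lower()

@register.filter('shout', is_safe=True)   # explicit name plus a flag
def shout(value):
    return value.upper()

def reverse(value):
    return value[::-1]

register.filter('reverse', reverse)       # direct (name, filter_func) call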
Library.simple_tag
(self, func=None, takes_context=None, name=None)
Register a callable as a compiled template tag. Example: @register.simple_tag def hello(*args, **kwargs): return 'world'
Register a callable as a compiled template tag. Example:
def simple_tag(self, func=None, takes_context=None, name=None): """ Register a callable as a compiled template tag. Example: @register.simple_tag def hello(*args, **kwargs): return 'world' """ def dec(func): params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func)) function_name = (name or getattr(func, '_decorated_function', func).__name__) @functools.wraps(func) def compile_func(parser, token): bits = token.split_contents()[1:] target_var = None if len(bits) >= 2 and bits[-2] == 'as': target_var = bits[-1] bits = bits[:-2] args, kwargs = parse_bits( parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, function_name, ) return SimpleNode(func, takes_context, args, kwargs, target_var) self.tag(function_name, compile_func) return func if func is None: # @register.simple_tag(...) return dec elif callable(func): # @register.simple_tag return dec(func) else: raise ValueError("Invalid arguments provided to simple_tag")
[ "def", "simple_tag", "(", "self", ",", "func", "=", "None", ",", "takes_context", "=", "None", ",", "name", "=", "None", ")", ":", "def", "dec", "(", "func", ")", ":", "params", ",", "varargs", ",", "varkw", ",", "defaults", ",", "kwonly", ",", "kwonly_defaults", ",", "_", "=", "getfullargspec", "(", "unwrap", "(", "func", ")", ")", "function_name", "=", "(", "name", "or", "getattr", "(", "func", ",", "'_decorated_function'", ",", "func", ")", ".", "__name__", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "compile_func", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "[", "1", ":", "]", "target_var", "=", "None", "if", "len", "(", "bits", ")", ">=", "2", "and", "bits", "[", "-", "2", "]", "==", "'as'", ":", "target_var", "=", "bits", "[", "-", "1", "]", "bits", "=", "bits", "[", ":", "-", "2", "]", "args", ",", "kwargs", "=", "parse_bits", "(", "parser", ",", "bits", ",", "params", ",", "varargs", ",", "varkw", ",", "defaults", ",", "kwonly", ",", "kwonly_defaults", ",", "takes_context", ",", "function_name", ",", ")", "return", "SimpleNode", "(", "func", ",", "takes_context", ",", "args", ",", "kwargs", ",", "target_var", ")", "self", ".", "tag", "(", "function_name", ",", "compile_func", ")", "return", "func", "if", "func", "is", "None", ":", "# @register.simple_tag(...)", "return", "dec", "elif", "callable", "(", "func", ")", ":", "# @register.simple_tag", "return", "dec", "(", "func", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid arguments provided to simple_tag\"", ")" ]
[ 99, 4 ]
[ 133, 72 ]
python
en
['en', 'error', 'th']
False
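A sketch of registering a simple_tag, showing the takes_context contract enforced by parse_bits and the trailing 'as var' syntax handled in compile_func; the tag name and template usage are ours:

from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def greet(context, name, punctuation='!'):
    # With takes_context=True the first parameter must be named 'context'.
    return 'Hello %s%s' % (name, punctuation)

# In a template:
#   {% greet "world" punctuation="?" as greeting %}
# The trailing "as greeting" stores the result in the context variable.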
Library.inclusion_tag
(self, filename, func=None, takes_context=None, name=None)
Register a callable as an inclusion tag: @register.inclusion_tag('results.html') def show_results(poll): choices = poll.choice_set.all() return {'choices': choices}
Register a callable as an inclusion tag:
def inclusion_tag(self, filename, func=None, takes_context=None, name=None): """ Register a callable as an inclusion tag: @register.inclusion_tag('results.html') def show_results(poll): choices = poll.choice_set.all() return {'choices': choices} """ def dec(func): params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func)) function_name = (name or getattr(func, '_decorated_function', func).__name__) @functools.wraps(func) def compile_func(parser, token): bits = token.split_contents()[1:] args, kwargs = parse_bits( parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, function_name, ) return InclusionNode( func, takes_context, args, kwargs, filename, ) self.tag(function_name, compile_func) return func return dec
[ "def", "inclusion_tag", "(", "self", ",", "filename", ",", "func", "=", "None", ",", "takes_context", "=", "None", ",", "name", "=", "None", ")", ":", "def", "dec", "(", "func", ")", ":", "params", ",", "varargs", ",", "varkw", ",", "defaults", ",", "kwonly", ",", "kwonly_defaults", ",", "_", "=", "getfullargspec", "(", "unwrap", "(", "func", ")", ")", "function_name", "=", "(", "name", "or", "getattr", "(", "func", ",", "'_decorated_function'", ",", "func", ")", ".", "__name__", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "compile_func", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "[", "1", ":", "]", "args", ",", "kwargs", "=", "parse_bits", "(", "parser", ",", "bits", ",", "params", ",", "varargs", ",", "varkw", ",", "defaults", ",", "kwonly", ",", "kwonly_defaults", ",", "takes_context", ",", "function_name", ",", ")", "return", "InclusionNode", "(", "func", ",", "takes_context", ",", "args", ",", "kwargs", ",", "filename", ",", ")", "self", ".", "tag", "(", "function_name", ",", "compile_func", ")", "return", "func", "return", "dec" ]
[ 135, 4 ]
[ 160, 18 ]
python
en
['en', 'error', 'th']
False
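A sketch of inclusion_tag registration matching the docstring's example; the dict returned by the tag function becomes the context used to render results.html:

from django import template

register = template.Library()

@register.inclusion_tag('results.html')
def show_results(poll):
    # Rendered with results.html; dict keys become template variables.
    return {'choices': poll.choice_set.all()}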
InclusionNode.render
(self, context)
Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.
Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.
def render(self, context): """ Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop. """ resolved_args, resolved_kwargs = self.get_resolved_arguments(context) _dict = self.func(*resolved_args, **resolved_kwargs) t = context.render_context.get(self) if t is None: if isinstance(self.filename, Template): t = self.filename elif isinstance(getattr(self.filename, 'template', None), Template): t = self.filename.template elif not isinstance(self.filename, str) and is_iterable(self.filename): t = context.template.engine.select_template(self.filename) else: t = context.template.engine.get_template(self.filename) context.render_context[self] = t new_context = context.new(_dict) # Copy across the CSRF token, if present, because inclusion tags are # often used for forms, and we need instructions for using CSRF # protection to be as simple as possible. csrf_token = context.get('csrf_token') if csrf_token is not None: new_context['csrf_token'] = csrf_token return t.render(new_context)
[ "def", "render", "(", "self", ",", "context", ")", ":", "resolved_args", ",", "resolved_kwargs", "=", "self", ".", "get_resolved_arguments", "(", "context", ")", "_dict", "=", "self", ".", "func", "(", "*", "resolved_args", ",", "*", "*", "resolved_kwargs", ")", "t", "=", "context", ".", "render_context", ".", "get", "(", "self", ")", "if", "t", "is", "None", ":", "if", "isinstance", "(", "self", ".", "filename", ",", "Template", ")", ":", "t", "=", "self", ".", "filename", "elif", "isinstance", "(", "getattr", "(", "self", ".", "filename", ",", "'template'", ",", "None", ")", ",", "Template", ")", ":", "t", "=", "self", ".", "filename", ".", "template", "elif", "not", "isinstance", "(", "self", ".", "filename", ",", "str", ")", "and", "is_iterable", "(", "self", ".", "filename", ")", ":", "t", "=", "context", ".", "template", ".", "engine", ".", "select_template", "(", "self", ".", "filename", ")", "else", ":", "t", "=", "context", ".", "template", ".", "engine", ".", "get_template", "(", "self", ".", "filename", ")", "context", ".", "render_context", "[", "self", "]", "=", "t", "new_context", "=", "context", ".", "new", "(", "_dict", ")", "# Copy across the CSRF token, if present, because inclusion tags are", "# often used for forms, and we need instructions for using CSRF", "# protection to be as simple as possible.", "csrf_token", "=", "context", ".", "get", "(", "'csrf_token'", ")", "if", "csrf_token", "is", "not", "None", ":", "new_context", "[", "'csrf_token'", "]", "=", "csrf_token", "return", "t", ".", "render", "(", "new_context", ")" ]
[ 206, 4 ]
[ 233, 36 ]
python
en
['en', 'error', 'th']
False
page_not_found
(request, exception, template_name=ERROR_404_TEMPLATE_NAME)
Default 404 handler. Templates: :template:`404.html` Context: request_path The path of the requested URL (e.g., '/app/pages/bad_page/'). It's quoted to prevent a content injection attack. exception The message from the exception which triggered the 404 (if one was supplied), or the exception class name
Default 404 handler.
def page_not_found(request, exception, template_name=ERROR_404_TEMPLATE_NAME): """ Default 404 handler. Templates: :template:`404.html` Context: request_path The path of the requested URL (e.g., '/app/pages/bad_page/'). It's quoted to prevent a content injection attack. exception The message from the exception which triggered the 404 (if one was supplied), or the exception class name """ exception_repr = exception.__class__.__name__ # Try to get an "interesting" exception message, if any (and not the ugly # Resolver404 dictionary) try: message = exception.args[0] except (AttributeError, IndexError): pass else: if isinstance(message, str): exception_repr = message context = { 'request_path': quote(request.path), 'exception': exception_repr, } try: template = loader.get_template(template_name) body = template.render(context, request) content_type = None # Django will use 'text/html'. except TemplateDoesNotExist: if template_name != ERROR_404_TEMPLATE_NAME: # Reraise if it's a missing custom template. raise # Render template (even though there are no substitutions) to allow # inspecting the context in tests. template = Engine().from_string( ERROR_PAGE_TEMPLATE % { 'title': 'Not Found', 'details': 'The requested resource was not found on this server.', }, ) body = template.render(Context(context)) content_type = 'text/html' return HttpResponseNotFound(body, content_type=content_type)
[ "def", "page_not_found", "(", "request", ",", "exception", ",", "template_name", "=", "ERROR_404_TEMPLATE_NAME", ")", ":", "exception_repr", "=", "exception", ".", "__class__", ".", "__name__", "# Try to get an \"interesting\" exception message, if any (and not the ugly", "# Resolver404 dictionary)", "try", ":", "message", "=", "exception", ".", "args", "[", "0", "]", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "pass", "else", ":", "if", "isinstance", "(", "message", ",", "str", ")", ":", "exception_repr", "=", "message", "context", "=", "{", "'request_path'", ":", "quote", "(", "request", ".", "path", ")", ",", "'exception'", ":", "exception_repr", ",", "}", "try", ":", "template", "=", "loader", ".", "get_template", "(", "template_name", ")", "body", "=", "template", ".", "render", "(", "context", ",", "request", ")", "content_type", "=", "None", "# Django will use 'text/html'.", "except", "TemplateDoesNotExist", ":", "if", "template_name", "!=", "ERROR_404_TEMPLATE_NAME", ":", "# Reraise if it's a missing custom template.", "raise", "# Render template (even though there are no substitutions) to allow", "# inspecting the context in tests.", "template", "=", "Engine", "(", ")", ".", "from_string", "(", "ERROR_PAGE_TEMPLATE", "%", "{", "'title'", ":", "'Not Found'", ",", "'details'", ":", "'The requested resource was not found on this server.'", ",", "}", ",", ")", "body", "=", "template", ".", "render", "(", "Context", "(", "context", ")", ")", "content_type", "=", "'text/html'", "return", "HttpResponseNotFound", "(", "body", ",", "content_type", "=", "content_type", ")" ]
[ 30, 0 ]
[ 75, 64 ]
python
en
['en', 'error', 'th']
False
server_error
(request, template_name=ERROR_500_TEMPLATE_NAME)
500 error handler. Templates: :template:`500.html` Context: None
500 error handler.
def server_error(request, template_name=ERROR_500_TEMPLATE_NAME): """ 500 error handler. Templates: :template:`500.html` Context: None """ try: template = loader.get_template(template_name) except TemplateDoesNotExist: if template_name != ERROR_500_TEMPLATE_NAME: # Reraise if it's a missing custom template. raise return HttpResponseServerError( ERROR_PAGE_TEMPLATE % {'title': 'Server Error (500)', 'details': ''}, content_type='text/html', ) return HttpResponseServerError(template.render())
[ "def", "server_error", "(", "request", ",", "template_name", "=", "ERROR_500_TEMPLATE_NAME", ")", ":", "try", ":", "template", "=", "loader", ".", "get_template", "(", "template_name", ")", "except", "TemplateDoesNotExist", ":", "if", "template_name", "!=", "ERROR_500_TEMPLATE_NAME", ":", "# Reraise if it's a missing custom template.", "raise", "return", "HttpResponseServerError", "(", "ERROR_PAGE_TEMPLATE", "%", "{", "'title'", ":", "'Server Error (500)'", ",", "'details'", ":", "''", "}", ",", "content_type", "=", "'text/html'", ",", ")", "return", "HttpResponseServerError", "(", "template", ".", "render", "(", ")", ")" ]
[ 79, 0 ]
[ 96, 53 ]
python
en
['en', 'error', 'th']
False
bad_request
(request, exception, template_name=ERROR_400_TEMPLATE_NAME)
400 error handler. Templates: :template:`400.html` Context: None
400 error handler.
def bad_request(request, exception, template_name=ERROR_400_TEMPLATE_NAME): """ 400 error handler. Templates: :template:`400.html` Context: None """ try: template = loader.get_template(template_name) except TemplateDoesNotExist: if template_name != ERROR_400_TEMPLATE_NAME: # Reraise if it's a missing custom template. raise return HttpResponseBadRequest( ERROR_PAGE_TEMPLATE % {'title': 'Bad Request (400)', 'details': ''}, content_type='text/html', ) # No exception content is passed to the template, to not disclose any sensitive information. return HttpResponseBadRequest(template.render())
[ "def", "bad_request", "(", "request", ",", "exception", ",", "template_name", "=", "ERROR_400_TEMPLATE_NAME", ")", ":", "try", ":", "template", "=", "loader", ".", "get_template", "(", "template_name", ")", "except", "TemplateDoesNotExist", ":", "if", "template_name", "!=", "ERROR_400_TEMPLATE_NAME", ":", "# Reraise if it's a missing custom template.", "raise", "return", "HttpResponseBadRequest", "(", "ERROR_PAGE_TEMPLATE", "%", "{", "'title'", ":", "'Bad Request (400)'", ",", "'details'", ":", "''", "}", ",", "content_type", "=", "'text/html'", ",", ")", "# No exception content is passed to the template, to not disclose any sensitive information.", "return", "HttpResponseBadRequest", "(", "template", ".", "render", "(", ")", ")" ]
[ 100, 0 ]
[ 118, 52 ]
python
en
['en', 'error', 'th']
False
permission_denied
(request, exception, template_name=ERROR_403_TEMPLATE_NAME)
Permission denied (403) handler. Templates: :template:`403.html` Context: None If the template does not exist, an Http403 response containing the text "403 Forbidden" (as per RFC 7231) will be returned.
Permission denied (403) handler.
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME): """ Permission denied (403) handler. Templates: :template:`403.html` Context: None If the template does not exist, an Http403 response containing the text "403 Forbidden" (as per RFC 7231) will be returned. """ try: template = loader.get_template(template_name) except TemplateDoesNotExist: if template_name != ERROR_403_TEMPLATE_NAME: # Reraise if it's a missing custom template. raise return HttpResponseForbidden( ERROR_PAGE_TEMPLATE % {'title': '403 Forbidden', 'details': ''}, content_type='text/html', ) return HttpResponseForbidden( template.render(request=request, context={'exception': str(exception)}) )
[ "def", "permission_denied", "(", "request", ",", "exception", ",", "template_name", "=", "ERROR_403_TEMPLATE_NAME", ")", ":", "try", ":", "template", "=", "loader", ".", "get_template", "(", "template_name", ")", "except", "TemplateDoesNotExist", ":", "if", "template_name", "!=", "ERROR_403_TEMPLATE_NAME", ":", "# Reraise if it's a missing custom template.", "raise", "return", "HttpResponseForbidden", "(", "ERROR_PAGE_TEMPLATE", "%", "{", "'title'", ":", "'403 Forbidden'", ",", "'details'", ":", "''", "}", ",", "content_type", "=", "'text/html'", ",", ")", "return", "HttpResponseForbidden", "(", "template", ".", "render", "(", "request", "=", "request", ",", "context", "=", "{", "'exception'", ":", "str", "(", "exception", ")", "}", ")", ")" ]
[ 125, 0 ]
[ 147, 5 ]
python
en
['en', 'error', 'th']
False
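The four views above (page_not_found, server_error, bad_request, permission_denied) are Django's default error handlers. A project swaps in its own by assigning handler variables in the root URLconf; the dotted paths below are hypothetical. Note the 500 handler receives no exception argument, matching server_error's signature above.

# urls.py (root URLconf)
handler400 = 'myapp.views.custom_bad_request'
handler403 = 'myapp.views.custom_permission_denied'
handler404 = 'myapp.views.custom_page_not_found'
handler500 = 'myapp.views.custom_server_error'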
compute_class_weights
(root, train_data_list)
We want to weight the positive pixels by the ratio of negative to positive. Three scenarios: 1. Equal classes. neg/pos ~ 1. Standard binary cross-entropy 2. Many more negative examples. The network will learn to always output negative. In this case we want to increase the punishment for getting a positive wrong, so the network is pushed toward predicting positives 3. Many more positive examples. We weight the positive value less so that negatives have a chance.
We want to weight the positive pixels by the ratio of negative to positive. Three scenarios: 1. Equal classes. neg/pos ~ 1. Standard binary cross-entropy 2. Many more negative examples. The network will learn to always output negative. In this case we want to increase the punishment for getting a positive wrong, so the network is pushed toward predicting positives 3. Many more positive examples. We weight the positive value less so that negatives have a chance.
def compute_class_weights(root, train_data_list): ''' We want to weight the positive pixels by the ratio of negative to positive. Three scenarios: 1. Equal classes. neg/pos ~ 1. Standard binary cross-entropy 2. Many more negative examples. The network will learn to always output negative. In this case we want to increase the punishment for getting a positive wrong, so the network is pushed toward predicting positives 3. Many more positive examples. We weight the positive value less so that negatives have a chance. ''' pos = 0.0 neg = 0.0 for img_name in tqdm(train_data_list): img = sitk.GetArrayFromImage(sitk.ReadImage(join(root, 'masks', img_name[0]))) for slic in img: if not np.any(slic): continue else: p = np.count_nonzero(slic) pos += p neg += (slic.size - p) return neg / pos
[ "def", "compute_class_weights", "(", "root", ",", "train_data_list", ")", ":", "pos", "=", "0.0", "neg", "=", "0.0", "for", "img_name", "in", "tqdm", "(", "train_data_list", ")", ":", "img", "=", "sitk", ".", "GetArrayFromImage", "(", "sitk", ".", "ReadImage", "(", "join", "(", "root", ",", "'masks'", ",", "img_name", "[", "0", "]", ")", ")", ")", "for", "slic", "in", "img", ":", "if", "not", "np", ".", "any", "(", "slic", ")", ":", "continue", "else", ":", "p", "=", "np", ".", "count_nonzero", "(", "slic", ")", "pos", "+=", "p", "neg", "+=", "(", "slic", ".", "size", "-", "p", ")", "return", "neg", "/", "pos" ]
[ 56, 0 ]
[ 77, 20 ]
python
en
['en', 'error', 'th']
False
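The neg/pos ratio returned by compute_class_weights is typically consumed as a positive-class weight in a binary cross-entropy loss. A hedged PyTorch sketch; the record does not show the original repo's loss wiring, and the ratio value here is made up:

import torch
import torch.nn as nn

neg_over_pos = 9.0  # e.g. the value returned by compute_class_weights(...)
criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([neg_over_pos]))

logits = torch.randn(4, 1, 64, 64)                     # raw network outputs
targets = torch.randint(0, 2, (4, 1, 64, 64)).float()  # binary masks
loss = criterion(logits, targets)                      # positives weighted 9x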
validate_twilio_request
(f)
Validates that incoming requests genuinely originated from Twilio
Validates that incoming requests genuinely originated from Twilio
def validate_twilio_request(f): """Validates that incoming requests genuinely originated from Twilio""" @wraps(f) def decorated_function(*args, **kwargs): # Create an instance of the RequestValidator class validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN')) # Validate the request using its URL, POST data, # and X-TWILIO-SIGNATURE header request_valid = validator.validate( request.url, request.form, request.headers.get('X-TWILIO-SIGNATURE', '')) # Continue processing the request if it's valid, return a 403 error if # it's not if request_valid: return f(*args, **kwargs) else: return abort(403) return decorated_function
[ "def", "validate_twilio_request", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "decorated_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Create an instance of the RequestValidator class", "validator", "=", "RequestValidator", "(", "os", ".", "environ", ".", "get", "(", "'TWILIO_AUTH_TOKEN'", ")", ")", "# Validate the request using its URL, POST data,", "# and X-TWILIO-SIGNATURE header", "request_valid", "=", "validator", ".", "validate", "(", "request", ".", "url", ",", "request", ".", "form", ",", "request", ".", "headers", ".", "get", "(", "'X-TWILIO-SIGNATURE'", ",", "''", ")", ")", "# Continue processing the request if it's valid, return a 403 error if", "# it's not", "if", "request_valid", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "abort", "(", "403", ")", "return", "decorated_function" ]
[ 10, 0 ]
[ 30, 29 ]
python
en
['en', 'en', 'en']
True
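A sketch of the decorator guarding a Flask webhook route; the route path and reply text are ours, and TWILIO_AUTH_TOKEN must be set in the environment for validation to succeed:

from flask import Flask
from twilio.twiml.messaging_response import MessagingResponse

app = Flask(__name__)

@app.route('/sms', methods=['POST'])
@validate_twilio_request  # aborts with 403 when the signature fails to verify
def incoming_sms():
    resp = MessagingResponse()
    resp.message('Signature verified.')
    return str(resp)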
convert_exception_to_response
(get_response)
Wrap the given get_response callable in exception-to-response conversion. All exceptions will be converted. All known 4xx exceptions (Http404, PermissionDenied, MultiPartParserError, SuspiciousOperation) will be converted to the appropriate response, and all other exceptions will be converted to 500 responses. This decorator is automatically applied to all middleware to ensure that no middleware leaks an exception and that the next middleware in the stack can rely on getting a response instead of an exception.
Wrap the given get_response callable in exception-to-response conversion.
def convert_exception_to_response(get_response): """ Wrap the given get_response callable in exception-to-response conversion. All exceptions will be converted. All known 4xx exceptions (Http404, PermissionDenied, MultiPartParserError, SuspiciousOperation) will be converted to the appropriate response, and all other exceptions will be converted to 500 responses. This decorator is automatically applied to all middleware to ensure that no middleware leaks an exception and that the next middleware in the stack can rely on getting a response instead of an exception. """ if asyncio.iscoroutinefunction(get_response): @wraps(get_response) async def inner(request): try: response = await get_response(request) except Exception as exc: response = await sync_to_async(response_for_exception, thread_sensitive=False)(request, exc) return response return inner else: @wraps(get_response) def inner(request): try: response = get_response(request) except Exception as exc: response = response_for_exception(request, exc) return response return inner
[ "def", "convert_exception_to_response", "(", "get_response", ")", ":", "if", "asyncio", ".", "iscoroutinefunction", "(", "get_response", ")", ":", "@", "wraps", "(", "get_response", ")", "async", "def", "inner", "(", "request", ")", ":", "try", ":", "response", "=", "await", "get_response", "(", "request", ")", "except", "Exception", "as", "exc", ":", "response", "=", "await", "sync_to_async", "(", "response_for_exception", ",", "thread_sensitive", "=", "False", ")", "(", "request", ",", "exc", ")", "return", "response", "return", "inner", "else", ":", "@", "wraps", "(", "get_response", ")", "def", "inner", "(", "request", ")", ":", "try", ":", "response", "=", "get_response", "(", "request", ")", "except", "Exception", "as", "exc", ":", "response", "=", "response_for_exception", "(", "request", ",", "exc", ")", "return", "response", "return", "inner" ]
[ 20, 0 ]
[ 50, 20 ]
python
en
['en', 'error', 'th']
False
handle_uncaught_exception
(request, resolver, exc_info)
Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses).
Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses).
def handle_uncaught_exception(request, resolver, exc_info): """ Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses). """ if settings.DEBUG_PROPAGATE_EXCEPTIONS: raise if settings.DEBUG: return debug.technical_500_response(request, *exc_info) # Return an HttpResponse that displays a friendly error message. callback = resolver.resolve_error_handler(500) return callback(request)
[ "def", "handle_uncaught_exception", "(", "request", ",", "resolver", ",", "exc_info", ")", ":", "if", "settings", ".", "DEBUG_PROPAGATE_EXCEPTIONS", ":", "raise", "if", "settings", ".", "DEBUG", ":", "return", "debug", ".", "technical_500_response", "(", "request", ",", "*", "exc_info", ")", "# Return an HttpResponse that displays a friendly error message.", "callback", "=", "resolver", ".", "resolve_error_handler", "(", "500", ")", "return", "callback", "(", "request", ")" ]
[ 139, 0 ]
[ 152, 28 ]
python
en
['en', 'error', 'th']
False
GeometryField.to_python
(self, value)
Transform the value to a Geometry object.
Transform the value to a Geometry object.
def to_python(self, value): """Transform the value to a Geometry object.""" if value in self.empty_values: return None if not isinstance(value, GEOSGeometry): if hasattr(self.widget, 'deserialize'): try: value = self.widget.deserialize(value) except GDALException: value = None else: try: value = GEOSGeometry(value) except (GEOSException, ValueError, TypeError): value = None if value is None: raise ValidationError(self.error_messages['invalid_geom'], code='invalid_geom') # Try to set the srid if not value.srid: try: value.srid = self.widget.map_srid except AttributeError: if self.srid: value.srid = self.srid return value
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "in", "self", ".", "empty_values", ":", "return", "None", "if", "not", "isinstance", "(", "value", ",", "GEOSGeometry", ")", ":", "if", "hasattr", "(", "self", ".", "widget", ",", "'deserialize'", ")", ":", "try", ":", "value", "=", "self", ".", "widget", ".", "deserialize", "(", "value", ")", "except", "GDALException", ":", "value", "=", "None", "else", ":", "try", ":", "value", "=", "GEOSGeometry", "(", "value", ")", "except", "(", "GEOSException", ",", "ValueError", ",", "TypeError", ")", ":", "value", "=", "None", "if", "value", "is", "None", ":", "raise", "ValidationError", "(", "self", ".", "error_messages", "[", "'invalid_geom'", "]", ",", "code", "=", "'invalid_geom'", ")", "# Try to set the srid", "if", "not", "value", ".", "srid", ":", "try", ":", "value", ".", "srid", "=", "self", ".", "widget", ".", "map_srid", "except", "AttributeError", ":", "if", "self", ".", "srid", ":", "value", ".", "srid", "=", "self", ".", "srid", "return", "value" ]
[ 33, 4 ]
[ 59, 20 ]
python
en
['en', 'en', 'en']
True
GeometryField.clean
(self, value)
Validate that the input value can be converted to a Geometry object and return it. Raise a ValidationError if the value cannot be instantiated as a Geometry.
Validate that the input value can be converted to a Geometry object and return it. Raise a ValidationError if the value cannot be instantiated as a Geometry.
def clean(self, value): """ Validate that the input value can be converted to a Geometry object and return it. Raise a ValidationError if the value cannot be instantiated as a Geometry. """ geom = super().clean(value) if geom is None: return geom # Ensuring that the geometry is of the correct type (indicated # using the OGC string label). if str(geom.geom_type).upper() != self.geom_type and self.geom_type != 'GEOMETRY': raise ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type') # Transforming the geometry if the SRID was set. if self.srid and self.srid != -1 and self.srid != geom.srid: try: geom.transform(self.srid) except GEOSException: raise ValidationError( self.error_messages['transform_error'], code='transform_error') return geom
[ "def", "clean", "(", "self", ",", "value", ")", ":", "geom", "=", "super", "(", ")", ".", "clean", "(", "value", ")", "if", "geom", "is", "None", ":", "return", "geom", "# Ensuring that the geometry is of the correct type (indicated", "# using the OGC string label).", "if", "str", "(", "geom", ".", "geom_type", ")", ".", "upper", "(", ")", "!=", "self", ".", "geom_type", "and", "self", ".", "geom_type", "!=", "'GEOMETRY'", ":", "raise", "ValidationError", "(", "self", ".", "error_messages", "[", "'invalid_geom_type'", "]", ",", "code", "=", "'invalid_geom_type'", ")", "# Transforming the geometry if the SRID was set.", "if", "self", ".", "srid", "and", "self", ".", "srid", "!=", "-", "1", "and", "self", ".", "srid", "!=", "geom", ".", "srid", ":", "try", ":", "geom", ".", "transform", "(", "self", ".", "srid", ")", "except", "GEOSException", ":", "raise", "ValidationError", "(", "self", ".", "error_messages", "[", "'transform_error'", "]", ",", "code", "=", "'transform_error'", ")", "return", "geom" ]
[ 61, 4 ]
[ 84, 19 ]
python
en
['en', 'error', 'th']
False
GeometryField.has_changed
(self, initial, data)
Compare geographic value of data with its initial value.
Compare geographic value of data with its initial value.
def has_changed(self, initial, data): """ Compare geographic value of data with its initial value. """ try: data = self.to_python(data) initial = self.to_python(initial) except ValidationError: return True # Only do a geographic comparison if both values are available if initial and data: data.transform(initial.srid) # If the initial value was not added by the browser, the geometry # provided may be slightly different, the first time it is saved. # The comparison is done with a very low tolerance. return not initial.equals_exact(data, tolerance=0.000001) else: # Check for change of state of existence return bool(initial) != bool(data)
[ "def", "has_changed", "(", "self", ",", "initial", ",", "data", ")", ":", "try", ":", "data", "=", "self", ".", "to_python", "(", "data", ")", "initial", "=", "self", ".", "to_python", "(", "initial", ")", "except", "ValidationError", ":", "return", "True", "# Only do a geographic comparison if both values are available", "if", "initial", "and", "data", ":", "data", ".", "transform", "(", "initial", ".", "srid", ")", "# If the initial value was not added by the browser, the geometry", "# provided may be slightly different, the first time it is saved.", "# The comparison is done with a very low tolerance.", "return", "not", "initial", ".", "equals_exact", "(", "data", ",", "tolerance", "=", "0.000001", ")", "else", ":", "# Check for change of state of existence", "return", "bool", "(", "initial", ")", "!=", "bool", "(", "data", ")" ]
[ 86, 4 ]
[ 104, 46 ]
python
en
['en', 'en', 'en']
True
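A sketch of the GeometryField machinery in use via a GIS form inside a configured Django project with GEOS available; PointField subclasses GeometryField, so the to_python/clean/has_changed methods above all apply, and the form and field names are ours:

from django.contrib.gis import forms

class PlaceForm(forms.Form):
    # srid=4326 makes clean() transform submitted geometries to WGS 84.
    location = forms.PointField(srid=4326)

form = PlaceForm({'location': 'POINT(5 23)'})
if form.is_valid():
    geom = form.cleaned_data['location']  # a GEOSGeometry instance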
pep562
(module_name: str)
Helper function to apply PEP 562.
Helper function to apply PEP 562.
def pep562(module_name: str) -> None: """Helper function to apply PEP 562.""" if sys.version_info < (3, 7): Pep562(module_name)
[ "def", "pep562", "(", "module_name", ":", "str", ")", "->", "None", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "7", ")", ":", "Pep562", "(", "module_name", ")" ]
[ 56, 0 ]
[ 60, 27 ]
python
en
['en', 'af', 'en']
True
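A sketch of the backport in use: define module-level __getattr__/__dir__ as PEP 562 specifies, then call pep562(__name__) at the bottom of the module so the hooks also work on Python < 3.7. The module path and attribute name are hypothetical:

# mypkg/compat.py (hypothetical module)
from typing import Any, List

def __getattr__(name: str) -> Any:
    if name == 'OLD_CONSTANT':
        return 42  # lazily computed / deprecated attribute
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__() -> List[str]:
    return sorted(list(globals()) + ['OLD_CONSTANT'])

pep562(__name__)  # no-op on Python >= 3.7; wraps the module otherwise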
Pep562.__init__
(self, name: str)
Acquire `__getattr__` and `__dir__`, but only replace module for versions less than Python 3.7.
Acquire `__getattr__` and `__dir__`, but only replace module for versions less than Python 3.7.
def __init__(self, name: str) -> None: """Acquire `__getattr__` and `__dir__`, but only replace module for versions less than Python 3.7.""" self._module = sys.modules[name] self._get_attr = getattr(self._module, "__getattr__", None) self._get_dir: Optional[Callable[..., List[str]]] = getattr( self._module, "__dir__", None ) sys.modules[name] = self
[ "def", "__init__", "(", "self", ",", "name", ":", "str", ")", "->", "None", ":", "self", ".", "_module", "=", "sys", ".", "modules", "[", "name", "]", "self", ".", "_get_attr", "=", "getattr", "(", "self", ".", "_module", ",", "\"__getattr__\"", ",", "None", ")", "self", ".", "_get_dir", ":", "Optional", "[", "Callable", "[", "...", ",", "List", "[", "str", "]", "]", "]", "=", "getattr", "(", "self", ".", "_module", ",", "\"__dir__\"", ",", "None", ")", "sys", ".", "modules", "[", "name", "]", "=", "self" ]
[ 28, 4 ]
[ 36, 32 ]
python
en
['en', 'en', 'en']
True
Pep562.__dir__
(self)
Return the overridden `dir` if one was provided, else apply `dir` to the module.
Return the overridden `dir` if one was provided, else apply `dir` to the module.
def __dir__(self) -> List[str]: """Return the overridden `dir` if one was provided, else apply `dir` to the module.""" return self._get_dir() if self._get_dir else dir(self._module)
[ "def", "__dir__", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "_get_dir", "(", ")", "if", "self", ".", "_get_dir", "else", "dir", "(", "self", ".", "_module", ")" ]
[ 38, 4 ]
[ 41, 70 ]
python
en
['en', 'en', 'en']
True
Pep562.__getattr__
(self, name: str)
Attempt to retrieve the attribute from the module, and if missing, use the overridden function if present.
Attempt to retrieve the attribute from the module, and if missing, use the overridden function if present.
def __getattr__(self, name: str) -> Any: """ Attempt to retrieve the attribute from the module, and if missing, use the overridden function if present. """ try: return getattr(self._module, name) except AttributeError: if self._get_attr: return self._get_attr(name) raise
[ "def", "__getattr__", "(", "self", ",", "name", ":", "str", ")", "->", "Any", ":", "try", ":", "return", "getattr", "(", "self", ".", "_module", ",", "name", ")", "except", "AttributeError", ":", "if", "self", ".", "_get_attr", ":", "return", "self", ".", "_get_attr", "(", "name", ")", "raise" ]
[ 43, 4 ]
[ 53, 17 ]
python
en
['en', 'error', 'th']
False
run
()
Run the script in sys.argv[1] as if it had been invoked naturally.
Run the script in sys.argv[1] as if it had been invoked naturally.
def run(): """ Run the script in sys.argv[1] as if it had been invoked naturally. """ __builtins__ script_name = sys.argv[1] namespace = dict( __file__=script_name, __name__='__main__', __doc__=None, ) sys.argv[:] = sys.argv[1:] open_ = getattr(tokenize, 'open', open) script = open_(script_name).read() norm_script = script.replace('\\r\\n', '\\n') code = compile(norm_script, script_name, 'exec') exec(code, namespace)
[ "def", "run", "(", ")", ":", "__builtins__", "script_name", "=", "sys", ".", "argv", "[", "1", "]", "namespace", "=", "dict", "(", "__file__", "=", "script_name", ",", "__name__", "=", "'__main__'", ",", "__doc__", "=", "None", ",", ")", "sys", ".", "argv", "[", ":", "]", "=", "sys", ".", "argv", "[", "1", ":", "]", "open_", "=", "getattr", "(", "tokenize", ",", "'open'", ",", "open", ")", "script", "=", "open_", "(", "script_name", ")", ".", "read", "(", ")", "norm_script", "=", "script", ".", "replace", "(", "'\\\\r\\\\n'", ",", "'\\\\n'", ")", "code", "=", "compile", "(", "norm_script", ",", "script_name", ",", "'exec'", ")", "exec", "(", "code", ",", "namespace", ")" ]
[ 12, 0 ]
[ 30, 25 ]
python
en
['en', 'error', 'th']
False
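The `run` record above is a script-runner shim: it reads a target script, compiles it under its own filename (so tracebacks point at the script), and executes it in a fresh `__main__` namespace. A self-contained hedged sketch of the same read/compile/exec pattern; the helper name is made up:

import sys
import tokenize

def run_script(script_name):
    # Fresh namespace that mimics a natural top-level invocation.
    namespace = {"__file__": script_name, "__name__": "__main__", "__doc__": None}
    # tokenize.open honors PEP 263 source-encoding declarations.
    with tokenize.open(script_name) as f:
        source = f.read()
    code = compile(source, script_name, "exec")
    exec(code, namespace)

if __name__ == "__main__":
    run_script(sys.argv[1])  # e.g.: python runner.py some_script.py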
reshape_gt_to_pred
(pred_pix, gt_pix)
Resize gt_pix to the same size as pred_pix if it is not already. Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2]) Returns: gt_pix BxH1xW1
Resize gt_pix to the same size as pred_pix if it is not already. Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2]) Returns: gt_pix BxH1xW1
def reshape_gt_to_pred(pred_pix, gt_pix):
    """Resize gt_pix to the same size as pred_pix if it is not already.
    Args:
        pred_pix ([Bx7xH1xW1])
        gt_pix ([BxH2xW2])
    Returns:
        gt_pix BxH1xW1
    """
    if pred_pix.shape[2:] == gt_pix.shape[1:]:
        return gt_pix
    gt_pix = nn.functional.interpolate(
        # None of the interpolations are implemented for Long so need
        # convert to float for interpolation. Only use nearest
        # interpolation so that it can be easily converted back to int
        torch.unsqueeze(gt_pix, axis=1).to(torch.float),
        size=pred_pix.shape[2:],
        mode='nearest') \
        .to(torch.long).to(pred_pix.device).squeeze(1) \
        .detach()
    return gt_pix
[ "def", "reshape_gt_to_pred", "(", "pred_pix", ",", "gt_pix", ")", ":", "if", "pred_pix", ".", "shape", "[", "2", ":", "]", "==", "gt_pix", ".", "shape", "[", "1", ":", "]", ":", "return", "gt_pix", "gt_pix", "=", "nn", ".", "functional", ".", "interpolate", "(", "# None of the interpolations are implemented for Long so need", "# convert to float for interpolation. Only use nearest", "# interpolation so that it can be easily converted back to int", "torch", ".", "unsqueeze", "(", "gt_pix", ",", "axis", "=", "1", ")", ".", "to", "(", "torch", ".", "float", ")", ",", "size", "=", "pred_pix", ".", "shape", "[", "2", ":", "]", ",", "mode", "=", "'nearest'", ")", ".", "to", "(", "torch", ".", "long", ")", ".", "to", "(", "pred_pix", ".", "device", ")", ".", "squeeze", "(", "1", ")", ".", "detach", "(", ")", "return", "gt_pix" ]
[ 21, 0 ]
[ 39, 17 ]
python
en
['en', 'en', 'en']
True
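Nearest-neighbor interpolation is the right choice for integer label maps because it never invents in-between class IDs, which bilinear blending would. A hedged standalone sketch of the same cast-interpolate-cast trick; the shapes are made up:

import torch
import torch.nn as nn

# A 4x4 map of integer class labels, batch size 1.
labels = torch.randint(0, 7, (1, 4, 4))

# Interpolate in float space with mode='nearest', then cast back to long;
# 'nearest' only copies existing labels, so the cast is lossless.
resized = nn.functional.interpolate(
    labels.unsqueeze(1).float(), size=(8, 8), mode='nearest'
).squeeze(1).long()

assert resized.shape == (1, 8, 8)
# No new class IDs were introduced by the resize.
assert set(resized.unique().tolist()) <= set(labels.unique().tolist())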
PixCELoss._pix_wts_default
(cls, out_dim, gt_pix)
This pixel weighting was used until 2020-01-21.
This pixel weighting was used until 2020-01-21.
def _pix_wts_default(cls, out_dim, gt_pix):
    """This pixel weighting was used until 2020-01-21."""
    cls_weights = np.ones((out_dim, ), dtype=np.float32)
    cls_weights[0] = 0.1  # 10x lower, as it degenerates to white bgs
    return torch.as_tensor(cls_weights, device=gt_pix.device)
[ "def", "_pix_wts_default", "(", "cls", ",", "out_dim", ",", "gt_pix", ")", ":", "cls_weights", "=", "np", ".", "ones", "(", "(", "out_dim", ",", ")", ",", "dtype", "=", "np", ".", "float32", ")", "cls_weights", "[", "0", "]", "=", "0.1", "# 10x lower, as it degenerates to white bgs", "return", "torch", ".", "as_tensor", "(", "cls_weights", ",", "device", "=", "gt_pix", ".", "device", ")" ]
[ 50, 4 ]
[ 54, 65 ]
python
en
['en', 'en', 'en']
True
PixCELoss._pix_wts_count_reciprocal
(cls, out_dim, gt_pix)
This pixel weighting is default from 2020-01-22. Gave better results on 0018 template.
This pixel weighting is default from 2020-01-22. Gave better results on 0018 template.
def _pix_wts_count_reciprocal(cls, out_dim, gt_pix):
    """This pixel weighting is default from 2020-01-22. Gave better
    results on 0018 template."""
    uniq_elts, uniq_cnts = torch.unique(gt_pix, return_counts=True)
    uniq_elts = uniq_elts.cpu().numpy().tolist()
    uniq_cnts = uniq_cnts.cpu().numpy().tolist()
    cls_weights = np.zeros((out_dim, ), dtype=np.float32)
    for i in range(out_dim):
        if i in uniq_elts:
            cls_weights[i] = 1.0 / uniq_cnts[uniq_elts.index(i)]
    return torch.as_tensor(cls_weights, device=gt_pix.device)
[ "def", "_pix_wts_count_reciprocal", "(", "cls", ",", "out_dim", ",", "gt_pix", ")", ":", "uniq_elts", ",", "uniq_cnts", "=", "torch", ".", "unique", "(", "gt_pix", ",", "return_counts", "=", "True", ")", "uniq_elts", "=", "uniq_elts", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ".", "tolist", "(", ")", "uniq_cnts", "=", "uniq_cnts", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ".", "tolist", "(", ")", "cls_weights", "=", "np", ".", "zeros", "(", "(", "out_dim", ",", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "i", "in", "range", "(", "out_dim", ")", ":", "if", "i", "in", "uniq_elts", ":", "cls_weights", "[", "i", "]", "=", "1.0", "/", "uniq_cnts", "[", "uniq_elts", ".", "index", "(", "i", ")", "]", "return", "torch", ".", "as_tensor", "(", "cls_weights", ",", "device", "=", "gt_pix", ".", "device", ")" ]
[ 57, 4 ]
[ 67, 65 ]
python
en
['en', 'en', 'en']
True
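The two records above are alternative class-weighting schemes for a per-pixel loss: a fixed down-weighting of the dominant background class versus inverse-frequency weights recomputed from each batch. A hedged toy computation of the inverse-frequency variant (the label values are made up):

import torch

num_classes = 7
# Fake ground-truth labels: mostly background (0), a few foreground pixels.
gt = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 2])

vals, counts = torch.unique(gt, return_counts=True)
weights = torch.zeros(num_classes)
weights[vals] = 1.0 / counts.float()  # classes absent from the batch keep weight 0

print(weights)  # approx [0.167, 0.5, 1.0, 0, 0, 0, 0]: rare classes count more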
PixCELoss._per_pixel_softmax
(self, pred_pix, gt_pix)
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
def _per_pixel_softmax(self, pred_pix, gt_pix):
    """
    Args:
        pred_pix ([Bx7xH1xW1])
        gt_pix ([BxH2xW2])
    """
    pred_pix = pred_pix.permute(0, 2, 3, 1)
    out_dim = pred_pix.shape[-1]
    cls_weights = self.wt_fn(out_dim, gt_pix)
    criterion = nn.CrossEntropyLoss(weight=cls_weights)
    return criterion(pred_pix.reshape((-1, out_dim)), gt_pix.reshape(
        (-1, )))
[ "def", "_per_pixel_softmax", "(", "self", ",", "pred_pix", ",", "gt_pix", ")", ":", "pred_pix", "=", "pred_pix", ".", "permute", "(", "0", ",", "2", ",", "3", ",", "1", ")", "out_dim", "=", "pred_pix", ".", "shape", "[", "-", "1", "]", "cls_weights", "=", "self", ".", "wt_fn", "(", "out_dim", ",", "gt_pix", ")", "criterion", "=", "nn", ".", "CrossEntropyLoss", "(", "weight", "=", "cls_weights", ")", "return", "criterion", "(", "pred_pix", ".", "reshape", "(", "(", "-", "1", ",", "out_dim", ")", ")", ",", "gt_pix", ".", "reshape", "(", "(", "-", "1", ",", ")", ")", ")" ]
[ 69, 4 ]
[ 80, 20 ]
python
en
['en', 'error', 'th']
False
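`_per_pixel_softmax` flattens the spatial dimensions so every pixel becomes one classification example, then applies a class-weighted cross-entropy. A minimal sketch with made-up shapes and weights:

import torch
import torch.nn as nn

B, C, H, W = 2, 7, 4, 4
pred = torch.randn(B, C, H, W)        # per-pixel class logits
gt = torch.randint(0, C, (B, H, W))   # per-pixel class labels

# Channels last, then flatten: (B*H*W, C) logits vs (B*H*W,) labels.
logits = pred.permute(0, 2, 3, 1).reshape(-1, C)
labels = gt.reshape(-1)

weights = torch.ones(C)
weights[0] = 0.1  # down-weight the (dominant) background class
loss = nn.CrossEntropyLoss(weight=weights)(logits, labels)
print(loss.item())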
PixCELoss._bce
(cls, pred_pix, gt_pix)
Binary cross entropy Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
Binary cross entropy Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
def _bce(cls, pred_pix, gt_pix):
    """
    Binary cross entropy
    Args:
        pred_pix ([Bx7xH1xW1])
        gt_pix ([BxH2xW2])
    """
    gt_pix = nn.functional.one_hot(gt_pix,
                                   num_classes=phyre.NUM_COLORS).permute(
                                       0, 3, 1, 2).float()
    criterion = nn.BCELoss()
    return criterion(pred_pix, gt_pix)
[ "def", "_bce", "(", "cls", ",", "pred_pix", ",", "gt_pix", ")", ":", "gt_pix", "=", "nn", ".", "functional", ".", "one_hot", "(", "gt_pix", ",", "num_classes", "=", "phyre", ".", "NUM_COLORS", ")", ".", "permute", "(", "0", ",", "3", ",", "1", ",", "2", ")", ".", "float", "(", ")", "criterion", "=", "nn", ".", "BCELoss", "(", ")", "return", "criterion", "(", "pred_pix", ",", "gt_pix", ")" ]
[ 83, 4 ]
[ 94, 42 ]
python
en
['en', 'error', 'th']
False
PixCELoss._per_channel_spatial_softmax
( cls, pred_pix, gt_pix, gt_dist=True, # Not exposed, used for a quick expt target_temp=1)
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
def _per_channel_spatial_softmax(
        cls,
        pred_pix,
        gt_pix,
        gt_dist=True,
        # Not exposed, used for a quick expt
        target_temp=1):
    """
    Args:
        pred_pix ([Bx7xH1xW1])
        gt_pix ([BxH2xW2])
    """
    gt_pix = nn.functional.one_hot(gt_pix,
                                   num_classes=phyre.NUM_COLORS).permute(
                                       0, 3, 1, 2).float()
    # flatten, move the channels to batch since each channel will be tested
    # separately, and stretch out the spatial dimensions
    act = nn.Softmax(dim=-1) if gt_dist else lambda x: x  # (identity)
    gt_pix = gt_pix / target_temp
    gt_pix_normed, keep_dims = cls._convert_to_distribution(
        gt_pix, None, act)
    act = nn.LogSoftmax(dim=-1)
    pred_pix_normed, _ = cls._convert_to_distribution(
        pred_pix, keep_dims, act)
    nll = torch.mean(-torch.sum(gt_pix_normed * pred_pix_normed, dim=-1))
    return nll
[ "def", "_per_channel_spatial_softmax", "(", "cls", ",", "pred_pix", ",", "gt_pix", ",", "gt_dist", "=", "True", ",", "# Not exposed, used for a quick expt", "target_temp", "=", "1", ")", ":", "gt_pix", "=", "nn", ".", "functional", ".", "one_hot", "(", "gt_pix", ",", "num_classes", "=", "phyre", ".", "NUM_COLORS", ")", ".", "permute", "(", "0", ",", "3", ",", "1", ",", "2", ")", ".", "float", "(", ")", "# flatten, move the channels to batch since each channel will be tested", "# separately, and stretch out the spatial dimensions", "act", "=", "nn", ".", "Softmax", "(", "dim", "=", "-", "1", ")", "if", "gt_dist", "else", "lambda", "x", ":", "x", "# (identity)", "gt_pix", "=", "gt_pix", "/", "target_temp", "gt_pix_normed", ",", "keep_dims", "=", "cls", ".", "_convert_to_distribution", "(", "gt_pix", ",", "None", ",", "act", ")", "act", "=", "nn", ".", "LogSoftmax", "(", "dim", "=", "-", "1", ")", "pred_pix_normed", ",", "_", "=", "cls", ".", "_convert_to_distribution", "(", "pred_pix", ",", "keep_dims", ",", "act", ")", "nll", "=", "torch", ".", "mean", "(", "-", "torch", ".", "sum", "(", "gt_pix_normed", "*", "pred_pix_normed", ",", "dim", "=", "-", "1", ")", ")", "return", "nll" ]
[ 105, 4 ]
[ 130, 18 ]
python
en
['en', 'error', 'th']
False
PixCELoss._per_channel_spatial_kl
(cls, pred_pix, gt_pix)
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
def _per_channel_spatial_kl(cls, pred_pix, gt_pix):
    """
    Args:
        pred_pix ([Bx7xH1xW1])
        gt_pix ([BxH2xW2])
    """
    gt_pix = nn.functional.one_hot(gt_pix,
                                   num_classes=phyre.NUM_COLORS).permute(
                                       0, 3, 1, 2).float()
    # flatten, move the channels to batch since each channel will be tested
    # separately, and stretch out the spatial dimensions
    gt_pix_normed, keep_dims = cls._convert_to_distribution(
        gt_pix, None, nn.Softmax(dim=-1))
    pred_pix_normed, _ = cls._convert_to_distribution(
        pred_pix, keep_dims, nn.LogSoftmax(dim=-1))
    return nn.KLDivLoss(reduction='batchmean')(pred_pix_normed,
                                               gt_pix_normed)
[ "def", "_per_channel_spatial_kl", "(", "cls", ",", "pred_pix", ",", "gt_pix", ")", ":", "gt_pix", "=", "nn", ".", "functional", ".", "one_hot", "(", "gt_pix", ",", "num_classes", "=", "phyre", ".", "NUM_COLORS", ")", ".", "permute", "(", "0", ",", "3", ",", "1", ",", "2", ")", ".", "float", "(", ")", "# flatten, move the channels to batch since each channel will be tested", "# separately, and stretch out the spatial dimensions", "gt_pix_normed", ",", "keep_dims", "=", "cls", ".", "_convert_to_distribution", "(", "gt_pix", ",", "None", ",", "nn", ".", "Softmax", "(", "dim", "=", "-", "1", ")", ")", "pred_pix_normed", ",", "_", "=", "cls", ".", "_convert_to_distribution", "(", "pred_pix", ",", "keep_dims", ",", "nn", ".", "LogSoftmax", "(", "dim", "=", "-", "1", ")", ")", "return", "nn", ".", "KLDivLoss", "(", "reduction", "=", "'batchmean'", ")", "(", "pred_pix_normed", ",", "gt_pix_normed", ")" ]
[ 139, 4 ]
[ 155, 65 ]
python
en
['en', 'error', 'th']
False
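Both of the last two losses treat each channel's H x W map as a probability distribution over pixel locations (a spatial softmax) and compare prediction to ground truth, with cross-entropy in one case and KL divergence in the other. A hedged standalone sketch of the KL form on made-up shapes:

import torch
import torch.nn as nn

B, C, H, W = 2, 7, 8, 8
pred = torch.randn(B, C, H, W)
gt = torch.randn(B, C, H, W)

# Flatten H*W so each (batch, channel) row is a distribution over locations.
pred_flat = pred.reshape(B * C, H * W)
gt_flat = gt.reshape(B * C, H * W)

log_p = nn.LogSoftmax(dim=-1)(pred_flat)  # predictions as log-probs
q = nn.Softmax(dim=-1)(gt_flat)           # targets as probs

# KLDivLoss expects (log-probs, probs); 'batchmean' averages over rows.
kl = nn.KLDivLoss(reduction='batchmean')(log_p, q)
print(kl.item())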
PixL2Loss.forward
(self, pred_pix, gt_pix)
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
Args: pred_pix ([Bx7xH1xW1]) gt_pix ([BxH2xW2])
def forward(self, pred_pix, gt_pix):
    """
    Args:
        pred_pix ([Bx7xH1xW1])
        gt_pix ([BxH2xW2])
    """
    gt_pix = reshape_gt_to_pred(pred_pix, gt_pix)
    # Add the channel dimension to gt_pix
    gt_pix = nn.functional.one_hot(gt_pix,
                                   num_classes=phyre.NUM_COLORS).permute(
                                       0, 3, 1, 2).float()
    criterion = nn.MSELoss()
    return criterion(pred_pix, gt_pix)
[ "def", "forward", "(", "self", ",", "pred_pix", ",", "gt_pix", ")", ":", "gt_pix", "=", "reshape_gt_to_pred", "(", "pred_pix", ",", "gt_pix", ")", "# Add the channel dimension to gt_pix", "gt_pix", "=", "nn", ".", "functional", ".", "one_hot", "(", "gt_pix", ",", "num_classes", "=", "phyre", ".", "NUM_COLORS", ")", ".", "permute", "(", "0", ",", "3", ",", "1", ",", "2", ")", ".", "float", "(", ")", "criterion", "=", "nn", ".", "MSELoss", "(", ")", "return", "criterion", "(", "pred_pix", ",", "gt_pix", ")" ]
[ 166, 4 ]
[ 178, 42 ]
python
en
['en', 'error', 'th']
False
InfoNCELoss.__init__
(self, in_dim, temp=0.07, l2_norm_feats=True, nce_dim=None)
Args: in_dim: Dimension of the incoming features temp (float): temperature l2_norm_feats (bool): Whether to normalize feats before nce_dim (int): If not None, reduce the dimensionality to this number
Args: in_dim: Dimension of the incoming features temp (float): temperature l2_norm_feats (bool): Whether to normalize feats before nce_dim (int): If not None, reduce the dimensionality to this number
def __init__(self, in_dim, temp=0.07, l2_norm_feats=True, nce_dim=None):
    """
    Args:
        in_dim: Dimension of the incoming features
        temp (float): temperature
        l2_norm_feats (bool): Whether to normalize feats before
        nce_dim (int): If not None, reduce the dimensionality to this number
    """
    super().__init__()
    self.temp = temp
    self.l2_norm_feats = l2_norm_feats
    if nce_dim is not None and in_dim != nce_dim:
        self.reduce_dim = nn.Sequential(nn.Linear(in_dim, nce_dim),
                                        nn.ReLU(),
                                        nn.Linear(nce_dim, nce_dim))
    else:
        self.reduce_dim = lambda x: x
[ "def", "__init__", "(", "self", ",", "in_dim", ",", "temp", "=", "0.07", ",", "l2_norm_feats", "=", "True", ",", "nce_dim", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "temp", "=", "temp", "self", ".", "l2_norm_feats", "=", "l2_norm_feats", "if", "nce_dim", "is", "not", "None", "and", "in_dim", "!=", "nce_dim", ":", "self", ".", "reduce_dim", "=", "nn", ".", "Sequential", "(", "nn", ".", "Linear", "(", "in_dim", ",", "nce_dim", ")", ",", "nn", ".", "ReLU", "(", ")", ",", "nn", ".", "Linear", "(", "nce_dim", ",", "nce_dim", ")", ")", "else", ":", "self", ".", "reduce_dim", "=", "lambda", "x", ":", "x" ]
[ 183, 4 ]
[ 199, 41 ]
python
en
['en', 'error', 'th']
False
InfoNCELoss.forward
(self, pred, gt)
Args: pred (BxNobjxDxH'xW') gt (BxNobjxDxH'xW') From https://arxiv.org/pdf/1911.05722.pdf
Args: pred (BxNobjxDxH'xW') gt (BxNobjxDxH'xW') From https://arxiv.org/pdf/1911.05722.pdf
def forward(self, pred, gt):
    """
    Args:
        pred (BxNobjxDxH'xW')
        gt (BxNobjxDxH'xW')
    From https://arxiv.org/pdf/1911.05722.pdf
    """
    def spatial_to_batch(feat):
        """Move the H', W', Nobj dimension to batch,
        so will do a spatial-obj NCE."""
        feat_dim = feat.shape[2]
        return torch.reshape(torch.transpose(feat, 2, -1), [-1, feat_dim])

    pred_rolled = spatial_to_batch(pred)
    gt_rolled = spatial_to_batch(gt)
    pred_rolled = self.reduce_dim(pred_rolled)
    gt_rolled = self.reduce_dim(gt_rolled)
    # Features are L2 normalized before doing this typically. In case the
    # feature extractor did not normalize the features, normalize it now
    if self.l2_norm_feats:
        pred_rolled = nn.functional.normalize(pred_rolled, p=2, dim=-1)
        gt_rolled = nn.functional.normalize(gt_rolled, p=2, dim=-1)
    logits = torch.mm(pred_rolled, torch.transpose(gt_rolled, 0, 1))
    labels = torch.arange(pred_rolled.shape[0]).to(logits.device)
    criterion = nn.CrossEntropyLoss()
    return criterion(logits / self.temp, labels)
[ "def", "forward", "(", "self", ",", "pred", ",", "gt", ")", ":", "def", "spatial_to_batch", "(", "feat", ")", ":", "\"\"\"Move the H', W', Nobj dimension to batch,\n so will do a spatial-obj NCE.\"\"\"", "feat_dim", "=", "feat", ".", "shape", "[", "2", "]", "return", "torch", ".", "reshape", "(", "torch", ".", "transpose", "(", "feat", ",", "2", ",", "-", "1", ")", ",", "[", "-", "1", ",", "feat_dim", "]", ")", "pred_rolled", "=", "spatial_to_batch", "(", "pred", ")", "gt_rolled", "=", "spatial_to_batch", "(", "gt", ")", "pred_rolled", "=", "self", ".", "reduce_dim", "(", "pred_rolled", ")", "gt_rolled", "=", "self", ".", "reduce_dim", "(", "gt_rolled", ")", "# Features are L2 normalized before doing this typically. In case the", "# feature extractor did not normalize the features, normalize it now", "if", "self", ".", "l2_norm_feats", ":", "pred_rolled", "=", "nn", ".", "functional", ".", "normalize", "(", "pred_rolled", ",", "p", "=", "2", ",", "dim", "=", "-", "1", ")", "gt_rolled", "=", "nn", ".", "functional", ".", "normalize", "(", "gt_rolled", ",", "p", "=", "2", ",", "dim", "=", "-", "1", ")", "logits", "=", "torch", ".", "mm", "(", "pred_rolled", ",", "torch", ".", "transpose", "(", "gt_rolled", ",", "0", ",", "1", ")", ")", "labels", "=", "torch", ".", "arange", "(", "pred_rolled", ".", "shape", "[", "0", "]", ")", ".", "to", "(", "logits", ".", "device", ")", "criterion", "=", "nn", ".", "CrossEntropyLoss", "(", ")", "return", "criterion", "(", "logits", "/", "self", ".", "temp", ",", "labels", ")" ]
[ 201, 4 ]
[ 226, 52 ]
python
en
['en', 'error', 'th']
False
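The InfoNCE forward pass above matches each predicted feature against its own ground-truth feature (the diagonal of a similarity matrix) while every other row acts as a negative, as in MoCo (arXiv:1911.05722). A self-contained toy version with made-up dimensions:

import torch
import torch.nn as nn
import torch.nn.functional as F

N, D = 8, 16  # N feature pairs, D-dim embeddings
temp = 0.07

pred = F.normalize(torch.randn(N, D), p=2, dim=-1)
gt = F.normalize(torch.randn(N, D), p=2, dim=-1)

# N x N cosine-similarity logits; entry (i, j) compares pred_i with gt_j.
logits = pred @ gt.t()

# The positive for row i is column i; everything else is a negative.
labels = torch.arange(N)
loss = nn.CrossEntropyLoss()(logits / temp, labels)
print(loss.item())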
gen_vis_vid_preds
(orig_vid, orig_objs, model, n_fwd_times=None, run_decode=True, n_hist_frames=3)
Generate a visualization of some training videos, along with the model rollout (an actual autoregressive rollout, so the model needs to be run again). Args: orig_vid: (B, T, Nobj, H, W) video batch model: the pytorch model for forward prediction Returns: RGB frames (B, T, 3, H, W) as torch tensor, in the standard format that can be used with tensorboard.
Generate a visualization of some training videos, along with the model rollout (an actual autoregressive rollout, so the model needs to be run again). Args: orig_vid: (B, T, Nobj, H, W) video batch model: the pytorch model for forward prediction Returns: RGB frames (B, T, 3, H, W) as torch tensor, in the standard format that can be used with tensorboard.
def gen_vis_vid_preds(orig_vid,
                      orig_objs,
                      model,
                      n_fwd_times=None,
                      run_decode=True,
                      n_hist_frames=3):
    """
    Generate a visualization of some training videos, along with the model
    rollout (an actual autoregressive rollout, so the model needs to be run
    again).
    Args:
        orig_vid: (B, T, Nobj, H, W) video batch
        model: the pytorch model for forward prediction
    Returns:
        RGB frames (B, T, 3, H, W) as torch tensor, in the standard format
        that can be used with tensorboard.
    """
    # Generate the predictions
    if n_fwd_times is None:
        n_fwd_times = orig_vid.shape[1] - n_hist_frames  # As many we've GT for
    # For vis, at least 1 frame would be needed for following code
    n_fwd_times = max(n_fwd_times, 1)
    vid = orig_vid[:, :n_hist_frames, ...]  # crop out the first part for pred
    objs = orig_objs[:, :n_hist_frames, ...]
    with torch.no_grad():
        model.eval()
        logging.info('gen vis preds')
        all_preds, _ = model.forward(objs,
                                     None,
                                     n_hist_frames=n_hist_frames,
                                     n_fwd_times=n_fwd_times,
                                     compute_losses=False,
                                     need_intermediate=True,
                                     run_decode=run_decode,
                                     nslices=1)
    stacked, _, _, _ = im_fwd_agent.ImgTrainer.vis_stacked_pred_gt(
        nets.combine_obj_pixels(orig_vid, 2).cpu().numpy(),
        nets.combine_obj_pixels(vid, 2),
        all_preds['pixels'] if run_decode else None)
    # For some reason need to flip the image in space and time for corr vis
    stacked_rgb = np.array(
        im_fwd_agent.convert_to_video_vis(stacked).transpose((0, 1, 4, 2, 3)))
    return torch.as_tensor(stacked_rgb)
[ "def", "gen_vis_vid_preds", "(", "orig_vid", ",", "orig_objs", ",", "model", ",", "n_fwd_times", "=", "None", ",", "run_decode", "=", "True", ",", "n_hist_frames", "=", "3", ")", ":", "# Generate the predictions", "if", "n_fwd_times", "is", "None", ":", "n_fwd_times", "=", "orig_vid", ".", "shape", "[", "1", "]", "-", "n_hist_frames", "# As many we've GT for", "# For vis, at least 1 frame would be needed for following code", "n_fwd_times", "=", "max", "(", "n_fwd_times", ",", "1", ")", "vid", "=", "orig_vid", "[", ":", ",", ":", "n_hist_frames", ",", "...", "]", "# crop out the first part for pred", "objs", "=", "orig_objs", "[", ":", ",", ":", "n_hist_frames", ",", "...", "]", "with", "torch", ".", "no_grad", "(", ")", ":", "model", ".", "eval", "(", ")", "logging", ".", "info", "(", "'gen vis preds'", ")", "all_preds", ",", "_", "=", "model", ".", "forward", "(", "objs", ",", "None", ",", "n_hist_frames", "=", "n_hist_frames", ",", "n_fwd_times", "=", "n_fwd_times", ",", "compute_losses", "=", "False", ",", "need_intermediate", "=", "True", ",", "run_decode", "=", "run_decode", ",", "nslices", "=", "1", ")", "stacked", ",", "_", ",", "_", ",", "_", "=", "im_fwd_agent", ".", "ImgTrainer", ".", "vis_stacked_pred_gt", "(", "nets", ".", "combine_obj_pixels", "(", "orig_vid", ",", "2", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ",", "nets", ".", "combine_obj_pixels", "(", "vid", ",", "2", ")", ",", "all_preds", "[", "'pixels'", "]", "if", "run_decode", "else", "None", ")", "# For some reason need to flip the image in space and time for corr vis", "stacked_rgb", "=", "np", ".", "array", "(", "im_fwd_agent", ".", "convert_to_video_vis", "(", "stacked", ")", ".", "transpose", "(", "(", "0", ",", "1", ",", "4", ",", "2", ",", "3", ")", ")", ")", "return", "torch", ".", "as_tensor", "(", "stacked_rgb", ")" ]
[ 42, 0 ]
[ 83, 39 ]
python
en
['en', 'error', 'th']
False
ObjTrainer.load_agent_from_folder
(cls, model: NeuralModel, agent_folder: str, strict: bool = True)
This loader is used in the offline_agents code, to load at test time.
This loader is used in the offline_agents code, to load at test time.
def load_agent_from_folder(cls,
                           model: NeuralModel,
                           agent_folder: str,
                           strict: bool = True) -> NeuralModel:
    """
    This loader is used in the offline_agents code, to load at test time.
    """
    last_checkpoint = get_latest_checkpoint(agent_folder)
    assert last_checkpoint is not None, agent_folder
    logging.info('Loading a model from: %s', last_checkpoint)
    last_checkpoint = torch.load(last_checkpoint)
    try:
        model.module.classification_model.pos_encoder.pe = model.module.classification_model.pos_encoder.pe.contiguous()
    # A bare `except: pass` also swallowed KeyboardInterrupt etc.; only an
    # AttributeError (model without this buffer) should be ignored here.
    except AttributeError:
        pass
    missing_keys, unexp_keys = model.load_state_dict(
        last_checkpoint['model'], strict=strict)
    logging.warning('Could not init: %s', missing_keys)
    logging.warning('Unused keys in ckpt: %s', unexp_keys)
    model.to(nets.DEVICE)
    return model
[ "def", "load_agent_from_folder", "(", "cls", ",", "model", ":", "NeuralModel", ",", "agent_folder", ":", "str", ",", "strict", ":", "bool", "=", "True", ")", "->", "NeuralModel", ":", "last_checkpoint", "=", "get_latest_checkpoint", "(", "agent_folder", ")", "assert", "last_checkpoint", "is", "not", "None", ",", "agent_folder", "logging", ".", "info", "(", "'Loading a model from: %s'", ",", "last_checkpoint", ")", "last_checkpoint", "=", "torch", ".", "load", "(", "last_checkpoint", ")", "try", ":", "model", ".", "module", ".", "classification_model", ".", "pos_encoder", ".", "pe", "=", "model", ".", "module", ".", "classification_model", ".", "pos_encoder", ".", "pe", ".", "contiguous", "(", ")", "except", ":", "pass", "missing_keys", ",", "unexp_keys", "=", "model", ".", "load_state_dict", "(", "last_checkpoint", "[", "'model'", "]", ",", "strict", "=", "strict", ")", "logging", ".", "warning", "(", "'Could not init: %s'", ",", "missing_keys", ")", "logging", ".", "warning", "(", "'Unused keys in ckpt: %s'", ",", "unexp_keys", ")", "model", ".", "to", "(", "nets", ".", "DEVICE", ")", "return", "model" ]
[ 91, 4 ]
[ 111, 20 ]
python
en
['en', 'error', 'th']
False
ObjTrainer.gen_model
(cls, cfg, override_cfg=None, on_cpu=False)
Generate the random init model.
Generate the random init model.
def gen_model(cls, cfg, override_cfg=None, on_cpu=False):
    """Generate the random init model."""
    if override_cfg is not None:
        model = obj_nets.FwdObject(override_cfg)
    else:
        model = obj_nets.FwdObject(cfg)
    if on_cpu:
        return model.cpu()
    print('Using GPUS:', cfg.num_gpus)
    assert cfg.num_gpus <= torch.cuda.device_count()
    model = torch.nn.DataParallel(model,
                                  device_ids=list(range(cfg.num_gpus)))
    return model
[ "def", "gen_model", "(", "cls", ",", "cfg", ",", "override_cfg", "=", "None", ",", "on_cpu", "=", "False", ")", ":", "if", "override_cfg", "is", "not", "None", ":", "model", "=", "obj_nets", ".", "FwdObject", "(", "override_cfg", ")", "else", ":", "model", "=", "obj_nets", ".", "FwdObject", "(", "cfg", ")", "if", "on_cpu", ":", "return", "model", ".", "cpu", "(", ")", "print", "(", "'Using GPUS:'", ",", "cfg", ".", "num_gpus", ")", "assert", "cfg", ".", "num_gpus", "<=", "torch", ".", "cuda", ".", "device_count", "(", ")", "model", "=", "torch", ".", "nn", ".", "DataParallel", "(", "model", ",", "device_ids", "=", "list", "(", "range", "(", "cfg", ".", "num_gpus", ")", ")", ")", "return", "model" ]
[ 113, 4 ]
[ 129, 20 ]
python
en
['en', 'ms', 'en']
True
ObjTrainer.train
(cls, model, dataset, output_dir, summary_writer, full_eval_from_model, cfg)
Main train function.
Main train function.
def train(cls, model, dataset, output_dir, summary_writer,
          full_eval_from_model, cfg):
    """Main train function."""
    updates = cfg.train.num_iter
    report_every = cfg.train.report_every
    save_checkpoints_every = cfg.train.save_checkpoints_every
    full_eval_every = cfg.train.full_eval_every
    train_batch_size = cfg.train.batch_size
    print(train_batch_size)
    print(cfg.eval.batch_size)
    max_frames_fwd = cfg.train.frames_per_clip
    n_hist_frames = cfg.train.n_hist_frames  # Frames used to predict the future
    loss_cfg = cfg.train.obj_loss
    opt_params = cfg.opt
    n_fwd_times = cfg.train.n_fwd_times
    # nslices (slice out the input for training)
    num_slices = cfg.train.num_slices
    if max_frames_fwd is not None and (max_frames_fwd <= n_hist_frames):
        logging.warning(
            'Cant train prediction model, max_frames_fwd too low')
    device = nets.DEVICE
    model.train()
    model.to(device)
    logging.info("%s", model)
    train_modules_subset = cfg.train.modules_to_train
    params_to_train = []
    if train_modules_subset is not None:
        mod_names = train_modules_subset.split('%')
        logging.warning(
            'Training only a few modules, listed below. NOTE: '
            'BNs/dropout will still be in train mode. Explicitly '
            'set those to eval mode if thats not desired.')
        for mod_name in mod_names:
            mod = getattr(model.module, mod_name)
            logging.warning('Training %s: %s', mod_name, mod)
            params_to_train.extend(mod.parameters())
    else:
        mod_names = []
        params_to_train = model.parameters()
    optimizer = hydra.utils.instantiate(opt_params, params_to_train)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           T_max=updates)
    logging.info('Starting actual training for %d updates', updates)
    last_checkpoint = get_latest_checkpoint(output_dir)
    batch_start = 0  # By default, starting from iteration 0, unles loading mdl
    if last_checkpoint is not None:
        logging.info('Going to load from %s', last_checkpoint)
        last_checkpoint = torch.load(last_checkpoint)
        model.load_state_dict(last_checkpoint['model'])
        optimizer.load_state_dict(last_checkpoint['optim'])
        # Subtracting 1 since we store batch_id + 1 when calling save_agent
        batch_start = last_checkpoint['done_batches'] - 1
        if scheduler is not None:
            scheduler.load_state_dict(last_checkpoint['scheduler'])

    def run_full_eval(batch_id):
        results = {}  # To store to a json
        eval_stats = full_eval_from_model(model)
        metric = eval_stats.compute_all_metrics()
        results['metrics'] = metric
        results[
            'metrics_rollout'] = eval_stats.compute_all_metrics_over_rollout(
            )
        results[
            'metrics_per_task'] = eval_stats.compute_all_metrics_per_task(
            )
        max_test_attempts_per_task = (cfg.max_test_attempts_per_task
                                      or phyre.MAX_TEST_ATTEMPTS)
        results['parsed_args'] = dict(
            # cfg=cfg,  # Not json serializable, anyway will be stored in dir
            main_kwargs=dict(
                eval_setup_name=cfg.eval_setup_name,
                fold_id=cfg.fold_id,
                use_test_split=cfg.use_test_split,
                agent_type=cfg.agent.type,
                max_test_attempts_per_task=max_test_attempts_per_task,
                output_dir=output_dir))
        results['target_metric'] = (
            results['metrics']['independent_solved_by_aucs']
            [max_test_attempts_per_task])
        results['target_metric_over_time'] = [
            el['independent_solved_by_aucs'][max_test_attempts_per_task]
            for el in results['metrics_rollout']
        ]
        logging.info('Iter %d: %s; Over rollout: %s', (batch_id + 1),
                     results['target_metric'],
                     results['target_metric_over_time'])
        score = metric['independent_solved_by_aucs'][-1]
        summary_writer.add_scalar('FullEval/AUCCESS', score, batch_id + 1)
        for solved_by_iter in metric['global_solved_by']:
            summary_writer.add_scalar(
                'FullEval/solved_by_{}'.format(solved_by_iter),
                metric['global_solved_by'][solved_by_iter], batch_id + 1)
        logging.info('Full eval perf @ %d: %s', batch_id + 1, score)
        for i, metric in enumerate(results['metrics_rollout']):
            summary_writer.add_scalar(
                'FullEvalRollout/AUCCESS/{}'.format(i + 1),
                metric['independent_solved_by_aucs'][-1], batch_id + 1)
            summary_writer.add_scalar(
                'FullEvalRollout/solved_by_100/{}'.format(i),
                metric['global_solved_by'][100], batch_id + 1)
        summary_writer.add_scalar('FullEvalRollout/prediction_accuracy/',
                                  metric['pred_acc'], batch_id + 1)
        respath = os.path.join(
            output_dir,
            'results_intermediate/{:08d}.json'.format(batch_id + 1))
        os.makedirs(os.path.dirname(respath), exist_ok=True)
        with open(respath, 'w') as fout:
            json.dump(results, fout)

    logging.info('Report every %d; full eval every %d', report_every,
                 full_eval_every)
    if save_checkpoints_every > full_eval_every:
        save_checkpoints_every -= save_checkpoints_every % full_eval_every
    losses_report = {}
    last_time = time.time()
    assert train_batch_size > 1 and train_batch_size % 2 == 0, (
        'Needs to get 2 elements at least to balance out')
    for batch_data_id, batch_data in enumerate(
            torch.utils.data.DataLoader(
                dataset,
                num_workers=im_fwd_agent.get_num_workers(
                    cfg.train.data_loader.num_workers,
                    dataset.frames_per_clip),
                pin_memory=False,
                # Asking for half the batch size since the dataloader is designed
                # to give 2 elements per batch (for class balancing)
                batch_size=train_batch_size // 2)):
        batch_id = batch_data_id + batch_start
        if (batch_id + 1) >= updates:
            im_fwd_agent.save_agent(output_dir, batch_id + 1, model,
                                    optimizer, scheduler)
            break
        model.train()
        batch_is_solved = batch_data['is_solved']
        batch_is_solved = batch_is_solved.to(device, non_blocking=True)
        batch_is_solved = batch_is_solved.reshape((-1, ))
        batch_obj_obs = batch_data['obj_obs']
        batch_obj_obs = batch_obj_obs.reshape(
            [-1] + list(batch_obj_obs.shape[2:]))
        batch_obj_obs = batch_obj_obs.to(device)
        # Run the forward classifcation model on the object frames
        train_noise_frac = 0.0
        if cfg.agent.train_with_noise:
            if (batch_id / updates) > cfg.agent.decay_noise_end:
                train_noise_frac = 0.0
            elif (batch_id / updates) < cfg.agent.decay_noise_start:
                train_noise_frac = cfg.agent.train_noise_percent
            else:
                start_noise_decay = cfg.agent.decay_noise_start * updates
                end_noise_decay = cfg.agent.decay_noise_end * updates
                noise_decay_updates = end_noise_decay - start_noise_decay
                train_noise_frac = cfg.agent.train_noise_percent * (
                    1 - (batch_id - start_noise_decay) / noise_decay_updates)
        _, batch_losses = model.forward(
            batch_obj_obs,
            batch_is_solved,
            n_hist_frames=n_hist_frames,
            n_fwd_times=n_fwd_times,
            compute_losses=True,
            need_intermediate=False,  #loss_cfg.on_intermediate,
            nslices=num_slices,
            train_noise_frac=train_noise_frac,
            need_pixels=False)
        optimizer.zero_grad()
        total_loss = 0
        # Mean over each loss type from each replica
        for loss_type in batch_losses:
            loss_wt = getattr(loss_cfg, 'wt_' + loss_type)
            if loss_wt <= 0:
                continue
            loss_val = loss_wt * torch.mean(batch_losses[loss_type], dim=0)
            if loss_type not in losses_report:
                losses_report[loss_type] = []
            losses_report[loss_type].append(loss_val.item())
            total_loss += loss_val
        total_loss.backward()
        optimizer.step()
        if (save_checkpoints_every > 0
                and (batch_id + 1) % save_checkpoints_every == 0):
            im_fwd_agent.save_agent(output_dir, batch_id + 1, model,
                                    optimizer, scheduler)
        if (batch_id + 1) % report_every == 0:
            speed = report_every / (time.time() - last_time)
            last_time = time.time()
            loss_stats = {
                typ: np.mean(losses_report[typ][-report_every:])
                for typ in losses_report if len(losses_report[typ]) > 0
            }
            logging.info(
                'Iter: %s, examples: %d, mean loss: %s, speed: %.1f batch/sec,'
                ' lr: %f', batch_id + 1, (batch_id + 1) * train_batch_size,
                loss_stats, speed, im_fwd_agent.get_lr(optimizer))
            for typ in loss_stats:
                summary_writer.add_scalar('Loss/{}'.format(typ),
                                          loss_stats[typ], batch_id + 1)
            summary_writer.add_scalar('Loss/Total', sum(loss_stats.values()),
                                      batch_id + 1)
            summary_writer.add_scalar('LR', im_fwd_agent.get_lr(optimizer),
                                      batch_id + 1)
            summary_writer.add_scalar('Speed', speed, batch_id + 1)
            # Add a histogram of the batch task IDs, to make sure it picks a
            # variety of task
            batch_templates = np.array(
                dataset.task_ids)[batch_data['task_indices'].reshape(
                    (-1, ))].tolist()
            batch_templates = np.array(
                [int(el.split(':')[0]) for el in batch_templates])
            gpu_mem_max = max([
                torch.cuda.max_memory_allocated(device=i)
                for i in range(torch.cuda.device_count())
            ])
            summary_writer.add_scalar('GPU/Mem/Max', gpu_mem_max,
                                      batch_id + 1)
            summary_writer.add_histogram('Templates',
                                         batch_templates,
                                         global_step=(batch_id + 1),
                                         bins=25)
        # Visualize a couple train videos, and actual rollouts if pix is
        # being trained
        # Just visualizing the first 256 videos in case the batch size is
        # larger; somehow the visualizations get corrupted (grey bg) for
        # more. Also no point filling up the memory.
        # Storing less frequently than the rest of the logs (takes lot of space)
        if n_fwd_times > 0 and (batch_id + 1) % (report_every * 10) == 0:
            batch_vid_obs = batch_data['vid_obs']
            batch_vid_obs = batch_vid_obs.reshape(
                [-1] + list(batch_vid_obs.shape[2:]))
            batch_vid_obs = batch_vid_obs.to(device)
            vis_fwd_times = n_fwd_times if 'classification_model' in mod_names else None
            videos = gen_vis_vid_preds(batch_vid_obs[:256],
                                       batch_obj_obs[:256],
                                       model,
                                       n_fwd_times=vis_fwd_times,
                                       run_decode=True,
                                       n_hist_frames=n_hist_frames)
            summary_writer.add_video('InputAndRollout/train', videos,
                                     (batch_id + 1))
        if (batch_id + 1) % full_eval_every == 0:
            run_full_eval(batch_id)
        if scheduler is not None:
            scheduler.step()
    return model.cpu()
[ "def", "train", "(", "cls", ",", "model", ",", "dataset", ",", "output_dir", ",", "summary_writer", ",", "full_eval_from_model", ",", "cfg", ")", ":", "updates", "=", "cfg", ".", "train", ".", "num_iter", "report_every", "=", "cfg", ".", "train", ".", "report_every", "save_checkpoints_every", "=", "cfg", ".", "train", ".", "save_checkpoints_every", "full_eval_every", "=", "cfg", ".", "train", ".", "full_eval_every", "train_batch_size", "=", "cfg", ".", "train", ".", "batch_size", "print", "(", "train_batch_size", ")", "print", "(", "cfg", ".", "eval", ".", "batch_size", ")", "max_frames_fwd", "=", "cfg", ".", "train", ".", "frames_per_clip", "n_hist_frames", "=", "cfg", ".", "train", ".", "n_hist_frames", "# Frames used to predict the future", "loss_cfg", "=", "cfg", ".", "train", ".", "obj_loss", "opt_params", "=", "cfg", ".", "opt", "n_fwd_times", "=", "cfg", ".", "train", ".", "n_fwd_times", "# nslices (slice out the input for training)", "num_slices", "=", "cfg", ".", "train", ".", "num_slices", "if", "max_frames_fwd", "is", "not", "None", "and", "(", "max_frames_fwd", "<=", "n_hist_frames", ")", ":", "logging", ".", "warning", "(", "'Cant train prediction model, max_frames_fwd too low'", ")", "device", "=", "nets", ".", "DEVICE", "model", ".", "train", "(", ")", "model", ".", "to", "(", "device", ")", "logging", ".", "info", "(", "\"%s\"", ",", "model", ")", "train_modules_subset", "=", "cfg", ".", "train", ".", "modules_to_train", "params_to_train", "=", "[", "]", "if", "train_modules_subset", "is", "not", "None", ":", "mod_names", "=", "train_modules_subset", ".", "split", "(", "'%'", ")", "logging", ".", "warning", "(", "'Training only a few modules, listed below. NOTE: '", "'BNs/dropout will still be in train mode. Explicitly '", "'set those to eval mode if thats not desired.'", ")", "for", "mod_name", "in", "mod_names", ":", "mod", "=", "getattr", "(", "model", ".", "module", ",", "mod_name", ")", "logging", ".", "warning", "(", "'Training %s: %s'", ",", "mod_name", ",", "mod", ")", "params_to_train", ".", "extend", "(", "mod", ".", "parameters", "(", ")", ")", "else", ":", "mod_names", "=", "[", "]", "params_to_train", "=", "model", ".", "parameters", "(", ")", "optimizer", "=", "hydra", ".", "utils", ".", "instantiate", "(", "opt_params", ",", "params_to_train", ")", "scheduler", "=", "torch", ".", "optim", ".", "lr_scheduler", ".", "CosineAnnealingLR", "(", "optimizer", ",", "T_max", "=", "updates", ")", "logging", ".", "info", "(", "'Starting actual training for %d updates'", ",", "updates", ")", "last_checkpoint", "=", "get_latest_checkpoint", "(", "output_dir", ")", "batch_start", "=", "0", "# By default, starting from iteration 0, unles loading mdl", "if", "last_checkpoint", "is", "not", "None", ":", "logging", ".", "info", "(", "'Going to load from %s'", ",", "last_checkpoint", ")", "last_checkpoint", "=", "torch", ".", "load", "(", "last_checkpoint", ")", "model", ".", "load_state_dict", "(", "last_checkpoint", "[", "'model'", "]", ")", "optimizer", ".", "load_state_dict", "(", "last_checkpoint", "[", "'optim'", "]", ")", "# Subtracting 1 since we store batch_id + 1 when calling save_agent", "batch_start", "=", "last_checkpoint", "[", "'done_batches'", "]", "-", "1", "if", "scheduler", "is", "not", "None", ":", "scheduler", ".", "load_state_dict", "(", "last_checkpoint", "[", "'scheduler'", "]", ")", "def", "run_full_eval", "(", "batch_id", ")", ":", "results", "=", "{", "}", "# To store to a json", "eval_stats", "=", "full_eval_from_model", "(", "model", ")", 
"metric", "=", "eval_stats", ".", "compute_all_metrics", "(", ")", "results", "[", "'metrics'", "]", "=", "metric", "results", "[", "'metrics_rollout'", "]", "=", "eval_stats", ".", "compute_all_metrics_over_rollout", "(", ")", "results", "[", "'metrics_per_task'", "]", "=", "eval_stats", ".", "compute_all_metrics_per_task", "(", ")", "max_test_attempts_per_task", "=", "(", "cfg", ".", "max_test_attempts_per_task", "or", "phyre", ".", "MAX_TEST_ATTEMPTS", ")", "results", "[", "'parsed_args'", "]", "=", "dict", "(", "# cfg=cfg, # Not json serializable, anyway will be stored in dir", "main_kwargs", "=", "dict", "(", "eval_setup_name", "=", "cfg", ".", "eval_setup_name", ",", "fold_id", "=", "cfg", ".", "fold_id", ",", "use_test_split", "=", "cfg", ".", "use_test_split", ",", "agent_type", "=", "cfg", ".", "agent", ".", "type", ",", "max_test_attempts_per_task", "=", "max_test_attempts_per_task", ",", "output_dir", "=", "output_dir", ")", ")", "results", "[", "'target_metric'", "]", "=", "(", "results", "[", "'metrics'", "]", "[", "'independent_solved_by_aucs'", "]", "[", "max_test_attempts_per_task", "]", ")", "results", "[", "'target_metric_over_time'", "]", "=", "[", "el", "[", "'independent_solved_by_aucs'", "]", "[", "max_test_attempts_per_task", "]", "for", "el", "in", "results", "[", "'metrics_rollout'", "]", "]", "logging", ".", "info", "(", "'Iter %d: %s; Over rollout: %s'", ",", "(", "batch_id", "+", "1", ")", ",", "results", "[", "'target_metric'", "]", ",", "results", "[", "'target_metric_over_time'", "]", ")", "score", "=", "metric", "[", "'independent_solved_by_aucs'", "]", "[", "-", "1", "]", "summary_writer", ".", "add_scalar", "(", "'FullEval/AUCCESS'", ",", "score", ",", "batch_id", "+", "1", ")", "for", "solved_by_iter", "in", "metric", "[", "'global_solved_by'", "]", ":", "summary_writer", ".", "add_scalar", "(", "'FullEval/solved_by_{}'", ".", "format", "(", "solved_by_iter", ")", ",", "metric", "[", "'global_solved_by'", "]", "[", "solved_by_iter", "]", ",", "batch_id", "+", "1", ")", "logging", ".", "info", "(", "'Full eval perf @ %d: %s'", ",", "batch_id", "+", "1", ",", "score", ")", "for", "i", ",", "metric", "in", "enumerate", "(", "results", "[", "'metrics_rollout'", "]", ")", ":", "summary_writer", ".", "add_scalar", "(", "'FullEvalRollout/AUCCESS/{}'", ".", "format", "(", "i", "+", "1", ")", ",", "metric", "[", "'independent_solved_by_aucs'", "]", "[", "-", "1", "]", ",", "batch_id", "+", "1", ")", "summary_writer", ".", "add_scalar", "(", "'FullEvalRollout/solved_by_100/{}'", ".", "format", "(", "i", ")", ",", "metric", "[", "'global_solved_by'", "]", "[", "100", "]", ",", "batch_id", "+", "1", ")", "summary_writer", ".", "add_scalar", "(", "'FullEvalRollout/prediction_accuracy/'", ",", "metric", "[", "'pred_acc'", "]", ",", "batch_id", "+", "1", ")", "respath", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'results_intermediate/{:08d}.json'", ".", "format", "(", "batch_id", "+", "1", ")", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "respath", ")", ",", "exist_ok", "=", "True", ")", "with", "open", "(", "respath", ",", "'w'", ")", "as", "fout", ":", "json", ".", "dump", "(", "results", ",", "fout", ")", "logging", ".", "info", "(", "'Report every %d; full eval every %d'", ",", "report_every", ",", "full_eval_every", ")", "if", "save_checkpoints_every", ">", "full_eval_every", ":", "save_checkpoints_every", "-=", "save_checkpoints_every", "%", "full_eval_every", "losses_report", "=", "{", "}", "last_time", "=", "time", 
".", "time", "(", ")", "assert", "train_batch_size", ">", "1", "and", "train_batch_size", "%", "2", "==", "0", ",", "(", "'Needs to get 2 elements at least to balance out'", ")", "for", "batch_data_id", ",", "batch_data", "in", "enumerate", "(", "torch", ".", "utils", ".", "data", ".", "DataLoader", "(", "dataset", ",", "num_workers", "=", "im_fwd_agent", ".", "get_num_workers", "(", "cfg", ".", "train", ".", "data_loader", ".", "num_workers", ",", "dataset", ".", "frames_per_clip", ")", ",", "pin_memory", "=", "False", ",", "# Asking for half the batch size since the dataloader is designed", "# to give 2 elements per batch (for class balancing)", "batch_size", "=", "train_batch_size", "//", "2", ")", ")", ":", "batch_id", "=", "batch_data_id", "+", "batch_start", "if", "(", "batch_id", "+", "1", ")", ">=", "updates", ":", "im_fwd_agent", ".", "save_agent", "(", "output_dir", ",", "batch_id", "+", "1", ",", "model", ",", "optimizer", ",", "scheduler", ")", "break", "model", ".", "train", "(", ")", "batch_is_solved", "=", "batch_data", "[", "'is_solved'", "]", "batch_is_solved", "=", "batch_is_solved", ".", "to", "(", "device", ",", "non_blocking", "=", "True", ")", "batch_is_solved", "=", "batch_is_solved", ".", "reshape", "(", "(", "-", "1", ",", ")", ")", "batch_obj_obs", "=", "batch_data", "[", "'obj_obs'", "]", "batch_obj_obs", "=", "batch_obj_obs", ".", "reshape", "(", "[", "-", "1", "]", "+", "list", "(", "batch_obj_obs", ".", "shape", "[", "2", ":", "]", ")", ")", "batch_obj_obs", "=", "batch_obj_obs", ".", "to", "(", "device", ")", "# Run the forward classifcation model on the object frames", "train_noise_frac", "=", "0.0", "if", "cfg", ".", "agent", ".", "train_with_noise", ":", "if", "(", "batch_id", "/", "updates", ")", ">", "cfg", ".", "agent", ".", "decay_noise_end", ":", "train_noise_frac", "=", "0.0", "elif", "(", "batch_id", "/", "updates", ")", "<", "cfg", ".", "agent", ".", "decay_noise_start", ":", "train_noise_frac", "=", "cfg", ".", "agent", ".", "train_noise_percent", "else", ":", "start_noise_decay", "=", "cfg", ".", "agent", ".", "decay_noise_start", "*", "updates", "end_noise_decay", "=", "cfg", ".", "agent", ".", "decay_noise_end", "*", "updates", "noise_decay_updates", "=", "end_noise_decay", "-", "start_noise_decay", "train_noise_frac", "=", "cfg", ".", "agent", ".", "train_noise_percent", "*", "(", "1", "-", "(", "batch_id", "-", "start_noise_decay", ")", "/", "noise_decay_updates", ")", "_", ",", "batch_losses", "=", "model", ".", "forward", "(", "batch_obj_obs", ",", "batch_is_solved", ",", "n_hist_frames", "=", "n_hist_frames", ",", "n_fwd_times", "=", "n_fwd_times", ",", "compute_losses", "=", "True", ",", "need_intermediate", "=", "False", ",", "#loss_cfg.on_intermediate,", "nslices", "=", "num_slices", ",", "train_noise_frac", "=", "train_noise_frac", ",", "need_pixels", "=", "False", ")", "optimizer", ".", "zero_grad", "(", ")", "total_loss", "=", "0", "# Mean over each loss type from each replica", "for", "loss_type", "in", "batch_losses", ":", "loss_wt", "=", "getattr", "(", "loss_cfg", ",", "'wt_'", "+", "loss_type", ")", "if", "loss_wt", "<=", "0", ":", "continue", "loss_val", "=", "loss_wt", "*", "torch", ".", "mean", "(", "batch_losses", "[", "loss_type", "]", ",", "dim", "=", "0", ")", "if", "loss_type", "not", "in", "losses_report", ":", "losses_report", "[", "loss_type", "]", "=", "[", "]", "losses_report", "[", "loss_type", "]", ".", "append", "(", "loss_val", ".", "item", "(", ")", ")", "total_loss", "+=", "loss_val", "total_loss", ".", 
"backward", "(", ")", "optimizer", ".", "step", "(", ")", "if", "(", "save_checkpoints_every", ">", "0", "and", "(", "batch_id", "+", "1", ")", "%", "save_checkpoints_every", "==", "0", ")", ":", "im_fwd_agent", ".", "save_agent", "(", "output_dir", ",", "batch_id", "+", "1", ",", "model", ",", "optimizer", ",", "scheduler", ")", "if", "(", "batch_id", "+", "1", ")", "%", "report_every", "==", "0", ":", "speed", "=", "report_every", "/", "(", "time", ".", "time", "(", ")", "-", "last_time", ")", "last_time", "=", "time", ".", "time", "(", ")", "loss_stats", "=", "{", "typ", ":", "np", ".", "mean", "(", "losses_report", "[", "typ", "]", "[", "-", "report_every", ":", "]", ")", "for", "typ", "in", "losses_report", "if", "len", "(", "losses_report", "[", "typ", "]", ")", ">", "0", "}", "logging", ".", "info", "(", "'Iter: %s, examples: %d, mean loss: %s, speed: %.1f batch/sec,'", "' lr: %f'", ",", "batch_id", "+", "1", ",", "(", "batch_id", "+", "1", ")", "*", "train_batch_size", ",", "loss_stats", ",", "speed", ",", "im_fwd_agent", ".", "get_lr", "(", "optimizer", ")", ")", "for", "typ", "in", "loss_stats", ":", "summary_writer", ".", "add_scalar", "(", "'Loss/{}'", ".", "format", "(", "typ", ")", ",", "loss_stats", "[", "typ", "]", ",", "batch_id", "+", "1", ")", "summary_writer", ".", "add_scalar", "(", "'Loss/Total'", ",", "sum", "(", "loss_stats", ".", "values", "(", ")", ")", ",", "batch_id", "+", "1", ")", "summary_writer", ".", "add_scalar", "(", "'LR'", ",", "im_fwd_agent", ".", "get_lr", "(", "optimizer", ")", ",", "batch_id", "+", "1", ")", "summary_writer", ".", "add_scalar", "(", "'Speed'", ",", "speed", ",", "batch_id", "+", "1", ")", "# Add a histogram of the batch task IDs, to make sure it picks a", "# variety of task", "batch_templates", "=", "np", ".", "array", "(", "dataset", ".", "task_ids", ")", "[", "batch_data", "[", "'task_indices'", "]", ".", "reshape", "(", "(", "-", "1", ",", ")", ")", "]", ".", "tolist", "(", ")", "batch_templates", "=", "np", ".", "array", "(", "[", "int", "(", "el", ".", "split", "(", "':'", ")", "[", "0", "]", ")", "for", "el", "in", "batch_templates", "]", ")", "gpu_mem_max", "=", "max", "(", "[", "torch", ".", "cuda", ".", "max_memory_allocated", "(", "device", "=", "i", ")", "for", "i", "in", "range", "(", "torch", ".", "cuda", ".", "device_count", "(", ")", ")", "]", ")", "summary_writer", ".", "add_scalar", "(", "'GPU/Mem/Max'", ",", "gpu_mem_max", ",", "batch_id", "+", "1", ")", "summary_writer", ".", "add_histogram", "(", "'Templates'", ",", "batch_templates", ",", "global_step", "=", "(", "batch_id", "+", "1", ")", ",", "bins", "=", "25", ")", "# Visualize a couple train videos, and actual rollouts if pix is", "# being trained", "# Just visualizing the first 256 videos in case the batch size is", "# larger; somehow the visualizations get corrupted (grey bg) for", "# more. 
Also no point filling up the memory.", "# Storing less frequently than the rest of the logs (takes lot of space)", "if", "n_fwd_times", ">", "0", "and", "(", "batch_id", "+", "1", ")", "%", "(", "report_every", "*", "10", ")", "==", "0", ":", "batch_vid_obs", "=", "batch_data", "[", "'vid_obs'", "]", "batch_vid_obs", "=", "batch_vid_obs", ".", "reshape", "(", "[", "-", "1", "]", "+", "list", "(", "batch_vid_obs", ".", "shape", "[", "2", ":", "]", ")", ")", "batch_vid_obs", "=", "batch_vid_obs", ".", "to", "(", "device", ")", "vis_fwd_times", "=", "n_fwd_times", "if", "'classification_model'", "in", "mod_names", "else", "None", "videos", "=", "gen_vis_vid_preds", "(", "batch_vid_obs", "[", ":", "256", "]", ",", "batch_obj_obs", "[", ":", "256", "]", ",", "model", ",", "n_fwd_times", "=", "vis_fwd_times", ",", "run_decode", "=", "True", ",", "n_hist_frames", "=", "n_hist_frames", ")", "summary_writer", ".", "add_video", "(", "'InputAndRollout/train'", ",", "videos", ",", "(", "batch_id", "+", "1", ")", ")", "if", "(", "batch_id", "+", "1", ")", "%", "full_eval_every", "==", "0", ":", "run_full_eval", "(", "batch_id", ")", "if", "scheduler", "is", "not", "None", ":", "scheduler", ".", "step", "(", ")", "return", "model", ".", "cpu", "(", ")" ]
[ 132, 4 ]
[ 391, 26 ]
python
en
['en', 'ja', 'en']
True
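One detail worth pulling out of the train loop above is its noise schedule: the full `train_noise_percent` is used until `decay_noise_start`, then ramps linearly down to zero at `decay_noise_end` (both expressed as fractions of the run). A hedged standalone version of that schedule; the example config values are made up:

def noise_fraction(batch_id, updates, percent, decay_start, decay_end):
    """Linear decay of the training-noise fraction over the run."""
    progress = batch_id / updates
    if progress > decay_end:
        return 0.0
    if progress < decay_start:
        return percent
    start, end = decay_start * updates, decay_end * updates
    return percent * (1 - (batch_id - start) / (end - start))

# Example: 20% noise for the first half, fully gone by 80% of a 1000-update run.
for b in (0, 400, 650, 900):
    print(b, round(noise_fraction(b, 1000, 0.2, 0.5, 0.8), 3))
# -> 0 0.2, 400 0.2, 650 0.1, 900 0.0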
read_random_bits
(nbits)
Reads 'nbits' random bits. If nbits isn't a whole number of bytes, an extra byte will be appended with only the lower bits set.
Reads 'nbits' random bits.
def read_random_bits(nbits):
    """Reads 'nbits' random bits.

    If nbits isn't a whole number of bytes, an extra byte will be appended with
    only the lower bits set.
    """

    nbytes, rbits = divmod(nbits, 8)

    # Get the random bytes
    randomdata = os.urandom(nbytes)

    # Add the remaining random bits
    if rbits > 0:
        randomvalue = ord(os.urandom(1))
        randomvalue >>= (8 - rbits)
        randomdata = byte(randomvalue) + randomdata

    return randomdata
[ "def", "read_random_bits", "(", "nbits", ")", ":", "nbytes", ",", "rbits", "=", "divmod", "(", "nbits", ",", "8", ")", "# Get the random bytes", "randomdata", "=", "os", ".", "urandom", "(", "nbytes", ")", "# Add the remaining random bits", "if", "rbits", ">", "0", ":", "randomvalue", "=", "ord", "(", "os", ".", "urandom", "(", "1", ")", ")", "randomvalue", ">>=", "(", "8", "-", "rbits", ")", "randomdata", "=", "byte", "(", "randomvalue", ")", "+", "randomdata", "return", "randomdata" ]
[ 26, 0 ]
[ 44, 21 ]
python
en
['en', 'bg', 'en']
True
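The helper returns ceil(nbits / 8) bytes, with the partial byte masked down to its low bits by the right shift. A quick hedged check of that size arithmetic using only the standard library (the function below is a rewrite for illustration, not the library's own code):

import math
import os

def read_random_bits(nbits):
    nbytes, rbits = divmod(nbits, 8)
    data = os.urandom(nbytes)
    if rbits > 0:
        # Keep only the low `rbits` bits of one extra random byte.
        data = bytes([os.urandom(1)[0] >> (8 - rbits)]) + data
    return data

for nbits in (8, 12, 509):
    assert len(read_random_bits(nbits)) == math.ceil(nbits / 8)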
read_random_int
(nbits)
Reads a random integer of approximately nbits bits.
Reads a random integer of approximately nbits bits.
def read_random_int(nbits):
    """Reads a random integer of approximately nbits bits.
    """

    randomdata = read_random_bits(nbits)
    value = transform.bytes2int(randomdata)

    # Ensure that the number is large enough to just fill out the required
    # number of bits.
    value |= 1 << (nbits - 1)

    return value
[ "def", "read_random_int", "(", "nbits", ")", ":", "randomdata", "=", "read_random_bits", "(", "nbits", ")", "value", "=", "transform", ".", "bytes2int", "(", "randomdata", ")", "# Ensure that the number is large enough to just fill out the required", "# number of bits.", "value", "|=", "1", "<<", "(", "nbits", "-", "1", ")", "return", "value" ]
[ 47, 0 ]
[ 58, 16 ]
python
en
['en', 'ca', 'en']
True
read_random_odd_int
(nbits)
Reads a random odd integer of approximately nbits bits. >>> read_random_odd_int(512) & 1 1
Reads a random odd integer of approximately nbits bits.
def read_random_odd_int(nbits):
    """Reads a random odd integer of approximately nbits bits.

    >>> read_random_odd_int(512) & 1
    1
    """

    value = read_random_int(nbits)

    # Make sure it's odd
    return value | 1
[ "def", "read_random_odd_int", "(", "nbits", ")", ":", "value", "=", "read_random_int", "(", "nbits", ")", "# Make sure it's odd", "return", "value", "|", "1" ]
[ 61, 0 ]
[ 71, 20 ]
python
en
['en', 'cs', 'en']
True
randint
(maxvalue)
Returns a random integer x with 1 <= x <= maxvalue May take a very long time in specific situations. If maxvalue needs N bits to store, the closer maxvalue is to (2 ** N) - 1, the faster this function is.
Returns a random integer x with 1 <= x <= maxvalue
def randint(maxvalue):
    """Returns a random integer x with 1 <= x <= maxvalue

    May take a very long time in specific situations. If maxvalue needs N bits
    to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
    is.
    """

    bit_size = common.bit_size(maxvalue)

    tries = 0
    while True:
        value = read_random_int(bit_size)
        if value <= maxvalue:
            break

        if tries % 10 == 0 and tries:
            # After a lot of tries to get the right number of bits but still
            # smaller than maxvalue, decrease the number of bits by 1. That'll
            # dramatically increase the chances to get a large enough number.
            bit_size -= 1
        tries += 1

    return value
[ "def", "randint", "(", "maxvalue", ")", ":", "bit_size", "=", "common", ".", "bit_size", "(", "maxvalue", ")", "tries", "=", "0", "while", "True", ":", "value", "=", "read_random_int", "(", "bit_size", ")", "if", "value", "<=", "maxvalue", ":", "break", "if", "tries", "%", "10", "==", "0", "and", "tries", ":", "# After a lot of tries to get the right number of bits but still", "# smaller than maxvalue, decrease the number of bits by 1. That'll", "# dramatically increase the chances to get a large enough number.", "bit_size", "-=", "1", "tries", "+=", "1", "return", "value" ]
[ 74, 0 ]
[ 97, 16 ]
python
en
['en', 'ca', 'en']
True
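`randint` is rejection sampling: it draws `bit_size`-bit integers (top bit forced to 1 by `read_random_int`) and retries until one lands in range, occasionally shrinking the bit width when rejections pile up. A hedged standalone sketch of the same idea built on the standard `secrets` module rather than the library's own helpers:

import secrets

def randint_at_most(maxvalue):
    """Draw x with 1 <= x <= maxvalue by rejection sampling."""
    bit_size = maxvalue.bit_length()
    tries = 0
    while True:
        # Force the top bit so the candidate really spans bit_size bits.
        value = secrets.randbits(bit_size) | (1 << (bit_size - 1))
        if value <= maxvalue:
            return value
        tries += 1
        if tries % 10 == 0:
            # Too many rejections: drop one bit so candidates shrink into range.
            bit_size -= 1

print(randint_at_most(1000))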
BaseNSVD1.__init__
(self, train_file, test_file, output_file=None, factors=10, init_mean=0, init_stdev=0.1, sep='\t', output_sep='\t', random_seed=None)
This class is base for all NSVD1 algorithms. :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File with dir to write the final predictions :type output_file: str, default None :param factors: Number of latent factors per user/item :type factors: int, default 10 :param init_mean: Mean of the normal distribution used to initialize the latent factors :type init_mean: float, default 0 :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors :type init_stdev: float, default 0.1 :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t' :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments. :type random_seed: int, default None
This class is base for all NSVD1 algorithms.
def __init__(self, train_file, test_file, output_file=None, factors=10,
             init_mean=0, init_stdev=0.1, sep='\t', output_sep='\t',
             random_seed=None):
    """
    This class is base for all NSVD1 algorithms.

    :param train_file: File which contains the train set. This file needs to have at least 3 columns
    (user item feedback_value).
    :type train_file: str

    :param test_file: File which contains the test set. This file needs to have at least 3 columns
    (user item feedback_value).
    :type test_file: str, default None

    :param output_file: File with dir to write the final predictions
    :type output_file: str, default None

    :param factors: Number of latent factors per user/item
    :type factors: int, default 10

    :param init_mean: Mean of the normal distribution used to initialize the latent factors
    :type init_mean: float, default 0

    :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
    :type init_stdev: float, default 0.1

    :param sep: Delimiter for input files
    :type sep: str, default '\t'

    :param output_sep: Delimiter for output file
    :type output_sep: str, default '\t'

    :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
    :type random_seed: int, default None
    """
    super(BaseNSVD1, self).__init__(train_file=train_file, test_file=test_file,
                                    output_file=output_file, sep=sep,
                                    output_sep=output_sep)

    self.factors = factors
    self.init_mean = init_mean
    self.init_stdev = init_stdev

    if random_seed is not None:
        np.random.seed(random_seed)

    # internal vars
    self.number_users = len(self.users)
    self.number_items = len(self.items)
    self.item_to_item_id = {}
    self.item_id_to_item = {}
    self.user_to_user_id = {}
    self.user_id_to_user = {}
    self.x = None
    self.p = None
    self.q = None
    self.w = None
    self.b = None
    self.c = None
    self.metadata = None
    self.number_metadata = None
    self.last_rmse = 0
    self.predictions = []
[ "def", "__init__", "(", "self", ",", "train_file", ",", "test_file", ",", "output_file", "=", "None", ",", "factors", "=", "10", ",", "init_mean", "=", "0", ",", "init_stdev", "=", "0.1", ",", "sep", "=", "'\\t'", ",", "output_sep", "=", "'\\t'", ",", "random_seed", "=", "None", ")", ":", "super", "(", "BaseNSVD1", ",", "self", ")", ".", "__init__", "(", "train_file", "=", "train_file", ",", "test_file", "=", "test_file", ",", "output_file", "=", "output_file", ",", "sep", "=", "sep", ",", "output_sep", "=", "output_sep", ")", "self", ".", "factors", "=", "factors", "self", ".", "init_mean", "=", "init_mean", "self", ".", "init_stdev", "=", "init_stdev", "if", "random_seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "random_seed", ")", "# internal vars", "self", ".", "number_users", "=", "len", "(", "self", ".", "users", ")", "self", ".", "number_items", "=", "len", "(", "self", ".", "items", ")", "self", ".", "item_to_item_id", "=", "{", "}", "self", ".", "item_id_to_item", "=", "{", "}", "self", ".", "user_to_user_id", "=", "{", "}", "self", ".", "user_id_to_user", "=", "{", "}", "self", ".", "x", "=", "None", "self", ".", "p", "=", "None", "self", ".", "q", "=", "None", "self", ".", "w", "=", "None", "self", ".", "b", "=", "None", "self", ".", "c", "=", "None", "self", ".", "metadata", "=", "None", "self", ".", "number_metadata", "=", "None", "self", ".", "last_rmse", "=", "0", "self", ".", "predictions", "=", "[", "]" ]
[ 24, 4 ]
[ 86, 29 ]
python
en
['en', 'error', 'th']
False
BaseNSVD1.init_model
(self)
Method to treat and initialize the model
Method to treat and initialize the model
def init_model(self):
    """
    Method to treat and initialize the model
    """

    # Map items and users with their respective ids and upgrade unobserved items with test set samples
    for i, item in enumerate(self.items):
        self.item_to_item_id.update({item: i})
        self.item_id_to_item.update({i: item})

    for u, user in enumerate(self.users):
        self.user_to_user_id.update({user: u})
        self.user_id_to_user.update({u: user})
[ "def", "init_model", "(", "self", ")", ":", "# Map items and users with their respective ids and upgrade unobserved items with test set samples", "for", "i", ",", "item", "in", "enumerate", "(", "self", ".", "items", ")", ":", "self", ".", "item_to_item_id", ".", "update", "(", "{", "item", ":", "i", "}", ")", "self", ".", "item_id_to_item", ".", "update", "(", "{", "i", ":", "item", "}", ")", "for", "u", ",", "user", "in", "enumerate", "(", "self", ".", "users", ")", ":", "self", ".", "user_to_user_id", ".", "update", "(", "{", "user", ":", "u", "}", ")", "self", ".", "user_id_to_user", ".", "update", "(", "{", "u", ":", "user", "}", ")" ]
[ 88, 4 ]
[ 100, 50 ]
python
en
['en', 'error', 'th']
False
BaseNSVD1.predict
(self)
This method computes a final rating for unknown pairs (user, item)
This method computes a final rating for unknown pairs (user, item)
def predict(self):
    """
    This method computes a final rating for unknown pairs (user, item)
    """

    if self.test_file is not None:
        for user in self.test_set['users']:
            for item in self.test_set['feedback'][user]:
                rui = self._predict(self.user_to_user_id[user], self.item_to_item_id[item])
                self.predictions.append((user, item, rui))
    else:
        # NotImplemented is a comparison sentinel, not an exception; raising
        # it is a TypeError in Python 3, so raise the error class instead.
        raise NotImplementedError
[ "def", "predict", "(", "self", ")", ":", "if", "self", ".", "test_file", "is", "not", "None", ":", "for", "user", "in", "self", ".", "test_set", "[", "'users'", "]", ":", "for", "item", "in", "self", ".", "test_set", "[", "'feedback'", "]", "[", "user", "]", ":", "rui", "=", "self", ".", "_predict", "(", "self", ".", "user_to_user_id", "[", "user", "]", ",", "self", ".", "item_to_item_id", "[", "item", "]", ")", "self", ".", "predictions", ".", "append", "(", "(", "user", ",", "item", ",", "rui", ")", ")", "else", ":", "raise", "NotImplemented" ]
[ 120, 4 ]
[ 132, 32 ]
python
en
['en', 'error', 'th']
False