Each row in the dataset has the following columns:

| column | type |
|---|---|
| identifier | string (1 to 155 chars) |
| parameters | string (2 to 6.09k chars) |
| docstring | string (11 to 63.4k chars) |
| docstring_summary | string (0 to 63.4k chars) |
| function | string (29 to 99.8k chars) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 class) |
| docstring_language | string (2 to 7 chars) |
| docstring_language_predictions | string (18 to 23 chars) |
| is_langid_reliable | string (2 classes) |
### _get_encoding_from_headers

parameters: `(headers: ResponseHeaders)` | span: (146, 0)-(153, 15) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
    """Determine if we have any encoding information in our headers."""
    if headers and "Content-Type" in headers:
        content_type, params = cgi.parse_header(headers["Content-Type"])
        if "charset" in params:
            return params['charset']
    return None
```
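As a quick illustration of the control flow above, here is a hedged usage sketch; it assumes the module's own imports (`cgi`, `Optional`) are in scope and uses a plain dict in place of pip's `ResponseHeaders` type:

```python
# A plain dict stands in for pip's ResponseHeaders (a case-insensitive
# mapping in the real module); cgi.parse_header splits the media type
# from its parameters, so "charset" lands in the params dict.
headers = {"Content-Type": "text/html; charset=UTF-8"}
assert _get_encoding_from_headers(headers) == "UTF-8"

# No headers at all, or no charset parameter, yields None.
assert _get_encoding_from_headers({}) is None
```

Note that `cgi.parse_header` lives in the `cgi` module, which is deprecated in recent Python versions; that is one reason later pip releases moved away from this call.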
### _determine_base_url

parameters: `(document: HTMLElement, page_url: str)` | span: (156, 0)-(172, 19) | language: python | docstring language: en | langid predictions: ['en', 'no', 'en'] | reliable: True

```python
def _determine_base_url(document: HTMLElement, page_url: str) -> str:
    """Determine the HTML document's base URL.

    This looks for a ``<base>`` tag in the HTML document. If present, its href
    attribute denotes the base URL of anchor tags in the document. If there is
    no such tag (or if it does not have a valid href attribute), the HTML
    file's URL is used as the base URL.

    :param document: An HTML document representation. The current
        implementation expects the result of ``html5lib.parse()``.
    :param page_url: The URL of the HTML document.
    """
    for base in document.findall(".//base"):
        href = base.get("href")
        if href is not None:
            return href
    return page_url
```
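A small check of both branches, assuming `html5lib` is installed (the mirror hostname is invented for the example):

```python
import html5lib

# A <base href> in the document wins over the page's own URL.
doc = html5lib.parse(
    '<html><head><base href="https://mirror.example/simple/"></head></html>',
    namespaceHTMLElements=False,
)
assert _determine_base_url(doc, "https://pypi.org/simple/pip/") == "https://mirror.example/simple/"

# With no <base> tag, the page URL is returned unchanged.
doc = html5lib.parse("<html></html>", namespaceHTMLElements=False)
assert _determine_base_url(doc, "https://pypi.org/simple/pip/") == "https://pypi.org/simple/pip/"
```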
### _clean_url_path_part

parameters: `(part: str)` | span: (175, 0)-(180, 57) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _clean_url_path_part(part: str) -> str:
    """
    Clean a "part" of a URL path (i.e. after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    return urllib.parse.quote(urllib.parse.unquote(part))
```
### _clean_file_url_path

parameters: `(part: str)` | span: (183, 0)-(193, 73) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _clean_file_url_path(part: str) -> str:
    """
    Clean the first part of a URL path that corresponds to a local
    filesystem path (i.e. the first part after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    # Also, on Windows the path part might contain a drive letter which
    # should not be quoted. On Linux where drive letters do not
    # exist, the colon should be quoted. We rely on urllib.request
    # to do the right thing here.
    return urllib.request.pathname2url(urllib.request.url2pathname(part))
```
### _clean_url_path

parameters: `(path: str, is_local_path: bool)` | span: (200, 0)-(219, 33) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _clean_url_path(path: str, is_local_path: bool) -> str:
    """
    Clean the path portion of a URL.
    """
    if is_local_path:
        clean_func = _clean_file_url_path
    else:
        clean_func = _clean_url_path_part

    # Split on the reserved characters prior to cleaning so that
    # revision strings in VCS URLs are properly preserved.
    parts = _reserved_chars_re.split(path)

    cleaned_parts = []
    for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
        cleaned_parts.append(clean_func(to_clean))
        # Normalize %xx escapes (e.g. %2f -> %2F)
        cleaned_parts.append(reserved.upper())

    return ''.join(cleaned_parts)
```
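The function leans on two names defined elsewhere in the module, `_reserved_chars_re` and `pairwise`. For a self-contained sketch, plausible stand-ins (assumptions, not the module's verbatim definitions) would be:

```python
import itertools
import re
import urllib.parse

# Assumed stand-ins: the regex captures "@" and "%2F" so re.split()
# interleaves text segments with the separators that matched, and
# pairwise() consumes that interleaving two items at a time.
_reserved_chars_re = re.compile(r"(@|%2F)", re.IGNORECASE)

def pairwise(iterable):
    it = iter(iterable)
    return zip(it, it)

# With those in place, a VCS-style revision marker survives cleaning
# while the space is percent-quoted:
#   _clean_url_path("/a b/repo@v1.0", is_local_path=False)
#     -> "/a%20b/repo@v1.0"
```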
### _clean_link

parameters: `(url: str)` | span: (222, 0)-(234, 62) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _clean_link(url: str) -> str:
    """
    Make sure a link is fully quoted.
    For example, if ' ' occurs in the URL, it will be replaced with "%20",
    and without double-quoting other characters.
    """
    # Split the URL into parts according to the general structure
    # `scheme://netloc/path;parameters?query#fragment`.
    result = urllib.parse.urlparse(url)
    # If the netloc is empty, then the URL refers to a local filesystem path.
    is_local_path = not result.netloc
    path = _clean_url_path(result.path, is_local_path=is_local_path)
    return urllib.parse.urlunparse(result._replace(path=path))
```
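Given the helpers above, the docstring's claim can be checked directly; the hostnames are made up, and the second case shows why the unquote-then-quote dance matters, since it makes cleaning idempotent:

```python
assert _clean_link("https://files.example/my wheel.whl") == (
    "https://files.example/my%20wheel.whl"
)
# Already-quoted input is not quoted a second time:
assert _clean_link("https://files.example/my%20wheel.whl") == (
    "https://files.example/my%20wheel.whl"
)
```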
### _create_link_from_element

parameters: `(anchor: HTMLElement, page_url: str, base_url: str)` | span: (237, 0)-(264, 15) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _create_link_from_element(
    anchor: HTMLElement,
    page_url: str,
    base_url: str,
) -> Optional[Link]:
    """
    Convert an anchor element in a simple repository page to a Link.
    """
    href = anchor.get("href")
    if not href:
        return None

    url = _clean_link(urllib.parse.urljoin(base_url, href))
    pyrequire = anchor.get('data-requires-python')
    pyrequire = html.unescape(pyrequire) if pyrequire else None

    yanked_reason = anchor.get('data-yanked')
    if yanked_reason:
        yanked_reason = html.unescape(yanked_reason)

    link = Link(
        url,
        comes_from=page_url,
        requires_python=pyrequire,
        yanked_reason=yanked_reason,
    )

    return link
```
### with_cached_html_pages

parameters: `(fn: Callable[["HTMLPage"], Iterable[Link]])` | span: (280, 0)-(299, 26) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def with_cached_html_pages(
    fn: Callable[["HTMLPage"], Iterable[Link]],
) -> Callable[["HTMLPage"], List[Link]]:
    """
    Given a function that parses an Iterable[Link] from an HTMLPage, cache the
    function's result (keyed by CacheablePageContent), unless the HTMLPage
    `page` has `page.cache_link_parsing == False`.
    """

    @functools.lru_cache(maxsize=None)
    def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
        return list(fn(cacheable_page.page))

    @functools.wraps(fn)
    def wrapper_wrapper(page: "HTMLPage") -> List[Link]:
        if page.cache_link_parsing:
            return wrapper(CacheablePageContent(page))
        return list(fn(page))

    return wrapper_wrapper
```
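The decorator only works because `CacheablePageContent` (defined elsewhere in the module, and assumed here) implements `__eq__` and `__hash__` over the page's identity, which is what lets `functools.lru_cache` use it as a key. A hedged sketch of how it is applied:

```python
call_count = 0

@with_cached_html_pages
def expensive_parse(page):
    # Imagine html5lib parsing here; we only count invocations.
    global call_count
    call_count += 1
    return []

# For a page created with cache_link_parsing=True (the default), repeated
# calls with an equal page hit the lru_cache, so call_count stays at 1.
# For a page created with cache_link_parsing=False, every call re-parses.
```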
### parse_links

parameters: `(page: "HTMLPage")` | span: (303, 0)-(323, 18) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def parse_links(page: "HTMLPage") -> Iterable[Link]:
    """
    Parse an HTML document, and yield its anchor elements as Link objects.
    """
    document = html5lib.parse(
        page.content,
        transport_encoding=page.encoding,
        namespaceHTMLElements=False,
    )

    url = page.url
    base_url = _determine_base_url(document, url)
    for anchor in document.findall(".//a"):
        link = _create_link_from_element(
            anchor,
            page_url=url,
            base_url=base_url,
        )
        if link is None:
            continue
        yield link
```
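Putting the pieces together, a hedged end-to-end sketch; the index URL is invented, and `HTMLPage` is the class shown below:

```python
page = HTMLPage(
    content=(
        b'<html><body>'
        b'<a href="pkg-1.0.tar.gz" data-requires-python="&gt;=3.6">pkg</a>'
        b'</body></html>'
    ),
    encoding="utf-8",
    url="https://index.example/simple/pkg/",
)
for link in parse_links(page):
    # link.url has been joined against the base URL and fully quoted;
    # link.requires_python has been HTML-unescaped to ">=3.6".
    print(link.url, link.requires_python)
```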
### HTMLPage.__init__

parameters: `(self, content: bytes, encoding: Optional[str], url: str, cache_link_parsing: bool = True)` | span: (329, 4)-(346, 52) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def __init__(
    self,
    content: bytes,
    encoding: Optional[str],
    url: str,
    cache_link_parsing: bool = True,
) -> None:
    """
    :param encoding: the encoding to decode the given content.
    :param url: the URL from which the HTML was downloaded.
    :param cache_link_parsing: whether links parsed from this page's url
                               should be cached. PyPI index urls should
                               have this set to False, for example.
    """
    self.content = content
    self.encoding = encoding
    self.url = url
    self.cache_link_parsing = cache_link_parsing
```
### LinkCollector.create

parameters: `(cls, session: PipSession, options: Values, suppress_no_index: bool = False)` | span: (452, 4)-(479, 29) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def create(
    cls, session: PipSession,
    options: Values,
    suppress_no_index: bool = False
) -> "LinkCollector":
    """
    :param session: The Session to use to make requests.
    :param suppress_no_index: Whether to ignore the --no-index option
        when constructing the SearchScope object.
    """
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index and not suppress_no_index:
        logger.debug(
            'Ignoring indexes: %s',
            ','.join(redact_auth_from_url(url) for url in index_urls),
        )
        index_urls = []

    # Make sure find_links is a list before passing to create().
    find_links = options.find_links or []

    search_scope = SearchScope.create(
        find_links=find_links, index_urls=index_urls,
    )
    link_collector = LinkCollector(
        session=session, search_scope=search_scope,
    )
    return link_collector
```
### LinkCollector.fetch_page

parameters: `(self, location: Link)` | span: (485, 4)-(489, 61) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def fetch_page(self, location: Link) -> Optional[HTMLPage]:
    """
    Fetch an HTML page containing package links.
    """
    return _get_html_page(location, session=self.session)
```
### WKBReader.read

parameters: `(self, wkb)` | span: (15, 4)-(17, 46) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def read(self, wkb):
    "Return a GEOSGeometry for the given WKB buffer."
    return GEOSGeometry(super().read(wkb))
```
### WKTReader.read

parameters: `(self, wkt)` | span: (21, 4)-(23, 46) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def read(self, wkt):
    "Return a GEOSGeometry for the given WKT string."
    return GEOSGeometry(super().read(wkt))
```
### _wrapper

parameters: `(args: Optional[List[str]] = None)` | span: (6, 0)-(26, 21) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def _wrapper(args: Optional[List[str]] = None) -> int:
    """Central wrapper for all old entrypoints.

    Historically pip has had several entrypoints defined. Because of issues
    arising from PATH, sys.path, multiple Pythons, their interactions, and most
    of them having a pip installed, users suffer every time an entrypoint gets
    moved.

    To alleviate this pain, and provide a mechanism for warning users and
    directing them to an appropriate place for help, we now define all of
    our old entrypoints as wrappers for the current one.
    """
    sys.stderr.write(
        "WARNING: pip is being invoked by an old script wrapper. This will "
        "fail in a future version of pip.\n"
        "Please see https://github.com/pypa/pip/issues/5599 for advice on "
        "fixing the underlying issue.\n"
        "To avoid this problem you can invoke Python with '-m pip' instead of "
        "running pip directly.\n"
    )
    return main(args)
```
### _validate_clientsecrets

parameters: `(clientsecrets_dict)` | span: (67, 0)-(105, 35) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def _validate_clientsecrets(clientsecrets_dict):
    """Validate parsed client secrets from a file.

    Args:
        clientsecrets_dict: dict, a dictionary holding the client secrets.

    Returns:
        tuple, a string of the client type and the information parsed
        from the file.
    """
    _INVALID_FILE_FORMAT_MSG = (
        'Invalid file format. See '
        'https://developers.google.com/api-client-library/'
        'python/guide/aaa_client_secrets')

    if clientsecrets_dict is None:
        raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)

    try:
        (client_type, client_info), = clientsecrets_dict.items()
    except (ValueError, AttributeError):
        raise InvalidClientSecretsError(
            _INVALID_FILE_FORMAT_MSG + ' '
            'Expected a JSON object with a single property for a "web" or '
            '"installed" application')

    if client_type not in VALID_CLIENT:
        raise InvalidClientSecretsError(
            'Unknown client type: {0}.'.format(client_type))

    for prop_name in VALID_CLIENT[client_type]['required']:
        if prop_name not in client_info:
            raise InvalidClientSecretsError(
                'Missing property "{0}" in a client type of "{1}".'.format(
                    prop_name, client_type))

    for prop_name in VALID_CLIENT[client_type]['string']:
        if client_info[prop_name].startswith('[['):
            raise InvalidClientSecretsError(
                'Property "{0}" is not configured.'.format(prop_name))

    return client_type, client_info
```
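A minimal happy-path call, assuming `VALID_CLIENT` in this module requires `client_id`, `client_secret`, `redirect_uris`, `auth_uri`, and `token_uri` for the installed type (the exact property lists live elsewhere in the module, so treat them as an assumption):

```python
secrets = {
    "installed": {
        "client_id": "abc.apps.googleusercontent.com",  # placeholder values
        "client_secret": "not-a-real-secret",
        "redirect_uris": [],
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
    }
}
client_type, client_info = _validate_clientsecrets(secrets)
assert client_type == "installed"

# Anything other than a single-key {"web": ...} or {"installed": ...}
# mapping raises InvalidClientSecretsError.
```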
### loadfile

parameters: `(filename, cache=None)` | span: (128, 0)-(172, 35) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def loadfile(filename, cache=None):
    """Loading of client_secrets JSON file, optionally backed by a cache.

    Typical cache storage would be App Engine memcache service,
    but you can pass in any other cache client that implements
    these methods:

    * ``get(key, namespace=ns)``
    * ``set(key, value, namespace=ns)``

    Usage::

        # without caching
        client_type, client_info = loadfile('secrets.json')
        # using App Engine memcache service
        from google.appengine.api import memcache
        client_type, client_info = loadfile('secrets.json', cache=memcache)

    Args:
        filename: string, Path to a client_secrets.json file on a filesystem.
        cache: An optional cache service client that implements get() and set()
               methods. If not specified, the file is always being loaded from
               a filesystem.

    Raises:
        InvalidClientSecretsError: In case of a validation error or some
                                   I/O failure. Can happen only on cache miss.

    Returns:
        (client_type, client_info) tuple, as _loadfile() normally would.
        JSON contents is validated only during first load. Cache hits are not
        validated.
    """
    _SECRET_NAMESPACE = 'oauth2client:secrets#ns'

    if not cache:
        return _loadfile(filename)

    obj = cache.get(filename, namespace=_SECRET_NAMESPACE)
    if obj is None:
        client_type, client_info = _loadfile(filename)
        obj = {client_type: client_info}
        cache.set(filename, obj, namespace=_SECRET_NAMESPACE)

    return next(six.iteritems(obj))
```
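Any object exposing the two-method contract from the docstring works as a cache. A dict-backed sketch (the file path is a placeholder, so the call is left commented):

```python
class DictCache:
    """Minimal cache client satisfying the get/set contract above."""

    def __init__(self):
        self._store = {}

    def get(self, key, namespace=""):
        return self._store.get((namespace, key))

    def set(self, key, value, namespace=""):
        self._store[(namespace, key)] = value


cache = DictCache()
# First call reads and validates the file; later calls return the cached,
# unvalidated entry:
# client_type, client_info = loadfile("client_secrets.json", cache=cache)
```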
### _check_lazy_references

parameters: `(apps, ignore=None)` | span: (88, 0)-(204, 36) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _check_lazy_references(apps, ignore=None):
    """
    Ensure all lazy (i.e. string) model references have been resolved.

    Lazy references are used in various places throughout Django, primarily in
    related fields and model signals. Identify those common cases and provide
    more helpful error messages for them.

    The ignore parameter is used by StateApps to exclude swappable models from
    this check.
    """
    pending_models = set(apps._pending_operations) - (ignore or set())

    # Short circuit if there aren't any errors.
    if not pending_models:
        return []

    from django.db.models import signals
    model_signals = {
        signal: name for name, signal in vars(signals).items()
        if isinstance(signal, signals.ModelSignal)
    }

    def extract_operation(obj):
        """
        Take a callable found in Apps._pending_operations and identify the
        original callable passed to Apps.lazy_model_operation(). If that
        callable was a partial, return the inner, non-partial function and
        any arguments and keyword arguments that were supplied with it.

        obj is a callback defined locally in Apps.lazy_model_operation() and
        annotated there with a `func` attribute so as to imitate a partial.
        """
        operation, args, keywords = obj, [], {}
        while hasattr(operation, 'func'):
            args.extend(getattr(operation, 'args', []))
            keywords.update(getattr(operation, 'keywords', {}))
            operation = operation.func
        return operation, args, keywords

    def app_model_error(model_key):
        try:
            apps.get_app_config(model_key[0])
            model_error = "app '%s' doesn't provide model '%s'" % model_key
        except LookupError:
            model_error = "app '%s' isn't installed" % model_key[0]
        return model_error

    # Here are several functions which return CheckMessage instances for the
    # most common usages of lazy operations throughout Django. These functions
    # take the model that was being waited on as an (app_label, modelname)
    # pair, the original lazy function, and its positional and keyword args as
    # determined by extract_operation().

    def field_error(model_key, func, args, keywords):
        error_msg = (
            "The field %(field)s was declared with a lazy reference "
            "to '%(model)s', but %(model_error)s."
        )
        params = {
            'model': '.'.join(model_key),
            'field': keywords['field'],
            'model_error': app_model_error(model_key),
        }
        return Error(error_msg % params, obj=keywords['field'], id='fields.E307')

    def signal_connect_error(model_key, func, args, keywords):
        error_msg = (
            "%(receiver)s was connected to the '%(signal)s' signal with a "
            "lazy reference to the sender '%(model)s', but %(model_error)s."
        )
        receiver = args[0]
        # The receiver is either a function or an instance of class
        # defining a `__call__` method.
        if isinstance(receiver, types.FunctionType):
            description = "The function '%s'" % receiver.__name__
        elif isinstance(receiver, types.MethodType):
            description = "Bound method '%s.%s'" % (receiver.__self__.__class__.__name__, receiver.__name__)
        else:
            description = "An instance of class '%s'" % receiver.__class__.__name__
        signal_name = model_signals.get(func.__self__, 'unknown')
        params = {
            'model': '.'.join(model_key),
            'receiver': description,
            'signal': signal_name,
            'model_error': app_model_error(model_key),
        }
        return Error(error_msg % params, obj=receiver.__module__, id='signals.E001')

    def default_error(model_key, func, args, keywords):
        error_msg = "%(op)s contains a lazy reference to %(model)s, but %(model_error)s."
        params = {
            'op': func,
            'model': '.'.join(model_key),
            'model_error': app_model_error(model_key),
        }
        return Error(error_msg % params, obj=func, id='models.E022')

    # Maps common uses of lazy operations to corresponding error functions
    # defined above. If a key maps to None, no error will be produced.
    # default_error() will be used for usages that don't appear in this dict.
    known_lazy = {
        ('django.db.models.fields.related', 'resolve_related_class'): field_error,
        ('django.db.models.fields.related', 'set_managed'): None,
        ('django.dispatch.dispatcher', 'connect'): signal_connect_error,
    }

    def build_error(model_key, func, args, keywords):
        key = (func.__module__, func.__name__)
        error_fn = known_lazy.get(key, default_error)
        return error_fn(model_key, func, args, keywords) if error_fn else None

    return sorted(filter(None, (
        build_error(model_key, *extract_operation(func))
        for model_key in pending_models
        for func in apps._pending_operations[model_key]
    )), key=lambda error: error.msg)
```
### install_scripts.write_script

parameters: `(self, script_name, contents, mode="t", *ignored)` | span: (50, 4)-(64, 39) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def write_script(self, script_name, contents, mode="t", *ignored):
    """Write an executable file to the scripts directory"""
    from setuptools.command.easy_install import chmod, current_umask

    log.info("Installing %s script to %s", script_name, self.install_dir)
    target = os.path.join(self.install_dir, script_name)
    self.outfiles.append(target)

    mask = current_umask()
    if not self.dry_run:
        ensure_directory(target)
        f = open(target, "w" + mode)
        f.write(contents)
        f.close()
        chmod(target, 0o777 - mask)
```
### StandardizedFeature.fit

parameters: `(self, blocks, y=None)` | span: (23, 4)-(44, 19) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def fit(self, blocks, y=None):
    """
    Args:
        blocks (List[Block]): as output by :class:`Blockifier.blockify`
        y (None): This isn't used, it's only here for API consistency.

    Returns:
        :class:`StandardizedFeature`: an instance of this class with the
        ``self.scaler`` attribute fit to the ``blocks`` data

    Note:
        When fitting the :class:`StandardScaler` object, you'll probably
        want to determine the mean and/or std of *multiple* HTML files'
        blocks, rather than just a single observation. To do that, just
        concatenate all of the blocks together in a single iterable.
        In contrast, you'll typically apply :meth:`transform` to a *single*
        HTML file's blocks at a time.
    """
    feature_array = self.feature.fit_transform(blocks)
    self.scaler = self.scaler.fit(feature_array)
    return self
```
### StandardizedFeature.transform

parameters: `(self, blocks, y=None)` | span: (46, 4)-(60, 68) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def transform(self, blocks, y=None):
    """
    Transform an ordered sequence of blocks into a 2D features matrix with
    shape (num blocks, num features) and standardized feature values.

    Args:
        blocks (List[Block]): as output by :class:`Blockifier.blockify`
        y (None): This isn't used, it's only here for API consistency.

    Returns:
        `np.ndarray`: 2D array of shape (num blocks, num sub-features),
        where ``blocks`` data has been transformed by ``self.feature``
        and optionally standardized by ``self.scaler``.
    """
    return self.scaler.transform(self.feature.transform(blocks))
```
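The Note in `fit` is the key usage pattern: estimate the scaler's mean and std across many documents, then transform one document at a time. A hedged sketch; `Blockifier`, `feature`, and the constructor signature follow dragnet's API as inferred from the attribute names above, and are assumptions rather than verified calls:

```python
# Pool blocks from every training page so the scaler sees the full
# distribution, not a single document's statistics.
train_blocks = [
    block
    for html in training_htmls            # assumed: iterable of HTML strings
    for block in Blockifier.blockify(html)
]

std_feature = StandardizedFeature(feature)  # wraps a sub-feature + scaler
std_feature.fit(train_blocks)

# At prediction time, transform one page's blocks at a time:
X = std_feature.transform(Blockifier.blockify(one_html))  # (n_blocks, k)
```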
### DateField._check_fix_default_value

parameters: `(self)` | span: (1159, 4)-(1197, 17) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _check_fix_default_value(self):
    """
    Warn that using an actual date or datetime value is probably wrong;
    it's only evaluated on server startup.
    """
    if not self.has_default():
        return []

    now = timezone.now()
    if not timezone.is_naive(now):
        now = timezone.make_naive(now, timezone.utc)
    value = self.default
    if isinstance(value, datetime.datetime):
        if not timezone.is_naive(value):
            value = timezone.make_naive(value, timezone.utc)
        value = value.date()
    elif isinstance(value, datetime.date):
        # Nothing to do, as dates don't have tz information
        pass
    else:
        # No explicit date / datetime value -- no checks necessary
        return []
    offset = datetime.timedelta(days=1)
    lower = (now - offset).date()
    upper = (now + offset).date()
    if lower <= value <= upper:
        return [
            checks.Warning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=self,
                id='fields.W161',
            )
        ]
    return []
```
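Concretely, the anti-pattern this check warns about looks like the first field below (an illustrative model, not runnable outside a configured Django project):

```python
import datetime
from django.db import models

class Event(models.Model):
    # fields.W161: date.today() is evaluated once, at model-import time,
    # so every row would default to the server's startup date.
    created_bad = models.DateField(default=datetime.date.today())

    # Correct: pass the callable so the default is computed on each save.
    created = models.DateField(default=datetime.date.today)
```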
### DateTimeField._check_fix_default_value

parameters: `(self)` | span: (1299, 4)-(1340, 17) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _check_fix_default_value(self):
    """
    Warn that using an actual date or datetime value is probably wrong;
    it's only evaluated on server startup.
    """
    if not self.has_default():
        return []

    now = timezone.now()
    if not timezone.is_naive(now):
        now = timezone.make_naive(now, timezone.utc)
    value = self.default
    if isinstance(value, datetime.datetime):
        second_offset = datetime.timedelta(seconds=10)
        lower = now - second_offset
        upper = now + second_offset
        if timezone.is_aware(value):
            value = timezone.make_naive(value, timezone.utc)
    elif isinstance(value, datetime.date):
        second_offset = datetime.timedelta(seconds=10)
        lower = now - second_offset
        lower = datetime.datetime(lower.year, lower.month, lower.day)
        upper = now + second_offset
        upper = datetime.datetime(upper.year, upper.month, upper.day)
        value = datetime.datetime(value.year, value.month, value.day)
    else:
        # No explicit date / datetime value -- no checks necessary
        return []
    if lower <= value <= upper:
        return [
            checks.Warning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=self,
                id='fields.W161',
            )
        ]
    return []
```
### PositiveIntegerRelDbTypeMixin.rel_db_type

parameters: `(self, connection)` | span: (2026, 4)-(2038, 76) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def rel_db_type(self, connection):
    """
    Return the data type that a related field pointing to this field should
    use. In most cases, a foreign key pointing to a positive integer
    primary key will have an integer column data type but some databases
    (e.g. MySQL) have an unsigned integer type. In that case
    (related_fields_match_type=True), the primary key should return its
    db_type.
    """
    if connection.features.related_fields_match_type:
        return self.db_type(connection)
    else:
        return self.integer_field_class().db_type(connection=connection)
```
### TimeField._check_fix_default_value

parameters: `(self)` | span: (2195, 4)-(2236, 17) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def _check_fix_default_value(self):
    """
    Warn that using an actual date or datetime value is probably wrong;
    it's only evaluated on server startup.
    """
    if not self.has_default():
        return []

    now = timezone.now()
    if not timezone.is_naive(now):
        now = timezone.make_naive(now, timezone.utc)
    value = self.default
    if isinstance(value, datetime.datetime):
        second_offset = datetime.timedelta(seconds=10)
        lower = now - second_offset
        upper = now + second_offset
        if timezone.is_aware(value):
            value = timezone.make_naive(value, timezone.utc)
    elif isinstance(value, datetime.time):
        second_offset = datetime.timedelta(seconds=10)
        lower = now - second_offset
        upper = now + second_offset
        value = datetime.datetime.combine(now.date(), value)
        if timezone.is_aware(value):
            value = timezone.make_naive(value, timezone.utc).time()
    else:
        # No explicit time / datetime value -- no checks necessary
        return []
    if lower <= value <= upper:
        return [
            checks.Warning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=self,
                id='fields.W161',
            )
        ]
    return []
```
### BinaryField.value_to_string

parameters: `(self, obj)` | span: (2385, 4)-(2387, 69) | language: python | docstring language: en | langid predictions: ['en', 'en', 'en'] | reliable: True

```python
def value_to_string(self, obj):
    """Binary data is serialized as base64"""
    return b64encode(self.value_from_object(obj)).decode('ascii')
```
### get_srid_info

parameters: `(srid, connection)` | span: (22, 0)-(52, 35) | language: python | docstring language: en | langid predictions: ['en', 'error', 'th'] | reliable: False

```python
def get_srid_info(srid, connection):
    """
    Return the units, unit name, and spheroid WKT associated with the
    given SRID from the `spatial_ref_sys` (or equivalent) spatial database
    table for the given database connection. These results are cached.
    """
    from django.contrib.gis.gdal import SpatialReference
    global _srid_cache

    try:
        # The SpatialRefSys model for the spatial backend.
        SpatialRefSys = connection.ops.spatial_ref_sys()
    except NotImplementedError:
        SpatialRefSys = None

    alias, get_srs = (
        (connection.alias, lambda srid: SpatialRefSys.objects.using(connection.alias).get(srid=srid).srs)
        if SpatialRefSys else
        (None, SpatialReference)
    )
    if srid not in _srid_cache[alias]:
        srs = get_srs(srid)
        units, units_name = srs.units
        _srid_cache[alias][srid] = SRIDCacheEntry(
            units=units,
            units_name=units_name,
            spheroid='SPHEROID["%s",%s,%s]' % (srs['spheroid'], srs.semi_major, srs.inverse_flattening),
            geodetic=srs.geographic,
        )

    return _srid_cache[alias][srid]
```
BaseSpatialField.__init__ | (self, verbose_name=None, srid=4326, spatial_index=True, **kwargs) |
The initialization function for base spatial fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
|
The initialization function for base spatial fields. Takes the following
as keyword arguments: | def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
"""
The initialization function for base spatial fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
super().__init__(**kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"verbose_name",
"=",
"None",
",",
"srid",
"=",
"4326",
",",
"spatial_index",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# Setting the index flag with the value of the `spatial_index` keyword.",
"self",
".",
"spatial_index",
"=",
"spatial_index",
"# Setting the SRID and getting the units. Unit information must be",
"# easily available in the field instance for distance queries.",
"self",
".",
"srid",
"=",
"srid",
"# Setting the verbose_name keyword argument with the positional",
"# first parameter, so this works like normal fields.",
"kwargs",
"[",
"'verbose_name'",
"]",
"=",
"verbose_name",
"super",
"(",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")"
] | [
66,
4
] | [
92,
34
] | python | en | ['en', 'error', 'th'] | False |
BaseSpatialField.geodetic | (self, connection) |
Return true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
|
Return true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
| def geodetic(self, connection):
"""
Return true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return get_srid_info(self.srid, connection).geodetic | [
"def",
"geodetic",
"(",
"self",
",",
"connection",
")",
":",
"return",
"get_srid_info",
"(",
"self",
".",
"srid",
",",
"connection",
")",
".",
"geodetic"
] | [
115,
4
] | [
120,
60
] | python | en | ['en', 'error', 'th'] | False |
BaseSpatialField.get_placeholder | (self, value, compiler, connection) |
Return the placeholder for the spatial column for the
given value.
|
Return the placeholder for the spatial column for the
given value.
| def get_placeholder(self, value, compiler, connection):
"""
Return the placeholder for the spatial column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value, compiler) | [
"def",
"get_placeholder",
"(",
"self",
",",
"value",
",",
"compiler",
",",
"connection",
")",
":",
"return",
"connection",
".",
"ops",
".",
"get_geom_placeholder",
"(",
"self",
",",
"value",
",",
"compiler",
")"
] | [
122,
4
] | [
127,
73
] | python | en | ['en', 'error', 'th'] | False |
BaseSpatialField.get_srid | (self, obj) |
Return the default SRID for the given geometry or raster, taking into
account the SRID set for the field. For example, if the input geometry
or raster doesn't have an SRID, then the SRID of the field will be
returned.
|
Return the default SRID for the given geometry or raster, taking into
account the SRID set for the field. For example, if the input geometry
or raster doesn't have an SRID, then the SRID of the field will be
returned.
| def get_srid(self, obj):
"""
Return the default SRID for the given geometry or raster, taking into
account the SRID set for the field. For example, if the input geometry
or raster doesn't have an SRID, then the SRID of the field will be
returned.
"""
srid = obj.srid # SRID of given geometry.
if srid is None or self.srid == -1 or (srid == -1 and self.srid != -1):
return self.srid
else:
return srid | [
"def",
"get_srid",
"(",
"self",
",",
"obj",
")",
":",
"srid",
"=",
"obj",
".",
"srid",
"# SRID of given geometry.",
"if",
"srid",
"is",
"None",
"or",
"self",
".",
"srid",
"==",
"-",
"1",
"or",
"(",
"srid",
"==",
"-",
"1",
"and",
"self",
".",
"srid",
"!=",
"-",
"1",
")",
":",
"return",
"self",
".",
"srid",
"else",
":",
"return",
"srid"
] | [
129,
4
] | [
140,
23
] | python | en | ['en', 'error', 'th'] | False |
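A minimal standalone sketch of the fallback rule above, with field_srid standing in for self.srid:

def resolve_srid(geom_srid, field_srid):
    # Fall back to the field's SRID when the geometry has none (or a -1 sentinel).
    if geom_srid is None or field_srid == -1 or (geom_srid == -1 and field_srid != -1):
        return field_srid
    return geom_srid

assert resolve_srid(None, 4326) == 4326   # geometry without an SRID inherits the field's
assert resolve_srid(3857, 4326) == 3857   # an explicit geometry SRID wins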
BaseSpatialField.get_raster_prep_value | (self, value, is_candidate) |
Return a GDALRaster if conversion is successful, otherwise return None.
|
Return a GDALRaster if conversion is successful, otherwise return None.
| def get_raster_prep_value(self, value, is_candidate):
"""
Return a GDALRaster if conversion is successful, otherwise return None.
"""
if isinstance(value, gdal.GDALRaster):
return value
elif is_candidate:
try:
return gdal.GDALRaster(value)
except GDALException:
pass
elif isinstance(value, dict):
try:
return gdal.GDALRaster(value)
except GDALException:
raise ValueError("Couldn't create spatial object from lookup value '%s'." % value) | [
"def",
"get_raster_prep_value",
"(",
"self",
",",
"value",
",",
"is_candidate",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"gdal",
".",
"GDALRaster",
")",
":",
"return",
"value",
"elif",
"is_candidate",
":",
"try",
":",
"return",
"gdal",
".",
"GDALRaster",
"(",
"value",
")",
"except",
"GDALException",
":",
"pass",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"try",
":",
"return",
"gdal",
".",
"GDALRaster",
"(",
"value",
")",
"except",
"GDALException",
":",
"raise",
"ValueError",
"(",
"\"Couldn't create spatial object from lookup value '%s'.\"",
"%",
"value",
")"
] | [
154,
4
] | [
169,
98
] | python | en | ['en', 'error', 'th'] | False |
GeometryField.__init__ | (self, verbose_name=None, dim=2, geography=False, *, extent=(-180.0, -90.0, 180.0, 90.0),
tolerance=0.05, **kwargs) |
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments:
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
|
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments: | def __init__(self, verbose_name=None, dim=2, geography=False, *, extent=(-180.0, -90.0, 180.0, 90.0),
tolerance=0.05, **kwargs):
"""
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments:
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the dimension of the geometry field.
self.dim = dim
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = extent
self._tolerance = tolerance
super().__init__(verbose_name=verbose_name, **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"verbose_name",
"=",
"None",
",",
"dim",
"=",
"2",
",",
"geography",
"=",
"False",
",",
"*",
",",
"extent",
"=",
"(",
"-",
"180.0",
",",
"-",
"90.0",
",",
"180.0",
",",
"90.0",
")",
",",
"tolerance",
"=",
"0.05",
",",
"*",
"*",
"kwargs",
")",
":",
"# Setting the dimension of the geometry field.",
"self",
".",
"dim",
"=",
"dim",
"# Is this a geography rather than a geometry column?",
"self",
".",
"geography",
"=",
"geography",
"# Oracle-specific private attributes for creating the entry in",
"# `USER_SDO_GEOM_METADATA`",
"self",
".",
"_extent",
"=",
"extent",
"self",
".",
"_tolerance",
"=",
"tolerance",
"super",
"(",
")",
".",
"__init__",
"(",
"verbose_name",
"=",
"verbose_name",
",",
"*",
"*",
"kwargs",
")"
] | [
210,
4
] | [
240,
61
] | python | en | ['en', 'error', 'th'] | False |
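For context, a hypothetical model showing where these keyword arguments surface in user code (the model and field names are illustrative, not from this codebase):

from django.contrib.gis.db import models

class City(models.Model):                 # illustrative model
    name = models.CharField(max_length=50)
    location = models.PointField(srid=4326, spatial_index=True, dim=2)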
GeometryField.select_format | (self, compiler, sql, params) |
Return the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKB.
|
Return the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKB.
| def select_format(self, compiler, sql, params):
"""
Return the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKB.
"""
if not compiler.query.subquery:
return compiler.connection.ops.select % sql, params
return sql, params | [
"def",
"select_format",
"(",
"self",
",",
"compiler",
",",
"sql",
",",
"params",
")",
":",
"if",
"not",
"compiler",
".",
"query",
".",
"subquery",
":",
"return",
"compiler",
".",
"connection",
".",
"ops",
".",
"select",
"%",
"sql",
",",
"params",
"return",
"sql",
",",
"params"
] | [
272,
4
] | [
280,
26
] | python | en | ['en', 'error', 'th'] | False |
main | (_) | Train a word2vec model. | Train a word2vec model. | def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals()) | [
"def",
"main",
"(",
"_",
")",
":",
"if",
"not",
"FLAGS",
".",
"train_data",
"or",
"not",
"FLAGS",
".",
"eval_data",
"or",
"not",
"FLAGS",
".",
"save_path",
":",
"print",
"(",
"\"--train_data --eval_data and --save_path must be specified.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"opts",
"=",
"Options",
"(",
")",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
",",
"tf",
".",
"Session",
"(",
")",
"as",
"session",
":",
"with",
"tf",
".",
"device",
"(",
"\"/cpu:0\"",
")",
":",
"model",
"=",
"Word2Vec",
"(",
"opts",
",",
"session",
")",
"model",
".",
"read_analogies",
"(",
")",
"# Read analogy questions",
"for",
"_",
"in",
"xrange",
"(",
"opts",
".",
"epochs_to_train",
")",
":",
"model",
".",
"train",
"(",
")",
"# Process one epoch",
"model",
".",
"eval",
"(",
")",
"# Eval analogies.",
"# Perform a final save.",
"model",
".",
"saver",
".",
"save",
"(",
"session",
",",
"os",
".",
"path",
".",
"join",
"(",
"opts",
".",
"save_path",
",",
"\"model.ckpt\"",
")",
",",
"global_step",
"=",
"model",
".",
"global_step",
")",
"if",
"FLAGS",
".",
"interactive",
":",
"# E.g.,",
"# [0]: model.analogy(b'france', b'paris', b'russia')",
"# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])",
"_start_shell",
"(",
"locals",
"(",
")",
")"
] | [
510,
0
] | [
531,
28
] | python | en | ['en', 'ca', 'en'] | True |
Word2Vec.read_analogies | (self) | Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
| Reads through the analogy question file. | def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32) | [
"def",
"read_analogies",
"(",
"self",
")",
":",
"questions",
"=",
"[",
"]",
"questions_skipped",
"=",
"0",
"with",
"open",
"(",
"self",
".",
"_options",
".",
"eval_data",
",",
"\"rb\"",
")",
"as",
"analogy_f",
":",
"for",
"line",
"in",
"analogy_f",
":",
"if",
"line",
".",
"startswith",
"(",
"b\":\"",
")",
":",
"# Skip comments.",
"continue",
"words",
"=",
"line",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"split",
"(",
"b\" \"",
")",
"ids",
"=",
"[",
"self",
".",
"_word2id",
".",
"get",
"(",
"w",
".",
"strip",
"(",
")",
")",
"for",
"w",
"in",
"words",
"]",
"if",
"None",
"in",
"ids",
"or",
"len",
"(",
"ids",
")",
"!=",
"4",
":",
"questions_skipped",
"+=",
"1",
"else",
":",
"questions",
".",
"append",
"(",
"np",
".",
"array",
"(",
"ids",
")",
")",
"print",
"(",
"\"Eval analogy file: \"",
",",
"self",
".",
"_options",
".",
"eval_data",
")",
"print",
"(",
"\"Questions: \"",
",",
"len",
"(",
"questions",
")",
")",
"print",
"(",
"\"Skipped: \"",
",",
"questions_skipped",
")",
"self",
".",
"_analogy_questions",
"=",
"np",
".",
"array",
"(",
"questions",
",",
"dtype",
"=",
"np",
".",
"int32",
")"
] | [
169,
2
] | [
192,
65
] | python | en | ['en', 'en', 'en'] | True |
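A sketch of the file format read_analogies() expects: ':'-prefixed lines are section headers (skipped), and each data line holds four space-separated lower-case words a b c d.

sample = b": capital-common-countries\nathens greece baghdad iraq\nparis france rome italy\n"
for line in sample.splitlines():
    if line.startswith(b":"):      # skip section comments
        continue
    a, b, c, d = line.strip().lower().split(b" ")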
Word2Vec.forward | (self, examples, labels) | Build the graph for the forward pass. | Build the graph for the forward pass. | def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [vocab_size].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits | [
"def",
"forward",
"(",
"self",
",",
"examples",
",",
"labels",
")",
":",
"opts",
"=",
"self",
".",
"_options",
"# Declare all variables we need.",
"# Embedding: [vocab_size, emb_dim]",
"init_width",
"=",
"0.5",
"/",
"opts",
".",
"emb_dim",
"emb",
"=",
"tf",
".",
"Variable",
"(",
"tf",
".",
"random_uniform",
"(",
"[",
"opts",
".",
"vocab_size",
",",
"opts",
".",
"emb_dim",
"]",
",",
"-",
"init_width",
",",
"init_width",
")",
",",
"name",
"=",
"\"emb\"",
")",
"self",
".",
"_emb",
"=",
"emb",
"# Softmax weight: [vocab_size, emb_dim]. Transposed.",
"sm_w_t",
"=",
"tf",
".",
"Variable",
"(",
"tf",
".",
"zeros",
"(",
"[",
"opts",
".",
"vocab_size",
",",
"opts",
".",
"emb_dim",
"]",
")",
",",
"name",
"=",
"\"sm_w_t\"",
")",
"# Softmax bias: [vocab_size].",
"sm_b",
"=",
"tf",
".",
"Variable",
"(",
"tf",
".",
"zeros",
"(",
"[",
"opts",
".",
"vocab_size",
"]",
")",
",",
"name",
"=",
"\"sm_b\"",
")",
"# Global step: scalar, i.e., shape [].",
"self",
".",
"global_step",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"name",
"=",
"\"global_step\"",
")",
"# Nodes to compute the nce loss w/ candidate sampling.",
"labels_matrix",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"cast",
"(",
"labels",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
",",
"[",
"opts",
".",
"batch_size",
",",
"1",
"]",
")",
"# Negative sampling.",
"sampled_ids",
",",
"_",
",",
"_",
"=",
"(",
"tf",
".",
"nn",
".",
"fixed_unigram_candidate_sampler",
"(",
"true_classes",
"=",
"labels_matrix",
",",
"num_true",
"=",
"1",
",",
"num_sampled",
"=",
"opts",
".",
"num_samples",
",",
"unique",
"=",
"True",
",",
"range_max",
"=",
"opts",
".",
"vocab_size",
",",
"distortion",
"=",
"0.75",
",",
"unigrams",
"=",
"opts",
".",
"vocab_counts",
".",
"tolist",
"(",
")",
")",
")",
"# Embeddings for examples: [batch_size, emb_dim]",
"example_emb",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"emb",
",",
"examples",
")",
"# Weights for labels: [batch_size, emb_dim]",
"true_w",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"sm_w_t",
",",
"labels",
")",
"# Biases for labels: [batch_size, 1]",
"true_b",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"sm_b",
",",
"labels",
")",
"# Weights for sampled ids: [num_sampled, emb_dim]",
"sampled_w",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"sm_w_t",
",",
"sampled_ids",
")",
"# Biases for sampled ids: [num_sampled, 1]",
"sampled_b",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"sm_b",
",",
"sampled_ids",
")",
"# True logits: [batch_size, 1]",
"true_logits",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"example_emb",
",",
"true_w",
")",
",",
"1",
")",
"+",
"true_b",
"# Sampled logits: [batch_size, num_sampled]",
"# We replicate sampled noise labels for all examples in the batch",
"# using the matmul.",
"sampled_b_vec",
"=",
"tf",
".",
"reshape",
"(",
"sampled_b",
",",
"[",
"opts",
".",
"num_samples",
"]",
")",
"sampled_logits",
"=",
"tf",
".",
"matmul",
"(",
"example_emb",
",",
"sampled_w",
",",
"transpose_b",
"=",
"True",
")",
"+",
"sampled_b_vec",
"return",
"true_logits",
",",
"sampled_logits"
] | [
194,
2
] | [
257,
38
] | python | en | ['en', 'en', 'en'] | True |
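The shapes are the essential contract of forward(); a NumPy sketch with toy sizes (not the real graph):

import numpy as np

batch, emb_dim, num_samples = 4, 8, 3
example_emb = np.random.randn(batch, emb_dim)
true_w, true_b = np.random.randn(batch, emb_dim), np.random.randn(batch)
sampled_w, sampled_b = np.random.randn(num_samples, emb_dim), np.random.randn(num_samples)

true_logits = (example_emb * true_w).sum(axis=1) + true_b   # [batch]
sampled_logits = example_emb @ sampled_w.T + sampled_b      # [batch, num_samples]
assert true_logits.shape == (batch,) and sampled_logits.shape == (batch, num_samples)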
Word2Vec.nce_loss | (self, true_logits, sampled_logits) | Build the graph for the NCE loss. | Build the graph for the NCE loss. | def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(true_logits), logits=true_logits)
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor | [
"def",
"nce_loss",
"(",
"self",
",",
"true_logits",
",",
"sampled_logits",
")",
":",
"# cross-entropy(logits, labels)",
"opts",
"=",
"self",
".",
"_options",
"true_xent",
"=",
"tf",
".",
"nn",
".",
"sigmoid_cross_entropy_with_logits",
"(",
"labels",
"=",
"tf",
".",
"ones_like",
"(",
"true_logits",
")",
",",
"logits",
"=",
"true_logits",
")",
"sampled_xent",
"=",
"tf",
".",
"nn",
".",
"sigmoid_cross_entropy_with_logits",
"(",
"labels",
"=",
"tf",
".",
"zeros_like",
"(",
"sampled_logits",
")",
",",
"logits",
"=",
"sampled_logits",
")",
"# NCE-loss is the sum of the true and noise (sampled words)",
"# contributions, averaged over the batch.",
"nce_loss_tensor",
"=",
"(",
"tf",
".",
"reduce_sum",
"(",
"true_xent",
")",
"+",
"tf",
".",
"reduce_sum",
"(",
"sampled_xent",
")",
")",
"/",
"opts",
".",
"batch_size",
"return",
"nce_loss_tensor"
] | [
259,
2
] | [
273,
26
] | python | en | ['en', 'en', 'en'] | True |
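Equivalently, in NumPy (a sketch using the numerically stable form of sigmoid cross-entropy that tf.nn.sigmoid_cross_entropy_with_logits implements):

import numpy as np

def sigmoid_xent(labels, logits):
    # max(x, 0) - x*z + log(1 + exp(-|x|)), stable for large |x|
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

true_logits = np.array([2.0, 1.5])
sampled_logits = np.array([[0.1, -0.3], [0.2, 0.0]])
batch_size = true_logits.shape[0]
nce_loss = (sigmoid_xent(1.0, true_logits).sum() +
            sigmoid_xent(0.0, sampled_logits).sum()) / batch_size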
Word2Vec.optimize | (self, loss) | Build the graph to optimize the loss function. | Build the graph to optimize the loss function. | def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train | [
"def",
"optimize",
"(",
"self",
",",
"loss",
")",
":",
"# Optimizer nodes.",
"# Linear learning rate decay.",
"opts",
"=",
"self",
".",
"_options",
"words_to_train",
"=",
"float",
"(",
"opts",
".",
"words_per_epoch",
"*",
"opts",
".",
"epochs_to_train",
")",
"lr",
"=",
"opts",
".",
"learning_rate",
"*",
"tf",
".",
"maximum",
"(",
"0.0001",
",",
"1.0",
"-",
"tf",
".",
"cast",
"(",
"self",
".",
"_words",
",",
"tf",
".",
"float32",
")",
"/",
"words_to_train",
")",
"self",
".",
"_lr",
"=",
"lr",
"optimizer",
"=",
"tf",
".",
"train",
".",
"GradientDescentOptimizer",
"(",
"lr",
")",
"train",
"=",
"optimizer",
".",
"minimize",
"(",
"loss",
",",
"global_step",
"=",
"self",
".",
"global_step",
",",
"gate_gradients",
"=",
"optimizer",
".",
"GATE_NONE",
")",
"self",
".",
"_train",
"=",
"train"
] | [
275,
2
] | [
289,
23
] | python | en | ['en', 'en', 'en'] | True |
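The decay schedule reduces to one line; a sketch with the same floor of 1e-4 of the base rate:

def decayed_lr(base_lr, words_processed, words_to_train):
    return base_lr * max(0.0001, 1.0 - words_processed / words_to_train)

assert decayed_lr(0.2, 0, 100) == 0.2                 # full rate at the start
assert abs(decayed_lr(0.2, 50, 100) - 0.1) < 1e-12    # halfway through, half the rate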
Word2Vec.build_eval_graph | (self) | Build the eval graph. | Build the eval graph. | def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
self._nemb = nemb
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx | [
"def",
"build_eval_graph",
"(",
"self",
")",
":",
"# Eval graph",
"# Each analogy task is to predict the 4th word (d) given three",
"# words: a, b, c. E.g., a=italy, b=rome, c=france, we should",
"# predict d=paris.",
"# The eval feeds three vectors of word ids for a, b, c, each of",
"# which is of size N, where N is the number of analogies we want to",
"# evaluate in one batch.",
"analogy_a",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# [N]",
"analogy_b",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# [N]",
"analogy_c",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# [N]",
"# Normalized word embeddings of shape [vocab_size, emb_dim].",
"nemb",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"self",
".",
"_emb",
",",
"1",
")",
"self",
".",
"_nemb",
"=",
"nemb",
"# Each row of a_emb, b_emb, c_emb is a word's embedding vector.",
"# They all have the shape [N, emb_dim]",
"a_emb",
"=",
"tf",
".",
"gather",
"(",
"nemb",
",",
"analogy_a",
")",
"# a's embs",
"b_emb",
"=",
"tf",
".",
"gather",
"(",
"nemb",
",",
"analogy_b",
")",
"# b's embs",
"c_emb",
"=",
"tf",
".",
"gather",
"(",
"nemb",
",",
"analogy_c",
")",
"# c's embs",
"# We expect that d's embedding vectors on the unit hyper-sphere is",
"# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].",
"target",
"=",
"c_emb",
"+",
"(",
"b_emb",
"-",
"a_emb",
")",
"# Compute cosine distance between each pair of target and vocab.",
"# dist has shape [N, vocab_size].",
"dist",
"=",
"tf",
".",
"matmul",
"(",
"target",
",",
"nemb",
",",
"transpose_b",
"=",
"True",
")",
"# For each question (row in dist), find the top 4 words.",
"_",
",",
"pred_idx",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"dist",
",",
"4",
")",
"# Nodes for computing neighbors for a given word according to",
"# their cosine distance.",
"nearby_word",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# word id",
"nearby_emb",
"=",
"tf",
".",
"gather",
"(",
"nemb",
",",
"nearby_word",
")",
"nearby_dist",
"=",
"tf",
".",
"matmul",
"(",
"nearby_emb",
",",
"nemb",
",",
"transpose_b",
"=",
"True",
")",
"nearby_val",
",",
"nearby_idx",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"nearby_dist",
",",
"min",
"(",
"1000",
",",
"self",
".",
"_options",
".",
"vocab_size",
")",
")",
"# Nodes in the construct graph which are used by training and",
"# evaluation to run/feed/fetch.",
"self",
".",
"_analogy_a",
"=",
"analogy_a",
"self",
".",
"_analogy_b",
"=",
"analogy_b",
"self",
".",
"_analogy_c",
"=",
"analogy_c",
"self",
".",
"_analogy_pred_idx",
"=",
"pred_idx",
"self",
".",
"_nearby_word",
"=",
"nearby_word",
"self",
".",
"_nearby_val",
"=",
"nearby_val",
"self",
".",
"_nearby_idx",
"=",
"nearby_idx"
] | [
291,
2
] | [
343,
33
] | python | en | ['en', 'en', 'en'] | True |
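The analogy prediction is plain vector arithmetic on the normalized embeddings; a NumPy sketch of the same computation:

import numpy as np

nemb = np.random.randn(1000, 64)
nemb /= np.linalg.norm(nemb, axis=1, keepdims=True)   # l2-normalize rows
a, b, c = nemb[1], nemb[2], nemb[3]
target = c + (b - a)                                  # expected location of d
dist = nemb @ target                                  # cosine similarity to every word
pred_idx = np.argsort(-dist)[:4]                      # top 4, like tf.nn.top_k(dist, 4)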
Word2Vec.build_graph | (self) | Build the graph for the full model. | Build the graph for the full model. | def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.summary.scalar("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver() | [
"def",
"build_graph",
"(",
"self",
")",
":",
"opts",
"=",
"self",
".",
"_options",
"# The training data. A text file.",
"(",
"words",
",",
"counts",
",",
"words_per_epoch",
",",
"self",
".",
"_epoch",
",",
"self",
".",
"_words",
",",
"examples",
",",
"labels",
")",
"=",
"word2vec",
".",
"skipgram_word2vec",
"(",
"filename",
"=",
"opts",
".",
"train_data",
",",
"batch_size",
"=",
"opts",
".",
"batch_size",
",",
"window_size",
"=",
"opts",
".",
"window_size",
",",
"min_count",
"=",
"opts",
".",
"min_count",
",",
"subsample",
"=",
"opts",
".",
"subsample",
")",
"(",
"opts",
".",
"vocab_words",
",",
"opts",
".",
"vocab_counts",
",",
"opts",
".",
"words_per_epoch",
")",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"[",
"words",
",",
"counts",
",",
"words_per_epoch",
"]",
")",
"opts",
".",
"vocab_size",
"=",
"len",
"(",
"opts",
".",
"vocab_words",
")",
"print",
"(",
"\"Data file: \"",
",",
"opts",
".",
"train_data",
")",
"print",
"(",
"\"Vocab size: \"",
",",
"opts",
".",
"vocab_size",
"-",
"1",
",",
"\" + UNK\"",
")",
"print",
"(",
"\"Words per epoch: \"",
",",
"opts",
".",
"words_per_epoch",
")",
"self",
".",
"_examples",
"=",
"examples",
"self",
".",
"_labels",
"=",
"labels",
"self",
".",
"_id2word",
"=",
"opts",
".",
"vocab_words",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"self",
".",
"_id2word",
")",
":",
"self",
".",
"_word2id",
"[",
"w",
"]",
"=",
"i",
"true_logits",
",",
"sampled_logits",
"=",
"self",
".",
"forward",
"(",
"examples",
",",
"labels",
")",
"loss",
"=",
"self",
".",
"nce_loss",
"(",
"true_logits",
",",
"sampled_logits",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"NCE loss\"",
",",
"loss",
")",
"self",
".",
"_loss",
"=",
"loss",
"self",
".",
"optimize",
"(",
"loss",
")",
"# Properly initialize all variables.",
"tf",
".",
"global_variables_initializer",
"(",
")",
".",
"run",
"(",
")",
"self",
".",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")"
] | [
345,
2
] | [
375,
33
] | python | en | ['en', 'en', 'en'] | True |
Word2Vec.save_vocab | (self) | Save the vocabulary to a file so the model can be reloaded. | Save the vocabulary to a file so the model can be reloaded. | def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
np.savetxt(os.path.join(opts.save_path, "vectors.txt"), self._nemb.eval()) | [
"def",
"save_vocab",
"(",
"self",
")",
":",
"opts",
"=",
"self",
".",
"_options",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"opts",
".",
"save_path",
",",
"\"vocab.txt\"",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"i",
"in",
"xrange",
"(",
"opts",
".",
"vocab_size",
")",
":",
"vocab_word",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"opts",
".",
"vocab_words",
"[",
"i",
"]",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"f",
".",
"write",
"(",
"\"%s %d\\n\"",
"%",
"(",
"vocab_word",
",",
"opts",
".",
"vocab_counts",
"[",
"i",
"]",
")",
")",
"np",
".",
"savetxt",
"(",
"os",
".",
"path",
".",
"join",
"(",
"opts",
".",
"save_path",
",",
"\"vectors.txt\"",
")",
",",
"self",
".",
"_nemb",
".",
"eval",
"(",
")",
")"
] | [
377,
2
] | [
385,
78
] | python | en | ['en', 'en', 'en'] | True |
Word2Vec.train | (self) | Train the model. | Train the model. | def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval)  # Report our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch | [
"def",
"train",
"(",
"self",
")",
":",
"opts",
"=",
"self",
".",
"_options",
"initial_epoch",
",",
"initial_words",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"[",
"self",
".",
"_epoch",
",",
"self",
".",
"_words",
"]",
")",
"summary_op",
"=",
"tf",
".",
"summary",
".",
"merge_all",
"(",
")",
"summary_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"opts",
".",
"save_path",
",",
"self",
".",
"_session",
".",
"graph",
")",
"workers",
"=",
"[",
"]",
"for",
"_",
"in",
"xrange",
"(",
"opts",
".",
"concurrent_steps",
")",
":",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_train_thread_body",
")",
"t",
".",
"start",
"(",
")",
"workers",
".",
"append",
"(",
"t",
")",
"last_words",
",",
"last_time",
",",
"last_summary_time",
"=",
"initial_words",
",",
"time",
".",
"time",
"(",
")",
",",
"0",
"last_checkpoint_time",
"=",
"0",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"opts",
".",
"statistics_interval",
")",
"# Reports our progress once a while.",
"(",
"epoch",
",",
"step",
",",
"loss",
",",
"words",
",",
"lr",
")",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"[",
"self",
".",
"_epoch",
",",
"self",
".",
"global_step",
",",
"self",
".",
"_loss",
",",
"self",
".",
"_words",
",",
"self",
".",
"_lr",
"]",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"last_words",
",",
"last_time",
",",
"rate",
"=",
"words",
",",
"now",
",",
"(",
"words",
"-",
"last_words",
")",
"/",
"(",
"now",
"-",
"last_time",
")",
"print",
"(",
"\"Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\\r\"",
"%",
"(",
"epoch",
",",
"step",
",",
"lr",
",",
"loss",
",",
"rate",
")",
",",
"end",
"=",
"\"\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"now",
"-",
"last_summary_time",
">",
"opts",
".",
"summary_interval",
":",
"summary_str",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"summary_op",
")",
"summary_writer",
".",
"add_summary",
"(",
"summary_str",
",",
"step",
")",
"last_summary_time",
"=",
"now",
"if",
"now",
"-",
"last_checkpoint_time",
">",
"opts",
".",
"checkpoint_interval",
":",
"self",
".",
"saver",
".",
"save",
"(",
"self",
".",
"_session",
",",
"os",
".",
"path",
".",
"join",
"(",
"opts",
".",
"save_path",
",",
"\"model.ckpt\"",
")",
",",
"global_step",
"=",
"step",
".",
"astype",
"(",
"int",
")",
")",
"last_checkpoint_time",
"=",
"now",
"if",
"epoch",
"!=",
"initial_epoch",
":",
"break",
"for",
"t",
"in",
"workers",
":",
"t",
".",
"join",
"(",
")",
"return",
"epoch"
] | [
394,
2
] | [
435,
16
] | python | en | ['en', 'it', 'en'] | True |
Word2Vec._predict | (self, analogy) | Predict the top 4 answers for analogy questions. | Predict the top 4 answers for analogy questions. | def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx | [
"def",
"_predict",
"(",
"self",
",",
"analogy",
")",
":",
"idx",
",",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"[",
"self",
".",
"_analogy_pred_idx",
"]",
",",
"{",
"self",
".",
"_analogy_a",
":",
"analogy",
"[",
":",
",",
"0",
"]",
",",
"self",
".",
"_analogy_b",
":",
"analogy",
"[",
":",
",",
"1",
"]",
",",
"self",
".",
"_analogy_c",
":",
"analogy",
"[",
":",
",",
"2",
"]",
"}",
")",
"return",
"idx"
] | [
437,
2
] | [
444,
14
] | python | en | ['en', 'en', 'en'] | True |
Word2Vec.eval | (self) | Evaluate analogy questions and report accuracy. | Evaluate analogy questions and report accuracy. | def eval(self):
"""Evaluate analogy questions and report accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total)) | [
"def",
"eval",
"(",
"self",
")",
":",
"# How many questions we get right at precision@1.",
"correct",
"=",
"0",
"try",
":",
"total",
"=",
"self",
".",
"_analogy_questions",
".",
"shape",
"[",
"0",
"]",
"except",
"AttributeError",
"as",
"e",
":",
"raise",
"AttributeError",
"(",
"\"Need to read analogy questions.\"",
")",
"start",
"=",
"0",
"while",
"start",
"<",
"total",
":",
"limit",
"=",
"start",
"+",
"2500",
"sub",
"=",
"self",
".",
"_analogy_questions",
"[",
"start",
":",
"limit",
",",
":",
"]",
"idx",
"=",
"self",
".",
"_predict",
"(",
"sub",
")",
"start",
"=",
"limit",
"for",
"question",
"in",
"xrange",
"(",
"sub",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"4",
")",
":",
"if",
"idx",
"[",
"question",
",",
"j",
"]",
"==",
"sub",
"[",
"question",
",",
"3",
"]",
":",
"# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].",
"correct",
"+=",
"1",
"break",
"elif",
"idx",
"[",
"question",
",",
"j",
"]",
"in",
"sub",
"[",
"question",
",",
":",
"3",
"]",
":",
"# We need to skip words already in the question.",
"continue",
"else",
":",
"# The correct label is not the precision@1",
"break",
"print",
"(",
")",
"print",
"(",
"\"Eval %4d/%d accuracy = %4.1f%%\"",
"%",
"(",
"correct",
",",
"total",
",",
"correct",
"*",
"100.0",
"/",
"total",
")",
")"
] | [
446,
2
] | [
477,
71
] | python | en | ['en', 'en', 'en'] | True |
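The skip logic in the inner loop is easy to miss; a sketch of the per-question decision (question = [a, b, c, d]):

def is_correct(top4, question):
    for cand in top4:
        if cand == question[3]:
            return True                # predicted the answer word
        if cand in question[:3]:
            continue                   # ignore words already in the question
        return False                   # precision@1 miss
    return False

assert is_correct([1, 9, 0, 0], [5, 1, 6, 9])   # 1 is skipped, 9 is the answer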
Word2Vec.analogy | (self, w0, w1, w2) | Predict word w3 as in w0:w1 vs w2:w3. | Predict word w3 as in w0:w1 vs w2:w3. | def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
print(c)
return
print("unknown") | [
"def",
"analogy",
"(",
"self",
",",
"w0",
",",
"w1",
",",
"w2",
")",
":",
"wid",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"self",
".",
"_word2id",
".",
"get",
"(",
"w",
",",
"0",
")",
"for",
"w",
"in",
"[",
"w0",
",",
"w1",
",",
"w2",
"]",
"]",
"]",
")",
"idx",
"=",
"self",
".",
"_predict",
"(",
"wid",
")",
"for",
"c",
"in",
"[",
"self",
".",
"_id2word",
"[",
"i",
"]",
"for",
"i",
"in",
"idx",
"[",
"0",
",",
":",
"]",
"]",
":",
"if",
"c",
"not",
"in",
"[",
"w0",
",",
"w1",
",",
"w2",
"]",
":",
"print",
"(",
"c",
")",
"return",
"print",
"(",
"\"unknown\"",
")"
] | [
479,
2
] | [
487,
20
] | python | pl | ['en', 'pl', 'pl'] | True |
Word2Vec.nearby | (self, words, num=20) | Prints out nearby words given a list of words. | Prints out nearby words given a list of words. | def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance)) | [
"def",
"nearby",
"(",
"self",
",",
"words",
",",
"num",
"=",
"20",
")",
":",
"ids",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"_word2id",
".",
"get",
"(",
"x",
",",
"0",
")",
"for",
"x",
"in",
"words",
"]",
")",
"vals",
",",
"idx",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"[",
"self",
".",
"_nearby_val",
",",
"self",
".",
"_nearby_idx",
"]",
",",
"{",
"self",
".",
"_nearby_word",
":",
"ids",
"}",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"words",
")",
")",
":",
"print",
"(",
"\"\\n%s\\n=====================================\"",
"%",
"(",
"words",
"[",
"i",
"]",
")",
")",
"for",
"(",
"neighbor",
",",
"distance",
")",
"in",
"zip",
"(",
"idx",
"[",
"i",
",",
":",
"num",
"]",
",",
"vals",
"[",
"i",
",",
":",
"num",
"]",
")",
":",
"print",
"(",
"\"%-20s %6.4f\"",
"%",
"(",
"self",
".",
"_id2word",
"[",
"neighbor",
"]",
",",
"distance",
")",
")"
] | [
489,
2
] | [
497,
66
] | python | en | ['en', 'en', 'en'] | True |
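The neighbor query is a single matrix product against the normalized embeddings; a NumPy sketch:

import numpy as np

nemb = np.random.randn(500, 32)
nemb /= np.linalg.norm(nemb, axis=1, keepdims=True)
query_ids = np.array([7, 42])
dist = nemb[query_ids] @ nemb.T                   # [num_queries, vocab_size]
idx = np.argsort(-dist, axis=1)[:, :20]           # top 20 neighbor ids per query
vals = np.take_along_axis(dist, idx, axis=1)      # their cosine similarities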
_dump_arg_defaults | (kwargs) | Inject default arguments for dump functions. | Inject default arguments for dump functions. | def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder) | [
"def",
"_dump_arg_defaults",
"(",
"kwargs",
")",
":",
"if",
"current_app",
":",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"current_app",
".",
"json_encoder",
")",
"if",
"not",
"current_app",
".",
"config",
"[",
"'JSON_AS_ASCII'",
"]",
":",
"kwargs",
".",
"setdefault",
"(",
"'ensure_ascii'",
",",
"False",
")",
"kwargs",
".",
"setdefault",
"(",
"'sort_keys'",
",",
"current_app",
".",
"config",
"[",
"'JSON_SORT_KEYS'",
"]",
")",
"else",
":",
"kwargs",
".",
"setdefault",
"(",
"'sort_keys'",
",",
"True",
")",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"JSONEncoder",
")"
] | [
90,
0
] | [
99,
45
] | python | da | ['da', 'fr', 'en'] | False |
_load_arg_defaults | (kwargs) | Inject default arguments for load functions. | Inject default arguments for load functions. | def _load_arg_defaults(kwargs):
"""Inject default arguments for load functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_decoder)
else:
kwargs.setdefault('cls', JSONDecoder) | [
"def",
"_load_arg_defaults",
"(",
"kwargs",
")",
":",
"if",
"current_app",
":",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"current_app",
".",
"json_decoder",
")",
"else",
":",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"JSONDecoder",
")"
] | [
102,
0
] | [
107,
45
] | python | en | ['da', 'en', 'en'] | True |
dumps | (obj, **kwargs) | Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
This function can return ``unicode`` strings or ASCII-only bytestrings,
which coerce into unicode strings automatically. That default behavior is
controlled by the ``JSON_AS_ASCII`` configuration variable
and can be overridden by the simplejson ``ensure_ascii`` parameter.
| Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack. | def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
This function can return ``unicode`` strings or ASCII-only bytestrings,
which coerce into unicode strings automatically. That default behavior is
controlled by the ``JSON_AS_ASCII`` configuration variable
and can be overridden by the simplejson ``ensure_ascii`` parameter.
"""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
rv = _json.dumps(obj, **kwargs)
if encoding is not None and isinstance(rv, text_type):
rv = rv.encode(encoding)
return rv | [
"def",
"dumps",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"_dump_arg_defaults",
"(",
"kwargs",
")",
"encoding",
"=",
"kwargs",
".",
"pop",
"(",
"'encoding'",
",",
"None",
")",
"rv",
"=",
"_json",
".",
"dumps",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
"if",
"encoding",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"rv",
",",
"text_type",
")",
":",
"rv",
"=",
"rv",
".",
"encode",
"(",
"encoding",
")",
"return",
"rv"
] | [
110,
0
] | [
125,
13
] | python | en | ['en', 'en', 'en'] | True |
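A usage sketch (the app here is illustrative): inside an application context dumps() picks up the app's encoder and settings, so keys come back sorted by default.

from flask import Flask, json

app = Flask(__name__)
with app.app_context():
    payload = json.dumps({"b": 1, "a": 2})   # '{"a": 2, "b": 1}' with default settings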
dump | (obj, fp, **kwargs) | Like :func:`dumps` but writes into a file object. | Like :func:`dumps` but writes into a file object. | def dump(obj, fp, **kwargs):
"""Like :func:`dumps` but writes into a file object."""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
if encoding is not None:
fp = _wrap_writer_for_text(fp, encoding)
_json.dump(obj, fp, **kwargs) | [
"def",
"dump",
"(",
"obj",
",",
"fp",
",",
"*",
"*",
"kwargs",
")",
":",
"_dump_arg_defaults",
"(",
"kwargs",
")",
"encoding",
"=",
"kwargs",
".",
"pop",
"(",
"'encoding'",
",",
"None",
")",
"if",
"encoding",
"is",
"not",
"None",
":",
"fp",
"=",
"_wrap_writer_for_text",
"(",
"fp",
",",
"encoding",
")",
"_json",
".",
"dump",
"(",
"obj",
",",
"fp",
",",
"*",
"*",
"kwargs",
")"
] | [
128,
0
] | [
134,
33
] | python | en | ['en', 'haw', 'en'] | True |
loads | (s, **kwargs) | Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
| Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
| def loads(s, **kwargs):
"""Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
"""
_load_arg_defaults(kwargs)
if isinstance(s, bytes):
s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
return _json.loads(s, **kwargs) | [
"def",
"loads",
"(",
"s",
",",
"*",
"*",
"kwargs",
")",
":",
"_load_arg_defaults",
"(",
"kwargs",
")",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"kwargs",
".",
"pop",
"(",
"'encoding'",
",",
"None",
")",
"or",
"'utf-8'",
")",
"return",
"_json",
".",
"loads",
"(",
"s",
",",
"*",
"*",
"kwargs",
")"
] | [
137,
0
] | [
145,
35
] | python | en | ['en', 'en', 'en'] | True |
load | (fp, **kwargs) | Like :func:`loads` but reads from a file object.
| Like :func:`loads` but reads from a file object.
| def load(fp, **kwargs):
"""Like :func:`loads` but reads from a file object.
"""
_load_arg_defaults(kwargs)
if not PY2:
fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
return _json.load(fp, **kwargs) | [
"def",
"load",
"(",
"fp",
",",
"*",
"*",
"kwargs",
")",
":",
"_load_arg_defaults",
"(",
"kwargs",
")",
"if",
"not",
"PY2",
":",
"fp",
"=",
"_wrap_reader_for_text",
"(",
"fp",
",",
"kwargs",
".",
"pop",
"(",
"'encoding'",
",",
"None",
")",
"or",
"'utf-8'",
")",
"return",
"_json",
".",
"load",
"(",
"fp",
",",
"*",
"*",
"kwargs",
")"
] | [
148,
0
] | [
154,
35
] | python | en | ['en', 'en', 'en'] | True |
htmlsafe_dumps | (obj, **kwargs) | Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double-quoted attributes. In that case, single-quote
your attributes or additionally HTML-escape them.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
| Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags. | def htmlsafe_dumps(obj, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double-quoted attributes. In that case, single-quote
your attributes or additionally HTML-escape them.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
"""
rv = dumps(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
if not _slash_escape:
rv = rv.replace('\\/', '/')
return rv | [
"def",
"htmlsafe_dumps",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"rv",
"=",
"dumps",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
".",
"replace",
"(",
"u'<'",
",",
"u'\\\\u003c'",
")",
".",
"replace",
"(",
"u'>'",
",",
"u'\\\\u003e'",
")",
".",
"replace",
"(",
"u'&'",
",",
"u'\\\\u0026'",
")",
".",
"replace",
"(",
"u\"'\"",
",",
"u'\\\\u0027'",
")",
"if",
"not",
"_slash_escape",
":",
"rv",
"=",
"rv",
".",
"replace",
"(",
"'\\\\/'",
",",
"'/'",
")",
"return",
"rv"
] | [
157,
0
] | [
189,
13
] | python | en | ['en', 'en', 'en'] | True |
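A sketch of the escaping in action; the four sensitive characters come back as \uXXXX escapes:

from flask.json import htmlsafe_dumps

s = htmlsafe_dumps("</script>&'")
# s is the JSON string "\u003c/script\u003e\u0026\u0027", safe inside <script> tags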
htmlsafe_dump | (obj, fp, **kwargs) | Like :func:`htmlsafe_dumps` but writes into a file object. | Like :func:`htmlsafe_dumps` but writes into a file object. | def htmlsafe_dump(obj, fp, **kwargs):
"""Like :func:`htmlsafe_dumps` but writes into a file object."""
fp.write(text_type(htmlsafe_dumps(obj, **kwargs))) | [
"def",
"htmlsafe_dump",
"(",
"obj",
",",
"fp",
",",
"*",
"*",
"kwargs",
")",
":",
"fp",
".",
"write",
"(",
"text_type",
"(",
"htmlsafe_dumps",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
")",
")"
] | [
192,
0
] | [
194,
54
] | python | en | ['en', 'en', 'en'] | True |
jsonify | (*args, **kwargs) | This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
For clarity, the JSON serialization behavior has the following differences
from :func:`dumps`:
1. Single argument: Passed straight through to :func:`dumps`.
2. Multiple arguments: Converted to an array before being passed to
:func:`dumps`.
3. Multiple keyword arguments: Converted to a dict before being passed to
:func:`dumps`.
4. Both args and kwargs: Behavior undefined and will throw an exception.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
.. versionchanged:: 0.11
Added support for serializing top-level arrays. This introduces a
security risk in ancient browsers. See :ref:`json-security` for details.
This function's response will be pretty printed if it was not requested
with ``X-Requested-With: XMLHttpRequest`` to simplify debugging unless
the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
Compressed (not pretty) formatting currently means no indents and no
spaces after separators.
.. versionadded:: 0.2
| This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``. | def jsonify(*args, **kwargs):
"""This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
For clarity, the JSON serialization behavior has the following differences
from :func:`dumps`:
1. Single argument: Passed straight through to :func:`dumps`.
2. Multiple arguments: Converted to an array before being passed to
:func:`dumps`.
3. Multiple keyword arguments: Converted to a dict before being passed to
:func:`dumps`.
4. Both args and kwargs: Behavior undefined and will throw an exception.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
.. versionchanged:: 0.11
Added support for serializing top-level arrays. This introduces a
security risk in ancient browsers. See :ref:`json-security` for details.
This function's response will be pretty printed if it was not requested
with ``X-Requested-With: XMLHttpRequest`` to simplify debugging unless
the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
Compressed (not pretty) formatting currently means no indents and no
spaces after separators.
.. versionadded:: 0.2
"""
indent = None
separators = (',', ':')
if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and not request.is_xhr:
indent = 2
separators = (', ', ': ')
if args and kwargs:
raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
return current_app.response_class(
(dumps(data, indent=indent, separators=separators), '\n'),
mimetype=current_app.config['JSONIFY_MIMETYPE']
) | [
"def",
"jsonify",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"indent",
"=",
"None",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
"if",
"current_app",
".",
"config",
"[",
"'JSONIFY_PRETTYPRINT_REGULAR'",
"]",
"and",
"not",
"request",
".",
"is_xhr",
":",
"indent",
"=",
"2",
"separators",
"=",
"(",
"', '",
",",
"': '",
")",
"if",
"args",
"and",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'jsonify() behavior undefined when passed both args and kwargs'",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"1",
":",
"# single args are passed directly to dumps()",
"data",
"=",
"args",
"[",
"0",
"]",
"else",
":",
"data",
"=",
"args",
"or",
"kwargs",
"return",
"current_app",
".",
"response_class",
"(",
"(",
"dumps",
"(",
"data",
",",
"indent",
"=",
"indent",
",",
"separators",
"=",
"separators",
")",
",",
"'\\n'",
")",
",",
"mimetype",
"=",
"current_app",
".",
"config",
"[",
"'JSONIFY_MIMETYPE'",
"]",
")"
] | [
197,
0
] | [
264,
5
] | python | en | ['en', 'en', 'en'] | True |
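A minimal usage sketch for the jsonify record above; the route and payload fields are illustrative, not taken from the dataset:

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/_get_status')
def get_status():
    # Multiple keyword arguments are converted to a dict before dumping.
    return jsonify(service='api', healthy=True, uptime=3600)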
JSONEncoder.default | (self, o) | Implement this method in a subclass such that it returns a
serializable object for ``o``, or calls the base implementation (to
raise a :exc:`TypeError`).
For example, to support arbitrary iterators, you could implement
default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
| Implement this method in a subclass such that it returns a
serializable object for ``o``, or calls the base implementation (to
raise a :exc:`TypeError`). | def default(self, o):
"""Implement this method in a subclass such that it returns a
serializable object for ``o``, or calls the base implementation (to
raise a :exc:`TypeError`).
For example, to support arbitrary iterators, you could implement
default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
if isinstance(o, date):
return http_date(o.timetuple())
if isinstance(o, uuid.UUID):
return str(o)
if hasattr(o, '__html__'):
return text_type(o.__html__())
return _json.JSONEncoder.default(self, o) | [
"def",
"default",
"(",
"self",
",",
"o",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"date",
")",
":",
"return",
"http_date",
"(",
"o",
".",
"timetuple",
"(",
")",
")",
"if",
"isinstance",
"(",
"o",
",",
"uuid",
".",
"UUID",
")",
":",
"return",
"str",
"(",
"o",
")",
"if",
"hasattr",
"(",
"o",
",",
"'__html__'",
")",
":",
"return",
"text_type",
"(",
"o",
".",
"__html__",
"(",
")",
")",
"return",
"_json",
".",
"JSONEncoder",
".",
"default",
"(",
"self",
",",
"o",
")"
] | [
56,
4
] | [
79,
49
] | python | en | ['en', 'en', 'en'] | True |
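A hedged sketch of overriding default() as the docstring above suggests, assuming a Flask version that still exposes flask.json.JSONEncoder; the Decimal handling is illustrative:

import decimal

from flask.json import JSONEncoder

class DecimalJSONEncoder(JSONEncoder):
    def default(self, o):
        # Serialize Decimal values as strings; defer everything else to
        # the base implementation, which ultimately raises TypeError.
        if isinstance(o, decimal.Decimal):
            return str(o)
        return JSONEncoder.default(self, o)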
_group_matching | (tlist, cls) | Groups Tokens that have beginning and end. | Groups Tokens that have beginning and end. | def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
opens = []
tidx_offset = 0
for idx, token in enumerate(list(tlist)):
tidx = idx - tidx_offset
if token.is_whitespace:
# ~50% of tokens will be whitespace. Checking for them early
# avoids 3 comparisons for those, at the cost of 1 extra comparison
# for the other ~50% of tokens.
continue
if token.is_group and not isinstance(token, cls):
# Check inside previously grouped (i.e. parenthesis) if group
# of different type is inside (i.e., case), though ideally this
# should check for all open/close tokens at once to avoid recursion
_group_matching(token, cls)
continue
if token.match(*cls.M_OPEN):
opens.append(tidx)
elif token.match(*cls.M_CLOSE):
try:
open_idx = opens.pop()
except IndexError:
# this indicates invalid sql and unbalanced tokens.
# instead of break, continue in case other "valid" groups exist
continue
close_idx = tidx
tlist.group_tokens(cls, open_idx, close_idx)
tidx_offset += close_idx - open_idx | [
"def",
"_group_matching",
"(",
"tlist",
",",
"cls",
")",
":",
"opens",
"=",
"[",
"]",
"tidx_offset",
"=",
"0",
"for",
"idx",
",",
"token",
"in",
"enumerate",
"(",
"list",
"(",
"tlist",
")",
")",
":",
"tidx",
"=",
"idx",
"-",
"tidx_offset",
"if",
"token",
".",
"is_whitespace",
":",
"# ~50% of tokens will be whitespace. Will checking early",
"# for them avoid 3 comparisons, but then add 1 more comparison",
"# for the other ~50% of tokens...",
"continue",
"if",
"token",
".",
"is_group",
"and",
"not",
"isinstance",
"(",
"token",
",",
"cls",
")",
":",
"# Check inside previously grouped (i.e. parenthesis) if group",
"# of different type is inside (i.e., case). though ideally should",
"# should check for all open/close tokens at once to avoid recursion",
"_group_matching",
"(",
"token",
",",
"cls",
")",
"continue",
"if",
"token",
".",
"match",
"(",
"*",
"cls",
".",
"M_OPEN",
")",
":",
"opens",
".",
"append",
"(",
"tidx",
")",
"elif",
"token",
".",
"match",
"(",
"*",
"cls",
".",
"M_CLOSE",
")",
":",
"try",
":",
"open_idx",
"=",
"opens",
".",
"pop",
"(",
")",
"except",
"IndexError",
":",
"# this indicates invalid sql and unbalanced tokens.",
"# instead of break, continue in case other \"valid\" groups exist",
"continue",
"close_idx",
"=",
"tidx",
"tlist",
".",
"group_tokens",
"(",
"cls",
",",
"open_idx",
",",
"close_idx",
")",
"tidx_offset",
"+=",
"close_idx",
"-",
"open_idx"
] | [
16,
0
] | [
48,
47
] | python | en | ['en', 'en', 'en'] | True |
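To observe this grouping from the public API, one can parse a statement containing parentheses and walk the resulting token tree; the query text and helper below are illustrative:

import sqlparse
from sqlparse import sql

def find_parens(token_list):
    # Recursively collect sql.Parenthesis groups from a parsed statement.
    for tok in token_list.tokens:
        if isinstance(tok, sql.Parenthesis):
            yield tok
        elif tok.is_group:
            yield from find_parens(tok)

statement = sqlparse.parse('select (a + b) from t')[0]
print(list(find_parens(statement)))  # expected: one Parenthesis group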
group_order | (tlist) | Group together Identifier and Asc/Desc token | Group together Identifier and Asc/Desc token | def group_order(tlist):
"""Group together Identifier and Asc/Desc token"""
tidx, token = tlist.token_next_by(t=T.Keyword.Order)
while token:
pidx, prev_ = tlist.token_prev(tidx)
if imt(prev_, i=sql.Identifier, t=T.Number):
tlist.group_tokens(sql.Identifier, pidx, tidx)
tidx = pidx
tidx, token = tlist.token_next_by(t=T.Keyword.Order, idx=tidx) | [
"def",
"group_order",
"(",
"tlist",
")",
":",
"tidx",
",",
"token",
"=",
"tlist",
".",
"token_next_by",
"(",
"t",
"=",
"T",
".",
"Keyword",
".",
"Order",
")",
"while",
"token",
":",
"pidx",
",",
"prev_",
"=",
"tlist",
".",
"token_prev",
"(",
"tidx",
")",
"if",
"imt",
"(",
"prev_",
",",
"i",
"=",
"sql",
".",
"Identifier",
",",
"t",
"=",
"T",
".",
"Number",
")",
":",
"tlist",
".",
"group_tokens",
"(",
"sql",
".",
"Identifier",
",",
"pidx",
",",
"tidx",
")",
"tidx",
"=",
"pidx",
"tidx",
",",
"token",
"=",
"tlist",
".",
"token_next_by",
"(",
"t",
"=",
"T",
".",
"Keyword",
".",
"Order",
",",
"idx",
"=",
"tidx",
")"
] | [
352,
0
] | [
360,
70
] | python | en | ['en', 'en', 'en'] | True |
_group | (tlist, cls, match,
valid_prev=lambda t: True,
valid_next=lambda t: True,
post=None,
extend=True,
recurse=True
) | Groups together tokens that are joined by a middle token. i.e. x < y | Groups together tokens that are joined by a middle token. i.e. x < y | def _group(tlist, cls, match,
valid_prev=lambda t: True,
valid_next=lambda t: True,
post=None,
extend=True,
recurse=True
):
"""Groups together tokens that are joined by a middle token. i.e. x < y"""
tidx_offset = 0
pidx, prev_ = None, None
for idx, token in enumerate(list(tlist)):
tidx = idx - tidx_offset
if tidx < 0: # tidx shouldn't get negative
continue
if token.is_whitespace:
continue
if recurse and token.is_group and not isinstance(token, cls):
_group(token, cls, match, valid_prev, valid_next, post, extend)
if match(token):
nidx, next_ = tlist.token_next(tidx)
if prev_ and valid_prev(prev_) and valid_next(next_):
from_idx, to_idx = post(tlist, pidx, tidx, nidx)
grp = tlist.group_tokens(cls, from_idx, to_idx, extend=extend)
tidx_offset += to_idx - from_idx
pidx, prev_ = from_idx, grp
continue
pidx, prev_ = tidx, token | [
"def",
"_group",
"(",
"tlist",
",",
"cls",
",",
"match",
",",
"valid_prev",
"=",
"lambda",
"t",
":",
"True",
",",
"valid_next",
"=",
"lambda",
"t",
":",
"True",
",",
"post",
"=",
"None",
",",
"extend",
"=",
"True",
",",
"recurse",
"=",
"True",
")",
":",
"tidx_offset",
"=",
"0",
"pidx",
",",
"prev_",
"=",
"None",
",",
"None",
"for",
"idx",
",",
"token",
"in",
"enumerate",
"(",
"list",
"(",
"tlist",
")",
")",
":",
"tidx",
"=",
"idx",
"-",
"tidx_offset",
"if",
"tidx",
"<",
"0",
":",
"# tidx shouldn't get negative",
"continue",
"if",
"token",
".",
"is_whitespace",
":",
"continue",
"if",
"recurse",
"and",
"token",
".",
"is_group",
"and",
"not",
"isinstance",
"(",
"token",
",",
"cls",
")",
":",
"_group",
"(",
"token",
",",
"cls",
",",
"match",
",",
"valid_prev",
",",
"valid_next",
",",
"post",
",",
"extend",
")",
"if",
"match",
"(",
"token",
")",
":",
"nidx",
",",
"next_",
"=",
"tlist",
".",
"token_next",
"(",
"tidx",
")",
"if",
"prev_",
"and",
"valid_prev",
"(",
"prev_",
")",
"and",
"valid_next",
"(",
"next_",
")",
":",
"from_idx",
",",
"to_idx",
"=",
"post",
"(",
"tlist",
",",
"pidx",
",",
"tidx",
",",
"nidx",
")",
"grp",
"=",
"tlist",
".",
"group_tokens",
"(",
"cls",
",",
"from_idx",
",",
"to_idx",
",",
"extend",
"=",
"extend",
")",
"tidx_offset",
"+=",
"to_idx",
"-",
"from_idx",
"pidx",
",",
"prev_",
"=",
"from_idx",
",",
"grp",
"continue",
"pidx",
",",
"prev_",
"=",
"tidx",
",",
"token"
] | [
421,
0
] | [
453,
33
] | python | en | ['en', 'en', 'en'] | True |
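A small sketch of the "middle token" grouping this helper enables, using a comparison inside a WHERE clause; the query is illustrative:

import sqlparse
from sqlparse import sql

statement = sqlparse.parse('select * from t where x < y')[0]
where = next(tok for tok in statement.tokens if isinstance(tok, sql.Where))
# 'x < y' should come back as a single sql.Comparison group.
comparisons = [tok for tok in where.tokens if isinstance(tok, sql.Comparison)]
print(comparisons)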
CookieStorage._get | (self, *args, **kwargs) |
Retrieve a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
|
Retrieve a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
| def _get(self, *args, **kwargs):
"""
Retrieve a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved | [
"def",
"_get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"request",
".",
"COOKIES",
".",
"get",
"(",
"self",
".",
"cookie_name",
")",
"messages",
"=",
"self",
".",
"_decode",
"(",
"data",
")",
"all_retrieved",
"=",
"not",
"(",
"messages",
"and",
"messages",
"[",
"-",
"1",
"]",
"==",
"self",
".",
"not_finished",
")",
"if",
"messages",
"and",
"not",
"all_retrieved",
":",
"# remove the sentinel value",
"messages",
".",
"pop",
"(",
")",
"return",
"messages",
",",
"all_retrieved"
] | [
78,
4
] | [
91,
38
] | python | en | ['en', 'error', 'th'] | False |
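A typical producer/consumer pair around this storage backend; the view names are illustrative:

from django.contrib import messages

def update_profile(request):
    # Queued through the configured backend (e.g. the messages cookie).
    messages.success(request, 'Profile updated.')

def render_notifications(request):
    # Iterating marks the storage as used, so the messages are cleared.
    return [str(m) for m in messages.get_messages(request)]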
CookieStorage._update_cookie | (self, encoded_data, response) |
Either set the cookie with the encoded data if there is any data to
store, or delete the cookie.
|
Either set the cookie with the encoded data if there is any data to
store, or delete the cookie.
| def _update_cookie(self, encoded_data, response):
"""
Either set the cookie with the encoded data if there is any data to
store, or delete the cookie.
"""
if encoded_data:
response.set_cookie(
self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
else:
response.delete_cookie(
self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN,
samesite=settings.SESSION_COOKIE_SAMESITE,
) | [
"def",
"_update_cookie",
"(",
"self",
",",
"encoded_data",
",",
"response",
")",
":",
"if",
"encoded_data",
":",
"response",
".",
"set_cookie",
"(",
"self",
".",
"cookie_name",
",",
"encoded_data",
",",
"domain",
"=",
"settings",
".",
"SESSION_COOKIE_DOMAIN",
",",
"secure",
"=",
"settings",
".",
"SESSION_COOKIE_SECURE",
"or",
"None",
",",
"httponly",
"=",
"settings",
".",
"SESSION_COOKIE_HTTPONLY",
"or",
"None",
",",
"samesite",
"=",
"settings",
".",
"SESSION_COOKIE_SAMESITE",
",",
")",
"else",
":",
"response",
".",
"delete_cookie",
"(",
"self",
".",
"cookie_name",
",",
"domain",
"=",
"settings",
".",
"SESSION_COOKIE_DOMAIN",
",",
"samesite",
"=",
"settings",
".",
"SESSION_COOKIE_SAMESITE",
",",
")"
] | [
93,
4
] | [
111,
13
] | python | en | ['en', 'error', 'th'] | False |
CookieStorage._store | (self, messages, response, remove_oldest=True, *args, **kwargs) |
Store the messages to a cookie and return a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, remove
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
|
Store the messages to a cookie and return a list of any messages which
could not be stored. | def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Store the messages to a cookie and return a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, remove
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages | [
"def",
"_store",
"(",
"self",
",",
"messages",
",",
"response",
",",
"remove_oldest",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"unstored_messages",
"=",
"[",
"]",
"encoded_data",
"=",
"self",
".",
"_encode",
"(",
"messages",
")",
"if",
"self",
".",
"max_cookie_size",
":",
"# data is going to be stored eventually by SimpleCookie, which",
"# adds its own overhead, which we must account for.",
"cookie",
"=",
"SimpleCookie",
"(",
")",
"# create outside the loop",
"def",
"stored_length",
"(",
"val",
")",
":",
"return",
"len",
"(",
"cookie",
".",
"value_encode",
"(",
"val",
")",
"[",
"1",
"]",
")",
"while",
"encoded_data",
"and",
"stored_length",
"(",
"encoded_data",
")",
">",
"self",
".",
"max_cookie_size",
":",
"if",
"remove_oldest",
":",
"unstored_messages",
".",
"append",
"(",
"messages",
".",
"pop",
"(",
"0",
")",
")",
"else",
":",
"unstored_messages",
".",
"insert",
"(",
"0",
",",
"messages",
".",
"pop",
"(",
")",
")",
"encoded_data",
"=",
"self",
".",
"_encode",
"(",
"messages",
"+",
"[",
"self",
".",
"not_finished",
"]",
",",
"encode_empty",
"=",
"unstored_messages",
")",
"self",
".",
"_update_cookie",
"(",
"encoded_data",
",",
"response",
")",
"return",
"unstored_messages"
] | [
113,
4
] | [
140,
32
] | python | en | ['en', 'error', 'th'] | False |
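Messages reported as unstored here are normally picked up by a chained backend; Django's default configuration does exactly that:

# settings.py
# FallbackStorage tries CookieStorage first and falls back to session
# storage for messages that exceed max_cookie_size.
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'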
CookieStorage._legacy_hash | (self, value) |
# RemovedInDjango40Warning: pre-Django 3.1 hashes will be invalid.
Create an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
|
# RemovedInDjango40Warning: pre-Django 3.1 hashes will be invalid.
Create an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
| def _legacy_hash(self, value):
"""
# RemovedInDjango40Warning: pre-Django 3.1 hashes will be invalid.
Create an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
# The class wide key salt is not reused here since older Django
# versions had it fixed and making it dynamic would break old hashes if
# self.key_salt is changed.
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest() | [
"def",
"_legacy_hash",
"(",
"self",
",",
"value",
")",
":",
"# The class wide key salt is not reused here since older Django",
"# versions had it fixed and making it dynamic would break old hashes if",
"# self.key_salt is changed.",
"key_salt",
"=",
"'django.contrib.messages'",
"return",
"salted_hmac",
"(",
"key_salt",
",",
"value",
")",
".",
"hexdigest",
"(",
")"
] | [
142,
4
] | [
152,
55
] | python | en | ['en', 'error', 'th'] | False |
CookieStorage._encode | (self, messages, encode_empty=False) |
Return an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
|
Return an encoded version of the messages list which can be stored as
plain text. | def _encode(self, messages, encode_empty=False):
"""
Return an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
return self.signer.sign_object(messages, serializer=MessageSerializer, compress=True) | [
"def",
"_encode",
"(",
"self",
",",
"messages",
",",
"encode_empty",
"=",
"False",
")",
":",
"if",
"messages",
"or",
"encode_empty",
":",
"return",
"self",
".",
"signer",
".",
"sign_object",
"(",
"messages",
",",
"serializer",
"=",
"MessageSerializer",
",",
"compress",
"=",
"True",
")"
] | [
154,
4
] | [
163,
97
] | python | en | ['en', 'error', 'th'] | False |
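The signing primitive used by _encode can be exercised directly (Django 3.1+, with a configured SECRET_KEY); the payload is illustrative:

from django.core import signing

signer = signing.Signer()
token = signer.sign_object(['hello', 'world'], compress=True)
# Round-trips only if the signature (and hence SECRET_KEY) matches.
assert signer.unsign_object(token) == ['hello', 'world']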
CookieStorage._decode | (self, data) |
Safely decode an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, return None.
|
Safely decode an encoded text stream back into a list of messages. | def _decode(self, data):
"""
Safely decode an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, return None.
"""
if not data:
return None
try:
return self.signer.unsign_object(data, serializer=MessageSerializer)
# RemovedInDjango41Warning: when the deprecation ends, replace with:
#
# except (signing.BadSignature, json.JSONDecodeError):
# pass
except signing.BadSignature:
# RemovedInDjango40Warning: when the deprecation ends, replace
# with:
# decoded = None.
decoded = self._legacy_decode(data)
except (binascii.Error, json.JSONDecodeError):
decoded = self.signer.unsign(data)
if decoded:
# RemovedInDjango41Warning.
try:
return json.loads(decoded, cls=MessageDecoder)
except json.JSONDecodeError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None | [
"def",
"_decode",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"data",
":",
"return",
"None",
"try",
":",
"return",
"self",
".",
"signer",
".",
"unsign_object",
"(",
"data",
",",
"serializer",
"=",
"MessageSerializer",
")",
"# RemovedInDjango41Warning: when the deprecation ends, replace with:",
"#",
"# except (signing.BadSignature, json.JSONDecodeError):",
"# pass",
"except",
"signing",
".",
"BadSignature",
":",
"# RemovedInDjango40Warning: when the deprecation ends, replace",
"# with:",
"# decoded = None.",
"decoded",
"=",
"self",
".",
"_legacy_decode",
"(",
"data",
")",
"except",
"(",
"binascii",
".",
"Error",
",",
"json",
".",
"JSONDecodeError",
")",
":",
"decoded",
"=",
"self",
".",
"signer",
".",
"unsign",
"(",
"data",
")",
"if",
"decoded",
":",
"# RemovedInDjango41Warning.",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"decoded",
",",
"cls",
"=",
"MessageDecoder",
")",
"except",
"json",
".",
"JSONDecodeError",
":",
"pass",
"# Mark the data as used (so it gets removed) since something was wrong",
"# with the data.",
"self",
".",
"used",
"=",
"True",
"return",
"None"
] | [
165,
4
] | [
197,
19
] | python | en | ['en', 'error', 'th'] | False |
_make_flow | (request, scopes, return_url=None) | Creates a Web Server Flow
Args:
request: A Django request object.
scopes: the request oauth2 scopes.
return_url: The URL to return to after the flow is complete. Defaults
to the path of the current request.
Returns:
An OAuth2 flow object that has been stored in the session.
| Creates a Web Server Flow | def _make_flow(request, scopes, return_url=None):
"""Creates a Web Server Flow
Args:
request: A Django request object.
scopes: the request oauth2 scopes.
return_url: The URL to return to after the flow is complete. Defaults
to the path of the current request.
Returns:
An OAuth2 flow object that has been stored in the session.
"""
# Generate a CSRF token to prevent malicious requests.
csrf_token = hashlib.sha256(os.urandom(1024)).hexdigest()
request.session[_CSRF_KEY] = csrf_token
state = json.dumps({
'csrf_token': csrf_token,
'return_url': return_url,
})
flow = client.OAuth2WebServerFlow(
client_id=django_util.oauth2_settings.client_id,
client_secret=django_util.oauth2_settings.client_secret,
scope=scopes,
state=state,
redirect_uri=request.build_absolute_uri(
urlresolvers.reverse("google_oauth:callback")))
flow_key = _FLOW_KEY.format(csrf_token)
request.session[flow_key] = jsonpickle.encode(flow)
return flow | [
"def",
"_make_flow",
"(",
"request",
",",
"scopes",
",",
"return_url",
"=",
"None",
")",
":",
"# Generate a CSRF token to prevent malicious requests.",
"csrf_token",
"=",
"hashlib",
".",
"sha256",
"(",
"os",
".",
"urandom",
"(",
"1024",
")",
")",
".",
"hexdigest",
"(",
")",
"request",
".",
"session",
"[",
"_CSRF_KEY",
"]",
"=",
"csrf_token",
"state",
"=",
"json",
".",
"dumps",
"(",
"{",
"'csrf_token'",
":",
"csrf_token",
",",
"'return_url'",
":",
"return_url",
",",
"}",
")",
"flow",
"=",
"client",
".",
"OAuth2WebServerFlow",
"(",
"client_id",
"=",
"django_util",
".",
"oauth2_settings",
".",
"client_id",
",",
"client_secret",
"=",
"django_util",
".",
"oauth2_settings",
".",
"client_secret",
",",
"scope",
"=",
"scopes",
",",
"state",
"=",
"state",
",",
"redirect_uri",
"=",
"request",
".",
"build_absolute_uri",
"(",
"urlresolvers",
".",
"reverse",
"(",
"\"google_oauth:callback\"",
")",
")",
")",
"flow_key",
"=",
"_FLOW_KEY",
".",
"format",
"(",
"csrf_token",
")",
"request",
".",
"session",
"[",
"flow_key",
"]",
"=",
"jsonpickle",
".",
"encode",
"(",
"flow",
")",
"return",
"flow"
] | [
43,
0
] | [
75,
15
] | python | en | ['en', 'hu', 'en'] | True |
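For the reverse("google_oauth:callback") above to resolve, the project URLconf must include oauth2client's django_util site, roughly as below for an older Django; the '^oauth2/' prefix is illustrative:

# urls.py
from django.conf.urls import include, url
import oauth2client.contrib.django_util.site as django_util_site

urlpatterns = [
    url(r'^oauth2/', include(django_util_site.urls)),
]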
_get_flow_for_token | (csrf_token, request) | Looks up the flow in session to recover information about requested
scopes.
Args:
csrf_token: The token passed in the callback request that should
match the one previously generated and stored in the request on the
initial authorization view.
Returns:
The OAuth2 Flow object associated with this flow based on the
CSRF token.
| Looks up the flow in session to recover information about requested
scopes. | def _get_flow_for_token(csrf_token, request):
""" Looks up the flow in session to recover information about requested
scopes.
Args:
csrf_token: The token passed in the callback request that should
match the one previously generated and stored in the request on the
initial authorization view.
Returns:
The OAuth2 Flow object associated with this flow based on the
CSRF token.
"""
flow_pickle = request.session.get(_FLOW_KEY.format(csrf_token), None)
return None if flow_pickle is None else jsonpickle.decode(flow_pickle) | [
"def",
"_get_flow_for_token",
"(",
"csrf_token",
",",
"request",
")",
":",
"flow_pickle",
"=",
"request",
".",
"session",
".",
"get",
"(",
"_FLOW_KEY",
".",
"format",
"(",
"csrf_token",
")",
",",
"None",
")",
"return",
"None",
"if",
"flow_pickle",
"is",
"None",
"else",
"jsonpickle",
".",
"decode",
"(",
"flow_pickle",
")"
] | [
78,
0
] | [
92,
74
] | python | en | ['en', 'en', 'en'] | True |
oauth2_callback | (request) | View that handles the user's return from OAuth2 provider.
This view verifies the CSRF state and OAuth authorization code, and on
success stores the credentials obtained in the storage provider,
and redirects to the return_url specified in the authorize view and
stored in the session.
Args:
request: Django request.
Returns:
A redirect response back to the return_url.
| View that handles the user's return from OAuth2 provider. | def oauth2_callback(request):
""" View that handles the user's return from OAuth2 provider.
This view verifies the CSRF state and OAuth authorization code, and on
success stores the credentials obtained in the storage provider,
and redirects to the return_url specified in the authorize view and
stored in the session.
Args:
request: Django request.
Returns:
A redirect response back to the return_url.
"""
if 'error' in request.GET:
reason = request.GET.get(
'error_description', request.GET.get('error', ''))
reason = html.escape(reason)
return http.HttpResponseBadRequest(
'Authorization failed {0}'.format(reason))
try:
encoded_state = request.GET['state']
code = request.GET['code']
except KeyError:
return http.HttpResponseBadRequest(
'Request missing state or authorization code')
try:
server_csrf = request.session[_CSRF_KEY]
except KeyError:
return http.HttpResponseBadRequest(
'No existing session for this flow.')
try:
state = json.loads(encoded_state)
client_csrf = state['csrf_token']
return_url = state['return_url']
except (ValueError, KeyError):
return http.HttpResponseBadRequest('Invalid state parameter.')
if client_csrf != server_csrf:
return http.HttpResponseBadRequest('Invalid CSRF token.')
flow = _get_flow_for_token(client_csrf, request)
if not flow:
return http.HttpResponseBadRequest('Missing Oauth2 flow.')
try:
credentials = flow.step2_exchange(code)
except client.FlowExchangeError as exchange_error:
return http.HttpResponseBadRequest(
'An error has occurred: {0}'.format(exchange_error))
get_storage(request).put(credentials)
signals.oauth2_authorized.send(sender=signals.oauth2_authorized,
request=request, credentials=credentials)
return shortcuts.redirect(return_url) | [
"def",
"oauth2_callback",
"(",
"request",
")",
":",
"if",
"'error'",
"in",
"request",
".",
"GET",
":",
"reason",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'error_description'",
",",
"request",
".",
"GET",
".",
"get",
"(",
"'error'",
",",
"''",
")",
")",
"reason",
"=",
"html",
".",
"escape",
"(",
"reason",
")",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'Authorization failed {0}'",
".",
"format",
"(",
"reason",
")",
")",
"try",
":",
"encoded_state",
"=",
"request",
".",
"GET",
"[",
"'state'",
"]",
"code",
"=",
"request",
".",
"GET",
"[",
"'code'",
"]",
"except",
"KeyError",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'Request missing state or authorization code'",
")",
"try",
":",
"server_csrf",
"=",
"request",
".",
"session",
"[",
"_CSRF_KEY",
"]",
"except",
"KeyError",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'No existing session for this flow.'",
")",
"try",
":",
"state",
"=",
"json",
".",
"loads",
"(",
"encoded_state",
")",
"client_csrf",
"=",
"state",
"[",
"'csrf_token'",
"]",
"return_url",
"=",
"state",
"[",
"'return_url'",
"]",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'Invalid state parameter.'",
")",
"if",
"client_csrf",
"!=",
"server_csrf",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'Invalid CSRF token.'",
")",
"flow",
"=",
"_get_flow_for_token",
"(",
"client_csrf",
",",
"request",
")",
"if",
"not",
"flow",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'Missing Oauth2 flow.'",
")",
"try",
":",
"credentials",
"=",
"flow",
".",
"step2_exchange",
"(",
"code",
")",
"except",
"client",
".",
"FlowExchangeError",
"as",
"exchange_error",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
"'An error has occurred: {0}'",
".",
"format",
"(",
"exchange_error",
")",
")",
"get_storage",
"(",
"request",
")",
".",
"put",
"(",
"credentials",
")",
"signals",
".",
"oauth2_authorized",
".",
"send",
"(",
"sender",
"=",
"signals",
".",
"oauth2_authorized",
",",
"request",
"=",
"request",
",",
"credentials",
"=",
"credentials",
")",
"return",
"shortcuts",
".",
"redirect",
"(",
"return_url",
")"
] | [
95,
0
] | [
155,
41
] | python | en | ['en', 'en', 'en'] | True |
oauth2_authorize | (request) | View to start the OAuth2 Authorization flow.
This view starts the OAuth2 authorization flow. If scopes is passed in
as a GET URL parameter, it will authorize those scopes, otherwise the
default scopes specified in settings. The return_url can also be
specified as a GET parameter, otherwise the referer header will be
checked, and if that isn't found it will return to the root path.
Args:
request: The Django request object.
Returns:
A redirect to Google OAuth2 Authorization.
| View to start the OAuth2 Authorization flow. | def oauth2_authorize(request):
""" View to start the OAuth2 Authorization flow.
This view starts the OAuth2 authorization flow. If scopes is passed in
as a GET URL parameter, it will authorize those scopes, otherwise the
default scopes specified in settings. The return_url can also be
specified as a GET parameter, otherwise the referer header will be
checked, and if that isn't found it will return to the root path.
Args:
request: The Django request object.
Returns:
A redirect to Google OAuth2 Authorization.
"""
return_url = request.GET.get('return_url', None)
if not return_url:
return_url = request.META.get('HTTP_REFERER', '/')
scopes = request.GET.getlist('scopes', django_util.oauth2_settings.scopes)
# Model storage (but not session storage) requires a logged in user
if django_util.oauth2_settings.storage_model:
if not request.user.is_authenticated():
return redirect('{0}?next={1}'.format(
settings.LOGIN_URL, parse.quote(request.get_full_path())))
# This checks for the case where we ended up here because of a logged
# out user but we had credentials for it in the first place
else:
user_oauth = django_util.UserOAuth2(request, scopes, return_url)
if user_oauth.has_credentials():
return redirect(return_url)
flow = _make_flow(request=request, scopes=scopes, return_url=return_url)
auth_url = flow.step1_get_authorize_url()
return shortcuts.redirect(auth_url) | [
"def",
"oauth2_authorize",
"(",
"request",
")",
":",
"return_url",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'return_url'",
",",
"None",
")",
"if",
"not",
"return_url",
":",
"return_url",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_REFERER'",
",",
"'/'",
")",
"scopes",
"=",
"request",
".",
"GET",
".",
"getlist",
"(",
"'scopes'",
",",
"django_util",
".",
"oauth2_settings",
".",
"scopes",
")",
"# Model storage (but not session storage) requires a logged in user",
"if",
"django_util",
".",
"oauth2_settings",
".",
"storage_model",
":",
"if",
"not",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"return",
"redirect",
"(",
"'{0}?next={1}'",
".",
"format",
"(",
"settings",
".",
"LOGIN_URL",
",",
"parse",
".",
"quote",
"(",
"request",
".",
"get_full_path",
"(",
")",
")",
")",
")",
"# This checks for the case where we ended up here because of a logged",
"# out user but we had credentials for it in the first place",
"else",
":",
"user_oauth",
"=",
"django_util",
".",
"UserOAuth2",
"(",
"request",
",",
"scopes",
",",
"return_url",
")",
"if",
"user_oauth",
".",
"has_credentials",
"(",
")",
":",
"return",
"redirect",
"(",
"return_url",
")",
"flow",
"=",
"_make_flow",
"(",
"request",
"=",
"request",
",",
"scopes",
"=",
"scopes",
",",
"return_url",
"=",
"return_url",
")",
"auth_url",
"=",
"flow",
".",
"step1_get_authorize_url",
"(",
")",
"return",
"shortcuts",
".",
"redirect",
"(",
"auth_url",
")"
] | [
158,
0
] | [
192,
39
] | python | en | ['en', 'en', 'en'] | True |
URIVariable.parse | (self) | Parse the variable.
This finds the:
- operator,
- set of safe characters,
- variables, and
- defaults.
| Parse the variable. | def parse(self):
"""Parse the variable.
This finds the:
- operator,
- set of safe characters,
- variables, and
- defaults.
"""
var_list = self.original
if self.original[0] in URIVariable.operators:
self.operator = self.original[0]
var_list = self.original[1:]
if self.operator in URIVariable.operators[:2]:
self.safe = URIVariable.reserved
var_list = var_list.split(',')
for var in var_list:
default_val = None
name = var
if '=' in var:
name, default_val = tuple(var.split('=', 1))
explode = False
if name.endswith('*'):
explode = True
name = name[:-1]
prefix = None
if ':' in name:
name, prefix = tuple(name.split(':', 1))
prefix = int(prefix)
if default_val:
self.defaults[name] = default_val
self.variables.append(
(name, {'explode': explode, 'prefix': prefix})
)
self.variable_names = [varname for (varname, _) in self.variables] | [
"def",
"parse",
"(",
"self",
")",
":",
"var_list",
"=",
"self",
".",
"original",
"if",
"self",
".",
"original",
"[",
"0",
"]",
"in",
"URIVariable",
".",
"operators",
":",
"self",
".",
"operator",
"=",
"self",
".",
"original",
"[",
"0",
"]",
"var_list",
"=",
"self",
".",
"original",
"[",
"1",
":",
"]",
"if",
"self",
".",
"operator",
"in",
"URIVariable",
".",
"operators",
"[",
":",
"2",
"]",
":",
"self",
".",
"safe",
"=",
"URIVariable",
".",
"reserved",
"var_list",
"=",
"var_list",
".",
"split",
"(",
"','",
")",
"for",
"var",
"in",
"var_list",
":",
"default_val",
"=",
"None",
"name",
"=",
"var",
"if",
"'='",
"in",
"var",
":",
"name",
",",
"default_val",
"=",
"tuple",
"(",
"var",
".",
"split",
"(",
"'='",
",",
"1",
")",
")",
"explode",
"=",
"False",
"if",
"name",
".",
"endswith",
"(",
"'*'",
")",
":",
"explode",
"=",
"True",
"name",
"=",
"name",
"[",
":",
"-",
"1",
"]",
"prefix",
"=",
"None",
"if",
"':'",
"in",
"name",
":",
"name",
",",
"prefix",
"=",
"tuple",
"(",
"name",
".",
"split",
"(",
"':'",
",",
"1",
")",
")",
"prefix",
"=",
"int",
"(",
"prefix",
")",
"if",
"default_val",
":",
"self",
".",
"defaults",
"[",
"name",
"]",
"=",
"default_val",
"self",
".",
"variables",
".",
"append",
"(",
"(",
"name",
",",
"{",
"'explode'",
":",
"explode",
",",
"'prefix'",
":",
"prefix",
"}",
")",
")",
"self",
".",
"variable_names",
"=",
"[",
"varname",
"for",
"(",
"varname",
",",
"_",
")",
"in",
"self",
".",
"variables",
"]"
] | [
72,
4
] | [
115,
74
] | python | en | ['en', 'en', 'en'] | True |
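End to end, the variables parsed here drive template expansion; a small example with path and query operators:

from uritemplate import URITemplate

template = URITemplate('https://api.github.com/users{/user}{?page}')
print(template.expand(user='sigmavirus24', page='2'))
# expected: https://api.github.com/users/sigmavirus24?page=2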
URIVariable.post_parse | (self) | Set ``start``, ``join_str`` and ``safe`` attributes.
After parsing the variable, we need to set up these attributes and it
only makes sense to do it in a more easily testable way.
| Set ``start``, ``join_str`` and ``safe`` attributes. | def post_parse(self):
"""Set ``start``, ``join_str`` and ``safe`` attributes.
After parsing the variable, we need to set up these attributes and it
only makes sense to do it in a more easily testable way.
"""
self.safe = ''
self.start = self.join_str = self.operator
if self.operator == '+':
self.start = ''
if self.operator in ('+', '#', ''):
self.join_str = ','
if self.operator == '#':
self.start = '#'
if self.operator == '?':
self.start = '?'
self.join_str = '&'
if self.operator in ('+', '#'):
self.safe = URIVariable.reserved | [
"def",
"post_parse",
"(",
"self",
")",
":",
"self",
".",
"safe",
"=",
"''",
"self",
".",
"start",
"=",
"self",
".",
"join_str",
"=",
"self",
".",
"operator",
"if",
"self",
".",
"operator",
"==",
"'+'",
":",
"self",
".",
"start",
"=",
"''",
"if",
"self",
".",
"operator",
"in",
"(",
"'+'",
",",
"'#'",
",",
"''",
")",
":",
"self",
".",
"join_str",
"=",
"','",
"if",
"self",
".",
"operator",
"==",
"'#'",
":",
"self",
".",
"start",
"=",
"'#'",
"if",
"self",
".",
"operator",
"==",
"'?'",
":",
"self",
".",
"start",
"=",
"'?'",
"self",
".",
"join_str",
"=",
"'&'",
"if",
"self",
".",
"operator",
"in",
"(",
"'+'",
",",
"'#'",
")",
":",
"self",
".",
"safe",
"=",
"URIVariable",
".",
"reserved"
] | [
117,
4
] | [
136,
44
] | python | en | ['en', 'da', 'en'] | True |
URIVariable._query_expansion | (self, name, value, explode, prefix) | Expansion method for the '?' and '&' operators. | Expansion method for the '?' and '&' operators. | def _query_expansion(self, name, value, explode, prefix):
"""Expansion method for the '?' and '&' operators."""
if value is None:
return None
tuples, items = is_list_of_tuples(value)
safe = self.safe
if list_test(value) and not tuples:
if not value:
return None
if explode:
return self.join_str.join(
'%s=%s' % (name, quote(v, safe)) for v in value
)
else:
value = ','.join(quote(v, safe) for v in value)
return '%s=%s' % (name, value)
if dict_test(value) or tuples:
if not value:
return None
items = items or sorted(value.items())
if explode:
return self.join_str.join(
'%s=%s' % (
quote(k, safe), quote(v, safe)
) for k, v in items
)
else:
value = ','.join(
'%s,%s' % (
quote(k, safe), quote(v, safe)
) for k, v in items
)
return '%s=%s' % (name, value)
if value:
value = value[:prefix] if prefix else value
return '%s=%s' % (name, quote(value, safe))
return name + '=' | [
"def",
"_query_expansion",
"(",
"self",
",",
"name",
",",
"value",
",",
"explode",
",",
"prefix",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"tuples",
",",
"items",
"=",
"is_list_of_tuples",
"(",
"value",
")",
"safe",
"=",
"self",
".",
"safe",
"if",
"list_test",
"(",
"value",
")",
"and",
"not",
"tuples",
":",
"if",
"not",
"value",
":",
"return",
"None",
"if",
"explode",
":",
"return",
"self",
".",
"join_str",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"name",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"v",
"in",
"value",
")",
"else",
":",
"value",
"=",
"','",
".",
"join",
"(",
"quote",
"(",
"v",
",",
"safe",
")",
"for",
"v",
"in",
"value",
")",
"return",
"'%s=%s'",
"%",
"(",
"name",
",",
"value",
")",
"if",
"dict_test",
"(",
"value",
")",
"or",
"tuples",
":",
"if",
"not",
"value",
":",
"return",
"None",
"items",
"=",
"items",
"or",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
")",
"if",
"explode",
":",
"return",
"self",
".",
"join_str",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"quote",
"(",
"k",
",",
"safe",
")",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
")",
"else",
":",
"value",
"=",
"','",
".",
"join",
"(",
"'%s,%s'",
"%",
"(",
"quote",
"(",
"k",
",",
"safe",
")",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
")",
"return",
"'%s=%s'",
"%",
"(",
"name",
",",
"value",
")",
"if",
"value",
":",
"value",
"=",
"value",
"[",
":",
"prefix",
"]",
"if",
"prefix",
"else",
"value",
"return",
"'%s=%s'",
"%",
"(",
"name",
",",
"quote",
"(",
"value",
",",
"safe",
")",
")",
"return",
"name",
"+",
"'='"
] | [
138,
4
] | [
178,
25
] | python | en | ['en', 'en', 'en'] | True |
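The branches above correspond to the following observable behaviors of the module-level helper; expected outputs follow RFC 6570:

from uritemplate import expand

# Scalar value: a single percent-encoded key=value pair.
print(expand('/search{?q}', q='hello world'))    # /search?q=hello%20world
# Exploded list: one pair per item, joined with '&'.
print(expand('/search{?tag*}', tag=['a', 'b']))  # /search?tag=a&tag=b
# Non-exploded list: values joined with ','.
print(expand('/search{?tag}', tag=['a', 'b']))   # /search?tag=a,b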
URIVariable._label_path_expansion | (self, name, value, explode, prefix) | Label and path expansion method.
Expands for operators: '/', '.'
| Label and path expansion method. | def _label_path_expansion(self, name, value, explode, prefix):
"""Label and path expansion method.
Expands for operators: '/', '.'
"""
join_str = self.join_str
safe = self.safe
if value is None or (len(value) == 0 and value != ''):
return None
tuples, items = is_list_of_tuples(value)
if list_test(value) and not tuples:
if not explode:
join_str = ','
expanded = join_str.join(
quote(v, safe) for v in value if value is not None
)
return expanded if expanded else None
if dict_test(value) or tuples:
items = items or sorted(value.items())
format_str = '%s=%s'
if not explode:
format_str = '%s,%s'
join_str = ','
expanded = join_str.join(
format_str % (
quote(k, safe), quote(v, safe)
) for k, v in items if v is not None
)
return expanded if expanded else None
value = value[:prefix] if prefix else value
return quote(value, safe) | [
"def",
"_label_path_expansion",
"(",
"self",
",",
"name",
",",
"value",
",",
"explode",
",",
"prefix",
")",
":",
"join_str",
"=",
"self",
".",
"join_str",
"safe",
"=",
"self",
".",
"safe",
"if",
"value",
"is",
"None",
"or",
"(",
"len",
"(",
"value",
")",
"==",
"0",
"and",
"value",
"!=",
"''",
")",
":",
"return",
"None",
"tuples",
",",
"items",
"=",
"is_list_of_tuples",
"(",
"value",
")",
"if",
"list_test",
"(",
"value",
")",
"and",
"not",
"tuples",
":",
"if",
"not",
"explode",
":",
"join_str",
"=",
"','",
"expanded",
"=",
"join_str",
".",
"join",
"(",
"quote",
"(",
"v",
",",
"safe",
")",
"for",
"v",
"in",
"value",
"if",
"value",
"is",
"not",
"None",
")",
"return",
"expanded",
"if",
"expanded",
"else",
"None",
"if",
"dict_test",
"(",
"value",
")",
"or",
"tuples",
":",
"items",
"=",
"items",
"or",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
")",
"format_str",
"=",
"'%s=%s'",
"if",
"not",
"explode",
":",
"format_str",
"=",
"'%s,%s'",
"join_str",
"=",
"','",
"expanded",
"=",
"join_str",
".",
"join",
"(",
"format_str",
"%",
"(",
"quote",
"(",
"k",
",",
"safe",
")",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"expanded",
"if",
"expanded",
"else",
"None",
"value",
"=",
"value",
"[",
":",
"prefix",
"]",
"if",
"prefix",
"else",
"value",
"return",
"quote",
"(",
"value",
",",
"safe",
")"
] | [
180,
4
] | [
218,
33
] | python | en | ['en', 'en', 'en'] | True |
URIVariable._semi_path_expansion | (self, name, value, explode, prefix) | Expansion method for ';' operator. | Expansion method for ';' operator. | def _semi_path_expansion(self, name, value, explode, prefix):
"""Expansion method for ';' operator."""
join_str = self.join_str
safe = self.safe
if value is None:
return None
if self.operator == '?':
join_str = '&'
tuples, items = is_list_of_tuples(value)
if list_test(value) and not tuples:
if explode:
expanded = join_str.join(
'%s=%s' % (
name, quote(v, safe)
) for v in value if v is not None
)
return expanded if expanded else None
else:
value = ','.join(quote(v, safe) for v in value)
return '%s=%s' % (name, value)
if dict_test(value) or tuples:
items = items or sorted(value.items())
if explode:
return join_str.join(
'%s=%s' % (
quote(k, safe), quote(v, safe)
) for k, v in items if v is not None
)
else:
expanded = ','.join(
'%s,%s' % (
quote(k, safe), quote(v, safe)
) for k, v in items if v is not None
)
return '%s=%s' % (name, expanded)
value = value[:prefix] if prefix else value
if value:
return '%s=%s' % (name, quote(value, safe))
return name | [
"def",
"_semi_path_expansion",
"(",
"self",
",",
"name",
",",
"value",
",",
"explode",
",",
"prefix",
")",
":",
"join_str",
"=",
"self",
".",
"join_str",
"safe",
"=",
"self",
".",
"safe",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"if",
"self",
".",
"operator",
"==",
"'?'",
":",
"join_str",
"=",
"'&'",
"tuples",
",",
"items",
"=",
"is_list_of_tuples",
"(",
"value",
")",
"if",
"list_test",
"(",
"value",
")",
"and",
"not",
"tuples",
":",
"if",
"explode",
":",
"expanded",
"=",
"join_str",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"name",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"v",
"in",
"value",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"expanded",
"if",
"expanded",
"else",
"None",
"else",
":",
"value",
"=",
"','",
".",
"join",
"(",
"quote",
"(",
"v",
",",
"safe",
")",
"for",
"v",
"in",
"value",
")",
"return",
"'%s=%s'",
"%",
"(",
"name",
",",
"value",
")",
"if",
"dict_test",
"(",
"value",
")",
"or",
"tuples",
":",
"items",
"=",
"items",
"or",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
")",
"if",
"explode",
":",
"return",
"join_str",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"quote",
"(",
"k",
",",
"safe",
")",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
"if",
"v",
"is",
"not",
"None",
")",
"else",
":",
"expanded",
"=",
"','",
".",
"join",
"(",
"'%s,%s'",
"%",
"(",
"quote",
"(",
"k",
",",
"safe",
")",
",",
"quote",
"(",
"v",
",",
"safe",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"'%s=%s'",
"%",
"(",
"name",
",",
"expanded",
")",
"value",
"=",
"value",
"[",
":",
"prefix",
"]",
"if",
"prefix",
"else",
"value",
"if",
"value",
":",
"return",
"'%s=%s'",
"%",
"(",
"name",
",",
"quote",
"(",
"value",
",",
"safe",
")",
")",
"return",
"name"
] | [
220,
4
] | [
266,
19
] | python | en | ['en', 'en', 'en'] | True |
URIVariable.expand | (self, var_dict=None) | Expand the variable in question.
Using ``var_dict`` and the previously parsed defaults, expand this
variable and subvariables.
:param dict var_dict: dictionary of key-value pairs to be used during
expansion
:returns: dict(variable=value)
Examples::
# (1)
v = URIVariable('/var')
expansion = v.expand({'var': 'value'})
print(expansion)
# => {'/var': '/value'}
# (2)
v = URIVariable('?var,hello,x,y')
expansion = v.expand({'var': 'value', 'hello': 'Hello World!',
'x': '1024', 'y': '768'})
print(expansion)
# => {'?var,hello,x,y':
# '?var=value&hello=Hello%20World%21&x=1024&y=768'}
| Expand the variable in question. | def expand(self, var_dict=None):
"""Expand the variable in question.
Using ``var_dict`` and the previously parsed defaults, expand this
variable and subvariables.
:param dict var_dict: dictionary of key-value pairs to be used during
expansion
:returns: dict(variable=value)
Examples::
# (1)
v = URIVariable('/var')
expansion = v.expand({'var': 'value'})
print(expansion)
# => {'/var': '/value'}
# (2)
v = URIVariable('?var,hello,x,y')
expansion = v.expand({'var': 'value', 'hello': 'Hello World!',
'x': '1024', 'y': '768'})
print(expansion)
# => {'?var,hello,x,y':
# '?var=value&hello=Hello%20World%21&x=1024&y=768'}
"""
return_values = []
for name, opts in self.variables:
value = var_dict.get(name, None)
if not value and value != '' and name in self.defaults:
value = self.defaults[name]
if value is None:
continue
expanded = None
if self.operator in ('/', '.'):
expansion = self._label_path_expansion
elif self.operator in ('?', '&'):
expansion = self._query_expansion
elif self.operator == ';':
expansion = self._semi_path_expansion
else:
expansion = self._string_expansion
expanded = expansion(name, value, opts['explode'], opts['prefix'])
if expanded is not None:
return_values.append(expanded)
value = ''
if return_values:
value = self.start + self.join_str.join(return_values)
return {self.original: value} | [
"def",
"expand",
"(",
"self",
",",
"var_dict",
"=",
"None",
")",
":",
"return_values",
"=",
"[",
"]",
"for",
"name",
",",
"opts",
"in",
"self",
".",
"variables",
":",
"value",
"=",
"var_dict",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"not",
"value",
"and",
"value",
"!=",
"''",
"and",
"name",
"in",
"self",
".",
"defaults",
":",
"value",
"=",
"self",
".",
"defaults",
"[",
"name",
"]",
"if",
"value",
"is",
"None",
":",
"continue",
"expanded",
"=",
"None",
"if",
"self",
".",
"operator",
"in",
"(",
"'/'",
",",
"'.'",
")",
":",
"expansion",
"=",
"self",
".",
"_label_path_expansion",
"elif",
"self",
".",
"operator",
"in",
"(",
"'?'",
",",
"'&'",
")",
":",
"expansion",
"=",
"self",
".",
"_query_expansion",
"elif",
"self",
".",
"operator",
"==",
"';'",
":",
"expansion",
"=",
"self",
".",
"_semi_path_expansion",
"else",
":",
"expansion",
"=",
"self",
".",
"_string_expansion",
"expanded",
"=",
"expansion",
"(",
"name",
",",
"value",
",",
"opts",
"[",
"'explode'",
"]",
",",
"opts",
"[",
"'prefix'",
"]",
")",
"if",
"expanded",
"is",
"not",
"None",
":",
"return_values",
".",
"append",
"(",
"expanded",
")",
"value",
"=",
"''",
"if",
"return_values",
":",
"value",
"=",
"self",
".",
"start",
"+",
"self",
".",
"join_str",
".",
"join",
"(",
"return_values",
")",
"return",
"{",
"self",
".",
"original",
":",
"value",
"}"
] | [
290,
4
] | [
345,
37
] | python | en | ['en', 'en', 'en'] | True |
FileField.generate_filename | (self, instance, filename) |
Apply (if callable) or prepend (if a string) upload_to to the filename,
then delegate further processing of the name to the storage backend.
Until the storage layer, all file paths are expected to be Unix style
(with forward slashes).
|
Apply (if callable) or prepend (if a string) upload_to to the filename,
then delegate further processing of the name to the storage backend.
Until the storage layer, all file paths are expected to be Unix style
(with forward slashes).
| def generate_filename(self, instance, filename):
"""
Apply (if callable) or prepend (if a string) upload_to to the filename,
then delegate further processing of the name to the storage backend.
Until the storage layer, all file paths are expected to be Unix style
(with forward slashes).
"""
if callable(self.upload_to):
filename = self.upload_to(instance, filename)
else:
dirname = datetime.datetime.now().strftime(str(self.upload_to))
filename = posixpath.join(dirname, filename)
filename = validate_file_name(filename, allow_relative_path=True)
return self.storage.generate_filename(filename) | [
"def",
"generate_filename",
"(",
"self",
",",
"instance",
",",
"filename",
")",
":",
"if",
"callable",
"(",
"self",
".",
"upload_to",
")",
":",
"filename",
"=",
"self",
".",
"upload_to",
"(",
"instance",
",",
"filename",
")",
"else",
":",
"dirname",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"str",
"(",
"self",
".",
"upload_to",
")",
")",
"filename",
"=",
"posixpath",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"filename",
"=",
"validate_file_name",
"(",
"filename",
",",
"allow_relative_path",
"=",
"True",
")",
"return",
"self",
".",
"storage",
".",
"generate_filename",
"(",
"filename",
")"
] | [
308,
4
] | [
321,
55
] | python | en | ['en', 'error', 'th'] | False |
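Both branches (callable vs. string upload_to) correspond to the usual model declarations; the model and field names are illustrative:

from django.db import models

def user_directory_path(instance, filename):
    # Callable form: receives the model instance and the original filename.
    return 'user_{0}/{1}'.format(instance.owner_id, filename)

class Document(models.Model):
    owner_id = models.IntegerField()
    # String form: strftime-expanded, then prepended to the filename.
    scan = models.FileField(upload_to='scans/%Y/%m/%d/')
    upload = models.FileField(upload_to=user_directory_path)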
ImageField.update_dimension_fields | (self, instance, force=False, *args, **kwargs) |
Update field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
|
Update field's width and height fields, if defined. | def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Update field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields or if
# the field is deferred.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields or self.attname not in instance.__dict__:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field)) or
(self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height) | [
"def",
"update_dimension_fields",
"(",
"self",
",",
"instance",
",",
"force",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Nothing to update if the field doesn't have dimension fields or if",
"# the field is deferred.",
"has_dimension_fields",
"=",
"self",
".",
"width_field",
"or",
"self",
".",
"height_field",
"if",
"not",
"has_dimension_fields",
"or",
"self",
".",
"attname",
"not",
"in",
"instance",
".",
"__dict__",
":",
"return",
"# getattr will call the ImageFileDescriptor's __get__ method, which",
"# coerces the assigned value into an instance of self.attr_class",
"# (ImageFieldFile in this case).",
"file",
"=",
"getattr",
"(",
"instance",
",",
"self",
".",
"attname",
")",
"# Nothing to update if we have no file and not being forced to update.",
"if",
"not",
"file",
"and",
"not",
"force",
":",
"return",
"dimension_fields_filled",
"=",
"not",
"(",
"(",
"self",
".",
"width_field",
"and",
"not",
"getattr",
"(",
"instance",
",",
"self",
".",
"width_field",
")",
")",
"or",
"(",
"self",
".",
"height_field",
"and",
"not",
"getattr",
"(",
"instance",
",",
"self",
".",
"height_field",
")",
")",
")",
"# When both dimension fields have values, we are most likely loading",
"# data from the database or updating an image field that already had",
"# an image stored. In the first case, we don't want to update the",
"# dimension fields because we are already getting their values from the",
"# database. In the second case, we do want to update the dimensions",
"# fields and will skip this return because force will be True since we",
"# were called from ImageFileDescriptor.__set__.",
"if",
"dimension_fields_filled",
"and",
"not",
"force",
":",
"return",
"# file should be an instance of ImageFieldFile or should be None.",
"if",
"file",
":",
"width",
"=",
"file",
".",
"width",
"height",
"=",
"file",
".",
"height",
"else",
":",
"# No file, so clear dimensions fields.",
"width",
"=",
"None",
"height",
"=",
"None",
"# Update the width and height fields.",
"if",
"self",
".",
"width_field",
":",
"setattr",
"(",
"instance",
",",
"self",
".",
"width_field",
",",
"width",
")",
"if",
"self",
".",
"height_field",
":",
"setattr",
"(",
"instance",
",",
"self",
".",
"height_field",
",",
"height",
")"
] | [
419,
4
] | [
474,
56
] | python | en | ['en', 'error', 'th'] | False |
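A model wired up so this post_init hook has dimension fields to populate (requires Pillow); the field names are illustrative:

from django.db import models

class Photo(models.Model):
    image = models.ImageField(
        upload_to='photos/',
        width_field='image_width',
        height_field='image_height',
    )
    # Filled in automatically from the image file by update_dimension_fields.
    image_width = models.PositiveIntegerField(null=True, blank=True)
    image_height = models.PositiveIntegerField(null=True, blank=True)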
sensitive_variables | (*variables) |
Indicate which variables used in the decorated function are sensitive so
that those variables can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Accept two forms:
* with specified variable names:
@sensitive_variables('user', 'password', 'credit_card')
def my_function(user):
password = user.pass_word
credit_card = user.credit_card_number
...
* without any specified variable names, in which case all
variables are considered sensitive:
@sensitive_variables()
def my_function():
...
|
Indicate which variables used in the decorated function are sensitive so
that those variables can later be treated in a special way, for example
by hiding them when logging unhandled exceptions. | def sensitive_variables(*variables):
"""
Indicate which variables used in the decorated function are sensitive so
that those variables can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Accept two forms:
* with specified variable names:
@sensitive_variables('user', 'password', 'credit_card')
def my_function(user):
password = user.pass_word
credit_card = user.credit_card_number
...
* without any specified variable names, in which case all
variables are considered sensitive:
@sensitive_variables()
def my_function():
...
"""
if len(variables) == 1 and callable(variables[0]):
raise TypeError(
'sensitive_variables() must be called to use it as a decorator, '
'e.g., use @sensitive_variables(), not @sensitive_variables.'
)
def decorator(func):
@functools.wraps(func)
def sensitive_variables_wrapper(*func_args, **func_kwargs):
if variables:
sensitive_variables_wrapper.sensitive_variables = variables
else:
sensitive_variables_wrapper.sensitive_variables = '__ALL__'
return func(*func_args, **func_kwargs)
return sensitive_variables_wrapper
return decorator | [
"def",
"sensitive_variables",
"(",
"*",
"variables",
")",
":",
"if",
"len",
"(",
"variables",
")",
"==",
"1",
"and",
"callable",
"(",
"variables",
"[",
"0",
"]",
")",
":",
"raise",
"TypeError",
"(",
"'sensitive_variables() must be called to use it as a decorator, '",
"'e.g., use @sensitive_variables(), not @sensitive_variables.'",
")",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"sensitive_variables_wrapper",
"(",
"*",
"func_args",
",",
"*",
"*",
"func_kwargs",
")",
":",
"if",
"variables",
":",
"sensitive_variables_wrapper",
".",
"sensitive_variables",
"=",
"variables",
"else",
":",
"sensitive_variables_wrapper",
".",
"sensitive_variables",
"=",
"'__ALL__'",
"return",
"func",
"(",
"*",
"func_args",
",",
"*",
"*",
"func_kwargs",
")",
"return",
"sensitive_variables_wrapper",
"return",
"decorator"
] | [
5,
0
] | [
43,
20
] | python | en | ['en', 'error', 'th'] | False |
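A brief usage sketch of the decorator defined above (which needs functools imported); the function and values are hypothetical. Note that this implementation attaches the marker attribute to the wrapper at call time.

@sensitive_variables('password', 'credit_card')
def process_payment(user, password, credit_card):
    return 'ok'  # stand-in for real processing

process_payment('alice', 's3cret', '4111-1111-1111-1111')
# The wrapper records the names on itself when called; Django's debug
# error reporter reads this attribute when filtering traceback locals:
print(process_payment.sensitive_variables)  # ('password', 'credit_card')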
sensitive_post_parameters | (*parameters) |
Indicate which POST parameters used in the decorated view are sensitive,
so that those parameters can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Accept two forms:
* with specified parameters:
@sensitive_post_parameters('password', 'credit_card')
def my_view(request):
pw = request.POST['password']
cc = request.POST['credit_card']
...
* without any specified parameters, in which case all POST parameters
are considered sensitive:
@sensitive_post_parameters()
def my_view(request):
...
|
Indicate which POST parameters used in the decorated view are sensitive,
so that those parameters can later be treated in a special way, for example
by hiding them when logging unhandled exceptions. | def sensitive_post_parameters(*parameters):
"""
Indicate which POST parameters used in the decorated view are sensitive,
so that those parameters can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Accept two forms:
* with specified parameters:
@sensitive_post_parameters('password', 'credit_card')
def my_view(request):
pw = request.POST['password']
cc = request.POST['credit_card']
...
* without any specified parameters, in which case all POST parameters
are considered sensitive:
@sensitive_post_parameters()
def my_view(request):
...
"""
if len(parameters) == 1 and callable(parameters[0]):
raise TypeError(
'sensitive_post_parameters() must be called to use it as a '
'decorator, e.g., use @sensitive_post_parameters(), not '
'@sensitive_post_parameters.'
)
def decorator(view):
@functools.wraps(view)
def sensitive_post_parameters_wrapper(request, *args, **kwargs):
assert isinstance(request, HttpRequest), (
"sensitive_post_parameters didn't receive an HttpRequest. "
"If you are decorating a classmethod, be sure to use "
"@method_decorator."
)
if parameters:
request.sensitive_post_parameters = parameters
else:
request.sensitive_post_parameters = '__ALL__'
return view(request, *args, **kwargs)
return sensitive_post_parameters_wrapper
return decorator | [
"def",
"sensitive_post_parameters",
"(",
"*",
"parameters",
")",
":",
"if",
"len",
"(",
"parameters",
")",
"==",
"1",
"and",
"callable",
"(",
"parameters",
"[",
"0",
"]",
")",
":",
"raise",
"TypeError",
"(",
"'sensitive_post_parameters() must be called to use it as a '",
"'decorator, e.g., use @sensitive_post_parameters(), not '",
"'@sensitive_post_parameters.'",
")",
"def",
"decorator",
"(",
"view",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"view",
")",
"def",
"sensitive_post_parameters_wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"request",
",",
"HttpRequest",
")",
",",
"(",
"\"sensitive_post_parameters didn't receive an HttpRequest. \"",
"\"If you are decorating a classmethod, be sure to use \"",
"\"@method_decorator.\"",
")",
"if",
"parameters",
":",
"request",
".",
"sensitive_post_parameters",
"=",
"parameters",
"else",
":",
"request",
".",
"sensitive_post_parameters",
"=",
"'__ALL__'",
"return",
"view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"sensitive_post_parameters_wrapper",
"return",
"decorator"
] | [
46,
0
] | [
90,
20
] | python | en | ['en', 'error', 'th'] | False |
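A usage sketch for the view decorator above; the view and credentials are hypothetical, and the minimal settings.configure() call stands in for a real Django project.

import django
from django.conf import settings

settings.configure()  # minimal standalone configuration for this sketch
django.setup()

from django.http import HttpResponse
from django.test import RequestFactory
from django.views.decorators.debug import sensitive_post_parameters

@sensitive_post_parameters('password')
def login_view(request):
    return HttpResponse('ok')

request = RequestFactory().post('/login/', {'username': 'a', 'password': 'b'})
login_view(request)
# The wrapper tagged the request for the debug error reporter:
print(request.sensitive_post_parameters)  # ('password',)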
Operation.deconstruct | (self) |
Return a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
|
Return a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
| def deconstruct(self):
"""
Return a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
) | [
"def",
"deconstruct",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_constructor_args",
"[",
"0",
"]",
",",
"self",
".",
"_constructor_args",
"[",
"1",
"]",
",",
")"
] | [
41,
4
] | [
51,
9
] | python | en | ['en', 'error', 'th'] | False |
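A hedged sketch of what deconstruct() returns for a custom operation (assumes Django is importable; RunPrint is hypothetical). The base class's __new__ captures the constructor arguments, which is what makes this trivial.

from django.db.migrations.operations.base import Operation

class RunPrint(Operation):  # hypothetical custom operation
    def __init__(self, message, verbose=False):
        self.message = message
        self.verbose = verbose

# Operation.__new__ stores the constructor arguments, so deconstruct()
# can hand them back verbatim to the migration writer:
print(RunPrint('hello', verbose=True).deconstruct())
# -> ('RunPrint', ('hello',), {'verbose': True})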
Operation.state_forwards | (self, app_label, state) |
Take the state from the previous migration, and mutate it
so that it matches what this migration would perform.
|
Take the state from the previous migration, and mutate it
so that it matches what this migration would perform.
| def state_forwards(self, app_label, state):
"""
Take the state from the previous migration, and mutate it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method') | [
"def",
"state_forwards",
"(",
"self",
",",
"app_label",
",",
"state",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of Operation must provide a state_forwards() method'",
")"
] | [
53,
4
] | [
58,
99
] | python | en | ['en', 'error', 'th'] | False |
Operation.database_forwards | (self, app_label, schema_editor, from_state, to_state) |
Perform the mutation on the database schema in the normal
(forwards) direction.
|
Perform the mutation on the database schema in the normal
(forwards) direction.
| def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Perform the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method') | [
"def",
"database_forwards",
"(",
"self",
",",
"app_label",
",",
"schema_editor",
",",
"from_state",
",",
"to_state",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of Operation must provide a database_forwards() method'",
")"
] | [
60,
4
] | [
65,
102
] | python | en | ['en', 'error', 'th'] | False |
Operation.database_backwards | (self, app_label, schema_editor, from_state, to_state) |
Perform the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
|
Perform the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
| def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Perform the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method') | [
"def",
"database_backwards",
"(",
"self",
",",
"app_label",
",",
"schema_editor",
",",
"from_state",
",",
"to_state",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of Operation must provide a database_backwards() method'",
")"
] | [
67,
4
] | [
73,
103
] | python | en | ['en', 'error', 'th'] | False |
Operation.describe | (self) |
Output a brief summary of what the action does.
|
Output a brief summary of what the action does.
| def describe(self):
"""
Output a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args) | [
"def",
"describe",
"(",
"self",
")",
":",
"return",
"\"%s: %s\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_constructor_args",
")"
] | [
75,
4
] | [
79,
75
] | python | en | ['en', 'error', 'th'] | False |
Operation.migration_name_fragment | (self) |
A filename part suitable for automatically naming a migration
containing this operation, or None if not applicable.
|
A filename part suitable for automatically naming a migration
containing this operation, or None if not applicable.
| def migration_name_fragment(self):
"""
A filename part suitable for automatically naming a migration
containing this operation, or None if not applicable.
"""
return None | [
"def",
"migration_name_fragment",
"(",
"self",
")",
":",
"return",
"None"
] | [
82,
4
] | [
87,
19
] | python | en | ['en', 'error', 'th'] | False |
Operation.references_model | (self, name, app_label) |
Return True if there is a chance this operation references the given
model name (as a string), with an app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
|
Return True if there is a chance this operation references the given
model name (as a string), with an app label for accuracy. | def references_model(self, name, app_label):
"""
Return True if there is a chance this operation references the given
model name (as a string), with an app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True | [
"def",
"references_model",
"(",
"self",
",",
"name",
",",
"app_label",
")",
":",
"return",
"True"
] | [
89,
4
] | [
99,
19
] | python | en | ['en', 'error', 'th'] | False |
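A hedged sketch of narrowing this contract in a custom operation (Operation imported from Django as in the sketch above; TouchModel is hypothetical): the default conservative True can be tightened when the operation knows exactly which model it touches.

from django.db.migrations.operations.base import Operation

class TouchModel(Operation):  # hypothetical custom operation
    def __init__(self, model_name):
        self.model_name = model_name

    def references_model(self, name, app_label):
        # An exact, case-insensitive match lets the optimizer skip unrelated
        # operations; when in doubt, returning True is always the safe answer.
        return name.lower() == self.model_name.lower()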
Operation.references_field | (self, model_name, name, app_label) |
Return True if there is a chance this operation references the given
field name, with an app label for accuracy.
Used for optimization. If in doubt, return True.
|
Return True if there is a chance this operation references the given
field name, with an app label for accuracy. | def references_field(self, model_name, name, app_label):
"""
Return True if there is a chance this operation references the given
field name, with an app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label) | [
"def",
"references_field",
"(",
"self",
",",
"model_name",
",",
"name",
",",
"app_label",
")",
":",
"return",
"self",
".",
"references_model",
"(",
"model_name",
",",
"app_label",
")"
] | [
101,
4
] | [
108,
59
] | python | en | ['en', 'error', 'th'] | False |
Operation.allow_migrate_model | (self, connection_alias, model) |
Return whether or not a model may be migrated.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
|
Return whether or not a model may be migrated. | def allow_migrate_model(self, connection_alias, model):
"""
Return whether or not a model may be migrated.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model) | [
"def",
"allow_migrate_model",
"(",
"self",
",",
"connection_alias",
",",
"model",
")",
":",
"if",
"not",
"model",
".",
"_meta",
".",
"can_migrate",
"(",
"connection_alias",
")",
":",
"return",
"False",
"return",
"router",
".",
"allow_migrate_model",
"(",
"connection_alias",
",",
"model",
")"
] | [
110,
4
] | [
120,
66
] | python | en | ['en', 'error', 'th'] | False |
Operation.reduce | (self, operation, app_label) |
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
|
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
| def reduce(self, operation, app_label):
"""
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
"""
if self.elidable:
return [operation]
elif operation.elidable:
return [self]
return False | [
"def",
"reduce",
"(",
"self",
",",
"operation",
",",
"app_label",
")",
":",
"if",
"self",
".",
"elidable",
":",
"return",
"[",
"operation",
"]",
"elif",
"operation",
".",
"elidable",
":",
"return",
"[",
"self",
"]",
"return",
"False"
] | [
122,
4
] | [
132,
20
] | python | en | ['en', 'error', 'th'] | False |
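A small demonstration of the elidable contract in reduce(), using a stand-in operation class (assumes Django is importable).

from django.db.migrations.operations.base import Operation

class Noop(Operation):  # stand-in operation, for illustration only
    def __init__(self, elidable=False):
        self.elidable = elidable

a, b = Noop(elidable=True), Noop()
print(a.reduce(b, 'myapp'))       # [<Noop>] -- elidable a collapses into b
print(b.reduce(a, 'myapp'))       # [<Noop>] -- the elidable a is dropped, b kept
print(b.reduce(Noop(), 'myapp'))  # False    -- nothing can be optimized across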
BatchPreprocessing.prepare_for_batch | (self, image, labels, bboxes, image_id=-1) |
All inputs have different dimensions, so we need to update them to fit the batch.
Image: Depending on the config, we rescale the image to the batch image size (and it will stay the same)
or to the maximum batch image size, which is then transformed to a randomly selected size in the preprocess_batch() method.
Labels, bounding boxes: We either truncate them to the maximum size or pad them to fit it. The returned mask
tells us which values are valid.
:param image: 3-D Tensor of shape [height, width, channels]
:param labels: 1-D Tensor with labels for every object
:param bboxes: 2-D Tensor of shape (objects, 4) containing bounding boxes in format [ymin, xmin, ymax, xmax]
in relative coordinates
:param image_id: Id of the image, required for COCO evaluation
:return: (image, bboxes, labels, mask)
|
All inputs have different dimensions, so we need to update them to fit the batch. | def prepare_for_batch(self, image, labels, bboxes, image_id=-1):
"""
All inputs have different dimensions, so we need to update them to fit the batch.
Image: Depending on the config, we rescale the image to the batch image size (and it will stay the same)
or to the maximum batch image size, which is then transformed to a randomly selected size in the preprocess_batch() method.
Labels, bounding boxes: We either truncate them to the maximum size or pad them to fit it. The returned mask
tells us which values are valid.
:param image: 3-D Tensor of shape [height, width, channels]
:param labels: 1-D Tensor with labels for every object
:param bboxes: 2-D Tensor of shape (objects, 4) containing bounding boxes in format [ymin, xmin, ymax, xmax]
in relative coordinates
:param image_id: Id of the image, required for COCO evaluation
:return: (image, bboxes, labels, mask)
"""
labels = labels[0 : self.model_config.max_objects]
bboxes = bboxes[0 : self.model_config.max_objects]
bboxes = tf.reshape(bboxes, (-1, 4)) # always keep the second dimension equal to 4, even if there are no objects
# make sure labels and boxes have the correct data type
labels = tf.cast(labels, dtype=tf.float32)
bboxes = tf.cast(bboxes, dtype=tf.float32)
# we resize to the max size to form a batch. Afterwards, we can resize the whole batch
image_size = (
self.model_config.image_size + self.model_config.image_size_variation
if self.train
else self.model_config.image_size
)
height, width = tf.shape(image)[0], tf.shape(image)[1]
# make some augmentations
if self.augmentations:
if self.resize_before_augmenting:
additional_space = 1.2 # so that we have something to clip
pre_resize = tf.cast(image_size, dtype=tf.float32) * additional_space
ratio = tf.math.minimum(
tf.cast(height, dtype=tf.float32) / pre_resize, tf.cast(width, dtype=tf.float32) / pre_resize
)
def _preresize():
return resize(
image,
bboxes,
tf.cast(tf.cast(height, dtype=tf.float32) / ratio, dtype=tf.int32),
tf.cast(tf.cast(width, dtype=tf.float32) / ratio, dtype=tf.int32),
keep_aspect_ratio=False,
random_method=True,
)
# when the image is only slightly bigger, there is no need to pre-resize
image, bboxes = tf.cond(tf.math.greater(ratio, 1.2), lambda: _preresize(), lambda: (image, bboxes))
# probabilities for random.categorical() are unscaled
probabilities = [tf.cast(aug.probability, dtype=tf.float32) for aug in self.augmentations]
selected = tf.random.categorical(tf.math.log([probabilities]), 1, dtype=tf.int32)[0][0]
# perform augmentation with the selected id (the nicer tf.switch_case() was not working for an unknown reason)
for idx, aug in enumerate(self.augmentations):
image, bboxes = tf.cond(selected == idx, lambda: aug.augment(image, bboxes), lambda: (image, bboxes))
if self.train and not self.model_config.keep_aspect_ratio:
# randomly choose to keep the image size or spread it out to take the full available space
image, bboxes = self.resize_train(image, bboxes, image_size, prob=0.5)
else:
# always keep the size or spread depending on the settings
image, bboxes = resize(
image,
bboxes,
image_size,
image_size,
keep_aspect_ratio=self.model_config.keep_aspect_ratio,
random_method=self.train,
)
# calculate mask (one when there is a detected object)
mask = tf.range(self.model_config.max_objects) < tf.shape(labels)[0]
mask = tf.cast(mask, dtype=tf.float32)
# update bounding boxes to the correct shape
padding_add = self.model_config.max_objects - tf.shape(bboxes)[0]
bboxes = tf.pad(bboxes, tf.stack([[0, padding_add], [0, 0]]))
# update labels to correct shape
labels = tf.pad(labels, tf.stack([[0, padding_add]]))
labels = tf.cast(labels, dtype=tf.int32)
return image, bboxes, labels, mask, image_id, height, width | [
"def",
"prepare_for_batch",
"(",
"self",
",",
"image",
",",
"labels",
",",
"bboxes",
",",
"image_id",
"=",
"-",
"1",
")",
":",
"labels",
"=",
"labels",
"[",
"0",
":",
"self",
".",
"model_config",
".",
"max_objects",
"]",
"bboxes",
"=",
"bboxes",
"[",
"0",
":",
"self",
".",
"model_config",
".",
"max_objects",
"]",
"bboxes",
"=",
"tf",
".",
"reshape",
"(",
"bboxes",
",",
"(",
"-",
"1",
",",
"4",
")",
")",
"# always keep the second dimension to be 4, even if there are no objects",
"# make sure labels and boxes have the correct data type",
"labels",
"=",
"tf",
".",
"cast",
"(",
"labels",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"bboxes",
"=",
"tf",
".",
"cast",
"(",
"bboxes",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# we resize for the max size to form a batch. Afterwards, we can resize the whole batch",
"image_size",
"=",
"(",
"self",
".",
"model_config",
".",
"image_size",
"+",
"self",
".",
"model_config",
".",
"image_size_variation",
"if",
"self",
".",
"train",
"else",
"self",
".",
"model_config",
".",
"image_size",
")",
"height",
",",
"width",
"=",
"tf",
".",
"shape",
"(",
"image",
")",
"[",
"0",
"]",
",",
"tf",
".",
"shape",
"(",
"image",
")",
"[",
"1",
"]",
"# make some augmentations",
"if",
"self",
".",
"augmentations",
":",
"if",
"self",
".",
"resize_before_augmenting",
":",
"additional_space",
"=",
"1.2",
"# so that we have something to clip",
"pre_resize",
"=",
"tf",
".",
"cast",
"(",
"image_size",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"*",
"additional_space",
"ratio",
"=",
"tf",
".",
"math",
".",
"minimum",
"(",
"tf",
".",
"cast",
"(",
"height",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"/",
"pre_resize",
",",
"tf",
".",
"cast",
"(",
"width",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"/",
"pre_resize",
")",
"def",
"_preresize",
"(",
")",
":",
"return",
"resize",
"(",
"image",
",",
"bboxes",
",",
"tf",
".",
"cast",
"(",
"tf",
".",
"cast",
"(",
"height",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"/",
"ratio",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"tf",
".",
"cast",
"(",
"tf",
".",
"cast",
"(",
"width",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"/",
"ratio",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"keep_aspect_ratio",
"=",
"False",
",",
"random_method",
"=",
"True",
",",
")",
"# when the image is just slightly better, there is not need to pre-resize",
"image",
",",
"bboxes",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"math",
".",
"greater",
"(",
"ratio",
",",
"1.2",
")",
",",
"lambda",
":",
"_preresize",
"(",
")",
",",
"lambda",
":",
"(",
"image",
",",
"bboxes",
")",
")",
"# probabilities for random.categorical() are unscaled",
"probabilities",
"=",
"[",
"tf",
".",
"cast",
"(",
"aug",
".",
"probability",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"for",
"aug",
"in",
"self",
".",
"augmentations",
"]",
"selected",
"=",
"tf",
".",
"random",
".",
"categorical",
"(",
"tf",
".",
"math",
".",
"log",
"(",
"[",
"probabilities",
"]",
")",
",",
"1",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# perform augmentation with selected id (nice tf.switch_case() was not working for an unknown reason)",
"for",
"idx",
",",
"aug",
"in",
"enumerate",
"(",
"self",
".",
"augmentations",
")",
":",
"image",
",",
"bboxes",
"=",
"tf",
".",
"cond",
"(",
"selected",
"==",
"idx",
",",
"lambda",
":",
"aug",
".",
"augment",
"(",
"image",
",",
"bboxes",
")",
",",
"lambda",
":",
"(",
"image",
",",
"bboxes",
")",
")",
"if",
"self",
".",
"train",
"and",
"not",
"self",
".",
"model_config",
".",
"keep_aspect_ratio",
":",
"# randomly chose to keep the image size or spread it out to take the full available space",
"image",
",",
"bboxes",
"=",
"self",
".",
"resize_train",
"(",
"image",
",",
"bboxes",
",",
"image_size",
",",
"prob",
"=",
"0.5",
")",
"else",
":",
"# always keep the size or spread depending on the settings",
"image",
",",
"bboxes",
"=",
"resize",
"(",
"image",
",",
"bboxes",
",",
"image_size",
",",
"image_size",
",",
"keep_aspect_ratio",
"=",
"self",
".",
"model_config",
".",
"keep_aspect_ratio",
",",
"random_method",
"=",
"self",
".",
"train",
",",
")",
"# calculate mask (one when there is a detected object)",
"mask",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"model_config",
".",
"max_objects",
")",
"<",
"tf",
".",
"shape",
"(",
"labels",
")",
"[",
"0",
"]",
"mask",
"=",
"tf",
".",
"cast",
"(",
"mask",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# update bounding boxes to the correct shape",
"padding_add",
"=",
"self",
".",
"model_config",
".",
"max_objects",
"-",
"tf",
".",
"shape",
"(",
"bboxes",
")",
"[",
"0",
"]",
"bboxes",
"=",
"tf",
".",
"pad",
"(",
"bboxes",
",",
"tf",
".",
"stack",
"(",
"[",
"[",
"0",
",",
"padding_add",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
")",
"# update labels to correct shape",
"labels",
"=",
"tf",
".",
"pad",
"(",
"labels",
",",
"tf",
".",
"stack",
"(",
"[",
"[",
"0",
",",
"padding_add",
"]",
"]",
")",
")",
"labels",
"=",
"tf",
".",
"cast",
"(",
"labels",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"return",
"image",
",",
"bboxes",
",",
"labels",
",",
"mask",
",",
"image_id",
",",
"height",
",",
"width"
] | [
24,
4
] | [
114,
67
] | python | en | ['en', 'error', 'th'] | False |
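A toy check of the mask/padding arithmetic in the tail of prepare_for_batch(); the sizes are illustrative and assume TensorFlow 2.x eager execution.

import tensorflow as tf

# Two real objects, max_objects = 4 (values chosen for illustration only).
max_objects = 4
labels = tf.constant([3.0, 7.0])
bboxes = tf.constant([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]])

# Mask is computed before padding, so it marks only the real objects.
mask = tf.cast(tf.range(max_objects) < tf.shape(labels)[0], tf.float32)
pad = max_objects - tf.shape(bboxes)[0]
bboxes = tf.pad(bboxes, [[0, pad], [0, 0]])
labels = tf.cast(tf.pad(labels, [[0, pad]]), tf.int32)

print(mask.numpy())    # [1. 1. 0. 0.]
print(labels.numpy())  # [3 7 0 0]
print(bboxes.shape)    # (4, 4)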
BatchPreprocessing.preprocess_batch | (self, images, bboxes, labels, mask, image_ids, heights, widths) |
We have all the inputs in batches, uniformly sized.
Images/bounding boxes are augmented if this was required.
First, the images are resized to a random size (from a given range) if this is allowed.
If we use a neural network with layers independent of image size, like convolutional ones,
we can resize the whole batch randomly to further improve our augmentation and prevent overfitting.
Afterwards, inputs for our network are prepared.
:param image_ids: Ids of the images, required for COCO evaluation
:param heights: original heights of images (not resized)
:param widths: original widths of images (not resized)
|
We have all the inputs in batches, uniformly sized.
Images/bounding boxes are augmented if this was required. | def preprocess_batch(self, images, bboxes, labels, mask, image_ids, heights, widths):
"""
We have all the inputs in batches, uniformly sized.
Images/bounding boxes are augmented if this was required.
First, the images are resized to a random size (from a given range) if this is allowed.
If we use a neural network with layers independent of image size, like convolutional ones,
we can resize the whole batch randomly to further improve our augmentation and prevent overfitting.
Afterwards, inputs for our network are prepared.
:param image_ids: Ids of the images, required for COCO evaluation
:param heights: original heights of images (not resized)
:param widths: original widths of images (not resized)
"""
images = tf.cast(images, tf.float32)
# select the current batch size if the variation is greater than 0
image_size = self.model_config.image_size
if self.model_config.image_size_variation > 0 and self.train:
add = tf.random.uniform(
[],
minval=-self.model_config.image_size_variation,
maxval=self.model_config.image_size_variation,
dtype=tf.int32,
)
# TODO 32 depends on network and downsampling
image_size = ((self.model_config.image_size + add) // 32) * 32
# resize and pad image to current batch size (aspect ratios are already solved)
images = random_resize(images, image_size, image_size)
# transform bounding boxes from relative to absolute coordinates
bboxes *= tf.cast(image_size, tf.float32)
# calculate bounding box properties
size, local_offset, indices = self.decompose_bounding_boxes(bboxes, image_size, self.model_config.downsample)
# create heatmap
bboxes /= self.model_config.downsample
heatmap_size = image_size // self.model_config.downsample
heatmap_shape = [tf.shape(images)[0], heatmap_size, heatmap_size, self.model_config.labels]
if self.model_config.model_type == XModelType.CENTERNET:
heatmap_dense = tf.numpy_function(func=draw_heatmaps, inp=[heatmap_shape, bboxes, labels], Tout=tf.float32)
heatmap_dense = tf.reshape(heatmap_dense, heatmap_shape)
return (
{"input": images},
{"heatmap": heatmap_dense, "size": size, "offset": local_offset},
{
"indices": indices,
"mask": mask,
"bboxes": bboxes,
"labels": labels,
"ids": image_ids,
"heights": heights,
"widths": widths,
},
)
else:
# otherwise we are fitting a TTF net
heatmap_dense, box_target, reg_weight, _ = tf.numpy_function(
func=draw_heatmaps_ttf,
inp=[heatmap_shape, bboxes, labels],
Tout=[tf.float32, tf.float32, tf.float32, tf.float32],
)
heatmap_dense = tf.reshape(heatmap_dense, heatmap_shape)
box_target = tf.reshape(box_target, [tf.shape(images)[0], heatmap_size, heatmap_size, 4])
reg_weight = tf.reshape(reg_weight, [tf.shape(images)[0], heatmap_size, heatmap_size, 1])
return (
{"input": images},
{"heatmap": heatmap_dense, "size": size, "offset": local_offset},
{
"indices": indices,
"mask": mask,
"bboxes": bboxes,
"labels": labels,
"box_target": box_target,
"reg_weight": reg_weight,
"ids": image_ids,
"heights": heights,
"widths": widths,
},
) | [
"def",
"preprocess_batch",
"(",
"self",
",",
"images",
",",
"bboxes",
",",
"labels",
",",
"mask",
",",
"image_ids",
",",
"heights",
",",
"widths",
")",
":",
"images",
"=",
"tf",
".",
"cast",
"(",
"images",
",",
"tf",
".",
"float32",
")",
"# select the current batch size, if the variation is greater than 0",
"image_size",
"=",
"self",
".",
"model_config",
".",
"image_size",
"if",
"self",
".",
"model_config",
".",
"image_size_variation",
">",
"0",
"and",
"self",
".",
"train",
":",
"add",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"[",
"]",
",",
"minval",
"=",
"-",
"self",
".",
"model_config",
".",
"image_size_variation",
",",
"maxval",
"=",
"self",
".",
"model_config",
".",
"image_size_variation",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
")",
"# TODO 32 depends on network and downsampling",
"image_size",
"=",
"(",
"(",
"self",
".",
"model_config",
".",
"image_size",
"+",
"add",
")",
"//",
"32",
")",
"*",
"32",
"# resize and pad image to current batch size (aspect ratios are already solved)",
"images",
"=",
"random_resize",
"(",
"images",
",",
"image_size",
",",
"image_size",
")",
"# transform bounding boxes from relative to absolute coordinates",
"bboxes",
"*=",
"tf",
".",
"cast",
"(",
"image_size",
",",
"tf",
".",
"float32",
")",
"# calculate bounding box properties",
"size",
",",
"local_offset",
",",
"indices",
"=",
"self",
".",
"decompose_bounding_boxes",
"(",
"bboxes",
",",
"image_size",
",",
"self",
".",
"model_config",
".",
"downsample",
")",
"# create heatmap",
"bboxes",
"/=",
"self",
".",
"model_config",
".",
"downsample",
"heatmap_size",
"=",
"image_size",
"//",
"self",
".",
"model_config",
".",
"downsample",
"heatmap_shape",
"=",
"[",
"tf",
".",
"shape",
"(",
"images",
")",
"[",
"0",
"]",
",",
"heatmap_size",
",",
"heatmap_size",
",",
"self",
".",
"model_config",
".",
"labels",
"]",
"if",
"self",
".",
"model_config",
".",
"model_type",
"==",
"XModelType",
".",
"CENTERNET",
":",
"heatmap_dense",
"=",
"tf",
".",
"numpy_function",
"(",
"func",
"=",
"draw_heatmaps",
",",
"inp",
"=",
"[",
"heatmap_shape",
",",
"bboxes",
",",
"labels",
"]",
",",
"Tout",
"=",
"tf",
".",
"float32",
")",
"heatmap_dense",
"=",
"tf",
".",
"reshape",
"(",
"heatmap_dense",
",",
"heatmap_shape",
")",
"return",
"(",
"{",
"\"input\"",
":",
"images",
"}",
",",
"{",
"\"heatmap\"",
":",
"heatmap_dense",
",",
"\"size\"",
":",
"size",
",",
"\"offset\"",
":",
"local_offset",
"}",
",",
"{",
"\"indices\"",
":",
"indices",
",",
"\"mask\"",
":",
"mask",
",",
"\"bboxes\"",
":",
"bboxes",
",",
"\"labels\"",
":",
"labels",
",",
"\"ids\"",
":",
"image_ids",
",",
"\"heights\"",
":",
"heights",
",",
"\"widths\"",
":",
"widths",
",",
"}",
",",
")",
"else",
":",
"# otherwise we are fittint TTF net",
"heatmap_dense",
",",
"box_target",
",",
"reg_weight",
",",
"_",
"=",
"tf",
".",
"numpy_function",
"(",
"func",
"=",
"draw_heatmaps_ttf",
",",
"inp",
"=",
"[",
"heatmap_shape",
",",
"bboxes",
",",
"labels",
"]",
",",
"Tout",
"=",
"[",
"tf",
".",
"float32",
",",
"tf",
".",
"float32",
",",
"tf",
".",
"float32",
",",
"tf",
".",
"float32",
"]",
",",
")",
"heatmap_dense",
"=",
"tf",
".",
"reshape",
"(",
"heatmap_dense",
",",
"heatmap_shape",
")",
"box_target",
"=",
"tf",
".",
"reshape",
"(",
"box_target",
",",
"[",
"tf",
".",
"shape",
"(",
"images",
")",
"[",
"0",
"]",
",",
"heatmap_size",
",",
"heatmap_size",
",",
"4",
"]",
")",
"reg_weight",
"=",
"tf",
".",
"reshape",
"(",
"reg_weight",
",",
"[",
"tf",
".",
"shape",
"(",
"images",
")",
"[",
"0",
"]",
",",
"heatmap_size",
",",
"heatmap_size",
",",
"1",
"]",
")",
"return",
"(",
"{",
"\"input\"",
":",
"images",
"}",
",",
"{",
"\"heatmap\"",
":",
"heatmap_dense",
",",
"\"size\"",
":",
"size",
",",
"\"offset\"",
":",
"local_offset",
"}",
",",
"{",
"\"indices\"",
":",
"indices",
",",
"\"mask\"",
":",
"mask",
",",
"\"bboxes\"",
":",
"bboxes",
",",
"\"labels\"",
":",
"labels",
",",
"\"box_target\"",
":",
"box_target",
",",
"\"reg_weight\"",
":",
"reg_weight",
",",
"\"ids\"",
":",
"image_ids",
",",
"\"heights\"",
":",
"heights",
",",
"\"widths\"",
":",
"widths",
",",
"}",
",",
")"
] | [
127,
4
] | [
212,
13
] | python | en | ['en', 'error', 'th'] | False |
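A small illustration of the size-snapping arithmetic in preprocess_batch(): the randomly perturbed size is floored to a multiple of 32, which the TODO notes is the assumed overall downsampling stride (values are illustrative, TensorFlow 2.x eager).

import tensorflow as tf

image_size, variation = 512, 64
add = tf.random.uniform([], minval=-variation, maxval=variation, dtype=tf.int32)
# Integer floor division snaps the perturbed size down to a multiple of 32.
batch_size = ((image_size + add) // 32) * 32
print(int(batch_size))  # one of 448, 480, 512, 544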
_add_doc | (func, doc) | Add documentation to a function. | Add documentation to a function. | def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc | [
"def",
"_add_doc",
"(",
"func",
",",
"doc",
")",
":",
"func",
".",
"__doc__",
"=",
"doc"
] | [
74,
0
] | [
76,
22
] | python | en | ['en', 'en', 'en'] | True |
_import_module | (name) | Import module, returning the module after the last dot. | Import module, returning the module after the last dot. | def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name] | [
"def",
"_import_module",
"(",
"name",
")",
":",
"__import__",
"(",
"name",
")",
"return",
"sys",
".",
"modules",
"[",
"name",
"]"
] | [
79,
0
] | [
82,
28
] | python | en | ['en', 'en', 'en'] | True |
add_move | (move) | Add an item to six.moves. | Add an item to six.moves. | def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move) | [
"def",
"add_move",
"(",
"move",
")",
":",
"setattr",
"(",
"_MovedItems",
",",
"move",
".",
"name",
",",
"move",
")"
] | [
485,
0
] | [
487,
41
] | python | en | ['en', 'en', 'en'] | True |
remove_move | (name) | Remove item from six.moves. | Remove item from six.moves. | def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,)) | [
"def",
"remove_move",
"(",
"name",
")",
":",
"try",
":",
"delattr",
"(",
"_MovedItems",
",",
"name",
")",
"except",
"AttributeError",
":",
"try",
":",
"del",
"moves",
".",
"__dict__",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"AttributeError",
"(",
"\"no such move, %r\"",
"%",
"(",
"name",
",",
")",
")"
] | [
490,
0
] | [
498,
62
] | python | en | ['en', 'en', 'en'] | True |
with_metaclass | (meta, *bases) | Create a base class with a metaclass. | Create a base class with a metaclass. | def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {}) | [
"def",
"with_metaclass",
"(",
"meta",
",",
"*",
"bases",
")",
":",
"# This requires a bit of explanation: the basic idea is to make a dummy",
"# metaclass for one level of class instantiation that replaces itself with",
"# the actual metaclass.",
"class",
"metaclass",
"(",
"meta",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"name",
",",
"this_bases",
",",
"d",
")",
":",
"return",
"meta",
"(",
"name",
",",
"bases",
",",
"d",
")",
"return",
"type",
".",
"__new__",
"(",
"metaclass",
",",
"'temporary_class'",
",",
"(",
")",
",",
"{",
"}",
")"
] | [
799,
0
] | [
808,
61
] | python | en | ['en', 'en', 'en'] | True |
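A quick demonstration of the trampoline described in the comment above (assumes six is importable; the class names are illustrative): the temporary class disappears and the real metaclass builds the final class.

from six import with_metaclass  # same function as defined above

class Meta(type):
    def __new__(mcls, name, bases, ns):
        ns['tagged'] = True  # prove the metaclass really ran
        return super(Meta, mcls).__new__(mcls, name, bases, ns)

class Widget(with_metaclass(Meta, object)):
    pass

# The temporary_class trampoline has been replaced: Meta built Widget itself.
print(type(Widget) is Meta, Widget.tagged)  # True True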
add_metaclass | (metaclass) | Class decorator for creating a class with a metaclass. | Class decorator for creating a class with a metaclass. | def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | [
"def",
"add_metaclass",
"(",
"metaclass",
")",
":",
"def",
"wrapper",
"(",
"cls",
")",
":",
"orig_vars",
"=",
"cls",
".",
"__dict__",
".",
"copy",
"(",
")",
"slots",
"=",
"orig_vars",
".",
"get",
"(",
"'__slots__'",
")",
"if",
"slots",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"slots",
",",
"str",
")",
":",
"slots",
"=",
"[",
"slots",
"]",
"for",
"slots_var",
"in",
"slots",
":",
"orig_vars",
".",
"pop",
"(",
"slots_var",
")",
"orig_vars",
".",
"pop",
"(",
"'__dict__'",
",",
"None",
")",
"orig_vars",
".",
"pop",
"(",
"'__weakref__'",
",",
"None",
")",
"return",
"metaclass",
"(",
"cls",
".",
"__name__",
",",
"cls",
".",
"__bases__",
",",
"orig_vars",
")",
"return",
"wrapper"
] | [
811,
0
] | [
824,
18
] | python | en | ['en', 'en', 'en'] | True |
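A usage sketch for the decorator, including the __slots__ handling it performs (assumes six is installed; names are illustrative).

from six import add_metaclass  # same function as defined above

class Meta(type):
    pass

@add_metaclass(Meta)
class Point(object):
    __slots__ = ('x', 'y')  # the slot descriptors are popped and recreated

print(type(Point) is Meta)  # True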
python_2_unicode_compatible | (klass) |
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
|
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing. | def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass | [
"def",
"python_2_unicode_compatible",
"(",
"klass",
")",
":",
"if",
"PY2",
":",
"if",
"'__str__'",
"not",
"in",
"klass",
".",
"__dict__",
":",
"raise",
"ValueError",
"(",
"\"@python_2_unicode_compatible cannot be applied \"",
"\"to %s because it doesn't define __str__().\"",
"%",
"klass",
".",
"__name__",
")",
"klass",
".",
"__unicode__",
"=",
"klass",
".",
"__str__",
"klass",
".",
"__str__",
"=",
"lambda",
"self",
":",
"self",
".",
"__unicode__",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"klass"
] | [
827,
0
] | [
842,
16
] | python | en | ['en', 'error', 'th'] | False |
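A hedged usage sketch (assumes six is installed): under Python 3 the decorator is a no-op; under Python 2 it moves the text-returning __str__ to __unicode__ and installs a UTF-8-encoding __str__.

# -*- coding: utf-8 -*-
from six import python_2_unicode_compatible

@python_2_unicode_compatible
class Tag(object):
    def __str__(self):
        return u'caf\xe9'

print(u'%s' % Tag())  # café -- text on both Python 2 and 3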
_SixMetaPathImporter.is_package | (self, fullname) |
Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
|
Return true if the named module is a package. | def is_package(self, fullname):
"""
Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__") | [
"def",
"is_package",
"(",
"self",
",",
"fullname",
")",
":",
"return",
"hasattr",
"(",
"self",
".",
"__get_module",
"(",
"fullname",
")",
",",
"\"__path__\"",
")"
] | [
208,
4
] | [
215,
63
] | python | en | ['en', 'error', 'th'] | False |