identifier (string, 1-155 chars) | parameters (string, 2-6.09k chars) | docstring (string, 11-63.4k chars) | docstring_summary (string, 0-63.4k chars) | function (string, 29-99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2-7 chars) | docstring_language_predictions (string, 18-23 chars) | is_langid_reliable (2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|
Http.clear_credentials | (self) | Remove all the names and passwords
that are used for authentication | Remove all the names and passwords
that are used for authentication | def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = [] | ["def", "clear_credentials", "(", "self", ")", ":", "self", ".", "credentials", ".", "clear", "(", ")", "self", ".", "authorizations", "=", "[", "]"] | [1687, 4] | [1691, 32] | python | en | ['en', 'en', 'en'] | True |
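
A minimal usage sketch for the row above, assuming the standard httplib2 package; the credentials are illustrative placeholders:

```python
import httplib2

h = httplib2.Http()
# Register basic-auth credentials; httplib2 retries with them on 401 responses.
h.add_credentials("alice", "s3cret")

# ... authenticated requests via h.request(...) ...

# Drop every stored name/password and any accumulated authorization state.
h.clear_credentials()
```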
Http._request | (
self,
conn,
host,
absolute_uri,
request_uri,
method,
body,
headers,
redirections,
cachekey,
) | Do the actual request using the connection object
and also follow one level of redirects if necessary | Do the actual request using the connection object
and also follow one level of redirects if necessary | def _request(
self,
conn,
host,
absolute_uri,
request_uri,
method,
body,
headers,
redirections,
cachekey,
):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [
(auth.depth(request_uri), auth)
for auth in self.authorizations
if auth.inscope(host, request_uri)
]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(
conn, request_uri, method, body, headers
)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(
conn, request_uri, method, body, headers
)
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(
host, request_uri, headers, response, content
):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(
conn, request_uri, method, body, headers
)
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (
self.follow_all_redirects
or (method in ["GET", "HEAD"])
or response.status == 303
):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if "location" not in response and response.status != 300:
raise RedirectMissingLocation(
_(
"Redirected but the response is missing a Location: header."
),
response,
content,
)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if "location" in response:
location = response["location"]
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response["location"] = urlparse.urljoin(
absolute_uri, location
)
if response.status == 301 and method in ["GET", "HEAD"]:
response["-x-permanent-redirect-url"] = response["location"]
if "content-location" not in response:
response["content-location"] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if "if-none-match" in headers:
del headers["if-none-match"]
if "if-modified-since" in headers:
del headers["if-modified-since"]
if (
"authorization" in headers
and not self.forward_authorization_headers
):
del headers["authorization"]
if "location" in response:
location = response["location"]
old_response = copy.deepcopy(response)
if "content-location" not in old_response:
old_response["content-location"] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(
location,
method=redirect_method,
body=body,
headers=headers,
redirections=redirections - 1,
)
response.previous = old_response
else:
raise RedirectLimit(
"Redirected more times than rediection_limit allows.",
response,
content,
)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if "content-location" not in response:
response["content-location"] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content) | [function tokens: tokenized form of the function above] | [1770, 4] | [1887, 34] | python | en | ['en', 'en', 'en'] | True |
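
Http._request is internal, but the attributes it consults are public on the client. A hedged sketch of how those flags affect redirect handling (the URL is illustrative):

```python
import httplib2

h = httplib2.Http()
h.follow_redirects = True                 # follow 3xx responses at all
h.follow_all_redirects = True             # follow them even for non-GET/HEAD methods
h.forward_authorization_headers = False   # strip Authorization on redirect, as in the code above

resp, content = h.request("http://example.org/old-path", "GET")
print(resp.status)
print(resp.previous)   # the redirect response is chained via .previous by _request
```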
Http.request | (
self,
uri,
method="GET",
body=None,
headers=None,
redirections=DEFAULT_MAX_REDIRECTS,
connection_type=None,
) | Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
The maximum number of redirect to follow before raising an
exception is 'redirections. The default is 5.
The return value is a tuple of (response, content), the first
being and instance of the 'Response' class, the second being
a string that contains the response entity body.
| Performs a single HTTP request. | def request(
self,
uri,
method="GET",
body=None,
headers=None,
redirections=DEFAULT_MAX_REDIRECTS,
connection_type=None,
):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
The maximum number of redirect to follow before raising an
exception is 'redirections. The default is 5.
The return value is a tuple of (response, content), the first
being and instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
conn_key = ''
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if "user-agent" not in headers:
headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme + ":" + authority
conn = self.connections.get(conn_key)
if conn is None:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == "https":
if certs:
conn = self.connections[conn_key] = connection_type(
authority,
key_file=certs[0][0],
cert_file=certs[0][1],
timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
ssl_version=self.ssl_version,
)
else:
conn = self.connections[conn_key] = connection_type(
authority,
timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
ssl_version=self.ssl_version,
)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout, proxy_info=proxy_info
)
conn.set_debuglevel(debuglevel)
if "range" not in headers and "accept-encoding" not in headers:
headers["accept-encoding"] = "gzip, deflate"
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri.encode("utf-8")
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split("\r\n\r\n", 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if (
method in self.optimistic_concurrency_methods
and self.cache
and "etag" in info
and not self.ignore_etag
and "if-match" not in headers
):
# http://www.w3.org/1999/04/Editing/
headers["if-match"] = info["etag"]
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ["GET", "HEAD"] and "vary" in info:
vary = info["vary"]
vary_headers = vary.lower().replace(" ", "").split(",")
for header in vary_headers:
key = "-varied-%s" % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if (
cached_value
and method in ["GET", "HEAD"]
and self.cache
and "range" not in headers
):
if "-x-permanent-redirect-url" in info:
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit(
"Redirected more times than rediection_limit allows.",
{},
"",
)
(response, new_content) = self.request(
info["-x-permanent-redirect-url"],
method="GET",
headers=headers,
redirections=redirections - 1,
)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info["status"] = "504"
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if (
"etag" in info
and not self.ignore_etag
and not "if-none-match" in headers
):
headers["if-none-match"] = info["etag"]
if "last-modified" in info and not "last-modified" in headers:
headers["if-modified-since"] = info["last-modified"]
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(
conn,
authority,
uri,
request_uri,
method,
body,
headers,
redirections,
cachekey,
)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(
headers, merged_response, content, self.cache, cachekey
)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if "only-if-cached" in cc:
info["status"] = "504"
response = Response(info)
content = ""
else:
(response, content) = self._request(
conn,
authority,
uri,
request_uri,
method,
body,
headers,
redirections,
cachekey,
)
except Exception as e:
is_timeout = isinstance(e, socket.timeout)
if is_timeout:
conn = self.connections.pop(conn_key, None)
if conn:
conn.close()
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif is_timeout:
content = "Request Timeout"
response = Response(
{
"content-type": "text/plain",
"status": "408",
"content-length": len(content),
}
)
response.reason = "Request Timeout"
else:
content = str(e)
response = Response(
{
"content-type": "text/plain",
"status": "400",
"content-length": len(content),
}
)
response.reason = "Bad Request"
else:
raise
return (response, content) | [function tokens: tokenized form of the function above] | [1896, 4] | [2172, 34] | python | en | ['en', 'en', 'en'] | True |
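
A short usage sketch of the public request() API documented above (URL, body and headers are illustrative):

```python
import httplib2

h = httplib2.Http(".cache")  # optional on-disk cache directory
resp, content = h.request(
    "http://example.org/api/items",
    method="POST",
    body='{"name": "widget"}',
    headers={"content-type": "application/json"},
)
print(resp.status, resp.fromcache)
print(content[:80])
```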
Http._get_proxy_info | (self, scheme, authority) | Return a ProxyInfo instance (or None) based on the scheme
and authority.
| Return a ProxyInfo instance (or None) based on the scheme
and authority.
| def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if hasattr(proxy_info, "applies_to") and not proxy_info.applies_to(hostname):
proxy_info = None
return proxy_info | [function tokens: tokenized form of the function above] | [2174, 4] | [2185, 25] | python | en | ['en', 'en', 'en'] | True |
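
Because the method above accepts either a ProxyInfo object or a callable taking the URI scheme, both forms can be passed to the client. A hedged sketch (proxy host and port are placeholders):

```python
import httplib2
from httplib2 import socks

# Fixed proxy for every request
pi = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, "proxy.local", 3128)
h = httplib2.Http(proxy_info=pi)

# Or a callable: _get_proxy_info() invokes it with the scheme ("http"/"https")
h2 = httplib2.Http(proxy_info=lambda scheme: pi if scheme == "http" else None)
```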
AttentionSelfAttention.__init__ | (self, depth, spatial_dims, num_heads=1, positional_encoding=True, name="attention_self_attention") |
depth : number of output channels
spatial_dim : spatial dimensions of input tensor (x , y)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
|
depth : number of output channels
spatial_dim : spatial dimensions of input tensor (x , y)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
| def __init__(self, depth, spatial_dims, num_heads=1, positional_encoding=True, name="attention_self_attention"):
'''
depth : number of output channels
spatial_dim : spatial dimensions of input tensor (x , y)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
'''
super().__init__(name=name)
self.num_heads = num_heads
self.depth = depth
self.spatial_dims=spatial_dims
self.spatial_dim = np.prod(spatial_dims)
self.wq = Dense(self.depth * num_heads *2, name=name+"_q")
self.wka = Dense(self.depth * num_heads, name=name+"_ka")
self.wva = Dense(self.depth * num_heads, name=name+"_va")
self.wksa = Dense(self.depth * num_heads, name=name+"_ksa")
self.wvsa = Dense(self.depth * num_heads, name=name+"_vsa")
self.dense = Dense(self.depth, name=name+"_lin")
self.positional_encoding=positional_encoding
if positional_encoding:
self.pos_embedding = Embedding(self.spatial_dim, self.depth, name=name+"pos_enc") | [function tokens: tokenized form of the function above] | [7, 4] | [27, 93] | python | en | ['en', 'error', 'th'] | False |
AttentionSelfAttention.split_heads | (self, x, batch_size, num_heads) | Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, spa_dim, depth)
| Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, spa_dim, depth)
| def split_heads(self, x, batch_size, num_heads):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, spa_dim, depth)
"""
x = tf.reshape(x, (batch_size, self.spatial_dim, num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3]) | [function tokens: tokenized form of the function above] | [29, 4] | [34, 47] | python | en | ['en', 'fr', 'en'] | True |
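
The reshape/transpose above turns a flat (batch, spatial, heads*depth) tensor into per-head slices; a small standalone shape check, assuming TensorFlow is available:

```python
import tensorflow as tf

batch, spatial, heads, depth = 2, 16, 4, 8
x = tf.random.normal((batch, spatial, heads * depth))

x = tf.reshape(x, (batch, spatial, heads, depth))
x = tf.transpose(x, perm=[0, 2, 1, 3])
print(x.shape)  # (2, 4, 16, 8): batch, num_heads, spa_dim, depth
```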
AttentionSelfAttention.call | (self, x) |
x : list of 2 tensor with shape (batch_size, y, x, channels)
|
x : list of 2 tensor with shape (batch_size, y, x, channels)
| def call(self, x):
'''
x : list of 2 tensor with shape (batch_size, y, x, channels)
'''
[kvx, qx] = x
shape = tf.shape(qx)
batch_size = shape[0]
#spatial_dims = shape[1:-1]
#spatial_dim = tf.reduce_prod(spatial_dims)
depth_dim = shape[3]
if self.positional_encoding:
x_index = tf.range(self.spatial_dim, dtype=tf.int32)
pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.depth)) #for broadcasting purpose
qx = qx + pos_emb # broadcast (depth mut be equals to depth_dim)
kvx = kvx + pos_emb # broadcast (depth mut be equals to depth_dim)
q = self.wq(qx) # (batch_size, *spa_dims, depth*num_heads*2)
ka = self.wka(kvx) # (batch_size, *spa_dims, depth*num_heads)
va = self.wva(kvx) # (batch_size, *spa_dims, depth*num_heads)
ksa = self.wksa(qx) # (batch_size, *spa_dims, depth*num_heads)
vsa = self.wvsa(qx) # (batch_size, *spa_dims, depth*num_heads)
q = self.split_heads(q, batch_size, self.num_heads*2) # (batch_size, num_heads*2, spa_dim, depth)
ka = self.split_heads(ka, batch_size, self.num_heads) # (batch_size, num_heads, spa_dim, depth)
va = self.split_heads(va, batch_size, self.num_heads) # (batch_size, num_heads, spa_dim, depth)
ksa = self.split_heads(ksa, batch_size, self.num_heads) # (batch_size, num_heads, spa_dim, depth)
vsa = self.split_heads(vsa, batch_size, self.num_heads) # (batch_size, num_heads, spa_dim, depth)
k = tf.concat([ka, ksa], 1) # (batch_size, num_heads * 2, spa_dim, depth)
v = tf.concat([va, vsa], 1) # (batch_size, num_heads * 2, spa_dim, depth)
# scaled_attention.shape == (batch_size, num_heads, spa_dim, depth)
# attention_weights.shape == (batch_size, num_heads, spa_dim, spa_dim)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
tf.identity(attention_weights, name=self.name+"_attention_selfattention_weights")
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, spa_dims, num_heads*2, depth)
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.depth * self.num_heads*2)) # (batch_size, spa_dims, depth*num_heads*2)
output = self.dense(concat_attention) # (batch_size, spa_dim, depth)
output = tf.reshape(output, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.depth))
return output, attention_weights | [function tokens: tokenized form of the function above] | [36, 4] | [76, 40] | python | en | ['en', 'error', 'th'] | False |
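
A hedged construction/usage sketch for the layer above. It assumes the surrounding module defines scaled_dot_product_attention and imports Dense/Embedding from tensorflow.keras.layers, which the rows imply but do not show:

```python
import tensorflow as tf

# depth must equal the input channel count when positional_encoding=True (see the docstring above)
att = AttentionSelfAttention(depth=32, spatial_dims=(8, 8), num_heads=2)

kvx = tf.random.normal((4, 8, 8, 32))  # tensor attended over ("attention" branch)
qx = tf.random.normal((4, 8, 8, 32))   # tensor producing the queries ("self-attention" branch)
output, weights = att([kvx, qx])
print(output.shape)   # expected (4, 8, 8, 32)
print(weights.shape)  # expected (4, num_heads * 2, 64, 64)
```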
is_iterable | (x) | An implementation independent way of checking for iterables | An implementation independent way of checking for iterables | def is_iterable(x):
"An implementation independent way of checking for iterables"
try:
iter(x)
except TypeError:
return False
else:
return True | ["def", "is_iterable", "(", "x", ")", ":", "try", ":", "iter", "(", "x", ")", "except", "TypeError", ":", "return", "False", "else", ":", "return", "True"] | [0, 0] | [7, 19] | python | en | ['en', 'en', 'en'] | True |
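
A quick check of the helper above:

```python
print(is_iterable([1, 2, 3]))  # True: lists are iterable
print(is_iterable("abc"))      # True: strings are iterable
print(is_iterable(42))         # False: integers are not
```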
Envelope.__init__ | (self, *args) |
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
|
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
| def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
self._from_sequence(args[0])
else:
raise TypeError('Incorrect type of argument: %s' % type(args[0]))
elif len(args) == 4:
# Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence([float(a) for a in args])
else:
raise GDALException('Incorrect number (%d) of arguments.' % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise GDALException('Envelope minimum X > maximum X.')
if self.min_y > self.max_y:
raise GDALException('Envelope minimum Y > maximum Y.') | [function tokens: tokenized form of the function above] | [36, 4] | [65, 66] | python | en | ['en', 'error', 'th'] | False |
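
The constructor above accepts an OGREnvelope, a 4-sequence, or four numbers. A sketch assuming GeoDjango's usual import path for this class:

```python
from django.contrib.gis.gdal import Envelope

e1 = Envelope(0.0, 0.0, 5.0, 5.0)    # four individual coordinates
e2 = Envelope((0.0, 0.0, 5.0, 5.0))  # a 4-tuple behaves the same way
print(e1 == e2)                      # True: __eq__ compares the four coordinates
print(e1 == (0.0, 0.0, 5.0, 5.0))    # 4-tuples can be compared directly
```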
Envelope.__eq__ | (self, other) |
Return True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
|
Return True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
| def __eq__(self, other):
"""
Return True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
(self.max_x == other.max_x) and (self.max_y == other.max_y)
elif isinstance(other, tuple) and len(other) == 4:
return (self.min_x == other[0]) and (self.min_y == other[1]) and \
(self.max_x == other[2]) and (self.max_y == other[3])
else:
raise GDALException('Equivalence testing only works with other Envelopes.') | [function tokens: tokenized form of the function above] | [67, 4] | [79, 87] | python | en | ['en', 'error', 'th'] | False |
Envelope.__str__ | (self) | Return a string representation of the tuple. | Return a string representation of the tuple. | def __str__(self):
"Return a string representation of the tuple."
return str(self.tuple) | ["def", "__str__", "(", "self", ")", ":", "return", "str", "(", "self", ".", "tuple", ")"] | [81, 4] | [83, 30] | python | en | ['en', 'en', 'en'] | True |
Envelope._from_sequence | (self, seq) | Initialize the C OGR Envelope structure from the given sequence. | Initialize the C OGR Envelope structure from the given sequence. | def _from_sequence(self, seq):
"Initialize the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3] | [function tokens: tokenized form of the function above] | [85, 4] | [91, 36] | python | en | ['en', 'en', 'en'] | True |
Envelope.expand_to_include | (self, *args) |
Modify the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
|
Modify the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
| def expand_to_include(self, *args):
"""
Modify the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
raise TypeError('Incorrect type of argument: %s' % type(args[0]))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
# Individual parameters passed in.
return self.expand_to_include(args)
else:
raise GDALException('Incorrect number (%d) of arguments.' % len(args[0])) | [function tokens: tokenized form of the function above] | [93, 4] | [133, 85] | python | en | ['en', 'error', 'th'] | False |
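
A sketch of the call signatures handled above (coordinates are arbitrary):

```python
from django.contrib.gis.gdal import Envelope

e = Envelope(0, 0, 5, 5)
e.expand_to_include(10, 10)                  # a single point, as two arguments
e.expand_to_include((-3, -3, 0, 0))          # another extent, as a 4-tuple
e.expand_to_include(Envelope(4, 4, 12, 6))   # or another Envelope
print(e.tuple)                               # expected (-3.0, -3.0, 12.0, 10.0)
```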
Envelope.min_x | (self) | Return the value of the minimum X coordinate. | Return the value of the minimum X coordinate. | def min_x(self):
"Return the value of the minimum X coordinate."
return self._envelope.MinX | ["def", "min_x", "(", "self", ")", ":", "return", "self", ".", "_envelope", ".", "MinX"] | [136, 4] | [138, 34] | python | en | ['en', 'la', 'en'] | True |
Envelope.min_y | (self) | Return the value of the minimum Y coordinate. | Return the value of the minimum Y coordinate. | def min_y(self):
"Return the value of the minimum Y coordinate."
return self._envelope.MinY | ["def", "min_y", "(", "self", ")", ":", "return", "self", ".", "_envelope", ".", "MinY"] | [141, 4] | [143, 34] | python | en | ['en', 'la', 'en'] | True |
Envelope.max_x | (self) | Return the value of the maximum X coordinate. | Return the value of the maximum X coordinate. | def max_x(self):
"Return the value of the maximum X coordinate."
return self._envelope.MaxX | ["def", "max_x", "(", "self", ")", ":", "return", "self", ".", "_envelope", ".", "MaxX"] | [146, 4] | [148, 34] | python | en | ['en', 'la', 'en'] | True |
Envelope.max_y | (self) | Return the value of the maximum Y coordinate. | Return the value of the maximum Y coordinate. | def max_y(self):
"Return the value of the maximum Y coordinate."
return self._envelope.MaxY | ["def", "max_y", "(", "self", ")", ":", "return", "self", ".", "_envelope", ".", "MaxY"] | [151, 4] | [153, 34] | python | en | ['en', 'la', 'en'] | True |
Envelope.ur | (self) | Return the upper-right coordinate. | Return the upper-right coordinate. | def ur(self):
"Return the upper-right coordinate."
return (self.max_x, self.max_y) | ["def", "ur", "(", "self", ")", ":", "return", "(", "self", ".", "max_x", ",", "self", ".", "max_y", ")"] | [156, 4] | [158, 39] | python | en | ['en', 'en', 'en'] | True |
Envelope.ll | (self) | Return the lower-left coordinate. | Return the lower-left coordinate. | def ll(self):
"Return the lower-left coordinate."
return (self.min_x, self.min_y) | ["def", "ll", "(", "self", ")", ":", "return", "(", "self", ".", "min_x", ",", "self", ".", "min_y", ")"] | [161, 4] | [163, 39] | python | en | ['en', 'en', 'en'] | True |
Envelope.tuple | (self) | Return a tuple representing the envelope. | Return a tuple representing the envelope. | def tuple(self):
"Return a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y) | ["def", "tuple", "(", "self", ")", ":", "return", "(", "self", ".", "min_x", ",", "self", ".", "min_y", ",", "self", ".", "max_x", ",", "self", ".", "max_y", ")"] | [166, 4] | [168, 63] | python | en | ['en', 'en', 'en'] | True |
Envelope.wkt | (self) | Return WKT representing a Polygon for this envelope. | Return WKT representing a Polygon for this envelope. | def wkt(self):
"Return WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
(self.min_x, self.min_y, self.min_x, self.max_y,
self.max_x, self.max_y, self.max_x, self.min_y,
self.min_x, self.min_y) | [function tokens: tokenized form of the function above] | [171, 4] | [177, 39] | python | en | ['en', 'en', 'en'] | True |
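
The accessor properties above combine naturally; a short sketch:

```python
from django.contrib.gis.gdal import Envelope

e = Envelope(1, 2, 3, 4)
print(e.ll, e.ur)  # (1.0, 2.0) (3.0, 4.0)
print(e.tuple)     # (1.0, 2.0, 3.0, 4.0)
print(e.wkt)       # POLYGON((1.0 2.0,1.0 4.0,3.0 4.0,3.0 2.0,1.0 2.0))
```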
validate_twilio_request | (f) | Validates that incoming requests genuinely originated from Twilio | Validates that incoming requests genuinely originated from Twilio | def validate_twilio_request(f):
"""Validates that incoming requests genuinely originated from Twilio"""
@wraps(f)
def decorated_function(request, *args, **kwargs):
# Create an instance of the RequestValidator class
validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))
# Validate the request using its URL, POST data,
# and X-TWILIO-SIGNATURE header
request_valid = validator.validate(
request.build_absolute_uri(),
request.POST,
request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))
# Continue processing the request if it's valid (or if DEBUG is True)
# and return a 403 error if it's not
if request_valid or settings.DEBUG:
return f(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return decorated_function | [function tokens: tokenized form of the function above] | [8, 0] | [28, 29] | python | en | ['en', 'en', 'en'] | True |
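
A hedged sketch of decorating a Django view with the validator above; the view name, reply body and URL wiring are illustrative:

```python
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt

@csrf_exempt              # Twilio posts without a CSRF token
@validate_twilio_request  # returns 403 when the X-Twilio-Signature check fails
def incoming_sms(request):
    body = request.POST.get("Body", "")
    return HttpResponse(
        "<Response><Message>You said: %s</Message></Response>" % body,
        content_type="text/xml",
    )
```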
Asn1Type.effectiveTagSet | (self) | For |ASN.1| type is equivalent to *tagSet*
| For |ASN.1| type is equivalent to *tagSet*
| def effectiveTagSet(self):
"""For |ASN.1| type is equivalent to *tagSet*
"""
return self.tagSet | ["def", "effectiveTagSet", "(", "self", ")", ":", "return", "self", ".", "tagSet"] | [76, 4] | [79, 26] | python | en | ['en', 'en', 'en'] | True |
Asn1Type.tagMap | (self) | Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
| Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
| def tagMap(self):
"""Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
"""
return tagmap.TagMap({self.tagSet: self}) | ["def", "tagMap", "(", "self", ")", ":", "return", "tagmap", ".", "TagMap", "(", "{", "self", ".", "tagSet", ":", "self", "}", ")"] | [82, 4] | [85, 49] | python | en | ['en', 'en', 'en'] | True |
Asn1Type.isSameTypeWith | (self, other, matchTags=True, matchConstraints=True) | Examine |ASN.1| type for equality with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:obj:`True` if *other* is |ASN.1| type,
:obj:`False` otherwise.
| Examine |ASN.1| type for equality with other ASN.1 type. | def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for equality with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:obj:`True` if *other* is |ASN.1| type,
:obj:`False` otherwise.
"""
return (self is other or
(not matchTags or self.tagSet == other.tagSet) and
(not matchConstraints or self.subtypeSpec == other.subtypeSpec)) | [
"def",
"isSameTypeWith",
"(",
"self",
",",
"other",
",",
"matchTags",
"=",
"True",
",",
"matchConstraints",
"=",
"True",
")",
":",
"return",
"(",
"self",
"is",
"other",
"or",
"(",
"not",
"matchTags",
"or",
"self",
".",
"tagSet",
"==",
"other",
".",
"tagSet",
")",
"and",
"(",
"not",
"matchConstraints",
"or",
"self",
".",
"subtypeSpec",
"==",
"other",
".",
"subtypeSpec",
")",
")"
] | [
87,
4
] | [
109,
80
] | python | en | ['en', 'en', 'en'] | True |
Asn1Type.isSuperTypeOf | (self, other, matchTags=True, matchConstraints=True) | Examine |ASN.1| type for subtype relationship with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:obj:`True` if *other* is a subtype of |ASN.1| type,
:obj:`False` otherwise.
| Examine |ASN.1| type for subtype relationship with other ASN.1 type. | def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for subtype relationship with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:obj:`True` if *other* is a subtype of |ASN.1| type,
:obj:`False` otherwise.
"""
return (not matchTags or
(self.tagSet.isSuperTagSetOf(other.tagSet)) and
(not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec))) | [
"def",
"isSuperTypeOf",
"(",
"self",
",",
"other",
",",
"matchTags",
"=",
"True",
",",
"matchConstraints",
"=",
"True",
")",
":",
"return",
"(",
"not",
"matchTags",
"or",
"(",
"self",
".",
"tagSet",
".",
"isSuperTagSetOf",
"(",
"other",
".",
"tagSet",
")",
")",
"and",
"(",
"not",
"matchConstraints",
"or",
"self",
".",
"subtypeSpec",
".",
"isSuperTypeOf",
"(",
"other",
".",
"subtypeSpec",
")",
")",
")"
] | [
111,
4
] | [
133,
93
] | python | en | ['en', 'en', 'en'] | True |
SimpleAsn1Type.isValue | (self) | Indicate that |ASN.1| object represents ASN.1 value.
If *isValue* is :obj:`False` then this object represents just
ASN.1 schema.
If *isValue* is :obj:`True` then, in addition to its ASN.1 schema
features, this object can also be used like a Python built-in object
(e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
Returns
-------
: :class:`bool`
:obj:`False` if object represents just ASN.1 schema.
:obj:`True` if object represents ASN.1 schema and can be used as a normal value.
Note
----
There is an important distinction between PyASN1 schema and value objects.
The PyASN1 schema objects can only participate in ASN.1 schema-related
operations (e.g. defining or testing the structure of the data). Most
obvious use of ASN.1 schema is to guide serialisation codecs whilst
encoding/decoding serialised ASN.1 contents.
The PyASN1 value objects can **additionally** participate in many operations
involving regular Python objects (e.g. arithmetic, comprehension etc).
| Indicate that |ASN.1| object represents ASN.1 value. | def isValue(self):
"""Indicate that |ASN.1| object represents ASN.1 value.
If *isValue* is :obj:`False` then this object represents just
ASN.1 schema.
If *isValue* is :obj:`True` then, in addition to its ASN.1 schema
features, this object can also be used like a Python built-in object
(e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
Returns
-------
: :class:`bool`
:obj:`False` if object represents just ASN.1 schema.
:obj:`True` if object represents ASN.1 schema and can be used as a normal value.
Note
----
There is an important distinction between PyASN1 schema and value objects.
The PyASN1 schema objects can only participate in ASN.1 schema-related
operations (e.g. defining or testing the structure of the data). Most
obvious use of ASN.1 schema is to guide serialisation codecs whilst
encoding/decoding serialised ASN.1 contents.
The PyASN1 value objects can **additionally** participate in many operations
involving regular Python objects (e.g. arithmetic, comprehension etc).
"""
return self._value is not noValue | [
"def",
"isValue",
"(",
"self",
")",
":",
"return",
"self",
".",
"_value",
"is",
"not",
"noValue"
] | [
321,
4
] | [
348,
41
] | python | en | ['en', 'en', 'en'] | True |
SimpleAsn1Type.clone | (self, value=noValue, **kwargs) | Create a modified version of |ASN.1| schema or value object.
The `clone()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all arguments
of the `clone()` method are optional.
Whatever arguments are supplied, they are used to create a copy
of `self` taking precedence over the ones used to instantiate `self`.
Note
----
Due to the immutable nature of the |ASN.1| object, if no arguments
are supplied, no new |ASN.1| object will be created and `self` will
be returned instead.
| Create a modified version of |ASN.1| schema or value object. | def clone(self, value=noValue, **kwargs):
"""Create a modified version of |ASN.1| schema or value object.
The `clone()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all arguments
of the `clone()` method are optional.
Whatever arguments are supplied, they are used to create a copy
of `self` taking precedence over the ones used to instantiate `self`.
Note
----
Due to the immutable nature of the |ASN.1| object, if no arguments
are supplied, no new |ASN.1| object will be created and `self` will
be returned instead.
"""
if value is noValue:
if not kwargs:
return self
value = self._value
initializers = self.readOnly.copy()
initializers.update(kwargs)
return self.__class__(value, **initializers) | [
"def",
"clone",
"(",
"self",
",",
"value",
"=",
"noValue",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"value",
"is",
"noValue",
":",
"if",
"not",
"kwargs",
":",
"return",
"self",
"value",
"=",
"self",
".",
"_value",
"initializers",
"=",
"self",
".",
"readOnly",
".",
"copy",
"(",
")",
"initializers",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"__class__",
"(",
"value",
",",
"*",
"*",
"initializers",
")"
] | [
350,
4
] | [
375,
52
] | python | en | ['en', 'en', 'en'] | True |
SimpleAsn1Type.subtype | (self, value=noValue, **kwargs) | Create a specialization of |ASN.1| schema or value object.
The subtype relationship between ASN.1 types has no correlation with
subtype relationship between Python types. ASN.1 type is mainly identified
by its tag(s) (:py:class:`~pyasn1.type.tag.TagSet`) and value range
constraints (:py:class:`~pyasn1.type.constraint.ConstraintsIntersection`).
These ASN.1 type properties are implemented as |ASN.1| attributes.
The `subtype()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all parameters
of the `subtype()` method are optional.
With the exception of the arguments described below, the rest of
the supplied arguments are used to create a copy of `self` taking
precedence over the ones used to instantiate `self`.
The following arguments to `subtype()` create an ASN.1 subtype out of
|ASN.1| type:
Other Parameters
----------------
implicitTag: :py:class:`~pyasn1.type.tag.Tag`
Implicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
explicitTag: :py:class:`~pyasn1.type.tag.Tag`
Explicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Add ASN.1 constraints object to one of the `self`'s, then
use the result as new object's ASN.1 constraints.
Returns
-------
:
new instance of |ASN.1| schema or value object
Note
----
Due to the immutable nature of the |ASN.1| object, if no arguments
are supplied, no new |ASN.1| object will be created and `self` will
be returned instead.
| Create a specialization of |ASN.1| schema or value object. | def subtype(self, value=noValue, **kwargs):
"""Create a specialization of |ASN.1| schema or value object.
The subtype relationship between ASN.1 types has no correlation with
subtype relationship between Python types. ASN.1 type is mainly identified
by its tag(s) (:py:class:`~pyasn1.type.tag.TagSet`) and value range
constraints (:py:class:`~pyasn1.type.constraint.ConstraintsIntersection`).
These ASN.1 type properties are implemented as |ASN.1| attributes.
The `subtype()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all parameters
of the `subtype()` method are optional.
With the exception of the arguments described below, the rest of
the supplied arguments are used to create a copy of `self` taking
precedence over the ones used to instantiate `self`.
The following arguments to `subtype()` create an ASN.1 subtype out of
|ASN.1| type:
Other Parameters
----------------
implicitTag: :py:class:`~pyasn1.type.tag.Tag`
Implicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
explicitTag: :py:class:`~pyasn1.type.tag.Tag`
Explicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Add ASN.1 constraints object to one of the `self`'s, then
use the result as new object's ASN.1 constraints.
Returns
-------
:
new instance of |ASN.1| schema or value object
Note
----
Due to the immutable nature of the |ASN.1| object, if no arguments
are supplied, no new |ASN.1| object will be created and `self` will
be returned instead.
"""
if value is noValue:
if not kwargs:
return self
value = self._value
initializers = self.readOnly.copy()
implicitTag = kwargs.pop('implicitTag', None)
if implicitTag is not None:
initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
explicitTag = kwargs.pop('explicitTag', None)
if explicitTag is not None:
initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
for arg, option in kwargs.items():
initializers[arg] += option
return self.__class__(value, **initializers) | [
"def",
"subtype",
"(",
"self",
",",
"value",
"=",
"noValue",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"value",
"is",
"noValue",
":",
"if",
"not",
"kwargs",
":",
"return",
"self",
"value",
"=",
"self",
".",
"_value",
"initializers",
"=",
"self",
".",
"readOnly",
".",
"copy",
"(",
")",
"implicitTag",
"=",
"kwargs",
".",
"pop",
"(",
"'implicitTag'",
",",
"None",
")",
"if",
"implicitTag",
"is",
"not",
"None",
":",
"initializers",
"[",
"'tagSet'",
"]",
"=",
"self",
".",
"tagSet",
".",
"tagImplicitly",
"(",
"implicitTag",
")",
"explicitTag",
"=",
"kwargs",
".",
"pop",
"(",
"'explicitTag'",
",",
"None",
")",
"if",
"explicitTag",
"is",
"not",
"None",
":",
"initializers",
"[",
"'tagSet'",
"]",
"=",
"self",
".",
"tagSet",
".",
"tagExplicitly",
"(",
"explicitTag",
")",
"for",
"arg",
",",
"option",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"initializers",
"[",
"arg",
"]",
"+=",
"option",
"return",
"self",
".",
"__class__",
"(",
"value",
",",
"*",
"*",
"initializers",
")"
] | [
377,
4
] | [
443,
52
] | python | en | ['en', 'en', 'en'] | True |
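An illustrative sketch of how clone() and subtype() are typically combined on a concrete pyasn1 type such as univ.Integer; the derived name TaggedSmallInt and the chosen tag/constraint values are assumptions for the example, not part of the record:

    from pyasn1.type import univ, tag, constraint

    # Derive a context-tagged, range-constrained Integer schema object.
    TaggedSmallInt = univ.Integer().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0),
        subtypeSpec=constraint.ValueRangeConstraint(0, 255),
    )

    value = TaggedSmallInt.clone(42)  # turn the schema object into a value object
    print(value.isValue)              # True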
ConstructedAsn1Type.clone | (self, **kwargs) | Create a modified version of |ASN.1| schema object.
The `clone()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all arguments
of the `clone()` method are optional.
Whatever arguments are supplied, they are used to create a copy
of `self` taking precedence over the ones used to instantiate `self`.
Possible values of `self` are never copied over thus `clone()` can
only create a new schema object.
Returns
-------
:
new instance of |ASN.1| type/value
Note
----
Due to the mutable nature of the |ASN.1| object, even if no arguments
are supplied, a new |ASN.1| object will be created and returned.
| Create a modified version of |ASN.1| schema object. | def clone(self, **kwargs):
"""Create a modified version of |ASN.1| schema object.
The `clone()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all arguments
of the `clone()` method are optional.
Whatever arguments are supplied, they are used to create a copy
of `self` taking precedence over the ones used to instantiate `self`.
Possible values of `self` are never copied over thus `clone()` can
only create a new schema object.
Returns
-------
:
new instance of |ASN.1| type/value
Note
----
Due to the mutable nature of the |ASN.1| object, even if no arguments
are supplied, a new |ASN.1| object will be created and returned.
"""
cloneValueFlag = kwargs.pop('cloneValueFlag', False)
initializers = self.readOnly.copy()
initializers.update(kwargs)
clone = self.__class__(**initializers)
if cloneValueFlag:
self._cloneComponentValues(clone, cloneValueFlag)
return clone | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"cloneValueFlag",
"=",
"kwargs",
".",
"pop",
"(",
"'cloneValueFlag'",
",",
"False",
")",
"initializers",
"=",
"self",
".",
"readOnly",
".",
"copy",
"(",
")",
"initializers",
".",
"update",
"(",
"kwargs",
")",
"clone",
"=",
"self",
".",
"__class__",
"(",
"*",
"*",
"initializers",
")",
"if",
"cloneValueFlag",
":",
"self",
".",
"_cloneComponentValues",
"(",
"clone",
",",
"cloneValueFlag",
")",
"return",
"clone"
] | [
580,
4
] | [
613,
20
] | python | en | ['en', 'en', 'en'] | True |
ConstructedAsn1Type.subtype | (self, **kwargs) | Create a specialization of |ASN.1| schema object.
The `subtype()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all parameters
of the `subtype()` method are optional.
With the exception of the arguments described below, the rest of
the supplied arguments are used to create a copy of `self` taking
precedence over the ones used to instantiate `self`.
The following arguments to `subtype()` create an ASN.1 subtype out of
|ASN.1| type.
Other Parameters
----------------
implicitTag: :py:class:`~pyasn1.type.tag.Tag`
Implicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
explicitTag: :py:class:`~pyasn1.type.tag.Tag`
Explicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Add ASN.1 constraints object to one of the `self`'s, then
use the result as new object's ASN.1 constraints.
Returns
-------
:
new instance of |ASN.1| type/value
Note
----
Due to the mutable nature of the |ASN.1| object, even if no arguments
are supplied, a new |ASN.1| object will be created and returned.
| Create a specialization of |ASN.1| schema object. | def subtype(self, **kwargs):
"""Create a specialization of |ASN.1| schema object.
The `subtype()` method accepts the same set of arguments as |ASN.1|
class takes on instantiation except that all parameters
of the `subtype()` method are optional.
With the exception of the arguments described below, the rest of
the supplied arguments are used to create a copy of `self` taking
precedence over the ones used to instantiate `self`.
The following arguments to `subtype()` create an ASN.1 subtype out of
|ASN.1| type.
Other Parameters
----------------
implicitTag: :py:class:`~pyasn1.type.tag.Tag`
Implicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
explicitTag: :py:class:`~pyasn1.type.tag.Tag`
Explicitly apply given ASN.1 tag object to `self`'s
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Add ASN.1 constraints object to one of the `self`'s, then
use the result as new object's ASN.1 constraints.
Returns
-------
:
new instance of |ASN.1| type/value
Note
----
Due to the mutable nature of the |ASN.1| object, even if no arguments
are supplied, a new |ASN.1| object will be created and returned.
"""
initializers = self.readOnly.copy()
cloneValueFlag = kwargs.pop('cloneValueFlag', False)
implicitTag = kwargs.pop('implicitTag', None)
if implicitTag is not None:
initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
explicitTag = kwargs.pop('explicitTag', None)
if explicitTag is not None:
initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
for arg, option in kwargs.items():
initializers[arg] += option
clone = self.__class__(**initializers)
if cloneValueFlag:
self._cloneComponentValues(clone, cloneValueFlag)
return clone | [
"def",
"subtype",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"initializers",
"=",
"self",
".",
"readOnly",
".",
"copy",
"(",
")",
"cloneValueFlag",
"=",
"kwargs",
".",
"pop",
"(",
"'cloneValueFlag'",
",",
"False",
")",
"implicitTag",
"=",
"kwargs",
".",
"pop",
"(",
"'implicitTag'",
",",
"None",
")",
"if",
"implicitTag",
"is",
"not",
"None",
":",
"initializers",
"[",
"'tagSet'",
"]",
"=",
"self",
".",
"tagSet",
".",
"tagImplicitly",
"(",
"implicitTag",
")",
"explicitTag",
"=",
"kwargs",
".",
"pop",
"(",
"'explicitTag'",
",",
"None",
")",
"if",
"explicitTag",
"is",
"not",
"None",
":",
"initializers",
"[",
"'tagSet'",
"]",
"=",
"self",
".",
"tagSet",
".",
"tagExplicitly",
"(",
"explicitTag",
")",
"for",
"arg",
",",
"option",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"initializers",
"[",
"arg",
"]",
"+=",
"option",
"clone",
"=",
"self",
".",
"__class__",
"(",
"*",
"*",
"initializers",
")",
"if",
"cloneValueFlag",
":",
"self",
".",
"_cloneComponentValues",
"(",
"clone",
",",
"cloneValueFlag",
")",
"return",
"clone"
] | [
615,
4
] | [
677,
20
] | python | en | ['en', 'co', 'en'] | True |
_byte_string | (s) | Cast a string or byte string to an ASCII byte string. | Cast a string or byte string to an ASCII byte string. | def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII') | [
"def",
"_byte_string",
"(",
"s",
")",
":",
"return",
"s",
".",
"encode",
"(",
"'ASCII'",
")"
] | [
11,
0
] | [
13,
28
] | python | en | ['en', 'en', 'en'] | True |
_std_string | (s) | Cast a string or byte string to an ASCII string. | Cast a string or byte string to an ASCII string. | def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII')) | [
"def",
"_std_string",
"(",
"s",
")",
":",
"return",
"str",
"(",
"s",
".",
"decode",
"(",
"'ASCII'",
")",
")"
] | [
18,
0
] | [
20,
33
] | python | en | ['en', 'en', 'en'] | True |
normalize_version_info | (py_version_info) |
Convert a tuple of ints representing a Python version to one of length
three.
:param py_version_info: a tuple of ints representing a Python version,
or None to specify no version. The tuple can have any length.
:return: a tuple of length three if `py_version_info` is non-None.
Otherwise, return `py_version_info` unchanged (i.e. None).
|
Convert a tuple of ints representing a Python version to one of length
three. | def normalize_version_info(py_version_info):
# type: (Tuple[int, ...]) -> Tuple[int, int, int]
"""
Convert a tuple of ints representing a Python version to one of length
three.
:param py_version_info: a tuple of ints representing a Python version,
or None to specify no version. The tuple can have any length.
:return: a tuple of length three if `py_version_info` is non-None.
Otherwise, return `py_version_info` unchanged (i.e. None).
"""
if len(py_version_info) < 3:
py_version_info += (3 - len(py_version_info)) * (0,)
elif len(py_version_info) > 3:
py_version_info = py_version_info[:3]
return cast("VersionInfo", py_version_info) | [
"def",
"normalize_version_info",
"(",
"py_version_info",
")",
":",
"# type: (Tuple[int, ...]) -> Tuple[int, int, int]",
"if",
"len",
"(",
"py_version_info",
")",
"<",
"3",
":",
"py_version_info",
"+=",
"(",
"3",
"-",
"len",
"(",
"py_version_info",
")",
")",
"*",
"(",
"0",
",",
")",
"elif",
"len",
"(",
"py_version_info",
")",
">",
"3",
":",
"py_version_info",
"=",
"py_version_info",
"[",
":",
"3",
"]",
"return",
"cast",
"(",
"\"VersionInfo\"",
",",
"py_version_info",
")"
] | [
85,
0
] | [
102,
47
] | python | en | ['en', 'error', 'th'] | False |
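Expected behavior of normalize_version_info, shown as a short sketch derived from the code above (it pads or truncates to exactly three components):

    print(normalize_version_info((3,)))           # (3, 0, 0)
    print(normalize_version_info((3, 10)))        # (3, 10, 0)
    print(normalize_version_info((3, 10, 1, 5)))  # (3, 10, 1)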
ensure_dir | (path) | os.path.makedirs without EEXIST. | os.path.makedirs without EEXIST. | def ensure_dir(path):
# type: (AnyStr) -> None
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
# Windows can raise spurious ENOTEMPTY errors. See #6426.
if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:
raise | [
"def",
"ensure_dir",
"(",
"path",
")",
":",
"# type: (AnyStr) -> None",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"e",
":",
"# Windows can raise spurious ENOTEMPTY errors. See #6426.",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
"and",
"e",
".",
"errno",
"!=",
"errno",
".",
"ENOTEMPTY",
":",
"raise"
] | [
105,
0
] | [
113,
17
] | python | en | ['en', 'en', 'en'] | True |
rmtree_errorhandler | (func, path, exc_info) | On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems. | On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems. | def rmtree_errorhandler(func, path, exc_info):
# type: (Callable[..., Any], str, ExcInfo) -> None
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
try:
has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE)
except OSError:
# it's equivalent to os.path.exists
return
if has_attr_readonly:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise | [
"def",
"rmtree_errorhandler",
"(",
"func",
",",
"path",
",",
"exc_info",
")",
":",
"# type: (Callable[..., Any], str, ExcInfo) -> None",
"try",
":",
"has_attr_readonly",
"=",
"not",
"(",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
"&",
"stat",
".",
"S_IWRITE",
")",
"except",
"OSError",
":",
"# it's equivalent to os.path.exists",
"return",
"if",
"has_attr_readonly",
":",
"# convert to read/write",
"os",
".",
"chmod",
"(",
"path",
",",
"stat",
".",
"S_IWRITE",
")",
"# use the original function to repeat the operation",
"func",
"(",
"path",
")",
"return",
"else",
":",
"raise"
] | [
137,
0
] | [
155,
13
] | python | en | ['en', 'en', 'en'] | True |
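A typical call site for rmtree_errorhandler is as the onerror hook of shutil.rmtree; the directory path here is only an example:

    import shutil

    # Lets rmtree retry read-only files (e.g. under .svn) on Windows.
    shutil.rmtree("build/.svn", onerror=rmtree_errorhandler)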
display_path | (path) | Gives the display value for a given path, making it relative to cwd
if possible. | Gives the display value for a given path, making it relative to cwd
if possible. | def display_path(path):
# type: (str) -> str
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if path.startswith(os.getcwd() + os.path.sep):
path = "." + path[len(os.getcwd()) :]
return path | [
"def",
"display_path",
"(",
"path",
")",
":",
"# type: (str) -> str",
"path",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"if",
"path",
".",
"startswith",
"(",
"os",
".",
"getcwd",
"(",
")",
"+",
"os",
".",
"path",
".",
"sep",
")",
":",
"path",
"=",
"\".\"",
"+",
"path",
"[",
"len",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
":",
"]",
"return",
"path"
] | [
158,
0
] | [
165,
15
] | python | en | ['en', 'en', 'en'] | True |
backup_dir | (dir, ext=".bak") | Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc) | Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc) | def backup_dir(dir, ext=".bak"):
# type: (str, str) -> str
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension | [
"def",
"backup_dir",
"(",
"dir",
",",
"ext",
"=",
"\".bak\"",
")",
":",
"# type: (str, str) -> str",
"n",
"=",
"1",
"extension",
"=",
"ext",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"dir",
"+",
"extension",
")",
":",
"n",
"+=",
"1",
"extension",
"=",
"ext",
"+",
"str",
"(",
"n",
")",
"return",
"dir",
"+",
"extension"
] | [
168,
0
] | [
177,
26
] | python | en | ['en', 'en', 'en'] | True |
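A short sketch of backup_dir; note that it only computes a free name and does not create anything on disk:

    target = backup_dir("dist")  # "dist.bak", or "dist.bak2", "dist.bak3", ... if already taken
    print(target)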
_check_no_input | (message) | Raise an error if no input is allowed. | Raise an error if no input is allowed. | def _check_no_input(message):
# type: (str) -> None
"""Raise an error if no input is allowed."""
if os.environ.get("PIP_NO_INPUT"):
raise Exception(
f"No input was expected ($PIP_NO_INPUT set); question: {message}"
) | [
"def",
"_check_no_input",
"(",
"message",
")",
":",
"# type: (str) -> None",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"PIP_NO_INPUT\"",
")",
":",
"raise",
"Exception",
"(",
"f\"No input was expected ($PIP_NO_INPUT set); question: {message}\"",
")"
] | [
188,
0
] | [
194,
9
] | python | en | ['en', 'lb', 'en'] | True |
ask | (message, options) | Ask the message interactively, with the given possible responses | Ask the message interactively, with the given possible responses | def ask(message, options):
# type: (str, Iterable[str]) -> str
"""Ask the message interactively, with the given possible responses"""
while 1:
_check_no_input(message)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
"Your response ({!r}) was not one of the expected responses: "
"{}".format(response, ", ".join(options))
)
else:
return response | [
"def",
"ask",
"(",
"message",
",",
"options",
")",
":",
"# type: (str, Iterable[str]) -> str",
"while",
"1",
":",
"_check_no_input",
"(",
"message",
")",
"response",
"=",
"input",
"(",
"message",
")",
"response",
"=",
"response",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"response",
"not",
"in",
"options",
":",
"print",
"(",
"\"Your response ({!r}) was not one of the expected responses: \"",
"\"{}\"",
".",
"format",
"(",
"response",
",",
"\", \"",
".",
"join",
"(",
"options",
")",
")",
")",
"else",
":",
"return",
"response"
] | [
197,
0
] | [
210,
27
] | python | en | ['en', 'en', 'en'] | True |
ask_input | (message) | Ask for input interactively. | Ask for input interactively. | def ask_input(message):
# type: (str) -> str
"""Ask for input interactively."""
_check_no_input(message)
return input(message) | [
"def",
"ask_input",
"(",
"message",
")",
":",
"# type: (str) -> str",
"_check_no_input",
"(",
"message",
")",
"return",
"input",
"(",
"message",
")"
] | [
213,
0
] | [
217,
25
] | python | en | ['en', 'en', 'en'] | True |
ask_password | (message) | Ask for a password interactively. | Ask for a password interactively. | def ask_password(message):
# type: (str) -> str
"""Ask for a password interactively."""
_check_no_input(message)
return getpass.getpass(message) | [
"def",
"ask_password",
"(",
"message",
")",
":",
"# type: (str) -> str",
"_check_no_input",
"(",
"message",
")",
"return",
"getpass",
".",
"getpass",
"(",
"message",
")"
] | [
220,
0
] | [
224,
35
] | python | en | ['en', 'en', 'en'] | True |
strtobool | (val) | Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
| Convert a string representation of truth to true (1) or false (0). | def strtobool(val):
# type: (str) -> int
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {val!r}") | [
"def",
"strtobool",
"(",
"val",
")",
":",
"# type: (str) -> int",
"val",
"=",
"val",
".",
"lower",
"(",
")",
"if",
"val",
"in",
"(",
"\"y\"",
",",
"\"yes\"",
",",
"\"t\"",
",",
"\"true\"",
",",
"\"on\"",
",",
"\"1\"",
")",
":",
"return",
"1",
"elif",
"val",
"in",
"(",
"\"n\"",
",",
"\"no\"",
",",
"\"f\"",
",",
"\"false\"",
",",
"\"off\"",
",",
"\"0\"",
")",
":",
"return",
"0",
"else",
":",
"raise",
"ValueError",
"(",
"f\"invalid truth value {val!r}\"",
")"
] | [
227,
0
] | [
241,
56
] | python | en | ['en', 'pt', 'en'] | True |
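Behavior sketch for strtobool, following the accepted values listed in its docstring:

    print(strtobool("Yes"))  # 1
    print(strtobool("off"))  # 0
    strtobool("maybe")       # raises ValueError: invalid truth value 'maybe'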
tabulate | (rows) | Return a list of formatted rows and a list of column sizes.
For example::
>>> tabulate([['foobar', 2000], [0xdeadbeef]])
(['foobar 2000', '3735928559'], [10, 4])
| Return a list of formatted rows and a list of column sizes. | def tabulate(rows):
# type: (Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]
"""Return a list of formatted rows and a list of column sizes.
For example::
>>> tabulate([['foobar', 2000], [0xdeadbeef]])
(['foobar 2000', '3735928559'], [10, 4])
"""
rows = [tuple(map(str, row)) for row in rows]
sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")]
table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows]
return table, sizes | [
"def",
"tabulate",
"(",
"rows",
")",
":",
"# type: (Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]",
"rows",
"=",
"[",
"tuple",
"(",
"map",
"(",
"str",
",",
"row",
")",
")",
"for",
"row",
"in",
"rows",
"]",
"sizes",
"=",
"[",
"max",
"(",
"map",
"(",
"len",
",",
"col",
")",
")",
"for",
"col",
"in",
"zip_longest",
"(",
"*",
"rows",
",",
"fillvalue",
"=",
"\"\"",
")",
"]",
"table",
"=",
"[",
"\" \"",
".",
"join",
"(",
"map",
"(",
"str",
".",
"ljust",
",",
"row",
",",
"sizes",
")",
")",
".",
"rstrip",
"(",
")",
"for",
"row",
"in",
"rows",
"]",
"return",
"table",
",",
"sizes"
] | [
256,
0
] | [
268,
23
] | python | en | ['en', 'en', 'en'] | True |
is_installable_dir | (path: str) | Is path a directory containing pyproject.toml or setup.py?
If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for
a legacy setuptools layout by identifying setup.py. We don't check for the
setup.cfg because using it without setup.py is only available for PEP 517
projects, which are already covered by the pyproject.toml check.
| Is path a directory containing pyproject.toml or setup.py? | def is_installable_dir(path: str) -> bool:
"""Is path is a directory containing pyproject.toml or setup.py?
If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for
a legacy setuptools layout by identifying setup.py. We don't check for the
setup.cfg because using it without setup.py is only available for PEP 517
projects, which are already covered by the pyproject.toml check.
"""
if not os.path.isdir(path):
return False
if os.path.isfile(os.path.join(path, "pyproject.toml")):
return True
if os.path.isfile(os.path.join(path, "setup.py")):
return True
return False | [
"def",
"is_installable_dir",
"(",
"path",
":",
"str",
")",
"->",
"bool",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"False",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"pyproject.toml\"",
")",
")",
":",
"return",
"True",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"setup.py\"",
")",
")",
":",
"return",
"True",
"return",
"False"
] | [
271,
0
] | [
285,
16
] | python | en | ['en', 'en', 'en'] | True |
read_chunks | (file, size=io.DEFAULT_BUFFER_SIZE) | Yield pieces of data from a file-like object until EOF. | Yield pieces of data from a file-like object until EOF. | def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
# type: (BinaryIO, int) -> Iterator[bytes]
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk | [
"def",
"read_chunks",
"(",
"file",
",",
"size",
"=",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
":",
"# type: (BinaryIO, int) -> Iterator[bytes]",
"while",
"True",
":",
"chunk",
"=",
"file",
".",
"read",
"(",
"size",
")",
"if",
"not",
"chunk",
":",
"break",
"yield",
"chunk"
] | [
288,
0
] | [
295,
19
] | python | en | ['en', 'en', 'en'] | True |
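A usage sketch for read_chunks, hashing a file incrementally; the file name is an assumption for the example:

    import hashlib

    digest = hashlib.sha256()
    with open("example.whl", "rb") as f:
        for chunk in read_chunks(f):
            digest.update(chunk)
    print(digest.hexdigest())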
normalize_path | (path, resolve_symlinks=True) |
Convert a path to its canonical, case-normalized, absolute version.
|
Convert a path to its canonical, case-normalized, absolute version. | def normalize_path(path, resolve_symlinks=True):
# type: (str, bool) -> str
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = os.path.expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path) | [
"def",
"normalize_path",
"(",
"path",
",",
"resolve_symlinks",
"=",
"True",
")",
":",
"# type: (str, bool) -> str",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"if",
"resolve_symlinks",
":",
"path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"return",
"os",
".",
"path",
".",
"normcase",
"(",
"path",
")"
] | [
298,
0
] | [
309,
33
] | python | en | ['en', 'error', 'th'] | False |
splitext | (path) | Like os.path.splitext, but take off .tar too | Like os.path.splitext, but take off .tar too | def splitext(path):
# type: (str) -> Tuple[str, str]
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith(".tar"):
ext = base[-4:] + ext
base = base[:-4]
return base, ext | [
"def",
"splitext",
"(",
"path",
")",
":",
"# type: (str) -> Tuple[str, str]",
"base",
",",
"ext",
"=",
"posixpath",
".",
"splitext",
"(",
"path",
")",
"if",
"base",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".tar\"",
")",
":",
"ext",
"=",
"base",
"[",
"-",
"4",
":",
"]",
"+",
"ext",
"base",
"=",
"base",
"[",
":",
"-",
"4",
"]",
"return",
"base",
",",
"ext"
] | [
312,
0
] | [
319,
20
] | python | en | ['en', 'en', 'en'] | True |
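Expected outputs of splitext for .tar.* archives versus ordinary extensions:

    print(splitext("pkg/archive.tar.gz"))  # ('pkg/archive', '.tar.gz')
    print(splitext("pkg/archive.zip"))     # ('pkg/archive', '.zip')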
renames | (old, new) | Like os.renames(), but handles renaming across devices. | Like os.renames(), but handles renaming across devices. | def renames(old, new):
# type: (str, str) -> None
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass | [
"def",
"renames",
"(",
"old",
",",
"new",
")",
":",
"# type: (str, str) -> None",
"# Implementation borrowed from os.renames().",
"head",
",",
"tail",
"=",
"os",
".",
"path",
".",
"split",
"(",
"new",
")",
"if",
"head",
"and",
"tail",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"head",
")",
":",
"os",
".",
"makedirs",
"(",
"head",
")",
"shutil",
".",
"move",
"(",
"old",
",",
"new",
")",
"head",
",",
"tail",
"=",
"os",
".",
"path",
".",
"split",
"(",
"old",
")",
"if",
"head",
"and",
"tail",
":",
"try",
":",
"os",
".",
"removedirs",
"(",
"head",
")",
"except",
"OSError",
":",
"pass"
] | [
322,
0
] | [
337,
16
] | python | en | ['en', 'en', 'en'] | True |
is_local | (path) |
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
Caution: this function assumes the head of path has been normalized
with normalize_path.
|
Return True if path is within sys.prefix, if we're running in a virtualenv. | def is_local(path):
# type: (str) -> bool
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
Caution: this function assumes the head of path has been normalized
with normalize_path.
"""
if not running_under_virtualenv():
return True
return path.startswith(normalize_path(sys.prefix)) | [
"def",
"is_local",
"(",
"path",
")",
":",
"# type: (str) -> bool",
"if",
"not",
"running_under_virtualenv",
"(",
")",
":",
"return",
"True",
"return",
"path",
".",
"startswith",
"(",
"normalize_path",
"(",
"sys",
".",
"prefix",
")",
")"
] | [
340,
0
] | [
352,
54
] | python | en | ['en', 'error', 'th'] | False |
dist_is_local | (dist) |
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
|
Return True if given Distribution object is installed locally
(i.e. within current virtualenv). | def dist_is_local(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist)) | [
"def",
"dist_is_local",
"(",
"dist",
")",
":",
"# type: (Distribution) -> bool",
"return",
"is_local",
"(",
"dist_location",
"(",
"dist",
")",
")"
] | [
355,
0
] | [
364,
40
] | python | en | ['en', 'error', 'th'] | False |
dist_in_usersite | (dist) |
Return True if given Distribution is installed in user site.
|
Return True if given Distribution is installed in user site.
| def dist_in_usersite(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is installed in user site.
"""
return dist_location(dist).startswith(normalize_path(user_site)) | [
"def",
"dist_in_usersite",
"(",
"dist",
")",
":",
"# type: (Distribution) -> bool",
"return",
"dist_location",
"(",
"dist",
")",
".",
"startswith",
"(",
"normalize_path",
"(",
"user_site",
")",
")"
] | [
367,
0
] | [
372,
68
] | python | en | ['en', 'error', 'th'] | False |
dist_in_site_packages | (dist) |
Return True if given Distribution is installed in
sysconfig.get_python_lib().
|
Return True if given Distribution is installed in
sysconfig.get_python_lib().
| def dist_in_site_packages(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is installed in
sysconfig.get_python_lib().
"""
return dist_location(dist).startswith(normalize_path(site_packages)) | [
"def",
"dist_in_site_packages",
"(",
"dist",
")",
":",
"# type: (Distribution) -> bool",
"return",
"dist_location",
"(",
"dist",
")",
".",
"startswith",
"(",
"normalize_path",
"(",
"site_packages",
")",
")"
] | [
375,
0
] | [
381,
72
] | python | en | ['en', 'error', 'th'] | False |
dist_is_editable | (dist) |
Return True if given Distribution is an editable install.
|
Return True if given Distribution is an editable install.
| def dist_is_editable(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is an editable install.
"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + ".egg-link")
if os.path.isfile(egg_link):
return True
return False | [
"def",
"dist_is_editable",
"(",
"dist",
")",
":",
"# type: (Distribution) -> bool",
"for",
"path_item",
"in",
"sys",
".",
"path",
":",
"egg_link",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_item",
",",
"dist",
".",
"project_name",
"+",
"\".egg-link\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"egg_link",
")",
":",
"return",
"True",
"return",
"False"
] | [
384,
0
] | [
393,
16
] | python | en | ['en', 'error', 'th'] | False |
get_installed_distributions | (
local_only=True, # type: bool
skip=stdlib_pkgs, # type: Container[str]
include_editables=True, # type: bool
editables_only=False, # type: bool
user_only=False, # type: bool
paths=None, # type: Optional[List[str]]
) | Return a list of installed Distribution objects.
Left for compatibility until direct pkg_resources uses are refactored out.
| Return a list of installed Distribution objects. | def get_installed_distributions(
local_only=True, # type: bool
skip=stdlib_pkgs, # type: Container[str]
include_editables=True, # type: bool
editables_only=False, # type: bool
user_only=False, # type: bool
paths=None, # type: Optional[List[str]]
):
# type: (...) -> List[Distribution]
"""Return a list of installed Distribution objects.
Left for compatibility until direct pkg_resources uses are refactored out.
"""
from pip._internal.metadata import get_default_environment, get_environment
from pip._internal.metadata.pkg_resources import Distribution as _Dist
if paths is None:
env = get_default_environment()
else:
env = get_environment(paths)
dists = env.iter_installed_distributions(
local_only=local_only,
skip=skip,
include_editables=include_editables,
editables_only=editables_only,
user_only=user_only,
)
return [cast(_Dist, dist)._dist for dist in dists] | [
"def",
"get_installed_distributions",
"(",
"local_only",
"=",
"True",
",",
"# type: bool",
"skip",
"=",
"stdlib_pkgs",
",",
"# type: Container[str]",
"include_editables",
"=",
"True",
",",
"# type: bool",
"editables_only",
"=",
"False",
",",
"# type: bool",
"user_only",
"=",
"False",
",",
"# type: bool",
"paths",
"=",
"None",
",",
"# type: Optional[List[str]]",
")",
":",
"# type: (...) -> List[Distribution]",
"from",
"pip",
".",
"_internal",
".",
"metadata",
"import",
"get_default_environment",
",",
"get_environment",
"from",
"pip",
".",
"_internal",
".",
"metadata",
".",
"pkg_resources",
"import",
"Distribution",
"as",
"_Dist",
"if",
"paths",
"is",
"None",
":",
"env",
"=",
"get_default_environment",
"(",
")",
"else",
":",
"env",
"=",
"get_environment",
"(",
"paths",
")",
"dists",
"=",
"env",
".",
"iter_installed_distributions",
"(",
"local_only",
"=",
"local_only",
",",
"skip",
"=",
"skip",
",",
"include_editables",
"=",
"include_editables",
",",
"editables_only",
"=",
"editables_only",
",",
"user_only",
"=",
"user_only",
",",
")",
"return",
"[",
"cast",
"(",
"_Dist",
",",
"dist",
")",
".",
"_dist",
"for",
"dist",
"in",
"dists",
"]"
] | [
396,
0
] | [
423,
54
] | python | en | ['en', 'en', 'en'] | True |
get_distribution | (req_name) | Given a requirement name, return the installed Distribution object.
This searches from *all* distributions available in the environment, to
match the behavior of ``pkg_resources.get_distribution()``.
Left for compatibility until direct pkg_resources uses are refactored out.
| Given a requirement name, return the installed Distribution object. | def get_distribution(req_name):
# type: (str) -> Optional[Distribution]
"""Given a requirement name, return the installed Distribution object.
This searches from *all* distributions available in the environment, to
match the behavior of ``pkg_resources.get_distribution()``.
Left for compatibility until direct pkg_resources uses are refactored out.
"""
from pip._internal.metadata import get_default_environment
from pip._internal.metadata.pkg_resources import Distribution as _Dist
dist = get_default_environment().get_distribution(req_name)
if dist is None:
return None
return cast(_Dist, dist)._dist | [
"def",
"get_distribution",
"(",
"req_name",
")",
":",
"# type: (str) -> Optional[Distribution]",
"from",
"pip",
".",
"_internal",
".",
"metadata",
"import",
"get_default_environment",
"from",
"pip",
".",
"_internal",
".",
"metadata",
".",
"pkg_resources",
"import",
"Distribution",
"as",
"_Dist",
"dist",
"=",
"get_default_environment",
"(",
")",
".",
"get_distribution",
"(",
"req_name",
")",
"if",
"dist",
"is",
"None",
":",
"return",
"None",
"return",
"cast",
"(",
"_Dist",
",",
"dist",
")",
".",
"_dist"
] | [
426,
0
] | [
441,
34
] | python | en | ['en', 'en', 'en'] | True |
egg_link_path | (dist) |
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
|
Return the path for the .egg-link file if it exists, otherwise, None. | def egg_link_path(dist):
# type: (Distribution) -> Optional[str]
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
sites.append(site_packages)
if not virtualenv_no_global() and user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + ".egg-link"
if os.path.isfile(egglink):
return egglink
return None | [
"def",
"egg_link_path",
"(",
"dist",
")",
":",
"# type: (Distribution) -> Optional[str]",
"sites",
"=",
"[",
"]",
"if",
"running_under_virtualenv",
"(",
")",
":",
"sites",
".",
"append",
"(",
"site_packages",
")",
"if",
"not",
"virtualenv_no_global",
"(",
")",
"and",
"user_site",
":",
"sites",
".",
"append",
"(",
"user_site",
")",
"else",
":",
"if",
"user_site",
":",
"sites",
".",
"append",
"(",
"user_site",
")",
"sites",
".",
"append",
"(",
"site_packages",
")",
"for",
"site",
"in",
"sites",
":",
"egglink",
"=",
"os",
".",
"path",
".",
"join",
"(",
"site",
",",
"dist",
".",
"project_name",
")",
"+",
"\".egg-link\"",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"egglink",
")",
":",
"return",
"egglink",
"return",
"None"
] | [
444,
0
] | [
477,
15
] | python | en | ['en', 'error', 'th'] | False |
dist_location | (dist) |
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
The returned location is normalized (in particular, with symlinks removed).
|
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is. | def dist_location(dist):
# type: (Distribution) -> str
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
The returned location is normalized (in particular, with symlinks removed).
"""
egg_link = egg_link_path(dist)
if egg_link:
return normalize_path(egg_link)
return normalize_path(dist.location) | [
"def",
"dist_location",
"(",
"dist",
")",
":",
"# type: (Distribution) -> str",
"egg_link",
"=",
"egg_link_path",
"(",
"dist",
")",
"if",
"egg_link",
":",
"return",
"normalize_path",
"(",
"egg_link",
")",
"return",
"normalize_path",
"(",
"dist",
".",
"location",
")"
] | [
480,
0
] | [
493,
40
] | python | en | ['en', 'error', 'th'] | False |
captured_output | (stream_name) | Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
| Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO. | def captured_output(stream_name):
# type: (str) -> Iterator[StreamWrapper]
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout) | [
"def",
"captured_output",
"(",
"stream_name",
")",
":",
"# type: (str) -> Iterator[StreamWrapper]",
"orig_stdout",
"=",
"getattr",
"(",
"sys",
",",
"stream_name",
")",
"setattr",
"(",
"sys",
",",
"stream_name",
",",
"StreamWrapper",
".",
"from_stream",
"(",
"orig_stdout",
")",
")",
"try",
":",
"yield",
"getattr",
"(",
"sys",
",",
"stream_name",
")",
"finally",
":",
"setattr",
"(",
"sys",
",",
"stream_name",
",",
"orig_stdout",
")"
] | [
518,
0
] | [
530,
46
] | python | en | ['en', 'en', 'en'] | True |
captured_stdout | () | Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
| Capture the output of sys.stdout: | def captured_stdout():
# type: () -> ContextManager[StreamWrapper]
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output("stdout") | [
"def",
"captured_stdout",
"(",
")",
":",
"# type: () -> ContextManager[StreamWrapper]",
"return",
"captured_output",
"(",
"\"stdout\"",
")"
] | [
533,
0
] | [
543,
36
] | python | en | ['en', 'en', 'en'] | True |
captured_stderr | () |
See captured_stdout().
|
See captured_stdout().
| def captured_stderr():
# type: () -> ContextManager[StreamWrapper]
"""
See captured_stdout().
"""
return captured_output("stderr") | [
"def",
"captured_stderr",
"(",
")",
":",
"# type: () -> ContextManager[StreamWrapper]",
"return",
"captured_output",
"(",
"\"stderr\"",
")"
] | [
546,
0
] | [
551,
36
] | python | en | ['en', 'error', 'th'] | False |
build_netloc | (host, port) |
Build a netloc from a host-port pair
|
Build a netloc from a host-port pair
| def build_netloc(host, port):
# type: (str, Optional[int]) -> str
"""
Build a netloc from a host-port pair
"""
if port is None:
return host
if ":" in host:
# Only wrap host with square brackets when it is IPv6
host = f"[{host}]"
return f"{host}:{port}" | [
"def",
"build_netloc",
"(",
"host",
",",
"port",
")",
":",
"# type: (str, Optional[int]) -> str",
"if",
"port",
"is",
"None",
":",
"return",
"host",
"if",
"\":\"",
"in",
"host",
":",
"# Only wrap host with square brackets when it is IPv6",
"host",
"=",
"f\"[{host}]\"",
"return",
"f\"{host}:{port}\""
] | [
563,
0
] | [
573,
27
] | python | en | ['en', 'error', 'th'] | False |
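Behavior sketch for build_netloc, including the IPv6 bracketing case described in the code:

    print(build_netloc("example.com", None))  # 'example.com'
    print(build_netloc("example.com", 8080))  # 'example.com:8080'
    print(build_netloc("2001:db8::1", 443))   # '[2001:db8::1]:443'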
build_url_from_netloc | (netloc, scheme="https") |
Build a full URL from a netloc.
|
Build a full URL from a netloc.
| def build_url_from_netloc(netloc, scheme="https"):
# type: (str, str) -> str
"""
Build a full URL from a netloc.
"""
if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc:
# It must be a bare IPv6 address, so wrap it with brackets.
netloc = f"[{netloc}]"
return f"{scheme}://{netloc}" | [
"def",
"build_url_from_netloc",
"(",
"netloc",
",",
"scheme",
"=",
"\"https\"",
")",
":",
"# type: (str, str) -> str",
"if",
"netloc",
".",
"count",
"(",
"\":\"",
")",
">=",
"2",
"and",
"\"@\"",
"not",
"in",
"netloc",
"and",
"\"[\"",
"not",
"in",
"netloc",
":",
"# It must be a bare IPv6 address, so wrap it with brackets.",
"netloc",
"=",
"f\"[{netloc}]\"",
"return",
"f\"{scheme}://{netloc}\""
] | [
576,
0
] | [
584,
33
] | python | en | ['en', 'error', 'th'] | False |
parse_netloc | (netloc) |
Return the host-port pair from a netloc.
|
Return the host-port pair from a netloc.
| def parse_netloc(netloc):
# type: (str) -> Tuple[str, Optional[int]]
"""
Return the host-port pair from a netloc.
"""
url = build_url_from_netloc(netloc)
parsed = urllib.parse.urlparse(url)
return parsed.hostname, parsed.port | [
"def",
"parse_netloc",
"(",
"netloc",
")",
":",
"# type: (str) -> Tuple[str, Optional[int]]",
"url",
"=",
"build_url_from_netloc",
"(",
"netloc",
")",
"parsed",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"return",
"parsed",
".",
"hostname",
",",
"parsed",
".",
"port"
] | [
587,
0
] | [
594,
39
] | python | en | ['en', 'error', 'th'] | False |
split_auth_from_netloc | (netloc) |
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
|
Parse out and remove the auth information from a netloc. | def split_auth_from_netloc(netloc):
# type: (str) -> NetlocTuple
"""
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
"""
if "@" not in netloc:
return netloc, (None, None)
# Split from the right because that's how urllib.parse.urlsplit()
# behaves if more than one @ is present (which can be checked using
# the password attribute of urlsplit()'s return value).
auth, netloc = netloc.rsplit("@", 1)
pw = None # type: Optional[str]
if ":" in auth:
# Split from the left because that's how urllib.parse.urlsplit()
# behaves if more than one : is present (which again can be checked
# using the password attribute of the return value)
user, pw = auth.split(":", 1)
else:
user, pw = auth, None
user = urllib.parse.unquote(user)
if pw is not None:
pw = urllib.parse.unquote(pw)
return netloc, (user, pw) | [
"def",
"split_auth_from_netloc",
"(",
"netloc",
")",
":",
"# type: (str) -> NetlocTuple",
"if",
"\"@\"",
"not",
"in",
"netloc",
":",
"return",
"netloc",
",",
"(",
"None",
",",
"None",
")",
"# Split from the right because that's how urllib.parse.urlsplit()",
"# behaves if more than one @ is present (which can be checked using",
"# the password attribute of urlsplit()'s return value).",
"auth",
",",
"netloc",
"=",
"netloc",
".",
"rsplit",
"(",
"\"@\"",
",",
"1",
")",
"pw",
"=",
"None",
"# type: Optional[str]",
"if",
"\":\"",
"in",
"auth",
":",
"# Split from the left because that's how urllib.parse.urlsplit()",
"# behaves if more than one : is present (which again can be checked",
"# using the password attribute of the return value)",
"user",
",",
"pw",
"=",
"auth",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"else",
":",
"user",
",",
"pw",
"=",
"auth",
",",
"None",
"user",
"=",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"user",
")",
"if",
"pw",
"is",
"not",
"None",
":",
"pw",
"=",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"pw",
")",
"return",
"netloc",
",",
"(",
"user",
",",
"pw",
")"
] | [
597,
0
] | [
624,
29
] | python | en | ['en', 'error', 'th'] | False |
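A short sketch of split_auth_from_netloc with and without embedded credentials:

    print(split_auth_from_netloc("user:secret@example.com"))  # ('example.com', ('user', 'secret'))
    print(split_auth_from_netloc("example.com"))               # ('example.com', (None, None))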
redact_netloc | (netloc) |
Replace the sensitive data in a netloc with "****", if it exists.
For example:
- "user:[email protected]" returns "user:****@example.com"
- "[email protected]" returns "****@example.com"
|
Replace the sensitive data in a netloc with "****", if it exists. | def redact_netloc(netloc):
# type: (str) -> str
"""
Replace the sensitive data in a netloc with "****", if it exists.
For example:
- "user:[email protected]" returns "user:****@example.com"
- "[email protected]" returns "****@example.com"
"""
netloc, (user, password) = split_auth_from_netloc(netloc)
if user is None:
return netloc
if password is None:
user = "****"
password = ""
else:
user = urllib.parse.quote(user)
password = ":****"
return "{user}{password}@{netloc}".format(
user=user, password=password, netloc=netloc
) | [
"def",
"redact_netloc",
"(",
"netloc",
")",
":",
"# type: (str) -> str",
"netloc",
",",
"(",
"user",
",",
"password",
")",
"=",
"split_auth_from_netloc",
"(",
"netloc",
")",
"if",
"user",
"is",
"None",
":",
"return",
"netloc",
"if",
"password",
"is",
"None",
":",
"user",
"=",
"\"****\"",
"password",
"=",
"\"\"",
"else",
":",
"user",
"=",
"urllib",
".",
"parse",
".",
"quote",
"(",
"user",
")",
"password",
"=",
"\":****\"",
"return",
"\"{user}{password}@{netloc}\"",
".",
"format",
"(",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"netloc",
"=",
"netloc",
")"
] | [
627,
0
] | [
647,
5
] | python | en | ['en', 'error', 'th'] | False |
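A short sketch of redact_netloc on illustrative netlocs, covering the three branches of the function above:

    print(redact_netloc("user:s3cret@pypi.example.org"))  # user:****@pypi.example.org
    print(redact_netloc("token@pypi.example.org"))        # ****@pypi.example.org
    print(redact_netloc("pypi.example.org"))              # pypi.example.org (no auth, unchanged)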
_transform_url | (url, transform_netloc) | Transform and replace netloc in a url.
transform_netloc is a function taking the netloc and returning a
tuple. The first element of this tuple is the new netloc. The
entire tuple is returned.
Returns a tuple containing the transformed url as item 0 and the
original tuple returned by transform_netloc as item 1.
| Transform and replace netloc in a url. | def _transform_url(url, transform_netloc):
# type: (str, Callable[[str], Tuple[Any, ...]]) -> Tuple[str, NetlocTuple]
"""Transform and replace netloc in a url.
transform_netloc is a function taking the netloc and returning a
tuple. The first element of this tuple is the new netloc. The
entire tuple is returned.
Returns a tuple containing the transformed url as item 0 and the
original tuple returned by transform_netloc as item 1.
"""
purl = urllib.parse.urlsplit(url)
netloc_tuple = transform_netloc(purl.netloc)
# stripped url
url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)
surl = urllib.parse.urlunsplit(url_pieces)
return surl, cast("NetlocTuple", netloc_tuple) | [
"def",
"_transform_url",
"(",
"url",
",",
"transform_netloc",
")",
":",
"# type: (str, Callable[[str], Tuple[Any, ...]]) -> Tuple[str, NetlocTuple]",
"purl",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"url",
")",
"netloc_tuple",
"=",
"transform_netloc",
"(",
"purl",
".",
"netloc",
")",
"# stripped url",
"url_pieces",
"=",
"(",
"purl",
".",
"scheme",
",",
"netloc_tuple",
"[",
"0",
"]",
",",
"purl",
".",
"path",
",",
"purl",
".",
"query",
",",
"purl",
".",
"fragment",
")",
"surl",
"=",
"urllib",
".",
"parse",
".",
"urlunsplit",
"(",
"url_pieces",
")",
"return",
"surl",
",",
"cast",
"(",
"\"NetlocTuple\"",
",",
"netloc_tuple",
")"
] | [
650,
0
] | [
666,
50
] | python | en | ['en', 'en', 'en'] | True |
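A minimal sketch of the _transform_url contract with a toy transform_netloc callable (the callable and URL are made up for illustration):

    def upper_netloc(netloc):
        # the first element of the returned tuple becomes the new netloc
        return (netloc.upper(),)

    new_url, parts = _transform_url("https://pypi.example.org/simple/", upper_netloc)
    # new_url -> 'https://PYPI.EXAMPLE.ORG/simple/'
    # parts   -> ('PYPI.EXAMPLE.ORG',), i.e. whatever the callable returned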
split_auth_netloc_from_url | (url) |
Parse a url into separate netloc, auth, and url with no auth.
Returns: (url_without_auth, netloc, (username, password))
|
Parse a url into separate netloc, auth, and url with no auth. | def split_auth_netloc_from_url(url):
# type: (str) -> Tuple[str, str, Tuple[str, str]]
"""
Parse a url into separate netloc, auth, and url with no auth.
Returns: (url_without_auth, netloc, (username, password))
"""
url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
return url_without_auth, netloc, auth | [
"def",
"split_auth_netloc_from_url",
"(",
"url",
")",
":",
"# type: (str) -> Tuple[str, str, Tuple[str, str]]",
"url_without_auth",
",",
"(",
"netloc",
",",
"auth",
")",
"=",
"_transform_url",
"(",
"url",
",",
"_get_netloc",
")",
"return",
"url_without_auth",
",",
"netloc",
",",
"auth"
] | [
679,
0
] | [
687,
41
] | python | en | ['en', 'error', 'th'] | False |
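A usage sketch with an illustrative URL:

    url, netloc, (user, password) = split_auth_netloc_from_url(
        "https://user:s3cret@pypi.example.org/simple/")
    # url    -> 'https://pypi.example.org/simple/'
    # netloc -> 'pypi.example.org'
    # user, password -> ('user', 's3cret')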
remove_auth_from_url | (url) | Return a copy of url with 'username:password@' removed. | Return a copy of url with 'username:password | def remove_auth_from_url(url):
# type: (str) -> str
"""Return a copy of url with 'username:password@' removed."""
# username/pass params are passed to subversion through flags
# and are not recognized in the url.
return _transform_url(url, _get_netloc)[0] | [
"def",
"remove_auth_from_url",
"(",
"url",
")",
":",
"# type: (str) -> str",
"# username/pass params are passed to subversion through flags",
"# and are not recognized in the url.",
"return",
"_transform_url",
"(",
"url",
",",
"_get_netloc",
")",
"[",
"0",
"]"
] | [
690,
0
] | [
695,
46
] | python | en | ['en', 'en', 'en'] | True |
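Sketch (illustrative URL):

    clean = remove_auth_from_url("https://user:s3cret@pypi.example.org/simple/")
    # clean -> 'https://pypi.example.org/simple/'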
redact_auth_from_url | (url) | Replace the password in a given url with ****. | Replace the password in a given url with ****. | def redact_auth_from_url(url):
# type: (str) -> str
"""Replace the password in a given url with ****."""
return _transform_url(url, _redact_netloc)[0] | [
"def",
"redact_auth_from_url",
"(",
"url",
")",
":",
"# type: (str) -> str",
"return",
"_transform_url",
"(",
"url",
",",
"_redact_netloc",
")",
"[",
"0",
"]"
] | [
698,
0
] | [
701,
49
] | python | en | ['en', 'en', 'en'] | True |
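Sketch (illustrative URL), e.g. for building log messages that must not leak passwords:

    safe = redact_auth_from_url("https://user:s3cret@pypi.example.org/simple/")
    # safe -> 'https://user:****@pypi.example.org/simple/'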
protect_pip_from_modification_on_windows | (modifying_pip) | Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
| Protection of pip.exe from modification on Windows | def protect_pip_from_modification_on_windows(modifying_pip):
# type: (bool) -> None
"""Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
"""
pip_names = [
"pip.exe",
"pip{}.exe".format(sys.version_info[0]),
"pip{}.{}.exe".format(*sys.version_info[:2]),
]
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
)
if should_show_use_python_msg:
new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
raise CommandError(
"To modify pip, please run the following command:\n{}".format(
" ".join(new_command)
)
) | [
"def",
"protect_pip_from_modification_on_windows",
"(",
"modifying_pip",
")",
":",
"# type: (bool) -> None",
"pip_names",
"=",
"[",
"\"pip.exe\"",
",",
"\"pip{}.exe\"",
".",
"format",
"(",
"sys",
".",
"version_info",
"[",
"0",
"]",
")",
",",
"\"pip{}.{}.exe\"",
".",
"format",
"(",
"*",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
")",
",",
"]",
"# See https://github.com/pypa/pip/issues/1299 for more discussion",
"should_show_use_python_msg",
"=",
"(",
"modifying_pip",
"and",
"WINDOWS",
"and",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"in",
"pip_names",
")",
"if",
"should_show_use_python_msg",
":",
"new_command",
"=",
"[",
"sys",
".",
"executable",
",",
"\"-m\"",
",",
"\"pip\"",
"]",
"+",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"raise",
"CommandError",
"(",
"\"To modify pip, please run the following command:\\n{}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"new_command",
")",
")",
")"
] | [
744,
0
] | [
768,
9
] | python | en | ['en', 'en', 'en'] | True |
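A hedged sketch of how the guard is typically called; the CommandError import path is assumed, and the error is only raised when the running executable is one of the pip*.exe wrappers on Windows:

    from pip._internal.exceptions import CommandError  # assumed location

    try:
        protect_pip_from_modification_on_windows(modifying_pip=True)
    except CommandError as exc:
        print(exc)  # tells the user to re-run as: python -m pip ...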
is_console_interactive | () | Is this console interactive? | Is this console interactive? | def is_console_interactive():
# type: () -> bool
"""Is this console interactive?"""
return sys.stdin is not None and sys.stdin.isatty() | [
"def",
"is_console_interactive",
"(",
")",
":",
"# type: () -> bool",
"return",
"sys",
".",
"stdin",
"is",
"not",
"None",
"and",
"sys",
".",
"stdin",
".",
"isatty",
"(",
")"
] | [
771,
0
] | [
774,
55
] | python | en | ['en', 'en', 'en'] | True |
hash_file | (path, blocksize=1 << 20) | Return (hash, length) for path using hashlib.sha256() | Return (hash, length) for path using hashlib.sha256() | def hash_file(path, blocksize=1 << 20):
# type: (str, int) -> Tuple[Any, int]
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, "rb") as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
return h, length | [
"def",
"hash_file",
"(",
"path",
",",
"blocksize",
"=",
"1",
"<<",
"20",
")",
":",
"# type: (str, int) -> Tuple[Any, int]",
"h",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"length",
"=",
"0",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"for",
"block",
"in",
"read_chunks",
"(",
"f",
",",
"size",
"=",
"blocksize",
")",
":",
"length",
"+=",
"len",
"(",
"block",
")",
"h",
".",
"update",
"(",
"block",
")",
"return",
"h",
",",
"length"
] | [
777,
0
] | [
787,
20
] | python | en | ['en', 'hi-Latn', 'en'] | True |
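Sketch (the path is illustrative):

    digest, nbytes = hash_file("/tmp/example.whl")
    print(digest.hexdigest(), nbytes)  # sha256 hex digest plus the file size in bytes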
is_wheel_installed | () |
Return whether the wheel package is installed.
|
Return whether the wheel package is installed.
| def is_wheel_installed():
# type: () -> bool
"""
Return whether the wheel package is installed.
"""
try:
import wheel # noqa: F401
except ImportError:
return False
return True | [
"def",
"is_wheel_installed",
"(",
")",
":",
"# type: () -> bool",
"try",
":",
"import",
"wheel",
"# noqa: F401",
"except",
"ImportError",
":",
"return",
"False",
"return",
"True"
] | [
790,
0
] | [
800,
15
] | python | en | ['en', 'error', 'th'] | False |
pairwise | (iterable) |
Return paired elements.
For example:
s -> (s0, s1), (s2, s3), (s4, s5), ...
|
Return paired elements. | def pairwise(iterable):
# type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]]
"""
Return paired elements.
For example:
s -> (s0, s1), (s2, s3), (s4, s5), ...
"""
iterable = iter(iterable)
return zip_longest(iterable, iterable) | [
"def",
"pairwise",
"(",
"iterable",
")",
":",
"# type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]]",
"iterable",
"=",
"iter",
"(",
"iterable",
")",
"return",
"zip_longest",
"(",
"iterable",
",",
"iterable",
")"
] | [
803,
0
] | [
812,
42
] | python | en | ['en', 'error', 'th'] | False |
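Sketch showing the zip_longest padding behaviour for an odd-length input:

    list(pairwise([1, 2, 3, 4, 5]))
    # -> [(1, 2), (3, 4), (5, None)]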
partition | (
pred, # type: Callable[[T], bool]
iterable, # type: Iterable[T]
) |
Use a predicate to partition entries into false entries and true entries,
like
partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
|
Use a predicate to partition entries into false entries and true entries,
like | def partition(
pred, # type: Callable[[T], bool]
iterable, # type: Iterable[T]
):
# type: (...) -> Tuple[Iterable[T], Iterable[T]]
"""
Use a predicate to partition entries into false entries and true entries,
like
partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
"""
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2) | [
"def",
"partition",
"(",
"pred",
",",
"# type: Callable[[T], bool]",
"iterable",
",",
"# type: Iterable[T]",
")",
":",
"# type: (...) -> Tuple[Iterable[T], Iterable[T]]",
"t1",
",",
"t2",
"=",
"tee",
"(",
"iterable",
")",
"return",
"filterfalse",
"(",
"pred",
",",
"t1",
")",
",",
"filter",
"(",
"pred",
",",
"t2",
")"
] | [
815,
0
] | [
827,
50
] | python | en | ['en', 'error', 'th'] | False |
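Sketch matching the docstring example:

    falses, trues = partition(lambda n: n % 2 == 1, range(10))
    list(falses)  # [0, 2, 4, 6, 8] - predicate returned False
    list(trues)   # [1, 3, 5, 7, 9] - predicate returned True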
extract_all_gold_standard_data | (data_dir, nprocesses=1,
overwrite=False, **kwargs) |
Extract the gold standard block-level content and comment percentages from a
directory of labeled data (only those for which the gold standard blocks are
not found), and save results to corresponding files in a block-level
gold standard directory under ``data_dir``.
Args:
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html files and gold standard content +
comments text files
nprocesses (int): If > 1, use a :class:`multiprocessing.Pool` to
parallelize the extractions
overwrite (bool): If True, overwrite existing gold-standard blocks files.
**kwargs: passed into :func:`extract_gold_standard_blocks`
See Also:
:func:`extract_gold_standard_blocks`
|
Extract the gold standard block-level content and comment percentages from a
directory of labeled data (only those for which the gold standard blocks are
not found), and save results to corresponding files in a block-level
gold standard directory under ``data_dir``. | def extract_all_gold_standard_data(data_dir, nprocesses=1,
overwrite=False, **kwargs):
"""
Extract the gold standard block-level content and comment percentages from a
directory of labeled data (only those for which the gold standard blocks are
not found), and save results to corresponding files in a block-level
gold standard directory under ``data_dir``.
Args:
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html files and gold standard content +
comments text files
nprocesses (int): If > 1, use a :class:`multiprocessing.Pool` to
parallelize the extractions
overwrite (bool): If True, overwrite existing gold-standard blocks files.
**kwargs: passed into :func:`extract_gold_standard_blocks`
See Also:
:func:`extract_gold_standard_blocks`
"""
use_pool = nprocesses > 1
if use_pool:
pool = multiprocessing.Pool(processes=nprocesses)
# get the set of files that have already been block corrected
# so that we don't block correct them again
if overwrite is False:
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
if not os.path.isdir(gs_blocks_dir):
os.mkdir(gs_blocks_dir)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))
gs_blocks_fileroots = {
re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames}
else:
gs_blocks_fileroots = set()
# extract the block-level gold parse from
# the set of files to be block corrected
gs_dir = os.path.join(data_dir, GOLD_STANDARD_DIRNAME)
gs_filenames = get_filenames(
gs_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_EXT))
for i, gs_filename in enumerate(gs_filenames):
gs_fileroot = re.search(r'(.+)' + re.escape(GOLD_STANDARD_EXT), gs_filename).group(1)
if gs_fileroot in gs_blocks_fileroots:
continue
if i % 100 == 0:
print('Extracting gold standard blocks for file "{}"'.format(gs_filename))
if use_pool:
pool.apply_async(extract_gold_standard_blocks, (data_dir, gs_fileroot), kwargs)
else:
extract_gold_standard_blocks(data_dir, gs_fileroot, **kwargs)
# close out our pool
if use_pool:
pool.close()
pool.join() | [
"def",
"extract_all_gold_standard_data",
"(",
"data_dir",
",",
"nprocesses",
"=",
"1",
",",
"overwrite",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"use_pool",
"=",
"nprocesses",
">",
"1",
"if",
"use_pool",
":",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"nprocesses",
")",
"# get the set of files that have already been block corrected",
"# so that we don't block correct them again",
"if",
"overwrite",
"is",
"False",
":",
"gs_blocks_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"GOLD_STANDARD_BLOCKS_DIRNAME",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"gs_blocks_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"gs_blocks_dir",
")",
"gs_blocks_filenames",
"=",
"get_filenames",
"(",
"gs_blocks_dir",
",",
"full_path",
"=",
"False",
",",
"match_regex",
"=",
"re",
".",
"escape",
"(",
"GOLD_STANDARD_BLOCKS_EXT",
")",
")",
"gs_blocks_fileroots",
"=",
"{",
"re",
".",
"search",
"(",
"r'(.+)'",
"+",
"re",
".",
"escape",
"(",
"GOLD_STANDARD_BLOCKS_EXT",
")",
",",
"gs_blocks_filename",
")",
".",
"group",
"(",
"1",
")",
"for",
"gs_blocks_filename",
"in",
"gs_blocks_filenames",
"}",
"else",
":",
"gs_blocks_fileroots",
"=",
"set",
"(",
")",
"# extract the block-level gold parse from",
"# the set of files to be block corrected",
"gs_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"GOLD_STANDARD_DIRNAME",
")",
"gs_filenames",
"=",
"get_filenames",
"(",
"gs_dir",
",",
"full_path",
"=",
"False",
",",
"match_regex",
"=",
"re",
".",
"escape",
"(",
"GOLD_STANDARD_EXT",
")",
")",
"for",
"i",
",",
"gs_filename",
"in",
"enumerate",
"(",
"gs_filenames",
")",
":",
"gs_fileroot",
"=",
"re",
".",
"search",
"(",
"r'(.+)'",
"+",
"re",
".",
"escape",
"(",
"GOLD_STANDARD_EXT",
")",
",",
"gs_filename",
")",
".",
"group",
"(",
"1",
")",
"if",
"gs_fileroot",
"in",
"gs_blocks_fileroots",
":",
"continue",
"if",
"i",
"%",
"100",
"==",
"0",
":",
"print",
"(",
"'Extracting gold standard blocks for file \"{}\"'",
".",
"format",
"(",
"gs_filename",
")",
")",
"if",
"use_pool",
":",
"pool",
".",
"apply_async",
"(",
"extract_gold_standard_blocks",
",",
"(",
"data_dir",
",",
"gs_fileroot",
")",
",",
"kwargs",
")",
"else",
":",
"extract_gold_standard_blocks",
"(",
"data_dir",
",",
"gs_fileroot",
",",
"*",
"*",
"kwargs",
")",
"# close out our pool",
"if",
"use_pool",
":",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")"
] | [
28,
0
] | [
85,
19
] | python | en | ['en', 'error', 'th'] | False |
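A usage sketch; the directory path is illustrative and must already contain the raw-HTML and gold-standard sub-directories the module expects, and the module path is assumed:

    from dragnet.data_processing import extract_all_gold_standard_data  # assumed module path

    extract_all_gold_standard_data("/data/dragnet_data", nprocesses=4, overwrite=False)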
extract_gold_standard_blocks | (data_dir, fileroot, encoding=None,
tokenizer=simple_tokenizer, cetr=False) |
Extract the gold standard block-level content and comments for a single
observation identified by ``fileroot``, and write the results to file.
Args:
data_dir (str): The root directory containing sub-directories for
raw HTML, gold standard extracted content, and gold standard blocks.
fileroot (str): Unique identifier for a single observation of training
data, corresponding to the start of its raw html and gold standard
filenames under ``data_dir``.
encoding (str)
tokenizer (Callable): Object that takes a string and returns the tokens
as a list of strings.
cetr (bool): If True, parse the gold standard in clean eval format.
Notes:
Results are written to a text file in the block-level gold standard dir
:obj:`GOLD_STANDARD_BLOCKS_DIRNAME` below ``data_dir``. Each line
corresponds to a single block in its order of appearance, and has the
following format::
content_frac comments_frac all_tokens content_tokens comments_tokens
where each item is separated by a tab. ``content_frac`` is equal to the
fraction of ``all_tokens`` found in the corresponding gold parse content
text; ``comments_frac`` is the same but for comments text.
|
Extract the gold standard block-level content and comments for a single
observation identified by ``fileroot``, and write the results to file. | def extract_gold_standard_blocks(data_dir, fileroot, encoding=None,
tokenizer=simple_tokenizer, cetr=False):
"""
Extract the gold standard block-level content and comments for a single
observation identified by ``fileroot``, and write the results to file.
Args:
data_dir (str): The root directory containing sub-directories for
raw HTML, gold standard extracted content, and gold standard blocks.
fileroot (str): Unique identifier for a single observation of training
data, corresponding to the start of its raw html and gold standard
filenames under ``data_dir``.
encoding (str)
tokenizer (Callable): Object that takes a string and returns the tokens
as a list of strings.
cetr (bool): If True, parse the gold standard in clean eval format.
Notes:
Results are written to a text file in the block-level gold standard dir
:obj:`GOLD_STANDARD_BLOCKS_DIRNAME` below ``data_dir``. Each line
corresponds to a single block in its order of appearance, and has the
following format::
content_frac comments_frac all_tokens content_tokens comments_tokens
where each item is separated by a tab. ``content_frac`` is equal to the
fraction of ``all_tokens`` found in the corresponding gold parse content
text; ``comments_frac`` is the same but for comments text.
"""
# read the raw html, split it into blocks, and tokenize each block
raw_html = read_html_file(data_dir, fileroot, encoding=encoding) # text is unicode
from dragnet.blocks import BlockifyError
try:
blocks = [b.text for b in Blockifier.blockify(raw_html)] # text is bytes
except BlockifyError as e:
print('BlockifyError for file "{}"'.format(fileroot))
return
blocks_tokens = [tokenizer(block) for block in blocks]
num_blocks_tokens = [len(block_tokens) for block_tokens in blocks_tokens]
# solve the longest common subsequence problem to determine which blocks were kept
# need a list of all the tokens in the blocks, plus a correspondence of which
# block they belong to.
# we will determine which of the tokens is in the extracted content,
# then use the correspondence to block id to determine which blocks were kept
# get a flattened sequence of all tokens in all blocks
# and their corresponding block ids
all_blocks_tokens = []
all_blocks_tokens_block_id = []
for i, block_tokens in enumerate(blocks_tokens):
all_blocks_tokens.extend(block_tokens)
all_blocks_tokens_block_id.extend([i] * len(block_tokens))
# TODO: do we really need `num_all_blocks_tokens`?
# it was used to determine if there were more gold standard tokens than *all*
# tokens, and if so, some info was written to disk
# but it seems like an odd check, and it's probably better to take the
# gold standard data at face value -- presumably, somebody checked it!
# num_all_blocks_tokens = len(all_blocks_tokens)
def get_frac_and_str_tokens_in_gs(gs_txt):
"""
For each block, determine which and what fraction of tokens are
also in the gold standard text ``gs_txt`` for either content
or comments.
Returns:
List[float]
List[str]
"""
gs_tokens = tokenizer(gs_txt)
tokens_in_gs = check_inclusion(all_blocks_tokens, gs_tokens)
num_blocks_tokens_in_gs = [0 for _ in range(len(blocks))]
blocks_tokens_in_gs_tokens = [[] for _ in range(len(blocks))]
for token, token_in_gs, block_id in zip(all_blocks_tokens, tokens_in_gs, all_blocks_tokens_block_id):
if token_in_gs is True:
num_blocks_tokens_in_gs[block_id] += 1
blocks_tokens_in_gs_tokens[block_id].append(token)
blocks_tokens_strs_in_gs = [
' '.join(block_tokens_in_gs_tokens)
for block_tokens_in_gs_tokens in blocks_tokens_in_gs_tokens]
frac_blocks_tokens_in_gs = [
num_block_tokens_in_gs / num_block_tokens
for num_block_tokens_in_gs, num_block_tokens
in zip(num_blocks_tokens_in_gs, num_blocks_tokens)]
return (frac_blocks_tokens_in_gs, blocks_tokens_strs_in_gs)
gs_content, gs_comments = read_gold_standard_file(data_dir, fileroot, cetr)
frac_blocks_tokens_in_gs_content, blocks_tokens_strs_in_gs_content = \
get_frac_and_str_tokens_in_gs(gs_content)
frac_blocks_tokens_in_gs_comments, blocks_tokens_strs_in_gs_comments = \
get_frac_and_str_tokens_in_gs(gs_comments)
output_fname = os.path.join(
data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)
line_fmt = u'{frac_content}\t{frac_comments}\t{block_tokens}\t{content_tokens}\t{comment_tokens}\n'
with io.open(output_fname, mode='w') as f:
for block_id, block_tokens in enumerate(blocks_tokens):
line = line_fmt.format(
frac_content=frac_blocks_tokens_in_gs_content[block_id],
frac_comments=frac_blocks_tokens_in_gs_comments[block_id],
block_tokens=' '.join(block_tokens),
content_tokens=blocks_tokens_strs_in_gs_content[block_id],
comment_tokens=blocks_tokens_strs_in_gs_comments[block_id])
f.write(line) | [
"def",
"extract_gold_standard_blocks",
"(",
"data_dir",
",",
"fileroot",
",",
"encoding",
"=",
"None",
",",
"tokenizer",
"=",
"simple_tokenizer",
",",
"cetr",
"=",
"False",
")",
":",
"# read the raw html, split it into blocks, and tokenize each block",
"raw_html",
"=",
"read_html_file",
"(",
"data_dir",
",",
"fileroot",
",",
"encoding",
"=",
"encoding",
")",
"# text is unicode",
"from",
"dragnet",
".",
"blocks",
"import",
"BlockifyError",
"try",
":",
"blocks",
"=",
"[",
"b",
".",
"text",
"for",
"b",
"in",
"Blockifier",
".",
"blockify",
"(",
"raw_html",
")",
"]",
"# text is bytes",
"except",
"BlockifyError",
"as",
"e",
":",
"print",
"(",
"'BlockifyError for file \"{}\"'",
".",
"format",
"(",
"fileroot",
")",
")",
"return",
"blocks_tokens",
"=",
"[",
"tokenizer",
"(",
"block",
")",
"for",
"block",
"in",
"blocks",
"]",
"num_blocks_tokens",
"=",
"[",
"len",
"(",
"block_tokens",
")",
"for",
"block_tokens",
"in",
"blocks_tokens",
"]",
"# solve the longest common subsequence problem to determine which blocks were kept",
"# need a list of all the tokens in the blocks, plus a correspondence of which",
"# block they belong to.",
"# we will determine which of the tokens is in the extracted content,",
"# then use the correspondence to block id to determine which blocks were kept",
"# get a flattened sequence of all tokens in all blocks",
"# and their corresponding block ids",
"all_blocks_tokens",
"=",
"[",
"]",
"all_blocks_tokens_block_id",
"=",
"[",
"]",
"for",
"i",
",",
"block_tokens",
"in",
"enumerate",
"(",
"blocks_tokens",
")",
":",
"all_blocks_tokens",
".",
"extend",
"(",
"block_tokens",
")",
"all_blocks_tokens_block_id",
".",
"extend",
"(",
"[",
"i",
"]",
"*",
"len",
"(",
"block_tokens",
")",
")",
"# TODO: do we really need `num_all_blocks_tokens`?",
"# it was used to determine if there were more gold standard tokens than *all*",
"# tokens, and if so, some info was written to disk",
"# but it seems like an odd check, and it's probably better to take the",
"# gold standard data at face value -- presumably, somebody checked it!",
"# num_all_blocks_tokens = len(all_blocks_tokens)",
"def",
"get_frac_and_str_tokens_in_gs",
"(",
"gs_txt",
")",
":",
"\"\"\"\n For each block, determine which and what fraction of tokens are\n also in the gold standard text ``gs_txt`` for either content\n or comments.\n\n Returns:\n List[float]\n List[str]\n \"\"\"",
"gs_tokens",
"=",
"tokenizer",
"(",
"gs_txt",
")",
"tokens_in_gs",
"=",
"check_inclusion",
"(",
"all_blocks_tokens",
",",
"gs_tokens",
")",
"num_blocks_tokens_in_gs",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"blocks",
")",
")",
"]",
"blocks_tokens_in_gs_tokens",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"blocks",
")",
")",
"]",
"for",
"token",
",",
"token_in_gs",
",",
"block_id",
"in",
"zip",
"(",
"all_blocks_tokens",
",",
"tokens_in_gs",
",",
"all_blocks_tokens_block_id",
")",
":",
"if",
"token_in_gs",
"is",
"True",
":",
"num_blocks_tokens_in_gs",
"[",
"block_id",
"]",
"+=",
"1",
"blocks_tokens_in_gs_tokens",
"[",
"block_id",
"]",
".",
"append",
"(",
"token",
")",
"blocks_tokens_strs_in_gs",
"=",
"[",
"' '",
".",
"join",
"(",
"block_tokens_in_gs_tokens",
")",
"for",
"block_tokens_in_gs_tokens",
"in",
"blocks_tokens_in_gs_tokens",
"]",
"frac_blocks_tokens_in_gs",
"=",
"[",
"num_block_tokens_in_gs",
"/",
"num_block_tokens",
"for",
"num_block_tokens_in_gs",
",",
"num_block_tokens",
"in",
"zip",
"(",
"num_blocks_tokens_in_gs",
",",
"num_blocks_tokens",
")",
"]",
"return",
"(",
"frac_blocks_tokens_in_gs",
",",
"blocks_tokens_strs_in_gs",
")",
"gs_content",
",",
"gs_comments",
"=",
"read_gold_standard_file",
"(",
"data_dir",
",",
"fileroot",
",",
"cetr",
")",
"frac_blocks_tokens_in_gs_content",
",",
"blocks_tokens_strs_in_gs_content",
"=",
"get_frac_and_str_tokens_in_gs",
"(",
"gs_content",
")",
"frac_blocks_tokens_in_gs_comments",
",",
"blocks_tokens_strs_in_gs_comments",
"=",
"get_frac_and_str_tokens_in_gs",
"(",
"gs_comments",
")",
"output_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"GOLD_STANDARD_BLOCKS_DIRNAME",
",",
"fileroot",
"+",
"GOLD_STANDARD_BLOCKS_EXT",
")",
"line_fmt",
"=",
"u'{frac_content}\\t{frac_comments}\\t{block_tokens}\\t{content_tokens}\\t{comment_tokens}\\n'",
"with",
"io",
".",
"open",
"(",
"output_fname",
",",
"mode",
"=",
"'w'",
")",
"as",
"f",
":",
"for",
"block_id",
",",
"block_tokens",
"in",
"enumerate",
"(",
"blocks_tokens",
")",
":",
"line",
"=",
"line_fmt",
".",
"format",
"(",
"frac_content",
"=",
"frac_blocks_tokens_in_gs_content",
"[",
"block_id",
"]",
",",
"frac_comments",
"=",
"frac_blocks_tokens_in_gs_comments",
"[",
"block_id",
"]",
",",
"block_tokens",
"=",
"' '",
".",
"join",
"(",
"block_tokens",
")",
",",
"content_tokens",
"=",
"blocks_tokens_strs_in_gs_content",
"[",
"block_id",
"]",
",",
"comment_tokens",
"=",
"blocks_tokens_strs_in_gs_comments",
"[",
"block_id",
"]",
")",
"f",
".",
"write",
"(",
"line",
")"
] | [
88,
0
] | [
195,
25
] | python | en | ['en', 'error', 'th'] | False |
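Sketch for a single observation; the fileroot is illustrative and the matching HTML and gold-standard files are assumed to exist under the data directory:

    extract_gold_standard_blocks("/data/dragnet_data", "R123", encoding="utf-8")
    # writes one tab-separated line per block into the gold-standard-blocks directory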
get_filenames | (dirname, full_path=False, match_regex=None, extension=None) |
Get all filenames under ``dirname`` that match ``match_regex`` or have file
extension equal to ``extension``, optionally prepending the full path.
Args:
dirname (str): /path/to/dir on disk where files to read are saved
full_path (bool): if False, return filenames without path; if True,
return filenames with path, as ``os.path.join(dirname, fname)``
match_regex (str): include files whose names match this regex pattern
extension (str): if files only of a certain type are wanted,
specify the file extension (e.g. ".txt")
Yields:
str: next matching filename
|
Get all filenames under ``dirname`` that match ``match_regex`` or have file
extension equal to ``extension``, optionally prepending the full path. | def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
"""
Get all filenames under ``dirname`` that match ``match_regex`` or have file
extension equal to ``extension``, optionally prepending the full path.
Args:
dirname (str): /path/to/dir on disk where files to read are saved
full_path (bool): if False, return filenames without path; if True,
return filenames with path, as ``os.path.join(dirname, fname)``
match_regex (str): include files whose names match this regex pattern
extension (str): if files only of a certain type are wanted,
specify the file extension (e.g. ".txt")
Yields:
str: next matching filename
"""
if not os.path.exists(dirname):
raise OSError('directory "{}" does not exist'.format(dirname))
match_regex = re.compile(match_regex) if match_regex else None
for filename in sorted(os.listdir(dirname)):
if extension and not os.path.splitext(filename)[-1] == extension:
continue
if match_regex and not match_regex.search(filename):
continue
if full_path is True:
yield os.path.join(dirname, filename)
else:
yield filename | [
"def",
"get_filenames",
"(",
"dirname",
",",
"full_path",
"=",
"False",
",",
"match_regex",
"=",
"None",
",",
"extension",
"=",
"None",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"raise",
"OSError",
"(",
"'directory \"{}\" does not exist'",
".",
"format",
"(",
"dirname",
")",
")",
"match_regex",
"=",
"re",
".",
"compile",
"(",
"match_regex",
")",
"if",
"match_regex",
"else",
"None",
"for",
"filename",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"dirname",
")",
")",
":",
"if",
"extension",
"and",
"not",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
"==",
"extension",
":",
"continue",
"if",
"match_regex",
"and",
"not",
"match_regex",
".",
"search",
"(",
"filename",
")",
":",
"continue",
"if",
"full_path",
"is",
"True",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"else",
":",
"yield",
"filename"
] | [
198,
0
] | [
225,
26
] | python | en | ['en', 'error', 'th'] | False |
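Sketch (directory and extension are illustrative):

    for fname in get_filenames("/data/dragnet_data/HTML", full_path=True, extension=".html"):
        print(fname)  # full paths, yielded in sorted order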
read_html_file | (data_dir, fileroot, encoding=None) |
Read the HTML file corresponding to identifier ``fileroot``
in the raw HTML directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
Returns:
str
|
Read the HTML file corresponding to identifier ``fileroot``
in the raw HTML directory below the root ``data_dir``. | def read_html_file(data_dir, fileroot, encoding=None):
"""
Read the HTML file corresponding to identifier ``fileroot``
in the raw HTML directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
Returns:
str
"""
fname = os.path.join(
data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT)
encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1') # 'utf-16'
for encoding in encodings:
try:
with io.open(fname, mode='rt', encoding=encoding) as f:
raw_html = f.read()
break
except (UnicodeDecodeError, UnicodeError):
raw_html = None
return ftfy.fix_encoding(raw_html).strip() | [
"def",
"read_html_file",
"(",
"data_dir",
",",
"fileroot",
",",
"encoding",
"=",
"None",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"RAW_HTML_DIRNAME",
",",
"fileroot",
"+",
"RAW_HTML_EXT",
")",
"encodings",
"=",
"(",
"encoding",
",",
")",
"if",
"encoding",
"else",
"(",
"'utf-8'",
",",
"'iso-8859-1'",
")",
"# 'utf-16'",
"for",
"encoding",
"in",
"encodings",
":",
"try",
":",
"with",
"io",
".",
"open",
"(",
"fname",
",",
"mode",
"=",
"'rt'",
",",
"encoding",
"=",
"encoding",
")",
"as",
"f",
":",
"raw_html",
"=",
"f",
".",
"read",
"(",
")",
"break",
"except",
"(",
"UnicodeDecodeError",
",",
"UnicodeError",
")",
":",
"raw_html",
"=",
"None",
"return",
"ftfy",
".",
"fix_encoding",
"(",
"raw_html",
")",
".",
"strip",
"(",
")"
] | [
228,
0
] | [
252,
46
] | python | en | ['en', 'error', 'th'] | False |
read_gold_standard_file | (data_dir, fileroot, encoding=None, cetr=False) |
Read the gold standard content file corresponding to identifier ``fileroot``
in the gold standard directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
cetr (bool): if True, assume no comments and parse the gold standard
to remove tags
Returns:
List[str, str]: contents string and comments string, respectively
|
Read the gold standard content file corresponding to identifier ``fileroot``
in the gold standard directory below the root ``data_dir``. | def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False):
"""
Read the gold standard content file corresponding to identifier ``fileroot``
in the gold standard directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
cetr (bool): if True, assume no comments and parse the gold standard
to remove tags
Returns:
List[str, str]: contents string and comments string, respectively
"""
fname = os.path.join(
data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT)
encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1')
for encoding in encodings:
try:
with io.open(fname, mode='rt', encoding=encoding) as f:
gold_standard = f.read()
break
except (UnicodeDecodeError, UnicodeError):
gold_standard = None
if not gold_standard:
return [u'', u'']
if not cetr:
content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1)
# if no comments delimiter found, append empty comments string
if len(content_comments) == 1:
content_comments = [content_comments[0], u'']
else:
tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())
content_comments = [u' '.join(text_from_subtree(tree)), u'']
# fix text in case of mangled encodings
content_comments = [ftfy.fix_encoding(content_comments[0]).strip(),
ftfy.fix_encoding(content_comments[1]).strip()]
return content_comments | [
"def",
"read_gold_standard_file",
"(",
"data_dir",
",",
"fileroot",
",",
"encoding",
"=",
"None",
",",
"cetr",
"=",
"False",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"GOLD_STANDARD_DIRNAME",
",",
"fileroot",
"+",
"GOLD_STANDARD_EXT",
")",
"encodings",
"=",
"(",
"encoding",
",",
")",
"if",
"encoding",
"else",
"(",
"'utf-8'",
",",
"'utf-16'",
",",
"'iso-8859-1'",
")",
"for",
"encoding",
"in",
"encodings",
":",
"try",
":",
"with",
"io",
".",
"open",
"(",
"fname",
",",
"mode",
"=",
"'rt'",
",",
"encoding",
"=",
"encoding",
")",
"as",
"f",
":",
"gold_standard",
"=",
"f",
".",
"read",
"(",
")",
"break",
"except",
"(",
"UnicodeDecodeError",
",",
"UnicodeError",
")",
":",
"gold_standard",
"=",
"None",
"if",
"not",
"gold_standard",
":",
"return",
"[",
"u''",
",",
"u''",
"]",
"if",
"not",
"cetr",
":",
"content_comments",
"=",
"RE_COMMENTS_DELIM",
".",
"split",
"(",
"gold_standard",
",",
"maxsplit",
"=",
"1",
")",
"# if no comments delimiter found, append empty comments string",
"if",
"len",
"(",
"content_comments",
")",
"==",
"1",
":",
"content_comments",
"=",
"[",
"content_comments",
"[",
"0",
"]",
",",
"u''",
"]",
"else",
":",
"tree",
"=",
"etree",
".",
"fromstring",
"(",
"gold_standard",
",",
"parser",
"=",
"etree",
".",
"HTMLParser",
"(",
")",
")",
"content_comments",
"=",
"[",
"u' '",
".",
"join",
"(",
"text_from_subtree",
"(",
"tree",
")",
")",
",",
"u''",
"]",
"# fix text in case of mangled encodings",
"content_comments",
"=",
"[",
"ftfy",
".",
"fix_encoding",
"(",
"content_comments",
"[",
"0",
"]",
")",
".",
"strip",
"(",
")",
",",
"ftfy",
".",
"fix_encoding",
"(",
"content_comments",
"[",
"1",
"]",
")",
".",
"strip",
"(",
")",
"]",
"return",
"content_comments"
] | [
255,
0
] | [
297,
27
] | python | en | ['en', 'error', 'th'] | False |
read_gold_standard_blocks_file | (data_dir, fileroot, split_blocks=True) |
Read the gold standard blocks file corresponding to identifier ``fileroot``
in the gold standard blocks directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
split_blocks (bool): If True, split the file's content into blocks.
Returns:
str or List[str]
|
Read the gold standard blocks file corresponding to identifier ``fileroot``
in the gold standard blocks directory below the root ``data_dir``. | def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):
"""
Read the gold standard blocks file corresponding to identifier ``fileroot``
in the gold standard blocks directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
split_blocks (bool): If True, split the file's content into blocks.
Returns:
str or List[str]
"""
fname = os.path.join(
data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)
with io.open(fname, mode='r') as f:
data = f.read()
if split_blocks:
return filter(None, data[:-1].split('\n'))
return filter(None, data) | [
"def",
"read_gold_standard_blocks_file",
"(",
"data_dir",
",",
"fileroot",
",",
"split_blocks",
"=",
"True",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"GOLD_STANDARD_BLOCKS_DIRNAME",
",",
"fileroot",
"+",
"GOLD_STANDARD_BLOCKS_EXT",
")",
"with",
"io",
".",
"open",
"(",
"fname",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"if",
"split_blocks",
":",
"return",
"filter",
"(",
"None",
",",
"data",
"[",
":",
"-",
"1",
"]",
".",
"split",
"(",
"'\\n'",
")",
")",
"return",
"filter",
"(",
"None",
",",
"data",
")"
] | [
300,
0
] | [
319,
29
] | python | en | ['en', 'error', 'th'] | False |
prepare_data | (data_dir, fileroot, block_pct_tokens_thresh=0.1) |
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``.
Args:
data_dir (str)
fileroot (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:
The first element is simply the raw html as a string. The second and
third elements are 3-tuples for content and comments, respectively,
where the first element is a numpy array of 1s and 0s whose values
correspond to whether or not a given block is considered non-content;
the second element is a numpy integer array whose values are
the total number of tokens in each block; and the third element is
a flat list of content or comment tokens as strings, concatenated
from all blocks.
See Also:
:func:`prepare_all_data`
|
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``. | def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1):
"""
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``.
Args:
data_dir (str)
fileroot (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:
The first element is simply the raw html as a string. The second and
third elements are 3-tuples for content and comments, respectively,
where the first element is a numpy array of 1s and 0s whose values
correspond to whether or not a given block is considered non-content;
the second element is a numpy integer array whose values are
the total number of tokens in each block; and the third element is
a flat list of content or comment tokens as strings, concatenated
from all blocks.
See Also:
:func:`prepare_all_data`
"""
if not 0.0 <= block_pct_tokens_thresh <= 1.0:
raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]')
html = read_html_file(data_dir, fileroot)
blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True)
content_blocks = []
comments_blocks = []
for block in blocks:
block_split = block.split('\t')
num_block_tokens = len(block_split[2].split())
# total number of tokens in block is used as weights
content_blocks.append(
(float(block_split[0]), num_block_tokens, block_split[3].split()))
comments_blocks.append(
(float(block_split[1]), num_block_tokens, block_split[4].split()))
parsed_content_blocks = _parse_content_or_comments_blocks(
content_blocks, block_pct_tokens_thresh)
parsed_comments_blocks = _parse_content_or_comments_blocks(
comments_blocks, block_pct_tokens_thresh)
return (html, parsed_content_blocks, parsed_comments_blocks) | [
"def",
"prepare_data",
"(",
"data_dir",
",",
"fileroot",
",",
"block_pct_tokens_thresh",
"=",
"0.1",
")",
":",
"if",
"not",
"0.0",
"<=",
"block_pct_tokens_thresh",
"<=",
"1.0",
":",
"raise",
"ValueError",
"(",
"'block_pct_tokens_thresh must be in the range [0.0, 1.0]'",
")",
"html",
"=",
"read_html_file",
"(",
"data_dir",
",",
"fileroot",
")",
"blocks",
"=",
"read_gold_standard_blocks_file",
"(",
"data_dir",
",",
"fileroot",
",",
"split_blocks",
"=",
"True",
")",
"content_blocks",
"=",
"[",
"]",
"comments_blocks",
"=",
"[",
"]",
"for",
"block",
"in",
"blocks",
":",
"block_split",
"=",
"block",
".",
"split",
"(",
"'\\t'",
")",
"num_block_tokens",
"=",
"len",
"(",
"block_split",
"[",
"2",
"]",
".",
"split",
"(",
")",
")",
"# total number of tokens in block is used as weights",
"content_blocks",
".",
"append",
"(",
"(",
"float",
"(",
"block_split",
"[",
"0",
"]",
")",
",",
"num_block_tokens",
",",
"block_split",
"[",
"3",
"]",
".",
"split",
"(",
")",
")",
")",
"comments_blocks",
".",
"append",
"(",
"(",
"float",
"(",
"block_split",
"[",
"1",
"]",
")",
",",
"num_block_tokens",
",",
"block_split",
"[",
"4",
"]",
".",
"split",
"(",
")",
")",
")",
"parsed_content_blocks",
"=",
"_parse_content_or_comments_blocks",
"(",
"content_blocks",
",",
"block_pct_tokens_thresh",
")",
"parsed_comments_blocks",
"=",
"_parse_content_or_comments_blocks",
"(",
"comments_blocks",
",",
"block_pct_tokens_thresh",
")",
"return",
"(",
"html",
",",
"parsed_content_blocks",
",",
"parsed_comments_blocks",
")"
] | [
330,
0
] | [
376,
64
] | python | en | ['en', 'error', 'th'] | False |
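Sketch unpacking the return value described above (the fileroot is illustrative):

    html, content, comments = prepare_data("/data/dragnet_data", "R123", block_pct_tokens_thresh=0.1)
    labels, token_counts, tokens = content  # per-block labels, per-block token counts, flat token list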
prepare_all_data | (data_dir, block_pct_tokens_thresh=0.1) |
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data`
|
Prepare data for all HTML + gold standard blocks examples in ``data_dir``. | def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):
"""
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data`
"""
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))
gs_blocks_fileroots = (
re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames)
return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh)
for fileroot in gs_blocks_fileroots] | [
"def",
"prepare_all_data",
"(",
"data_dir",
",",
"block_pct_tokens_thresh",
"=",
"0.1",
")",
":",
"gs_blocks_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"GOLD_STANDARD_BLOCKS_DIRNAME",
")",
"gs_blocks_filenames",
"=",
"get_filenames",
"(",
"gs_blocks_dir",
",",
"full_path",
"=",
"False",
",",
"match_regex",
"=",
"re",
".",
"escape",
"(",
"GOLD_STANDARD_BLOCKS_EXT",
")",
")",
"gs_blocks_fileroots",
"=",
"(",
"re",
".",
"search",
"(",
"r'(.+)'",
"+",
"re",
".",
"escape",
"(",
"GOLD_STANDARD_BLOCKS_EXT",
")",
",",
"gs_blocks_filename",
")",
".",
"group",
"(",
"1",
")",
"for",
"gs_blocks_filename",
"in",
"gs_blocks_filenames",
")",
"return",
"[",
"prepare_data",
"(",
"data_dir",
",",
"fileroot",
",",
"block_pct_tokens_thresh",
")",
"for",
"fileroot",
"in",
"gs_blocks_fileroots",
"]"
] | [
379,
0
] | [
401,
48
] | python | en | ['en', 'error', 'th'] | False |
rnn_multistation_sampling_temperature_sequencer | (filenames, resample_by=1, batch_size=sys.maxsize, sequence_size=sys.maxsize, n_forward=0, nb_epochs=1, tminmax=False, keepinmem=True) |
Loads temperature data from CSV files.
Each data sequence is resampled by "resample_by". Use 1 not to resample.
The data is also shifted by n_forward after resampling to generate target training sequences.
n_forward is typically 1 but can be 0 to disable shifting or >1 to train to predict further in advance.
Each data sequence is split into sequences of size sequence_size. The default is a single sequence with everything.
Sequences are then assembled in batches of batch_size, sequences from the same weather station
always on the same line in each batch. batch_size is infinite by default which will result in a batch
as large as the number of available files.
When batch_size data files are exhausted, the next batch of files is loaded.
By default (Tmin, Tmax, interpolated) are returned if tminmax is False. Otherwise (Tmin, Tmax).
By default, all loaded data is kept in memory and re-served from there. Set keepinmem=False to discard and reload.
Yields sample, target, date, epoch, filecount
epoch: the epoch number. RNN state should be reset on every epoch change.
filecount: files loaded in this epoch. RNN state should be reset on every filecount change.
sample, target: the pair of training samples and targets, of size batch_size or less
date: sequence of dates corresponding to the samples (assumed the same across batch)
|
Loads temperature data from CSV files.
Each data sequence is resampled by "resample_by". Use 1 not to resample.
The data is also shifted by n_forward after resampling to generate target training sequences.
n_forward is typically 1 but can be 0 to disable shifting or >1 to train to predict further in advance.
Each data sequence is split into sequences of size sequence_size. The default is a single sequence with everything.
Sequences are then assembled in batches of batch_size, sequences from the same weather station
always on the same line in each batch. batch_size is infinite by default which will result in a batch
as large as the number of available files.
When batch_size data files are exhausted, the next batch of files is loaded.
By default (Tmin, Tmax, interpolated) are returned if tminmax is False. Otherwise (Tmin, Tmax).
By default, all loaded data is kept in memory and re-served from there. Set keepinmem=False to discard and reload.
Yields sample, target, date, epoch, filecount
epoch: the epoch number. RNN state should be reset on every epoch change.
filecount: files loaded in this epoch. RNN state should be reset on every filecount change.
sample, target: the pair of training samples and targets, of size batch_size or less
date: sequence of dates corresponding to the samples (assumed the same across batch)
| def rnn_multistation_sampling_temperature_sequencer(filenames, resample_by=1, batch_size=sys.maxsize, sequence_size=sys.maxsize, n_forward=0, nb_epochs=1, tminmax=False, keepinmem=True):
"""
Loads temperature data from CSV files.
Each data sequence is resampled by "resample_by". Use 1 not to resample.
The data is also shifted by n_forward after resampling to generate target training sequences.
n_forward is typically 1 but can be 0 to disable shifting or >1 to train to predict further in advance.
Each data sequence is split into sequences of size sequence_size. The default is a single sequence with everything.
Sequences are then assembled in batches of batch_size, sequences from the same weather station
always on the same line in each batch. batch_size is infinite by default which will result in a batch
as large as the number of available files.
When batch_size data files are exhausted, the next batch of files is loaded.
By default (Tmin, Tmax, interpolated) are returned if tminmax is False. Otherwise (Tmin, Tmax).
By default, all loaded data is kept in memory and re-served from there. Set keepinmem=False to discard and reload.
Yields sample, target, date, epoch, filecount
epoch: the epoch number. RNN state should be reset on every epoch change.
filecount: files loaded in this epoch. RNN state should be reset on every filecount change.
sample, target: the pair of training samples and targets, of size batch_size or less
date: sequence of dates corresponding to the samples (assumed the same across batch)
"""
#filenames = gfile.get_matching_files(filepattern)
#print('Pattern "{}" matches {} files'.format(filepattern, len(filenames)))
#filenames = np.array(filenames)
def adjust(ary, n):
return ary[:ary.shape[0]//n*n]
loaded_samples = {}
loaded_targets = {}
for epoch in range(nb_epochs):
np.random.shuffle(filenames)
batchlen = len(filenames) % batch_size # Remainder as
batchlen = batch_size if batchlen == 0 else batchlen # first smaller batch.
filebatch = []
for filecount, filename in enumerate(filenames):
filebatch.append(filename)
if len(filebatch) == batchlen:
if filecount in loaded_samples:
# shuffle lines every time the data is reused (this does not appear to be useful though)
perm = np.random.permutation(loaded_samples[filecount].shape[1])
#print("reshuffling {} rows".format(loaded_samples[filecount].shape[1]))
samples = loaded_samples[filecount][:,perm]
targets = loaded_targets[filecount][:,perm]
#samples = loaded_samples[filecount]
#targets = loaded_targets[filecount]
else:
print("Loading {} files".format(batchlen), end="")
samples = []
targets = []
for filename in filebatch:
with tf.gfile.Open(filename, mode='rb') as f:
# Load min max temperatures from CSV
print(".", end="")
temperatures = np.genfromtxt(f, delimiter=",", skip_header=True, usecols=[0,1,2,3],
converters = {0: lambda s: np.datetime64(s)})
dates = temperatures[:]['f0'] # dates
temperatures = np.stack([temperatures[:]['f1'], # min temperatures
temperatures[:]['f2'], # max temperatures
temperatures[:]['f3']], # interpolated
axis=1) # shape [18262, 3]
# Resample temperatures by averaging them across RESAMPLE_BY days
temperatures = np.reshape(adjust(temperatures, resample_by), [-1, resample_by, 3]) # [n, RESAMPLE_BY, 3]
temperatures = np.mean(temperatures, axis=1) # shape [n, 3]
# Shift temperature sequence to generate training targets
temp_targets = temperatures[n_forward:]
temperatures = temperatures[:temperatures.shape[0]-n_forward] # to allow n_forward=0
# Group temperatures into sequences of SEQLEN values
nseq = min(sequence_size, temperatures.shape[0]) # If not even full sequence, return everything
temp_targets = np.reshape(adjust(temp_targets, nseq), [-1, nseq, 3]) # [p, SEQLEN, 3]
temperatures = np.reshape(adjust(temperatures, nseq), [-1, nseq, 3]) # [p, SEQLEN, 3]
# do the same with dates, assume all dates identical in all files
dates = np.reshape(adjust(dates, resample_by), [-1, resample_by])
dates = dates[:dates.shape[0]-n_forward,0] # to allow n_forward=0
dates = np.reshape(adjust(dates, nseq), [-1, nseq]) # shape [p, SEQLEN]
# Add to batch, temperatures from one file forming a line across batches
samples.append(temperatures)
targets.append(temp_targets)
samples = np.stack(samples, axis=1) # shape [p, BATCHSIZE, SEQLEN, 3]
targets = np.stack(targets, axis=1) # shape [p, BATCHSIZE, SEQLEN, 3]
# keep them in memory
if keepinmem:
loaded_samples.update({filecount:samples})
loaded_targets.update({filecount:targets})
print()
for sample, target, date in zip(samples, targets, dates):
if tminmax:
sample = sample[:,:,0:2] # return (Tmin, Tmax) only
target = target[:,:,0:2] # return (Tmin, Tmax) only
yield sample, target, date, epoch, filecount
filebatch = []
batchlen = batch_size | [
"def",
"rnn_multistation_sampling_temperature_sequencer",
"(",
"filenames",
",",
"resample_by",
"=",
"1",
",",
"batch_size",
"=",
"sys",
".",
"maxsize",
",",
"sequence_size",
"=",
"sys",
".",
"maxsize",
",",
"n_forward",
"=",
"0",
",",
"nb_epochs",
"=",
"1",
",",
"tminmax",
"=",
"False",
",",
"keepinmem",
"=",
"True",
")",
":",
"#filenames = gfile.get_matching_files(filepattern)",
"#print('Pattern \"{}\" matches {} files'.format(filepattern, len(filenames)))",
"#filenames = np.array(filenames)",
"def",
"adjust",
"(",
"ary",
",",
"n",
")",
":",
"return",
"ary",
"[",
":",
"ary",
".",
"shape",
"[",
"0",
"]",
"//",
"n",
"*",
"n",
"]",
"loaded_samples",
"=",
"{",
"}",
"loaded_targets",
"=",
"{",
"}",
"for",
"epoch",
"in",
"range",
"(",
"nb_epochs",
")",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"filenames",
")",
"batchlen",
"=",
"len",
"(",
"filenames",
")",
"%",
"batch_size",
"# Remainder as",
"batchlen",
"=",
"batch_size",
"if",
"batchlen",
"==",
"0",
"else",
"batchlen",
"# first smaller batch.",
"filebatch",
"=",
"[",
"]",
"for",
"filecount",
",",
"filename",
"in",
"enumerate",
"(",
"filenames",
")",
":",
"filebatch",
".",
"append",
"(",
"filename",
")",
"if",
"len",
"(",
"filebatch",
")",
"==",
"batchlen",
":",
"if",
"filecount",
"in",
"loaded_samples",
":",
"# shuffle lines every time the data is reused (this does not appear to be useful though)",
"perm",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"loaded_samples",
"[",
"filecount",
"]",
".",
"shape",
"[",
"1",
"]",
")",
"#print(\"reshuffling {} rows\".format(loaded_samples[filecount].shape[1]))",
"samples",
"=",
"loaded_samples",
"[",
"filecount",
"]",
"[",
":",
",",
"perm",
"]",
"targets",
"=",
"loaded_targets",
"[",
"filecount",
"]",
"[",
":",
",",
"perm",
"]",
"#samples = loaded_samples[filecount]",
"#targets = loaded_targets[filecount]",
"else",
":",
"print",
"(",
"\"Loading {} files\"",
".",
"format",
"(",
"batchlen",
")",
",",
"end",
"=",
"\"\"",
")",
"samples",
"=",
"[",
"]",
"targets",
"=",
"[",
"]",
"for",
"filename",
"in",
"filebatch",
":",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"filename",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"# Load min max temperatures from CSV",
"print",
"(",
"\".\"",
",",
"end",
"=",
"\"\"",
")",
"temperatures",
"=",
"np",
".",
"genfromtxt",
"(",
"f",
",",
"delimiter",
"=",
"\",\"",
",",
"skip_header",
"=",
"True",
",",
"usecols",
"=",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
"]",
",",
"converters",
"=",
"{",
"0",
":",
"lambda",
"s",
":",
"np",
".",
"datetime64",
"(",
"s",
")",
"}",
")",
"dates",
"=",
"temperatures",
"[",
":",
"]",
"[",
"'f0'",
"]",
"# dates",
"temperatures",
"=",
"np",
".",
"stack",
"(",
"[",
"temperatures",
"[",
":",
"]",
"[",
"'f1'",
"]",
",",
"# min temperatures",
"temperatures",
"[",
":",
"]",
"[",
"'f2'",
"]",
",",
"# max temperatures",
"temperatures",
"[",
":",
"]",
"[",
"'f3'",
"]",
"]",
",",
"# interpolated",
"axis",
"=",
"1",
")",
"# shape [18262, 3]",
"# Resample temperatures by averaging them across RESAMPLE_BY days",
"temperatures",
"=",
"np",
".",
"reshape",
"(",
"adjust",
"(",
"temperatures",
",",
"resample_by",
")",
",",
"[",
"-",
"1",
",",
"resample_by",
",",
"3",
"]",
")",
"# [n, RESAMPLE_BY, 3]",
"temperatures",
"=",
"np",
".",
"mean",
"(",
"temperatures",
",",
"axis",
"=",
"1",
")",
"# shape [n, 3]",
"# Shift temperature sequence to generate training targets",
"temp_targets",
"=",
"temperatures",
"[",
"n_forward",
":",
"]",
"temperatures",
"=",
"temperatures",
"[",
":",
"temperatures",
".",
"shape",
"[",
"0",
"]",
"-",
"n_forward",
"]",
"# to allow n_forward=0",
"# Group temperatures into sequences of SEQLEN values",
"nseq",
"=",
"min",
"(",
"sequence_size",
",",
"temperatures",
".",
"shape",
"[",
"0",
"]",
")",
"# If not even full sequence, return everything",
"temp_targets",
"=",
"np",
".",
"reshape",
"(",
"adjust",
"(",
"temp_targets",
",",
"nseq",
")",
",",
"[",
"-",
"1",
",",
"nseq",
",",
"3",
"]",
")",
"# [p, SEQLEN, 3]",
"temperatures",
"=",
"np",
".",
"reshape",
"(",
"adjust",
"(",
"temperatures",
",",
"nseq",
")",
",",
"[",
"-",
"1",
",",
"nseq",
",",
"3",
"]",
")",
"# [p, SEQLEN, 3]",
"# do the same with dates, assume all dates identical in all files",
"dates",
"=",
"np",
".",
"reshape",
"(",
"adjust",
"(",
"dates",
",",
"resample_by",
")",
",",
"[",
"-",
"1",
",",
"resample_by",
"]",
")",
"dates",
"=",
"dates",
"[",
":",
"dates",
".",
"shape",
"[",
"0",
"]",
"-",
"n_forward",
",",
"0",
"]",
"# to allow n_forward=0",
"dates",
"=",
"np",
".",
"reshape",
"(",
"adjust",
"(",
"dates",
",",
"nseq",
")",
",",
"[",
"-",
"1",
",",
"nseq",
"]",
")",
"# shape [p, SEQLEN]",
"# Add to batch, temperatures from one file forming a line across batches",
"samples",
".",
"append",
"(",
"temperatures",
")",
"targets",
".",
"append",
"(",
"temp_targets",
")",
"samples",
"=",
"np",
".",
"stack",
"(",
"samples",
",",
"axis",
"=",
"1",
")",
"# shape [p, BATCHSIZE, SEQLEN, 3]",
"targets",
"=",
"np",
".",
"stack",
"(",
"targets",
",",
"axis",
"=",
"1",
")",
"# shape [p, BATCHSIZE, SEQLEN, 3]",
"# keep them in memory",
"if",
"keepinmem",
":",
"loaded_samples",
".",
"update",
"(",
"{",
"filecount",
":",
"samples",
"}",
")",
"loaded_targets",
".",
"update",
"(",
"{",
"filecount",
":",
"targets",
"}",
")",
"print",
"(",
")",
"for",
"sample",
",",
"target",
",",
"date",
"in",
"zip",
"(",
"samples",
",",
"targets",
",",
"dates",
")",
":",
"if",
"tminmax",
":",
"sample",
"=",
"sample",
"[",
":",
",",
":",
",",
"0",
":",
"2",
"]",
"# return (Tmin, Tmax) only",
"target",
"=",
"target",
"[",
":",
",",
":",
",",
"0",
":",
"2",
"]",
"# return (Tmin, Tmax) only",
"yield",
"sample",
",",
"target",
",",
"date",
",",
"epoch",
",",
"filecount",
"filebatch",
"=",
"[",
"]",
"batchlen",
"=",
"batch_size"
] | [
21,
0
] | [
112,
37
] | python | en | ['en', 'error', 'th'] | False |
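A consumption sketch for the generator above; the CSV file names are illustrative, and TensorFlow 1.x (for tf.gfile) plus numpy are assumed to be available:

    import numpy as np

    filenames = np.array(["station_0001.csv", "station_0002.csv"])  # illustrative CSVs
    for sample, target, dates, epoch, filecount in rnn_multistation_sampling_temperature_sequencer(
            filenames, resample_by=5, batch_size=2, sequence_size=128,
            n_forward=1, nb_epochs=2, tminmax=True):
        # reset the RNN state whenever epoch or filecount changes, then train on
        # sample -> target, each shaped [batch, sequence_size, 2] with tminmax=True
        pass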
rnn_minibatch_sequencer | (data, batch_size, sequence_size, nb_epochs) |
Divides the data into batches of sequences so that all the sequences in one batch
continue in the next batch. This is a generator that will keep returning batches
until the input data has been seen nb_epochs times. Sequences are continued even
between epochs, except for the one at the very end of the data.
The remainder at the end of the data that does not fit in a full batch is ignored.
:param data: the training sequence
:param batch_size: the size of a training minibatch
:param sequence_size: the unroll size of the RNN
:param nb_epochs: number of epochs to train on
:return:
x: one batch of training sequences
y: one batch of target sequences, i.e. training sequences shifted by 1
epoch: the current epoch number (starting at 0)
|
Divides the data into batches of sequences so that all the sequences in one batch
continue in the next batch. This is a generator that will keep returning batches
until the input data has been seen nb_epochs times. Sequences are continued even
between epochs, except for the one at the very end of the data.
The remainder at the end of the data that does not fit in a full batch is ignored.
:param data: the training sequence
:param batch_size: the size of a training minibatch
:param sequence_size: the unroll size of the RNN
:param nb_epochs: number of epochs to train on
:return:
x: one batch of training sequences
y: one batch of target sequences, i.e. training sequences shifted by 1
epoch: the current epoch number (starting at 0)
| def rnn_minibatch_sequencer(data, batch_size, sequence_size, nb_epochs):
"""
Divides the data into batches of sequences so that all the sequences in one batch
continue in the next batch. This is a generator that will keep returning batches
until the input data has been seen nb_epochs times. Sequences are continued even
between epochs, except for the one at the very end of the data.
The remainder at the end of the data that does not fit in a full batch is ignored.
:param data: the training sequence
:param batch_size: the size of a training minibatch
:param sequence_size: the unroll size of the RNN
:param nb_epochs: number of epochs to train on
:return:
x: one batch of training sequences
y: one batch of target sequences, i.e. training sequences shifted by 1
epoch: the current epoch number (starting at 0)
"""
data_len = data.shape[0]
# using (data_len-1) because we must provide for the sequence shifted by 1 too
nb_batches = (data_len - 1) // (batch_size * sequence_size)
assert nb_batches > 0, "Not enough data, even for a single batch. Try using a smaller batch_size."
rounded_data_len = nb_batches * batch_size * sequence_size
xdata = np.reshape(data[0:rounded_data_len], [batch_size, nb_batches * sequence_size])
ydata = np.reshape(data[1:rounded_data_len + 1], [batch_size, nb_batches * sequence_size])
whole_epochs = math.floor(nb_epochs)
frac_epoch = nb_epochs - whole_epochs
last_nb_batch = math.floor(frac_epoch * nb_batches)
for epoch in range(whole_epochs+1):
for batch in range(nb_batches if epoch < whole_epochs else last_nb_batch):
x = xdata[:, batch * sequence_size:(batch + 1) * sequence_size]
y = ydata[:, batch * sequence_size:(batch + 1) * sequence_size]
x = np.roll(x, -epoch, axis=0) # to continue the sequence from epoch to epoch (do not reset rnn state!)
y = np.roll(y, -epoch, axis=0)
yield x, y, epoch | [115, 0] | [149, 29] | python | en | ['en', 'error', 'th'] | False
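A quick usage sketch of the generator defined above (assuming numpy and math are imported as np/math in the same module); the toy data is hypothetical and only meant to show the returned shapes and the epoch counter.

data = np.arange(10000)                      # toy training sequence
for x, y, epoch in rnn_minibatch_sequencer(data, batch_size=32, sequence_size=30, nb_epochs=2):
    # x and y have shape [batch_size, sequence_size]; y is x shifted by one step
    print(epoch, x.shape, y.shape)
    break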
dumb_minibatch_sequencer | (data, batch_size, sequence_size, nb_epochs) |
Divides the data into batches of sequences in the simplest way: sequentially.
:param data: the training sequence
:param batch_size: the size of a training minibatch
:param sequence_size: the unroll size of the RNN
:param nb_epochs: number of epochs to train on
:return:
x: one batch of training sequences
y: one batch of target sequences, i.e. training sequences shifted by 1
epoch: the current epoch number (starting at 0)
|
Divides the data into batches of sequences in the simplest way: sequentially.
:param data: the training sequence
:param batch_size: the size of a training minibatch
:param sequence_size: the unroll size of the RNN
:param nb_epochs: number of epochs to train on
:return:
x: one batch of training sequences
y: one batch of target sequences, i.e. training sequences shifted by 1
epoch: the current epoch number (starting at 0)
| def dumb_minibatch_sequencer(data, batch_size, sequence_size, nb_epochs):
"""
Divides the data into batches of sequences in the simplest way: sequentially.
:param data: the training sequence
:param batch_size: the size of a training minibatch
:param sequence_size: the unroll size of the RNN
:param nb_epochs: number of epochs to train on
:return:
x: one batch of training sequences
y: one batch of target sequences, i.e. training sequences shifted by 1
epoch: the current epoch number (starting at 0)
"""
data_len = data.shape[0]
nb_batches = data_len // (batch_size * sequence_size)
rounded_size = nb_batches * batch_size * sequence_size
xdata = data[:rounded_size]
ydata = np.roll(data, -1)[:rounded_size]
xdata = np.reshape(xdata, [nb_batches, batch_size, sequence_size])
ydata = np.reshape(ydata, [nb_batches, batch_size, sequence_size])
for epoch in range(nb_epochs):
for batch in range(nb_batches):
yield xdata[batch,:,:], ydata[batch,:,:], epoch | [152, 0] | [174, 59] | python | en | ['en', 'en', 'pt'] | True
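For comparison, the simpler sequencer above can be driven the same way; unlike rnn_minibatch_sequencer it does not keep sequences contiguous from one batch to the next, so an RNN's state would have to be reset between batches (illustrative call on toy data).

data = np.arange(10000)
for x, y, epoch in dumb_minibatch_sequencer(data, batch_size=32, sequence_size=30, nb_epochs=1):
    print(epoch, x.shape, y.shape)           # (32, 30) batches; y is x shifted by one step
    break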
release_local | (local) | Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However, it is not possible to
release data held by proxies that way; one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
| Releases the contents of the local for the current context.
This makes it possible to use locals without a manager. | def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However, it is not possible to
release data held by proxies that way; one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__() | [29, 0] | [49, 29] | python | en | ['en', 'en', 'en'] | True
Local.__call__ | (self, proxy) | Create a proxy for a name. | Create a proxy for a name. | def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy) | [62, 4] | [64, 38] | python | en | ['en', 'en', 'en'] | True
LocalStack.push | (self, obj) | Pushes a new item to the stack | Pushes a new item to the stack | def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, "stack", None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv | [141, 4] | [147, 17] | python | en | ['en', 'en', 'en'] | True
LocalStack.pop | (self) | Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
| Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
| def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, "stack", None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop() | [149, 4] | [160, 30] | python | en | ['en', 'en', 'en'] | True
LocalStack.top | (self) | The topmost item on the stack. If the stack is empty,
`None` is returned.
| The topmost item on the stack. If the stack is empty,
`None` is returned.
| def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None | [163, 4] | [170, 23] | python | en | ['en', 'en', 'en'] | True
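Taken together, push, pop, and top give the stack its expected behaviour; a small usage sketch (assuming from werkzeug.local import LocalStack):

from werkzeug.local import LocalStack

_stack = LocalStack()
_stack.push({"request_id": 1})
print(_stack.top)        # {'request_id': 1}
_stack.push({"request_id": 2})
print(_stack.top)        # {'request_id': 2}
_stack.pop()
print(_stack.top)        # {'request_id': 1} again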
LocalManager.get_ident | (self) | Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
| Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals. | def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func() | [204, 4] | [215, 32] | python | en | ['en', 'en', 'en'] | True
LocalManager.cleanup | (self) | Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
| Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
| def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local) | [217, 4] | [222, 32] | python | en | ['en', 'en', 'en'] | True
LocalManager.make_middleware | (self, app) | Wrap a WSGI application so that cleaning up happens after
request end.
| Wrap a WSGI application so that cleaning up happens after
request end.
| def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application | [224, 4] | [232, 26] | python | en | ['en', 'en', 'en'] | True
LocalManager.middleware | (self, func) | Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
| Like `make_middleware` but for decorating functions. | def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func) | [234, 4] | [247, 63] | python | en | ['en', 'en', 'en'] | True
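A typical way to wire these pieces together in a WSGI app (the view logic is hypothetical; Local, LocalManager, and the middleware decorator are the objects documented above):

from werkzeug.local import Local, LocalManager

local = Local()
local_manager = LocalManager([local])

@local_manager.middleware
def application(environ, start_response):
    local.request_id = environ.get("HTTP_X_REQUEST_ID")   # stored per context
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]
# After each request the wrapping ClosingIterator invokes local_manager.cleanup(),
# releasing everything stored on `local` for that context.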
splitPackageName | (packageName) | e.g. given com.example.appname.library.widgetname
returns com
com.example
com.example.appname
etc.
| e.g. given com.example.appname.library.widgetname
returns com
com.example
com.example.appname
etc.
| def splitPackageName(packageName):
"""e.g. given com.example.appname.library.widgetname
returns com
com.example
com.example.appname
etc.
"""
result = []
end = packageName.find('.')
while end > 0:
result.append(packageName[0:end])
end = packageName.find('.', end+1)
result.append(packageName)
return result | [22, 0] | [35, 16] | python | en | ['en', 'en', 'pt'] | True
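For example, calling the function above on a dotted package name yields every prefix plus the full name:

print(splitPackageName("com.example.appname"))
# ['com', 'com.example', 'com.example.appname']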
make_distribution_for_install_requirement | (
install_req: InstallRequirement,
) | Returns a Distribution for the given InstallRequirement | Returns a Distribution for the given InstallRequirement | def make_distribution_for_install_requirement(
install_req: InstallRequirement,
) -> AbstractDistribution:
"""Returns a Distribution for the given InstallRequirement"""
# Editable requirements will always be source distributions. They use the
# legacy logic until we create a modern standard for them.
if install_req.editable:
return SourceDistribution(install_req)
# If it's a wheel, it's a WheelDistribution
if install_req.is_wheel:
return WheelDistribution(install_req)
# Otherwise, a SourceDistribution
return SourceDistribution(install_req) | [6, 0] | [20, 42] | python | en | ['en', 'en', 'en'] | True
estimator_1st_order.__init__ | (self, sims, fiducial_point, offsets, print_params=False, tf_dtype=tf.float32,
tikohnov=0.0) |
Initializes the first order estimator from a given evaluation of a function around a fiducial point
:param sims: prediction of the fiducial point and its perturbations, shape [2*n_params + 1, n_sims, n_output]
:param fiducial_point: the fiducial point of the expansion
:param offsets: the perturbations used
:param print_params: whether to print out the relevant parameters or not
:param tf_dtype: the tensorflow dtype (float32 or float64)
:param tikohnov: add Tikhonov regularization before inverting the Jacobian
|
Initializes the first order estimator from a given evaluation of a function around a fiducial point
:param sims: prediction of the fiducial point and its perturbations, shape [2*n_params + 1, n_sims, n_output]
:param fiducial_point: the fiducial point of the expansion
:param offsets: the perturbations used
:param print_params: whether to print out the relevant parameters or not
:param tf_dtype: the tensorflow dtype (float32 or float64)
:param tikohnov: add Tikhonov regularization before inverting the Jacobian
| def __init__(self, sims, fiducial_point, offsets, print_params=False, tf_dtype=tf.float32,
tikohnov=0.0):
"""
Initializes the first order estimator from a given evaluation of a function around a fiducial point
:param sims: prediction of the fiducial point and its perturbations, shape [2*n_params + 1, n_sims, n_output]
:param fiducial_point: the fiducial point of the expansion
:param offsets: the perturbations used
:param print_params: whether to print out the relevant parameters or not
:param tf_dtype: the tensorflow dtype (float32 or float64)
:param tikohnov: add Tikhonov regularization before inverting the Jacobian
"""
self.tf_dtype = tf_dtype
# dimension check
fidu_param = np.atleast_2d(fiducial_point)
n_param = fidu_param.shape[-1]
# get the fidu mean and cov
sims = sims.astype(np.float64)
fidu_sim = sims[0]
# set fidu sims
self.fidu_sim = fidu_sim.copy()
fidu_mean = np.mean(fidu_sim, axis=0)
fidu_cov = np.cov(fidu_sim, rowvar=False)
# repeat the beginning
fidu_sim = sims[0]
fidu_mean = np.mean(fidu_sim, axis=0)
fidu_cov = np.cov(fidu_sim, rowvar=False)
# First we calculate the first order derivatives
mean_derivatives = []
cov_derivatives = []
# to save the means
means = []
covs = []
for i in range(n_param):
# sims
sims_minus = sims[2 * (i + 1) - 1]
sims_plus = sims[2 * (i + 1)]
# means
mean_plus = np.mean(sims_plus, axis=0)
mean_minus = np.mean(sims_minus, axis=0)
# covariance
cov_plus = np.cov(sims_plus, rowvar=False)
cov_minus = np.cov(sims_minus, rowvar=False)
# save
means.append([mean_plus, mean_minus])
covs.append([cov_plus, cov_minus])
mean_derivatives.append((mean_plus - mean_minus) / (2.0 * offsets[i]))
cov_derivatives.append((cov_plus - cov_minus) / (2.0 * offsets[i]))
mean_jacobian = np.stack(mean_derivatives, axis=-1)
cov_jacobian = np.stack(cov_derivatives, axis=-1)
# calculate approximate fisher information
# F = inv(J^-1 cov J^T^-1) = J^T cov^-1 J
try:
inv_cov = np.linalg.inv(fidu_cov)
except np.linalg.LinAlgError:
print("Covariance appears to be singular, using pseudo inverse...")
inv_cov = np.linalg.pinv(fidu_cov)
fisher = np.einsum('ij,jk->ik', inv_cov, mean_jacobian)
fisher = np.einsum('ji,jk->ik', mean_jacobian, fisher)
self.fisher = fisher
# add regularization
mean_jacobian += tikohnov*np.eye(mean_jacobian.shape[0], mean_jacobian.shape[1])
# create a first order correction (we have pinv here as jac does not have to be square...)
if mean_jacobian.shape[0] == mean_jacobian.shape[1]:
inv_jac = np.linalg.inv(mean_jacobian)
else:
inv_jac = np.linalg.pinv(mean_jacobian)
# set the other params
self.mean_fidu = np.atleast_2d(fidu_mean)
self.fidu_point = fidu_param
self.inv_cov = inv_cov
self.inv_jac = inv_jac
self.fidu_point_tf = tf.constant(self.fidu_point, dtype=self.tf_dtype)
self.inv_jac_tf = tf.constant(self.inv_jac, dtype=self.tf_dtype)
self.mean_fidu_tf = tf.constant(self.mean_fidu, dtype=self.tf_dtype)
# some info
if print_params:
print("Creating first order estimator: fidu + J^-1(x-mu)")
print("fidu: {}".format(self.fidu_point))
print("J: {}".format(mean_jacobian))
print("J^-1: {}".format(self.inv_jac))
print("mu: {}".format(self.mean_fidu))
print("\n Fiducial covariance: {}".format(fidu_cov))
print("\n Derivative covariance: {}".format(cov_jacobian)) | [10, 4] | [114, 70] | python | en | ['en', 'error', 'th'] | False
estimator_1st_order.__call__ | (self, predictions, numpy=False) |
Given some predictions, it estimates the underlying parameters to first order
:param predictions: The predictions i.e. summaries [n_summaries, n_output]
:param numpy: perform the calculation in numpy instead of tensorflow
:return: the estimates [n_summaries, n_output]
|
Given some predictions, it estimates the underlying parameters to first order
:param predictions: The predictions i.e. summaries [n_summaries, n_output]
:param numpy: perform the calculation in numpy instead of tensorflow
:return: the estimates [n_summaries, n_output]
| def __call__(self, predictions, numpy=False):
"""
Given some predictions, it estimates the underlying parameters to first order
:param predictions: The predictions i.e. summaries [n_summaries, n_output]
:param numpy: perform the calculation in numpy instead of tensorflow
:return: the estimates [n_summaries, n_output]
"""
if numpy:
return self.fidu_point + np.einsum("ij,aj->ai", self.inv_jac, predictions - self.mean_fidu)
else:
return self.fidu_point_tf + tf.einsum("ij,aj->ai", self.inv_jac_tf, predictions - self.mean_fidu_tf) | [116, 4] | [127, 112] | python | en | ['en', 'error', 'th'] | False
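A small end-to-end sketch of how the two methods above fit together. The simulated summaries here are random stand-ins whose shapes follow the docstring (sims is [2*n_params + 1, n_sims, n_output]), so the recovered parameters are meaningless and only illustrate the call pattern.

import numpy as np
import tensorflow as tf

n_params, n_sims, n_output = 2, 500, 2
fidu = np.array([0.3, 0.8])           # hypothetical fiducial point
offsets = [0.01, 0.02]                # hypothetical perturbation sizes

# fiducial simulations plus (minus, plus) perturbations for each parameter
sims = np.random.randn(2 * n_params + 1, n_sims, n_output)

est = estimator_1st_order(sims, fidu, offsets, print_params=False)
summaries = np.random.randn(10, n_output).astype(np.float32)
params_np = est(summaries, numpy=True)   # NumPy path, shape [10, n_params]
params_tf = est(summaries)               # TensorFlow path, same shape
print(params_np.shape)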