Dataset columns:
    identifier: string (lengths 1 to 155)
    parameters: string (lengths 2 to 6.09k)
    docstring: string (lengths 11 to 63.4k)
    docstring_summary: string (lengths 0 to 63.4k)
    function: string (lengths 29 to 99.8k)
    function_tokens: sequence
    start_point: sequence
    end_point: sequence
    language: string (1 class)
    docstring_language: string (lengths 2 to 7)
    docstring_language_predictions: string (lengths 18 to 23)
    is_langid_reliable: string (2 classes)
parse_date
(value)
Parse one of the following date formats into a datetime object: .. sourcecode:: text Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format If parsing fails the return value is `None`. :param value: a string with a supported date format. :return: a :class:`datetime.datetime` object.
Parse one of the following date formats into a datetime object:
def parse_date(value): """Parse one of the following date formats into a datetime object: .. sourcecode:: text Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format If parsing fails the return value is `None`. :param value: a string with a supported date format. :return: a :class:`datetime.datetime` object. """ if value: t = parsedate_tz(value.strip()) if t is not None: try: year = t[0] # unfortunately that function does not tell us if two digit # years were part of the string, or if they were prefixed # with two zeroes. So what we do is to assume that 69-99 # refer to 1900, and everything below to 2000 if year >= 0 and year <= 68: year += 2000 elif year >= 69 and year <= 99: year += 1900 return datetime(*((year,) + t[1:7])) - timedelta(seconds=t[-1] or 0) except (ValueError, OverflowError): return None
[ "def", "parse_date", "(", "value", ")", ":", "if", "value", ":", "t", "=", "parsedate_tz", "(", "value", ".", "strip", "(", ")", ")", "if", "t", "is", "not", "None", ":", "try", ":", "year", "=", "t", "[", "0", "]", "# unfortunately that function does not tell us if two digit", "# years were part of the string, or if they were prefixed", "# with two zeroes. So what we do is to assume that 69-99", "# refer to 1900, and everything below to 2000", "if", "year", ">=", "0", "and", "year", "<=", "68", ":", "year", "+=", "2000", "elif", "year", ">=", "69", "and", "year", "<=", "99", ":", "year", "+=", "1900", "return", "datetime", "(", "*", "(", "(", "year", ",", ")", "+", "t", "[", "1", ":", "7", "]", ")", ")", "-", "timedelta", "(", "seconds", "=", "t", "[", "-", "1", "]", "or", "0", ")", "except", "(", "ValueError", ",", "OverflowError", ")", ":", "return", "None" ]
[ 779, 0 ]
[ 808, 27 ]
python
en
['en', 'en', 'en']
True
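A minimal usage sketch for the parse_date row above, assuming werkzeug is installed (in the 0.x line shown here the helper lives in werkzeug.http):

    from werkzeug.http import parse_date

    # RFC 822 / RFC 1123 form; the RFC 850 and asctime() forms parse the same way.
    print(parse_date("Sun, 06 Nov 1994 08:49:37 GMT"))  # 1994-11-06 08:49:37
    print(parse_date("not a date"))                     # None on failure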
_dump_date
(d, delim)
Used for `http_date` and `cookie_date`.
Used for `http_date` and `cookie_date`.
def _dump_date(d, delim): """Used for `http_date` and `cookie_date`.""" if d is None: d = gmtime() elif isinstance(d, datetime): d = d.utctimetuple() elif isinstance(d, (integer_types, float)): d = gmtime(d) return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % ( ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday], d.tm_mday, delim, ( "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", )[d.tm_mon - 1], delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec, )
[ "def", "_dump_date", "(", "d", ",", "delim", ")", ":", "if", "d", "is", "None", ":", "d", "=", "gmtime", "(", ")", "elif", "isinstance", "(", "d", ",", "datetime", ")", ":", "d", "=", "d", ".", "utctimetuple", "(", ")", "elif", "isinstance", "(", "d", ",", "(", "integer_types", ",", "float", ")", ")", ":", "d", "=", "gmtime", "(", "d", ")", "return", "\"%s, %02d%s%s%s%s %02d:%02d:%02d GMT\"", "%", "(", "(", "\"Mon\"", ",", "\"Tue\"", ",", "\"Wed\"", ",", "\"Thu\"", ",", "\"Fri\"", ",", "\"Sat\"", ",", "\"Sun\"", ")", "[", "d", ".", "tm_wday", "]", ",", "d", ".", "tm_mday", ",", "delim", ",", "(", "\"Jan\"", ",", "\"Feb\"", ",", "\"Mar\"", ",", "\"Apr\"", ",", "\"May\"", ",", "\"Jun\"", ",", "\"Jul\"", ",", "\"Aug\"", ",", "\"Sep\"", ",", "\"Oct\"", ",", "\"Nov\"", ",", "\"Dec\"", ",", ")", "[", "d", ".", "tm_mon", "-", "1", "]", ",", "delim", ",", "str", "(", "d", ".", "tm_year", ")", ",", "d", ".", "tm_hour", ",", "d", ".", "tm_min", ",", "d", ".", "tm_sec", ",", ")" ]
[ 811, 0 ]
[ 842, 5 ]
python
en
['en', 'en', 'en']
True
cookie_date
(expires=None)
Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. :param expires: If provided that date is used, otherwise the current time.
Formats the time to ensure compatibility with Netscape's cookie standard.
def cookie_date(expires=None): """Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. :param expires: If provided that date is used, otherwise the current time. """ return _dump_date(expires, "-")
[ "def", "cookie_date", "(", "expires", "=", "None", ")", ":", "return", "_dump_date", "(", "expires", ",", "\"-\"", ")" ]
[ 845, 0 ]
[ 857, 35 ]
python
en
['en', 'en', 'en']
True
http_date
(timestamp=None)
Formats the time to match the RFC1123 date format. Accepts a floating point number expressed in seconds since the epoch, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. :param timestamp: If provided that date is used, otherwise the current time.
Formats the time to match the RFC1123 date format.
def http_date(timestamp=None): """Formats the time to match the RFC1123 date format. Accepts a floating point number expressed in seconds since the epoch, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. :param timestamp: If provided that date is used, otherwise the current time. """ return _dump_date(timestamp, " ")
[ "def", "http_date", "(", "timestamp", "=", "None", ")", ":", "return", "_dump_date", "(", "timestamp", ",", "\" \"", ")" ]
[ 860, 0 ]
[ 871, 37 ]
python
en
['en', 'en', 'en']
True
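A round-trip sketch tying http_date, cookie_date and parse_date together (assumes werkzeug; the sample datetime is arbitrary):

    from datetime import datetime
    from werkzeug.http import cookie_date, http_date, parse_date

    now = datetime(2019, 1, 15, 12, 30, 0)
    print(http_date(now))    # Tue, 15 Jan 2019 12:30:00 GMT  (space delimiter)
    print(cookie_date(now))  # Tue, 15-Jan-2019 12:30:00 GMT  (dash delimiter)
    assert parse_date(http_date(now)) == now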
parse_age
(value=None)
Parses a base-10 integer count of seconds into a timedelta. If parsing fails, the return value is `None`. :param value: a string consisting of an integer represented in base-10 :return: a :class:`datetime.timedelta` object or `None`.
Parses a base-10 integer count of seconds into a timedelta.
def parse_age(value=None): """Parses a base-10 integer count of seconds into a timedelta. If parsing fails, the return value is `None`. :param value: a string consisting of an integer represented in base-10 :return: a :class:`datetime.timedelta` object or `None`. """ if not value: return None try: seconds = int(value) except ValueError: return None if seconds < 0: return None try: return timedelta(seconds=seconds) except OverflowError: return None
[ "def", "parse_age", "(", "value", "=", "None", ")", ":", "if", "not", "value", ":", "return", "None", "try", ":", "seconds", "=", "int", "(", "value", ")", "except", "ValueError", ":", "return", "None", "if", "seconds", "<", "0", ":", "return", "None", "try", ":", "return", "timedelta", "(", "seconds", "=", "seconds", ")", "except", "OverflowError", ":", "return", "None" ]
[ 874, 0 ]
[ 893, 19 ]
python
en
['en', 'en', 'en']
True
dump_age
(age=None)
Formats the duration as a base-10 integer. :param age: should be an integer number of seconds, a :class:`datetime.timedelta` object, or, if the age is unknown, `None` (default).
Formats the duration as a base-10 integer.
def dump_age(age=None): """Formats the duration as a base-10 integer. :param age: should be an integer number of seconds, a :class:`datetime.timedelta` object, or, if the age is unknown, `None` (default). """ if age is None: return if isinstance(age, timedelta): # do the equivalent of Python 2.7's timedelta.total_seconds(), # but disregarding fractional seconds age = age.seconds + (age.days * 24 * 3600) age = int(age) if age < 0: raise ValueError("age cannot be negative") return str(age)
[ "def", "dump_age", "(", "age", "=", "None", ")", ":", "if", "age", "is", "None", ":", "return", "if", "isinstance", "(", "age", ",", "timedelta", ")", ":", "# do the equivalent of Python 2.7's timedelta.total_seconds(),", "# but disregarding fractional seconds", "age", "=", "age", ".", "seconds", "+", "(", "age", ".", "days", "*", "24", "*", "3600", ")", "age", "=", "int", "(", "age", ")", "if", "age", "<", "0", ":", "raise", "ValueError", "(", "\"age cannot be negative\"", ")", "return", "str", "(", "age", ")" ]
[ 896, 0 ]
[ 914, 19 ]
python
en
['en', 'en', 'en']
True
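A short sketch of the parse_age/dump_age pair (assumes werkzeug):

    from datetime import timedelta
    from werkzeug.http import dump_age, parse_age

    print(dump_age(timedelta(hours=1)))  # '3600'; fractional seconds are dropped
    print(parse_age("3600"))             # 1:00:00, as a timedelta
    print(parse_age("-5"))               # None: negative counts are rejected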
is_resource_modified
( environ, etag=None, data=None, last_modified=None, ignore_if_range=True )
Convenience method for conditional requests. :param environ: the WSGI environment of the request to be checked. :param etag: the etag for the response for comparison. :param data: or alternatively the data of the response to automatically generate an etag using :func:`generate_etag`. :param last_modified: an optional date of the last modification. :param ignore_if_range: If `False`, `If-Range` header will be taken into account. :return: `True` if the resource was modified, otherwise `False`.
Convenience method for conditional requests.
def is_resource_modified( environ, etag=None, data=None, last_modified=None, ignore_if_range=True ): """Convenience method for conditional requests. :param environ: the WSGI environment of the request to be checked. :param etag: the etag for the response for comparison. :param data: or alternatively the data of the response to automatically generate an etag using :func:`generate_etag`. :param last_modified: an optional date of the last modification. :param ignore_if_range: If `False`, `If-Range` header will be taken into account. :return: `True` if the resource was modified, otherwise `False`. """ if etag is None and data is not None: etag = generate_etag(data) elif data is not None: raise TypeError("both data and etag given") if environ["REQUEST_METHOD"] not in ("GET", "HEAD"): return False unmodified = False if isinstance(last_modified, string_types): last_modified = parse_date(last_modified) # ensure that microsecond is zero because the HTTP spec does not transmit # that either and we might have some false positives. See issue #39 if last_modified is not None: last_modified = last_modified.replace(microsecond=0) if_range = None if not ignore_if_range and "HTTP_RANGE" in environ: # https://tools.ietf.org/html/rfc7233#section-3.2 # A server MUST ignore an If-Range header field received in a request # that does not contain a Range header field. if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE")) if if_range is not None and if_range.date is not None: modified_since = if_range.date else: modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE")) if modified_since and last_modified and last_modified <= modified_since: unmodified = True if etag: etag, _ = unquote_etag(etag) if if_range is not None and if_range.etag is not None: unmodified = parse_etags(if_range.etag).contains(etag) else: if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH")) if if_none_match: # https://tools.ietf.org/html/rfc7232#section-3.2 # "A recipient MUST use the weak comparison function when comparing # entity-tags for If-None-Match" unmodified = if_none_match.contains_weak(etag) # https://tools.ietf.org/html/rfc7232#section-3.1 # "Origin server MUST use the strong comparison function when # comparing entity-tags for If-Match" if_match = parse_etags(environ.get("HTTP_IF_MATCH")) if if_match: unmodified = not if_match.is_strong(etag) return not unmodified
[ "def", "is_resource_modified", "(", "environ", ",", "etag", "=", "None", ",", "data", "=", "None", ",", "last_modified", "=", "None", ",", "ignore_if_range", "=", "True", ")", ":", "if", "etag", "is", "None", "and", "data", "is", "not", "None", ":", "etag", "=", "generate_etag", "(", "data", ")", "elif", "data", "is", "not", "None", ":", "raise", "TypeError", "(", "\"both data and etag given\"", ")", "if", "environ", "[", "\"REQUEST_METHOD\"", "]", "not", "in", "(", "\"GET\"", ",", "\"HEAD\"", ")", ":", "return", "False", "unmodified", "=", "False", "if", "isinstance", "(", "last_modified", ",", "string_types", ")", ":", "last_modified", "=", "parse_date", "(", "last_modified", ")", "# ensure that microsecond is zero because the HTTP spec does not transmit", "# that either and we might have some false positives. See issue #39", "if", "last_modified", "is", "not", "None", ":", "last_modified", "=", "last_modified", ".", "replace", "(", "microsecond", "=", "0", ")", "if_range", "=", "None", "if", "not", "ignore_if_range", "and", "\"HTTP_RANGE\"", "in", "environ", ":", "# https://tools.ietf.org/html/rfc7233#section-3.2", "# A server MUST ignore an If-Range header field received in a request", "# that does not contain a Range header field.", "if_range", "=", "parse_if_range_header", "(", "environ", ".", "get", "(", "\"HTTP_IF_RANGE\"", ")", ")", "if", "if_range", "is", "not", "None", "and", "if_range", ".", "date", "is", "not", "None", ":", "modified_since", "=", "if_range", ".", "date", "else", ":", "modified_since", "=", "parse_date", "(", "environ", ".", "get", "(", "\"HTTP_IF_MODIFIED_SINCE\"", ")", ")", "if", "modified_since", "and", "last_modified", "and", "last_modified", "<=", "modified_since", ":", "unmodified", "=", "True", "if", "etag", ":", "etag", ",", "_", "=", "unquote_etag", "(", "etag", ")", "if", "if_range", "is", "not", "None", "and", "if_range", ".", "etag", "is", "not", "None", ":", "unmodified", "=", "parse_etags", "(", "if_range", ".", "etag", ")", ".", "contains", "(", "etag", ")", "else", ":", "if_none_match", "=", "parse_etags", "(", "environ", ".", "get", "(", "\"HTTP_IF_NONE_MATCH\"", ")", ")", "if", "if_none_match", ":", "# https://tools.ietf.org/html/rfc7232#section-3.2", "# \"A recipient MUST use the weak comparison function when comparing", "# entity-tags for If-None-Match\"", "unmodified", "=", "if_none_match", ".", "contains_weak", "(", "etag", ")", "# https://tools.ietf.org/html/rfc7232#section-3.1", "# \"Origin server MUST use the strong comparison function when", "# comparing entity-tags for If-Match\"", "if_match", "=", "parse_etags", "(", "environ", ".", "get", "(", "\"HTTP_IF_MATCH\"", ")", ")", "if", "if_match", ":", "unmodified", "=", "not", "if_match", ".", "is_strong", "(", "etag", ")", "return", "not", "unmodified" ]
[ 917, 0 ]
[ 981, 25 ]
python
en
['en', 'en', 'en']
True
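A hypothetical conditional-GET check with is_resource_modified; the environ dict below is hand-built for illustration:

    from werkzeug.http import is_resource_modified

    environ = {
        "REQUEST_METHOD": "GET",
        "HTTP_IF_NONE_MATCH": '"abc123"',
    }
    # The client's If-None-Match matches the response etag, so the
    # resource counts as unmodified and the function returns False.
    print(is_resource_modified(environ, etag="abc123"))  # False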
remove_entity_headers
(headers, allowed=("expires", "content-location"))
Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent. .. versionchanged:: 0.5 added `allowed` parameter. :param headers: a list or :class:`Headers` object. :param allowed: a list of headers that should still be allowed even though they are entity headers.
Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent.
def remove_entity_headers(headers, allowed=("expires", "content-location")): """Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent. .. versionchanged:: 0.5 added `allowed` parameter. :param headers: a list or :class:`Headers` object. :param allowed: a list of headers that should still be allowed even though they are entity headers. """ allowed = set(x.lower() for x in allowed) headers[:] = [ (key, value) for key, value in headers if not is_entity_header(key) or key.lower() in allowed ]
[ "def", "remove_entity_headers", "(", "headers", ",", "allowed", "=", "(", "\"expires\"", ",", "\"content-location\"", ")", ")", ":", "allowed", "=", "set", "(", "x", ".", "lower", "(", ")", "for", "x", "in", "allowed", ")", "headers", "[", ":", "]", "=", "[", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "headers", "if", "not", "is_entity_header", "(", "key", ")", "or", "key", ".", "lower", "(", ")", "in", "allowed", "]" ]
[ 984, 0 ]
[ 1002, 5 ]
python
en
['en', 'en', 'en']
True
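An in-place sketch of remove_entity_headers (assumes werkzeug; the header values are made up):

    from werkzeug.http import remove_entity_headers

    headers = [
        ("Content-Type", "text/html"),  # entity header: removed
        ("Expires", "0"),               # entity header, but allowed by default
        ("Connection", "close"),        # not an entity header: kept
    ]
    remove_entity_headers(headers)      # operates in-place
    print(headers)  # [('Expires', '0'), ('Connection', 'close')]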
remove_hop_by_hop_headers
(headers)
Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place. .. versionadded:: 0.5 :param headers: a list or :class:`Headers` object.
Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place.
def remove_hop_by_hop_headers(headers): """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place. .. versionadded:: 0.5 :param headers: a list or :class:`Headers` object. """ headers[:] = [ (key, value) for key, value in headers if not is_hop_by_hop_header(key) ]
[ "def", "remove_hop_by_hop_headers", "(", "headers", ")", ":", "headers", "[", ":", "]", "=", "[", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "headers", "if", "not", "is_hop_by_hop_header", "(", "key", ")", "]" ]
[ 1005, 0 ]
[ 1015, 5 ]
python
en
['en', 'en', 'en']
True
is_entity_header
(header)
Check if a header is an entity header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise.
Check if a header is an entity header.
def is_entity_header(header): """Check if a header is an entity header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise. """ return header.lower() in _entity_headers
[ "def", "is_entity_header", "(", "header", ")", ":", "return", "header", ".", "lower", "(", ")", "in", "_entity_headers" ]
[ 1018, 0 ]
[ 1026, 44 ]
python
en
['en', 'en', 'en']
True
is_hop_by_hop_header
(header)
Check if a header is an HTTP/1.1 "Hop-by-Hop" header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
def is_hop_by_hop_header(header): """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise. """ return header.lower() in _hop_by_hop_headers
[ "def", "is_hop_by_hop_header", "(", "header", ")", ":", "return", "header", ".", "lower", "(", ")", "in", "_hop_by_hop_headers" ]
[ 1029, 0 ]
[ 1037, 48 ]
python
en
['en', 'en', 'en']
True
parse_cookie
(header, charset="utf-8", errors="replace", cls=None)
Parse a cookie. Either from a string or WSGI environ. By default encoding errors are replaced (``errors`` defaults to ``'replace'``). If you want a different behavior you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. .. versionchanged:: 0.5 This function now returns a :class:`TypeConversionDict` instead of a regular dict. The `cls` parameter was added. :param header: the header to be used to parse the cookie. Alternatively this can be a WSGI environment. :param charset: the charset for the cookie values. :param errors: the error behavior for the charset decoding. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`TypeConversionDict` is used.
Parse a cookie. Either from a string or WSGI environ.
def parse_cookie(header, charset="utf-8", errors="replace", cls=None): """Parse a cookie. Either from a string or WSGI environ. By default encoding errors are replaced (``errors`` defaults to ``'replace'``). If you want a different behavior you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. .. versionchanged:: 0.5 This function now returns a :class:`TypeConversionDict` instead of a regular dict. The `cls` parameter was added. :param header: the header to be used to parse the cookie. Alternatively this can be a WSGI environment. :param charset: the charset for the cookie values. :param errors: the error behavior for the charset decoding. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`TypeConversionDict` is used. """ if isinstance(header, dict): header = header.get("HTTP_COOKIE", "") elif header is None: header = "" # If the value is an unicode string it's mangled through latin1. This # is done because on PEP 3333 on Python 3 all headers are assumed latin1 # which however is incorrect for cookies, which are sent in page encoding. # As a result we if isinstance(header, text_type): header = header.encode("latin1", "replace") if cls is None: cls = TypeConversionDict def _parse_pairs(): for key, val in _cookie_parse_impl(header): key = to_unicode(key, charset, errors, allow_none_charset=True) if not key: continue val = to_unicode(val, charset, errors, allow_none_charset=True) yield try_coerce_native(key), val return cls(_parse_pairs())
[ "def", "parse_cookie", "(", "header", ",", "charset", "=", "\"utf-8\"", ",", "errors", "=", "\"replace\"", ",", "cls", "=", "None", ")", ":", "if", "isinstance", "(", "header", ",", "dict", ")", ":", "header", "=", "header", ".", "get", "(", "\"HTTP_COOKIE\"", ",", "\"\"", ")", "elif", "header", "is", "None", ":", "header", "=", "\"\"", "# If the value is an unicode string it's mangled through latin1. This", "# is done because on PEP 3333 on Python 3 all headers are assumed latin1", "# which however is incorrect for cookies, which are sent in page encoding.", "# As a result we", "if", "isinstance", "(", "header", ",", "text_type", ")", ":", "header", "=", "header", ".", "encode", "(", "\"latin1\"", ",", "\"replace\"", ")", "if", "cls", "is", "None", ":", "cls", "=", "TypeConversionDict", "def", "_parse_pairs", "(", ")", ":", "for", "key", ",", "val", "in", "_cookie_parse_impl", "(", "header", ")", ":", "key", "=", "to_unicode", "(", "key", ",", "charset", ",", "errors", ",", "allow_none_charset", "=", "True", ")", "if", "not", "key", ":", "continue", "val", "=", "to_unicode", "(", "val", ",", "charset", ",", "errors", ",", "allow_none_charset", "=", "True", ")", "yield", "try_coerce_native", "(", "key", ")", ",", "val", "return", "cls", "(", "_parse_pairs", "(", ")", ")" ]
[ 1040, 0 ]
[ 1082, 30 ]
python
en
['en', 'en', 'en']
True
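A quick parse_cookie sketch, showing both a raw header string and a WSGI environ as input (assumes werkzeug):

    from werkzeug.http import parse_cookie

    cookies = parse_cookie("session=abc123; lang=en")
    print(cookies["session"])  # 'abc123'
    # A WSGI environ works too; the HTTP_COOKIE key is read from it.
    print(parse_cookie({"HTTP_COOKIE": "lang=en"})["lang"])  # 'en'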
dump_cookie
( key, value="", max_age=None, expires=None, path="/", domain=None, secure=False, httponly=False, charset="utf-8", sync_expires=True, max_size=4093, samesite=None, )
Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. :param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. :param max_size: Warn if the final header value exceeds this size. The default, 4093, should be safely `supported by most browsers <cookie_>`_. Set to 0 to disable this check. :param samesite: Limits the scope of the cookie such that it will only be attached to requests if those requests are "same-site". .. _`cookie`: http://browsercookielimits.squawky.net/
Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too.
def dump_cookie( key, value="", max_age=None, expires=None, path="/", domain=None, secure=False, httponly=False, charset="utf-8", sync_expires=True, max_size=4093, samesite=None, ): """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. :param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. :param max_size: Warn if the final header value exceeds this size. The default, 4093, should be safely `supported by most browsers <cookie_>`_. Set to 0 to disable this check. :param samesite: Limits the scope of the cookie such that it will only be attached to requests if those requests are "same-site". .. _`cookie`: http://browsercookielimits.squawky.net/ """ key = to_bytes(key, charset) value = to_bytes(value, charset) if path is not None: from .urls import iri_to_uri path = iri_to_uri(path, charset) domain = _make_cookie_domain(domain) if isinstance(max_age, timedelta): max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds if expires is not None: if not isinstance(expires, string_types): expires = cookie_date(expires) elif max_age is not None and sync_expires: expires = to_bytes(cookie_date(time() + max_age)) samesite = samesite.title() if samesite else None if samesite not in ("Strict", "Lax", None): raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None") buf = [key + b"=" + _cookie_quote(value)] # XXX: In theory all of these parameters that are not marked with `None` # should be quoted. Because stdlib did not quote it before I did not # want to introduce quoting there now. for k, v, q in ( (b"Domain", domain, True), (b"Expires", expires, False), (b"Max-Age", max_age, False), (b"Secure", secure, None), (b"HttpOnly", httponly, None), (b"Path", path, False), (b"SameSite", samesite, False), ): if q is None: if v: buf.append(k) continue if v is None: continue tmp = bytearray(k) if not isinstance(v, (bytes, bytearray)): v = to_bytes(text_type(v), charset) if q: v = _cookie_quote(v) tmp += b"=" + v buf.append(bytes(tmp)) # The return value will be an incorrectly encoded latin1 header on # Python 3 for consistency with the headers object and a bytestring # on Python 2 because that's how the API makes more sense. rv = b"; ".join(buf) if not PY2: rv = rv.decode("latin1") # Warn if the final value of the cookie is less than the limit. If the # cookie is too large, then it may be silently ignored, which can be quite # hard to debug. cookie_size = len(rv) if max_size and cookie_size > max_size: value_size = len(value) warnings.warn( 'The "{key}" cookie is too large: the value was {value_size} bytes' " but the header required {extra_size} extra bytes. The final size" " was {cookie_size} bytes but the limit is {max_size} bytes." " Browsers may silently ignore cookies larger than this.".format( key=key, value_size=value_size, extra_size=cookie_size - value_size, cookie_size=cookie_size, max_size=max_size, ), stacklevel=2, ) return rv
[ "def", "dump_cookie", "(", "key", ",", "value", "=", "\"\"", ",", "max_age", "=", "None", ",", "expires", "=", "None", ",", "path", "=", "\"/\"", ",", "domain", "=", "None", ",", "secure", "=", "False", ",", "httponly", "=", "False", ",", "charset", "=", "\"utf-8\"", ",", "sync_expires", "=", "True", ",", "max_size", "=", "4093", ",", "samesite", "=", "None", ",", ")", ":", "key", "=", "to_bytes", "(", "key", ",", "charset", ")", "value", "=", "to_bytes", "(", "value", ",", "charset", ")", "if", "path", "is", "not", "None", ":", "from", ".", "urls", "import", "iri_to_uri", "path", "=", "iri_to_uri", "(", "path", ",", "charset", ")", "domain", "=", "_make_cookie_domain", "(", "domain", ")", "if", "isinstance", "(", "max_age", ",", "timedelta", ")", ":", "max_age", "=", "(", "max_age", ".", "days", "*", "60", "*", "60", "*", "24", ")", "+", "max_age", ".", "seconds", "if", "expires", "is", "not", "None", ":", "if", "not", "isinstance", "(", "expires", ",", "string_types", ")", ":", "expires", "=", "cookie_date", "(", "expires", ")", "elif", "max_age", "is", "not", "None", "and", "sync_expires", ":", "expires", "=", "to_bytes", "(", "cookie_date", "(", "time", "(", ")", "+", "max_age", ")", ")", "samesite", "=", "samesite", ".", "title", "(", ")", "if", "samesite", "else", "None", "if", "samesite", "not", "in", "(", "\"Strict\"", ",", "\"Lax\"", ",", "None", ")", ":", "raise", "ValueError", "(", "\"invalid SameSite value; must be 'Strict', 'Lax' or None\"", ")", "buf", "=", "[", "key", "+", "b\"=\"", "+", "_cookie_quote", "(", "value", ")", "]", "# XXX: In theory all of these parameters that are not marked with `None`", "# should be quoted. Because stdlib did not quote it before I did not", "# want to introduce quoting there now.", "for", "k", ",", "v", ",", "q", "in", "(", "(", "b\"Domain\"", ",", "domain", ",", "True", ")", ",", "(", "b\"Expires\"", ",", "expires", ",", "False", ")", ",", "(", "b\"Max-Age\"", ",", "max_age", ",", "False", ")", ",", "(", "b\"Secure\"", ",", "secure", ",", "None", ")", ",", "(", "b\"HttpOnly\"", ",", "httponly", ",", "None", ")", ",", "(", "b\"Path\"", ",", "path", ",", "False", ")", ",", "(", "b\"SameSite\"", ",", "samesite", ",", "False", ")", ",", ")", ":", "if", "q", "is", "None", ":", "if", "v", ":", "buf", ".", "append", "(", "k", ")", "continue", "if", "v", "is", "None", ":", "continue", "tmp", "=", "bytearray", "(", "k", ")", "if", "not", "isinstance", "(", "v", ",", "(", "bytes", ",", "bytearray", ")", ")", ":", "v", "=", "to_bytes", "(", "text_type", "(", "v", ")", ",", "charset", ")", "if", "q", ":", "v", "=", "_cookie_quote", "(", "v", ")", "tmp", "+=", "b\"=\"", "+", "v", "buf", ".", "append", "(", "bytes", "(", "tmp", ")", ")", "# The return value will be an incorrectly encoded latin1 header on", "# Python 3 for consistency with the headers object and a bytestring", "# on Python 2 because that's how the API makes more sense.", "rv", "=", "b\"; \"", ".", "join", "(", "buf", ")", "if", "not", "PY2", ":", "rv", "=", "rv", ".", "decode", "(", "\"latin1\"", ")", "# Warn if the final value of the cookie is less than the limit. If the", "# cookie is too large, then it may be silently ignored, which can be quite", "# hard to debug.", "cookie_size", "=", "len", "(", "rv", ")", "if", "max_size", "and", "cookie_size", ">", "max_size", ":", "value_size", "=", "len", "(", "value", ")", "warnings", ".", "warn", "(", "'The \"{key}\" cookie is too large: the value was {value_size} bytes'", "\" but the header required {extra_size} extra bytes. 
The final size\"", "\" was {cookie_size} bytes but the limit is {max_size} bytes.\"", "\" Browsers may silently ignore cookies larger than this.\"", ".", "format", "(", "key", "=", "key", ",", "value_size", "=", "value_size", ",", "extra_size", "=", "cookie_size", "-", "value_size", ",", "cookie_size", "=", "cookie_size", ",", "max_size", "=", "max_size", ",", ")", ",", "stacklevel", "=", "2", ",", ")", "return", "rv" ]
[ 1085, 0 ]
[ 1220, 13 ]
python
en
['en', 'en', 'en']
True
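A dump_cookie sketch (assumes werkzeug; key and value are illustrative). Note the result is only the header value, without the ``Set-Cookie:`` prefix:

    from werkzeug.http import dump_cookie

    header = dump_cookie("session", "abc123", max_age=3600, httponly=True)
    print(header)
    # session=abc123; Expires=<now + 1h>; Max-Age=3600; HttpOnly; Path=/
    # (Expires is filled in from max_age because sync_expires defaults to True)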
is_byte_range_valid
(start, stop, length)
Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7
Checks if a given byte content range is valid for the given length.
def is_byte_range_valid(start, stop, length): """Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7 """ if (start is None) != (stop is None): return False elif start is None: return length is None or length >= 0 elif length is None: return 0 <= start < stop elif start >= stop: return False return 0 <= start < length
[ "def", "is_byte_range_valid", "(", "start", ",", "stop", ",", "length", ")", ":", "if", "(", "start", "is", "None", ")", "!=", "(", "stop", "is", "None", ")", ":", "return", "False", "elif", "start", "is", "None", ":", "return", "length", "is", "None", "or", "length", ">=", "0", "elif", "length", "is", "None", ":", "return", "0", "<=", "start", "<", "stop", "elif", "start", ">=", "stop", ":", "return", "False", "return", "0", "<=", "start", "<", "length" ]
[ 1223, 0 ]
[ 1236, 30 ]
python
en
['en', 'en', 'en']
True
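A few is_byte_range_valid probes (assumes werkzeug):

    from werkzeug.http import is_byte_range_valid

    print(is_byte_range_valid(0, 500, 1000))    # True: start below stop and within length
    print(is_byte_range_valid(None, None, 10))  # True: no range against a known length
    print(is_byte_range_valid(500, 400, 1000))  # False: start must be below stop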
OracleGeometryColumns.table_name_col
(cls)
Return the name of the metadata column used to store the feature table name.
Return the name of the metadata column used to store the feature table name.
def table_name_col(cls): """ Return the name of the metadata column used to store the feature table name. """ return 'table_name'
[ "def", "table_name_col", "(", "cls", ")", ":", "return", "'table_name'" ]
[ 29, 4 ]
[ 34, 27 ]
python
en
['en', 'error', 'th']
False
OracleGeometryColumns.geom_col_name
(cls)
Return the name of the metadata column used to store the feature geometry column.
Return the name of the metadata column used to store the feature geometry column.
def geom_col_name(cls): """ Return the name of the metadata column used to store the feature geometry column. """ return 'column_name'
[ "def", "geom_col_name", "(", "cls", ")", ":", "return", "'column_name'" ]
[ 37, 4 ]
[ 42, 28 ]
python
en
['en', 'error', 'th']
False
install_editable
( install_options, # type: List[str] global_options, # type: Sequence[str] prefix, # type: Optional[str] home, # type: Optional[str] use_user_site, # type: bool name, # type: str setup_py_path, # type: str isolated, # type: bool build_env, # type: BuildEnvironment unpacked_source_directory, # type: str )
Install a package in editable mode. Most arguments are pass-through to setuptools.
Install a package in editable mode. Most arguments are pass-through to setuptools.
def install_editable( install_options, # type: List[str] global_options, # type: Sequence[str] prefix, # type: Optional[str] home, # type: Optional[str] use_user_site, # type: bool name, # type: str setup_py_path, # type: str isolated, # type: bool build_env, # type: BuildEnvironment unpacked_source_directory, # type: str ): # type: (...) -> None """Install a package in editable mode. Most arguments are pass-through to setuptools. """ logger.info('Running setup.py develop for %s', name) args = make_setuptools_develop_args( setup_py_path, global_options=global_options, install_options=install_options, no_user_config=isolated, prefix=prefix, home=home, use_user_site=use_user_site, ) with indent_log(): with build_env: call_subprocess( args, cwd=unpacked_source_directory, )
[ "def", "install_editable", "(", "install_options", ",", "# type: List[str]", "global_options", ",", "# type: Sequence[str]", "prefix", ",", "# type: Optional[str]", "home", ",", "# type: Optional[str]", "use_user_site", ",", "# type: bool", "name", ",", "# type: str", "setup_py_path", ",", "# type: str", "isolated", ",", "# type: bool", "build_env", ",", "# type: BuildEnvironment", "unpacked_source_directory", ",", "# type: str", ")", ":", "# type: (...) -> None", "logger", ".", "info", "(", "'Running setup.py develop for %s'", ",", "name", ")", "args", "=", "make_setuptools_develop_args", "(", "setup_py_path", ",", "global_options", "=", "global_options", ",", "install_options", "=", "install_options", ",", "no_user_config", "=", "isolated", ",", "prefix", "=", "prefix", ",", "home", "=", "home", ",", "use_user_site", "=", "use_user_site", ",", ")", "with", "indent_log", "(", ")", ":", "with", "build_env", ":", "call_subprocess", "(", "args", ",", "cwd", "=", "unpacked_source_directory", ",", ")" ]
[ 13, 0 ]
[ 46, 13 ]
python
en
['en', 'en', 'en']
True
_dnsname_match
(dn, hostname, max_wildcards=1)
Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3
Matching according to RFC 6125, section 6.4.3
def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r".") leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count("*") if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn) ) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == "*": # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append("[^.]+") elif leftmost.startswith("xn--") or hostname.startswith("xn--"): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r"\*", "[^.]*")) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE) return pat.match(hostname)
[ "def", "_dnsname_match", "(", "dn", ",", "hostname", ",", "max_wildcards", "=", "1", ")", ":", "pats", "=", "[", "]", "if", "not", "dn", ":", "return", "False", "# Ported from python3-syntax:", "# leftmost, *remainder = dn.split(r'.')", "parts", "=", "dn", ".", "split", "(", "r\".\"", ")", "leftmost", "=", "parts", "[", "0", "]", "remainder", "=", "parts", "[", "1", ":", "]", "wildcards", "=", "leftmost", ".", "count", "(", "\"*\"", ")", "if", "wildcards", ">", "max_wildcards", ":", "# Issue #17980: avoid denials of service by refusing more", "# than one wildcard per fragment. A survey of established", "# policy among SSL implementations showed it to be a", "# reasonable choice.", "raise", "CertificateError", "(", "\"too many wildcards in certificate DNS name: \"", "+", "repr", "(", "dn", ")", ")", "# speed up common case w/o wildcards", "if", "not", "wildcards", ":", "return", "dn", ".", "lower", "(", ")", "==", "hostname", ".", "lower", "(", ")", "# RFC 6125, section 6.4.3, subitem 1.", "# The client SHOULD NOT attempt to match a presented identifier in which", "# the wildcard character comprises a label other than the left-most label.", "if", "leftmost", "==", "\"*\"", ":", "# When '*' is a fragment by itself, it matches a non-empty dotless", "# fragment.", "pats", ".", "append", "(", "\"[^.]+\"", ")", "elif", "leftmost", ".", "startswith", "(", "\"xn--\"", ")", "or", "hostname", ".", "startswith", "(", "\"xn--\"", ")", ":", "# RFC 6125, section 6.4.3, subitem 3.", "# The client SHOULD NOT attempt to match a presented identifier", "# where the wildcard character is embedded within an A-label or", "# U-label of an internationalized domain name.", "pats", ".", "append", "(", "re", ".", "escape", "(", "leftmost", ")", ")", "else", ":", "# Otherwise, '*' matches any dotless string, e.g. www*", "pats", ".", "append", "(", "re", ".", "escape", "(", "leftmost", ")", ".", "replace", "(", "r\"\\*\"", ",", "\"[^.]*\"", ")", ")", "# add the remaining fragments, ignore any wildcards", "for", "frag", "in", "remainder", ":", "pats", ".", "append", "(", "re", ".", "escape", "(", "frag", ")", ")", "pat", "=", "re", ".", "compile", "(", "r\"\\A\"", "+", "r\"\\.\"", ".", "join", "(", "pats", ")", "+", "r\"\\Z\"", ",", "re", ".", "IGNORECASE", ")", "return", "pat", ".", "match", "(", "hostname", ")" ]
[ 24, 0 ]
[ 75, 30 ]
python
en
['en', 'en', 'en']
True
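The wildcard rules above can be poked at directly; the import path below is a guess at pip's vendored copy of urllib3 and may differ in your tree:

    # Path is an assumption (pip's vendored urllib3); adjust as needed.
    from pip._vendor.urllib3.packages.ssl_match_hostname._implementation import (
        _dnsname_match,
    )

    print(bool(_dnsname_match("*.example.com", "www.example.com")))  # True
    print(bool(_dnsname_match("*.example.com", "a.b.example.com")))  # False: '*' spans one label
    print(bool(_dnsname_match("example.com", "EXAMPLE.COM")))        # True: compare is case-insensitive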
_ipaddress_match
(ipname, host_ip)
Exact matching of IP addresses. RFC 6125 explicitly doesn't define an algorithm for this (section 1.7.2 - "Out of Scope").
Exact matching of IP addresses.
def _ipaddress_match(ipname, host_ip): """Exact matching of IP addresses. RFC 6125 explicitly doesn't define an algorithm for this (section 1.7.2 - "Out of Scope"). """ # OpenSSL may add a trailing newline to a subjectAltName's IP address # Divergence from upstream: ipaddress can't handle byte str ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) return ip == host_ip
[ "def", "_ipaddress_match", "(", "ipname", ",", "host_ip", ")", ":", "# OpenSSL may add a trailing newline to a subjectAltName's IP address", "# Divergence from upstream: ipaddress can't handle byte str", "ip", "=", "ipaddress", ".", "ip_address", "(", "_to_unicode", "(", "ipname", ")", ".", "rstrip", "(", ")", ")", "return", "ip", "==", "host_ip" ]
[ 84, 0 ]
[ 93, 24 ]
python
en
['en', 'sn', 'en']
True
match_hostname
(cert, hostname)
Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing.
Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*.
def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError( "empty or no certificate, match_hostname needs a " "SSL socket or SSL context with either " "CERT_OPTIONAL or CERT_REQUIRED" ) try: # Divergence from upstream: ipaddress can't handle byte str host_ip = ipaddress.ip_address(_to_unicode(hostname)) except ValueError: # Not an IP address (common case) host_ip = None except UnicodeError: # Divergence from upstream: Have to deal with ipaddress not taking # byte strings. addresses should be all ascii, so we consider it not # an ipaddress in this case host_ip = None except AttributeError: # Divergence from upstream: Make ipaddress library optional if ipaddress is None: host_ip = None else: raise dnsnames = [] san = cert.get("subjectAltName", ()) for key, value in san: if key == "DNS": if host_ip is None and _dnsname_match(value, hostname): return dnsnames.append(value) elif key == "IP Address": if host_ip is not None and _ipaddress_match(value, host_ip): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get("subject", ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == "commonName": if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError( "hostname %r " "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames))) ) elif len(dnsnames) == 1: raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError( "no appropriate commonName or subjectAltName fields were found" )
[ "def", "match_hostname", "(", "cert", ",", "hostname", ")", ":", "if", "not", "cert", ":", "raise", "ValueError", "(", "\"empty or no certificate, match_hostname needs a \"", "\"SSL socket or SSL context with either \"", "\"CERT_OPTIONAL or CERT_REQUIRED\"", ")", "try", ":", "# Divergence from upstream: ipaddress can't handle byte str", "host_ip", "=", "ipaddress", ".", "ip_address", "(", "_to_unicode", "(", "hostname", ")", ")", "except", "ValueError", ":", "# Not an IP address (common case)", "host_ip", "=", "None", "except", "UnicodeError", ":", "# Divergence from upstream: Have to deal with ipaddress not taking", "# byte strings. addresses should be all ascii, so we consider it not", "# an ipaddress in this case", "host_ip", "=", "None", "except", "AttributeError", ":", "# Divergence from upstream: Make ipaddress library optional", "if", "ipaddress", "is", "None", ":", "host_ip", "=", "None", "else", ":", "raise", "dnsnames", "=", "[", "]", "san", "=", "cert", ".", "get", "(", "\"subjectAltName\"", ",", "(", ")", ")", "for", "key", ",", "value", "in", "san", ":", "if", "key", "==", "\"DNS\"", ":", "if", "host_ip", "is", "None", "and", "_dnsname_match", "(", "value", ",", "hostname", ")", ":", "return", "dnsnames", ".", "append", "(", "value", ")", "elif", "key", "==", "\"IP Address\"", ":", "if", "host_ip", "is", "not", "None", "and", "_ipaddress_match", "(", "value", ",", "host_ip", ")", ":", "return", "dnsnames", ".", "append", "(", "value", ")", "if", "not", "dnsnames", ":", "# The subject is only checked when there is no dNSName entry", "# in subjectAltName", "for", "sub", "in", "cert", ".", "get", "(", "\"subject\"", ",", "(", ")", ")", ":", "for", "key", ",", "value", "in", "sub", ":", "# XXX according to RFC 2818, the most specific Common Name", "# must be used.", "if", "key", "==", "\"commonName\"", ":", "if", "_dnsname_match", "(", "value", ",", "hostname", ")", ":", "return", "dnsnames", ".", "append", "(", "value", ")", "if", "len", "(", "dnsnames", ")", ">", "1", ":", "raise", "CertificateError", "(", "\"hostname %r \"", "\"doesn't match either of %s\"", "%", "(", "hostname", ",", "\", \"", ".", "join", "(", "map", "(", "repr", ",", "dnsnames", ")", ")", ")", ")", "elif", "len", "(", "dnsnames", ")", "==", "1", ":", "raise", "CertificateError", "(", "\"hostname %r doesn't match %r\"", "%", "(", "hostname", ",", "dnsnames", "[", "0", "]", ")", ")", "else", ":", "raise", "CertificateError", "(", "\"no appropriate commonName or subjectAltName fields were found\"", ")" ]
[ 96, 0 ]
[ 159, 9 ]
python
en
['en', 'en', 'en']
True
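A match_hostname sketch against a hand-built cert dict in the shape SSLSocket.getpeercert() returns (the import path is the same vendored-urllib3 guess as above):

    from pip._vendor.urllib3.packages.ssl_match_hostname import (
        CertificateError,
        match_hostname,
    )

    cert = {"subjectAltName": (("DNS", "*.example.com"), ("DNS", "example.com"))}
    match_hostname(cert, "www.example.com")  # returns None on success
    try:
        match_hostname(cert, "www.other.org")
    except CertificateError as exc:
        print(exc)  # hostname 'www.other.org' doesn't match either of ...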
vary_on_headers
(*headers)
A view decorator that adds the specified headers to the Vary header of the response. Usage: @vary_on_headers('Cookie', 'Accept-language') def index(request): ... Note that the header names are not case-sensitive.
A view decorator that adds the specified headers to the Vary header of the response. Usage:
def vary_on_headers(*headers): """ A view decorator that adds the specified headers to the Vary header of the response. Usage: @vary_on_headers('Cookie', 'Accept-language') def index(request): ... Note that the header names are not case-sensitive. """ def decorator(func): @wraps(func) def inner_func(*args, **kwargs): response = func(*args, **kwargs) patch_vary_headers(response, headers) return response return inner_func return decorator
[ "def", "vary_on_headers", "(", "*", "headers", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "patch_vary_headers", "(", "response", ",", "headers", ")", "return", "response", "return", "inner_func", "return", "decorator" ]
[ 5, 0 ]
[ 23, 20 ]
python
en
['en', 'error', 'th']
False
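A sketch of the decorator on a Django view (assumes a configured Django project; the view itself is hypothetical):

    from django.http import HttpResponse
    from django.views.decorators.vary import vary_on_headers

    @vary_on_headers('User-Agent')
    def index(request):
        # After the view runs, the decorator patches the response header:
        #   Vary: User-Agent
        return HttpResponse('hello')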
vary_on_cookie
(func)
A view decorator that adds "Cookie" to the Vary header of a response. This indicates that a page's contents depends on cookies. Usage: @vary_on_cookie def index(request): ...
A view decorator that adds "Cookie" to the Vary header of a response. This indicates that a page's contents depends on cookies. Usage:
def vary_on_cookie(func): """ A view decorator that adds "Cookie" to the Vary header of a response. This indicates that a page's contents depends on cookies. Usage: @vary_on_cookie def index(request): ... """ @wraps(func) def inner_func(*args, **kwargs): response = func(*args, **kwargs) patch_vary_headers(response, ('Cookie',)) return response return inner_func
[ "def", "vary_on_cookie", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "patch_vary_headers", "(", "response", ",", "(", "'Cookie'", ",", ")", ")", "return", "response", "return", "inner_func" ]
[ 26, 0 ]
[ 40, 21 ]
python
en
['en', 'error', 'th']
False
command_btc
(bot, user, channel, args)
Display current BTC exchange rates from mtgox. Usage: btc [<currencycode>...].
Display current BTC exchange rates from mtgox. Usage: btc [<currencycode>...].
def command_btc(bot, user, channel, args): """Display current BTC exchange rates from mtgox. Usage: btc [<currencycode>...].""" currencies = ["EUR"] if args: currencies = args.split() return bot.say(channel, get_coin_value(bot, "BTC", currencies))
[ "def", "command_btc", "(", "bot", ",", "user", ",", "channel", ",", "args", ")", ":", "currencies", "=", "[", "\"EUR\"", "]", "if", "args", ":", "currencies", "=", "args", ".", "split", "(", ")", "return", "bot", ".", "say", "(", "channel", ",", "get_coin_value", "(", "bot", ",", "\"BTC\"", ",", "currencies", ")", ")" ]
[ 7, 0 ]
[ 15, 67 ]
python
en
['en', 'en', 'en']
True
_SendRecv
()
Communicate with the Developer Shell server socket.
Communicate with the Developer Shell server socket.
def _SendRecv(): """Communicate with the Developer Shell server socket.""" port = int(os.getenv(DEVSHELL_ENV, 0)) if port == 0: raise NoDevshellServer() sock = socket.socket() sock.connect(('localhost', port)) data = CREDENTIAL_INFO_REQUEST_JSON msg = '{0}\n{1}'.format(len(data), data) sock.sendall(_helpers._to_bytes(msg, encoding='utf-8')) header = sock.recv(6).decode() if '\n' not in header: raise CommunicationError('saw no newline in the first 6 bytes') len_str, json_str = header.split('\n', 1) to_read = int(len_str) - len(json_str) if to_read > 0: json_str += sock.recv(to_read, socket.MSG_WAITALL).decode() return CredentialInfoResponse(json_str)
[ "def", "_SendRecv", "(", ")", ":", "port", "=", "int", "(", "os", ".", "getenv", "(", "DEVSHELL_ENV", ",", "0", ")", ")", "if", "port", "==", "0", ":", "raise", "NoDevshellServer", "(", ")", "sock", "=", "socket", ".", "socket", "(", ")", "sock", ".", "connect", "(", "(", "'localhost'", ",", "port", ")", ")", "data", "=", "CREDENTIAL_INFO_REQUEST_JSON", "msg", "=", "'{0}\\n{1}'", ".", "format", "(", "len", "(", "data", ")", ",", "data", ")", "sock", ".", "sendall", "(", "_helpers", ".", "_to_bytes", "(", "msg", ",", "encoding", "=", "'utf-8'", ")", ")", "header", "=", "sock", ".", "recv", "(", "6", ")", ".", "decode", "(", ")", "if", "'\\n'", "not", "in", "header", ":", "raise", "CommunicationError", "(", "'saw no newline in the first 6 bytes'", ")", "len_str", ",", "json_str", "=", "header", ".", "split", "(", "'\\n'", ",", "1", ")", "to_read", "=", "int", "(", "len_str", ")", "-", "len", "(", "json_str", ")", "if", "to_read", ">", "0", ":", "json_str", "+=", "sock", ".", "recv", "(", "to_read", ",", "socket", ".", "MSG_WAITALL", ")", ".", "decode", "(", ")", "return", "CredentialInfoResponse", "(", "json_str", ")" ]
[ 71, 0 ]
[ 93, 43 ]
python
en
['en', 'no', 'en']
True
CredentialInfoResponse.__init__
(self, json_string)
Initialize the response data from JSON PBLite array.
Initialize the response data from JSON PBLite array.
def __init__(self, json_string): """Initialize the response data from JSON PBLite array.""" pbl = json.loads(json_string) if not isinstance(pbl, list): raise ValueError('Not a list: ' + str(pbl)) pbl_len = len(pbl) self.user_email = pbl[0] if pbl_len > 0 else None self.project_id = pbl[1] if pbl_len > 1 else None self.access_token = pbl[2] if pbl_len > 2 else None self.expires_in = pbl[3] if pbl_len > 3 else None
[ "def", "__init__", "(", "self", ",", "json_string", ")", ":", "pbl", "=", "json", ".", "loads", "(", "json_string", ")", "if", "not", "isinstance", "(", "pbl", ",", "list", ")", ":", "raise", "ValueError", "(", "'Not a list: '", "+", "str", "(", "pbl", ")", ")", "pbl_len", "=", "len", "(", "pbl", ")", "self", ".", "user_email", "=", "pbl", "[", "0", "]", "if", "pbl_len", ">", "0", "else", "None", "self", ".", "project_id", "=", "pbl", "[", "1", "]", "if", "pbl_len", ">", "1", "else", "None", "self", ".", "access_token", "=", "pbl", "[", "2", "]", "if", "pbl_len", ">", "2", "else", "None", "self", ".", "expires_in", "=", "pbl", "[", "3", "]", "if", "pbl_len", ">", "3", "else", "None" ]
[ 59, 4 ]
[ 68, 57 ]
python
en
['en', 'en', 'en']
True
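The PBLite layout parsed above is just a positional JSON array with optional trailing fields; a self-contained sketch with made-up values:

    import json

    pbl = json.loads('["user@example.com", "my-project", "ya29.token", 1800]')
    # Same positional convention as CredentialInfoResponse: missing
    # trailing fields fall back to None.
    user_email, project_id, access_token, expires_in = (pbl + [None] * 4)[:4]
    print(user_email, expires_in)  # user@example.com 1800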
DevshellCredentials._refresh
(self, http)
Refreshes the access token. Args: http: unused HTTP object
Refreshes the access token.
def _refresh(self, http): """Refreshes the access token. Args: http: unused HTTP object """ self.devshell_response = _SendRecv() self.access_token = self.devshell_response.access_token expires_in = self.devshell_response.expires_in if expires_in is not None: delta = datetime.timedelta(seconds=expires_in) self.token_expiry = client._UTCNOW() + delta else: self.token_expiry = None
[ "def", "_refresh", "(", "self", ",", "http", ")", ":", "self", ".", "devshell_response", "=", "_SendRecv", "(", ")", "self", ".", "access_token", "=", "self", ".", "devshell_response", ".", "access_token", "expires_in", "=", "self", ".", "devshell_response", ".", "expires_in", "if", "expires_in", "is", "not", "None", ":", "delta", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "expires_in", ")", "self", ".", "token_expiry", "=", "client", ".", "_UTCNOW", "(", ")", "+", "delta", "else", ":", "self", ".", "token_expiry", "=", "None" ]
[ 120, 4 ]
[ 133, 36 ]
python
en
['en', 'en', 'en']
True
hello_monkey
()
Respond to incoming calls with a simple text message.
Respond to incoming calls with a simple text message.
def hello_monkey(): """Respond to incoming calls with a simple text message.""" resp = MessagingResponse() msg = Message()\ .body("Hello, Mobile Monkey")\ .media("https://demo.twilio.com/owl.png") resp.append(msg) return str(resp) if __name__ == "__main__": app.run(debug=True)
[ "def", "hello_monkey", "(", ")", ":", "resp", "=", "MessagingResponse", "(", ")", "msg", "=", "Message", "(", ")", ".", "body", "(", "\"Hello, Mobile Monkey\"", ")", ".", "media", "(", "\"https://demo.twilio.com/owl.png\"", ")", "resp", ".", "append", "(", "msg", ")", "return", "str", "(", "resp", ")", "if", "__name__", "==", "\"__main__\"", ":", "app", ".", "run", "(", "debug", "=", "True", ")" ]
[ 8, 0 ]
[ 20, 27 ]
python
en
['en', 'en', 'en']
True
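The route above returns TwiML. Assuming the usual twilio.twiml.messaging_response import, the rendered response looks roughly like this (output reconstructed, not captured from a live run):

from twilio.twiml.messaging_response import Message, MessagingResponse

resp = MessagingResponse()
resp.append(Message().body("Hello, Mobile Monkey")
                     .media("https://demo.twilio.com/owl.png"))
print(str(resp))
# <?xml version="1.0" encoding="UTF-8"?>
# <Response><Message><Body>Hello, Mobile Monkey</Body>
# <Media>https://demo.twilio.com/owl.png</Media></Message></Response>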
Config.from_envvar
(self, variable_name, silent=False)
Loads a configuration from an environment variable pointing to a configuration file. This is basically just a shortcut with nicer error messages for this line of code:: app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) :param variable_name: name of the environment variable :param silent: set to ``True`` if you want silent failure for missing files. :return: bool. ``True`` if able to load config, ``False`` otherwise.
Loads a configuration from an environment variable pointing to a configuration file. This is basically just a shortcut with nicer error messages for this line of code::
def from_envvar(self, variable_name, silent=False): """Loads a configuration from an environment variable pointing to a configuration file. This is basically just a shortcut with nicer error messages for this line of code:: app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) :param variable_name: name of the environment variable :param silent: set to ``True`` if you want silent failure for missing files. :return: bool. ``True`` if able to load config, ``False`` otherwise. """ rv = os.environ.get(variable_name) if not rv: if silent: return False raise RuntimeError('The environment variable %r is not set ' 'and as such configuration could not be ' 'loaded. Set this variable and make it ' 'point to a configuration file' % variable_name) return self.from_pyfile(rv, silent=silent)
[ "def", "from_envvar", "(", "self", ",", "variable_name", ",", "silent", "=", "False", ")", ":", "rv", "=", "os", ".", "environ", ".", "get", "(", "variable_name", ")", "if", "not", "rv", ":", "if", "silent", ":", "return", "False", "raise", "RuntimeError", "(", "'The environment variable %r is not set '", "'and as such configuration could not be '", "'loaded. Set this variable and make it '", "'point to a configuration file'", "%", "variable_name", ")", "return", "self", ".", "from_pyfile", "(", "rv", ",", "silent", "=", "silent", ")" ]
[ 87, 4 ]
[ 108, 50 ]
python
en
['en', 'en', 'en']
True
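A hedged usage sketch for from_envvar; MYAPP_SETTINGS and the file path are hypothetical names:

from flask import Flask

app = Flask(__name__)
# export MYAPP_SETTINGS=/path/to/settings.cfg before starting the app
if not app.config.from_envvar('MYAPP_SETTINGS', silent=True):
    app.logger.warning('MYAPP_SETTINGS not set; falling back to defaults')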
Config.from_pyfile
(self, filename, silent=False)
Updates the values in the config from a Python file. This function behaves as if the file was imported as module with the :meth:`from_object` function. :param filename: the filename of the config. This can either be an absolute filename or a filename relative to the root path. :param silent: set to ``True`` if you want silent failure for missing files. .. versionadded:: 0.7 `silent` parameter.
Updates the values in the config from a Python file. This function behaves as if the file was imported as module with the :meth:`from_object` function.
def from_pyfile(self, filename, silent=False): """Updates the values in the config from a Python file. This function behaves as if the file was imported as module with the :meth:`from_object` function. :param filename: the filename of the config. This can either be an absolute filename or a filename relative to the root path. :param silent: set to ``True`` if you want silent failure for missing files. .. versionadded:: 0.7 `silent` parameter. """ filename = os.path.join(self.root_path, filename) d = types.ModuleType('config') d.__file__ = filename try: with open(filename, mode='rb') as config_file: exec(compile(config_file.read(), filename, 'exec'), d.__dict__) except IOError as e: if silent and e.errno in (errno.ENOENT, errno.EISDIR): return False e.strerror = 'Unable to load configuration file (%s)' % e.strerror raise self.from_object(d) return True
[ "def", "from_pyfile", "(", "self", ",", "filename", ",", "silent", "=", "False", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "filename", ")", "d", "=", "types", ".", "ModuleType", "(", "'config'", ")", "d", ".", "__file__", "=", "filename", "try", ":", "with", "open", "(", "filename", ",", "mode", "=", "'rb'", ")", "as", "config_file", ":", "exec", "(", "compile", "(", "config_file", ".", "read", "(", ")", ",", "filename", ",", "'exec'", ")", ",", "d", ".", "__dict__", ")", "except", "IOError", "as", "e", ":", "if", "silent", "and", "e", ".", "errno", "in", "(", "errno", ".", "ENOENT", ",", "errno", ".", "EISDIR", ")", ":", "return", "False", "e", ".", "strerror", "=", "'Unable to load configuration file (%s)'", "%", "e", ".", "strerror", "raise", "self", ".", "from_object", "(", "d", ")", "return", "True" ]
[ 110, 4 ]
[ 136, 19 ]
python
en
['en', 'en', 'en']
True
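Since the file is exec'd as a module and then passed through from_object, only uppercase names are kept. A hypothetical settings file and call:

# settings.cfg (hypothetical):
#     DEBUG = False
#     SECRET_KEY = 'change-me'
#     _internal = 'ignored: not uppercase'
app.config.from_pyfile('settings.cfg', silent=True)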
Config.from_object
(self, obj)
Updates the values from the given object. An object can be of one of the following two types: - a string: in this case the object with that name will be imported - an actual object reference: that object is used directly Objects are usually either modules or classes. :meth:`from_object` loads only the uppercase attributes of the module/class. A ``dict`` object will not work with :meth:`from_object` because the keys of a ``dict`` are not attributes of the ``dict`` class. Example of module-based configuration:: app.config.from_object('yourapplication.default_config') from yourapplication import default_config app.config.from_object(default_config) You should not use this function to load the actual configuration but rather configuration defaults. The actual config should be loaded with :meth:`from_pyfile` and ideally from a location not within the package because the package might be installed system wide. See :ref:`config-dev-prod` for an example of class-based configuration using :meth:`from_object`. :param obj: an import name or object
Updates the values from the given object. An object can be of one of the following two types:
def from_object(self, obj): """Updates the values from the given object. An object can be of one of the following two types: - a string: in this case the object with that name will be imported - an actual object reference: that object is used directly Objects are usually either modules or classes. :meth:`from_object` loads only the uppercase attributes of the module/class. A ``dict`` object will not work with :meth:`from_object` because the keys of a ``dict`` are not attributes of the ``dict`` class. Example of module-based configuration:: app.config.from_object('yourapplication.default_config') from yourapplication import default_config app.config.from_object(default_config) You should not use this function to load the actual configuration but rather configuration defaults. The actual config should be loaded with :meth:`from_pyfile` and ideally from a location not within the package because the package might be installed system wide. See :ref:`config-dev-prod` for an example of class-based configuration using :meth:`from_object`. :param obj: an import name or object """ if isinstance(obj, string_types): obj = import_string(obj) for key in dir(obj): if key.isupper(): self[key] = getattr(obj, key)
[ "def", "from_object", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "string_types", ")", ":", "obj", "=", "import_string", "(", "obj", ")", "for", "key", "in", "dir", "(", "obj", ")", ":", "if", "key", ".", "isupper", "(", ")", ":", "self", "[", "key", "]", "=", "getattr", "(", "obj", ",", "key", ")" ]
[ 138, 4 ]
[ 170, 45 ]
python
en
['en', 'en', 'en']
True
Config.from_json
(self, filename, silent=False)
Updates the values in the config from a JSON file. This function behaves as if the JSON object was a dictionary and passed to the :meth:`from_mapping` function. :param filename: the filename of the JSON file. This can either be an absolute filename or a filename relative to the root path. :param silent: set to ``True`` if you want silent failure for missing files. .. versionadded:: 0.11
Updates the values in the config from a JSON file. This function behaves as if the JSON object was a dictionary and passed to the :meth:`from_mapping` function.
def from_json(self, filename, silent=False): """Updates the values in the config from a JSON file. This function behaves as if the JSON object was a dictionary and passed to the :meth:`from_mapping` function. :param filename: the filename of the JSON file. This can either be an absolute filename or a filename relative to the root path. :param silent: set to ``True`` if you want silent failure for missing files. .. versionadded:: 0.11 """ filename = os.path.join(self.root_path, filename) try: with open(filename) as json_file: obj = json.loads(json_file.read()) except IOError as e: if silent and e.errno in (errno.ENOENT, errno.EISDIR): return False e.strerror = 'Unable to load configuration file (%s)' % e.strerror raise return self.from_mapping(obj)
[ "def", "from_json", "(", "self", ",", "filename", ",", "silent", "=", "False", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "json_file", ":", "obj", "=", "json", ".", "loads", "(", "json_file", ".", "read", "(", ")", ")", "except", "IOError", "as", "e", ":", "if", "silent", "and", "e", ".", "errno", "in", "(", "errno", ".", "ENOENT", ",", "errno", ".", "EISDIR", ")", ":", "return", "False", "e", ".", "strerror", "=", "'Unable to load configuration file (%s)'", "%", "e", ".", "strerror", "raise", "return", "self", ".", "from_mapping", "(", "obj", ")" ]
[ 172, 4 ]
[ 195, 37 ]
python
en
['en', 'en', 'en']
True
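A hypothetical JSON file and call; like the other loaders, from_json hands the parsed object to from_mapping, which drops non-uppercase keys:

# config.json (hypothetical): {"DEBUG": false, "SECRET_KEY": "change-me"}
app.config.from_json('config.json', silent=True)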
Config.from_mapping
(self, *mapping, **kwargs)
Updates the config like :meth:`update` ignoring items with non-upper keys. .. versionadded:: 0.11
Updates the config like :meth:`update` ignoring items with non-upper keys.
def from_mapping(self, *mapping, **kwargs): """Updates the config like :meth:`update` ignoring items with non-upper keys. .. versionadded:: 0.11 """ mappings = [] if len(mapping) == 1: if hasattr(mapping[0], 'items'): mappings.append(mapping[0].items()) else: mappings.append(mapping[0]) elif len(mapping) > 1: raise TypeError( 'expected at most 1 positional argument, got %d' % len(mapping) ) mappings.append(kwargs.items()) for mapping in mappings: for (key, value) in mapping: if key.isupper(): self[key] = value return True
[ "def", "from_mapping", "(", "self", ",", "*", "mapping", ",", "*", "*", "kwargs", ")", ":", "mappings", "=", "[", "]", "if", "len", "(", "mapping", ")", "==", "1", ":", "if", "hasattr", "(", "mapping", "[", "0", "]", ",", "'items'", ")", ":", "mappings", ".", "append", "(", "mapping", "[", "0", "]", ".", "items", "(", ")", ")", "else", ":", "mappings", ".", "append", "(", "mapping", "[", "0", "]", ")", "elif", "len", "(", "mapping", ")", ">", "1", ":", "raise", "TypeError", "(", "'expected at most 1 positional argument, got %d'", "%", "len", "(", "mapping", ")", ")", "mappings", ".", "append", "(", "kwargs", ".", "items", "(", ")", ")", "for", "mapping", "in", "mappings", ":", "for", "(", "key", ",", "value", ")", "in", "mapping", ":", "if", "key", ".", "isupper", "(", ")", ":", "self", "[", "key", "]", "=", "value", "return", "True" ]
[ 197, 4 ]
[ 218, 19 ]
python
en
['en', 'en', 'en']
True
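A small demonstration of the uppercase filter in from_mapping; assumes a Flask app named app:

app.config.from_mapping({'SECRET_KEY': 'change-me'}, DEBUG=False,
                        ignored='lowercase keys are skipped')
print('ignored' in app.config)  # False -- only SECRET_KEY and DEBUG were set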
Config.get_namespace
(self, namespace, lowercase=True, trim_namespace=True)
Returns a dictionary containing a subset of configuration options that match the specified namespace/prefix. Example usage:: app.config['IMAGE_STORE_TYPE'] = 'fs' app.config['IMAGE_STORE_PATH'] = '/var/app/images' app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' image_store_config = app.config.get_namespace('IMAGE_STORE_') The resulting dictionary `image_store_config` would look like:: { 'type': 'fs', 'path': '/var/app/images', 'base_url': 'http://img.website.com' } This is often useful when configuration options map directly to keyword arguments in functions or class constructors. :param namespace: a configuration namespace :param lowercase: a flag indicating if the keys of the resulting dictionary should be lowercase :param trim_namespace: a flag indicating if the keys of the resulting dictionary should not include the namespace .. versionadded:: 0.11
Returns a dictionary containing a subset of configuration options that match the specified namespace/prefix. Example usage::
def get_namespace(self, namespace, lowercase=True, trim_namespace=True): """Returns a dictionary containing a subset of configuration options that match the specified namespace/prefix. Example usage:: app.config['IMAGE_STORE_TYPE'] = 'fs' app.config['IMAGE_STORE_PATH'] = '/var/app/images' app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' image_store_config = app.config.get_namespace('IMAGE_STORE_') The resulting dictionary `image_store_config` would look like:: { 'type': 'fs', 'path': '/var/app/images', 'base_url': 'http://img.website.com' } This is often useful when configuration options map directly to keyword arguments in functions or class constructors. :param namespace: a configuration namespace :param lowercase: a flag indicating if the keys of the resulting dictionary should be lowercase :param trim_namespace: a flag indicating if the keys of the resulting dictionary should not include the namespace .. versionadded:: 0.11 """ rv = {} for k, v in iteritems(self): if not k.startswith(namespace): continue if trim_namespace: key = k[len(namespace):] else: key = k if lowercase: key = key.lower() rv[key] = v return rv
[ "def", "get_namespace", "(", "self", ",", "namespace", ",", "lowercase", "=", "True", ",", "trim_namespace", "=", "True", ")", ":", "rv", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "self", ")", ":", "if", "not", "k", ".", "startswith", "(", "namespace", ")", ":", "continue", "if", "trim_namespace", ":", "key", "=", "k", "[", "len", "(", "namespace", ")", ":", "]", "else", ":", "key", "=", "k", "if", "lowercase", ":", "key", "=", "key", ".", "lower", "(", ")", "rv", "[", "key", "]", "=", "v", "return", "rv" ]
[ 220, 4 ]
[ 259, 17 ]
python
en
['en', 'en', 'en']
True
get_capability_token
()
Respond to incoming requests.
Respond to incoming requests.
def get_capability_token(): """Respond to incoming requests.""" # Find these values at twilio.com/console # To set up environment variables, see http://twil.io/secure account_sid = os.environ['TWILIO_ACCOUNT_SID'] auth_token = os.environ['TWILIO_AUTH_TOKEN'] capability = ClientCapabilityToken(account_sid, auth_token) # Twilio Application Sid application_sid = 'APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' capability.allow_client_outgoing(application_sid) capability.allow_client_incoming('joey') token = capability.to_jwt() return Response(token, mimetype='application/jwt')
[ "def", "get_capability_token", "(", ")", ":", "# Find these values at twilio.com/console", "# To set up environment variables, see http://twil.io/secure", "account_sid", "=", "os", ".", "environ", "[", "'TWILIO_ACCOUNT_SID'", "]", "auth_token", "=", "os", ".", "environ", "[", "'TWILIO_AUTH_TOKEN'", "]", "capability", "=", "ClientCapabilityToken", "(", "account_sid", ",", "auth_token", ")", "# Twilio Application Sid", "application_sid", "=", "'APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'", "capability", ".", "allow_client_outgoing", "(", "application_sid", ")", "capability", ".", "allow_client_incoming", "(", "'joey'", ")", "token", "=", "capability", ".", "to_jwt", "(", ")", "return", "Response", "(", "token", ",", "mimetype", "=", "'application/jwt'", ")" ]
[ 8, 0 ]
[ 24, 54 ]
python
en
['en', 'en', 'en']
True
Composable.as_string
(self, context)
Return the string value of the object. :param context: the context to evaluate the string into. :type context: `connection` or `cursor` The method is automatically invoked by `~cursor.execute()`, `~cursor.executemany()`, `~cursor.copy_expert()` if a `!Composable` is passed instead of the query string.
Return the string value of the object.
def as_string(self, context): """ Return the string value of the object. :param context: the context to evaluate the string into. :type context: `connection` or `cursor` The method is automatically invoked by `~cursor.execute()`, `~cursor.executemany()`, `~cursor.copy_expert()` if a `!Composable` is passed instead of the query string. """ raise NotImplementedError
[ "def", "as_string", "(", "self", ",", "context", ")", ":", "raise", "NotImplementedError" ]
[ 54, 4 ]
[ 65, 33 ]
python
en
['en', 'error', 'th']
False
Composed.seq
(self)
The list of the content of the `!Composed`.
The list of the content of the `!Composed`.
def seq(self): """The list of the content of the `!Composed`.""" return list(self._wrapped)
[ "def", "seq", "(", "self", ")", ":", "return", "list", "(", "self", ".", "_wrapped", ")" ]
[ 114, 4 ]
[ 116, 34 ]
python
en
['en', 'en', 'en']
True
Composed.join
(self, joiner)
Return a new `!Composed` interposing the *joiner* with the `!Composed` items. The *joiner* must be a `SQL` or a string which will be interpreted as an `SQL`. Example:: >>> fields = sql.Identifier('foo') + sql.Identifier('bar') # a Composed >>> print(fields.join(', ').as_string(conn)) "foo", "bar"
Return a new `!Composed` interposing the *joiner* with the `!Composed` items.
def join(self, joiner): """ Return a new `!Composed` interposing the *joiner* with the `!Composed` items. The *joiner* must be a `SQL` or a string which will be interpreted as an `SQL`. Example:: >>> fields = sql.Identifier('foo') + sql.Identifier('bar') # a Composed >>> print(fields.join(', ').as_string(conn)) "foo", "bar" """ if isinstance(joiner, str): joiner = SQL(joiner) elif not isinstance(joiner, SQL): raise TypeError( "Composed.join() argument must be a string or an SQL") return joiner.join(self)
[ "def", "join", "(", "self", ",", "joiner", ")", ":", "if", "isinstance", "(", "joiner", ",", "str", ")", ":", "joiner", "=", "SQL", "(", "joiner", ")", "elif", "not", "isinstance", "(", "joiner", ",", "SQL", ")", ":", "raise", "TypeError", "(", "\"Composed.join() argument must be a string or an SQL\"", ")", "return", "joiner", ".", "join", "(", "self", ")" ]
[ 135, 4 ]
[ 155, 32 ]
python
en
['en', 'error', 'th']
False
SQL.string
(self)
The string wrapped by the `!SQL` object.
The string wrapped by the `!SQL` object.
def string(self): """The string wrapped by the `!SQL` object.""" return self._wrapped
[ "def", "string", "(", "self", ")", ":", "return", "self", ".", "_wrapped" ]
[ 186, 4 ]
[ 188, 28 ]
python
en
['en', 'en', 'en']
True
SQL.format
(self, *args, **kwargs)
Merge `Composable` objects into a template. :param `Composable` args: parameters to replace to numbered (``{0}``, ``{1}``) or auto-numbered (``{}``) placeholders :param `Composable` kwargs: parameters to replace to named (``{name}``) placeholders :return: the union of the `!SQL` string with placeholders replaced :rtype: `Composed` The method is similar to the Python `str.format()` method: the string template supports auto-numbered (``{}``), numbered (``{0}``, ``{1}``...), and named placeholders (``{name}``), with positional arguments replacing the numbered placeholders and keywords replacing the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``) are not supported. Only `!Composable` objects can be passed to the template. Example:: >>> print(sql.SQL("select * from {} where {} = %s") ... .format(sql.Identifier('people'), sql.Identifier('id')) ... .as_string(conn)) select * from "people" where "id" = %s >>> print(sql.SQL("select * from {tbl} where {pkey} = %s") ... .format(tbl=sql.Identifier('people'), pkey=sql.Identifier('id')) ... .as_string(conn)) select * from "people" where "id" = %s
Merge `Composable` objects into a template.
def format(self, *args, **kwargs): """ Merge `Composable` objects into a template. :param `Composable` args: parameters to replace to numbered (``{0}``, ``{1}``) or auto-numbered (``{}``) placeholders :param `Composable` kwargs: parameters to replace to named (``{name}``) placeholders :return: the union of the `!SQL` string with placeholders replaced :rtype: `Composed` The method is similar to the Python `str.format()` method: the string template supports auto-numbered (``{}``), numbered (``{0}``, ``{1}``...), and named placeholders (``{name}``), with positional arguments replacing the numbered placeholders and keywords replacing the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``) are not supported. Only `!Composable` objects can be passed to the template. Example:: >>> print(sql.SQL("select * from {} where {} = %s") ... .format(sql.Identifier('people'), sql.Identifier('id')) ... .as_string(conn)) select * from "people" where "id" = %s >>> print(sql.SQL("select * from {tbl} where {pkey} = %s") ... .format(tbl=sql.Identifier('people'), pkey=sql.Identifier('id')) ... .as_string(conn)) select * from "people" where "id" = %s """ rv = [] autonum = 0 for pre, name, spec, conv in _formatter.parse(self._wrapped): if spec: raise ValueError("no format specification supported by SQL") if conv: raise ValueError("no format conversion supported by SQL") if pre: rv.append(SQL(pre)) if name is None: continue if name.isdigit(): if autonum: raise ValueError( "cannot switch from automatic field numbering to manual") rv.append(args[int(name)]) autonum = None elif not name: if autonum is None: raise ValueError( "cannot switch from manual field numbering to automatic") rv.append(args[autonum]) autonum += 1 else: rv.append(kwargs[name]) return Composed(rv)
[ "def", "format", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rv", "=", "[", "]", "autonum", "=", "0", "for", "pre", ",", "name", ",", "spec", ",", "conv", "in", "_formatter", ".", "parse", "(", "self", ".", "_wrapped", ")", ":", "if", "spec", ":", "raise", "ValueError", "(", "\"no format specification supported by SQL\"", ")", "if", "conv", ":", "raise", "ValueError", "(", "\"no format conversion supported by SQL\"", ")", "if", "pre", ":", "rv", ".", "append", "(", "SQL", "(", "pre", ")", ")", "if", "name", "is", "None", ":", "continue", "if", "name", ".", "isdigit", "(", ")", ":", "if", "autonum", ":", "raise", "ValueError", "(", "\"cannot switch from automatic field numbering to manual\"", ")", "rv", ".", "append", "(", "args", "[", "int", "(", "name", ")", "]", ")", "autonum", "=", "None", "elif", "not", "name", ":", "if", "autonum", "is", "None", ":", "raise", "ValueError", "(", "\"cannot switch from manual field numbering to automatic\"", ")", "rv", ".", "append", "(", "args", "[", "autonum", "]", ")", "autonum", "+=", "1", "else", ":", "rv", ".", "append", "(", "kwargs", "[", "name", "]", ")", "return", "Composed", "(", "rv", ")" ]
[ 193, 4 ]
[ 255, 27 ]
python
en
['en', 'error', 'th']
False
SQL.join
(self, seq)
Join a sequence of `Composable`. :param seq: the elements to join. :type seq: iterable of `!Composable` Use the `!SQL` object's *string* to separate the elements in *seq*. Note that `Composed` objects are iterable too, so they can be used as argument for this method. Example:: >>> snip = sql.SQL(', ').join( ... sql.Identifier(n) for n in ['foo', 'bar', 'baz']) >>> print(snip.as_string(conn)) "foo", "bar", "baz"
Join a sequence of `Composable`.
def join(self, seq): """ Join a sequence of `Composable`. :param seq: the elements to join. :type seq: iterable of `!Composable` Use the `!SQL` object's *string* to separate the elements in *seq*. Note that `Composed` objects are iterable too, so they can be used as argument for this method. Example:: >>> snip = sql.SQL(', ').join( ... sql.Identifier(n) for n in ['foo', 'bar', 'baz']) >>> print(snip.as_string(conn)) "foo", "bar", "baz" """ rv = [] it = iter(seq) try: rv.append(next(it)) except StopIteration: pass else: for i in it: rv.append(self) rv.append(i) return Composed(rv)
[ "def", "join", "(", "self", ",", "seq", ")", ":", "rv", "=", "[", "]", "it", "=", "iter", "(", "seq", ")", "try", ":", "rv", ".", "append", "(", "next", "(", "it", ")", ")", "except", "StopIteration", ":", "pass", "else", ":", "for", "i", "in", "it", ":", "rv", ".", "append", "(", "self", ")", "rv", ".", "append", "(", "i", ")", "return", "Composed", "(", "rv", ")" ]
[ 257, 4 ]
[ 286, 27 ]
python
en
['en', 'error', 'th']
False
Identifier.strings
(self)
A tuple with the strings wrapped by the `Identifier`.
A tuple with the strings wrapped by the `Identifier`.
def strings(self): """A tuple with the strings wrapped by the `Identifier`.""" return self._wrapped
[ "def", "strings", "(", "self", ")", ":", "return", "self", ".", "_wrapped" ]
[ 331, 4 ]
[ 333, 28 ]
python
en
['en', 'en', 'en']
True
Identifier.string
(self)
The string wrapped by the `Identifier`.
The string wrapped by the `Identifier`.
def string(self): """The string wrapped by the `Identifier`. """ if len(self._wrapped) == 1: return self._wrapped[0] else: raise AttributeError( "the Identifier wraps more than one string")
[ "def", "string", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_wrapped", ")", "==", "1", ":", "return", "self", ".", "_wrapped", "[", "0", "]", "else", ":", "raise", "AttributeError", "(", "\"the Identifier wraps more than one string\"", ")" ]
[ 336, 4 ]
[ 343, 69 ]
python
en
['en', 'en', 'en']
True
Literal.wrapped
(self)
The object wrapped by the `!Literal`.
The object wrapped by the `!Literal`.
def wrapped(self): """The object wrapped by the `!Literal`.""" return self._wrapped
[ "def", "wrapped", "(", "self", ")", ":", "return", "self", ".", "_wrapped" ]
[ 373, 4 ]
[ 375, 28 ]
python
en
['en', 'en', 'en']
True
Placeholder.name
(self)
The name of the `!Placeholder`.
The name of the `!Placeholder`.
def name(self): """The name of the `!Placeholder`.""" return self._wrapped
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "_wrapped" ]
[ 435, 4 ]
[ 437, 28 ]
python
en
['en', 'en', 'en']
True
LogFormatter.__init__
(self, color=True, datefmt=None)
r""" :arg bool color: Enables color support. :arg string fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code :arg string datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 Added ``fmt`` and ``datefmt`` arguments.
r""" :arg bool color: Enables color support. :arg string fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code :arg string datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 Added ``fmt`` and ``datefmt`` arguments.
def __init__(self, color=True, datefmt=None): r""" :arg bool color: Enables color support. :arg string fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code :arg string datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 Added ``fmt`` and ``datefmt`` arguments. """ logging.Formatter.__init__(self, datefmt=datefmt) self._colors = {} if color and _stderr_supports_color(): # The curses module has some str/bytes confusion in # python3. Until version 3.2.3, most methods return # bytes, but only accept strings. In addition, we want to # output these strings with the logging module, which # works with unicode strings. The explicit calls to # unicode() below are harmless in python2 but will do the # right conversion in python 3. fg_color = (curses.tigetstr("setaf") or curses.tigetstr("setf") or "") if (3, 0) < sys.version_info < (3, 2, 3): fg_color = str(fg_color, "ascii") for levelno, code in self.DEFAULT_COLORS.items(): self._colors[levelno] = str( curses.tparm(fg_color, code), "ascii") self._normal = str(curses.tigetstr("sgr0"), "ascii") scr = curses.initscr() self.termwidth = scr.getmaxyx()[1] curses.endwin() else: self._normal = '' # Default width is usually 80, but too wide is # worse than too narrow self.termwidth = 70
[ "def", "__init__", "(", "self", ",", "color", "=", "True", ",", "datefmt", "=", "None", ")", ":", "logging", ".", "Formatter", ".", "__init__", "(", "self", ",", "datefmt", "=", "datefmt", ")", "self", ".", "_colors", "=", "{", "}", "if", "color", "and", "_stderr_supports_color", "(", ")", ":", "# The curses module has some str/bytes confusion in", "# python3. Until version 3.2.3, most methods return", "# bytes, but only accept strings. In addition, we want to", "# output these strings with the logging module, which", "# works with unicode strings. The explicit calls to", "# unicode() below are harmless in python2 but will do the", "# right conversion in python 3.", "fg_color", "=", "(", "curses", ".", "tigetstr", "(", "\"setaf\"", ")", "or", "curses", ".", "tigetstr", "(", "\"setf\"", ")", "or", "\"\"", ")", "if", "(", "3", ",", "0", ")", "<", "sys", ".", "version_info", "<", "(", "3", ",", "2", ",", "3", ")", ":", "fg_color", "=", "str", "(", "fg_color", ",", "\"ascii\"", ")", "for", "levelno", ",", "code", "in", "self", ".", "DEFAULT_COLORS", ".", "items", "(", ")", ":", "self", ".", "_colors", "[", "levelno", "]", "=", "str", "(", "curses", ".", "tparm", "(", "fg_color", ",", "code", ")", ",", "\"ascii\"", ")", "self", ".", "_normal", "=", "str", "(", "curses", ".", "tigetstr", "(", "\"sgr0\"", ")", ",", "\"ascii\"", ")", "scr", "=", "curses", ".", "initscr", "(", ")", "self", ".", "termwidth", "=", "scr", ".", "getmaxyx", "(", ")", "[", "1", "]", "curses", ".", "endwin", "(", ")", "else", ":", "self", ".", "_normal", "=", "''", "# Default width is usually 80, but too wide is", "# worse than too narrow", "self", ".", "termwidth", "=", "70" ]
[ 49, 4 ]
[ 90, 31 ]
python
cy
['en', 'cy', 'hi']
False
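A hedged wiring sketch for the formatter above; in real Tornado code this is usually done via the tornado.log helpers, so treat the manual setup as illustrative:

import logging

handler = logging.StreamHandler()
handler.setFormatter(LogFormatter(color=True))
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)
root.info("colored on a color-capable TTY, plain otherwise")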
get_token
(h: torch.tensor, x: torch.tensor, token: int)
Get specific token embedding (e.g. [CLS])
Get specific token embedding (e.g. [CLS])
def get_token(h: torch.tensor, x: torch.tensor, token: int): """ Get specific token embedding (e.g. [CLS]) """ emb_size = h.shape[-1] token_h = h.view(-1, emb_size) flat = x.contiguous().view(-1) # get contextualized embedding of given token token_h = token_h[flat == token, :] return token_h
[ "def", "get_token", "(", "h", ":", "torch", ".", "tensor", ",", "x", ":", "torch", ".", "tensor", ",", "token", ":", "int", ")", ":", "emb_size", "=", "h", ".", "shape", "[", "-", "1", "]", "token_h", "=", "h", ".", "view", "(", "-", "1", ",", "emb_size", ")", "flat", "=", "x", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ")", "# get contextualized embedding of given token", "token_h", "=", "token_h", "[", "flat", "==", "token", ",", ":", "]", "return", "token_h" ]
[ 10, 0 ]
[ 20, 18 ]
python
en
['en', 'de', 'en']
True
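A toy check of the gather above (token id 101 follows the BERT [CLS] convention; purely an assumption here):

import torch

x = torch.tensor([[101, 7, 8], [101, 9, 10]])   # 2 sequences, [CLS] first
h = torch.randn(2, 3, 4)                        # matching hidden states
cls_h = get_token(h, x, token=101)
print(cls_h.shape)  # torch.Size([2, 4]) -- one row per [CLS] occurrence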
_best_version
(fields)
Detect the best version depending on the fields used.
Detect the best version depending on the fields used.
def _best_version(fields): """Detect the best version depending on the fields used.""" def _has_marker(keys, markers): for marker in markers: if marker in keys: return True return False keys = [] for key, value in fields.items(): if value in ([], 'UNKNOWN', None): continue keys.append(key) possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] # first let's try to see if a field is not part of one of the version for key in keys: if key not in _241_FIELDS and '1.0' in possible_versions: possible_versions.remove('1.0') logger.debug('Removed 1.0 due to %s', key) if key not in _314_FIELDS and '1.1' in possible_versions: possible_versions.remove('1.1') logger.debug('Removed 1.1 due to %s', key) if key not in _345_FIELDS and '1.2' in possible_versions: possible_versions.remove('1.2') logger.debug('Removed 1.2 due to %s', key) if key not in _566_FIELDS and '1.3' in possible_versions: possible_versions.remove('1.3') logger.debug('Removed 1.3 due to %s', key) if key not in _566_FIELDS and '2.1' in possible_versions: if key != 'Description': # In 2.1, description allowed after headers possible_versions.remove('2.1') logger.debug('Removed 2.1 due to %s', key) if key not in _426_FIELDS and '2.0' in possible_versions: possible_versions.remove('2.0') logger.debug('Removed 2.0 due to %s', key) # possible_version contains qualified versions if len(possible_versions) == 1: return possible_versions[0] # found ! elif len(possible_versions) == 0: logger.debug('Out of options - unknown metadata set: %s', fields) raise MetadataConflictError('Unknown metadata set') # let's see if one unique marker is found is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') # we have the choice, 1.0, or 1.2, or 2.0 # - 1.0 has a broken Summary field but works with all tools # - 1.1 is to avoid # - 1.2 fixes Summary but has little adoption # - 2.0 adds more features and is very new if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: # we couldn't find any specific marker if PKG_INFO_PREFERRED_VERSION in possible_versions: return PKG_INFO_PREFERRED_VERSION if is_1_1: return '1.1' if is_1_2: return '1.2' if is_2_1: return '2.1' return '2.0'
[ "def", "_best_version", "(", "fields", ")", ":", "def", "_has_marker", "(", "keys", ",", "markers", ")", ":", "for", "marker", "in", "markers", ":", "if", "marker", "in", "keys", ":", "return", "True", "return", "False", "keys", "=", "[", "]", "for", "key", ",", "value", "in", "fields", ".", "items", "(", ")", ":", "if", "value", "in", "(", "[", "]", ",", "'UNKNOWN'", ",", "None", ")", ":", "continue", "keys", ".", "append", "(", "key", ")", "possible_versions", "=", "[", "'1.0'", ",", "'1.1'", ",", "'1.2'", ",", "'1.3'", ",", "'2.0'", ",", "'2.1'", "]", "# first let's try to see if a field is not part of one of the version", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "_241_FIELDS", "and", "'1.0'", "in", "possible_versions", ":", "possible_versions", ".", "remove", "(", "'1.0'", ")", "logger", ".", "debug", "(", "'Removed 1.0 due to %s'", ",", "key", ")", "if", "key", "not", "in", "_314_FIELDS", "and", "'1.1'", "in", "possible_versions", ":", "possible_versions", ".", "remove", "(", "'1.1'", ")", "logger", ".", "debug", "(", "'Removed 1.1 due to %s'", ",", "key", ")", "if", "key", "not", "in", "_345_FIELDS", "and", "'1.2'", "in", "possible_versions", ":", "possible_versions", ".", "remove", "(", "'1.2'", ")", "logger", ".", "debug", "(", "'Removed 1.2 due to %s'", ",", "key", ")", "if", "key", "not", "in", "_566_FIELDS", "and", "'1.3'", "in", "possible_versions", ":", "possible_versions", ".", "remove", "(", "'1.3'", ")", "logger", ".", "debug", "(", "'Removed 1.3 due to %s'", ",", "key", ")", "if", "key", "not", "in", "_566_FIELDS", "and", "'2.1'", "in", "possible_versions", ":", "if", "key", "!=", "'Description'", ":", "# In 2.1, description allowed after headers", "possible_versions", ".", "remove", "(", "'2.1'", ")", "logger", ".", "debug", "(", "'Removed 2.1 due to %s'", ",", "key", ")", "if", "key", "not", "in", "_426_FIELDS", "and", "'2.0'", "in", "possible_versions", ":", "possible_versions", ".", "remove", "(", "'2.0'", ")", "logger", ".", "debug", "(", "'Removed 2.0 due to %s'", ",", "key", ")", "# possible_version contains qualified versions", "if", "len", "(", "possible_versions", ")", "==", "1", ":", "return", "possible_versions", "[", "0", "]", "# found !", "elif", "len", "(", "possible_versions", ")", "==", "0", ":", "logger", ".", "debug", "(", "'Out of options - unknown metadata set: %s'", ",", "fields", ")", "raise", "MetadataConflictError", "(", "'Unknown metadata set'", ")", "# let's see if one unique marker is found", "is_1_1", "=", "'1.1'", "in", "possible_versions", "and", "_has_marker", "(", "keys", ",", "_314_MARKERS", ")", "is_1_2", "=", "'1.2'", "in", "possible_versions", "and", "_has_marker", "(", "keys", ",", "_345_MARKERS", ")", "is_2_1", "=", "'2.1'", "in", "possible_versions", "and", "_has_marker", "(", "keys", ",", "_566_MARKERS", ")", "is_2_0", "=", "'2.0'", "in", "possible_versions", "and", "_has_marker", "(", "keys", ",", "_426_MARKERS", ")", "if", "int", "(", "is_1_1", ")", "+", "int", "(", "is_1_2", ")", "+", "int", "(", "is_2_1", ")", "+", "int", "(", "is_2_0", ")", ">", "1", ":", "raise", "MetadataConflictError", "(", "'You used incompatible 1.1/1.2/2.0/2.1 fields'", ")", "# we have the choice, 1.0, or 1.2, or 2.0", "# - 1.0 has a broken Summary field but works with all tools", "# - 1.1 is to avoid", "# - 1.2 fixes Summary but has little adoption", "# - 2.0 adds more features and is very new", "if", "not", "is_1_1", "and", "not", "is_1_2", "and", "not", "is_2_1", "and", "not", "is_2_0", ":", "# we couldn't find any specific marker", 
"if", "PKG_INFO_PREFERRED_VERSION", "in", "possible_versions", ":", "return", "PKG_INFO_PREFERRED_VERSION", "if", "is_1_1", ":", "return", "'1.1'", "if", "is_1_2", ":", "return", "'1.2'", "if", "is_2_1", ":", "return", "'2.1'", "return", "'2.0'" ]
[ 127, 0 ]
[ 196, 16 ]
python
en
['en', 'en', 'en']
True
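A hedged example of the fallback path: with only Metadata 1.0-era fields and no version-specific markers, the function returns PKG_INFO_PREFERRED_VERSION ('1.1' in distlib, if memory serves):

fields = {'Name': 'demo', 'Version': '1.0', 'Summary': 'an example'}
print(_best_version(fields))  # '1.1' -- nothing narrowed the candidates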
_get_name_and_version
(name, version, for_filename=False)
Return the distribution name with version. If for_filename is true, return a filename-escaped form.
Return the distribution name with version.
def _get_name_and_version(name, version, for_filename=False): """Return the distribution name with version. If for_filename is true, return a filename-escaped form.""" if for_filename: # For both name and version any runs of non-alphanumeric or '.' # characters are replaced with a single '-'. Additionally any # spaces in the version string become '.' name = _FILESAFE.sub('-', name) version = _FILESAFE.sub('-', version.replace(' ', '.')) return '%s-%s' % (name, version)
[ "def", "_get_name_and_version", "(", "name", ",", "version", ",", "for_filename", "=", "False", ")", ":", "if", "for_filename", ":", "# For both name and version any runs of non-alphanumeric or '.'", "# characters are replaced with a single '-'. Additionally any", "# spaces in the version string become '.'", "name", "=", "_FILESAFE", ".", "sub", "(", "'-'", ",", "name", ")", "version", "=", "_FILESAFE", ".", "sub", "(", "'-'", ",", "version", ".", "replace", "(", "' '", ",", "'.'", ")", ")", "return", "'%s-%s'", "%", "(", "name", ",", "version", ")" ]
[ 224, 0 ]
[ 234, 36 ]
python
en
['en', 'en', 'en']
True
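Two hedged calls showing the escaping described in the comment (assuming _FILESAFE matches runs of characters outside alphanumerics and '.'):

print(_get_name_and_version('My Project', '1.0 beta'))
# 'My Project-1.0 beta'
print(_get_name_and_version('My Project', '1.0 beta', for_filename=True))
# 'My-Project-1.0.beta' -- spaces collapse to '-' in the name, '.' in the version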
LegacyMetadata.get_fullname
(self, filesafe=False)
Return the distribution name with version. If filesafe is true, return a filename-escaped form.
Return the distribution name with version.
def get_fullname(self, filesafe=False): """Return the distribution name with version. If filesafe is true, return a filename-escaped form.""" return _get_name_and_version(self['Name'], self['Version'], filesafe)
[ "def", "get_fullname", "(", "self", ",", "filesafe", "=", "False", ")", ":", "return", "_get_name_and_version", "(", "self", "[", "'Name'", "]", ",", "self", "[", "'Version'", "]", ",", "filesafe", ")" ]
[ 316, 4 ]
[ 320, 77 ]
python
en
['en', 'en', 'en']
True
LegacyMetadata.is_field
(self, name)
return True if name is a valid metadata key
return True if name is a valid metadata key
def is_field(self, name): """return True if name is a valid metadata key""" name = self._convert_name(name) return name in _ALL_FIELDS
[ "def", "is_field", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "_convert_name", "(", "name", ")", "return", "name", "in", "_ALL_FIELDS" ]
[ 322, 4 ]
[ 325, 34 ]
python
en
['en', 'et', 'en']
True
LegacyMetadata.read
(self, filepath)
Read the metadata values from a file path.
Read the metadata values from a file path.
def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close()
[ "def", "read", "(", "self", ",", "filepath", ")", ":", "fp", "=", "codecs", ".", "open", "(", "filepath", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "try", ":", "self", ".", "read_file", "(", "fp", ")", "finally", ":", "fp", ".", "close", "(", ")" ]
[ 331, 4 ]
[ 337, 22 ]
python
en
['en', 'en', 'en']
True
LegacyMetadata.read_file
(self, fileob)
Read the metadata values from a file object.
Read the metadata values from a file object.
def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) # PEP 566 specifies that the body be used for the description, if # available body = msg.get_payload() self["Description"] = body if body else self["Description"]
[ "def", "read_file", "(", "self", ",", "fileob", ")", ":", "msg", "=", "message_from_file", "(", "fileob", ")", "self", ".", "_fields", "[", "'Metadata-Version'", "]", "=", "msg", "[", "'metadata-version'", "]", "# When reading, get all the fields we can", "for", "field", "in", "_ALL_FIELDS", ":", "if", "field", "not", "in", "msg", ":", "continue", "if", "field", "in", "_LISTFIELDS", ":", "# we can have multiple lines", "values", "=", "msg", ".", "get_all", "(", "field", ")", "if", "field", "in", "_LISTTUPLEFIELDS", "and", "values", "is", "not", "None", ":", "values", "=", "[", "tuple", "(", "value", ".", "split", "(", "','", ")", ")", "for", "value", "in", "values", "]", "self", ".", "set", "(", "field", ",", "values", ")", "else", ":", "# single line", "value", "=", "msg", "[", "field", "]", "if", "value", "is", "not", "None", "and", "value", "!=", "'UNKNOWN'", ":", "self", ".", "set", "(", "field", ",", "value", ")", "# PEP 566 specifies that the body be used for the description, if", "# available", "body", "=", "msg", ".", "get_payload", "(", ")", "self", "[", "\"Description\"", "]", "=", "body", "if", "body", "else", "self", "[", "\"Description\"", "]" ]
[ 339, 4 ]
[ 363, 67 ]
python
en
['en', 'en', 'en']
True
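Because the parser is email's message_from_file, any file-like object with RFC 822-style headers works. A minimal hedged round trip (assuming LegacyMetadata can be constructed empty):

import io

md = LegacyMetadata()
md.read_file(io.StringIO("Metadata-Version: 1.0\nName: demo\nVersion: 1.0\n"))
print(md['Name'], md['Version'])  # demo 1.0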
LegacyMetadata.write
(self, filepath, skip_unknown=False)
Write the metadata fields to filepath.
Write the metadata fields to filepath.
def write(self, filepath, skip_unknown=False): """Write the metadata fields to filepath.""" fp = codecs.open(filepath, 'w', encoding='utf-8') try: self.write_file(fp, skip_unknown) finally: fp.close()
[ "def", "write", "(", "self", ",", "filepath", ",", "skip_unknown", "=", "False", ")", ":", "fp", "=", "codecs", ".", "open", "(", "filepath", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "try", ":", "self", ".", "write_file", "(", "fp", ",", "skip_unknown", ")", "finally", ":", "fp", ".", "close", "(", ")" ]
[ 367, 4 ]
[ 373, 22 ]
python
en
['en', 'el-Latn', 'en']
True
LegacyMetadata.write_file
(self, fileobject, skip_unknown=False)
Write the PKG-INFO format data to a file object.
Write the PKG-INFO format data to a file object.
def write_file(self, fileobject, skip_unknown=False): """Write the PKG-INFO format data to a file object.""" self.set_metadata_version() for field in _version2fieldlist(self['Metadata-Version']): values = self.get(field) if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): continue if field in _ELEMENTSFIELD: self._write_field(fileobject, field, ','.join(values)) continue if field not in _LISTFIELDS: if field == 'Description': if self.metadata_version in ('1.0', '1.1'): values = values.replace('\n', '\n ') else: values = values.replace('\n', '\n |') values = [values] if field in _LISTTUPLEFIELDS: values = [','.join(value) for value in values] for value in values: self._write_field(fileobject, field, value)
[ "def", "write_file", "(", "self", ",", "fileobject", ",", "skip_unknown", "=", "False", ")", ":", "self", ".", "set_metadata_version", "(", ")", "for", "field", "in", "_version2fieldlist", "(", "self", "[", "'Metadata-Version'", "]", ")", ":", "values", "=", "self", ".", "get", "(", "field", ")", "if", "skip_unknown", "and", "values", "in", "(", "'UNKNOWN'", ",", "[", "]", ",", "[", "'UNKNOWN'", "]", ")", ":", "continue", "if", "field", "in", "_ELEMENTSFIELD", ":", "self", ".", "_write_field", "(", "fileobject", ",", "field", ",", "','", ".", "join", "(", "values", ")", ")", "continue", "if", "field", "not", "in", "_LISTFIELDS", ":", "if", "field", "==", "'Description'", ":", "if", "self", ".", "metadata_version", "in", "(", "'1.0'", ",", "'1.1'", ")", ":", "values", "=", "values", ".", "replace", "(", "'\\n'", ",", "'\\n '", ")", "else", ":", "values", "=", "values", ".", "replace", "(", "'\\n'", ",", "'\\n |'", ")", "values", "=", "[", "values", "]", "if", "field", "in", "_LISTTUPLEFIELDS", ":", "values", "=", "[", "','", ".", "join", "(", "value", ")", "for", "value", "in", "values", "]", "for", "value", "in", "values", ":", "self", ".", "_write_field", "(", "fileobject", ",", "field", ",", "value", ")" ]
[ 375, 4 ]
[ 398, 59 ]
python
en
['en', 'en', 'en']
True
LegacyMetadata.update
(self, other=None, **kwargs)
Set metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped.
Set metadata values from the given iterable `other` and kwargs.
def update(self, other=None, **kwargs): """Set metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. """ def _set(key, value): if key in _ATTR2FIELD and value: self.set(self._convert_name(key), value) if not other: # other is None or empty container pass elif hasattr(other, 'keys'): for k in other.keys(): _set(k, other[k]) else: for k, v in other: _set(k, v) if kwargs: for k, v in kwargs.items(): _set(k, v)
[ "def", "update", "(", "self", ",", "other", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "_set", "(", "key", ",", "value", ")", ":", "if", "key", "in", "_ATTR2FIELD", "and", "value", ":", "self", ".", "set", "(", "self", ".", "_convert_name", "(", "key", ")", ",", "value", ")", "if", "not", "other", ":", "# other is None or empty container", "pass", "elif", "hasattr", "(", "other", ",", "'keys'", ")", ":", "for", "k", "in", "other", ".", "keys", "(", ")", ":", "_set", "(", "k", ",", "other", "[", "k", "]", ")", "else", ":", "for", "k", ",", "v", "in", "other", ":", "_set", "(", "k", ",", "v", ")", "if", "kwargs", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "_set", "(", "k", ",", "v", ")" ]
[ 400, 4 ]
[ 426, 26 ]
python
en
['en', 'en', 'en']
True
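A short illustration of update: keys are attribute-style names mapped through _ATTR2FIELD, and unknown or empty keys are silently dropped (empty-constructor assumption as above):

md = LegacyMetadata()
md.update({'name': 'demo', 'version': '1.0'}, summary='an example')
print(md['Name'], md['Version'], md['Summary'])  # demo 1.0 an example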
LegacyMetadata.set
(self, name, value)
Control then set a metadata field.
Control then set a metadata field.
def set(self, name, value): """Control then set a metadata field.""" name = self._convert_name(name) if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [v.strip() for v in value.split(',')] else: value = [] elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [value] else: value = [] if logger.isEnabledFor(logging.WARNING): project_name = self['Name'] scheme = get_scheme(self.scheme) if name in _PREDICATE_FIELDS and value is not None: for v in value: # check that the values are valid if not scheme.is_valid_matcher(v.split(';')[0]): logger.warning( "'%s': '%s' is not valid (field '%s')", project_name, v, name) # FIXME this rejects UNKNOWN, is that right? elif name in _VERSIONS_FIELDS and value is not None: if not scheme.is_valid_constraint_list(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) elif name in _VERSION_FIELDS and value is not None: if not scheme.is_valid_version(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) if name in _UNICODEFIELDS: if name == 'Description': value = self._remove_line_prefix(value) self._fields[name] = value
[ "def", "set", "(", "self", ",", "name", ",", "value", ")", ":", "name", "=", "self", ".", "_convert_name", "(", "name", ")", "if", "(", "(", "name", "in", "_ELEMENTSFIELD", "or", "name", "==", "'Platform'", ")", "and", "not", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "value", "=", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", ".", "split", "(", "','", ")", "]", "else", ":", "value", "=", "[", "]", "elif", "(", "name", "in", "_LISTFIELDS", "and", "not", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "value", "=", "[", "value", "]", "else", ":", "value", "=", "[", "]", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "WARNING", ")", ":", "project_name", "=", "self", "[", "'Name'", "]", "scheme", "=", "get_scheme", "(", "self", ".", "scheme", ")", "if", "name", "in", "_PREDICATE_FIELDS", "and", "value", "is", "not", "None", ":", "for", "v", "in", "value", ":", "# check that the values are valid", "if", "not", "scheme", ".", "is_valid_matcher", "(", "v", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ":", "logger", ".", "warning", "(", "\"'%s': '%s' is not valid (field '%s')\"", ",", "project_name", ",", "v", ",", "name", ")", "# FIXME this rejects UNKNOWN, is that right?", "elif", "name", "in", "_VERSIONS_FIELDS", "and", "value", "is", "not", "None", ":", "if", "not", "scheme", ".", "is_valid_constraint_list", "(", "value", ")", ":", "logger", ".", "warning", "(", "\"'%s': '%s' is not a valid version (field '%s')\"", ",", "project_name", ",", "value", ",", "name", ")", "elif", "name", "in", "_VERSION_FIELDS", "and", "value", "is", "not", "None", ":", "if", "not", "scheme", ".", "is_valid_version", "(", "value", ")", ":", "logger", ".", "warning", "(", "\"'%s': '%s' is not a valid version (field '%s')\"", ",", "project_name", ",", "value", ",", "name", ")", "if", "name", "in", "_UNICODEFIELDS", ":", "if", "name", "==", "'Description'", ":", "value", "=", "self", ".", "_remove_line_prefix", "(", "value", ")", "self", ".", "_fields", "[", "name", "]", "=", "value" ]
[ 428, 4 ]
[ 470, 34 ]
python
en
['en', 'lb', 'en']
True
LegacyMetadata.get
(self, name, default=_MISSING)
Get a metadata field.
Get a metadata field.
def get(self, name, default=_MISSING): """Get a metadata field.""" name = self._convert_name(name) if name not in self._fields: if default is _MISSING: default = self._default_value(name) return default if name in _UNICODEFIELDS: value = self._fields[name] return value elif name in _LISTFIELDS: value = self._fields[name] if value is None: return [] res = [] for val in value: if name not in _LISTTUPLEFIELDS: res.append(val) else: # That's for Project-URL res.append((val[0], val[1])) return res elif name in _ELEMENTSFIELD: value = self._fields[name] if isinstance(value, string_types): return value.split(',') return self._fields[name]
[ "def", "get", "(", "self", ",", "name", ",", "default", "=", "_MISSING", ")", ":", "name", "=", "self", ".", "_convert_name", "(", "name", ")", "if", "name", "not", "in", "self", ".", "_fields", ":", "if", "default", "is", "_MISSING", ":", "default", "=", "self", ".", "_default_value", "(", "name", ")", "return", "default", "if", "name", "in", "_UNICODEFIELDS", ":", "value", "=", "self", ".", "_fields", "[", "name", "]", "return", "value", "elif", "name", "in", "_LISTFIELDS", ":", "value", "=", "self", ".", "_fields", "[", "name", "]", "if", "value", "is", "None", ":", "return", "[", "]", "res", "=", "[", "]", "for", "val", "in", "value", ":", "if", "name", "not", "in", "_LISTTUPLEFIELDS", ":", "res", ".", "append", "(", "val", ")", "else", ":", "# That's for Project-URL", "res", ".", "append", "(", "(", "val", "[", "0", "]", ",", "val", "[", "1", "]", ")", ")", "return", "res", "elif", "name", "in", "_ELEMENTSFIELD", ":", "value", "=", "self", ".", "_fields", "[", "name", "]", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "return", "value", ".", "split", "(", "','", ")", "return", "self", ".", "_fields", "[", "name", "]" ]
[ 472, 4 ]
[ 499, 33 ]
python
en
['ro', 'lb', 'en']
False
LegacyMetadata.check
(self, strict=False)
Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided
Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided
def check(self, strict=False): """Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append("Wrong value for '%s': %s" % (field, value)) return missing, warnings
[ "def", "check", "(", "self", ",", "strict", "=", "False", ")", ":", "self", ".", "set_metadata_version", "(", ")", "# XXX should check the versions (if the file was loaded)", "missing", ",", "warnings", "=", "[", "]", ",", "[", "]", "for", "attr", "in", "(", "'Name'", ",", "'Version'", ")", ":", "# required by PEP 345", "if", "attr", "not", "in", "self", ":", "missing", ".", "append", "(", "attr", ")", "if", "strict", "and", "missing", "!=", "[", "]", ":", "msg", "=", "'missing required metadata: %s'", "%", "', '", ".", "join", "(", "missing", ")", "raise", "MetadataMissingError", "(", "msg", ")", "for", "attr", "in", "(", "'Home-page'", ",", "'Author'", ")", ":", "if", "attr", "not", "in", "self", ":", "missing", ".", "append", "(", "attr", ")", "# checking metadata 1.2 (XXX needs to check 1.1, 1.0)", "if", "self", "[", "'Metadata-Version'", "]", "!=", "'1.2'", ":", "return", "missing", ",", "warnings", "scheme", "=", "get_scheme", "(", "self", ".", "scheme", ")", "def", "are_valid_constraints", "(", "value", ")", ":", "for", "v", "in", "value", ":", "if", "not", "scheme", ".", "is_valid_matcher", "(", "v", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ":", "return", "False", "return", "True", "for", "fields", ",", "controller", "in", "(", "(", "_PREDICATE_FIELDS", ",", "are_valid_constraints", ")", ",", "(", "_VERSIONS_FIELDS", ",", "scheme", ".", "is_valid_constraint_list", ")", ",", "(", "_VERSION_FIELDS", ",", "scheme", ".", "is_valid_version", ")", ")", ":", "for", "field", "in", "fields", ":", "value", "=", "self", ".", "get", "(", "field", ",", "None", ")", "if", "value", "is", "not", "None", "and", "not", "controller", "(", "value", ")", ":", "warnings", ".", "append", "(", "\"Wrong value for '%s': %s\"", "%", "(", "field", ",", "value", ")", ")", "return", "missing", ",", "warnings" ]
[ 501, 4 ]
[ 543, 32 ]
python
en
['en', 'en', 'en']
True
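A hedged demonstration of the compliance check; with only a name set, Version is reported missing along with the recommended Home-page and Author fields:

md = LegacyMetadata()
md.set('Name', 'demo')
missing, warnings = md.check()
print(missing)  # ['Version', 'Home-page', 'Author'] (order per the code above)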
LegacyMetadata.todict
(self, skip_missing=False)
Return fields as a dict. Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). This is as per https://www.python.org/dev/peps/pep-0566/#id17.
Return fields as a dict.
def todict(self, skip_missing=False): """Return fields as a dict. Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). This is as per https://www.python.org/dev/peps/pep-0566/#id17. """ self.set_metadata_version() fields = _version2fieldlist(self['Metadata-Version']) data = {} for field_name in fields: if not skip_missing or field_name in self._fields: key = _FIELD2ATTR[field_name] if key != 'project_url': data[key] = self[field_name] else: data[key] = [','.join(u) for u in self[field_name]] return data
[ "def", "todict", "(", "self", ",", "skip_missing", "=", "False", ")", ":", "self", ".", "set_metadata_version", "(", ")", "fields", "=", "_version2fieldlist", "(", "self", "[", "'Metadata-Version'", "]", ")", "data", "=", "{", "}", "for", "field_name", "in", "fields", ":", "if", "not", "skip_missing", "or", "field_name", "in", "self", ".", "_fields", ":", "key", "=", "_FIELD2ATTR", "[", "field_name", "]", "if", "key", "!=", "'project_url'", ":", "data", "[", "key", "]", "=", "self", "[", "field_name", "]", "else", ":", "data", "[", "key", "]", "=", "[", "','", ".", "join", "(", "u", ")", "for", "u", "in", "self", "[", "field_name", "]", "]", "return", "data" ]
[ 545, 4 ]
[ 566, 19 ]
python
en
['en', 'en', 'en']
True
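A companion sketch for ``todict``, under the same assumption that this is ``distlib.metadata.LegacyMetadata``; the exact keys depend on the metadata version that gets computed::

    from distlib.metadata import LegacyMetadata

    md = LegacyMetadata()
    md['Name'] = 'demo'
    md['Version'] = '1.0'
    print(md.todict(skip_missing=True))
    # e.g. {'metadata_version': '1.0', 'name': 'demo', 'version': '1.0'}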
Metadata.get_requirements
(self, reqts, extras=None, env=None)
Base method to get dependencies, given a set of extras to satisfy and an optional environment context. :param reqts: A list of sometimes-wanted dependencies, perhaps dependent on extras and environment. :param extras: A list of optional components being requested. :param env: An optional environment for marker evaluation.
Base method to get dependencies, given a set of extras to satisfy and an optional environment context. :param reqts: A list of sometimes-wanted dependencies, perhaps dependent on extras and environment. :param extras: A list of optional components being requested. :param env: An optional environment for marker evaluation.
def get_requirements(self, reqts, extras=None, env=None):
    """
    Base method to get dependencies, given a set of extras
    to satisfy and an optional environment context.
    :param reqts: A list of sometimes-wanted dependencies,
                  perhaps dependent on extras and environment.
    :param extras: A list of optional components being requested.
    :param env: An optional environment for marker evaluation.
    """
    if self._legacy:
        result = reqts
    else:
        result = []
        extras = get_extras(extras or [], self.extras)
        for d in reqts:
            if 'extra' not in d and 'environment' not in d:
                # unconditional
                include = True
            else:
                if 'extra' not in d:
                    # Not extra-dependent - only environment-dependent
                    include = True
                else:
                    include = d.get('extra') in extras
                if include:
                    # Not excluded because of extras, check environment
                    marker = d.get('environment')
                    if marker:
                        include = interpret(marker, env)
            if include:
                result.extend(d['requires'])
        for key in ('build', 'dev', 'test'):
            e = ':%s:' % key
            if e in extras:
                extras.remove(e)
                # A recursive call, but it should terminate since 'test'
                # has been removed from the extras
                reqts = self._data.get('%s_requires' % key, [])
                result.extend(self.get_requirements(reqts, extras=extras,
                                                    env=env))
    return result
[ "def", "get_requirements", "(", "self", ",", "reqts", ",", "extras", "=", "None", ",", "env", "=", "None", ")", ":", "if", "self", ".", "_legacy", ":", "result", "=", "reqts", "else", ":", "result", "=", "[", "]", "extras", "=", "get_extras", "(", "extras", "or", "[", "]", ",", "self", ".", "extras", ")", "for", "d", "in", "reqts", ":", "if", "'extra'", "not", "in", "d", "and", "'environment'", "not", "in", "d", ":", "# unconditional", "include", "=", "True", "else", ":", "if", "'extra'", "not", "in", "d", ":", "# Not extra-dependent - only environment-dependent", "include", "=", "True", "else", ":", "include", "=", "d", ".", "get", "(", "'extra'", ")", "in", "extras", "if", "include", ":", "# Not excluded because of extras, check environment", "marker", "=", "d", ".", "get", "(", "'environment'", ")", "if", "marker", ":", "include", "=", "interpret", "(", "marker", ",", "env", ")", "if", "include", ":", "result", ".", "extend", "(", "d", "[", "'requires'", "]", ")", "for", "key", "in", "(", "'build'", ",", "'dev'", ",", "'test'", ")", ":", "e", "=", "':%s:'", "%", "key", "if", "e", "in", "extras", ":", "extras", ".", "remove", "(", "e", ")", "# A recursive call, but it should terminate since 'test'", "# has been removed from the extras", "reqts", "=", "self", ".", "_data", ".", "get", "(", "'%s_requires'", "%", "key", ",", "[", "]", ")", "result", ".", "extend", "(", "self", ".", "get_requirements", "(", "reqts", ",", "extras", "=", "extras", ",", "env", "=", "env", ")", ")", "return", "result" ]
[ 826, 4 ]
[ 866, 21 ]
python
en
['en', 'error', 'th']
False
DatabaseSchemaEditor._is_referenced_by_fk_constraint
(self, table_name, column_name=None, ignore_self=False)
Return whether or not the provided table name is referenced by another one. If `column_name` is specified, only references pointing to that column are considered. If `ignore_self` is True, self-referential constraints are ignored.
Return whether or not the provided table name is referenced by another one. If `column_name` is specified, only references pointing to that column are considered. If `ignore_self` is True, self-referential constraints are ignored.
def _is_referenced_by_fk_constraint(self, table_name, column_name=None, ignore_self=False):
    """
    Return whether or not the provided table name is referenced by another
    one. If `column_name` is specified, only references pointing to that
    column are considered. If `ignore_self` is True, self-referential
    constraints are ignored.
    """
    with self.connection.cursor() as cursor:
        for other_table in self.connection.introspection.get_table_list(cursor):
            if ignore_self and other_table.name == table_name:
                continue
            constraints = self.connection.introspection._get_foreign_key_constraints(cursor, other_table.name)
            for constraint in constraints.values():
                constraint_table, constraint_column = constraint['foreign_key']
                if (constraint_table == table_name and
                        (column_name is None or constraint_column == column_name)):
                    return True
    return False
[ "def", "_is_referenced_by_fk_constraint", "(", "self", ",", "table_name", ",", "column_name", "=", "None", ",", "ignore_self", "=", "False", ")", ":", "with", "self", ".", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "for", "other_table", "in", "self", ".", "connection", ".", "introspection", ".", "get_table_list", "(", "cursor", ")", ":", "if", "ignore_self", "and", "other_table", ".", "name", "==", "table_name", ":", "continue", "constraints", "=", "self", ".", "connection", ".", "introspection", ".", "_get_foreign_key_constraints", "(", "cursor", ",", "other_table", ".", "name", ")", "for", "constraint", "in", "constraints", ".", "values", "(", ")", ":", "constraint_table", ",", "constraint_column", "=", "constraint", "[", "'foreign_key'", "]", "if", "(", "constraint_table", "==", "table_name", "and", "(", "column_name", "is", "None", "or", "constraint_column", "==", "column_name", ")", ")", ":", "return", "True", "return", "False" ]
[ 66, 4 ]
[ 83, 20 ]
python
en
['en', 'error', 'th']
False
DatabaseSchemaEditor._remake_table
(self, model, create_field=None, delete_field=None, alter_field=None)
Shortcut to transform a model from old_model into new_model This follows the correct procedure to perform non-rename or column addition operations based on SQLite's documentation https://www.sqlite.org/lang_altertable.html#caution The essential steps are: 1. Create a table with the updated definition called "new__app_model" 2. Copy the data from the existing "app_model" table to the new table 3. Drop the "app_model" table 4. Rename the "new__app_model" table to "app_model" 5. Restore any index of the previous "app_model" table.
Shortcut to transform a model from old_model into new_model
def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None):
    """
    Shortcut to transform a model from old_model into new_model

    This follows the correct procedure to perform non-rename or column
    addition operations based on SQLite's documentation

    https://www.sqlite.org/lang_altertable.html#caution

    The essential steps are:
      1. Create a table with the updated definition called "new__app_model"
      2. Copy the data from the existing "app_model" table to the new table
      3. Drop the "app_model" table
      4. Rename the "new__app_model" table to "app_model"
      5. Restore any index of the previous "app_model" table.
    """
    # Self-referential fields must be recreated rather than copied from
    # the old model to ensure their remote_field.field_name doesn't refer
    # to an altered field.
    def is_self_referential(f):
        return f.is_relation and f.remote_field.model is model
    # Work out the new fields dict / mapping
    body = {
        f.name: f.clone() if is_self_referential(f) else f
        for f in model._meta.local_concrete_fields
    }
    # Since mapping might mix column names and default values,
    # its values must be already quoted.
    mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
    # This maps field names (not columns) for things like unique_together
    rename_mapping = {}
    # If any of the new or altered fields is introducing a new PK,
    # remove the old one
    restore_pk_field = None
    if getattr(create_field, 'primary_key', False) or (
            alter_field and getattr(alter_field[1], 'primary_key', False)):
        for name, field in list(body.items()):
            if field.primary_key:
                field.primary_key = False
                restore_pk_field = field
                if field.auto_created:
                    del body[name]
                    del mapping[field.column]
    # Add in any created fields
    if create_field:
        body[create_field.name] = create_field
        # Choose a default and insert it into the copy map
        if not create_field.many_to_many and create_field.concrete:
            mapping[create_field.column] = self.quote_value(
                self.effective_default(create_field)
            )
    # Add in any altered fields
    if alter_field:
        old_field, new_field = alter_field
        body.pop(old_field.name, None)
        mapping.pop(old_field.column, None)
        body[new_field.name] = new_field
        if old_field.null and not new_field.null:
            case_sql = "coalesce(%(col)s, %(default)s)" % {
                'col': self.quote_name(old_field.column),
                'default': self.quote_value(self.effective_default(new_field))
            }
            mapping[new_field.column] = case_sql
        else:
            mapping[new_field.column] = self.quote_name(old_field.column)
        rename_mapping[old_field.name] = new_field.name
    # Remove any deleted fields
    if delete_field:
        del body[delete_field.name]
        del mapping[delete_field.column]
        # Remove any implicit M2M tables
        if delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created:
            return self.delete_model(delete_field.remote_field.through)
    # Work inside a new app registry
    apps = Apps()

    # Work out the new value of unique_together, taking renames into
    # account
    unique_together = [
        [rename_mapping.get(n, n) for n in unique]
        for unique in model._meta.unique_together
    ]

    # Work out the new value for index_together, taking renames into
    # account
    index_together = [
        [rename_mapping.get(n, n) for n in index]
        for index in model._meta.index_together
    ]

    indexes = model._meta.indexes
    if delete_field:
        indexes = [
            index for index in indexes
            if delete_field.name not in index.fields
        ]

    constraints = list(model._meta.constraints)

    # Provide isolated instances of the fields to the new model body so
    # that the existing model's internals aren't interfered with when
    # the dummy model is constructed.
    body_copy = copy.deepcopy(body)

    # Construct a new model with the new fields to allow self referential
    # primary key to resolve to. This model won't ever be materialized as a
    # table and solely exists for foreign key reference resolution purposes.
    # This wouldn't be required if the schema editor was operating on model
    # states instead of rendered models.
    meta_contents = {
        'app_label': model._meta.app_label,
        'db_table': model._meta.db_table,
        'unique_together': unique_together,
        'index_together': index_together,
        'indexes': indexes,
        'constraints': constraints,
        'apps': apps,
    }
    meta = type("Meta", (), meta_contents)
    body_copy['Meta'] = meta
    body_copy['__module__'] = model.__module__
    type(model._meta.object_name, model.__bases__, body_copy)

    # Construct a model with a renamed table name.
    body_copy = copy.deepcopy(body)
    meta_contents = {
        'app_label': model._meta.app_label,
        'db_table': 'new__%s' % strip_quotes(model._meta.db_table),
        'unique_together': unique_together,
        'index_together': index_together,
        'indexes': indexes,
        'constraints': constraints,
        'apps': apps,
    }
    meta = type("Meta", (), meta_contents)
    body_copy['Meta'] = meta
    body_copy['__module__'] = model.__module__
    new_model = type('New%s' % model._meta.object_name, model.__bases__, body_copy)

    # Create a new table with the updated schema.
    self.create_model(new_model)

    # Copy data from the old table into the new table
    self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
        self.quote_name(new_model._meta.db_table),
        ', '.join(self.quote_name(x) for x in mapping),
        ', '.join(mapping.values()),
        self.quote_name(model._meta.db_table),
    ))

    # Delete the old table to make way for the new
    self.delete_model(model, handle_autom2m=False)

    # Rename the new table to take the place of the old
    self.alter_db_table(
        new_model, new_model._meta.db_table, model._meta.db_table,
        disable_constraints=False,
    )

    # Run deferred SQL on correct table
    for sql in self.deferred_sql:
        self.execute(sql)
    self.deferred_sql = []
    # Fix any PK-removed field
    if restore_pk_field:
        restore_pk_field.primary_key = True
[ "def", "_remake_table", "(", "self", ",", "model", ",", "create_field", "=", "None", ",", "delete_field", "=", "None", ",", "alter_field", "=", "None", ")", ":", "# Self-referential fields must be recreated rather than copied from", "# the old model to ensure their remote_field.field_name doesn't refer", "# to an altered field.", "def", "is_self_referential", "(", "f", ")", ":", "return", "f", ".", "is_relation", "and", "f", ".", "remote_field", ".", "model", "is", "model", "# Work out the new fields dict / mapping", "body", "=", "{", "f", ".", "name", ":", "f", ".", "clone", "(", ")", "if", "is_self_referential", "(", "f", ")", "else", "f", "for", "f", "in", "model", ".", "_meta", ".", "local_concrete_fields", "}", "# Since mapping might mix column names and default values,", "# its values must be already quoted.", "mapping", "=", "{", "f", ".", "column", ":", "self", ".", "quote_name", "(", "f", ".", "column", ")", "for", "f", "in", "model", ".", "_meta", ".", "local_concrete_fields", "}", "# This maps field names (not columns) for things like unique_together", "rename_mapping", "=", "{", "}", "# If any of the new or altered fields is introducing a new PK,", "# remove the old one", "restore_pk_field", "=", "None", "if", "getattr", "(", "create_field", ",", "'primary_key'", ",", "False", ")", "or", "(", "alter_field", "and", "getattr", "(", "alter_field", "[", "1", "]", ",", "'primary_key'", ",", "False", ")", ")", ":", "for", "name", ",", "field", "in", "list", "(", "body", ".", "items", "(", ")", ")", ":", "if", "field", ".", "primary_key", ":", "field", ".", "primary_key", "=", "False", "restore_pk_field", "=", "field", "if", "field", ".", "auto_created", ":", "del", "body", "[", "name", "]", "del", "mapping", "[", "field", ".", "column", "]", "# Add in any created fields", "if", "create_field", ":", "body", "[", "create_field", ".", "name", "]", "=", "create_field", "# Choose a default and insert it into the copy map", "if", "not", "create_field", ".", "many_to_many", "and", "create_field", ".", "concrete", ":", "mapping", "[", "create_field", ".", "column", "]", "=", "self", ".", "quote_value", "(", "self", ".", "effective_default", "(", "create_field", ")", ")", "# Add in any altered fields", "if", "alter_field", ":", "old_field", ",", "new_field", "=", "alter_field", "body", ".", "pop", "(", "old_field", ".", "name", ",", "None", ")", "mapping", ".", "pop", "(", "old_field", ".", "column", ",", "None", ")", "body", "[", "new_field", ".", "name", "]", "=", "new_field", "if", "old_field", ".", "null", "and", "not", "new_field", ".", "null", ":", "case_sql", "=", "\"coalesce(%(col)s, %(default)s)\"", "%", "{", "'col'", ":", "self", ".", "quote_name", "(", "old_field", ".", "column", ")", ",", "'default'", ":", "self", ".", "quote_value", "(", "self", ".", "effective_default", "(", "new_field", ")", ")", "}", "mapping", "[", "new_field", ".", "column", "]", "=", "case_sql", "else", ":", "mapping", "[", "new_field", ".", "column", "]", "=", "self", ".", "quote_name", "(", "old_field", ".", "column", ")", "rename_mapping", "[", "old_field", ".", "name", "]", "=", "new_field", ".", "name", "# Remove any deleted fields", "if", "delete_field", ":", "del", "body", "[", "delete_field", ".", "name", "]", "del", "mapping", "[", "delete_field", ".", "column", "]", "# Remove any implicit M2M tables", "if", "delete_field", ".", "many_to_many", "and", "delete_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "auto_created", ":", "return", "self", ".", "delete_model", "(", 
"delete_field", ".", "remote_field", ".", "through", ")", "# Work inside a new app registry", "apps", "=", "Apps", "(", ")", "# Work out the new value of unique_together, taking renames into", "# account", "unique_together", "=", "[", "[", "rename_mapping", ".", "get", "(", "n", ",", "n", ")", "for", "n", "in", "unique", "]", "for", "unique", "in", "model", ".", "_meta", ".", "unique_together", "]", "# Work out the new value for index_together, taking renames into", "# account", "index_together", "=", "[", "[", "rename_mapping", ".", "get", "(", "n", ",", "n", ")", "for", "n", "in", "index", "]", "for", "index", "in", "model", ".", "_meta", ".", "index_together", "]", "indexes", "=", "model", ".", "_meta", ".", "indexes", "if", "delete_field", ":", "indexes", "=", "[", "index", "for", "index", "in", "indexes", "if", "delete_field", ".", "name", "not", "in", "index", ".", "fields", "]", "constraints", "=", "list", "(", "model", ".", "_meta", ".", "constraints", ")", "# Provide isolated instances of the fields to the new model body so", "# that the existing model's internals aren't interfered with when", "# the dummy model is constructed.", "body_copy", "=", "copy", ".", "deepcopy", "(", "body", ")", "# Construct a new model with the new fields to allow self referential", "# primary key to resolve to. This model won't ever be materialized as a", "# table and solely exists for foreign key reference resolution purposes.", "# This wouldn't be required if the schema editor was operating on model", "# states instead of rendered models.", "meta_contents", "=", "{", "'app_label'", ":", "model", ".", "_meta", ".", "app_label", ",", "'db_table'", ":", "model", ".", "_meta", ".", "db_table", ",", "'unique_together'", ":", "unique_together", ",", "'index_together'", ":", "index_together", ",", "'indexes'", ":", "indexes", ",", "'constraints'", ":", "constraints", ",", "'apps'", ":", "apps", ",", "}", "meta", "=", "type", "(", "\"Meta\"", ",", "(", ")", ",", "meta_contents", ")", "body_copy", "[", "'Meta'", "]", "=", "meta", "body_copy", "[", "'__module__'", "]", "=", "model", ".", "__module__", "type", "(", "model", ".", "_meta", ".", "object_name", ",", "model", ".", "__bases__", ",", "body_copy", ")", "# Construct a model with a renamed table name.", "body_copy", "=", "copy", ".", "deepcopy", "(", "body", ")", "meta_contents", "=", "{", "'app_label'", ":", "model", ".", "_meta", ".", "app_label", ",", "'db_table'", ":", "'new__%s'", "%", "strip_quotes", "(", "model", ".", "_meta", ".", "db_table", ")", ",", "'unique_together'", ":", "unique_together", ",", "'index_together'", ":", "index_together", ",", "'indexes'", ":", "indexes", ",", "'constraints'", ":", "constraints", ",", "'apps'", ":", "apps", ",", "}", "meta", "=", "type", "(", "\"Meta\"", ",", "(", ")", ",", "meta_contents", ")", "body_copy", "[", "'Meta'", "]", "=", "meta", "body_copy", "[", "'__module__'", "]", "=", "model", ".", "__module__", "new_model", "=", "type", "(", "'New%s'", "%", "model", ".", "_meta", ".", "object_name", ",", "model", ".", "__bases__", ",", "body_copy", ")", "# Create a new table with the updated schema.", "self", ".", "create_model", "(", "new_model", ")", "# Copy data from the old table into the new table", "self", ".", "execute", "(", "\"INSERT INTO %s (%s) SELECT %s FROM %s\"", "%", "(", "self", ".", "quote_name", "(", "new_model", ".", "_meta", ".", "db_table", ")", ",", "', '", ".", "join", "(", "self", ".", "quote_name", "(", "x", ")", "for", "x", "in", "mapping", ")", ",", "', '", ".", "join", "(", 
"mapping", ".", "values", "(", ")", ")", ",", "self", ".", "quote_name", "(", "model", ".", "_meta", ".", "db_table", ")", ",", ")", ")", "# Delete the old table to make way for the new", "self", ".", "delete_model", "(", "model", ",", "handle_autom2m", "=", "False", ")", "# Rename the new table to take way for the old", "self", ".", "alter_db_table", "(", "new_model", ",", "new_model", ".", "_meta", ".", "db_table", ",", "model", ".", "_meta", ".", "db_table", ",", "disable_constraints", "=", "False", ",", ")", "# Run deferred SQL on correct table", "for", "sql", "in", "self", ".", "deferred_sql", ":", "self", ".", "execute", "(", "sql", ")", "self", ".", "deferred_sql", "=", "[", "]", "# Fix any PK-removed field", "if", "restore_pk_field", ":", "restore_pk_field", ".", "primary_key", "=", "True" ]
[ 141, 4 ]
[ 306, 47 ]
python
en
['en', 'error', 'th']
False
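The five steps in the ``_remake_table`` docstring can be illustrated with the stdlib ``sqlite3`` module. This is a schematic sketch, not Django's literal SQL; the table, columns, and default value are invented for the example::

    import sqlite3

    con = sqlite3.connect(':memory:')
    con.executescript("""
        CREATE TABLE app_model (id INTEGER PRIMARY KEY, name TEXT);
        INSERT INTO app_model (name) VALUES ('a'), ('b');
    """)
    # 1. Create a table with the updated definition.
    con.execute("CREATE TABLE new__app_model "
                "(id INTEGER PRIMARY KEY, name TEXT, flag INTEGER)")
    # 2. Copy the data across, filling the new column with a default.
    con.execute("INSERT INTO new__app_model (id, name, flag) "
                "SELECT id, name, 0 FROM app_model")
    # 3. Drop the old table.
    con.execute("DROP TABLE app_model")
    # 4. Rename the new table into place.
    con.execute("ALTER TABLE new__app_model RENAME TO app_model")
    # 5. Recreate any indexes (none in this toy schema).
    print(con.execute("SELECT * FROM app_model").fetchall())
    # [(1, 'a', 0), (2, 'b', 0)]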
DatabaseSchemaEditor.add_field
(self, model, field)
Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields).
Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields).
def add_field(self, model, field):
    """
    Create a field on a model. Usually involves adding a column, but may
    involve adding a table instead (for M2M fields).
    """
    # Special-case implicit M2M tables
    if field.many_to_many and field.remote_field.through._meta.auto_created:
        return self.create_model(field.remote_field.through)
    self._remake_table(model, create_field=field)
[ "def", "add_field", "(", "self", ",", "model", ",", "field", ")", ":", "# Special-case implicit M2M tables", "if", "field", ".", "many_to_many", "and", "field", ".", "remote_field", ".", "through", ".", "_meta", ".", "auto_created", ":", "return", "self", ".", "create_model", "(", "field", ".", "remote_field", ".", "through", ")", "self", ".", "_remake_table", "(", "model", ",", "create_field", "=", "field", ")" ]
[ 321, 4 ]
[ 329, 53 ]
python
en
['en', 'error', 'th']
False
DatabaseSchemaEditor.remove_field
(self, model, field)
Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table.
Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table.
def remove_field(self, model, field):
    """
    Remove a field from a model. Usually involves deleting a column, but
    for M2Ms may involve deleting a table.
    """
    # M2M fields are a special case
    if field.many_to_many:
        # For implicit M2M tables, delete the auto-created table
        if field.remote_field.through._meta.auto_created:
            self.delete_model(field.remote_field.through)
        # For explicit "through" M2M fields, do nothing
    # For everything else, remake.
    else:
        # It might not actually have a column behind it
        if field.db_parameters(connection=self.connection)['type'] is None:
            return
        self._remake_table(model, delete_field=field)
[ "def", "remove_field", "(", "self", ",", "model", ",", "field", ")", ":", "# M2M fields are a special case", "if", "field", ".", "many_to_many", ":", "# For implicit M2M tables, delete the auto-created table", "if", "field", ".", "remote_field", ".", "through", ".", "_meta", ".", "auto_created", ":", "self", ".", "delete_model", "(", "field", ".", "remote_field", ".", "through", ")", "# For explicit \"through\" M2M fields, do nothing", "# For everything else, remake.", "else", ":", "# It might not actually have a column behind it", "if", "field", ".", "db_parameters", "(", "connection", "=", "self", ".", "connection", ")", "[", "'type'", "]", "is", "None", ":", "return", "self", ".", "_remake_table", "(", "model", ",", "delete_field", "=", "field", ")" ]
[ 331, 4 ]
[ 347, 57 ]
python
en
['en', 'error', 'th']
False
DatabaseSchemaEditor._alter_field
(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False)
Perform a "physical" (non-ManyToMany) field update.
Perform a "physical" (non-ManyToMany) field update.
def _alter_field(self, model, old_field, new_field, old_type, new_type,
                 old_db_params, new_db_params, strict=False):
    """Perform a "physical" (non-ManyToMany) field update."""
    # Use "ALTER TABLE ... RENAME COLUMN" if only the column name
    # changed and there aren't any constraints.
    if (self.connection.features.can_alter_table_rename_column and
            old_field.column != new_field.column and
            self.column_sql(model, old_field) == self.column_sql(model, new_field) and
            not (old_field.remote_field and old_field.db_constraint or
                 new_field.remote_field and new_field.db_constraint)):
        return self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
    # Alter by remaking table
    self._remake_table(model, alter_field=(old_field, new_field))
    # Rebuild tables with FKs pointing to this field.
    if new_field.unique and old_type != new_type:
        related_models = set()
        opts = new_field.model._meta
        for remote_field in opts.related_objects:
            # Ignore self-relationship since the table was already rebuilt.
            if remote_field.related_model == model:
                continue
            if not remote_field.many_to_many:
                if remote_field.field_name == new_field.name:
                    related_models.add(remote_field.related_model)
            elif new_field.primary_key and remote_field.through._meta.auto_created:
                related_models.add(remote_field.through)
        if new_field.primary_key:
            for many_to_many in opts.many_to_many:
                # Ignore self-relationship since the table was already rebuilt.
                if many_to_many.related_model == model:
                    continue
                if many_to_many.remote_field.through._meta.auto_created:
                    related_models.add(many_to_many.remote_field.through)
        for related_model in related_models:
            self._remake_table(related_model)
[ "def", "_alter_field", "(", "self", ",", "model", ",", "old_field", ",", "new_field", ",", "old_type", ",", "new_type", ",", "old_db_params", ",", "new_db_params", ",", "strict", "=", "False", ")", ":", "# Use \"ALTER TABLE ... RENAME COLUMN\" if only the column name", "# changed and there aren't any constraints.", "if", "(", "self", ".", "connection", ".", "features", ".", "can_alter_table_rename_column", "and", "old_field", ".", "column", "!=", "new_field", ".", "column", "and", "self", ".", "column_sql", "(", "model", ",", "old_field", ")", "==", "self", ".", "column_sql", "(", "model", ",", "new_field", ")", "and", "not", "(", "old_field", ".", "remote_field", "and", "old_field", ".", "db_constraint", "or", "new_field", ".", "remote_field", "and", "new_field", ".", "db_constraint", ")", ")", ":", "return", "self", ".", "execute", "(", "self", ".", "_rename_field_sql", "(", "model", ".", "_meta", ".", "db_table", ",", "old_field", ",", "new_field", ",", "new_type", ")", ")", "# Alter by remaking table", "self", ".", "_remake_table", "(", "model", ",", "alter_field", "=", "(", "old_field", ",", "new_field", ")", ")", "# Rebuild tables with FKs pointing to this field.", "if", "new_field", ".", "unique", "and", "old_type", "!=", "new_type", ":", "related_models", "=", "set", "(", ")", "opts", "=", "new_field", ".", "model", ".", "_meta", "for", "remote_field", "in", "opts", ".", "related_objects", ":", "# Ignore self-relationship since the table was already rebuilt.", "if", "remote_field", ".", "related_model", "==", "model", ":", "continue", "if", "not", "remote_field", ".", "many_to_many", ":", "if", "remote_field", ".", "field_name", "==", "new_field", ".", "name", ":", "related_models", ".", "add", "(", "remote_field", ".", "related_model", ")", "elif", "new_field", ".", "primary_key", "and", "remote_field", ".", "through", ".", "_meta", ".", "auto_created", ":", "related_models", ".", "add", "(", "remote_field", ".", "through", ")", "if", "new_field", ".", "primary_key", ":", "for", "many_to_many", "in", "opts", ".", "many_to_many", ":", "# Ignore self-relationship since the table was already rebuilt.", "if", "many_to_many", ".", "related_model", "==", "model", ":", "continue", "if", "many_to_many", ".", "remote_field", ".", "through", ".", "_meta", ".", "auto_created", ":", "related_models", ".", "add", "(", "many_to_many", ".", "remote_field", ".", "through", ")", "for", "related_model", "in", "related_models", ":", "self", ".", "_remake_table", "(", "related_model", ")" ]
[ 349, 4 ]
[ 383, 49 ]
python
en
['en', 'en', 'en']
True
DatabaseSchemaEditor._alter_many_to_many
(self, model, old_field, new_field, strict)
Alter M2Ms to repoint their to= endpoints.
Alter M2Ms to repoint their to= endpoints.
def _alter_many_to_many(self, model, old_field, new_field, strict):
    """Alter M2Ms to repoint their to= endpoints."""
    if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
        # The field name didn't change, but some options did; we have to propagate this altering.
        self._remake_table(
            old_field.remote_field.through,
            alter_field=(
                # We need the field that points to the target model, so we can tell alter_field to change it -
                # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
                old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
                new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
            ),
        )
        return

    # Make a new through table
    self.create_model(new_field.remote_field.through)
    # Copy the data across
    self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
        self.quote_name(new_field.remote_field.through._meta.db_table),
        ', '.join([
            "id",
            new_field.m2m_column_name(),
            new_field.m2m_reverse_name(),
        ]),
        ', '.join([
            "id",
            old_field.m2m_column_name(),
            old_field.m2m_reverse_name(),
        ]),
        self.quote_name(old_field.remote_field.through._meta.db_table),
    ))

    # Delete the old through table
    self.delete_model(old_field.remote_field.through)
[ "def", "_alter_many_to_many", "(", "self", ",", "model", ",", "old_field", ",", "new_field", ",", "strict", ")", ":", "if", "old_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "db_table", "==", "new_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "db_table", ":", "# The field name didn't change, but some options did; we have to propagate this altering.", "self", ".", "_remake_table", "(", "old_field", ".", "remote_field", ".", "through", ",", "alter_field", "=", "(", "# We need the field that points to the target model, so we can tell alter_field to change it -", "# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)", "old_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "get_field", "(", "old_field", ".", "m2m_reverse_field_name", "(", ")", ")", ",", "new_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "get_field", "(", "new_field", ".", "m2m_reverse_field_name", "(", ")", ")", ",", ")", ",", ")", "return", "# Make a new through table", "self", ".", "create_model", "(", "new_field", ".", "remote_field", ".", "through", ")", "# Copy the data across", "self", ".", "execute", "(", "\"INSERT INTO %s (%s) SELECT %s FROM %s\"", "%", "(", "self", ".", "quote_name", "(", "new_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "db_table", ")", ",", "', '", ".", "join", "(", "[", "\"id\"", ",", "new_field", ".", "m2m_column_name", "(", ")", ",", "new_field", ".", "m2m_reverse_name", "(", ")", ",", "]", ")", ",", "', '", ".", "join", "(", "[", "\"id\"", ",", "old_field", ".", "m2m_column_name", "(", ")", ",", "old_field", ".", "m2m_reverse_name", "(", ")", ",", "]", ")", ",", "self", ".", "quote_name", "(", "old_field", ".", "remote_field", ".", "through", ".", "_meta", ".", "db_table", ")", ",", ")", ")", "# Delete the old through table", "self", ".", "delete_model", "(", "old_field", ".", "remote_field", ".", "through", ")" ]
[ 385, 4 ]
[ 418, 57 ]
python
en
['en', 'en', 'en']
True
NameAliasMixin.get_real_name
(self)
Returns the real name (object name) of this identifier.
Returns the real name (object name) of this identifier.
def get_real_name(self):
    """Returns the real name (object name) of this identifier."""
    # a.b
    dot_idx, _ = self.token_next_by(m=(T.Punctuation, '.'))
    return self._get_first_name(dot_idx, real_name=True)
[ "def", "get_real_name", "(", "self", ")", ":", "# a.b", "dot_idx", ",", "_", "=", "self", ".", "token_next_by", "(", "m", "=", "(", "T", ".", "Punctuation", ",", "'.'", ")", ")", "return", "self", ".", "_get_first_name", "(", "dot_idx", ",", "real_name", "=", "True", ")" ]
[ 18, 4 ]
[ 22, 60 ]
python
en
['en', 'en', 'en']
True
NameAliasMixin.get_alias
(self)
Returns the alias for this identifier or ``None``.
Returns the alias for this identifier or ``None``.
def get_alias(self):
    """Returns the alias for this identifier or ``None``."""

    # "name AS alias"
    kw_idx, kw = self.token_next_by(m=(T.Keyword, 'AS'))
    if kw is not None:
        return self._get_first_name(kw_idx + 1, keywords=True)

    # "name alias" or "complicated column expression alias"
    _, ws = self.token_next_by(t=T.Whitespace)
    if len(self.tokens) > 2 and ws is not None:
        return self._get_first_name(reverse=True)
[ "def", "get_alias", "(", "self", ")", ":", "# \"name AS alias\"", "kw_idx", ",", "kw", "=", "self", ".", "token_next_by", "(", "m", "=", "(", "T", ".", "Keyword", ",", "'AS'", ")", ")", "if", "kw", "is", "not", "None", ":", "return", "self", ".", "_get_first_name", "(", "kw_idx", "+", "1", ",", "keywords", "=", "True", ")", "# \"name alias\" or \"complicated column expression alias\"", "_", ",", "ws", "=", "self", ".", "token_next_by", "(", "t", "=", "T", ".", "Whitespace", ")", "if", "len", "(", "self", ".", "tokens", ")", ">", "2", "and", "ws", "is", "not", "None", ":", "return", "self", ".", "_get_first_name", "(", "reverse", "=", "True", ")" ]
[ 24, 4 ]
[ 35, 53 ]
python
en
['en', 'en', 'en']
True
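The name accessors are easiest to see on a parsed statement. A quick sketch, assuming these records come from the ``sqlparse`` library::

    import sqlparse
    from sqlparse.sql import Identifier

    stmt = sqlparse.parse('SELECT mytable.col AS c FROM mytable')[0]
    ident = next(t for t in stmt.tokens if isinstance(t, Identifier))
    print(ident.get_real_name())    # -> 'col'
    print(ident.get_parent_name())  # -> 'mytable'
    print(ident.get_alias())        # -> 'c'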
Token.flatten
(self)
Resolve subgroups.
Resolve subgroups.
def flatten(self):
    """Resolve subgroups."""
    yield self
[ "def", "flatten", "(", "self", ")", ":", "yield", "self" ]
[ 83, 4 ]
[ 85, 18 ]
python
en
['et', 'la', 'en']
False
Token.match
(self, ttype, values, regex=False)
Checks whether the token matches the given arguments. *ttype* is a token type. If this token's type doesn't match *ttype*, ``False`` is returned. *values* is a list of possible values for this token. The values are OR'ed together so if only one of the values matches ``True`` is returned. Except for keyword tokens the comparison is case-sensitive. For convenience it's OK to pass in a single string. If *regex* is ``True`` (default is ``False``) the given values are treated as regular expressions.
Checks whether the token matches the given arguments.
def match(self, ttype, values, regex=False):
    """Checks whether the token matches the given arguments.

    *ttype* is a token type. If this token's type doesn't match *ttype*,
    ``False`` is returned.

    *values* is a list of possible values for this token. The values
    are OR'ed together so if only one of the values matches ``True``
    is returned. Except for keyword tokens the comparison is
    case-sensitive. For convenience it's OK to pass in a single string.
    If *regex* is ``True`` (default is ``False``) the given values are
    treated as regular expressions.
    """
    type_matched = self.ttype is ttype
    if not type_matched or values is None:
        return type_matched

    if isinstance(values, str):
        values = (values,)

    if regex:
        # TODO: Add test for regex with is_keyword = false
        flag = re.IGNORECASE if self.is_keyword else 0
        values = (re.compile(v, flag) for v in values)

        for pattern in values:
            if pattern.search(self.normalized):
                return True
        return False

    if self.is_keyword:
        values = (v.upper() for v in values)

    return self.normalized in values
[ "def", "match", "(", "self", ",", "ttype", ",", "values", ",", "regex", "=", "False", ")", ":", "type_matched", "=", "self", ".", "ttype", "is", "ttype", "if", "not", "type_matched", "or", "values", "is", "None", ":", "return", "type_matched", "if", "isinstance", "(", "values", ",", "str", ")", ":", "values", "=", "(", "values", ",", ")", "if", "regex", ":", "# TODO: Add test for regex with is_keyboard = false", "flag", "=", "re", ".", "IGNORECASE", "if", "self", ".", "is_keyword", "else", "0", "values", "=", "(", "re", ".", "compile", "(", "v", ",", "flag", ")", "for", "v", "in", "values", ")", "for", "pattern", "in", "values", ":", "if", "pattern", ".", "search", "(", "self", ".", "normalized", ")", ":", "return", "True", "return", "False", "if", "self", ".", "is_keyword", ":", "values", "=", "(", "v", ".", "upper", "(", ")", "for", "v", "in", "values", ")", "return", "self", ".", "normalized", "in", "values" ]
[ 87, 4 ]
[ 119, 40 ]
python
en
['en', 'en', 'en']
True
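A small demonstration of ``match``, again assuming ``sqlparse``; note that keyword values compare case-insensitively, while the token type must be identical::

    import sqlparse
    from sqlparse import tokens as T

    first = sqlparse.parse('SELECT 1')[0].token_first()
    print(first.match(T.Keyword.DML, ('select',)))               # True
    print(first.match(T.Keyword.DML, (r'sel.*',), regex=True))   # True
    print(first.match(T.Keyword, ('select',)))                   # False: ttype differs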
Token.within
(self, group_cls)
Returns ``True`` if this token is within *group_cls*. Use this method for example to check if an identifier is within a function: ``t.within(sql.Function)``.
Returns ``True`` if this token is within *group_cls*.
def within(self, group_cls):
    """Returns ``True`` if this token is within *group_cls*.

    Use this method for example to check if an identifier is within
    a function: ``t.within(sql.Function)``.
    """
    parent = self.parent
    while parent:
        if isinstance(parent, group_cls):
            return True
        parent = parent.parent
    return False
[ "def", "within", "(", "self", ",", "group_cls", ")", ":", "parent", "=", "self", ".", "parent", "while", "parent", ":", "if", "isinstance", "(", "parent", ",", "group_cls", ")", ":", "return", "True", "parent", "=", "parent", ".", "parent", "return", "False" ]
[ 121, 4 ]
[ 132, 20 ]
python
en
['en', 'en', 'en']
True
Token.is_child_of
(self, other)
Returns ``True`` if this token is a direct child of *other*.
Returns ``True`` if this token is a direct child of *other*.
def is_child_of(self, other):
    """Returns ``True`` if this token is a direct child of *other*."""
    return self.parent == other
[ "def", "is_child_of", "(", "self", ",", "other", ")", ":", "return", "self", ".", "parent", "==", "other" ]
[ 134, 4 ]
[ 136, 35 ]
python
en
['en', 'en', 'en']
True
Token.has_ancestor
(self, other)
Returns ``True`` if *other* is in this token's ancestry.
Returns ``True`` if *other* is in this token's ancestry.
def has_ancestor(self, other):
    """Returns ``True`` if *other* is in this token's ancestry."""
    parent = self.parent
    while parent:
        if parent == other:
            return True
        parent = parent.parent
    return False
[ "def", "has_ancestor", "(", "self", ",", "other", ")", ":", "parent", "=", "self", ".", "parent", "while", "parent", ":", "if", "parent", "==", "other", ":", "return", "True", "parent", "=", "parent", ".", "parent", "return", "False" ]
[ 138, 4 ]
[ 145, 20 ]
python
en
['en', 'en', 'en']
True
TokenList._pprint_tree
(self, max_depth=None, depth=0, f=None, _pre='')
Pretty-print the object tree.
Pretty-print the object tree.
def _pprint_tree(self, max_depth=None, depth=0, f=None, _pre=''):
    """Pretty-print the object tree."""
    token_count = len(self.tokens)
    for idx, token in enumerate(self.tokens):
        cls = token._get_repr_name()
        value = token._get_repr_value()

        last = idx == (token_count - 1)
        pre = '`- ' if last else '|- '

        q = '"' if value.startswith("'") and value.endswith("'") else "'"
        print("{_pre}{pre}{idx} {cls} {q}{value}{q}"
              .format(**locals()), file=f)

        if token.is_group and (max_depth is None or depth < max_depth):
            parent_pre = '   ' if last else '|  '
            token._pprint_tree(max_depth, depth + 1, f, _pre + parent_pre)
[ "def", "_pprint_tree", "(", "self", ",", "max_depth", "=", "None", ",", "depth", "=", "0", ",", "f", "=", "None", ",", "_pre", "=", "''", ")", ":", "token_count", "=", "len", "(", "self", ".", "tokens", ")", "for", "idx", ",", "token", "in", "enumerate", "(", "self", ".", "tokens", ")", ":", "cls", "=", "token", ".", "_get_repr_name", "(", ")", "value", "=", "token", ".", "_get_repr_value", "(", ")", "last", "=", "idx", "==", "(", "token_count", "-", "1", ")", "pre", "=", "'`- '", "if", "last", "else", "'|- '", "q", "=", "'\"'", "if", "value", ".", "startswith", "(", "\"'\"", ")", "and", "value", ".", "endswith", "(", "\"'\"", ")", "else", "\"'\"", "print", "(", "\"{_pre}{pre}{idx} {cls} {q}{value}{q}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "file", "=", "f", ")", "if", "token", ".", "is_group", "and", "(", "max_depth", "is", "None", "or", "depth", "<", "max_depth", ")", ":", "parent_pre", "=", "' '", "if", "last", "else", "'| '", "token", ".", "_pprint_tree", "(", "max_depth", ",", "depth", "+", "1", ",", "f", ",", "_pre", "+", "parent_pre", ")" ]
[ 179, 4 ]
[ 195, 78 ]
python
en
['en', 'mt', 'en']
True
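Called on a parsed statement (assuming ``sqlparse``), this prints an ASCII art tree; output abbreviated::

    import sqlparse

    sqlparse.parse('SELECT a FROM b')[0]._pprint_tree()
    # |- 0 DML 'SELECT'
    # |- 1 Whitespace ' '
    # |- 2 Identifier 'a'
    # |  `- 0 Name 'a'
    # ...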
TokenList.get_token_at_offset
(self, offset)
Returns the token that is at position *offset*.
Returns the token that is at position *offset*.
def get_token_at_offset(self, offset):
    """Returns the token that is at position *offset*."""
    idx = 0
    for token in self.flatten():
        end = idx + len(token.value)
        if idx <= offset < end:
            return token
        idx = end
[ "def", "get_token_at_offset", "(", "self", ",", "offset", ")", ":", "idx", "=", "0", "for", "token", "in", "self", ".", "flatten", "(", ")", ":", "end", "=", "idx", "+", "len", "(", "token", ".", "value", ")", "if", "idx", "<=", "offset", "<", "end", ":", "return", "token", "idx", "=", "end" ]
[ 197, 4 ]
[ 204, 21 ]
python
en
['en', 'en', 'en']
True
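Offsets refer to character positions in the original statement text (a ``sqlparse`` sketch)::

    import sqlparse

    stmt = sqlparse.parse('SELECT a FROM b')[0]
    print(stmt.get_token_at_offset(0))   # SELECT  (offsets 0-5)
    print(stmt.get_token_at_offset(7))   # a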
TokenList.flatten
(self)
Generator yielding ungrouped tokens. This method is recursively called for all child tokens.
Generator yielding ungrouped tokens.
def flatten(self):
    """Generator yielding ungrouped tokens.

    This method is recursively called for all child tokens.
    """
    for token in self.tokens:
        if token.is_group:
            yield from token.flatten()
        else:
            yield token
[ "def", "flatten", "(", "self", ")", ":", "for", "token", "in", "self", ".", "tokens", ":", "if", "token", ".", "is_group", ":", "yield", "from", "token", ".", "flatten", "(", ")", "else", ":", "yield", "token" ]
[ 206, 4 ]
[ 215, 27 ]
python
en
['en', 'en', 'es']
True
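Flattening reduces the nested tree back to its leaf tokens (a ``sqlparse`` sketch)::

    import sqlparse

    stmt = sqlparse.parse('SELECT a, b FROM t')[0]
    print([t.value for t in stmt.flatten()])
    # ['SELECT', ' ', 'a', ',', ' ', 'b', ' ', 'FROM', ' ', 't']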
TokenList._token_matching
(self, funcs, start=0, end=None, reverse=False)
Returns the next token that matches one of the given functions.
Returns the next token that matches one of the given functions.
def _token_matching(self, funcs, start=0, end=None, reverse=False):
    """Returns the next token that matches one of the given functions."""
    if start is None:
        return None

    if not isinstance(funcs, (list, tuple)):
        funcs = (funcs,)

    if reverse:
        assert end is None
        for idx in range(start - 2, -1, -1):
            token = self.tokens[idx]
            for func in funcs:
                if func(token):
                    return idx, token
    else:
        for idx, token in enumerate(self.tokens[start:end], start=start):
            for func in funcs:
                if func(token):
                    return idx, token
    return None, None
[ "def", "_token_matching", "(", "self", ",", "funcs", ",", "start", "=", "0", ",", "end", "=", "None", ",", "reverse", "=", "False", ")", ":", "if", "start", "is", "None", ":", "return", "None", "if", "not", "isinstance", "(", "funcs", ",", "(", "list", ",", "tuple", ")", ")", ":", "funcs", "=", "(", "funcs", ",", ")", "if", "reverse", ":", "assert", "end", "is", "None", "for", "idx", "in", "range", "(", "start", "-", "2", ",", "-", "1", ",", "-", "1", ")", ":", "token", "=", "self", ".", "tokens", "[", "idx", "]", "for", "func", "in", "funcs", ":", "if", "func", "(", "token", ")", ":", "return", "idx", ",", "token", "else", ":", "for", "idx", ",", "token", "in", "enumerate", "(", "self", ".", "tokens", "[", "start", ":", "end", "]", ",", "start", "=", "start", ")", ":", "for", "func", "in", "funcs", ":", "if", "func", "(", "token", ")", ":", "return", "idx", ",", "token", "return", "None", ",", "None" ]
[ 226, 4 ]
[ 246, 25 ]
python
en
['en', 'en', 'en']
True
TokenList.token_first
(self, skip_ws=True, skip_cm=False)
Returns the first child token. If *skip_ws* is ``True`` (the default), whitespace tokens are ignored. If *skip_cm* is ``True`` (default: ``False``), comments are ignored too.
Returns the first child token.
def token_first(self, skip_ws=True, skip_cm=False):
    """Returns the first child token.

    If *skip_ws* is ``True`` (the default), whitespace
    tokens are ignored.

    If *skip_cm* is ``True`` (default: ``False``), comments are
    ignored too.
    """
    # this one is inconsistent, using Comment instead of T.Comment...
    def matcher(tk):
        return not ((skip_ws and tk.is_whitespace)
                    or (skip_cm and imt(tk, t=T.Comment, i=Comment)))
    return self._token_matching(matcher)[1]
[ "def", "token_first", "(", "self", ",", "skip_ws", "=", "True", ",", "skip_cm", "=", "False", ")", ":", "# this on is inconsistent, using Comment instead of T.Comment...", "def", "matcher", "(", "tk", ")", ":", "return", "not", "(", "(", "skip_ws", "and", "tk", ".", "is_whitespace", ")", "or", "(", "skip_cm", "and", "imt", "(", "tk", ",", "t", "=", "T", ".", "Comment", ",", "i", "=", "Comment", ")", ")", ")", "return", "self", ".", "_token_matching", "(", "matcher", ")", "[", "1", "]" ]
[ 248, 4 ]
[ 261, 47 ]
python
en
['en', 'de', 'en']
True
TokenList.token_prev
(self, idx, skip_ws=True, skip_cm=False)
Returns the previous token relative to *idx*. If *skip_ws* is ``True`` (the default) whitespace tokens are ignored. If *skip_cm* is ``True`` comments are ignored. ``None`` is returned if there's no previous token.
Returns the previous token relative to *idx*.
def token_prev(self, idx, skip_ws=True, skip_cm=False):
    """Returns the previous token relative to *idx*.

    If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
    If *skip_cm* is ``True`` comments are ignored.
    ``None`` is returned if there's no previous token.
    """
    return self.token_next(idx, skip_ws, skip_cm, _reverse=True)
[ "def", "token_prev", "(", "self", ",", "idx", ",", "skip_ws", "=", "True", ",", "skip_cm", "=", "False", ")", ":", "return", "self", ".", "token_next", "(", "idx", ",", "skip_ws", ",", "skip_cm", ",", "_reverse", "=", "True", ")" ]
[ 275, 4 ]
[ 282, 68 ]
python
en
['en', 'gl', 'en']
True
TokenList.token_next
(self, idx, skip_ws=True, skip_cm=False, _reverse=False)
Returns the next token relative to *idx*. If *skip_ws* is ``True`` (the default) whitespace tokens are ignored. If *skip_cm* is ``True`` comments are ignored. ``None`` is returned if there's no next token.
Returns the next token relative to *idx*.
def token_next(self, idx, skip_ws=True, skip_cm=False, _reverse=False):
    """Returns the next token relative to *idx*.

    If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
    If *skip_cm* is ``True`` comments are ignored.
    ``None`` is returned if there's no next token.
    """
    if idx is None:
        return None, None
    idx += 1  # a lot of code using this currently pre-compensates for this

    def matcher(tk):
        return not ((skip_ws and tk.is_whitespace)
                    or (skip_cm and imt(tk, t=T.Comment, i=Comment)))
    return self._token_matching(matcher, idx, reverse=_reverse)
[ "def", "token_next", "(", "self", ",", "idx", ",", "skip_ws", "=", "True", ",", "skip_cm", "=", "False", ",", "_reverse", "=", "False", ")", ":", "if", "idx", "is", "None", ":", "return", "None", ",", "None", "idx", "+=", "1", "# alot of code usage current pre-compensates for this", "def", "matcher", "(", "tk", ")", ":", "return", "not", "(", "(", "skip_ws", "and", "tk", ".", "is_whitespace", ")", "or", "(", "skip_cm", "and", "imt", "(", "tk", ",", "t", "=", "T", ".", "Comment", ",", "i", "=", "Comment", ")", ")", ")", "return", "self", ".", "_token_matching", "(", "matcher", ",", "idx", ",", "reverse", "=", "_reverse", ")" ]
[ 285, 4 ]
[ 299, 67 ]
python
en
['en', 'nl', 'en']
True
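Note the ``(index, token)`` return shape and the ``idx += 1`` pre-compensation; a common idiom (assuming ``sqlparse``) is therefore to start from ``-1``::

    import sqlparse

    stmt = sqlparse.parse('SELECT a FROM b')[0]
    idx, tok = stmt.token_next(-1)    # first non-whitespace token
    print(idx, tok.value)             # 0 SELECT
    idx, tok = stmt.token_next(idx)   # whitespace is skipped
    print(idx, tok.value)             # 2 a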
TokenList.token_index
(self, token, start=0)
Return list index of token.
Return list index of token.
def token_index(self, token, start=0):
    """Return list index of token."""
    start = start if isinstance(start, int) else self.token_index(start)
    return start + self.tokens[start:].index(token)
[ "def", "token_index", "(", "self", ",", "token", ",", "start", "=", "0", ")", ":", "start", "=", "start", "if", "isinstance", "(", "start", ",", "int", ")", "else", "self", ".", "token_index", "(", "start", ")", "return", "start", "+", "self", ".", "tokens", "[", "start", ":", "]", ".", "index", "(", "token", ")" ]
[ 301, 4 ]
[ 304, 55 ]
python
en
['en', 'de', 'en']
True
TokenList.group_tokens
(self, grp_cls, start, end, include_end=True, extend=False)
Replace tokens by an instance of *grp_cls*.
Replace tokens by an instance of *grp_cls*.
def group_tokens(self, grp_cls, start, end, include_end=True, extend=False):
    """Replace tokens by an instance of *grp_cls*."""
    start_idx = start
    start = self.tokens[start_idx]

    end_idx = end + include_end

    # will be needed later for new group_clauses
    # while skip_ws and tokens and tokens[-1].is_whitespace:
    #     tokens = tokens[:-1]

    if extend and isinstance(start, grp_cls):
        subtokens = self.tokens[start_idx + 1:end_idx]

        grp = start
        grp.tokens.extend(subtokens)
        del self.tokens[start_idx + 1:end_idx]
        grp.value = str(start)
    else:
        subtokens = self.tokens[start_idx:end_idx]
        grp = grp_cls(subtokens)
        self.tokens[start_idx:end_idx] = [grp]
        grp.parent = self

    for token in subtokens:
        token.parent = grp

    return grp
[ "def", "group_tokens", "(", "self", ",", "grp_cls", ",", "start", ",", "end", ",", "include_end", "=", "True", ",", "extend", "=", "False", ")", ":", "start_idx", "=", "start", "start", "=", "self", ".", "tokens", "[", "start_idx", "]", "end_idx", "=", "end", "+", "include_end", "# will be needed later for new group_clauses", "# while skip_ws and tokens and tokens[-1].is_whitespace:", "# tokens = tokens[:-1]", "if", "extend", "and", "isinstance", "(", "start", ",", "grp_cls", ")", ":", "subtokens", "=", "self", ".", "tokens", "[", "start_idx", "+", "1", ":", "end_idx", "]", "grp", "=", "start", "grp", ".", "tokens", ".", "extend", "(", "subtokens", ")", "del", "self", ".", "tokens", "[", "start_idx", "+", "1", ":", "end_idx", "]", "grp", ".", "value", "=", "str", "(", "start", ")", "else", ":", "subtokens", "=", "self", ".", "tokens", "[", "start_idx", ":", "end_idx", "]", "grp", "=", "grp_cls", "(", "subtokens", ")", "self", ".", "tokens", "[", "start_idx", ":", "end_idx", "]", "=", "[", "grp", "]", "grp", ".", "parent", "=", "self", "for", "token", "in", "subtokens", ":", "token", ".", "parent", "=", "grp", "return", "grp" ]
[ 306, 4 ]
[ 334, 18 ]
python
en
['en', 'en', 'en']
True
TokenList.insert_before
(self, where, token)
Inserts *token* before *where*.
Inserts *token* before *where*.
def insert_before(self, where, token):
    """Inserts *token* before *where*."""
    if not isinstance(where, int):
        where = self.token_index(where)
    token.parent = self
    self.tokens.insert(where, token)
[ "def", "insert_before", "(", "self", ",", "where", ",", "token", ")", ":", "if", "not", "isinstance", "(", "where", ",", "int", ")", ":", "where", "=", "self", ".", "token_index", "(", "where", ")", "token", ".", "parent", "=", "self", "self", ".", "tokens", ".", "insert", "(", "where", ",", "token", ")" ]
[ 336, 4 ]
[ 341, 40 ]
python
en
['en', 'en', 'en']
True
TokenList.insert_after
(self, where, token, skip_ws=True)
Inserts *token* after *where*.
Inserts *token* after *where*.
def insert_after(self, where, token, skip_ws=True):
    """Inserts *token* after *where*."""
    if not isinstance(where, int):
        where = self.token_index(where)
    nidx, next_ = self.token_next(where, skip_ws=skip_ws)
    token.parent = self
    if next_ is None:
        self.tokens.append(token)
    else:
        self.tokens.insert(nidx, token)
[ "def", "insert_after", "(", "self", ",", "where", ",", "token", ",", "skip_ws", "=", "True", ")", ":", "if", "not", "isinstance", "(", "where", ",", "int", ")", ":", "where", "=", "self", ".", "token_index", "(", "where", ")", "nidx", ",", "next_", "=", "self", ".", "token_next", "(", "where", ",", "skip_ws", "=", "skip_ws", ")", "token", ".", "parent", "=", "self", "if", "next_", "is", "None", ":", "self", ".", "tokens", ".", "append", "(", "token", ")", "else", ":", "self", ".", "tokens", ".", "insert", "(", "nidx", ",", "token", ")" ]
[ 343, 4 ]
[ 352, 43 ]
python
en
['en', 'en', 'en']
True
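A sketch of in-place tree editing with ``insert_after`` (assuming ``sqlparse``); serializing the statement afterwards reflects the edit::

    import sqlparse
    from sqlparse import tokens as T
    from sqlparse.sql import Token

    stmt = sqlparse.parse('SELECT a FROM b')[0]
    kw = stmt.token_first()
    stmt.insert_after(kw, Token(T.Comment, '/* hint */ '))
    print(str(stmt))   # SELECT /* hint */ a FROM b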
TokenList.has_alias
(self)
Returns ``True`` if an alias is present.
Returns ``True`` if an alias is present.
def has_alias(self):
    """Returns ``True`` if an alias is present."""
    return self.get_alias() is not None
[ "def", "has_alias", "(", "self", ")", ":", "return", "self", ".", "get_alias", "(", ")", "is", "not", "None" ]
[ 354, 4 ]
[ 356, 43 ]
python
en
['en', 'lb', 'en']
True
TokenList.get_alias
(self)
Returns the alias for this identifier or ``None``.
Returns the alias for this identifier or ``None``.
def get_alias(self):
    """Returns the alias for this identifier or ``None``."""
    return None
[ "def", "get_alias", "(", "self", ")", ":", "return", "None" ]
[ 358, 4 ]
[ 360, 19 ]
python
en
['en', 'en', 'en']
True
TokenList.get_name
(self)
Returns the name of this identifier. This is either its alias or its real name. The returned value can be considered as the name under which the object corresponding to this identifier is known within the current statement.
Returns the name of this identifier.
def get_name(self):
    """Returns the name of this identifier.

    This is either its alias or its real name. The returned value can
    be considered as the name under which the object corresponding to
    this identifier is known within the current statement.
    """
    return self.get_alias() or self.get_real_name()
[ "def", "get_name", "(", "self", ")", ":", "return", "self", ".", "get_alias", "(", ")", "or", "self", ".", "get_real_name", "(", ")" ]
[ 362, 4 ]
[ 369, 55 ]
python
en
['en', 'en', 'en']
True
TokenList.get_real_name
(self)
Returns the real name (object name) of this identifier.
Returns the real name (object name) of this identifier.
def get_real_name(self):
    """Returns the real name (object name) of this identifier."""
    return None
[ "def", "get_real_name", "(", "self", ")", ":", "return", "None" ]
[ 371, 4 ]
[ 373, 19 ]
python
en
['en', 'en', 'en']
True
TokenList.get_parent_name
(self)
Return name of the parent object if any. A parent object is identified by the first occurring dot.
Return name of the parent object if any.
def get_parent_name(self):
    """Return name of the parent object if any.

    A parent object is identified by the first occurring dot.
    """
    dot_idx, _ = self.token_next_by(m=(T.Punctuation, '.'))
    _, prev_ = self.token_prev(dot_idx)
    return remove_quotes(prev_.value) if prev_ is not None else None
[ "def", "get_parent_name", "(", "self", ")", ":", "dot_idx", ",", "_", "=", "self", ".", "token_next_by", "(", "m", "=", "(", "T", ".", "Punctuation", ",", "'.'", ")", ")", "_", ",", "prev_", "=", "self", ".", "token_prev", "(", "dot_idx", ")", "return", "remove_quotes", "(", "prev_", ".", "value", ")", "if", "prev_", "is", "not", "None", "else", "None" ]
[ 375, 4 ]
[ 382, 72 ]
python
en
['en', 'en', 'en']
True
TokenList._get_first_name
(self, idx=None, reverse=False, keywords=False, real_name=False)
Returns the name of the first token with a name
Returns the name of the first token with a name
def _get_first_name(self, idx=None, reverse=False, keywords=False,
                    real_name=False):
    """Returns the name of the first token with a name"""
    tokens = self.tokens[idx:] if idx else self.tokens
    tokens = reversed(tokens) if reverse else tokens
    types = [T.Name, T.Wildcard, T.String.Symbol]

    if keywords:
        types.append(T.Keyword)

    for token in tokens:
        if token.ttype in types:
            return remove_quotes(token.value)
        elif isinstance(token, (Identifier, Function)):
            return token.get_real_name() if real_name else token.get_name()
[ "def", "_get_first_name", "(", "self", ",", "idx", "=", "None", ",", "reverse", "=", "False", ",", "keywords", "=", "False", ",", "real_name", "=", "False", ")", ":", "tokens", "=", "self", ".", "tokens", "[", "idx", ":", "]", "if", "idx", "else", "self", ".", "tokens", "tokens", "=", "reversed", "(", "tokens", ")", "if", "reverse", "else", "tokens", "types", "=", "[", "T", ".", "Name", ",", "T", ".", "Wildcard", ",", "T", ".", "String", ".", "Symbol", "]", "if", "keywords", ":", "types", ".", "append", "(", "T", ".", "Keyword", ")", "for", "token", "in", "tokens", ":", "if", "token", ".", "ttype", "in", "types", ":", "return", "remove_quotes", "(", "token", ".", "value", ")", "elif", "isinstance", "(", "token", ",", "(", "Identifier", ",", "Function", ")", ")", ":", "return", "token", ".", "get_real_name", "(", ")", "if", "real_name", "else", "token", ".", "get_name", "(", ")" ]
[ 384, 4 ]
[ 399, 79 ]
python
en
['en', 'en', 'en']
True
Statement.get_type
(self)
Returns the type of a statement. The returned value is a string holding an upper-cased reprint of the first DML or DDL keyword. If the first token in this group isn't a DML or DDL keyword, "UNKNOWN" is returned. Whitespace and comments at the beginning of the statement are ignored.
Returns the type of a statement.
def get_type(self):
    """Returns the type of a statement.

    The returned value is a string holding an upper-cased reprint of
    the first DML or DDL keyword. If the first token in this group
    isn't a DML or DDL keyword, "UNKNOWN" is returned.

    Whitespace and comments at the beginning of the statement
    are ignored.
    """
    first_token = self.token_first(skip_cm=True)
    if first_token is None:
        # An "empty" statement that either has no tokens at all
        # or only whitespace tokens.
        return 'UNKNOWN'

    elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
        return first_token.normalized

    elif first_token.ttype == T.Keyword.CTE:
        # The WITH keyword should be followed by either an Identifier or
        # an IdentifierList containing the CTE definitions; the actual
        # DML keyword (e.g. SELECT, INSERT) will follow next.
        fidx = self.token_index(first_token)
        tidx, token = self.token_next(fidx, skip_ws=True)
        if isinstance(token, (Identifier, IdentifierList)):
            _, dml_keyword = self.token_next(tidx, skip_ws=True)

            if dml_keyword is not None \
                    and dml_keyword.ttype == T.Keyword.DML:
                return dml_keyword.normalized

    # Hmm, probably invalid syntax, so return unknown.
    return 'UNKNOWN'
[ "def", "get_type", "(", "self", ")", ":", "first_token", "=", "self", ".", "token_first", "(", "skip_cm", "=", "True", ")", "if", "first_token", "is", "None", ":", "# An \"empty\" statement that either has not tokens at all", "# or only whitespace tokens.", "return", "'UNKNOWN'", "elif", "first_token", ".", "ttype", "in", "(", "T", ".", "Keyword", ".", "DML", ",", "T", ".", "Keyword", ".", "DDL", ")", ":", "return", "first_token", ".", "normalized", "elif", "first_token", ".", "ttype", "==", "T", ".", "Keyword", ".", "CTE", ":", "# The WITH keyword should be followed by either an Identifier or", "# an IdentifierList containing the CTE definitions; the actual", "# DML keyword (e.g. SELECT, INSERT) will follow next.", "fidx", "=", "self", ".", "token_index", "(", "first_token", ")", "tidx", ",", "token", "=", "self", ".", "token_next", "(", "fidx", ",", "skip_ws", "=", "True", ")", "if", "isinstance", "(", "token", ",", "(", "Identifier", ",", "IdentifierList", ")", ")", ":", "_", ",", "dml_keyword", "=", "self", ".", "token_next", "(", "tidx", ",", "skip_ws", "=", "True", ")", "if", "dml_keyword", "is", "not", "None", "and", "dml_keyword", ".", "ttype", "==", "T", ".", "Keyword", ".", "DML", ":", "return", "dml_keyword", ".", "normalized", "# Hmm, probably invalid syntax, so return unknown.", "return", "'UNKNOWN'" ]
[ 405, 4 ]
[ 438, 24 ]
python
en
['en', 'en', 'en']
True
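A hedged usage sketch for get_type(), exercising the comment-skipping and CTE branches described above; the SQL strings are made up:
    import sqlparse

    print(sqlparse.parse('-- leading comment\nSELECT 1')[0].get_type())          # 'SELECT'
    print(sqlparse.parse('WITH c AS (SELECT 1) SELECT * FROM c')[0].get_type())  # 'SELECT'
    print(sqlparse.parse('foo bar baz')[0].get_type())                           # 'UNKNOWN'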
Identifier.is_wildcard
(self)
Return ``True`` if this identifier contains a wildcard.
Return ``True`` if this identifier contains a wildcard.
def is_wildcard(self): """Return ``True`` if this identifier contains a wildcard.""" _, token = self.token_next_by(t=T.Wildcard) return token is not None
[ "def", "is_wildcard", "(", "self", ")", ":", "_", ",", "token", "=", "self", ".", "token_next_by", "(", "t", "=", "T", ".", "Wildcard", ")", "return", "token", "is", "not", "None" ]
[ 447, 4 ]
[ 450, 32 ]
python
en
['en', 'en', 'en']
True
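A minimal sketch for is_wildcard(), assuming sqlparse groups 't.*' into an Identifier:
    import sqlparse
    from sqlparse.sql import Identifier

    stmt = sqlparse.parse('SELECT t.* FROM t')[0]
    ident = next(tok for tok in stmt.tokens if isinstance(tok, Identifier))
    print(ident.is_wildcard())  # True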
Identifier.get_typecast
(self)
Returns the typecast of this object as a string, or ``None``.
Returns the typecast of this object as a string, or ``None``.
def get_typecast(self): """Returns the typecast of this object as a string, or ``None``.""" midx, marker = self.token_next_by(m=(T.Punctuation, '::')) nidx, next_ = self.token_next(midx, skip_ws=False) return next_.value if next_ else None
[ "def", "get_typecast", "(", "self", ")", ":", "midx", ",", "marker", "=", "self", ".", "token_next_by", "(", "m", "=", "(", "T", ".", "Punctuation", ",", "'::'", ")", ")", "nidx", ",", "next_", "=", "self", ".", "token_next", "(", "midx", ",", "skip_ws", "=", "False", ")", "return", "next_", ".", "value", "if", "next_", "else", "None" ]
[ 452, 4 ]
[ 456, 45 ]
python
en
['en', 'en', 'en']
True
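A sketch for get_typecast() using a PostgreSQL-style '::' cast; the column and type names are made up:
    import sqlparse
    from sqlparse.sql import Identifier

    stmt = sqlparse.parse('SELECT foo::integer FROM t')[0]
    ident = next(tok for tok in stmt.tokens if isinstance(tok, Identifier))
    print(ident.get_typecast())  # 'integer'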
Identifier.get_ordering
(self)
Returns the ordering as an uppercase string, or ``None``.
Returns the ordering as an uppercase string, or ``None``.
def get_ordering(self): """Returns the ordering as an uppercase string, or ``None``.""" _, ordering = self.token_next_by(t=T.Keyword.Order) return ordering.normalized if ordering else None
[ "def", "get_ordering", "(", "self", ")", ":", "_", ",", "ordering", "=", "self", ".", "token_next_by", "(", "t", "=", "T", ".", "Keyword", ".", "Order", ")", "return", "ordering", ".", "normalized", "if", "ordering", "else", "None" ]
[ 458, 4 ]
[ 461, 56 ]
python
en
['en', 'en', 'en']
True
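A sketch for get_ordering(), under the assumption that sqlparse groups the ordered column of an ORDER BY clause into an Identifier:
    import sqlparse
    from sqlparse.sql import Identifier

    stmt = sqlparse.parse('SELECT * FROM t ORDER BY name DESC')[0]
    ident = [tok for tok in stmt.tokens if isinstance(tok, Identifier)][-1]
    print(ident.get_ordering())  # 'DESC'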
Identifier.get_array_indices
(self)
Returns an iterator of index token lists
Returns an iterator of index token lists
def get_array_indices(self): """Returns an iterator of index token lists""" for token in self.tokens: if isinstance(token, SquareBrackets): # Use [1:-1] index to discard the square brackets yield token.tokens[1:-1]
[ "def", "get_array_indices", "(", "self", ")", ":", "for", "token", "in", "self", ".", "tokens", ":", "if", "isinstance", "(", "token", ",", "SquareBrackets", ")", ":", "# Use [1:-1] index to discard the square brackets", "yield", "token", ".", "tokens", "[", "1", ":", "-", "1", "]" ]
[ 463, 4 ]
[ 469, 40 ]
python
en
['en', 'en', 'en']
True
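A sketch for get_array_indices() on a subscripted column; note each yielded item is a list of tokens, not a plain value:
    import sqlparse
    from sqlparse.sql import Identifier

    stmt = sqlparse.parse('SELECT col[1] FROM t')[0]
    ident = next(tok for tok in stmt.tokens if isinstance(tok, Identifier))
    for index_tokens in ident.get_array_indices():
        print([tok.value for tok in index_tokens])  # e.g. ['1']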
IdentifierList.get_identifiers
(self)
Returns the identifiers. Whitespace and punctuation are not included in this generator.
Returns the identifiers.
def get_identifiers(self): """Returns the identifiers. Whitespace and punctuation are not included in this generator. """ for token in self.tokens: if not (token.is_whitespace or token.match(T.Punctuation, ',')): yield token
[ "def", "get_identifiers", "(", "self", ")", ":", "for", "token", "in", "self", ".", "tokens", ":", "if", "not", "(", "token", ".", "is_whitespace", "or", "token", ".", "match", "(", "T", ".", "Punctuation", ",", "','", ")", ")", ":", "yield", "token" ]
[ 475, 4 ]
[ 482, 27 ]
python
en
['en', 'nl', 'en']
True
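A sketch for get_identifiers(), assuming 'a, b, c' is grouped into an IdentifierList:
    import sqlparse
    from sqlparse.sql import IdentifierList

    stmt = sqlparse.parse('SELECT a, b, c FROM t')[0]
    id_list = next(tok for tok in stmt.tokens if isinstance(tok, IdentifierList))
    print([tok.value for tok in id_list.get_identifiers()])  # ['a', 'b', 'c']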
Case.get_cases
(self, skip_ws=False)
Returns a list of 2-tuples (condition, value). If an ELSE exists, its condition is None.
Returns a list of 2-tuples (condition, value).
def get_cases(self, skip_ws=False): """Returns a list of 2-tuples (condition, value). If an ELSE exists, its condition is None. """ CONDITION = 1 VALUE = 2 ret = [] mode = CONDITION for token in self.tokens: # Set mode from the current statement if token.match(T.Keyword, 'CASE'): continue elif skip_ws and token.ttype in T.Whitespace: continue elif token.match(T.Keyword, 'WHEN'): ret.append(([], [])) mode = CONDITION elif token.match(T.Keyword, 'THEN'): mode = VALUE elif token.match(T.Keyword, 'ELSE'): ret.append((None, [])) mode = VALUE elif token.match(T.Keyword, 'END'): mode = None # First condition without preceding WHEN if mode and not ret: ret.append(([], [])) # Append token depending on the current mode if mode == CONDITION: ret[-1][0].append(token) elif mode == VALUE: ret[-1][1].append(token) # Return cases list return ret
[ "def", "get_cases", "(", "self", ",", "skip_ws", "=", "False", ")", ":", "CONDITION", "=", "1", "VALUE", "=", "2", "ret", "=", "[", "]", "mode", "=", "CONDITION", "for", "token", "in", "self", ".", "tokens", ":", "# Set mode from the current statement", "if", "token", ".", "match", "(", "T", ".", "Keyword", ",", "'CASE'", ")", ":", "continue", "elif", "skip_ws", "and", "token", ".", "ttype", "in", "T", ".", "Whitespace", ":", "continue", "elif", "token", ".", "match", "(", "T", ".", "Keyword", ",", "'WHEN'", ")", ":", "ret", ".", "append", "(", "(", "[", "]", ",", "[", "]", ")", ")", "mode", "=", "CONDITION", "elif", "token", ".", "match", "(", "T", ".", "Keyword", ",", "'THEN'", ")", ":", "mode", "=", "VALUE", "elif", "token", ".", "match", "(", "T", ".", "Keyword", ",", "'ELSE'", ")", ":", "ret", ".", "append", "(", "(", "None", ",", "[", "]", ")", ")", "mode", "=", "VALUE", "elif", "token", ".", "match", "(", "T", ".", "Keyword", ",", "'END'", ")", ":", "mode", "=", "None", "# First condition without preceding WHEN", "if", "mode", "and", "not", "ret", ":", "ret", ".", "append", "(", "(", "[", "]", ",", "[", "]", ")", ")", "# Append token depending of the current mode", "if", "mode", "==", "CONDITION", ":", "ret", "[", "-", "1", "]", "[", "0", "]", ".", "append", "(", "token", ")", "elif", "mode", "==", "VALUE", ":", "ret", "[", "-", "1", "]", "[", "1", "]", ".", "append", "(", "token", ")", "# Return cases list", "return", "ret" ]
[ 566, 4 ]
[ 611, 18 ]
python
en
['en', 'en', 'en']
True
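A sketch of walking a CASE expression with get_cases(); the recursive find_case() helper is ad-hoc for this example, not part of sqlparse:
    import sqlparse
    from sqlparse.sql import Case

    def find_case(token_list):
        # Depth-first search for the first Case group
        for tok in token_list.tokens:
            if isinstance(tok, Case):
                return tok
            if tok.is_group:
                found = find_case(tok)
                if found is not None:
                    return found
        return None

    stmt = sqlparse.parse("SELECT CASE WHEN x > 0 THEN 'pos' ELSE 'neg' END FROM t")[0]
    for condition, value in find_case(stmt).get_cases(skip_ws=True):
        print(condition, value)  # condition is None for the ELSE branch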
Function.get_parameters
(self)
Return a list of parameters.
Return a list of parameters.
def get_parameters(self): """Return a list of parameters.""" parenthesis = self.tokens[-1] for token in parenthesis.tokens: if isinstance(token, IdentifierList): return token.get_identifiers() elif imt(token, i=(Function, Identifier), t=T.Literal): return [token, ] return []
[ "def", "get_parameters", "(", "self", ")", ":", "parenthesis", "=", "self", ".", "tokens", "[", "-", "1", "]", "for", "token", "in", "parenthesis", ".", "tokens", ":", "if", "isinstance", "(", "token", ",", "IdentifierList", ")", ":", "return", "token", ".", "get_identifiers", "(", ")", "elif", "imt", "(", "token", ",", "i", "=", "(", "Function", ",", "Identifier", ")", ",", "t", "=", "T", ".", "Literal", ")", ":", "return", "[", "token", ",", "]", "return", "[", "]" ]
[ 617, 4 ]
[ 625, 17 ]
python
en
['en', 'en', 'en']
True
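A sketch for get_parameters() on a made-up call; since grouping can nest the Function inside other groups, an ad-hoc recursive search (not part of sqlparse) locates it:
    import sqlparse
    from sqlparse.sql import Function

    def find_function(token_list):
        # Depth-first search for the first Function group
        for tok in token_list.tokens:
            if isinstance(tok, Function):
                return tok
            if tok.is_group:
                found = find_function(tok)
                if found is not None:
                    return found
        return None

    stmt = sqlparse.parse('SELECT myfunc(a, b, c) FROM t')[0]
    func = find_function(stmt)
    print([param.value for param in func.get_parameters()])  # ['a', 'b', 'c']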
run
(argv=None)
The main function which creates the pipeline and runs it.
The main function which creates the pipeline and runs it.
def run(argv=None): """The main function which creates the pipeline and runs it.""" parser = argparse.ArgumentParser() # Here we add some specific command line arguments we expect. # This defaults the output table in your BigQuery; you'll have # to create the example_data dataset yourself using bq mk. parser.add_argument('--output', dest='output', required=False, help='Output BQ table to write results to.', default='lake.orders_denormalized_cogroupbykey') # Parse arguments from the command line. known_args, pipeline_args = parser.parse_known_args(argv) # DataLakeToDataMartCGBK is a class we built in this script to hold the logic for # transforming the file into a BigQuery table. It also contains an example of # using CoGroupByKey data_lake_to_data_mart = DataLakeToDataMartCGBK() schema = parse_table_schema_from_json(data_lake_to_data_mart.schema_str) pipeline = beam.Pipeline(options=PipelineOptions(pipeline_args)) # This query returns details about the account, normalized into a # different table. We will be joining the data into the main orders dataset in order # to create a denormalized table. account_details_source = ( pipeline | 'Read Account Details from BigQuery ' >> beam.io.Read( beam.io.BigQuerySource(query=""" SELECT acct_number, acct_company_name, acct_group_name, acct_name, acct_org_name, address, city, state, zip_code, country FROM `python-dataflow-example.example_data.account` """, use_standard_sql=True)) # This next stage of the pipeline maps the acct_number to a single row of # results from BigQuery. Mapping this way helps Dataflow move your data around # to different workers. When later stages of the pipeline run, all results from # a given account number will run on one worker. | 'Map Account to Account Details' >> beam.Map( lambda row: ( row['acct_number'], row ))) orders_query = data_lake_to_data_mart.get_orders_query() # Read the orders from BigQuery. This is the source of the pipeline. All further # processing starts with rows read from the query results here. orders = ( pipeline | 'Read Orders from BigQuery ' >> beam.io.Read( beam.io.BigQuerySource(query=orders_query, use_standard_sql=True)) | # This next stage of the pipeline maps the acct_number to a single row of # results from BigQuery. Mapping this way helps Dataflow move your data around # to different workers. When later stages of the pipeline run, all results from # a given account number will run on one worker. 'Map Account to Order Details' >> beam.Map( lambda row: ( row['acct_number'], row ))) # CoGroupByKey allows us to arrange the results together by key # Both "orders" and "account_details" are maps of # acct_number -> "Row of results from BigQuery". # The mapping is done in the above code using Beam.Map() result = {'orders': orders, 'account_details': account_details_source} | \ beam.CoGroupByKey() # The add_account_details function is responsible for defining how to # join the two datasets. It passes the results of CoGroupByKey, which # groups the data from the same key in each dataset together in the same # worker. joined = result | beam.FlatMap(data_lake_to_data_mart.add_account_details) joined | 'Write Data to BigQuery' >> beam.io.Write( beam.io.BigQuerySink( # The table name is a required argument for the BigQuery sink. # In this case we use the value passed in from the command line. known_args.output, # Here we use the JSON schema read in from a JSON file. # Specifying the schema allows the API to create the table correctly if it does not yet exist. schema=schema, # Creates the table in BigQuery if it does not yet exist. create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED, # Deletes all data in the BigQuery table before writing. write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)) pipeline.run().wait_until_finish()
[ "def", "run", "(", "argv", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# Here we add some specific command line arguments we expect.", "# This defaults the output table in your BigQuery you'll have", "# to create the example_data dataset yourself using bq mk temp", "parser", ".", "add_argument", "(", "'--output'", ",", "dest", "=", "'output'", ",", "required", "=", "False", ",", "help", "=", "'Output BQ table to write results to.'", ",", "default", "=", "'lake.orders_denormalized_cogroupbykey'", ")", "# Parse arguments from the command line.", "known_args", ",", "pipeline_args", "=", "parser", ".", "parse_known_args", "(", "argv", ")", "# DataLakeToDataMartCGBK is a class we built in this script to hold the logic for", "# transforming the file into a BigQuery table. It also contains an example of", "# using CoGroupByKey", "data_lake_to_data_mart", "=", "DataLakeToDataMartCGBK", "(", ")", "schema", "=", "parse_table_schema_from_json", "(", "data_lake_to_data_mart", ".", "schema_str", ")", "pipeline", "=", "beam", ".", "Pipeline", "(", "options", "=", "PipelineOptions", "(", "pipeline_args", ")", ")", "# This query returns details about the account, normalized into a", "# different table. We will be joining the data in to the main orders dataset in order", "# to create a denormalized table.", "account_details_source", "=", "(", "pipeline", "|", "'Read Account Details from BigQuery '", ">>", "beam", ".", "io", ".", "Read", "(", "beam", ".", "io", ".", "BigQuerySource", "(", "query", "=", "\"\"\"\n SELECT\n acct_number,\n acct_company_name,\n acct_group_name,\n acct_name,\n acct_org_name,\n address,\n city,\n state,\n zip_code,\n country\n FROM\n `python-dataflow-example.example_data.account`\n \"\"\"", ",", "use_standard_sql", "=", "True", ")", ")", "# This next stage of the pipeline maps the acct_number to a single row of", "# results from BigQuery. Mapping this way helps Dataflow move your data arround", "# to different workers. When later stages of the pipeline run, all results from", "# a given account number will run on one worker.", "|", "'Map Account to Order Details'", ">>", "beam", ".", "Map", "(", "lambda", "row", ":", "(", "row", "[", "'acct_number'", "]", ",", "row", ")", ")", ")", "orders_query", "=", "data_lake_to_data_mart", ".", "get_orders_query", "(", ")", "# Read the orders from BigQuery. This is the source of the pipeline. All further", "# processing starts with rows read from the query results here.", "orders", "=", "(", "pipeline", "|", "'Read Orders from BigQuery '", ">>", "beam", ".", "io", ".", "Read", "(", "beam", ".", "io", ".", "BigQuerySource", "(", "query", "=", "orders_query", ",", "use_standard_sql", "=", "True", ")", ")", "|", "# This next stage of the pipeline maps the acct_number to a single row of", "# results from BigQuery. Mapping this way helps Dataflow move your data around", "# to different workers. 
When later stages of the pipeline run, all results from", "# a given account number will run on one worker.", "'Map Account to Account Details'", ">>", "beam", ".", "Map", "(", "lambda", "row", ":", "(", "row", "[", "'acct_number'", "]", ",", "row", ")", ")", ")", "# CoGroupByKey allows us to arrange the results together by key", "# Both \"orders\" and \"account_details\" are maps of", "# acct_number -> \"Row of results from BigQuery\".", "# The mapping is done in the above code using Beam.Map()", "result", "=", "{", "'orders'", ":", "orders", ",", "'account_details'", ":", "account_details_source", "}", "|", "beam", ".", "CoGroupByKey", "(", ")", "# The add_account_details function is responsible for defining how to", "# join the two datasets. It passes the results of CoGroupByKey, which", "# groups the data from the same key in each dataset together in the same", "# worker.", "joined", "=", "result", "|", "beam", ".", "FlatMap", "(", "data_lake_to_data_mart", ".", "add_account_details", ")", "joined", "|", "'Write Data to BigQuery'", ">>", "beam", ".", "io", ".", "Write", "(", "beam", ".", "io", ".", "BigQuerySink", "(", "# The table name is a required argument for the BigQuery sink.", "# In this case we use the value passed in from the command line.", "known_args", ".", "output", ",", "# Here we use the JSON schema read in from a JSON file.", "# Specifying the schema allows the API to create the table correctly if it does not yet exist.", "schema", "=", "schema", ",", "# Creates the table in BigQuery if it does not yet exist.", "create_disposition", "=", "beam", ".", "io", ".", "BigQueryDisposition", ".", "CREATE_IF_NEEDED", ",", "# Deletes all data in the BigQuery table before writing.", "write_disposition", "=", "beam", ".", "io", ".", "BigQueryDisposition", ".", "WRITE_TRUNCATE", ")", ")", "pipeline", ".", "run", "(", ")", ".", "wait_until_finish", "(", ")" ]
[ 232, 0 ]
[ 324, 38 ]
python
en
['en', 'en', 'en']
True
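The keyed CoGroupByKey join at the heart of run() can be tried locally without BigQuery; a minimal sketch with made-up keys and rows, relying only on Beam's default local runner:
    import apache_beam as beam

    with beam.Pipeline() as p:
        orders = p | 'orders' >> beam.Create([('acct1', {'order_id': 7})])
        accounts = p | 'accounts' >> beam.Create([('acct1', {'acct_name': 'Acme'})])
        # Same shape as in run(): a dict of keyed PCollections joined on acct_number
        ({'orders': orders, 'account_details': accounts}
         | beam.CoGroupByKey()
         | beam.Map(print))  # ('acct1', {'orders': [...], 'account_details': [...]})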
DataLakeToDataMartCGBK.get_orders_query
(self)
This returns a query against a very large fact table. We are using a fake orders dataset to simulate a fact table in a typical data warehouse.
This returns a query against a very large fact table. We are using a fake orders dataset to simulate a fact table in a typical data warehouse.
def get_orders_query(self): """This returns a query against a very large fact table. We are using a fake orders dataset to simulate a fact table in a typical data warehouse.""" orders_query = """SELECT acct_number, col_number, col_number_1, col_number_10, col_number_100, col_number_101, col_number_102, col_number_103, col_number_104, col_number_105, col_number_106, col_number_107, col_number_108, col_number_109, col_number_11, col_number_110, col_number_111, col_number_112, col_number_113, col_number_114, col_number_115, col_number_116, col_number_117, col_number_118, col_number_119, col_number_12, col_number_120, col_number_121, col_number_122, col_number_123, col_number_124, col_number_125, col_number_126, col_number_127, col_number_128, col_number_129, col_number_13, col_number_130, col_number_131, col_number_132, col_number_133, col_number_134, col_number_135, col_number_136, col_number_14, col_number_15, col_number_16, col_number_17, col_number_18, col_number_19, col_number_2, col_number_20, col_number_21, col_number_22, col_number_23, col_number_24, col_number_25, col_number_26, col_number_27, col_number_28, col_number_29, col_number_3, col_number_30, col_number_31, col_number_32, col_number_33, col_number_34, col_number_35, col_number_36, col_number_37, col_number_38, col_number_39, col_number_4, col_number_40, col_number_41, col_number_42, col_number_43, col_number_44, col_number_45, col_number_46, col_number_47, col_number_48, col_number_49, col_number_5, col_number_50, col_number_51, col_number_52, col_number_53, col_number_54, col_number_55, col_number_56, col_number_57, col_number_58, col_number_59, col_number_6, col_number_60, col_number_61, col_number_62, col_number_63, col_number_64, col_number_65, col_number_66, col_number_67, col_number_68, col_number_69, col_number_7, col_number_70, col_number_71, col_number_72, col_number_73, col_number_74, col_number_75, col_number_76, col_number_77, col_number_78, col_number_79, col_number_8, col_number_80, col_number_81, col_number_82, col_number_83, col_number_84, col_number_85, col_number_86, col_number_87, col_number_88, col_number_89, col_number_9, col_number_90, col_number_91, col_number_92, col_number_93, col_number_94, col_number_95, col_number_96, col_number_97, col_number_98, col_number_99, col_number_num1, date, foo, num1, num2, num3, num5, num6, product_number, quantity FROM `python-dataflow-example.example_data.orders` orders LIMIT 10 """ return orders_query
[ "def", "get_orders_query", "(", "self", ")", ":", "orders_query", "=", "\"\"\"SELECT\n acct_number,\n col_number,\n col_number_1,\n col_number_10,\n col_number_100,\n col_number_101,\n col_number_102,\n col_number_103,\n col_number_104,\n col_number_105,\n col_number_106,\n col_number_107,\n col_number_108,\n col_number_109,\n col_number_11,\n col_number_110,\n col_number_111,\n col_number_112,\n col_number_113,\n col_number_114,\n col_number_115,\n col_number_116,\n col_number_117,\n col_number_118,\n col_number_119,\n col_number_12,\n col_number_120,\n col_number_121,\n col_number_122,\n col_number_123,\n col_number_124,\n col_number_125,\n col_number_126,\n col_number_127,\n col_number_128,\n col_number_129,\n col_number_13,\n col_number_130,\n col_number_131,\n col_number_132,\n col_number_133,\n col_number_134,\n col_number_135,\n col_number_136,\n col_number_14,\n col_number_15,\n col_number_16,\n col_number_17,\n col_number_18,\n col_number_19,\n col_number_2,\n col_number_20,\n col_number_21,\n col_number_22,\n col_number_23,\n col_number_24,\n col_number_25,\n col_number_26,\n col_number_27,\n col_number_28,\n col_number_29,\n col_number_3,\n col_number_30,\n col_number_31,\n col_number_32,\n col_number_33,\n col_number_34,\n col_number_35,\n col_number_36,\n col_number_37,\n col_number_38,\n col_number_39,\n col_number_4,\n col_number_40,\n col_number_41,\n col_number_42,\n col_number_43,\n col_number_44,\n col_number_45,\n col_number_46,\n col_number_47,\n col_number_48,\n col_number_49,\n col_number_5,\n col_number_50,\n col_number_51,\n col_number_52,\n col_number_53,\n col_number_54,\n col_number_55,\n col_number_56,\n col_number_57,\n col_number_58,\n col_number_59,\n col_number_6,\n col_number_60,\n col_number_61,\n col_number_62,\n col_number_63,\n col_number_64,\n col_number_65,\n col_number_66,\n col_number_67,\n col_number_68,\n col_number_69,\n col_number_7,\n col_number_70,\n col_number_71,\n col_number_72,\n col_number_73,\n col_number_74,\n col_number_75,\n col_number_76,\n col_number_77,\n col_number_78,\n col_number_79,\n col_number_8,\n col_number_80,\n col_number_81,\n col_number_82,\n col_number_83,\n col_number_84,\n col_number_85,\n col_number_86,\n col_number_87,\n col_number_88,\n col_number_89,\n col_number_9,\n col_number_90,\n col_number_91,\n col_number_92,\n col_number_93,\n col_number_94,\n col_number_95,\n col_number_96,\n col_number_97,\n col_number_98,\n col_number_99,\n col_number_num1,\n date,\n foo,\n num1,\n num2,\n num3,\n num5,\n num6,\n product_number,\n quantity\n FROM\n `python-dataflow-example.example_data.orders` orders\n LIMIT\n 10 \n \"\"\"", "return", "orders_query" ]
[ 48, 4 ]
[ 206, 27 ]
python
en
['en', 'en', 'en']
True