identifier (string, 1-155 chars) | parameters (string, 2-6.09k chars) | docstring (string, 11-63.4k chars) | docstring_summary (string, 0-63.4k chars) | function (string, 29-99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2-7 chars) | docstring_language_predictions (string, 18-23 chars) | is_langid_reliable (2 classes)
---|---|---|---|---|---|---|---|---|---|---|---
S3Url.suffix | (self) |
Attempts to get a file suffix from the S3 key.
If can't find one returns `None`.
|
Attempts to get a file suffix from the S3 key.
If can't find one returns `None`.
| def suffix(self) -> Optional[str]:
    """
    Attempts to get a file suffix from the S3 key.
    If can't find one returns `None`.
    """
    splits = self._parsed.path.rsplit(".", 1)
    _suffix = splits[-1]
    if len(_suffix) > 0 and len(splits) > 1:
        return str(_suffix)
    return None
| [function_tokens omitted] | start_point: [446, 4] | end_point: [455, 19] | python | en | ['en', 'error', 'th'] | False |
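The suffix logic above only splits the S3 key's path on its last dot. A minimal standalone sketch of the same idea using just the standard library (the surrounding `S3Url` wrapper and its `_parsed` attribute come from the original project and are not reproduced here):

```python
from typing import Optional
from urllib.parse import urlparse

def s3_key_suffix(url: str) -> Optional[str]:
    """Return the file extension of an S3 key, or None if there is none."""
    path = urlparse(url).path            # e.g. "/reports/2020/data.csv"
    splits = path.rsplit(".", 1)
    suffix = splits[-1]
    if len(suffix) > 0 and len(splits) > 1:
        return suffix
    return None

print(s3_key_suffix("s3://my-bucket/reports/2020/data.csv"))  # csv
print(s3_key_suffix("s3://my-bucket/reports/2020/data"))      # None
```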
move_on_at | (deadline) | Use as a context manager to create a cancel scope with the given
absolute deadline.
Args:
deadline (float): The deadline.
| Use as a context manager to create a cancel scope with the given
absolute deadline. | def move_on_at(deadline):
"""Use as a context manager to create a cancel scope with the given
absolute deadline.
Args:
deadline (float): The deadline.
"""
    return trio.CancelScope(deadline=deadline)
| [function_tokens omitted] | start_point: [5, 0] | end_point: [13, 46] | python | en | ['en', 'en', 'en'] | True |
move_on_after | (seconds) | Use as a context manager to create a cancel scope whose deadline is
set to now + *seconds*.
Args:
seconds (float): The timeout.
Raises:
ValueError: if timeout is less than zero.
| Use as a context manager to create a cancel scope whose deadline is
set to now + *seconds*. | def move_on_after(seconds):
"""Use as a context manager to create a cancel scope whose deadline is
set to now + *seconds*.
Args:
seconds (float): The timeout.
Raises:
ValueError: if timeout is less than zero.
"""
if seconds < 0:
raise ValueError("timeout must be non-negative")
    return move_on_at(trio.current_time() + seconds)
| [function_tokens omitted] | start_point: [16, 0] | end_point: [30, 52] | python | en | ['en', 'en', 'en'] | True |
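The two rows above are trio's basic timeout helpers. A small usage sketch, assuming the published `trio` package where these are exposed as `trio.move_on_after` / `trio.move_on_at`:

```python
import trio

async def main():
    # Give the block at most 0.5 s; on timeout the scope is cancelled quietly.
    with trio.move_on_after(0.5) as scope:
        await trio.sleep(2)              # would otherwise take 2 seconds
    if scope.cancelled_caught:
        print("timed out, moving on")

trio.run(main)
```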
sleep_forever | () | Pause execution of the current task forever (or until cancelled).
Equivalent to calling ``await sleep(math.inf)``.
| Pause execution of the current task forever (or until cancelled). | async def sleep_forever():
"""Pause execution of the current task forever (or until cancelled).
Equivalent to calling ``await sleep(math.inf)``.
"""
    await trio.lowlevel.wait_task_rescheduled(lambda _: trio.lowlevel.Abort.SUCCEEDED)
| [function_tokens omitted] | start_point: [33, 0] | end_point: [39, 86] | python | en | ['en', 'en', 'en'] | True |
sleep_until | (deadline) | Pause execution of the current task until the given time.
The difference between :func:`sleep` and :func:`sleep_until` is that the
former takes a relative time and the latter takes an absolute time.
Args:
deadline (float): The time at which we should wake up again. May be in
the past, in which case this function executes a checkpoint but
does not block.
| Pause execution of the current task until the given time. | async def sleep_until(deadline):
"""Pause execution of the current task until the given time.
The difference between :func:`sleep` and :func:`sleep_until` is that the
former takes a relative time and the latter takes an absolute time.
Args:
deadline (float): The time at which we should wake up again. May be in
the past, in which case this function executes a checkpoint but
does not block.
"""
with move_on_at(deadline):
        await sleep_forever()
| [function_tokens omitted] | start_point: [42, 0] | end_point: [55, 29] | python | en | ['en', 'en', 'en'] | True |
sleep | (seconds) | Pause execution of the current task for the given number of seconds.
Args:
seconds (float): The number of seconds to sleep. May be zero to
insert a checkpoint without actually blocking.
Raises:
ValueError: if *seconds* is negative.
| Pause execution of the current task for the given number of seconds. | async def sleep(seconds):
"""Pause execution of the current task for the given number of seconds.
Args:
seconds (float): The number of seconds to sleep. May be zero to
insert a checkpoint without actually blocking.
Raises:
ValueError: if *seconds* is negative.
"""
if seconds < 0:
raise ValueError("duration must be non-negative")
if seconds == 0:
await trio.lowlevel.checkpoint()
else:
        await sleep_until(trio.current_time() + seconds)
| [function_tokens omitted] | start_point: [58, 0] | end_point: [74, 56] | python | en | ['en', 'en', 'en'] | True |
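As the docstring notes, `sleep(0)` is a pure checkpoint: it yields to the scheduler and allows cancellation without actually blocking. A short sketch, again assuming the published `trio` package:

```python
import trio

async def crunch():
    total = 0
    for i in range(3):
        total += sum(range(10_000))   # stand-in for a chunk of real work
        await trio.sleep(0)           # checkpoint: let other tasks run, stay cancellable
    return total

print(trio.run(crunch))
```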
fail_at | (deadline) | Creates a cancel scope with the given deadline, and raises an error if it
is actually cancelled.
This function and :func:`move_on_at` are similar in that both create a
cancel scope with a given absolute deadline, and if the deadline expires
then both will cause :exc:`Cancelled` to be raised within the scope. The
difference is that when the :exc:`Cancelled` exception reaches
:func:`move_on_at`, it's caught and discarded. When it reaches
:func:`fail_at`, then it's caught and :exc:`TooSlowError` is raised in its
place.
Raises:
TooSlowError: if a :exc:`Cancelled` exception is raised in this scope
and caught by the context manager.
| Creates a cancel scope with the given deadline, and raises an error if it
is actually cancelled. | def fail_at(deadline):
"""Creates a cancel scope with the given deadline, and raises an error if it
is actually cancelled.
This function and :func:`move_on_at` are similar in that both create a
cancel scope with a given absolute deadline, and if the deadline expires
then both will cause :exc:`Cancelled` to be raised within the scope. The
difference is that when the :exc:`Cancelled` exception reaches
:func:`move_on_at`, it's caught and discarded. When it reaches
:func:`fail_at`, then it's caught and :exc:`TooSlowError` is raised in its
place.
Raises:
TooSlowError: if a :exc:`Cancelled` exception is raised in this scope
and caught by the context manager.
"""
with move_on_at(deadline) as scope:
yield scope
if scope.cancelled_caught:
        raise TooSlowError
| [function_tokens omitted] | start_point: [85, 0] | end_point: [106, 26] | python | en | ['en', 'en', 'en'] | True |
fail_after | (seconds) | Creates a cancel scope with the given timeout, and raises an error if
it is actually cancelled.
This function and :func:`move_on_after` are similar in that both create a
cancel scope with a given timeout, and if the timeout expires then both
will cause :exc:`Cancelled` to be raised within the scope. The difference
is that when the :exc:`Cancelled` exception reaches :func:`move_on_after`,
it's caught and discarded. When it reaches :func:`fail_after`, then it's
caught and :exc:`TooSlowError` is raised in its place.
Raises:
TooSlowError: if a :exc:`Cancelled` exception is raised in this scope
and caught by the context manager.
ValueError: if *seconds* is less than zero.
| Creates a cancel scope with the given timeout, and raises an error if
it is actually cancelled. | def fail_after(seconds):
"""Creates a cancel scope with the given timeout, and raises an error if
it is actually cancelled.
This function and :func:`move_on_after` are similar in that both create a
cancel scope with a given timeout, and if the timeout expires then both
will cause :exc:`Cancelled` to be raised within the scope. The difference
is that when the :exc:`Cancelled` exception reaches :func:`move_on_after`,
it's caught and discarded. When it reaches :func:`fail_after`, then it's
caught and :exc:`TooSlowError` is raised in its place.
Raises:
TooSlowError: if a :exc:`Cancelled` exception is raised in this scope
and caught by the context manager.
ValueError: if *seconds* is less than zero.
"""
if seconds < 0:
raise ValueError("timeout must be non-negative")
    return fail_at(trio.current_time() + seconds)
| [function_tokens omitted] | start_point: [109, 0] | end_point: [128, 49] | python | en | ['en', 'en', 'en'] | True |
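`fail_after` / `fail_at` convert the silent cancellation of `move_on_after` into a `TooSlowError` that callers can catch. A usage sketch, assuming the published `trio` package:

```python
import trio

async def fetch_or_give_up():
    try:
        with trio.fail_after(1.0):
            await trio.sleep(5)          # simulate a slow operation
    except trio.TooSlowError:
        print("operation did not finish within 1 s")

trio.run(fetch_or_give_up)
```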
scanner_t.xml_generator_from_xml_file | (self) |
Configuration object containing information about the xml generator
read from the xml file.
Returns:
utils.xml_generators: configuration object
|
Configuration object containing information about the xml generator
read from the xml file. | def xml_generator_from_xml_file(self):
"""
Configuration object containing information about the xml generator
read from the xml file.
Returns:
utils.xml_generators: configuration object
"""
    return self.__xml_generator_from_xml_file
| [function_tokens omitted] | start_point: [169, 4] | end_point: [177, 49] | python | en | ['en', 'error', 'th'] | False |
scanner_t.__read_location_bootstrap | (self, inst, decl, attrs, _) | This function monkey patches the __read_location function to either
__read_location_gccxml or __read_location_castxml depending on the
xml generator in use
| This function monkey patches the __read_location function to either
__read_location_gccxml or __read_location_castxml depending on the
xml generator in use
| def __read_location_bootstrap(self, inst, decl, attrs, _):
""" This function monkey patches the __read_location function to either
__read_location_gccxml or __read_location_castxml depending on the
xml generator in use
"""
if self.__xml_generator_from_xml_file.is_castxml:
# These fields are generated by clang, and have no location.
# Just set an empty location for them. Gccxml does not have
# this problem.
# bug #19: gp_offset, fp_offset, overflow_arg_area, reg_save_area
# bug #32: isa, flags, str and length were added in llvm 3.9
inst.__name_attrs_to_skip = [
"gp_offset",
"fp_offset",
"overflow_arg_area",
"reg_save_area",
"isa",
"flags",
"str",
"length"
]
inst.__read_location = inst.__read_location_castxml
else:
inst.__read_location = inst.__read_location_gccxml
return inst.__read_location(decl, attrs, inst.__name_attrs_to_skip)
| [function_tokens omitted] | start_point: [279, 4] | end_point: [304, 75] | python | en | ['en', 'en', 'en'] | True |
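The row above uses a bootstrap method that, on first use, replaces itself with the generator-specific reader. A generic, self-contained illustration of that dispatch-by-monkey-patching pattern (the class and method names below are invented for the example and are not pygccxml's actual code):

```python
class Reader:
    def __init__(self, is_castxml: bool):
        self.is_castxml = is_castxml
        self._read = self._read_bootstrap   # first call goes through the bootstrap

    def _read_castxml(self, node):
        return ("castxml", node)

    def _read_gccxml(self, node):
        return ("gccxml", node)

    def _read_bootstrap(self, node):
        # Pick the right implementation once, replace ourselves, then delegate.
        impl = self._read_castxml if self.is_castxml else self._read_gccxml
        self._read = impl
        return impl(node)

reader = Reader(is_castxml=True)
print(reader._read("node-1"))   # dispatches via the bootstrap
print(reader._read("node-2"))   # now calls _read_castxml directly
```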
scanner_t.__read_byte_size | (decl, attrs) | Using duck typing to set the size instead of in constructor | Using duck typing to set the size instead of in constructor | def __read_byte_size(decl, attrs):
"""Using duck typing to set the size instead of in constructor"""
size = attrs.get(XML_AN_SIZE, 0)
# Make sure the size is in bytes instead of bits
    decl.byte_size = int(size) / 8
| [function_tokens omitted] | start_point: [369, 4] | end_point: [373, 38] | python | en | ['en', 'en', 'en'] | True |
scanner_t.__read_byte_offset | (decl, attrs) | Using duck typing to set the offset instead of in constructor | Using duck typing to set the offset instead of in constructor | def __read_byte_offset(decl, attrs):
"""Using duck typing to set the offset instead of in constructor"""
offset = attrs.get(XML_AN_OFFSET, 0)
# Make sure the size is in bytes instead of bits
    decl.byte_offset = int(offset) / 8
| [function_tokens omitted] | start_point: [376, 4] | end_point: [380, 42] | python | en | ['en', 'en', 'en'] | True |
scanner_t.__read_byte_align | (decl, attrs) | Using duck typing to set the alignment | Using duck typing to set the alignment | def __read_byte_align(decl, attrs):
"""Using duck typing to set the alignment"""
align = attrs.get(XML_AN_ALIGN, 0)
# Make sure the size is in bytes instead of bits
    decl.byte_align = int(align) / 8
| [function_tokens omitted] | start_point: [383, 4] | end_point: [387, 40] | python | en | ['en', 'en', 'en'] | True |
ColumnMean._pandas | (cls, column, **kwargs) | Pandas Mean Implementation | Pandas Mean Implementation | def _pandas(cls, column, **kwargs):
"""Pandas Mean Implementation"""
return column.mean()
| [function_tokens omitted] | start_point: [21, 4] | end_point: [23, 28] | python | en | ['pt', 'fr', 'en'] | False |
ColumnMean._sqlalchemy | (cls, column, **kwargs) | SqlAlchemy Mean Implementation | SqlAlchemy Mean Implementation | def _sqlalchemy(cls, column, **kwargs):
"""SqlAlchemy Mean Implementation"""
# column * 1.0 needed for correct calculation of avg in MSSQL
return sa.func.avg(column * 1.0)
| [function_tokens omitted] | start_point: [26, 4] | end_point: [29, 40] | python | en | ['en', 'en', 'en'] | True |
ColumnMean._spark | (cls, column, _table, _column_name, **kwargs) | Spark Mean Implementation | Spark Mean Implementation | def _spark(cls, column, _table, _column_name, **kwargs):
"""Spark Mean Implementation"""
types = dict(_table.dtypes)
if types[_column_name] not in ("int", "float", "double", "bigint"):
raise TypeError("Expected numeric column type for function mean()")
return F.mean(column)
| [function_tokens omitted] | start_point: [32, 4] | end_point: [37, 29] | python | en | ['en', 'da', 'en'] | True |
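The three rows above implement the same column-mean metric for pandas, SQLAlchemy, and Spark backends. The SQLAlchemy variant's `column * 1.0` is the interesting detail: it promotes integer columns to floating point so MSSQL's `AVG()` does not truncate. A small sketch of that expression (assumes SQLAlchemy 1.4+; the table and column names are made up):

```python
import sqlalchemy as sa

metadata = sa.MetaData()
orders = sa.Table("orders", metadata, sa.Column("amount", sa.Integer))

# Multiplying by 1.0 forces a floating-point AVG() even on integer columns.
query = sa.select(sa.func.avg(orders.c.amount * 1.0))
print(query)   # roughly: SELECT avg(orders.amount * :amount_1) AS avg_1 FROM orders
```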
open_tcp_stream | (
host, port, *, happy_eyeballs_delay=DEFAULT_DELAY, local_address=None
) | Connect to the given host and port over TCP.
If the given ``host`` has multiple IP addresses associated with it, then
we have a problem: which one do we use?
One approach would be to attempt to connect to the first one, and then if
that fails, attempt to connect to the second one ... until we've tried all
of them. But the problem with this is that if the first IP address is
unreachable (for example, because it's an IPv6 address and our network
discards IPv6 packets), then we might end up waiting tens of seconds for
the first connection attempt to timeout before we try the second address.
Another approach would be to attempt to connect to all of the addresses at
the same time, in parallel, and then use whichever connection succeeds
first, abandoning the others. This would be fast, but create a lot of
unnecessary load on the network and the remote server.
This function strikes a balance between these two extremes: it works its
way through the available addresses one at a time, like the first
approach; but, if ``happy_eyeballs_delay`` seconds have passed and it's
still waiting for an attempt to succeed or fail, then it gets impatient
and starts the next connection attempt in parallel. As soon as any one
connection attempt succeeds, all the other attempts are cancelled. This
avoids unnecessary load because most connections will succeed after just
one or two attempts, but if one of the addresses is unreachable then it
doesn't slow us down too much.
This is known as a "happy eyeballs" algorithm, and our particular variant
is modelled after how Chrome connects to webservers; see `RFC 6555
<https://tools.ietf.org/html/rfc6555>`__ for more details.
Args:
host (str or bytes): The host to connect to. Can be an IPv4 address,
IPv6 address, or a hostname.
port (int): The port to connect to.
happy_eyeballs_delay (float): How many seconds to wait for each
connection attempt to succeed or fail before getting impatient and
starting another one in parallel. Set to `math.inf` if you want
to limit to only one connection attempt at a time (like
:func:`socket.create_connection`). Default: 0.25 (250 ms).
local_address (None or str): The local IP address or hostname to use as
the source for outgoing connections. If ``None``, we let the OS pick
the source IP.
This is useful in some exotic networking configurations where your
host has multiple IP addresses, and you want to force the use of a
specific one.
Note that if you pass an IPv4 ``local_address``, then you won't be
able to connect to IPv6 hosts, and vice-versa. If you want to take
advantage of this to force the use of IPv4 or IPv6 without
specifying an exact source address, you can use the IPv4 wildcard
address ``local_address="0.0.0.0"``, or the IPv6 wildcard address
``local_address="::"``.
Returns:
SocketStream: a :class:`~trio.abc.Stream` connected to the given server.
Raises:
OSError: if the connection fails.
See also:
open_ssl_over_tcp_stream
| Connect to the given host and port over TCP. | async def open_tcp_stream(
host, port, *, happy_eyeballs_delay=DEFAULT_DELAY, local_address=None
):
"""Connect to the given host and port over TCP.
If the given ``host`` has multiple IP addresses associated with it, then
we have a problem: which one do we use?
One approach would be to attempt to connect to the first one, and then if
that fails, attempt to connect to the second one ... until we've tried all
of them. But the problem with this is that if the first IP address is
unreachable (for example, because it's an IPv6 address and our network
discards IPv6 packets), then we might end up waiting tens of seconds for
the first connection attempt to timeout before we try the second address.
Another approach would be to attempt to connect to all of the addresses at
the same time, in parallel, and then use whichever connection succeeds
first, abandoning the others. This would be fast, but create a lot of
unnecessary load on the network and the remote server.
This function strikes a balance between these two extremes: it works its
way through the available addresses one at a time, like the first
approach; but, if ``happy_eyeballs_delay`` seconds have passed and it's
still waiting for an attempt to succeed or fail, then it gets impatient
and starts the next connection attempt in parallel. As soon as any one
connection attempt succeeds, all the other attempts are cancelled. This
avoids unnecessary load because most connections will succeed after just
one or two attempts, but if one of the addresses is unreachable then it
doesn't slow us down too much.
This is known as a "happy eyeballs" algorithm, and our particular variant
is modelled after how Chrome connects to webservers; see `RFC 6555
<https://tools.ietf.org/html/rfc6555>`__ for more details.
Args:
host (str or bytes): The host to connect to. Can be an IPv4 address,
IPv6 address, or a hostname.
port (int): The port to connect to.
happy_eyeballs_delay (float): How many seconds to wait for each
connection attempt to succeed or fail before getting impatient and
starting another one in parallel. Set to `math.inf` if you want
to limit to only one connection attempt at a time (like
:func:`socket.create_connection`). Default: 0.25 (250 ms).
local_address (None or str): The local IP address or hostname to use as
the source for outgoing connections. If ``None``, we let the OS pick
the source IP.
This is useful in some exotic networking configurations where your
host has multiple IP addresses, and you want to force the use of a
specific one.
Note that if you pass an IPv4 ``local_address``, then you won't be
able to connect to IPv6 hosts, and vice-versa. If you want to take
advantage of this to force the use of IPv4 or IPv6 without
specifying an exact source address, you can use the IPv4 wildcard
address ``local_address="0.0.0.0"``, or the IPv6 wildcard address
``local_address="::"``.
Returns:
SocketStream: a :class:`~trio.abc.Stream` connected to the given server.
Raises:
OSError: if the connection fails.
See also:
open_ssl_over_tcp_stream
"""
# To keep our public API surface smaller, rule out some cases that
# getaddrinfo will accept in some circumstances, but that act weird or
# have non-portable behavior or are just plain not useful.
# No type check on host though b/c we want to allow bytes-likes.
if host is None:
raise ValueError("host cannot be None")
if not isinstance(port, int):
raise TypeError("port must be int, not {!r}".format(port))
if happy_eyeballs_delay is None:
happy_eyeballs_delay = DEFAULT_DELAY
targets = await getaddrinfo(host, port, type=SOCK_STREAM)
# I don't think this can actually happen -- if there are no results,
# getaddrinfo should have raised OSError instead of returning an empty
# list. But let's be paranoid and handle it anyway:
if not targets:
msg = "no results found for hostname lookup: {}".format(
format_host_port(host, port)
)
raise OSError(msg)
reorder_for_rfc_6555_section_5_4(targets)
# This list records all the connection failures that we ignored.
oserrors = []
# Keeps track of the socket that we're going to complete with,
# need to make sure this isn't automatically closed
winning_socket = None
# Try connecting to the specified address. Possible outcomes:
# - success: record connected socket in winning_socket and cancel
# concurrent attempts
# - failure: record exception in oserrors, set attempt_failed allowing
# the next connection attempt to start early
# code needs to ensure sockets can be closed appropriately in the
# face of crash or cancellation
async def attempt_connect(socket_args, sockaddr, attempt_failed):
nonlocal winning_socket
try:
sock = socket(*socket_args)
open_sockets.add(sock)
if local_address is not None:
# TCP connections are identified by a 4-tuple:
#
# (local IP, local port, remote IP, remote port)
#
# So if a single local IP wants to make multiple connections
# to the same (remote IP, remote port) pair, then those
# connections have to use different local ports, or else TCP
# won't be able to tell them apart. OTOH, if you have multiple
# connections to different remote IP/ports, then those
# connections can share a local port.
#
# Normally, when you call bind(), the kernel will immediately
# assign a specific local port to your socket. At this point
# the kernel doesn't know which (remote IP, remote port)
# you're going to use, so it has to pick a local port that
# *no* other connection is using. That's the only way to
# guarantee that this local port will be usable later when we
# call connect(). (Alternatively, you can set SO_REUSEADDR to
# allow multiple nascent connections to share the same port,
# but then connect() might fail with EADDRNOTAVAIL if we get
# unlucky and our TCP 4-tuple ends up colliding with another
# unrelated connection.)
#
# So calling bind() before connect() works, but it disables
# sharing of local ports. This is inefficient: it makes you
# more likely to run out of local ports.
#
# But on some versions of Linux, we can re-enable sharing of
# local ports by setting a special flag. This flag tells
# bind() to only bind the IP, and not the port. That way,
# connect() is allowed to pick the port, and it can do a
# better job of it because it knows the remote IP/port.
try:
sock.setsockopt(
trio.socket.IPPROTO_IP, trio.socket.IP_BIND_ADDRESS_NO_PORT, 1
)
except (OSError, AttributeError):
pass
try:
await sock.bind((local_address, 0))
except OSError:
raise OSError(
f"local_address={local_address!r} is incompatible "
f"with remote address {sockaddr}"
)
await sock.connect(sockaddr)
# Success! Save the winning socket and cancel all outstanding
# connection attempts.
winning_socket = sock
nursery.cancel_scope.cancel()
except OSError as exc:
# This connection attempt failed, but the next one might
# succeed. Save the error for later so we can report it if
# everything fails, and tell the next attempt that it should go
# ahead (if it hasn't already).
oserrors.append(exc)
attempt_failed.set()
with close_all() as open_sockets:
# nursery spawns a task for each connection attempt, will be
# cancelled by the task that gets a successful connection
async with trio.open_nursery() as nursery:
for *sa, _, addr in targets:
# create an event to indicate connection failure,
# allowing the next target to be tried early
attempt_failed = trio.Event()
nursery.start_soon(attempt_connect, sa, addr, attempt_failed)
# give this attempt at most this time before moving on
with trio.move_on_after(happy_eyeballs_delay):
await attempt_failed.wait()
# nothing succeeded
if winning_socket is None:
assert len(oserrors) == len(targets)
msg = "all attempts to connect to {} failed".format(
format_host_port(host, port)
)
raise OSError(msg) from trio.MultiError(oserrors)
else:
stream = trio.SocketStream(winning_socket)
open_sockets.remove(winning_socket)
return stream
| [function_tokens omitted] | start_point: [166, 0] | end_point: [370, 25] | python | en | ['en', 'en', 'en'] | True |
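A minimal usage sketch for the happy-eyeballs connector above (assumes the published `trio` package and a reachable host):

```python
import trio

async def main():
    stream = await trio.open_tcp_stream("example.com", 80)
    async with stream:
        await stream.send_all(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        print(await stream.receive_some(4096))

trio.run(main)
```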
Validator._repr_args | (self) | A string representation of the args passed to this validator. Used by
`__repr__`.
| A string representation of the args passed to this validator. Used by
`__repr__`.
| def _repr_args(self) -> str:
"""A string representation of the args passed to this validator. Used by
`__repr__`.
"""
return "" | [
"def",
"_repr_args",
"(",
"self",
")",
"->",
"str",
":",
"return",
"\"\""
] | [
29,
4
] | [
33,
17
] | python | en | ['en', 'en', 'en'] | True |
OneOf.options | (
self,
valuegetter: typing.Union[str, typing.Callable[[typing.Any], typing.Any]] = str,
) | Return a generator over the (value, label) pairs, where value
is a string associated with each choice. This convenience method
is useful to populate, for instance, a form select field.
:param valuegetter: Can be a callable or a string. In the former case, it must
be a one-argument callable which returns the value of a
choice. In the latter case, the string specifies the name
of an attribute of the choice objects. Defaults to `str()`
or `str()`.
| Return a generator over the (value, label) pairs, where value
is a string associated with each choice. This convenience method
is useful to populate, for instance, a form select field. | def options(
self,
valuegetter: typing.Union[str, typing.Callable[[typing.Any], typing.Any]] = str,
) -> typing.Iterable[typing.Tuple[typing.Any, str]]:
"""Return a generator over the (value, label) pairs, where value
is a string associated with each choice. This convenience method
is useful to populate, for instance, a form select field.
:param valuegetter: Can be a callable or a string. In the former case, it must
be a one-argument callable which returns the value of a
choice. In the latter case, the string specifies the name
of an attribute of the choice objects. Defaults to `str()`
or `str()`.
"""
valuegetter = valuegetter if callable(valuegetter) else attrgetter(valuegetter)
pairs = zip_longest(self.choices, self.labels, fillvalue="")
return ((valuegetter(choice), label) for choice, label in pairs)
| [function_tokens omitted] | start_point: [504, 4] | end_point: [521, 72] | python | en | ['en', 'en', 'en'] | True |
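This validator looks like marshmallow's `OneOf`; assuming that, `options()` is handy for building UI select fields. A usage sketch with invented choices:

```python
from marshmallow import validate

status = validate.OneOf(
    choices=["open", "shipped", "closed"],
    labels=["Open", "Shipped", "Closed"],
)

print(list(status.options()))
# [('open', 'Open'), ('shipped', 'Shipped'), ('closed', 'Closed')]
```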
suite | () | Expectation Suite operations | Expectation Suite operations | def suite():
"""Expectation Suite operations"""
pass
| [function_tokens omitted] | start_point: [32, 0] | end_point: [34, 8] | python | en | ['ca', 'en', 'en'] | True |
suite_edit | (suite, datasource, directory, jupyter, batch_kwargs) |
Generate a Jupyter notebook for editing an existing Expectation Suite.
The SUITE argument is required. This is the name you gave to the suite
when you created it.
A batch of data is required to edit the suite, which is used as a sample.
The edit command will help you specify a batch interactively. Or you can
specify them manually by providing --batch-kwargs in valid JSON format.
Read more about specifying batches of data in the documentation: https://docs.greatexpectations.io/
|
Generate a Jupyter notebook for editing an existing Expectation Suite. | def suite_edit(suite, datasource, directory, jupyter, batch_kwargs):
"""
Generate a Jupyter notebook for editing an existing Expectation Suite.
The SUITE argument is required. This is the name you gave to the suite
when you created it.
A batch of data is required to edit the suite, which is used as a sample.
The edit command will help you specify a batch interactively. Or you can
specify them manually by providing --batch-kwargs in valid JSON format.
Read more about specifying batches of data in the documentation: https://docs.greatexpectations.io/
"""
_suite_edit(
suite,
datasource,
directory,
jupyter,
batch_kwargs,
usage_event="cli.suite.edit",
)
| [function_tokens omitted] | start_point: [64, 0] | end_point: [85, 5] | python | en | ['en', 'error', 'th'] | False |
suite_demo | (suite, directory, view) |
Create a new demo Expectation Suite.
Great Expectations will choose a couple of columns and generate expectations
about them to demonstrate some examples of assertions you can make about
your data.
|
Create a new demo Expectation Suite. | def suite_demo(suite, directory, view):
"""
Create a new demo Expectation Suite.
Great Expectations will choose a couple of columns and generate expectations
about them to demonstrate some examples of assertions you can make about
your data.
"""
_suite_new(
suite=suite,
directory=directory,
empty=False,
jupyter=False,
view=view,
batch_kwargs=None,
usage_event="cli.suite.demo",
)
| [function_tokens omitted] | start_point: [235, 0] | end_point: [251, 5] | python | en | ['en', 'error', 'th'] | False |
suite_new | (suite, directory, jupyter, batch_kwargs) |
Create a new empty Expectation Suite.
Edit in jupyter notebooks, or skip with the --no-jupyter flag
|
Create a new empty Expectation Suite. | def suite_new(suite, directory, jupyter, batch_kwargs):
"""
Create a new empty Expectation Suite.
Edit in jupyter notebooks, or skip with the --no-jupyter flag
"""
_suite_new(
suite=suite,
directory=directory,
empty=True,
jupyter=jupyter,
view=False,
batch_kwargs=batch_kwargs,
usage_event="cli.suite.new",
)
| [function_tokens omitted] | start_point: [273, 0] | end_point: [287, 5] | python | en | ['en', 'error', 'th'] | False |
suite_delete | (suite, directory) |
Delete an expectation suite from the expectation store.
|
Delete an expectation suite from the expectation store.
| def suite_delete(suite, directory):
"""
Delete an expectation suite from the expectation store.
"""
usage_event = "cli.suite.delete"
context = toolkit.load_data_context_with_error_handling(directory)
suite_names = context.list_expectation_suite_names()
if not suite_names:
toolkit.exit_with_failure_message_and_stats(
context,
usage_event,
"</red>No expectation suites found in the project.</red>",
)
if suite not in suite_names:
toolkit.exit_with_failure_message_and_stats(
context, usage_event, f"No expectation suite named {suite} found."
)
context.delete_expectation_suite(suite)
cli_message(f"Deleted the expectation suite named: {suite}")
toolkit.send_usage_message(data_context=context, event=usage_event, success=True)
| [function_tokens omitted] | start_point: [373, 0] | end_point: [394, 85] | python | en | ['en', 'error', 'th'] | False |
suite_scaffold | (suite, directory, jupyter) | Scaffold a new Expectation Suite. | Scaffold a new Expectation Suite. | def suite_scaffold(suite, directory, jupyter):
"""Scaffold a new Expectation Suite."""
_suite_scaffold(suite, directory, jupyter)
| [function_tokens omitted] | start_point: [412, 0] | end_point: [414, 46] | python | en | ['en', 'en', 'en'] | True |
suite_list | (directory) | Lists available Expectation Suites. | Lists available Expectation Suites. | def suite_list(directory):
"""Lists available Expectation Suites."""
context = toolkit.load_data_context_with_error_handling(directory)
try:
suite_names = [
" - <cyan>{}</cyan>".format(suite_name)
for suite_name in context.list_expectation_suite_names()
]
if len(suite_names) == 0:
cli_message("No Expectation Suites found")
toolkit.send_usage_message(
data_context=context, event="cli.suite.list", success=True
)
return
elif len(suite_names) == 1:
list_intro_string = "1 Expectation Suite found:"
else:
list_intro_string = "{} Expectation Suites found:".format(len(suite_names))
cli_message_list(suite_names, list_intro_string)
toolkit.send_usage_message(
data_context=context, event="cli.suite.list", success=True
)
except Exception as e:
toolkit.send_usage_message(
data_context=context, event="cli.suite.list", success=False
)
raise e
| [function_tokens omitted] | start_point: [464, 0] | end_point: [492, 15] | python | en | ['en', 'en', 'en'] | True |
make_pipe | () | Makes a new pair of pipes. | Makes a new pair of pipes. | async def make_pipe() -> "Tuple[FdStream, FdStream]":
"""Makes a new pair of pipes."""
(r, w) = os.pipe()
return FdStream(w), FdStream(r)
| [function_tokens omitted] | start_point: [22, 0] | end_point: [25, 35] | python | en | ['en', 'en', 'en'] | True |
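`make_pipe` is a small test helper wrapping `os.pipe()` in two `FdStream`s. A usage sketch (Unix-only, assuming the published `trio` package, which exposes `FdStream` under `trio.lowlevel`):

```python
import os
import trio
from trio.lowlevel import FdStream

async def main():
    r, w = os.pipe()
    send_stream, receive_stream = FdStream(w), FdStream(r)
    async with send_stream, receive_stream:
        await send_stream.send_all(b"hello through a pipe")
        print(await receive_stream.receive_some(1024))

trio.run(main)
```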
QueryBatchKwargsGenerator._build_batch_kwargs | (self, batch_parameters) | Build batch kwargs from a partition id. | Build batch kwargs from a partition id. | def _build_batch_kwargs(self, batch_parameters):
"""Build batch kwargs from a partition id."""
data_asset_name = batch_parameters.pop("data_asset_name")
raw_query = self._get_raw_query(data_asset_name=data_asset_name)
partition_id = batch_parameters.pop("partition_id", None)
batch_kwargs = self._datasource.process_batch_parameters(**batch_parameters)
batch_kwargs["query"] = raw_query
if partition_id:
if not batch_kwargs["query_parameters"]:
batch_kwargs["query_parameters"] = {}
batch_kwargs["query_parameters"]["partition_id"] = partition_id
return SqlAlchemyDatasourceQueryBatchKwargs(batch_kwargs)
| [function_tokens omitted] | start_point: [161, 4] | end_point: [174, 65] | python | en | ['en', 'en', 'sw'] | True |
usage_statistics_enabled_method | (
func=None, event_name=None, args_payload_fn=None, result_payload_fn=None
) |
A decorator for usage statistics which defaults to the less detailed payload schema.
|
A decorator for usage statistics which defaults to the less detailed payload schema.
| def usage_statistics_enabled_method(
func=None, event_name=None, args_payload_fn=None, result_payload_fn=None
):
"""
A decorator for usage statistics which defaults to the less detailed payload schema.
"""
if callable(func):
if event_name is None:
event_name = func.__name__
@wraps(func)
def usage_statistics_wrapped_method(*args, **kwargs):
# if a function like `build_data_docs()` is being called as a `dry_run`
# then we don't want to emit usage_statistics. We just return the function without sending a usage_stats message
if "dry_run" in kwargs and kwargs["dry_run"]:
return func(*args, **kwargs)
# Set event_payload now so it can be updated below
event_payload = {}
message = {"event_payload": event_payload, "event": event_name}
handler = None
try:
if args_payload_fn is not None:
nested_update(event_payload, args_payload_fn(*args, **kwargs))
result = func(*args, **kwargs)
# We try to get the handler only now, so that it *could* be initialized in func, e.g. if it is an
# __init__ method
handler = get_usage_statistics_handler(args)
if result_payload_fn is not None:
nested_update(event_payload, result_payload_fn(result))
message["success"] = True
if handler is not None:
handler.emit(message)
# except Exception:
except Exception:
message["success"] = False
handler = get_usage_statistics_handler(args)
if handler:
handler.emit(message)
raise
return result
return usage_statistics_wrapped_method
else:
def usage_statistics_wrapped_method_partial(func):
return usage_statistics_enabled_method(
func,
event_name=event_name,
args_payload_fn=args_payload_fn,
result_payload_fn=result_payload_fn,
)
return usage_statistics_wrapped_method_partial
| [function_tokens omitted] | start_point: [238, 0] | end_point: [290, 54] | python | en | ['en', 'error', 'th'] | False |
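The decorator above can be applied either bare or with arguments (the partial branch at the end). A sketch of the with-arguments form; the class, method, and event name below are made up for illustration and are not Great Expectations' actual registrations:

```python
class MyDataContext:
    @usage_statistics_enabled_method(
        event_name="data_context.build_data_docs",
        args_payload_fn=lambda *args, **kwargs: {"dry_run": kwargs.get("dry_run", False)},
    )
    def build_data_docs(self, dry_run=False):
        ...   # real work; the decorator emits a usage event around it (unless dry_run)
```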
send_usage_message | (
data_context,
event: str,
event_payload: Optional[dict] = None,
success: Optional[bool] = None,
) | send a usage statistics message. | send a usage statistics message. | def send_usage_message(
data_context,
event: str,
event_payload: Optional[dict] = None,
success: Optional[bool] = None,
):
"""send a usage statistics message."""
try:
handler: UsageStatisticsHandler = getattr(
data_context, "_usage_statistics_handler", None
)
message: dict = {
"event": event,
"event_payload": event_payload,
"success": success,
}
if handler is not None:
handler.emit(message)
except Exception:
pass
| [function_tokens omitted] | start_point: [405, 0] | end_point: [424, 12] | python | en | ['en', 'lv', 'en'] | True |
UsageStatisticsHandler.send_usage_message | (self, event, event_payload=None, success=None) | send a usage statistics message. | send a usage statistics message. | def send_usage_message(self, event, event_payload=None, success=None):
"""send a usage statistics message."""
try:
message = {
"event": event,
"event_payload": event_payload or {},
"success": success,
}
self.emit(message)
except Exception:
pass
| [function_tokens omitted] | start_point: [118, 4] | end_point: [129, 16] | python | en | ['en', 'lv', 'en'] | True |
UsageStatisticsHandler.build_init_payload | (self) | Adds information that may be available only after full data context construction, but is useful to
calculate only one time (for example, anonymization). | Adds information that may be available only after full data context construction, but is useful to
calculate only one time (for example, anonymization). | def build_init_payload(self):
"""Adds information that may be available only after full data context construction, but is useful to
calculate only one time (for example, anonymization)."""
expectation_suites = [
self._data_context.get_expectation_suite(expectation_suite_name)
for expectation_suite_name in self._data_context.list_expectation_suite_names()
]
return {
"platform.system": platform.system(),
"platform.release": platform.release(),
"version_info": str(sys.version_info),
"anonymized_datasources": [
self._datasource_anonymizer.anonymize_datasource_info(
datasource_name, datasource_config
)
for datasource_name, datasource_config in self._data_context.project_config_with_variables_substituted.datasources.items()
],
"anonymized_stores": [
self._store_anonymizer.anonymize_store_info(store_name, store_obj)
for store_name, store_obj in self._data_context.stores.items()
],
"anonymized_validation_operators": [
self._validation_operator_anonymizer.anonymize_validation_operator_info(
validation_operator_name=validation_operator_name,
validation_operator_obj=validation_operator_obj,
)
for validation_operator_name, validation_operator_obj in self._data_context.validation_operators.items()
],
"anonymized_data_docs_sites": [
self._data_docs_sites_anonymizer.anonymize_data_docs_site_info(
site_name=site_name, site_config=site_config
)
for site_name, site_config in self._data_context.project_config_with_variables_substituted.data_docs_sites.items()
],
"anonymized_expectation_suites": [
self._expectation_suite_anonymizer.anonymize_expectation_suite_info(
expectation_suite
)
for expectation_suite in expectation_suites
],
} | [
"def",
"build_init_payload",
"(",
"self",
")",
":",
"expectation_suites",
"=",
"[",
"self",
".",
"_data_context",
".",
"get_expectation_suite",
"(",
"expectation_suite_name",
")",
"for",
"expectation_suite_name",
"in",
"self",
".",
"_data_context",
".",
"list_expectation_suite_names",
"(",
")",
"]",
"return",
"{",
"\"platform.system\"",
":",
"platform",
".",
"system",
"(",
")",
",",
"\"platform.release\"",
":",
"platform",
".",
"release",
"(",
")",
",",
"\"version_info\"",
":",
"str",
"(",
"sys",
".",
"version_info",
")",
",",
"\"anonymized_datasources\"",
":",
"[",
"self",
".",
"_datasource_anonymizer",
".",
"anonymize_datasource_info",
"(",
"datasource_name",
",",
"datasource_config",
")",
"for",
"datasource_name",
",",
"datasource_config",
"in",
"self",
".",
"_data_context",
".",
"project_config_with_variables_substituted",
".",
"datasources",
".",
"items",
"(",
")",
"]",
",",
"\"anonymized_stores\"",
":",
"[",
"self",
".",
"_store_anonymizer",
".",
"anonymize_store_info",
"(",
"store_name",
",",
"store_obj",
")",
"for",
"store_name",
",",
"store_obj",
"in",
"self",
".",
"_data_context",
".",
"stores",
".",
"items",
"(",
")",
"]",
",",
"\"anonymized_validation_operators\"",
":",
"[",
"self",
".",
"_validation_operator_anonymizer",
".",
"anonymize_validation_operator_info",
"(",
"validation_operator_name",
"=",
"validation_operator_name",
",",
"validation_operator_obj",
"=",
"validation_operator_obj",
",",
")",
"for",
"validation_operator_name",
",",
"validation_operator_obj",
"in",
"self",
".",
"_data_context",
".",
"validation_operators",
".",
"items",
"(",
")",
"]",
",",
"\"anonymized_data_docs_sites\"",
":",
"[",
"self",
".",
"_data_docs_sites_anonymizer",
".",
"anonymize_data_docs_site_info",
"(",
"site_name",
"=",
"site_name",
",",
"site_config",
"=",
"site_config",
")",
"for",
"site_name",
",",
"site_config",
"in",
"self",
".",
"_data_context",
".",
"project_config_with_variables_substituted",
".",
"data_docs_sites",
".",
"items",
"(",
")",
"]",
",",
"\"anonymized_expectation_suites\"",
":",
"[",
"self",
".",
"_expectation_suite_anonymizer",
".",
"anonymize_expectation_suite_info",
"(",
"expectation_suite",
")",
"for",
"expectation_suite",
"in",
"expectation_suites",
"]",
",",
"}"
] | [
131,
4
] | [
171,
9
] | python | en | ['en', 'en', 'en'] | True |
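
build_init_payload, shown above, assembles platform details plus anonymized summaries of datasources, stores, validation operators, data docs sites, and expectation suites. Building a real payload requires a fully constructed DataContext, so the hedged sketch below only captures the documented key set and adds a small checking helper; the helper name is hypothetical.

# Keys documented for the dict returned by build_init_payload() above.
# check_init_payload is a hypothetical helper, not part of Great Expectations.
EXPECTED_INIT_PAYLOAD_KEYS = {
    "platform.system",
    "platform.release",
    "version_info",
    "anonymized_datasources",
    "anonymized_stores",
    "anonymized_validation_operators",
    "anonymized_data_docs_sites",
    "anonymized_expectation_suites",
}


def check_init_payload(payload: dict) -> bool:
    """Return True when every documented key is present in the payload."""
    return EXPECTED_INIT_PAYLOAD_KEYS.issubset(payload.keys())


print(check_init_payload({"platform.system": "Linux"}))  # -> False
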
UsageStatisticsHandler.emit | (self, message) |
Emit a message.
|
Emit a message.
| def emit(self, message):
"""
Emit a message.
"""
try:
if message["event"] == "data_context.__init__":
message["event_payload"] = self.build_init_payload()
message = self.build_envelope(message)
if not self.validate_message(
message, schema=usage_statistics_record_schema
):
return
self._message_queue.put(message)
# noinspection PyBroadException
except Exception as e:
# We *always* tolerate *any* error in usage statistics
logger.debug(e) | [
"def",
"emit",
"(",
"self",
",",
"message",
")",
":",
"try",
":",
"if",
"message",
"[",
"\"event\"",
"]",
"==",
"\"data_context.__init__\"",
":",
"message",
"[",
"\"event_payload\"",
"]",
"=",
"self",
".",
"build_init_payload",
"(",
")",
"message",
"=",
"self",
".",
"build_envelope",
"(",
"message",
")",
"if",
"not",
"self",
".",
"validate_message",
"(",
"message",
",",
"schema",
"=",
"usage_statistics_record_schema",
")",
":",
"return",
"self",
".",
"_message_queue",
".",
"put",
"(",
"message",
")",
"# noinspection PyBroadException",
"except",
"Exception",
"as",
"e",
":",
"# We *always* tolerate *any* error in usage statistics",
"logger",
".",
"debug",
"(",
"e",
")"
] | [
194,
4
] | [
210,
27
] | python | en | ['en', 'error', 'th'] | False |
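
emit(), documented above, special-cases the data_context.__init__ event (filling its payload via build_init_payload), wraps the message in an envelope, validates it against a schema, and enqueues it, logging rather than raising on failure. The sketch below is a simplified, self-contained version of that flow: the envelope and schema-validation steps are stubbed out and a plain queue.Queue stands in for the handler's message queue, so none of the wiring here mirrors the real handler.

# Simplified sketch of the emit() flow above; envelope building and schema
# validation are intentionally stubbed out.
import logging
import queue

logger = logging.getLogger(__name__)
message_queue = queue.Queue()


def emit(message: dict) -> None:
    try:
        if message["event"] == "data_context.__init__":
            message["event_payload"] = {}  # stand-in for build_init_payload()
        # build_envelope() / validate_message() would run here in the real handler
        message_queue.put(message)
    except Exception as e:
        logger.debug(e)  # usage statistics errors are always tolerated


emit({"event": "data_context.__init__", "event_payload": None, "success": True})
print(message_queue.qsize())  # -> 1
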
_invert_regex_to_data_reference_template | (
regex_pattern: str,
group_names: List[str],
) | Create a string template based on a regex and corresponding list of group names.
For example:
filepath_template = _invert_regex_to_data_reference_template(
regex_pattern=r"^(.+)_(\d+)_(\d+)\.csv$",
group_names=["name", "timestamp", "price"],
)
filepath_template
>> "{name}_{timestamp}_{price}.csv"
Such templates are useful because they can be populated using string substitution:
filepath_template.format(**{
"name": "user_logs",
"timestamp": "20200101",
"price": "250",
})
>> "user_logs_20200101_250.csv"
NOTE Abe 20201017: This method is almost certainly still brittle. I haven't exhaustively mapped the OPCODES in sre_constants
| Create a string template based on a regex and corresponding list of group names. | def _invert_regex_to_data_reference_template(
regex_pattern: str,
group_names: List[str],
) -> str:
"""Create a string template based on a regex and corresponding list of group names.
For example:
filepath_template = _invert_regex_to_data_reference_template(
regex_pattern=r"^(.+)_(\d+)_(\d+)\.csv$",
group_names=["name", "timestamp", "price"],
)
filepath_template
>> "{name}_{timestamp}_{price}.csv"
Such templates are useful because they can be populated using string substitution:
filepath_template.format(**{
"name": "user_logs",
"timestamp": "20200101",
"price": "250",
})
>> "user_logs_20200101_250.csv"
NOTE Abe 20201017: This method is almost certainly still brittle. I haven't exhaustively mapped the OPCODES in sre_constants
"""
data_reference_template: str = ""
group_name_index: int = 0
num_groups = len(group_names)
# print("-"*80)
parsed_sre = sre_parse.parse(regex_pattern)
for token, value in parsed_sre:
if token == sre_constants.LITERAL:
# Transcribe the character directly into the template
data_reference_template += chr(value)
elif token == sre_constants.SUBPATTERN:
if not (group_name_index < num_groups):
break
# Replace the captured group with "{next_group_name}" in the template
data_reference_template += "{" + group_names[group_name_index] + "}"
group_name_index += 1
elif token in [
sre_constants.MAX_REPEAT,
sre_constants.IN,
sre_constants.BRANCH,
sre_constants.ANY,
]:
            # Replace the uncaptured group with a wildcard in the template
data_reference_template += "*"
elif token in [
sre_constants.AT,
sre_constants.ASSERT_NOT,
sre_constants.ASSERT,
]:
pass
else:
raise ValueError(
f"Unrecognized regex token {token} in regex pattern {regex_pattern}."
)
# Collapse adjacent wildcards into a single wildcard
data_reference_template: str = re.sub("\\*+", "*", data_reference_template)
return data_reference_template | [
"def",
"_invert_regex_to_data_reference_template",
"(",
"regex_pattern",
":",
"str",
",",
"group_names",
":",
"List",
"[",
"str",
"]",
",",
")",
"->",
"str",
":",
"data_reference_template",
":",
"str",
"=",
"\"\"",
"group_name_index",
":",
"int",
"=",
"0",
"num_groups",
"=",
"len",
"(",
"group_names",
")",
"# print(\"-\"*80)",
"parsed_sre",
"=",
"sre_parse",
".",
"parse",
"(",
"regex_pattern",
")",
"for",
"token",
",",
"value",
"in",
"parsed_sre",
":",
"if",
"token",
"==",
"sre_constants",
".",
"LITERAL",
":",
"# Transcribe the character directly into the template",
"data_reference_template",
"+=",
"chr",
"(",
"value",
")",
"elif",
"token",
"==",
"sre_constants",
".",
"SUBPATTERN",
":",
"if",
"not",
"(",
"group_name_index",
"<",
"num_groups",
")",
":",
"break",
"# Replace the captured group with \"{next_group_name}\" in the template",
"data_reference_template",
"+=",
"\"{\"",
"+",
"group_names",
"[",
"group_name_index",
"]",
"+",
"\"}\"",
"group_name_index",
"+=",
"1",
"elif",
"token",
"in",
"[",
"sre_constants",
".",
"MAX_REPEAT",
",",
"sre_constants",
".",
"IN",
",",
"sre_constants",
".",
"BRANCH",
",",
"sre_constants",
".",
"ANY",
",",
"]",
":",
"# Replace the uncaptured group a wildcard in the template",
"data_reference_template",
"+=",
"\"*\"",
"elif",
"token",
"in",
"[",
"sre_constants",
".",
"AT",
",",
"sre_constants",
".",
"ASSERT_NOT",
",",
"sre_constants",
".",
"ASSERT",
",",
"]",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Unrecognized regex token {token} in regex pattern {regex_pattern}.\"",
")",
"# Collapse adjacent wildcards into a single wildcard",
"data_reference_template",
":",
"str",
"=",
"re",
".",
"sub",
"(",
"\"\\\\*+\"",
",",
"\"*\"",
",",
"data_reference_template",
")",
"return",
"data_reference_template"
] | [
194,
0
] | [
262,
34
] | python | en | ['en', 'en', 'en'] | True |
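
The docstring above already spells out the intended round trip for _invert_regex_to_data_reference_template; the sketch below simply runs that documented example. The import path is an assumption (the helper appears to sit alongside the other data-connector utilities in great_expectations.datasource.data_connector.util).

# Usage sketch for the regex-inversion helper documented above.
# The import path is an assumption; adjust to wherever the helper lives.
from great_expectations.datasource.data_connector.util import (
    _invert_regex_to_data_reference_template,
)

template = _invert_regex_to_data_reference_template(
    regex_pattern=r"^(.+)_(\d+)_(\d+)\.csv$",
    group_names=["name", "timestamp", "price"],
)
print(template)  # -> {name}_{timestamp}_{price}.csv

# The resulting template can be filled back in with plain str.format:
print(template.format(name="user_logs", timestamp="20200101", price="250"))
# -> user_logs_20200101_250.csv
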
get_filesystem_one_level_directory_glob_path_list | (
base_directory_path: str, glob_directive: str
) |
List file names, relative to base_directory_path one level deep, with expansion specified by glob_directive.
:param base_directory_path -- base directory path, relative to which file paths will be collected
:param glob_directive -- glob expansion directive
:returns -- list of relative file paths
|
List file names, relative to base_directory_path one level deep, with expansion specified by glob_directive.
:param base_directory_path -- base directory path, relative to which file paths will be collected
:param glob_directive -- glob expansion directive
:returns -- list of relative file paths
| def get_filesystem_one_level_directory_glob_path_list(
base_directory_path: str, glob_directive: str
) -> List[str]:
"""
List file names, relative to base_directory_path one level deep, with expansion specified by glob_directive.
:param base_directory_path -- base directory path, relative to which file paths will be collected
:param glob_directive -- glob expansion directive
:returns -- list of relative file paths
"""
globbed_paths = Path(base_directory_path).glob(glob_directive)
path_list: List[str] = [
os.path.relpath(str(posix_path), base_directory_path)
for posix_path in globbed_paths
]
return path_list | [
"def",
"get_filesystem_one_level_directory_glob_path_list",
"(",
"base_directory_path",
":",
"str",
",",
"glob_directive",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"globbed_paths",
"=",
"Path",
"(",
"base_directory_path",
")",
".",
"glob",
"(",
"glob_directive",
")",
"path_list",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"str",
"(",
"posix_path",
")",
",",
"base_directory_path",
")",
"for",
"posix_path",
"in",
"globbed_paths",
"]",
"return",
"path_list"
] | [
275,
0
] | [
289,
20
] | python | en | ['en', 'error', 'th'] | False |
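
get_filesystem_one_level_directory_glob_path_list, documented above, globs a single pattern under a base directory and returns the matches relative to that directory. The sketch below exercises it against a throwaway temporary directory; the import path is an assumption and the file names are illustrative.

# Usage sketch for the one-level glob helper documented above.
# Import path and file names are assumptions for illustration only.
import os
import tempfile

from great_expectations.datasource.data_connector.util import (
    get_filesystem_one_level_directory_glob_path_list,
)

with tempfile.TemporaryDirectory() as base:
    for name in ("alpha_20200101.csv", "beta_20200102.csv", "notes.txt"):
        open(os.path.join(base, name), "w").close()

    csv_paths = get_filesystem_one_level_directory_glob_path_list(
        base_directory_path=base, glob_directive="*.csv"
    )
    print(sorted(csv_paths))  # -> ['alpha_20200101.csv', 'beta_20200102.csv']
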
list_s3_keys | (
s3, query_options: dict, iterator_dict: dict, recursive: bool = False
) |
For InferredAssetS3DataConnector, we take bucket and prefix and search for files using RegEx at and below the level
specified by that bucket and prefix. However, for ConfiguredAssetS3DataConnector, we take bucket and prefix and
search for files using RegEx only at the level specified by that bucket and prefix. This restriction for the
ConfiguredAssetS3DataConnector is needed because paths on S3 comprise not only the leaf file name but also the
full path that includes both the prefix and the file name. Otherwise, in situations where multiple data assets
share levels of a directory tree, matching files to data assets will not be possible due to the path ambiguity.
:param s3: s3 client connection
:param query_options: s3 query attributes ("Bucket", "Prefix", "Delimiter", "MaxKeys")
:param iterator_dict: dictionary to manage "NextContinuationToken" (if "IsTruncated" is returned from S3)
:param recursive: True for InferredAssetS3DataConnector and False for ConfiguredAssetS3DataConnector (see above)
:return: string valued key representing file path on S3 (full prefix and leaf file name)
|
For InferredAssetS3DataConnector, we take bucket and prefix and search for files using RegEx at and below the level
specified by that bucket and prefix. However, for ConfiguredAssetS3DataConnector, we take bucket and prefix and
search for files using RegEx only at the level specified by that bucket and prefix. This restriction for the
ConfiguredAssetS3DataConnector is needed because paths on S3 comprise not only the leaf file name but also the
full path that includes both the prefix and the file name. Otherwise, in situations where multiple data assets
share levels of a directory tree, matching files to data assets will not be possible due to the path ambiguity.
:param s3: s3 client connection
:param query_options: s3 query attributes ("Bucket", "Prefix", "Delimiter", "MaxKeys")
:param iterator_dict: dictionary to manage "NextContinuationToken" (if "IsTruncated" is returned from S3)
:param recursive: True for InferredAssetS3DataConnector and False for ConfiguredAssetS3DataConnector (see above)
:return: string valued key representing file path on S3 (full prefix and leaf file name)
| def list_s3_keys(
s3, query_options: dict, iterator_dict: dict, recursive: bool = False
) -> str:
"""
For InferredAssetS3DataConnector, we take bucket and prefix and search for files using RegEx at and below the level
specified by that bucket and prefix. However, for ConfiguredAssetS3DataConnector, we take bucket and prefix and
search for files using RegEx only at the level specified by that bucket and prefix. This restriction for the
    ConfiguredAssetS3DataConnector is needed because paths on S3 comprise not only the leaf file name but also the
    full path that includes both the prefix and the file name. Otherwise, in situations where multiple data assets
    share levels of a directory tree, matching files to data assets will not be possible due to the path ambiguity.
:param s3: s3 client connection
:param query_options: s3 query attributes ("Bucket", "Prefix", "Delimiter", "MaxKeys")
:param iterator_dict: dictionary to manage "NextContinuationToken" (if "IsTruncated" is returned from S3)
:param recursive: True for InferredAssetS3DataConnector and False for ConfiguredAssetS3DataConnector (see above)
:return: string valued key representing file path on S3 (full prefix and leaf file name)
"""
if iterator_dict is None:
iterator_dict = {}
if "continuation_token" in iterator_dict:
query_options.update({"ContinuationToken": iterator_dict["continuation_token"]})
logger.debug(f"Fetching objects from S3 with query options: {query_options}")
s3_objects_info: dict = s3.list_objects_v2(**query_options)
if not any(key in s3_objects_info for key in ["Contents", "CommonPrefixes"]):
raise ValueError("S3 query may not have been configured correctly.")
if "Contents" in s3_objects_info:
keys: List[str] = [
item["Key"] for item in s3_objects_info["Contents"] if item["Size"] > 0
]
yield from keys
if recursive and "CommonPrefixes" in s3_objects_info:
common_prefixes: List[Dict[str, Any]] = s3_objects_info["CommonPrefixes"]
for prefix_info in common_prefixes:
query_options_tmp: dict = copy.deepcopy(query_options)
query_options_tmp.update({"Prefix": prefix_info["Prefix"]})
# Recursively fetch from updated prefix
yield from list_s3_keys(
s3=s3,
query_options=query_options_tmp,
iterator_dict={},
recursive=recursive,
)
if s3_objects_info["IsTruncated"]:
iterator_dict["continuation_token"] = s3_objects_info["NextContinuationToken"]
# Recursively fetch more
yield from list_s3_keys(
s3=s3,
query_options=query_options,
iterator_dict=iterator_dict,
recursive=recursive,
)
if "continuation_token" in iterator_dict:
# Make sure we clear the token once we've gotten fully through
del iterator_dict["continuation_token"] | [
"def",
"list_s3_keys",
"(",
"s3",
",",
"query_options",
":",
"dict",
",",
"iterator_dict",
":",
"dict",
",",
"recursive",
":",
"bool",
"=",
"False",
")",
"->",
"str",
":",
"if",
"iterator_dict",
"is",
"None",
":",
"iterator_dict",
"=",
"{",
"}",
"if",
"\"continuation_token\"",
"in",
"iterator_dict",
":",
"query_options",
".",
"update",
"(",
"{",
"\"ContinuationToken\"",
":",
"iterator_dict",
"[",
"\"continuation_token\"",
"]",
"}",
")",
"logger",
".",
"debug",
"(",
"f\"Fetching objects from S3 with query options: {query_options}\"",
")",
"s3_objects_info",
":",
"dict",
"=",
"s3",
".",
"list_objects_v2",
"(",
"*",
"*",
"query_options",
")",
"if",
"not",
"any",
"(",
"key",
"in",
"s3_objects_info",
"for",
"key",
"in",
"[",
"\"Contents\"",
",",
"\"CommonPrefixes\"",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"S3 query may not have been configured correctly.\"",
")",
"if",
"\"Contents\"",
"in",
"s3_objects_info",
":",
"keys",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"item",
"[",
"\"Key\"",
"]",
"for",
"item",
"in",
"s3_objects_info",
"[",
"\"Contents\"",
"]",
"if",
"item",
"[",
"\"Size\"",
"]",
">",
"0",
"]",
"yield",
"from",
"keys",
"if",
"recursive",
"and",
"\"CommonPrefixes\"",
"in",
"s3_objects_info",
":",
"common_prefixes",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"s3_objects_info",
"[",
"\"CommonPrefixes\"",
"]",
"for",
"prefix_info",
"in",
"common_prefixes",
":",
"query_options_tmp",
":",
"dict",
"=",
"copy",
".",
"deepcopy",
"(",
"query_options",
")",
"query_options_tmp",
".",
"update",
"(",
"{",
"\"Prefix\"",
":",
"prefix_info",
"[",
"\"Prefix\"",
"]",
"}",
")",
"# Recursively fetch from updated prefix",
"yield",
"from",
"list_s3_keys",
"(",
"s3",
"=",
"s3",
",",
"query_options",
"=",
"query_options_tmp",
",",
"iterator_dict",
"=",
"{",
"}",
",",
"recursive",
"=",
"recursive",
",",
")",
"if",
"s3_objects_info",
"[",
"\"IsTruncated\"",
"]",
":",
"iterator_dict",
"[",
"\"continuation_token\"",
"]",
"=",
"s3_objects_info",
"[",
"\"NextContinuationToken\"",
"]",
"# Recursively fetch more",
"yield",
"from",
"list_s3_keys",
"(",
"s3",
"=",
"s3",
",",
"query_options",
"=",
"query_options",
",",
"iterator_dict",
"=",
"iterator_dict",
",",
"recursive",
"=",
"recursive",
",",
")",
"if",
"\"continuation_token\"",
"in",
"iterator_dict",
":",
"# Make sure we clear the token once we've gotten fully through",
"del",
"iterator_dict",
"[",
"\"continuation_token\"",
"]"
] | [
292,
0
] | [
350,
47
] | python | en | ['en', 'error', 'th'] | False |
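
list_s3_keys, documented above, is a generator: it pages through list_objects_v2 responses (following NextContinuationToken) and, when recursive=True, descends into each CommonPrefix. A hedged usage sketch follows; the bucket and prefix are placeholders, the import path is an assumption, and boto3 credentials must already be configured.

# Usage sketch for the list_s3_keys generator documented above.
# Bucket/prefix are placeholders; the import path is an assumption.
import boto3

from great_expectations.datasource.data_connector.util import list_s3_keys

s3 = boto3.client("s3")
query_options = {"Bucket": "my-example-bucket", "Prefix": "data/", "Delimiter": "/"}

# recursive=True descends into CommonPrefixes (InferredAssetS3DataConnector
# behaviour); recursive=False stays at the configured prefix level.
for key in list_s3_keys(
    s3=s3, query_options=query_options, iterator_dict={}, recursive=True
):
    print(key)
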
_build_sorter_from_config | (sorter_config: Dict[str, Any]) | Build a Sorter using the provided configuration and return the newly-built Sorter. | Build a Sorter using the provided configuration and return the newly-built Sorter. | def _build_sorter_from_config(sorter_config: Dict[str, Any]) -> Sorter:
"""Build a Sorter using the provided configuration and return the newly-built Sorter."""
runtime_environment: dict = {"name": sorter_config["name"]}
sorter: Sorter = instantiate_class_from_config(
config=sorter_config,
runtime_environment=runtime_environment,
config_defaults={
"module_name": "great_expectations.datasource.data_connector.sorter"
},
)
return sorter | [
"def",
"_build_sorter_from_config",
"(",
"sorter_config",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Sorter",
":",
"runtime_environment",
":",
"dict",
"=",
"{",
"\"name\"",
":",
"sorter_config",
"[",
"\"name\"",
"]",
"}",
"sorter",
":",
"Sorter",
"=",
"instantiate_class_from_config",
"(",
"config",
"=",
"sorter_config",
",",
"runtime_environment",
"=",
"runtime_environment",
",",
"config_defaults",
"=",
"{",
"\"module_name\"",
":",
"\"great_expectations.datasource.data_connector.sorter\"",
"}",
",",
")",
"return",
"sorter"
] | [
372,
0
] | [
382,
17
] | python | en | ['en', 'en', 'en'] | True |
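
_build_sorter_from_config, shown above, instantiates a Sorter via instantiate_class_from_config, defaulting the module to great_expectations.datasource.data_connector.sorter. The sketch below assumes a LexicographicSorter class exists in that module and that the helper is importable from the data-connector util module; both names are assumptions, not confirmed by this record.

# Sketch of building a sorter from a config dict, as documented above.
# LexicographicSorter and the import path are assumptions.
from great_expectations.datasource.data_connector.util import (
    _build_sorter_from_config,
)

sorter_config = {
    "name": "timestamp",
    "class_name": "LexicographicSorter",
    "orderby": "desc",
}

sorter = _build_sorter_from_config(sorter_config)
print(type(sorter).__name__)  # expected: LexicographicSorter
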
assert_checkpoints | () | Use as a context manager to check that the code inside the ``with``
block either exits with an exception or executes at least one
:ref:`checkpoint <checkpoints>`.
Raises:
AssertionError: if no checkpoint was executed.
Example:
Check that :func:`trio.sleep` is a checkpoint, even if it doesn't
block::
with trio.testing.assert_checkpoints():
await trio.sleep(0)
| Use as a context manager to check that the code inside the ``with``
block either exits with an exception or executes at least one
:ref:`checkpoint <checkpoints>`. | def assert_checkpoints():
"""Use as a context manager to check that the code inside the ``with``
block either exits with an exception or executes at least one
:ref:`checkpoint <checkpoints>`.
Raises:
AssertionError: if no checkpoint was executed.
Example:
Check that :func:`trio.sleep` is a checkpoint, even if it doesn't
block::
with trio.testing.assert_checkpoints():
await trio.sleep(0)
"""
__tracebackhide__ = True
return _assert_yields_or_not(True) | [
"def",
"assert_checkpoints",
"(",
")",
":",
"__tracebackhide__",
"=",
"True",
"return",
"_assert_yields_or_not",
"(",
"True",
")"
] | [
24,
0
] | [
41,
38
] | python | en | ['en', 'en', 'en'] | True |
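
The assert_checkpoints docstring above includes its own usage; the sketch below just wraps that example in a runnable trio program.

# Runnable version of the docstring example above; requires trio.
import trio
import trio.testing


async def main():
    # trio.sleep(0) is a checkpoint even though it never blocks,
    # so the context manager exits without raising AssertionError.
    with trio.testing.assert_checkpoints():
        await trio.sleep(0)


trio.run(main)
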
assert_no_checkpoints | () | Use as a context manager to check that the code inside the ``with``
block does not execute any :ref:`checkpoints <checkpoints>`.
Raises:
AssertionError: if a checkpoint was executed.
Example:
Synchronous code never contains any checkpoints, but we can double-check
that::
send_channel, receive_channel = trio.open_memory_channel(10)
with trio.testing.assert_no_checkpoints():
send_channel.send_nowait(None)
| Use as a context manager to check that the code inside the ``with``
block does not execute any :ref:`checkpoints <checkpoints>`. | def assert_no_checkpoints():
"""Use as a context manager to check that the code inside the ``with``
block does not execute any :ref:`checkpoints <checkpoints>`.
Raises:
AssertionError: if a checkpoint was executed.
Example:
Synchronous code never contains any checkpoints, but we can double-check
that::
send_channel, receive_channel = trio.open_memory_channel(10)
with trio.testing.assert_no_checkpoints():
send_channel.send_nowait(None)
"""
__tracebackhide__ = True
return _assert_yields_or_not(False) | [
"def",
"assert_no_checkpoints",
"(",
")",
":",
"__tracebackhide__",
"=",
"True",
"return",
"_assert_yields_or_not",
"(",
"False",
")"
] | [
44,
0
] | [
61,
39
] | python | en | ['en', 'en', 'en'] | True |
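
Likewise, the assert_no_checkpoints example from the docstring above can be run as-is inside a small trio program.

# Runnable version of the docstring example above; requires trio.
import trio
import trio.testing


async def main():
    send_channel, receive_channel = trio.open_memory_channel(10)
    # send_nowait is synchronous, so no checkpoint executes inside the block.
    with trio.testing.assert_no_checkpoints():
        send_channel.send_nowait(None)


trio.run(main)
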
progress | (*futures) | Track progress of dask computation in a remote cluster.
LogProgressBar is defined inside here to avoid having to import
its dependencies if not used.
| Track progress of dask computation in a remote cluster.
LogProgressBar is defined inside here to avoid having to import
its dependencies if not used.
| def progress(*futures):
"""Track progress of dask computation in a remote cluster.
LogProgressBar is defined inside here to avoid having to import
its dependencies if not used.
"""
# Import distributed only when used
from distributed.client import futures_of # pylint: disable=C0415
from distributed.diagnostics.progressbar import TextProgressBar # pylint: disable=c0415
class LogProgressBar(TextProgressBar):
"""Dask progress bar based on logging instead of stdout."""
last = 0
logger = logging.getLogger('distributed')
def _draw_bar(self, remaining, all, **kwargs): # pylint: disable=W0221,W0622
done = all - remaining
frac = (done / all) if all else 0
if frac > self.last + 0.01:
self.last = int(frac * 100) / 100
bar = "#" * int(self.width * frac)
percent = int(100 * frac)
time_per_task = self.elapsed / (all - remaining)
remaining_time = timedelta(seconds=time_per_task * remaining)
eta = datetime.utcnow() + remaining_time
elapsed = timedelta(seconds=self.elapsed)
msg = "[{0:<{1}}] | {2}/{3} ({4}%) Completed | {5} | {6} | {7}".format(
bar, self.width, done, all, percent, elapsed, remaining_time, eta
)
self.logger.info(msg)
LOGGER.info(msg)
def _draw_stop(self, **kwargs):
pass
futures = futures_of(futures)
if not isinstance(futures, (set, list)):
futures = [futures]
LogProgressBar(futures) | [
"def",
"progress",
"(",
"*",
"futures",
")",
":",
"# Import distributed only when used",
"from",
"distributed",
".",
"client",
"import",
"futures_of",
"# pylint: disable=C0415",
"from",
"distributed",
".",
"diagnostics",
".",
"progressbar",
"import",
"TextProgressBar",
"# pylint: disable=c0415",
"class",
"LogProgressBar",
"(",
"TextProgressBar",
")",
":",
"\"\"\"Dask progress bar based on logging instead of stdout.\"\"\"",
"last",
"=",
"0",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'distributed'",
")",
"def",
"_draw_bar",
"(",
"self",
",",
"remaining",
",",
"all",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0221,W0622",
"done",
"=",
"all",
"-",
"remaining",
"frac",
"=",
"(",
"done",
"/",
"all",
")",
"if",
"all",
"else",
"0",
"if",
"frac",
">",
"self",
".",
"last",
"+",
"0.01",
":",
"self",
".",
"last",
"=",
"int",
"(",
"frac",
"*",
"100",
")",
"/",
"100",
"bar",
"=",
"\"#\"",
"*",
"int",
"(",
"self",
".",
"width",
"*",
"frac",
")",
"percent",
"=",
"int",
"(",
"100",
"*",
"frac",
")",
"time_per_task",
"=",
"self",
".",
"elapsed",
"/",
"(",
"all",
"-",
"remaining",
")",
"remaining_time",
"=",
"timedelta",
"(",
"seconds",
"=",
"time_per_task",
"*",
"remaining",
")",
"eta",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"remaining_time",
"elapsed",
"=",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"elapsed",
")",
"msg",
"=",
"\"[{0:<{1}}] | {2}/{3} ({4}%) Completed | {5} | {6} | {7}\"",
".",
"format",
"(",
"bar",
",",
"self",
".",
"width",
",",
"done",
",",
"all",
",",
"percent",
",",
"elapsed",
",",
"remaining_time",
",",
"eta",
")",
"self",
".",
"logger",
".",
"info",
"(",
"msg",
")",
"LOGGER",
".",
"info",
"(",
"msg",
")",
"def",
"_draw_stop",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"pass",
"futures",
"=",
"futures_of",
"(",
"futures",
")",
"if",
"not",
"isinstance",
"(",
"futures",
",",
"(",
"set",
",",
"list",
")",
")",
":",
"futures",
"=",
"[",
"futures",
"]",
"LogProgressBar",
"(",
"futures",
")"
] | [
20,
0
] | [
62,
27
] | python | en | ['en', 'en', 'en'] | True |
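
progress(), documented above, resolves the given futures with futures_of and attaches a logging-based TextProgressBar subclass, so progress lines go to the 'distributed' logger instead of stdout. A hedged sketch of calling it against a local dask cluster follows; the workload is illustrative, and the helper itself must be imported from whatever module this record was extracted from (the placeholder import below is hypothetical).

# Hedged usage sketch for the logging-based progress helper above.
# Assumes dask.distributed is installed; the workload is illustrative.
import logging

from distributed import Client

# `progress` is the helper documented above; import it from its defining
# module, e.g. (hypothetical path):
# from mypackage.dask_utils import progress

logging.basicConfig(level=logging.INFO)

client = Client()  # spins up a local cluster for demonstration
futures = client.map(lambda x: x ** 2, range(100))

progress(*futures)              # logs "[####...] | done/total (..%) ..." lines
results = client.gather(futures)
print(sum(results))
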
get_dataset | (
dataset_type,
data,
schemas=None,
profiler=ColumnsExistProfiler,
caching=True,
table_name=None,
sqlite_db_path=None,
) | Utility to create datasets for json-formatted tests. | Utility to create datasets for json-formatted tests. | def get_dataset(
dataset_type,
data,
schemas=None,
profiler=ColumnsExistProfiler,
caching=True,
table_name=None,
sqlite_db_path=None,
):
"""Utility to create datasets for json-formatted tests."""
df = pd.DataFrame(data)
if dataset_type == "PandasDataset":
if schemas and "pandas" in schemas:
schema = schemas["pandas"]
pandas_schema = {}
for (key, value) in schema.items():
# Note, these are just names used in our internal schemas to build datasets *for internal tests*
# Further, some changes in pandas internal about how datetimes are created means to support pandas
# pre- 0.25, we need to explicitly specify when we want timezone.
# We will use timestamp for timezone-aware (UTC only) dates in our tests
if value.lower() in ["timestamp", "datetime64[ns, tz]"]:
df[key] = pd.to_datetime(df[key], utc=True)
continue
elif value.lower() in ["datetime", "datetime64", "datetime64[ns]"]:
df[key] = pd.to_datetime(df[key])
continue
elif value.lower() in ["date"]:
df[key] = pd.to_datetime(df[key]).dt.date
value = "object"
try:
type_ = np.dtype(value)
except TypeError:
type_ = getattr(pd.core.dtypes.dtypes, value)
# If this raises AttributeError it's okay: it means someone built a bad test
pandas_schema[key] = type_
# pandas_schema = {key: np.dtype(value) for (key, value) in schemas["pandas"].items()}
df = df.astype(pandas_schema)
return PandasDataset(df, profiler=profiler, caching=caching)
elif dataset_type == "sqlite":
if not create_engine:
return None
engine = create_engine(get_sqlite_connection_url(sqlite_db_path=sqlite_db_path))
# Add the data to the database as a new table
sql_dtypes = {}
if (
schemas
and "sqlite" in schemas
and isinstance(engine.dialect, sqlitetypes.dialect)
):
schema = schemas["sqlite"]
sql_dtypes = {col: SQLITE_TYPES[dtype] for (col, dtype) in schema.items()}
for col in schema:
type_ = schema[col]
if type_ in ["INTEGER", "SMALLINT", "BIGINT"]:
df[col] = pd.to_numeric(df[col], downcast="signed")
elif type_ in ["FLOAT", "DOUBLE", "DOUBLE_PRECISION"]:
df[col] = pd.to_numeric(df[col])
min_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=True
)
max_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=False
)
for api_schema_type in ["api_np", "api_cast"]:
min_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=True
)
max_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=False
)
df.replace(
to_replace=[min_value_api, max_value_api],
value=[min_value_dbms, max_value_dbms],
inplace=True,
)
elif type_ in ["DATETIME", "TIMESTAMP"]:
df[col] = pd.to_datetime(df[col])
elif type_ in ["DATE"]:
df[col] = pd.to_datetime(df[col]).dt.date
if table_name is None:
table_name = generate_test_table_name()
df.to_sql(
name=table_name,
con=engine,
index=False,
dtype=sql_dtypes,
if_exists="replace",
)
# Build a SqlAlchemyDataset using that database
return SqlAlchemyDataset(
table_name, engine=engine, profiler=profiler, caching=caching
)
elif dataset_type == "postgresql":
if not create_engine:
return None
# Create a new database
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
engine = connection_manager.get_engine(
f"postgresql://postgres@{db_hostname}/test_ci"
)
sql_dtypes = {}
if (
schemas
and "postgresql" in schemas
and isinstance(engine.dialect, postgresqltypes.dialect)
):
schema = schemas["postgresql"]
sql_dtypes = {
col: POSTGRESQL_TYPES[dtype] for (col, dtype) in schema.items()
}
for col in schema:
type_ = schema[col]
if type_ in ["INTEGER", "SMALLINT", "BIGINT"]:
df[col] = pd.to_numeric(df[col], downcast="signed")
elif type_ in ["FLOAT", "DOUBLE", "DOUBLE_PRECISION"]:
df[col] = pd.to_numeric(df[col])
min_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=True
)
max_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=False
)
for api_schema_type in ["api_np", "api_cast"]:
min_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=True
)
max_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=False
)
df.replace(
to_replace=[min_value_api, max_value_api],
value=[min_value_dbms, max_value_dbms],
inplace=True,
)
elif type_ in ["DATETIME", "TIMESTAMP"]:
df[col] = pd.to_datetime(df[col])
elif type_ in ["DATE"]:
df[col] = pd.to_datetime(df[col]).dt.date
if table_name is None:
table_name = generate_test_table_name()
df.to_sql(
name=table_name,
con=engine,
index=False,
dtype=sql_dtypes,
if_exists="replace",
)
# Build a SqlAlchemyDataset using that database
return SqlAlchemyDataset(
table_name, engine=engine, profiler=profiler, caching=caching
)
elif dataset_type == "mysql":
if not create_engine:
return None
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
engine = create_engine(f"mysql+pymysql://root@{db_hostname}/test_ci")
sql_dtypes = {}
if (
schemas
and "mysql" in schemas
and isinstance(engine.dialect, mysqltypes.dialect)
):
schema = schemas["mysql"]
sql_dtypes = {col: MYSQL_TYPES[dtype] for (col, dtype) in schema.items()}
for col in schema:
type_ = schema[col]
if type_ in ["INTEGER", "SMALLINT", "BIGINT"]:
df[col] = pd.to_numeric(df[col], downcast="signed")
elif type_ in ["FLOAT", "DOUBLE", "DOUBLE_PRECISION"]:
df[col] = pd.to_numeric(df[col])
min_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=True
)
max_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=False
)
for api_schema_type in ["api_np", "api_cast"]:
min_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=True
)
max_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=False
)
df.replace(
to_replace=[min_value_api, max_value_api],
value=[min_value_dbms, max_value_dbms],
inplace=True,
)
elif type_ in ["DATETIME", "TIMESTAMP"]:
df[col] = pd.to_datetime(df[col])
elif type_ in ["DATE"]:
df[col] = pd.to_datetime(df[col]).dt.date
if table_name is None:
table_name = generate_test_table_name()
df.to_sql(
name=table_name,
con=engine,
index=False,
dtype=sql_dtypes,
if_exists="replace",
)
# Will - 20210126
# For mysql we want our tests to know when a temp_table is referred to more than once in the
# same query. This has caused problems in expectations like expect_column_values_to_be_unique().
# Here we instantiate a SqlAlchemyDataset with a custom_sql, which causes a temp_table to be created,
# rather than referring the table by name.
custom_sql = "SELECT * FROM " + table_name
return SqlAlchemyDataset(
custom_sql=custom_sql, engine=engine, profiler=profiler, caching=caching
)
elif dataset_type == "mssql":
if not create_engine:
return None
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
engine = create_engine(
f"mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@{db_hostname}:1433/test_ci?"
"driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true",
# echo=True,
)
# If "autocommit" is not desired to be on by default, then use the following pattern when explicit "autocommit"
# is desired (e.g., for temporary tables, "autocommit" is off by default, so the override option may be useful).
# engine.execute(sa.text(sql_query_string).execution_options(autocommit=True))
sql_dtypes = {}
if (
schemas
and dataset_type in schemas
and isinstance(engine.dialect, mssqltypes.dialect)
):
schema = schemas[dataset_type]
sql_dtypes = {col: MSSQL_TYPES[dtype] for (col, dtype) in schema.items()}
for col in schema:
type_ = schema[col]
if type_ in ["INTEGER", "SMALLINT", "BIGINT"]:
df[col] = pd.to_numeric(df[col], downcast="signed")
elif type_ in ["FLOAT"]:
df[col] = pd.to_numeric(df[col])
min_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=True
)
max_value_dbms = get_sql_dialect_floating_point_infinity_value(
schema=dataset_type, negative=False
)
for api_schema_type in ["api_np", "api_cast"]:
min_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=True
)
max_value_api = get_sql_dialect_floating_point_infinity_value(
schema=api_schema_type, negative=False
)
df.replace(
to_replace=[min_value_api, max_value_api],
value=[min_value_dbms, max_value_dbms],
inplace=True,
)
elif type_ in ["DATETIME", "TIMESTAMP"]:
df[col] = pd.to_datetime(df[col])
elif type_ in ["DATE"]:
df[col] = pd.to_datetime(df[col]).dt.date
if table_name is None:
table_name = generate_test_table_name()
df.to_sql(
name=table_name,
con=engine,
index=False,
dtype=sql_dtypes,
if_exists="replace",
)
# Build a SqlAlchemyDataset using that database
return SqlAlchemyDataset(
table_name, engine=engine, profiler=profiler, caching=caching
)
elif dataset_type == "SparkDFDataset":
import pyspark.sql.types as sparktypes
SPARK_TYPES = {
"StringType": sparktypes.StringType,
"IntegerType": sparktypes.IntegerType,
"LongType": sparktypes.LongType,
"DateType": sparktypes.DateType,
"TimestampType": sparktypes.TimestampType,
"FloatType": sparktypes.FloatType,
"DoubleType": sparktypes.DoubleType,
"BooleanType": sparktypes.BooleanType,
"DataType": sparktypes.DataType,
"NullType": sparktypes.NullType,
}
spark = get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
# We need to allow null values in some column types that do not support them natively, so we skip
# use of df in this case.
data_reshaped = list(
zip(*[v for _, v in data.items()])
) # create a list of rows
if schemas and "spark" in schemas:
schema = schemas["spark"]
# sometimes first method causes Spark to throw a TypeError
try:
spark_schema = sparktypes.StructType(
[
sparktypes.StructField(
column, SPARK_TYPES[schema[column]](), True
)
for column in schema
]
)
# We create these every time, which is painful for testing
# However nuance around null treatment as well as the desire
# for real datetime support in tests makes this necessary
data = copy.deepcopy(data)
if "ts" in data:
print(data)
print(schema)
for col in schema:
type_ = schema[col]
if type_ in ["IntegerType", "LongType"]:
# Ints cannot be None...but None can be valid in Spark (as Null)
vals = []
for val in data[col]:
if val is None:
vals.append(val)
else:
vals.append(int(val))
data[col] = vals
elif type_ in ["FloatType", "DoubleType"]:
vals = []
for val in data[col]:
if val is None:
vals.append(val)
else:
vals.append(float(val))
data[col] = vals
elif type_ in ["DateType", "TimestampType"]:
vals = []
for val in data[col]:
if val is None:
vals.append(val)
else:
vals.append(parse(val))
data[col] = vals
# Do this again, now that we have done type conversion using the provided schema
data_reshaped = list(
zip(*[v for _, v in data.items()])
) # create a list of rows
spark_df = spark.createDataFrame(data_reshaped, schema=spark_schema)
except TypeError:
string_schema = sparktypes.StructType(
[
sparktypes.StructField(column, sparktypes.StringType())
for column in schema
]
)
spark_df = spark.createDataFrame(data_reshaped, string_schema)
for c in spark_df.columns:
spark_df = spark_df.withColumn(
c, spark_df[c].cast(SPARK_TYPES[schema[c]]())
)
elif len(data_reshaped) == 0:
# if we have an empty dataset and no schema, need to assign an arbitrary type
columns = list(data.keys())
spark_schema = sparktypes.StructType(
[
sparktypes.StructField(column, sparktypes.StringType())
for column in columns
]
)
spark_df = spark.createDataFrame(data_reshaped, spark_schema)
else:
# if no schema provided, uses Spark's schema inference
columns = list(data.keys())
spark_df = spark.createDataFrame(data_reshaped, columns)
return SparkDFDataset(spark_df, profiler=profiler, caching=caching)
else:
raise ValueError("Unknown dataset_type " + str(dataset_type)) | [
"def",
"get_dataset",
"(",
"dataset_type",
",",
"data",
",",
"schemas",
"=",
"None",
",",
"profiler",
"=",
"ColumnsExistProfiler",
",",
"caching",
"=",
"True",
",",
"table_name",
"=",
"None",
",",
"sqlite_db_path",
"=",
"None",
",",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"if",
"dataset_type",
"==",
"\"PandasDataset\"",
":",
"if",
"schemas",
"and",
"\"pandas\"",
"in",
"schemas",
":",
"schema",
"=",
"schemas",
"[",
"\"pandas\"",
"]",
"pandas_schema",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"schema",
".",
"items",
"(",
")",
":",
"# Note, these are just names used in our internal schemas to build datasets *for internal tests*",
"# Further, some changes in pandas internal about how datetimes are created means to support pandas",
"# pre- 0.25, we need to explicitly specify when we want timezone.",
"# We will use timestamp for timezone-aware (UTC only) dates in our tests",
"if",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"timestamp\"",
",",
"\"datetime64[ns, tz]\"",
"]",
":",
"df",
"[",
"key",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"key",
"]",
",",
"utc",
"=",
"True",
")",
"continue",
"elif",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"datetime\"",
",",
"\"datetime64\"",
",",
"\"datetime64[ns]\"",
"]",
":",
"df",
"[",
"key",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"key",
"]",
")",
"continue",
"elif",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"date\"",
"]",
":",
"df",
"[",
"key",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"key",
"]",
")",
".",
"dt",
".",
"date",
"value",
"=",
"\"object\"",
"try",
":",
"type_",
"=",
"np",
".",
"dtype",
"(",
"value",
")",
"except",
"TypeError",
":",
"type_",
"=",
"getattr",
"(",
"pd",
".",
"core",
".",
"dtypes",
".",
"dtypes",
",",
"value",
")",
"# If this raises AttributeError it's okay: it means someone built a bad test",
"pandas_schema",
"[",
"key",
"]",
"=",
"type_",
"# pandas_schema = {key: np.dtype(value) for (key, value) in schemas[\"pandas\"].items()}",
"df",
"=",
"df",
".",
"astype",
"(",
"pandas_schema",
")",
"return",
"PandasDataset",
"(",
"df",
",",
"profiler",
"=",
"profiler",
",",
"caching",
"=",
"caching",
")",
"elif",
"dataset_type",
"==",
"\"sqlite\"",
":",
"if",
"not",
"create_engine",
":",
"return",
"None",
"engine",
"=",
"create_engine",
"(",
"get_sqlite_connection_url",
"(",
"sqlite_db_path",
"=",
"sqlite_db_path",
")",
")",
"# Add the data to the database as a new table",
"sql_dtypes",
"=",
"{",
"}",
"if",
"(",
"schemas",
"and",
"\"sqlite\"",
"in",
"schemas",
"and",
"isinstance",
"(",
"engine",
".",
"dialect",
",",
"sqlitetypes",
".",
"dialect",
")",
")",
":",
"schema",
"=",
"schemas",
"[",
"\"sqlite\"",
"]",
"sql_dtypes",
"=",
"{",
"col",
":",
"SQLITE_TYPES",
"[",
"dtype",
"]",
"for",
"(",
"col",
",",
"dtype",
")",
"in",
"schema",
".",
"items",
"(",
")",
"}",
"for",
"col",
"in",
"schema",
":",
"type_",
"=",
"schema",
"[",
"col",
"]",
"if",
"type_",
"in",
"[",
"\"INTEGER\"",
",",
"\"SMALLINT\"",
",",
"\"BIGINT\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
",",
"downcast",
"=",
"\"signed\"",
")",
"elif",
"type_",
"in",
"[",
"\"FLOAT\"",
",",
"\"DOUBLE\"",
",",
"\"DOUBLE_PRECISION\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
")",
"min_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"True",
")",
"max_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"False",
")",
"for",
"api_schema_type",
"in",
"[",
"\"api_np\"",
",",
"\"api_cast\"",
"]",
":",
"min_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"True",
")",
"max_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"False",
")",
"df",
".",
"replace",
"(",
"to_replace",
"=",
"[",
"min_value_api",
",",
"max_value_api",
"]",
",",
"value",
"=",
"[",
"min_value_dbms",
",",
"max_value_dbms",
"]",
",",
"inplace",
"=",
"True",
",",
")",
"elif",
"type_",
"in",
"[",
"\"DATETIME\"",
",",
"\"TIMESTAMP\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
"elif",
"type_",
"in",
"[",
"\"DATE\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
".",
"dt",
".",
"date",
"if",
"table_name",
"is",
"None",
":",
"table_name",
"=",
"generate_test_table_name",
"(",
")",
"df",
".",
"to_sql",
"(",
"name",
"=",
"table_name",
",",
"con",
"=",
"engine",
",",
"index",
"=",
"False",
",",
"dtype",
"=",
"sql_dtypes",
",",
"if_exists",
"=",
"\"replace\"",
",",
")",
"# Build a SqlAlchemyDataset using that database",
"return",
"SqlAlchemyDataset",
"(",
"table_name",
",",
"engine",
"=",
"engine",
",",
"profiler",
"=",
"profiler",
",",
"caching",
"=",
"caching",
")",
"elif",
"dataset_type",
"==",
"\"postgresql\"",
":",
"if",
"not",
"create_engine",
":",
"return",
"None",
"# Create a new database",
"db_hostname",
"=",
"os",
".",
"getenv",
"(",
"\"GE_TEST_LOCAL_DB_HOSTNAME\"",
",",
"\"localhost\"",
")",
"engine",
"=",
"connection_manager",
".",
"get_engine",
"(",
"f\"postgresql://postgres@{db_hostname}/test_ci\"",
")",
"sql_dtypes",
"=",
"{",
"}",
"if",
"(",
"schemas",
"and",
"\"postgresql\"",
"in",
"schemas",
"and",
"isinstance",
"(",
"engine",
".",
"dialect",
",",
"postgresqltypes",
".",
"dialect",
")",
")",
":",
"schema",
"=",
"schemas",
"[",
"\"postgresql\"",
"]",
"sql_dtypes",
"=",
"{",
"col",
":",
"POSTGRESQL_TYPES",
"[",
"dtype",
"]",
"for",
"(",
"col",
",",
"dtype",
")",
"in",
"schema",
".",
"items",
"(",
")",
"}",
"for",
"col",
"in",
"schema",
":",
"type_",
"=",
"schema",
"[",
"col",
"]",
"if",
"type_",
"in",
"[",
"\"INTEGER\"",
",",
"\"SMALLINT\"",
",",
"\"BIGINT\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
",",
"downcast",
"=",
"\"signed\"",
")",
"elif",
"type_",
"in",
"[",
"\"FLOAT\"",
",",
"\"DOUBLE\"",
",",
"\"DOUBLE_PRECISION\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
")",
"min_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"True",
")",
"max_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"False",
")",
"for",
"api_schema_type",
"in",
"[",
"\"api_np\"",
",",
"\"api_cast\"",
"]",
":",
"min_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"True",
")",
"max_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"False",
")",
"df",
".",
"replace",
"(",
"to_replace",
"=",
"[",
"min_value_api",
",",
"max_value_api",
"]",
",",
"value",
"=",
"[",
"min_value_dbms",
",",
"max_value_dbms",
"]",
",",
"inplace",
"=",
"True",
",",
")",
"elif",
"type_",
"in",
"[",
"\"DATETIME\"",
",",
"\"TIMESTAMP\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
"elif",
"type_",
"in",
"[",
"\"DATE\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
".",
"dt",
".",
"date",
"if",
"table_name",
"is",
"None",
":",
"table_name",
"=",
"generate_test_table_name",
"(",
")",
"df",
".",
"to_sql",
"(",
"name",
"=",
"table_name",
",",
"con",
"=",
"engine",
",",
"index",
"=",
"False",
",",
"dtype",
"=",
"sql_dtypes",
",",
"if_exists",
"=",
"\"replace\"",
",",
")",
"# Build a SqlAlchemyDataset using that database",
"return",
"SqlAlchemyDataset",
"(",
"table_name",
",",
"engine",
"=",
"engine",
",",
"profiler",
"=",
"profiler",
",",
"caching",
"=",
"caching",
")",
"elif",
"dataset_type",
"==",
"\"mysql\"",
":",
"if",
"not",
"create_engine",
":",
"return",
"None",
"db_hostname",
"=",
"os",
".",
"getenv",
"(",
"\"GE_TEST_LOCAL_DB_HOSTNAME\"",
",",
"\"localhost\"",
")",
"engine",
"=",
"create_engine",
"(",
"f\"mysql+pymysql://root@{db_hostname}/test_ci\"",
")",
"sql_dtypes",
"=",
"{",
"}",
"if",
"(",
"schemas",
"and",
"\"mysql\"",
"in",
"schemas",
"and",
"isinstance",
"(",
"engine",
".",
"dialect",
",",
"mysqltypes",
".",
"dialect",
")",
")",
":",
"schema",
"=",
"schemas",
"[",
"\"mysql\"",
"]",
"sql_dtypes",
"=",
"{",
"col",
":",
"MYSQL_TYPES",
"[",
"dtype",
"]",
"for",
"(",
"col",
",",
"dtype",
")",
"in",
"schema",
".",
"items",
"(",
")",
"}",
"for",
"col",
"in",
"schema",
":",
"type_",
"=",
"schema",
"[",
"col",
"]",
"if",
"type_",
"in",
"[",
"\"INTEGER\"",
",",
"\"SMALLINT\"",
",",
"\"BIGINT\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
",",
"downcast",
"=",
"\"signed\"",
")",
"elif",
"type_",
"in",
"[",
"\"FLOAT\"",
",",
"\"DOUBLE\"",
",",
"\"DOUBLE_PRECISION\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
")",
"min_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"True",
")",
"max_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"False",
")",
"for",
"api_schema_type",
"in",
"[",
"\"api_np\"",
",",
"\"api_cast\"",
"]",
":",
"min_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"True",
")",
"max_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"False",
")",
"df",
".",
"replace",
"(",
"to_replace",
"=",
"[",
"min_value_api",
",",
"max_value_api",
"]",
",",
"value",
"=",
"[",
"min_value_dbms",
",",
"max_value_dbms",
"]",
",",
"inplace",
"=",
"True",
",",
")",
"elif",
"type_",
"in",
"[",
"\"DATETIME\"",
",",
"\"TIMESTAMP\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
"elif",
"type_",
"in",
"[",
"\"DATE\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
".",
"dt",
".",
"date",
"if",
"table_name",
"is",
"None",
":",
"table_name",
"=",
"generate_test_table_name",
"(",
")",
"df",
".",
"to_sql",
"(",
"name",
"=",
"table_name",
",",
"con",
"=",
"engine",
",",
"index",
"=",
"False",
",",
"dtype",
"=",
"sql_dtypes",
",",
"if_exists",
"=",
"\"replace\"",
",",
")",
"# Will - 20210126",
"# For mysql we want our tests to know when a temp_table is referred to more than once in the",
"# same query. This has caused problems in expectations like expect_column_values_to_be_unique().",
"# Here we instantiate a SqlAlchemyDataset with a custom_sql, which causes a temp_table to be created,",
"# rather than referring the table by name.",
"custom_sql",
"=",
"\"SELECT * FROM \"",
"+",
"table_name",
"return",
"SqlAlchemyDataset",
"(",
"custom_sql",
"=",
"custom_sql",
",",
"engine",
"=",
"engine",
",",
"profiler",
"=",
"profiler",
",",
"caching",
"=",
"caching",
")",
"elif",
"dataset_type",
"==",
"\"mssql\"",
":",
"if",
"not",
"create_engine",
":",
"return",
"None",
"db_hostname",
"=",
"os",
".",
"getenv",
"(",
"\"GE_TEST_LOCAL_DB_HOSTNAME\"",
",",
"\"localhost\"",
")",
"engine",
"=",
"create_engine",
"(",
"f\"mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@{db_hostname}:1433/test_ci?\"",
"\"driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true\"",
",",
"# echo=True,",
")",
"# If \"autocommit\" is not desired to be on by default, then use the following pattern when explicit \"autocommit\"",
"# is desired (e.g., for temporary tables, \"autocommit\" is off by default, so the override option may be useful).",
"# engine.execute(sa.text(sql_query_string).execution_options(autocommit=True))",
"sql_dtypes",
"=",
"{",
"}",
"if",
"(",
"schemas",
"and",
"dataset_type",
"in",
"schemas",
"and",
"isinstance",
"(",
"engine",
".",
"dialect",
",",
"mssqltypes",
".",
"dialect",
")",
")",
":",
"schema",
"=",
"schemas",
"[",
"dataset_type",
"]",
"sql_dtypes",
"=",
"{",
"col",
":",
"MSSQL_TYPES",
"[",
"dtype",
"]",
"for",
"(",
"col",
",",
"dtype",
")",
"in",
"schema",
".",
"items",
"(",
")",
"}",
"for",
"col",
"in",
"schema",
":",
"type_",
"=",
"schema",
"[",
"col",
"]",
"if",
"type_",
"in",
"[",
"\"INTEGER\"",
",",
"\"SMALLINT\"",
",",
"\"BIGINT\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
",",
"downcast",
"=",
"\"signed\"",
")",
"elif",
"type_",
"in",
"[",
"\"FLOAT\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"df",
"[",
"col",
"]",
")",
"min_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"True",
")",
"max_value_dbms",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"dataset_type",
",",
"negative",
"=",
"False",
")",
"for",
"api_schema_type",
"in",
"[",
"\"api_np\"",
",",
"\"api_cast\"",
"]",
":",
"min_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"True",
")",
"max_value_api",
"=",
"get_sql_dialect_floating_point_infinity_value",
"(",
"schema",
"=",
"api_schema_type",
",",
"negative",
"=",
"False",
")",
"df",
".",
"replace",
"(",
"to_replace",
"=",
"[",
"min_value_api",
",",
"max_value_api",
"]",
",",
"value",
"=",
"[",
"min_value_dbms",
",",
"max_value_dbms",
"]",
",",
"inplace",
"=",
"True",
",",
")",
"elif",
"type_",
"in",
"[",
"\"DATETIME\"",
",",
"\"TIMESTAMP\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
"elif",
"type_",
"in",
"[",
"\"DATE\"",
"]",
":",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"col",
"]",
")",
".",
"dt",
".",
"date",
"if",
"table_name",
"is",
"None",
":",
"table_name",
"=",
"generate_test_table_name",
"(",
")",
"df",
".",
"to_sql",
"(",
"name",
"=",
"table_name",
",",
"con",
"=",
"engine",
",",
"index",
"=",
"False",
",",
"dtype",
"=",
"sql_dtypes",
",",
"if_exists",
"=",
"\"replace\"",
",",
")",
"# Build a SqlAlchemyDataset using that database",
"return",
"SqlAlchemyDataset",
"(",
"table_name",
",",
"engine",
"=",
"engine",
",",
"profiler",
"=",
"profiler",
",",
"caching",
"=",
"caching",
")",
"elif",
"dataset_type",
"==",
"\"SparkDFDataset\"",
":",
"import",
"pyspark",
".",
"sql",
".",
"types",
"as",
"sparktypes",
"SPARK_TYPES",
"=",
"{",
"\"StringType\"",
":",
"sparktypes",
".",
"StringType",
",",
"\"IntegerType\"",
":",
"sparktypes",
".",
"IntegerType",
",",
"\"LongType\"",
":",
"sparktypes",
".",
"LongType",
",",
"\"DateType\"",
":",
"sparktypes",
".",
"DateType",
",",
"\"TimestampType\"",
":",
"sparktypes",
".",
"TimestampType",
",",
"\"FloatType\"",
":",
"sparktypes",
".",
"FloatType",
",",
"\"DoubleType\"",
":",
"sparktypes",
".",
"DoubleType",
",",
"\"BooleanType\"",
":",
"sparktypes",
".",
"BooleanType",
",",
"\"DataType\"",
":",
"sparktypes",
".",
"DataType",
",",
"\"NullType\"",
":",
"sparktypes",
".",
"NullType",
",",
"}",
"spark",
"=",
"get_or_create_spark_application",
"(",
"spark_config",
"=",
"{",
"\"spark.sql.catalogImplementation\"",
":",
"\"hive\"",
",",
"\"spark.executor.memory\"",
":",
"\"450m\"",
",",
"# \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.",
"}",
")",
"# We need to allow null values in some column types that do not support them natively, so we skip",
"# use of df in this case.",
"data_reshaped",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"v",
"for",
"_",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
"]",
")",
")",
"# create a list of rows",
"if",
"schemas",
"and",
"\"spark\"",
"in",
"schemas",
":",
"schema",
"=",
"schemas",
"[",
"\"spark\"",
"]",
"# sometimes first method causes Spark to throw a TypeError",
"try",
":",
"spark_schema",
"=",
"sparktypes",
".",
"StructType",
"(",
"[",
"sparktypes",
".",
"StructField",
"(",
"column",
",",
"SPARK_TYPES",
"[",
"schema",
"[",
"column",
"]",
"]",
"(",
")",
",",
"True",
")",
"for",
"column",
"in",
"schema",
"]",
")",
"# We create these every time, which is painful for testing",
"# However nuance around null treatment as well as the desire",
"# for real datetime support in tests makes this necessary",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"if",
"\"ts\"",
"in",
"data",
":",
"print",
"(",
"data",
")",
"print",
"(",
"schema",
")",
"for",
"col",
"in",
"schema",
":",
"type_",
"=",
"schema",
"[",
"col",
"]",
"if",
"type_",
"in",
"[",
"\"IntegerType\"",
",",
"\"LongType\"",
"]",
":",
"# Ints cannot be None...but None can be valid in Spark (as Null)",
"vals",
"=",
"[",
"]",
"for",
"val",
"in",
"data",
"[",
"col",
"]",
":",
"if",
"val",
"is",
"None",
":",
"vals",
".",
"append",
"(",
"val",
")",
"else",
":",
"vals",
".",
"append",
"(",
"int",
"(",
"val",
")",
")",
"data",
"[",
"col",
"]",
"=",
"vals",
"elif",
"type_",
"in",
"[",
"\"FloatType\"",
",",
"\"DoubleType\"",
"]",
":",
"vals",
"=",
"[",
"]",
"for",
"val",
"in",
"data",
"[",
"col",
"]",
":",
"if",
"val",
"is",
"None",
":",
"vals",
".",
"append",
"(",
"val",
")",
"else",
":",
"vals",
".",
"append",
"(",
"float",
"(",
"val",
")",
")",
"data",
"[",
"col",
"]",
"=",
"vals",
"elif",
"type_",
"in",
"[",
"\"DateType\"",
",",
"\"TimestampType\"",
"]",
":",
"vals",
"=",
"[",
"]",
"for",
"val",
"in",
"data",
"[",
"col",
"]",
":",
"if",
"val",
"is",
"None",
":",
"vals",
".",
"append",
"(",
"val",
")",
"else",
":",
"vals",
".",
"append",
"(",
"parse",
"(",
"val",
")",
")",
"data",
"[",
"col",
"]",
"=",
"vals",
"# Do this again, now that we have done type conversion using the provided schema",
"data_reshaped",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"v",
"for",
"_",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
"]",
")",
")",
"# create a list of rows",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"schema",
"=",
"spark_schema",
")",
"except",
"TypeError",
":",
"string_schema",
"=",
"sparktypes",
".",
"StructType",
"(",
"[",
"sparktypes",
".",
"StructField",
"(",
"column",
",",
"sparktypes",
".",
"StringType",
"(",
")",
")",
"for",
"column",
"in",
"schema",
"]",
")",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"string_schema",
")",
"for",
"c",
"in",
"spark_df",
".",
"columns",
":",
"spark_df",
"=",
"spark_df",
".",
"withColumn",
"(",
"c",
",",
"spark_df",
"[",
"c",
"]",
".",
"cast",
"(",
"SPARK_TYPES",
"[",
"schema",
"[",
"c",
"]",
"]",
"(",
")",
")",
")",
"elif",
"len",
"(",
"data_reshaped",
")",
"==",
"0",
":",
"# if we have an empty dataset and no schema, need to assign an arbitrary type",
"columns",
"=",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"spark_schema",
"=",
"sparktypes",
".",
"StructType",
"(",
"[",
"sparktypes",
".",
"StructField",
"(",
"column",
",",
"sparktypes",
".",
"StringType",
"(",
")",
")",
"for",
"column",
"in",
"columns",
"]",
")",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"spark_schema",
")",
"else",
":",
"# if no schema provided, uses Spark's schema inference",
"columns",
"=",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"columns",
")",
"return",
"SparkDFDataset",
"(",
"spark_df",
",",
"profiler",
"=",
"profiler",
",",
"caching",
"=",
"caching",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown dataset_type \"",
"+",
"str",
"(",
"dataset_type",
")",
")"
] | [
242,
0
] | [
643,
69
] | python | en | ['en', 'en', 'en'] | True |
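
get_dataset, documented above, is the test utility that materializes the same test data against different backends (pandas, sqlite, postgresql, mysql, mssql, Spark). The sketch below shows the simplest pandas path; the import location is an assumption (the helper ships with Great Expectations' self-check/test utilities), and the data and schema are illustrative.

# Hedged usage sketch for the get_dataset test utility documented above.
# Import location, data, and schema are assumptions for illustration.
from great_expectations.self_check.util import get_dataset

data = {"id": [1, 2, 3], "price": [9.99, 12.50, None]}
schemas = {"pandas": {"id": "int64", "price": "float64"}}

dataset = get_dataset("PandasDataset", data, schemas=schemas)

# The returned PandasDataset exposes expectation methods directly:
result = dataset.expect_column_values_to_not_be_null("id")
print(result.success)  # -> True
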
get_test_validator_with_data | (
execution_engine,
data,
schemas=None,
profiler=ColumnsExistProfiler,
caching=True,
table_name=None,
sqlite_db_path=None,
) | Utility to create datasets for json-formatted tests. | Utility to create datasets for json-formatted tests. | def get_test_validator_with_data(
execution_engine,
data,
schemas=None,
profiler=ColumnsExistProfiler,
caching=True,
table_name=None,
sqlite_db_path=None,
):
"""Utility to create datasets for json-formatted tests."""
df = pd.DataFrame(data)
if execution_engine == "pandas":
if schemas and "pandas" in schemas:
schema = schemas["pandas"]
pandas_schema = {}
for (key, value) in schema.items():
# Note, these are just names used in our internal schemas to build datasets *for internal tests*
# Further, some changes in pandas internal about how datetimes are created means to support pandas
# pre- 0.25, we need to explicitly specify when we want timezone.
# We will use timestamp for timezone-aware (UTC only) dates in our tests
if value.lower() in ["timestamp", "datetime64[ns, tz]"]:
df[key] = pd.to_datetime(df[key], utc=True)
continue
elif value.lower() in ["datetime", "datetime64", "datetime64[ns]"]:
df[key] = pd.to_datetime(df[key])
continue
elif value.lower() in ["date"]:
df[key] = pd.to_datetime(df[key]).dt.date
value = "object"
try:
type_ = np.dtype(value)
except TypeError:
type_ = getattr(pd.core.dtypes.dtypes, value)
# If this raises AttributeError it's okay: it means someone built a bad test
pandas_schema[key] = type_
# pandas_schema = {key: np.dtype(value) for (key, value) in schemas["pandas"].items()}
df = df.astype(pandas_schema)
if table_name is None:
# noinspection PyUnusedLocal
table_name = generate_test_table_name()
return build_pandas_validator_with_data(df=df)
elif execution_engine in ["sqlite", "postgresql", "mysql", "mssql"]:
if not create_engine:
return None
return build_sa_validator_with_data(
df=df,
sa_engine_name=execution_engine,
schemas=schemas,
caching=caching,
table_name=table_name,
sqlite_db_path=sqlite_db_path,
)
elif execution_engine == "spark":
import pyspark.sql.types as sparktypes
SPARK_TYPES = {
"StringType": sparktypes.StringType,
"IntegerType": sparktypes.IntegerType,
"LongType": sparktypes.LongType,
"DateType": sparktypes.DateType,
"TimestampType": sparktypes.TimestampType,
"FloatType": sparktypes.FloatType,
"DoubleType": sparktypes.DoubleType,
"BooleanType": sparktypes.BooleanType,
"DataType": sparktypes.DataType,
"NullType": sparktypes.NullType,
}
spark = get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
# We need to allow null values in some column types that do not support them natively, so we skip
# use of df in this case.
data_reshaped = list(
zip(*[v for _, v in data.items()])
) # create a list of rows
if schemas and "spark" in schemas:
schema = schemas["spark"]
# sometimes first method causes Spark to throw a TypeError
try:
spark_schema = sparktypes.StructType(
[
sparktypes.StructField(
column, SPARK_TYPES[schema[column]](), True
)
for column in schema
]
)
# We create these every time, which is painful for testing
# However nuance around null treatment as well as the desire
# for real datetime support in tests makes this necessary
data = copy.deepcopy(data)
if "ts" in data:
print(data)
print(schema)
for col in schema:
type_ = schema[col]
if type_ in ["IntegerType", "LongType"]:
# Ints cannot be None...but None can be valid in Spark (as Null)
vals = []
for val in data[col]:
if val is None:
vals.append(val)
else:
vals.append(int(val))
data[col] = vals
elif type_ in ["FloatType", "DoubleType"]:
vals = []
for val in data[col]:
if val is None:
vals.append(val)
else:
vals.append(float(val))
data[col] = vals
elif type_ in ["DateType", "TimestampType"]:
vals = []
for val in data[col]:
if val is None:
vals.append(val)
else:
vals.append(parse(val))
data[col] = vals
# Do this again, now that we have done type conversion using the provided schema
data_reshaped = list(
zip(*[v for _, v in data.items()])
) # create a list of rows
spark_df = spark.createDataFrame(data_reshaped, schema=spark_schema)
except TypeError:
string_schema = sparktypes.StructType(
[
sparktypes.StructField(column, sparktypes.StringType())
for column in schema
]
)
spark_df = spark.createDataFrame(data_reshaped, string_schema)
for c in spark_df.columns:
spark_df = spark_df.withColumn(
c, spark_df[c].cast(SPARK_TYPES[schema[c]]())
)
elif len(data_reshaped) == 0:
# if we have an empty dataset and no schema, need to assign an arbitrary type
columns = list(data.keys())
spark_schema = sparktypes.StructType(
[
sparktypes.StructField(column, sparktypes.StringType())
for column in columns
]
)
spark_df = spark.createDataFrame(data_reshaped, spark_schema)
else:
# if no schema provided, uses Spark's schema inference
columns = list(data.keys())
spark_df = spark.createDataFrame(data_reshaped, columns)
if table_name is None:
# noinspection PyUnusedLocal
table_name = generate_test_table_name()
return build_spark_validator_with_data(df=spark_df, spark=spark)
else:
raise ValueError("Unknown dataset_type " + str(execution_engine)) | [
"def",
"get_test_validator_with_data",
"(",
"execution_engine",
",",
"data",
",",
"schemas",
"=",
"None",
",",
"profiler",
"=",
"ColumnsExistProfiler",
",",
"caching",
"=",
"True",
",",
"table_name",
"=",
"None",
",",
"sqlite_db_path",
"=",
"None",
",",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"if",
"execution_engine",
"==",
"\"pandas\"",
":",
"if",
"schemas",
"and",
"\"pandas\"",
"in",
"schemas",
":",
"schema",
"=",
"schemas",
"[",
"\"pandas\"",
"]",
"pandas_schema",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"schema",
".",
"items",
"(",
")",
":",
"# Note, these are just names used in our internal schemas to build datasets *for internal tests*",
"# Further, some changes in pandas internal about how datetimes are created means to support pandas",
"# pre- 0.25, we need to explicitly specify when we want timezone.",
"# We will use timestamp for timezone-aware (UTC only) dates in our tests",
"if",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"timestamp\"",
",",
"\"datetime64[ns, tz]\"",
"]",
":",
"df",
"[",
"key",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"key",
"]",
",",
"utc",
"=",
"True",
")",
"continue",
"elif",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"datetime\"",
",",
"\"datetime64\"",
",",
"\"datetime64[ns]\"",
"]",
":",
"df",
"[",
"key",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"key",
"]",
")",
"continue",
"elif",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"date\"",
"]",
":",
"df",
"[",
"key",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"key",
"]",
")",
".",
"dt",
".",
"date",
"value",
"=",
"\"object\"",
"try",
":",
"type_",
"=",
"np",
".",
"dtype",
"(",
"value",
")",
"except",
"TypeError",
":",
"type_",
"=",
"getattr",
"(",
"pd",
".",
"core",
".",
"dtypes",
".",
"dtypes",
",",
"value",
")",
"# If this raises AttributeError it's okay: it means someone built a bad test",
"pandas_schema",
"[",
"key",
"]",
"=",
"type_",
"# pandas_schema = {key: np.dtype(value) for (key, value) in schemas[\"pandas\"].items()}",
"df",
"=",
"df",
".",
"astype",
"(",
"pandas_schema",
")",
"if",
"table_name",
"is",
"None",
":",
"# noinspection PyUnusedLocal",
"table_name",
"=",
"generate_test_table_name",
"(",
")",
"return",
"build_pandas_validator_with_data",
"(",
"df",
"=",
"df",
")",
"elif",
"execution_engine",
"in",
"[",
"\"sqlite\"",
",",
"\"postgresql\"",
",",
"\"mysql\"",
",",
"\"mssql\"",
"]",
":",
"if",
"not",
"create_engine",
":",
"return",
"None",
"return",
"build_sa_validator_with_data",
"(",
"df",
"=",
"df",
",",
"sa_engine_name",
"=",
"execution_engine",
",",
"schemas",
"=",
"schemas",
",",
"caching",
"=",
"caching",
",",
"table_name",
"=",
"table_name",
",",
"sqlite_db_path",
"=",
"sqlite_db_path",
",",
")",
"elif",
"execution_engine",
"==",
"\"spark\"",
":",
"import",
"pyspark",
".",
"sql",
".",
"types",
"as",
"sparktypes",
"SPARK_TYPES",
"=",
"{",
"\"StringType\"",
":",
"sparktypes",
".",
"StringType",
",",
"\"IntegerType\"",
":",
"sparktypes",
".",
"IntegerType",
",",
"\"LongType\"",
":",
"sparktypes",
".",
"LongType",
",",
"\"DateType\"",
":",
"sparktypes",
".",
"DateType",
",",
"\"TimestampType\"",
":",
"sparktypes",
".",
"TimestampType",
",",
"\"FloatType\"",
":",
"sparktypes",
".",
"FloatType",
",",
"\"DoubleType\"",
":",
"sparktypes",
".",
"DoubleType",
",",
"\"BooleanType\"",
":",
"sparktypes",
".",
"BooleanType",
",",
"\"DataType\"",
":",
"sparktypes",
".",
"DataType",
",",
"\"NullType\"",
":",
"sparktypes",
".",
"NullType",
",",
"}",
"spark",
"=",
"get_or_create_spark_application",
"(",
"spark_config",
"=",
"{",
"\"spark.sql.catalogImplementation\"",
":",
"\"hive\"",
",",
"\"spark.executor.memory\"",
":",
"\"450m\"",
",",
"# \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.",
"}",
")",
"# We need to allow null values in some column types that do not support them natively, so we skip",
"# use of df in this case.",
"data_reshaped",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"v",
"for",
"_",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
"]",
")",
")",
"# create a list of rows",
"if",
"schemas",
"and",
"\"spark\"",
"in",
"schemas",
":",
"schema",
"=",
"schemas",
"[",
"\"spark\"",
"]",
"# sometimes first method causes Spark to throw a TypeError",
"try",
":",
"spark_schema",
"=",
"sparktypes",
".",
"StructType",
"(",
"[",
"sparktypes",
".",
"StructField",
"(",
"column",
",",
"SPARK_TYPES",
"[",
"schema",
"[",
"column",
"]",
"]",
"(",
")",
",",
"True",
")",
"for",
"column",
"in",
"schema",
"]",
")",
"# We create these every time, which is painful for testing",
"# However nuance around null treatment as well as the desire",
"# for real datetime support in tests makes this necessary",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"if",
"\"ts\"",
"in",
"data",
":",
"print",
"(",
"data",
")",
"print",
"(",
"schema",
")",
"for",
"col",
"in",
"schema",
":",
"type_",
"=",
"schema",
"[",
"col",
"]",
"if",
"type_",
"in",
"[",
"\"IntegerType\"",
",",
"\"LongType\"",
"]",
":",
"# Ints cannot be None...but None can be valid in Spark (as Null)",
"vals",
"=",
"[",
"]",
"for",
"val",
"in",
"data",
"[",
"col",
"]",
":",
"if",
"val",
"is",
"None",
":",
"vals",
".",
"append",
"(",
"val",
")",
"else",
":",
"vals",
".",
"append",
"(",
"int",
"(",
"val",
")",
")",
"data",
"[",
"col",
"]",
"=",
"vals",
"elif",
"type_",
"in",
"[",
"\"FloatType\"",
",",
"\"DoubleType\"",
"]",
":",
"vals",
"=",
"[",
"]",
"for",
"val",
"in",
"data",
"[",
"col",
"]",
":",
"if",
"val",
"is",
"None",
":",
"vals",
".",
"append",
"(",
"val",
")",
"else",
":",
"vals",
".",
"append",
"(",
"float",
"(",
"val",
")",
")",
"data",
"[",
"col",
"]",
"=",
"vals",
"elif",
"type_",
"in",
"[",
"\"DateType\"",
",",
"\"TimestampType\"",
"]",
":",
"vals",
"=",
"[",
"]",
"for",
"val",
"in",
"data",
"[",
"col",
"]",
":",
"if",
"val",
"is",
"None",
":",
"vals",
".",
"append",
"(",
"val",
")",
"else",
":",
"vals",
".",
"append",
"(",
"parse",
"(",
"val",
")",
")",
"data",
"[",
"col",
"]",
"=",
"vals",
"# Do this again, now that we have done type conversion using the provided schema",
"data_reshaped",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"v",
"for",
"_",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
"]",
")",
")",
"# create a list of rows",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"schema",
"=",
"spark_schema",
")",
"except",
"TypeError",
":",
"string_schema",
"=",
"sparktypes",
".",
"StructType",
"(",
"[",
"sparktypes",
".",
"StructField",
"(",
"column",
",",
"sparktypes",
".",
"StringType",
"(",
")",
")",
"for",
"column",
"in",
"schema",
"]",
")",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"string_schema",
")",
"for",
"c",
"in",
"spark_df",
".",
"columns",
":",
"spark_df",
"=",
"spark_df",
".",
"withColumn",
"(",
"c",
",",
"spark_df",
"[",
"c",
"]",
".",
"cast",
"(",
"SPARK_TYPES",
"[",
"schema",
"[",
"c",
"]",
"]",
"(",
")",
")",
")",
"elif",
"len",
"(",
"data_reshaped",
")",
"==",
"0",
":",
"# if we have an empty dataset and no schema, need to assign an arbitrary type",
"columns",
"=",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"spark_schema",
"=",
"sparktypes",
".",
"StructType",
"(",
"[",
"sparktypes",
".",
"StructField",
"(",
"column",
",",
"sparktypes",
".",
"StringType",
"(",
")",
")",
"for",
"column",
"in",
"columns",
"]",
")",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"spark_schema",
")",
"else",
":",
"# if no schema provided, uses Spark's schema inference",
"columns",
"=",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"spark_df",
"=",
"spark",
".",
"createDataFrame",
"(",
"data_reshaped",
",",
"columns",
")",
"if",
"table_name",
"is",
"None",
":",
"# noinspection PyUnusedLocal",
"table_name",
"=",
"generate_test_table_name",
"(",
")",
"return",
"build_spark_validator_with_data",
"(",
"df",
"=",
"spark_df",
",",
"spark",
"=",
"spark",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown dataset_type \"",
"+",
"str",
"(",
"execution_engine",
")",
")"
] | [646, 0] | [816, 73] | python | en | ['en', 'en', 'en'] | True |
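A minimal usage sketch for the validator factory above; the column names, schema entries and chosen backend are assumptions for illustration, not values taken from the source:

    # Hypothetical call; column names and schema values are illustrative only.
    data = {
        "id": [1, 2, 3, None],
        "created": ["2021-01-01", "2021-01-02", "2021-01-03", None],
    }
    schemas = {
        "pandas": {"id": "float64", "created": "datetime64[ns]"},
        "spark": {"id": "IntegerType", "created": "TimestampType"},
    }
    # Returns a Validator backed by the requested engine ("pandas", "spark", "sqlite",
    # "postgresql", "mysql" or "mssql"), with the matching per-backend schema applied.
    validator = get_test_validator_with_data("pandas", data, schemas=schemas)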
generate_expectation_tests | (
expectation_type, examples_config, expectation_execution_engines_dict=None
) |
:param expectation_type: snake_case name of the expectation type
:param examples_config: a dictionary that defines the data and test cases for the expectation
:param expectation_execution_engines_dict: (optional) a dictionary that shows which backends/execution engines the
expectation is implemented for. It can be obtained from the output of the expectation's self_check method
Example:
{
"PandasExecutionEngine": True,
"SqlAlchemyExecutionEngine": False,
"SparkDFExecutionEngine": False
}
:return:
| def generate_expectation_tests(
expectation_type, examples_config, expectation_execution_engines_dict=None
):
"""
:param expectation_type: snake_case name of the expectation type
:param examples_config: a dictionary that defines the data and test cases for the expectation
:param expectation_execution_engines_dict: (optional) a dictionary that shows which backends/execution engines the
expectation is implemented for. It can be obtained from the output of the expectation's self_check method
Example:
{
"PandasExecutionEngine": True,
"SqlAlchemyExecutionEngine": False,
"SparkDFExecutionEngine": False
}
:return:
"""
parametrized_tests = []
# use the expectation_execution_engines_dict (if provided) to request only the appropriate backends
if expectation_execution_engines_dict is not None:
backends = build_test_backends_list(
include_pandas=expectation_execution_engines_dict.get(
"PandasExecutionEngine"
)
== True,
include_spark=expectation_execution_engines_dict.get(
"SparkDFExecutionEngine"
)
== True,
include_sqlalchemy=expectation_execution_engines_dict.get(
"SqlAlchemyExecutionEngine"
)
== True,
)
else:
backends = build_test_backends_list()
for c in backends:
for d in examples_config:
d = copy.deepcopy(d)
datasets = []
if candidate_test_is_on_temporary_notimplemented_list_cfe(
c, expectation_type
):
skip_expectation = True
schemas = validator_with_data = None
else:
skip_expectation = False
if isinstance(d["data"], list):
sqlite_db_path = os.path.abspath(
os.path.join(
tmp_dir,
"sqlite_db"
+ "".join(
[
random.choice(string.ascii_letters + string.digits)
for _ in range(8)
]
)
+ ".db",
)
)
for dataset in d["data"]:
datasets.append(
get_test_validator_with_data(
c,
dataset["data"],
dataset.get("schemas"),
table_name=dataset.get("dataset_name"),
sqlite_db_path=sqlite_db_path,
)
)
validator_with_data = datasets[0]
else:
schemas = d["schemas"] if "schemas" in d else None
validator_with_data = get_test_validator_with_data(
c, d["data"], schemas=schemas
)
for test in d["tests"]:
# use the expectation_execution_engines_dict of the expectation
# to exclude unimplemented backends from the testing
if expectation_execution_engines_dict is not None:
supress_test_for = test.get("suppress_test_for")
if supress_test_for is None:
supress_test_for = []
if not expectation_execution_engines_dict.get(
"PandasExecutionEngine"
):
supress_test_for.append("pandas")
if not expectation_execution_engines_dict.get(
"SqlAlchemyExecutionEngine"
):
supress_test_for.append("sqlalchemy")
if not expectation_execution_engines_dict.get(
"SparkDFExecutionEngine"
):
supress_test_for.append("spark")
if len(supress_test_for) > 0:
test["suppress_test_for"] = supress_test_for
generate_test = True
skip_test = False
if "only_for" in test:
# if we're not on the "only_for" list, then never even generate the test
generate_test = False
if not isinstance(test["only_for"], list):
raise ValueError("Invalid test specification.")
if validator_with_data and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
):
# Call out supported dialects
if "sqlalchemy" in test["only_for"]:
generate_test = True
elif (
"sqlite" in test["only_for"]
and sqliteDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
sqliteDialect,
)
):
generate_test = True
elif (
"postgresql" in test["only_for"]
and postgresqlDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
postgresqlDialect,
)
):
generate_test = True
elif (
"mysql" in test["only_for"]
and mysqlDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mysqlDialect,
)
):
generate_test = True
elif (
"mssql" in test["only_for"]
and mssqlDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mssqlDialect,
)
):
generate_test = True
elif validator_with_data and isinstance(
validator_with_data.execution_engine.active_batch_data,
pandas_DataFrame,
):
if "pandas" in test["only_for"]:
generate_test = True
if (
"pandas_022" in test["only_for"]
or "pandas_023" in test["only_for"]
) and int(pd.__version__.split(".")[1]) in [22, 23]:
generate_test = True
if ("pandas>=24" in test["only_for"]) and int(
pd.__version__.split(".")[1]
) > 24:
generate_test = True
elif validator_with_data and isinstance(
validator_with_data.execution_engine.active_batch_data,
spark_DataFrame,
):
if "spark" in test["only_for"]:
generate_test = True
if not generate_test:
continue
if "suppress_test_for" in test and (
(
"sqlalchemy" in test["suppress_test_for"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
)
or (
"sqlite" in test["suppress_test_for"]
and sqliteDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
sqliteDialect,
)
)
or (
"postgresql" in test["suppress_test_for"]
and postgresqlDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
postgresqlDialect,
)
)
or (
"mysql" in test["suppress_test_for"]
and mysqlDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mysqlDialect,
)
)
or (
"mssql" in test["suppress_test_for"]
and mssqlDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mssqlDialect,
)
)
or (
"pandas" in test["suppress_test_for"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
pandas_DataFrame,
)
)
or (
"spark" in test["suppress_test_for"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
spark_DataFrame,
)
)
):
skip_test = True
# Known condition: SqlAlchemy does not support allow_cross_type_comparisons
if (
"allow_cross_type_comparisons" in test["in"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
):
skip_test = True
if not skip_test:
parametrized_tests.append(
{
"expectation_type": expectation_type,
"validator_with_data": validator_with_data,
"test": test,
"skip": skip_expectation or skip_test,
"backend": c,
}
)
return parametrized_tests | [
"def",
"generate_expectation_tests",
"(",
"expectation_type",
",",
"examples_config",
",",
"expectation_execution_engines_dict",
"=",
"None",
")",
":",
"parametrized_tests",
"=",
"[",
"]",
"# use the expectation_execution_engines_dict (if provided) to request only the appropriate backends",
"if",
"expectation_execution_engines_dict",
"is",
"not",
"None",
":",
"backends",
"=",
"build_test_backends_list",
"(",
"include_pandas",
"=",
"expectation_execution_engines_dict",
".",
"get",
"(",
"\"PandasExecutionEngine\"",
")",
"==",
"True",
",",
"include_spark",
"=",
"expectation_execution_engines_dict",
".",
"get",
"(",
"\"SparkDFExecutionEngine\"",
")",
"==",
"True",
",",
"include_sqlalchemy",
"=",
"expectation_execution_engines_dict",
".",
"get",
"(",
"\"SqlAlchemyExecutionEngine\"",
")",
"==",
"True",
",",
")",
"else",
":",
"backends",
"=",
"build_test_backends_list",
"(",
")",
"for",
"c",
"in",
"backends",
":",
"for",
"d",
"in",
"examples_config",
":",
"d",
"=",
"copy",
".",
"deepcopy",
"(",
"d",
")",
"datasets",
"=",
"[",
"]",
"if",
"candidate_test_is_on_temporary_notimplemented_list_cfe",
"(",
"c",
",",
"expectation_type",
")",
":",
"skip_expectation",
"=",
"True",
"schemas",
"=",
"validator_with_data",
"=",
"None",
"else",
":",
"skip_expectation",
"=",
"False",
"if",
"isinstance",
"(",
"d",
"[",
"\"data\"",
"]",
",",
"list",
")",
":",
"sqlite_db_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"sqlite_db\"",
"+",
"\"\"",
".",
"join",
"(",
"[",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
")",
"for",
"_",
"in",
"range",
"(",
"8",
")",
"]",
")",
"+",
"\".db\"",
",",
")",
")",
"for",
"dataset",
"in",
"d",
"[",
"\"data\"",
"]",
":",
"datasets",
".",
"append",
"(",
"get_test_validator_with_data",
"(",
"c",
",",
"dataset",
"[",
"\"data\"",
"]",
",",
"dataset",
".",
"get",
"(",
"\"schemas\"",
")",
",",
"table_name",
"=",
"dataset",
".",
"get",
"(",
"\"dataset_name\"",
")",
",",
"sqlite_db_path",
"=",
"sqlite_db_path",
",",
")",
")",
"validator_with_data",
"=",
"datasets",
"[",
"0",
"]",
"else",
":",
"schemas",
"=",
"d",
"[",
"\"schemas\"",
"]",
"if",
"\"schemas\"",
"in",
"d",
"else",
"None",
"validator_with_data",
"=",
"get_test_validator_with_data",
"(",
"c",
",",
"d",
"[",
"\"data\"",
"]",
",",
"schemas",
"=",
"schemas",
")",
"for",
"test",
"in",
"d",
"[",
"\"tests\"",
"]",
":",
"# use the expectation_execution_engines_dict of the expectation",
"# to exclude unimplemented backends from the testing",
"if",
"expectation_execution_engines_dict",
"is",
"not",
"None",
":",
"supress_test_for",
"=",
"test",
".",
"get",
"(",
"\"suppress_test_for\"",
")",
"if",
"supress_test_for",
"is",
"None",
":",
"supress_test_for",
"=",
"[",
"]",
"if",
"not",
"expectation_execution_engines_dict",
".",
"get",
"(",
"\"PandasExecutionEngine\"",
")",
":",
"supress_test_for",
".",
"append",
"(",
"\"pandas\"",
")",
"if",
"not",
"expectation_execution_engines_dict",
".",
"get",
"(",
"\"SqlAlchemyExecutionEngine\"",
")",
":",
"supress_test_for",
".",
"append",
"(",
"\"sqlalchemy\"",
")",
"if",
"not",
"expectation_execution_engines_dict",
".",
"get",
"(",
"\"SparkDFExecutionEngine\"",
")",
":",
"supress_test_for",
".",
"append",
"(",
"\"spark\"",
")",
"if",
"len",
"(",
"supress_test_for",
")",
">",
"0",
":",
"test",
"[",
"\"suppress_test_for\"",
"]",
"=",
"supress_test_for",
"generate_test",
"=",
"True",
"skip_test",
"=",
"False",
"if",
"\"only_for\"",
"in",
"test",
":",
"# if we're not on the \"only_for\" list, then never even generate the test",
"generate_test",
"=",
"False",
"if",
"not",
"isinstance",
"(",
"test",
"[",
"\"only_for\"",
"]",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid test specification.\"",
")",
"if",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
":",
"# Call out supported dialects",
"if",
"\"sqlalchemy\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
":",
"generate_test",
"=",
"True",
"elif",
"(",
"\"sqlite\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
"and",
"sqliteDialect",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"sqliteDialect",
",",
")",
")",
":",
"generate_test",
"=",
"True",
"elif",
"(",
"\"postgresql\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
"and",
"postgresqlDialect",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"postgresqlDialect",
",",
")",
")",
":",
"generate_test",
"=",
"True",
"elif",
"(",
"\"mysql\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
"and",
"mysqlDialect",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"mysqlDialect",
",",
")",
")",
":",
"generate_test",
"=",
"True",
"elif",
"(",
"\"mssql\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
"and",
"mssqlDialect",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"mssqlDialect",
",",
")",
")",
":",
"generate_test",
"=",
"True",
"elif",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"pandas_DataFrame",
",",
")",
":",
"if",
"\"pandas\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
":",
"generate_test",
"=",
"True",
"if",
"(",
"\"pandas_022\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
"or",
"\"pandas_023\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
")",
"and",
"int",
"(",
"pd",
".",
"__version__",
".",
"split",
"(",
"\".\"",
")",
"[",
"1",
"]",
")",
"in",
"[",
"22",
",",
"23",
"]",
":",
"generate_test",
"=",
"True",
"if",
"(",
"\"pandas>=24\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
")",
"and",
"int",
"(",
"pd",
".",
"__version__",
".",
"split",
"(",
"\".\"",
")",
"[",
"1",
"]",
")",
">",
"24",
":",
"generate_test",
"=",
"True",
"elif",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"spark_DataFrame",
",",
")",
":",
"if",
"\"spark\"",
"in",
"test",
"[",
"\"only_for\"",
"]",
":",
"generate_test",
"=",
"True",
"if",
"not",
"generate_test",
":",
"continue",
"if",
"\"suppress_test_for\"",
"in",
"test",
"and",
"(",
"(",
"\"sqlalchemy\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
")",
"or",
"(",
"\"sqlite\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"sqliteDialect",
"is",
"not",
"None",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"sqliteDialect",
",",
")",
")",
"or",
"(",
"\"postgresql\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"postgresqlDialect",
"is",
"not",
"None",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"postgresqlDialect",
",",
")",
")",
"or",
"(",
"\"mysql\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"mysqlDialect",
"is",
"not",
"None",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"mysqlDialect",
",",
")",
")",
"or",
"(",
"\"mssql\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"mssqlDialect",
"is",
"not",
"None",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
".",
"sql_engine_dialect",
",",
"mssqlDialect",
",",
")",
")",
"or",
"(",
"\"pandas\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"pandas_DataFrame",
",",
")",
")",
"or",
"(",
"\"spark\"",
"in",
"test",
"[",
"\"suppress_test_for\"",
"]",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"spark_DataFrame",
",",
")",
")",
")",
":",
"skip_test",
"=",
"True",
"# Known condition: SqlAlchemy does not support allow_cross_type_comparisons",
"if",
"(",
"\"allow_cross_type_comparisons\"",
"in",
"test",
"[",
"\"in\"",
"]",
"and",
"validator_with_data",
"and",
"isinstance",
"(",
"validator_with_data",
".",
"execution_engine",
".",
"active_batch_data",
",",
"SqlAlchemyBatchData",
",",
")",
")",
":",
"skip_test",
"=",
"True",
"if",
"not",
"skip_test",
":",
"parametrized_tests",
".",
"append",
"(",
"{",
"\"expectation_type\"",
":",
"expectation_type",
",",
"\"validator_with_data\"",
":",
"validator_with_data",
",",
"\"test\"",
":",
"test",
",",
"\"skip\"",
":",
"skip_expectation",
"or",
"skip_test",
",",
"\"backend\"",
":",
"c",
",",
"}",
")",
"return",
"parametrized_tests"
] | [
1458,
0
] | [
1739,
29
] | python | en | ['en', 'error', 'th'] | False |
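A sketch of the examples_config shape this generator walks, inferred from the dictionary lookups in the code above; the dataset values and the expectation name are illustrative:

    examples_config = [
        {
            "data": {"x": [1, 2, 3, None]},
            "schemas": {"spark": {"x": "IntegerType"}},
            "tests": [
                {
                    "title": "basic_positive_case",
                    "exact_match_out": False,
                    "in": {"column": "x", "min_value": 0},
                    "out": {"success": True},
                    "only_for": ["pandas", "sqlite"],  # optional backend filter
                }
            ],
        }
    ]
    engines = {
        "PandasExecutionEngine": True,
        "SqlAlchemyExecutionEngine": True,
        "SparkDFExecutionEngine": False,
    }
    cases = generate_expectation_tests(
        "expect_column_values_to_be_between", examples_config, engines
    )
    # Each returned case bundles expectation_type, validator_with_data, test, skip and backend.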
|
evaluate_json_test | (data_asset, expectation_type, test) |
This method will evaluate the result of a test build using the Great Expectations json test format.
NOTE: Tests can be suppressed for certain data types if the test contains the Key 'suppress_test_for' with a list
of DataAsset types to suppress, such as ['SQLAlchemy', 'Pandas'].
:param data_asset: (DataAsset) A Great Expectations DataAsset
:param expectation_type: (string) the name of the expectation to be run using the test input
:param test: (dict) a dictionary containing information for the test to be run. The dictionary must include:
- title: (string) the name of the test
- exact_match_out: (boolean) If true, match the 'out' dictionary exactly against the result of the expectation
- in: (dict or list) a dictionary of keyword arguments to use to evaluate the expectation or a list of positional arguments
- out: (dict) the dictionary keys against which to make assertions. Unless exact_match_out is true, keys must\
come from the following list:
- success
- observed_value
- unexpected_index_list
- unexpected_list
- details
- traceback_substring (if present, the string value will be expected as a substring of the exception_traceback)
:return: None. asserts correctness of results.
|
This method will evaluate the result of a test build using the Great Expectations json test format. | def evaluate_json_test(data_asset, expectation_type, test):
"""
This method will evaluate the result of a test build using the Great Expectations json test format.
NOTE: Tests can be suppressed for certain data types if the test contains the Key 'suppress_test_for' with a list
of DataAsset types to suppress, such as ['SQLAlchemy', 'Pandas'].
:param data_asset: (DataAsset) A Great Expectations DataAsset
:param expectation_type: (string) the name of the expectation to be run using the test input
:param test: (dict) a dictionary containing information for the test to be run. The dictionary must include:
- title: (string) the name of the test
- exact_match_out: (boolean) If true, match the 'out' dictionary exactly against the result of the expectation
- in: (dict or list) a dictionary of keyword arguments to use to evaluate the expectation or a list of positional arguments
- out: (dict) the dictionary keys against which to make assertions. Unless exact_match_out is true, keys must\
come from the following list:
- success
- observed_value
- unexpected_index_list
- unexpected_list
- details
- traceback_substring (if present, the string value will be expected as a substring of the exception_traceback)
:return: None. asserts correctness of results.
"""
data_asset.set_default_expectation_argument("result_format", "COMPLETE")
data_asset.set_default_expectation_argument("include_config", False)
if "title" not in test:
raise ValueError("Invalid test configuration detected: 'title' is required.")
if "exact_match_out" not in test:
raise ValueError(
"Invalid test configuration detected: 'exact_match_out' is required."
)
if "in" not in test:
raise ValueError("Invalid test configuration detected: 'in' is required.")
if "out" not in test:
raise ValueError("Invalid test configuration detected: 'out' is required.")
# Support tests with positional arguments
if isinstance(test["in"], list):
result = getattr(data_asset, expectation_type)(*test["in"])
# As well as keyword arguments
else:
result = getattr(data_asset, expectation_type)(**test["in"])
check_json_test_result(test=test, result=result, data_asset=data_asset) | [
"def",
"evaluate_json_test",
"(",
"data_asset",
",",
"expectation_type",
",",
"test",
")",
":",
"data_asset",
".",
"set_default_expectation_argument",
"(",
"\"result_format\"",
",",
"\"COMPLETE\"",
")",
"data_asset",
".",
"set_default_expectation_argument",
"(",
"\"include_config\"",
",",
"False",
")",
"if",
"\"title\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'title' is required.\"",
")",
"if",
"\"exact_match_out\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'exact_match_out' is required.\"",
")",
"if",
"\"in\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'in' is required.\"",
")",
"if",
"\"out\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'out' is required.\"",
")",
"# Support tests with positional arguments",
"if",
"isinstance",
"(",
"test",
"[",
"\"in\"",
"]",
",",
"list",
")",
":",
"result",
"=",
"getattr",
"(",
"data_asset",
",",
"expectation_type",
")",
"(",
"*",
"test",
"[",
"\"in\"",
"]",
")",
"# As well as keyword arguments",
"else",
":",
"result",
"=",
"getattr",
"(",
"data_asset",
",",
"expectation_type",
")",
"(",
"*",
"*",
"test",
"[",
"\"in\"",
"]",
")",
"check_json_test_result",
"(",
"test",
"=",
"test",
",",
"result",
"=",
"result",
",",
"data_asset",
"=",
"data_asset",
")"
] | [
1742,
0
] | [
1790,
75
] | python | en | ['en', 'error', 'th'] | False |
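A sketch of one test case in the JSON-style format this evaluator checks for; the expectation name and values are illustrative:

    test = {
        "title": "values_in_set_positive_case",
        "exact_match_out": False,
        "in": {"column": "x", "value_set": [1, 2, 3]},  # dict -> keyword args; a list would be passed positionally
        "out": {"success": True, "unexpected_list": []},
    }
    evaluate_json_test(data_asset, "expect_column_values_to_be_in_set", test)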
evaluate_json_test_cfe | (validator, expectation_type, test) |
This method will evaluate the result of a test build using the Great Expectations json test format.
NOTE: Tests can be suppressed for certain data types if the test contains the Key 'suppress_test_for' with a list
of DataAsset types to suppress, such as ['SQLAlchemy', 'Pandas'].
:param validator: (Validator) A Great Expectations Validator
:param expectation_type: (string) the name of the expectation to be run using the test input
:param test: (dict) a dictionary containing information for the test to be run. The dictionary must include:
- title: (string) the name of the test
- exact_match_out: (boolean) If true, match the 'out' dictionary exactly against the result of the expectation
- in: (dict or list) a dictionary of keyword arguments to use to evaluate the expectation or a list of positional arguments
- out: (dict) the dictionary keys against which to make assertions. Unless exact_match_out is true, keys must\
come from the following list:
- success
- observed_value
- unexpected_index_list
- unexpected_list
- details
- traceback_substring (if present, the string value will be expected as a substring of the exception_traceback)
:return: None. asserts correctness of results.
|
This method will evaluate the result of a test build using the Great Expectations json test format. | def evaluate_json_test_cfe(validator, expectation_type, test):
"""
This method will evaluate the result of a test build using the Great Expectations json test format.
NOTE: Tests can be suppressed for certain data types if the test contains the Key 'suppress_test_for' with a list
of DataAsset types to suppress, such as ['SQLAlchemy', 'Pandas'].
:param validator: (Validator) A Great Expectations Validator
:param expectation_type: (string) the name of the expectation to be run using the test input
:param test: (dict) a dictionary containing information for the test to be run. The dictionary must include:
- title: (string) the name of the test
- exact_match_out: (boolean) If true, match the 'out' dictionary exactly against the result of the expectation
- in: (dict or list) a dictionary of keyword arguments to use to evaluate the expectation or a list of positional arguments
- out: (dict) the dictionary keys against which to make assertions. Unless exact_match_out is true, keys must\
come from the following list:
- success
- observed_value
- unexpected_index_list
- unexpected_list
- details
- traceback_substring (if present, the string value will be expected as a substring of the exception_traceback)
:return: None. asserts correctness of results.
"""
expectation_suite = ExpectationSuite("json_test_suite")
# noinspection PyProtectedMember
validator._initialize_expectations(expectation_suite=expectation_suite)
# validator.set_default_expectation_argument("result_format", "COMPLETE")
# validator.set_default_expectation_argument("include_config", False)
if "title" not in test:
raise ValueError("Invalid test configuration detected: 'title' is required.")
if "exact_match_out" not in test:
raise ValueError(
"Invalid test configuration detected: 'exact_match_out' is required."
)
if "in" not in test:
raise ValueError("Invalid test configuration detected: 'in' is required.")
if "out" not in test:
raise ValueError("Invalid test configuration detected: 'out' is required.")
kwargs = copy.deepcopy(test["in"])
if isinstance(test["in"], list):
result = getattr(validator, expectation_type)(*kwargs)
# As well as keyword arguments
else:
runtime_kwargs = {"result_format": "COMPLETE", "include_config": False}
runtime_kwargs.update(kwargs)
result = getattr(validator, expectation_type)(**runtime_kwargs)
check_json_test_result(
test=test,
result=result,
data_asset=validator.execution_engine.active_batch_data,
) | [
"def",
"evaluate_json_test_cfe",
"(",
"validator",
",",
"expectation_type",
",",
"test",
")",
":",
"expectation_suite",
"=",
"ExpectationSuite",
"(",
"\"json_test_suite\"",
")",
"# noinspection PyProtectedMember",
"validator",
".",
"_initialize_expectations",
"(",
"expectation_suite",
"=",
"expectation_suite",
")",
"# validator.set_default_expectation_argument(\"result_format\", \"COMPLETE\")",
"# validator.set_default_expectation_argument(\"include_config\", False)",
"if",
"\"title\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'title' is required.\"",
")",
"if",
"\"exact_match_out\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'exact_match_out' is required.\"",
")",
"if",
"\"in\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'in' is required.\"",
")",
"if",
"\"out\"",
"not",
"in",
"test",
":",
"raise",
"ValueError",
"(",
"\"Invalid test configuration detected: 'out' is required.\"",
")",
"kwargs",
"=",
"copy",
".",
"deepcopy",
"(",
"test",
"[",
"\"in\"",
"]",
")",
"if",
"isinstance",
"(",
"test",
"[",
"\"in\"",
"]",
",",
"list",
")",
":",
"result",
"=",
"getattr",
"(",
"validator",
",",
"expectation_type",
")",
"(",
"*",
"kwargs",
")",
"# As well as keyword arguments",
"else",
":",
"runtime_kwargs",
"=",
"{",
"\"result_format\"",
":",
"\"COMPLETE\"",
",",
"\"include_config\"",
":",
"False",
"}",
"runtime_kwargs",
".",
"update",
"(",
"kwargs",
")",
"result",
"=",
"getattr",
"(",
"validator",
",",
"expectation_type",
")",
"(",
"*",
"*",
"runtime_kwargs",
")",
"check_json_test_result",
"(",
"test",
"=",
"test",
",",
"result",
"=",
"result",
",",
"data_asset",
"=",
"validator",
".",
"execution_engine",
".",
"active_batch_data",
",",
")"
] | [
1793,
0
] | [
1850,
5
] | python | en | ['en', 'error', 'th'] | False |
is_call_invocation | (declaration_string) |
Returns True if `declaration_string` is a function invocation.
:param declaration_string: string that should be checked for pattern.
:type declaration_string: str
:rtype: bool
|
Returns True if `declaration_string` is a function invocation. | def is_call_invocation(declaration_string):
"""
Returns True if `declaration_string` is a function invocation.
:param declaration_string: string that should be checked for pattern.
:type declaration_string: str
:rtype: bool
"""
return __THE_PARSER.has_pattern(declaration_string) | [
"def",
"is_call_invocation",
"(",
"declaration_string",
")",
":",
"return",
"__THE_PARSER",
".",
"has_pattern",
"(",
"declaration_string",
")"
] | [
26,
0
] | [
36,
55
] | python | en | ['en', 'error', 'th'] | False |
name | (declaration_string) |
Returns the name of a function.
:type declaration_string: str
:rtype: str
|
Returns the name of a function. | def name(declaration_string):
"""
Returns the name of a function.
:type declaration_string: str
:rtype: str
"""
return __THE_PARSER.name(declaration_string) | [
"def",
"name",
"(",
"declaration_string",
")",
":",
"return",
"__THE_PARSER",
".",
"name",
"(",
"declaration_string",
")"
] | [
39,
0
] | [
47,
48
] | python | en | ['en', 'error', 'th'] | False |
args | (declaration_string) |
Returns list of function arguments
:type declaration_string: str
:rtype: [str]
|
Returns list of function arguments | def args(declaration_string):
"""
Returns list of function arguments
:type declaration_string: str
:rtype: [str]
"""
return __THE_PARSER.args(declaration_string) | [
"def",
"args",
"(",
"declaration_string",
")",
":",
"return",
"__THE_PARSER",
".",
"args",
"(",
"declaration_string",
")"
] | [
50,
0
] | [
58,
48
] | python | en | ['en', 'error', 'th'] | False |
find_args | (text, start=None) |
Finds arguments within function invocation.
:type text: str
:rtype: [ arguments ] or :data:NOT_FOUND if arguments could not be found.
|
Finds arguments within function invocation. | def find_args(text, start=None):
"""
Finds arguments within function invocation.
:type text: str
:rtype: [ arguments ] or :data:NOT_FOUND if arguments could not be found.
"""
return __THE_PARSER.find_args(text, start) | [
"def",
"find_args",
"(",
"text",
",",
"start",
"=",
"None",
")",
":",
"return",
"__THE_PARSER",
".",
"find_args",
"(",
"text",
",",
"start",
")"
] | [
64,
0
] | [
72,
46
] | python | en | ['en', 'error', 'th'] | False |
split | (declaration_string) |
Returns (name, [arguments] )
|
Returns (name, [arguments] ) | def split(declaration_string):
"""
Returns (name, [arguments] )
"""
return __THE_PARSER.split(declaration_string) | [
"def",
"split",
"(",
"declaration_string",
")",
":",
"return",
"__THE_PARSER",
".",
"split",
"(",
"declaration_string",
")"
] | [
75,
0
] | [
80,
49
] | python | en | ['en', 'error', 'th'] | False |
split_recursive | (declaration_string) |
Returns [(name, [arguments])].
|
Returns [(name, [arguments])]. | def split_recursive(declaration_string):
"""
Returns [(name, [arguments])].
"""
return __THE_PARSER.split_recursive(declaration_string) | [
"def",
"split_recursive",
"(",
"declaration_string",
")",
":",
"return",
"__THE_PARSER",
".",
"split_recursive",
"(",
"declaration_string",
")"
] | [
83,
0
] | [
88,
59
] | python | en | ['en', 'error', 'th'] | False |
join | (name_, args_, arg_separator=None) |
Returns name( argument_1, argument_2, ..., argument_n ).
|
Returns name( argument_1, argument_2, ..., argument_n ). | def join(name_, args_, arg_separator=None):
"""
Returns name( argument_1, argument_2, ..., argument_n ).
"""
return __THE_PARSER.join(name_, args_, arg_separator) | [
"def",
"join",
"(",
"name_",
",",
"args_",
",",
"arg_separator",
"=",
"None",
")",
":",
"return",
"__THE_PARSER",
".",
"join",
"(",
"name_",
",",
"args_",
",",
"arg_separator",
")"
] | [
91,
0
] | [
96,
57
] | python | en | ['en', 'error', 'th'] | False |
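All of these helpers defer to a module-level parser object; a hedged round-trip sketch, assuming the module is importable as shown and given a simple invocation string, could look like:

    decl = "do_smth(x, y(0), z)"
    if is_call_invocation(decl):
        invocation_name, invocation_args = split(decl)    # ("do_smth", ["x", "y(0)", "z"])
        nested = split_recursive(decl)                     # list of (name, args) pairs, including the inner y(0) call
        rebuilt = join(invocation_name, invocation_args)   # "do_smth( x, y(0), z )", modulo the parser's exact spacing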
MakeGuid | (name, seed='msvs_new') | Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can explicitly
determine the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
| Returns a GUID for the specified target name. | def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can explicitly
determine the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid | [
"def",
"MakeGuid",
"(",
"name",
",",
"seed",
"=",
"'msvs_new'",
")",
":",
"# Calculate a MD5 signature for the seed and name.",
"d",
"=",
"_new_md5",
"(",
"str",
"(",
"seed",
")",
"+",
"str",
"(",
"name",
")",
")",
".",
"hexdigest",
"(",
")",
".",
"upper",
"(",
")",
"# Convert most of the signature to GUID form (discard the rest)",
"guid",
"=",
"(",
"'{'",
"+",
"d",
"[",
":",
"8",
"]",
"+",
"'-'",
"+",
"d",
"[",
"8",
":",
"12",
"]",
"+",
"'-'",
"+",
"d",
"[",
"12",
":",
"16",
"]",
"+",
"'-'",
"+",
"d",
"[",
"16",
":",
"20",
"]",
"+",
"'-'",
"+",
"d",
"[",
"20",
":",
"32",
"]",
"+",
"'}'",
")",
"return",
"guid"
] | [
36,
0
] | [
56,
13
] | python | en | ['en', 'en', 'en'] | True |
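The hashing scheme above is easy to reproduce standalone; this sketch mirrors it for Python 3, where the seed+name string must be encoded before hashing (the original, written for Python 2, passes the raw str):

    import hashlib

    def make_guid(name, seed='msvs_new'):
        # MD5 of seed + name, reshaped into a GUID-like string, as in MakeGuid above.
        d = hashlib.md5((str(seed) + str(name)).encode('utf-8')).hexdigest().upper()
        return ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
                + '-' + d[20:32] + '}')

    # The same name/seed pair always yields the same GUID, so cross-references stay stable.
    assert make_guid('my_target') == make_guid('my_target')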
MSVSFolder.__init__ | (self, path, name = None, entries = None,
guid = None, items = None) | Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
| Initializes the folder. | def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder'] | [
"def",
"__init__",
"(",
"self",
",",
"path",
",",
"name",
"=",
"None",
",",
"entries",
"=",
"None",
",",
"guid",
"=",
"None",
",",
"items",
"=",
"None",
")",
":",
"if",
"name",
":",
"self",
".",
"name",
"=",
"name",
"else",
":",
"# Use last layer.",
"self",
".",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"self",
".",
"path",
"=",
"path",
"self",
".",
"guid",
"=",
"guid",
"# Copy passed lists (or set to empty lists)",
"self",
".",
"entries",
"=",
"sorted",
"(",
"list",
"(",
"entries",
"or",
"[",
"]",
")",
")",
"self",
".",
"items",
"=",
"list",
"(",
"items",
"or",
"[",
"]",
")",
"self",
".",
"entry_type_guid",
"=",
"ENTRY_TYPE_GUIDS",
"[",
"'folder'",
"]"
] | [
70,
2
] | [
96,
53
] | python | en | ['en', 'zu', 'en'] | True |
MSVSProject.__init__ | (self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None) | Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
be used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
| Initializes the project. | def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
be used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None | [
"def",
"__init__",
"(",
"self",
",",
"path",
",",
"name",
"=",
"None",
",",
"dependencies",
"=",
"None",
",",
"guid",
"=",
"None",
",",
"spec",
"=",
"None",
",",
"build_file",
"=",
"None",
",",
"config_platform_overrides",
"=",
"None",
",",
"fixpath_prefix",
"=",
"None",
")",
":",
"self",
".",
"path",
"=",
"path",
"self",
".",
"guid",
"=",
"guid",
"self",
".",
"spec",
"=",
"spec",
"self",
".",
"build_file",
"=",
"build_file",
"# Use project filename if name not specified",
"self",
".",
"name",
"=",
"name",
"or",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
"[",
"0",
"]",
"# Copy passed lists (or set to empty lists)",
"self",
".",
"dependencies",
"=",
"list",
"(",
"dependencies",
"or",
"[",
"]",
")",
"self",
".",
"entry_type_guid",
"=",
"ENTRY_TYPE_GUIDS",
"[",
"'project'",
"]",
"if",
"config_platform_overrides",
":",
"self",
".",
"config_platform_overrides",
"=",
"config_platform_overrides",
"else",
":",
"self",
".",
"config_platform_overrides",
"=",
"{",
"}",
"self",
".",
"fixpath_prefix",
"=",
"fixpath_prefix",
"self",
".",
"msbuild_toolset",
"=",
"None"
] | [
111,
2
] | [
146,
31
] | python | en | ['en', 'en', 'en'] | True |
MSVSSolution.__init__ | (self, path, version, entries=None, variants=None,
websiteProperties=True) | Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
| Initializes the solution. | def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write() | [
"def",
"__init__",
"(",
"self",
",",
"path",
",",
"version",
",",
"entries",
"=",
"None",
",",
"variants",
"=",
"None",
",",
"websiteProperties",
"=",
"True",
")",
":",
"self",
".",
"path",
"=",
"path",
"self",
".",
"websiteProperties",
"=",
"websiteProperties",
"self",
".",
"version",
"=",
"version",
"# Copy passed lists (or set to empty lists)",
"self",
".",
"entries",
"=",
"list",
"(",
"entries",
"or",
"[",
"]",
")",
"if",
"variants",
":",
"# Copy passed list",
"self",
".",
"variants",
"=",
"variants",
"[",
":",
"]",
"else",
":",
"# Use default",
"self",
".",
"variants",
"=",
"[",
"'Debug|Win32'",
",",
"'Release|Win32'",
"]",
"# TODO(rspangler): Need to be able to handle a mapping of solution config",
"# to project config. Should we be able to handle variants being a dict,",
"# or add a separate variant_map variable? If it's a dict, we can't",
"# guarantee the order of variants since dict keys aren't ordered.",
"# TODO(rspangler): Automatically write to disk for now; should delay until",
"# node-evaluation time.",
"self",
".",
"Write",
"(",
")"
] | [
177,
2
] | [
212,
16
] | python | en | ['en', 'en', 'en'] | True |
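A hedged sketch of how the containers above compose; the paths are made up, and version is assumed to be an MSVSVersion-style object exposing SolutionVersion() and Description():

    core = MSVSProject('src/core.vcproj', name='core')
    app = MSVSProject('src/app.vcproj', name='app', dependencies=[core])
    libs = MSVSFolder('libs', entries=[core])  # solution folder that nests the core project

    # MSVSSolution.__init__ writes the .sln to disk immediately (it calls self.Write() at the end).
    solution = MSVSSolution('build/all.sln', version, entries=[libs, app],
                            variants=['Debug|Win32', 'Release|Win32'])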
MSVSSolution.Write | (self, writer=gyp.common.WriteOnDiff) | Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
| Writes the solution file to disk. | def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close() | [
"def",
"Write",
"(",
"self",
",",
"writer",
"=",
"gyp",
".",
"common",
".",
"WriteOnDiff",
")",
":",
"# Walk the entry tree and collect all the folders and projects.",
"all_entries",
"=",
"set",
"(",
")",
"entries_to_check",
"=",
"self",
".",
"entries",
"[",
":",
"]",
"while",
"entries_to_check",
":",
"e",
"=",
"entries_to_check",
".",
"pop",
"(",
"0",
")",
"# If this entry has been visited, nothing to do.",
"if",
"e",
"in",
"all_entries",
":",
"continue",
"all_entries",
".",
"add",
"(",
"e",
")",
"# If this is a folder, check its entries too.",
"if",
"isinstance",
"(",
"e",
",",
"MSVSFolder",
")",
":",
"entries_to_check",
"+=",
"e",
".",
"entries",
"all_entries",
"=",
"sorted",
"(",
"all_entries",
")",
"# Open file and print header",
"f",
"=",
"writer",
"(",
"self",
".",
"path",
")",
"f",
".",
"write",
"(",
"'Microsoft Visual Studio Solution File, '",
"'Format Version %s\\r\\n'",
"%",
"self",
".",
"version",
".",
"SolutionVersion",
"(",
")",
")",
"f",
".",
"write",
"(",
"'# %s\\r\\n'",
"%",
"self",
".",
"version",
".",
"Description",
"(",
")",
")",
"# Project entries",
"sln_root",
"=",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"path",
")",
"[",
"0",
"]",
"for",
"e",
"in",
"all_entries",
":",
"relative_path",
"=",
"gyp",
".",
"common",
".",
"RelativePath",
"(",
"e",
".",
"path",
",",
"sln_root",
")",
"# msbuild does not accept an empty folder_name.",
"# use '.' in case relative_path is empty.",
"folder_name",
"=",
"relative_path",
".",
"replace",
"(",
"'/'",
",",
"'\\\\'",
")",
"or",
"'.'",
"f",
".",
"write",
"(",
"'Project(\"%s\") = \"%s\", \"%s\", \"%s\"\\r\\n'",
"%",
"(",
"e",
".",
"entry_type_guid",
",",
"# Entry type GUID",
"e",
".",
"name",
",",
"# Folder name",
"folder_name",
",",
"# Folder name (again)",
"e",
".",
"get_guid",
"(",
")",
",",
"# Entry GUID",
")",
")",
"# TODO(rspangler): Need a way to configure this stuff",
"if",
"self",
".",
"websiteProperties",
":",
"f",
".",
"write",
"(",
"'\\tProjectSection(WebsiteProperties) = preProject\\r\\n'",
"'\\t\\tDebug.AspNetCompiler.Debug = \"True\"\\r\\n'",
"'\\t\\tRelease.AspNetCompiler.Debug = \"False\"\\r\\n'",
"'\\tEndProjectSection\\r\\n'",
")",
"if",
"isinstance",
"(",
"e",
",",
"MSVSFolder",
")",
":",
"if",
"e",
".",
"items",
":",
"f",
".",
"write",
"(",
"'\\tProjectSection(SolutionItems) = preProject\\r\\n'",
")",
"for",
"i",
"in",
"e",
".",
"items",
":",
"f",
".",
"write",
"(",
"'\\t\\t%s = %s\\r\\n'",
"%",
"(",
"i",
",",
"i",
")",
")",
"f",
".",
"write",
"(",
"'\\tEndProjectSection\\r\\n'",
")",
"if",
"isinstance",
"(",
"e",
",",
"MSVSProject",
")",
":",
"if",
"e",
".",
"dependencies",
":",
"f",
".",
"write",
"(",
"'\\tProjectSection(ProjectDependencies) = postProject\\r\\n'",
")",
"for",
"d",
"in",
"e",
".",
"dependencies",
":",
"f",
".",
"write",
"(",
"'\\t\\t%s = %s\\r\\n'",
"%",
"(",
"d",
".",
"get_guid",
"(",
")",
",",
"d",
".",
"get_guid",
"(",
")",
")",
")",
"f",
".",
"write",
"(",
"'\\tEndProjectSection\\r\\n'",
")",
"f",
".",
"write",
"(",
"'EndProject\\r\\n'",
")",
"# Global section",
"f",
".",
"write",
"(",
"'Global\\r\\n'",
")",
"# Configurations (variants)",
"f",
".",
"write",
"(",
"'\\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\\r\\n'",
")",
"for",
"v",
"in",
"self",
".",
"variants",
":",
"f",
".",
"write",
"(",
"'\\t\\t%s = %s\\r\\n'",
"%",
"(",
"v",
",",
"v",
")",
")",
"f",
".",
"write",
"(",
"'\\tEndGlobalSection\\r\\n'",
")",
"# Sort config guids for easier diffing of solution changes.",
"config_guids",
"=",
"[",
"]",
"config_guids_overrides",
"=",
"{",
"}",
"for",
"e",
"in",
"all_entries",
":",
"if",
"isinstance",
"(",
"e",
",",
"MSVSProject",
")",
":",
"config_guids",
".",
"append",
"(",
"e",
".",
"get_guid",
"(",
")",
")",
"config_guids_overrides",
"[",
"e",
".",
"get_guid",
"(",
")",
"]",
"=",
"e",
".",
"config_platform_overrides",
"config_guids",
".",
"sort",
"(",
")",
"f",
".",
"write",
"(",
"'\\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\\r\\n'",
")",
"for",
"g",
"in",
"config_guids",
":",
"for",
"v",
"in",
"self",
".",
"variants",
":",
"nv",
"=",
"config_guids_overrides",
"[",
"g",
"]",
".",
"get",
"(",
"v",
",",
"v",
")",
"# Pick which project configuration to build for this solution",
"# configuration.",
"f",
".",
"write",
"(",
"'\\t\\t%s.%s.ActiveCfg = %s\\r\\n'",
"%",
"(",
"g",
",",
"# Project GUID",
"v",
",",
"# Solution build configuration",
"nv",
",",
"# Project build config for that solution config",
")",
")",
"# Enable project in this solution configuration.",
"f",
".",
"write",
"(",
"'\\t\\t%s.%s.Build.0 = %s\\r\\n'",
"%",
"(",
"g",
",",
"# Project GUID",
"v",
",",
"# Solution build configuration",
"nv",
",",
"# Project build config for that solution config",
")",
")",
"f",
".",
"write",
"(",
"'\\tEndGlobalSection\\r\\n'",
")",
"# TODO(rspangler): Should be able to configure this stuff too (though I've",
"# never seen this be any different)",
"f",
".",
"write",
"(",
"'\\tGlobalSection(SolutionProperties) = preSolution\\r\\n'",
")",
"f",
".",
"write",
"(",
"'\\t\\tHideSolutionNode = FALSE\\r\\n'",
")",
"f",
".",
"write",
"(",
"'\\tEndGlobalSection\\r\\n'",
")",
"# Folder mappings",
"# Omit this section if there are no folders",
"if",
"any",
"(",
"[",
"e",
".",
"entries",
"for",
"e",
"in",
"all_entries",
"if",
"isinstance",
"(",
"e",
",",
"MSVSFolder",
")",
"]",
")",
":",
"f",
".",
"write",
"(",
"'\\tGlobalSection(NestedProjects) = preSolution\\r\\n'",
")",
"for",
"e",
"in",
"all_entries",
":",
"if",
"not",
"isinstance",
"(",
"e",
",",
"MSVSFolder",
")",
":",
"continue",
"# Does not apply to projects, only folders",
"for",
"subentry",
"in",
"e",
".",
"entries",
":",
"f",
".",
"write",
"(",
"'\\t\\t%s = %s\\r\\n'",
"%",
"(",
"subentry",
".",
"get_guid",
"(",
")",
",",
"e",
".",
"get_guid",
"(",
")",
")",
")",
"f",
".",
"write",
"(",
"'\\tEndGlobalSection\\r\\n'",
")",
"f",
".",
"write",
"(",
"'EndGlobal\\r\\n'",
")",
"f",
".",
"close",
"(",
")"
] | [
215,
2
] | [
339,
13
] | python | en | ['en', 'en', 'en'] | True |
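The `MSVSSolution` constructor (tokens above) and `Write` are easiest to read together. Below is a minimal, hedged sketch of driving them directly, assuming gyp's `pylib` package is importable; the project path, names, and the stub version object are illustrative only (the stub just supplies the two methods `Write` actually calls).

```python
# Sketch only -- not part of the dataset row. Assumes gyp's pylib is on sys.path;
# file names and the stub version object are placeholders for illustration.
import gyp.MSVSNew as msvs_new


class StubVersion(object):
    """Provides the two methods Write() uses: SolutionVersion() and Description()."""

    def SolutionVersion(self):
        return '12.00'

    def Description(self):
        return 'Visual Studio 2013'


project = msvs_new.MSVSProject('foo\\foo.vcxproj', name='foo')

# Per the constructor tokens above, entries/variants are copied and Write() is
# invoked from __init__, so constructing the solution already emits foo.sln
# with the Project/Global sections produced by Write().
solution = msvs_new.MSVSSolution(
    'foo.sln',
    version=StubVersion(),
    entries=[project],
    variants=['Debug|Win32', 'Release|Win32'],
)
```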
metric_value | (
engine: Type[ExecutionEngine],
metric_fn_type: Union[str, MetricFunctionTypes] = MetricFunctionTypes.VALUE,
**kwargs
) | The metric decorator annotates a method | The metric decorator annotates a method | def metric_value(
engine: Type[ExecutionEngine],
metric_fn_type: Union[str, MetricFunctionTypes] = MetricFunctionTypes.VALUE,
**kwargs
):
"""The metric decorator annotates a method"""
def wrapper(metric_fn: Callable):
@wraps(metric_fn)
def inner_func(*args, **kwargs):
return metric_fn(*args, **kwargs)
inner_func.metric_engine = engine
inner_func.metric_fn_type = MetricFunctionTypes(metric_fn_type)
inner_func.metric_definition_kwargs = kwargs
return inner_func
return wrapper | [
"def",
"metric_value",
"(",
"engine",
":",
"Type",
"[",
"ExecutionEngine",
"]",
",",
"metric_fn_type",
":",
"Union",
"[",
"str",
",",
"MetricFunctionTypes",
"]",
"=",
"MetricFunctionTypes",
".",
"VALUE",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapper",
"(",
"metric_fn",
":",
"Callable",
")",
":",
"@",
"wraps",
"(",
"metric_fn",
")",
"def",
"inner_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"metric_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"inner_func",
".",
"metric_engine",
"=",
"engine",
"inner_func",
".",
"metric_fn_type",
"=",
"MetricFunctionTypes",
"(",
"metric_fn_type",
")",
"inner_func",
".",
"metric_definition_kwargs",
"=",
"kwargs",
"return",
"inner_func",
"return",
"wrapper"
] | [
25,
0
] | [
42,
18
] | python | en | ['en', 'en', 'en'] | True |
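A short usage sketch for the decorator defined in the entry above. In practice it decorates methods on a `MetricProvider` subclass; here a bare function keeps the example small. The import paths are assumptions and vary between Great Expectations releases; only the attributes asserted at the end come directly from the entry itself.

```python
# Sketch only: import paths are assumed and may differ across releases.
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.metrics.metric_provider import metric_value


@metric_value(engine=PandasExecutionEngine)
def demo_row_count(cls, execution_engine, metric_domain_kwargs,
                   metric_value_kwargs, metrics, runtime_configuration):
    # Body elided; a real provider method computes its value from the batch
    # resolved through execution_engine and the `metrics` dependency dict.
    return 0


# The decorator only annotates the callable; the provider registry later
# dispatches on the attributes set on inner_func in the entry above.
assert demo_row_count.metric_engine is PandasExecutionEngine
assert demo_row_count.metric_fn_type.value == "value"  # MetricFunctionTypes.VALUE
assert demo_row_count.metric_definition_kwargs == {}
```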
metric_partial | (
engine: Type[ExecutionEngine],
partial_fn_type: Union[str, MetricPartialFunctionTypes],
domain_type: Union[str, MetricDomainTypes],
**kwargs
) | The metric decorator annotates a method | The metric decorator annotates a method | def metric_partial(
engine: Type[ExecutionEngine],
partial_fn_type: Union[str, MetricPartialFunctionTypes],
domain_type: Union[str, MetricDomainTypes],
**kwargs
):
"""The metric decorator annotates a method"""
def wrapper(metric_fn: Callable):
@wraps(metric_fn)
def inner_func(*args, **kwargs):
return metric_fn(*args, **kwargs)
inner_func.metric_engine = engine
inner_func.metric_fn_type = MetricPartialFunctionTypes(
partial_fn_type
) # raises ValueError if unknown type
inner_func.domain_type = MetricDomainTypes(domain_type)
inner_func.metric_definition_kwargs = kwargs
return inner_func
return wrapper | [
"def",
"metric_partial",
"(",
"engine",
":",
"Type",
"[",
"ExecutionEngine",
"]",
",",
"partial_fn_type",
":",
"Union",
"[",
"str",
",",
"MetricPartialFunctionTypes",
"]",
",",
"domain_type",
":",
"Union",
"[",
"str",
",",
"MetricDomainTypes",
"]",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapper",
"(",
"metric_fn",
":",
"Callable",
")",
":",
"@",
"wraps",
"(",
"metric_fn",
")",
"def",
"inner_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"metric_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"inner_func",
".",
"metric_engine",
"=",
"engine",
"inner_func",
".",
"metric_fn_type",
"=",
"MetricPartialFunctionTypes",
"(",
"partial_fn_type",
")",
"# raises ValueError if unknown type",
"inner_func",
".",
"domain_type",
"=",
"MetricDomainTypes",
"(",
"domain_type",
")",
"inner_func",
".",
"metric_definition_kwargs",
"=",
"kwargs",
"return",
"inner_func",
"return",
"wrapper"
] | [
45,
0
] | [
66,
18
] | python | en | ['en', 'en', 'en'] | True |
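The companion decorator above follows the same pattern but additionally records the partial-function type and the domain type. A hedged sketch, reusing names the entry's own code references; the `AGGREGATE_FN` member and the SqlAlchemy engine import path are assumptions.

```python
# Sketch only: assumes these names are importable from the same module as the
# decorator; the enum members used here are assumptions.
from great_expectations.execution_engine import SqlAlchemyExecutionEngine
from great_expectations.expectations.metrics.metric_provider import (
    MetricDomainTypes,
    MetricPartialFunctionTypes,
    metric_partial,
)


@metric_partial(
    engine=SqlAlchemyExecutionEngine,
    partial_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,
    domain_type=MetricDomainTypes.COLUMN,
)
def demo_column_max(cls, execution_engine, metric_domain_kwargs,
                    metric_value_kwargs, metrics, runtime_configuration):
    # Body elided; an aggregate partial returns an unexecuted aggregate
    # expression plus its domain kwargs rather than a finished value.
    pass


assert demo_column_max.domain_type == MetricDomainTypes.COLUMN
assert demo_column_max.metric_fn_type == MetricPartialFunctionTypes.AGGREGATE_FN
```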
MetricProvider.get_evaluation_dependencies | (
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) | This should return a dictionary:
{
"dependency_name": MetricConfiguration,
...
}
| This should return a dictionary: | def get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""This should return a dictionary:
{
"dependency_name": MetricConfiguration,
...
}
"""
return (
cls._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
or dict()
) | [
"def",
"get_evaluation_dependencies",
"(",
"cls",
",",
"metric",
":",
"MetricConfiguration",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
"=",
"None",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"runtime_configuration",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"return",
"(",
"cls",
".",
"_get_evaluation_dependencies",
"(",
"metric",
"=",
"metric",
",",
"configuration",
"=",
"configuration",
",",
"execution_engine",
"=",
"execution_engine",
",",
"runtime_configuration",
"=",
"runtime_configuration",
",",
")",
"or",
"dict",
"(",
")",
")"
] | [
180,
4
] | [
202,
9
] | python | en | ['en', 'en', 'en'] | True |
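A sketch of the shape the docstring describes: a subclass hook returning `{"dependency_name": MetricConfiguration}`. The `MetricConfiguration` import path and constructor arguments are assumptions.

```python
# Sketch only: import path and MetricConfiguration arguments are assumptions.
from great_expectations.validator.validation_graph import MetricConfiguration


def _get_evaluation_dependencies(cls, metric, configuration=None,
                                 execution_engine=None, runtime_configuration=None):
    # e.g. a column.mean metric that first needs the column's non-null count:
    return {
        "column_values.count": MetricConfiguration(
            metric_name="column_values.count",
            metric_domain_kwargs=metric.metric_domain_kwargs,
            metric_value_kwargs=None,
        ),
    }
```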
test_requirements_files | () | requirements.txt should be a subset of requirements-dev.txt | requirements.txt should be a subset of requirements-dev.txt | def test_requirements_files():
"""requirements.txt should be a subset of requirements-dev.txt"""
with open(file_relative_path(__file__, "../requirements.txt")) as req:
requirements = {
f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(req)
}
with open(file_relative_path(__file__, "../requirements-dev.txt")) as req:
requirements_dev = {
f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(req)
}
with open(file_relative_path(__file__, "../requirements-dev-base.txt")) as req:
requirements_dev_base = {
f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(req)
}
with open(file_relative_path(__file__, "../requirements-dev-spark.txt")) as req:
requirements_dev_spark = {
f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(req)
}
with open(
file_relative_path(__file__, "../requirements-dev-sqlalchemy.txt")
) as req:
requirements_dev_sqlalchemy = {
f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(req)
}
assert requirements <= requirements_dev
assert requirements_dev_base.intersection(requirements_dev_spark) == set()
assert requirements_dev_base.intersection(requirements_dev_sqlalchemy) == set()
assert requirements_dev_spark.intersection(requirements_dev_sqlalchemy) == set()
assert requirements_dev - (
requirements
| requirements_dev_base
| requirements_dev_sqlalchemy
| requirements_dev_spark
) <= {"numpy>=1.21.0", "scipy>=1.7.0"} | [
"def",
"test_requirements_files",
"(",
")",
":",
"with",
"open",
"(",
"file_relative_path",
"(",
"__file__",
",",
"\"../requirements.txt\"",
")",
")",
"as",
"req",
":",
"requirements",
"=",
"{",
"f'{line.name}{\"\".join(line.specs[0])}'",
"for",
"line",
"in",
"rp",
".",
"parse",
"(",
"req",
")",
"}",
"with",
"open",
"(",
"file_relative_path",
"(",
"__file__",
",",
"\"../requirements-dev.txt\"",
")",
")",
"as",
"req",
":",
"requirements_dev",
"=",
"{",
"f'{line.name}{\"\".join(line.specs[0])}'",
"for",
"line",
"in",
"rp",
".",
"parse",
"(",
"req",
")",
"}",
"with",
"open",
"(",
"file_relative_path",
"(",
"__file__",
",",
"\"../requirements-dev-base.txt\"",
")",
")",
"as",
"req",
":",
"requirements_dev_base",
"=",
"{",
"f'{line.name}{\"\".join(line.specs[0])}'",
"for",
"line",
"in",
"rp",
".",
"parse",
"(",
"req",
")",
"}",
"with",
"open",
"(",
"file_relative_path",
"(",
"__file__",
",",
"\"../requirements-dev-spark.txt\"",
")",
")",
"as",
"req",
":",
"requirements_dev_spark",
"=",
"{",
"f'{line.name}{\"\".join(line.specs[0])}'",
"for",
"line",
"in",
"rp",
".",
"parse",
"(",
"req",
")",
"}",
"with",
"open",
"(",
"file_relative_path",
"(",
"__file__",
",",
"\"../requirements-dev-sqlalchemy.txt\"",
")",
")",
"as",
"req",
":",
"requirements_dev_sqlalchemy",
"=",
"{",
"f'{line.name}{\"\".join(line.specs[0])}'",
"for",
"line",
"in",
"rp",
".",
"parse",
"(",
"req",
")",
"}",
"assert",
"requirements",
"<=",
"requirements_dev",
"assert",
"requirements_dev_base",
".",
"intersection",
"(",
"requirements_dev_spark",
")",
"==",
"set",
"(",
")",
"assert",
"requirements_dev_base",
".",
"intersection",
"(",
"requirements_dev_sqlalchemy",
")",
"==",
"set",
"(",
")",
"assert",
"requirements_dev_spark",
".",
"intersection",
"(",
"requirements_dev_sqlalchemy",
")",
"==",
"set",
"(",
")",
"assert",
"requirements_dev",
"-",
"(",
"requirements",
"|",
"requirements_dev_base",
"|",
"requirements_dev_sqlalchemy",
"|",
"requirements_dev_spark",
")",
"<=",
"{",
"\"numpy>=1.21.0\"",
",",
"\"scipy>=1.7.0\"",
"}"
] | [
5,
0
] | [
47,
42
] | python | en | ['en', 'en', 'en'] | True |
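The set comprehensions in this test rely on the third-party `requirements-parser` package (imported as `rp` in the test module); a small sketch of what each parsed line yields, with the package name and alias stated as assumptions.

```python
# Sketch only: assumes the `requirements-parser` package, imported as `rp`
# as in the test module above.
import requirements as rp

text = "numpy>=1.21.0\nscipy>=1.7.0\n"
pinned = {f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(text)}
# line.specs is a list of (operator, version) tuples, so each entry renders as
# e.g. "numpy>=1.21.0"; the test then compares such sets with <= and intersection().
assert pinned == {"numpy>=1.21.0", "scipy>=1.7.0"}
```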
Sourceify | (path) | Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop. | Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop. | def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path | [
"def",
"Sourceify",
"(",
"path",
")",
":",
"return",
"path"
] | [
82,
0
] | [
85,
13
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.Write | (self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target, sdk_version) | The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
sdk_version: what to emit for LOCAL_SDK_VERSION in output
| The main entry point: writes a .mk file for a single target. | def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target, sdk_version):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
sdk_version: what to emit for LOCAL_SDK_VERSION in output
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
elif sdk_version > 0:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module | [
"def",
"Write",
"(",
"self",
",",
"qualified_target",
",",
"relative_target",
",",
"base_path",
",",
"output_filename",
",",
"spec",
",",
"configs",
",",
"part_of_all",
",",
"write_alias_target",
",",
"sdk_version",
")",
":",
"gyp",
".",
"common",
".",
"EnsureDirExists",
"(",
"output_filename",
")",
"self",
".",
"fp",
"=",
"open",
"(",
"output_filename",
",",
"'w'",
")",
"self",
".",
"fp",
".",
"write",
"(",
"header",
")",
"self",
".",
"qualified_target",
"=",
"qualified_target",
"self",
".",
"relative_target",
"=",
"relative_target",
"self",
".",
"path",
"=",
"base_path",
"self",
".",
"target",
"=",
"spec",
"[",
"'target_name'",
"]",
"self",
".",
"type",
"=",
"spec",
"[",
"'type'",
"]",
"self",
".",
"toolset",
"=",
"spec",
"[",
"'toolset'",
"]",
"deps",
",",
"link_deps",
"=",
"self",
".",
"ComputeDeps",
"(",
"spec",
")",
"# Some of the generation below can add extra output, sources, or",
"# link dependencies. All of the out params of the functions that",
"# follow use names like extra_foo.",
"extra_outputs",
"=",
"[",
"]",
"extra_sources",
"=",
"[",
"]",
"self",
".",
"android_class",
"=",
"MODULE_CLASSES",
".",
"get",
"(",
"self",
".",
"type",
",",
"'GYP'",
")",
"self",
".",
"android_module",
"=",
"self",
".",
"ComputeAndroidModule",
"(",
"spec",
")",
"(",
"self",
".",
"android_stem",
",",
"self",
".",
"android_suffix",
")",
"=",
"self",
".",
"ComputeOutputParts",
"(",
"spec",
")",
"self",
".",
"output",
"=",
"self",
".",
"output_binary",
"=",
"self",
".",
"ComputeOutput",
"(",
"spec",
")",
"# Standard header.",
"self",
".",
"WriteLn",
"(",
"'include $(CLEAR_VARS)\\n'",
")",
"# Module class and name.",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE_CLASS := '",
"+",
"self",
".",
"android_class",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE := '",
"+",
"self",
".",
"android_module",
")",
"# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.",
"# The library module classes fail if the stem is set. ComputeOutputParts",
"# makes sure that stem == modulename in these cases.",
"if",
"self",
".",
"android_stem",
"!=",
"self",
".",
"android_module",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE_STEM := '",
"+",
"self",
".",
"android_stem",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE_SUFFIX := '",
"+",
"self",
".",
"android_suffix",
")",
"if",
"self",
".",
"toolset",
"==",
"'host'",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_IS_HOST_MODULE := true'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)'",
")",
"elif",
"sdk_version",
">",
"0",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE_TARGET_ARCH := '",
"'$(TARGET_$(GYP_VAR_PREFIX)ARCH)'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_SDK_VERSION := %s'",
"%",
"sdk_version",
")",
"# Grab output directories; needed for Actions and Rules.",
"if",
"self",
".",
"toolset",
"==",
"'host'",
":",
"self",
".",
"WriteLn",
"(",
"'gyp_intermediate_dir := '",
"'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))'",
")",
"else",
":",
"self",
".",
"WriteLn",
"(",
"'gyp_intermediate_dir := '",
"'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))'",
")",
"self",
".",
"WriteLn",
"(",
"'gyp_shared_intermediate_dir := '",
"'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))'",
")",
"self",
".",
"WriteLn",
"(",
")",
"# List files this target depends on so that actions/rules/copies/sources",
"# can depend on the list.",
"# TODO: doesn't pull in things through transitive link deps; needed?",
"target_dependencies",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"deps",
"if",
"x",
"[",
"0",
"]",
"==",
"'path'",
"]",
"self",
".",
"WriteLn",
"(",
"'# Make sure our deps are built first.'",
")",
"self",
".",
"WriteList",
"(",
"target_dependencies",
",",
"'GYP_TARGET_DEPENDENCIES'",
",",
"local_pathify",
"=",
"True",
")",
"# Actions must come first, since they can generate more OBJs for use below.",
"if",
"'actions'",
"in",
"spec",
":",
"self",
".",
"WriteActions",
"(",
"spec",
"[",
"'actions'",
"]",
",",
"extra_sources",
",",
"extra_outputs",
")",
"# Rules must be early like actions.",
"if",
"'rules'",
"in",
"spec",
":",
"self",
".",
"WriteRules",
"(",
"spec",
"[",
"'rules'",
"]",
",",
"extra_sources",
",",
"extra_outputs",
")",
"if",
"'copies'",
"in",
"spec",
":",
"self",
".",
"WriteCopies",
"(",
"spec",
"[",
"'copies'",
"]",
",",
"extra_outputs",
")",
"# GYP generated outputs.",
"self",
".",
"WriteList",
"(",
"extra_outputs",
",",
"'GYP_GENERATED_OUTPUTS'",
",",
"local_pathify",
"=",
"True",
")",
"# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend",
"# on both our dependency targets and our generated files.",
"self",
".",
"WriteLn",
"(",
"'# Make sure our deps and generated files are built first.'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '",
"'$(GYP_GENERATED_OUTPUTS)'",
")",
"self",
".",
"WriteLn",
"(",
")",
"# Sources.",
"if",
"spec",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
"or",
"extra_sources",
":",
"self",
".",
"WriteSources",
"(",
"spec",
",",
"configs",
",",
"extra_sources",
")",
"self",
".",
"WriteTarget",
"(",
"spec",
",",
"configs",
",",
"deps",
",",
"link_deps",
",",
"part_of_all",
",",
"write_alias_target",
")",
"# Update global list of target outputs, used in dependency tracking.",
"target_outputs",
"[",
"qualified_target",
"]",
"=",
"(",
"'path'",
",",
"self",
".",
"output_binary",
")",
"# Update global list of link dependencies.",
"if",
"self",
".",
"type",
"==",
"'static_library'",
":",
"target_link_deps",
"[",
"qualified_target",
"]",
"=",
"(",
"'static'",
",",
"self",
".",
"android_module",
")",
"elif",
"self",
".",
"type",
"==",
"'shared_library'",
":",
"target_link_deps",
"[",
"qualified_target",
"]",
"=",
"(",
"'shared'",
",",
"self",
".",
"android_module",
")",
"self",
".",
"fp",
".",
"close",
"(",
")",
"return",
"self",
".",
"android_module"
] | [
109,
2
] | [
228,
30
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteActions | (self, actions, extra_sources, extra_outputs) | Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
| Write Makefile code for any 'actions' from the gyp input. | def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn() | [
"def",
"WriteActions",
"(",
"self",
",",
"actions",
",",
"extra_sources",
",",
"extra_outputs",
")",
":",
"for",
"action",
"in",
"actions",
":",
"name",
"=",
"make",
".",
"StringToMakefileVariable",
"(",
"'%s_%s'",
"%",
"(",
"self",
".",
"relative_target",
",",
"action",
"[",
"'action_name'",
"]",
")",
")",
"self",
".",
"WriteLn",
"(",
"'### Rules for action \"%s\":'",
"%",
"action",
"[",
"'action_name'",
"]",
")",
"inputs",
"=",
"action",
"[",
"'inputs'",
"]",
"outputs",
"=",
"action",
"[",
"'outputs'",
"]",
"# Build up a list of outputs.",
"# Collect the output dirs we'll need.",
"dirs",
"=",
"set",
"(",
")",
"for",
"out",
"in",
"outputs",
":",
"if",
"not",
"out",
".",
"startswith",
"(",
"'$'",
")",
":",
"print",
"(",
"'WARNING: Action for target \"%s\" writes output to local path '",
"'\"%s\".'",
"%",
"(",
"self",
".",
"target",
",",
"out",
")",
")",
"dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"out",
")",
"[",
"0",
"]",
"if",
"dir",
":",
"dirs",
".",
"add",
"(",
"dir",
")",
"if",
"int",
"(",
"action",
".",
"get",
"(",
"'process_outputs_as_sources'",
",",
"False",
")",
")",
":",
"extra_sources",
"+=",
"outputs",
"# Prepare the actual command.",
"command",
"=",
"gyp",
".",
"common",
".",
"EncodePOSIXShellList",
"(",
"action",
"[",
"'action'",
"]",
")",
"if",
"'message'",
"in",
"action",
":",
"quiet_cmd",
"=",
"'Gyp action: %s ($@)'",
"%",
"action",
"[",
"'message'",
"]",
"else",
":",
"quiet_cmd",
"=",
"'Gyp action: %s ($@)'",
"%",
"name",
"if",
"len",
"(",
"dirs",
")",
">",
"0",
":",
"command",
"=",
"'mkdir -p %s'",
"%",
"' '",
".",
"join",
"(",
"dirs",
")",
"+",
"'; '",
"+",
"command",
"cd_action",
"=",
"'cd $(gyp_local_path)/%s; '",
"%",
"self",
".",
"path",
"command",
"=",
"cd_action",
"+",
"command",
"# The makefile rules are all relative to the top dir, but the gyp actions",
"# are defined relative to their containing dir. This replaces the gyp_*",
"# variables for the action rule with an absolute version so that the",
"# output goes in the right place.",
"# Only write the gyp_* rules for the \"primary\" output (:1);",
"# it's superfluous for the \"extra outputs\", and this avoids accidentally",
"# writing duplicate dummy rules for those outputs.",
"main_output",
"=",
"make",
".",
"QuoteSpaces",
"(",
"self",
".",
"LocalPathify",
"(",
"outputs",
"[",
"0",
"]",
")",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_local_path := $(LOCAL_PATH)'",
"%",
"main_output",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_var_prefix := $(GYP_VAR_PREFIX)'",
"%",
"main_output",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_intermediate_dir := '",
"'$(abspath $(gyp_intermediate_dir))'",
"%",
"main_output",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_shared_intermediate_dir := '",
"'$(abspath $(gyp_shared_intermediate_dir))'",
"%",
"main_output",
")",
"# Android's envsetup.sh adds a number of directories to the path including",
"# the built host binary directory. This causes actions/rules invoked by",
"# gyp to sometimes use these instead of system versions, e.g. bison.",
"# The built host binaries may not be suitable, and can cause errors.",
"# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable",
"# set by envsetup.",
"self",
".",
"WriteLn",
"(",
"'%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'",
"%",
"main_output",
")",
"# Don't allow spaces in input/output filenames, but make an exception for",
"# filenames which start with '$(' since it's okay for there to be spaces",
"# inside of make function/macro invocations.",
"for",
"input",
"in",
"inputs",
":",
"if",
"not",
"input",
".",
"startswith",
"(",
"'$('",
")",
"and",
"' '",
"in",
"input",
":",
"raise",
"gyp",
".",
"common",
".",
"GypError",
"(",
"'Action input filename \"%s\" in target %s contains a space'",
"%",
"(",
"input",
",",
"self",
".",
"target",
")",
")",
"for",
"output",
"in",
"outputs",
":",
"if",
"not",
"output",
".",
"startswith",
"(",
"'$('",
")",
"and",
"' '",
"in",
"output",
":",
"raise",
"gyp",
".",
"common",
".",
"GypError",
"(",
"'Action output filename \"%s\" in target %s contains a space'",
"%",
"(",
"output",
",",
"self",
".",
"target",
")",
")",
"self",
".",
"WriteLn",
"(",
"'%s: %s $(GYP_TARGET_DEPENDENCIES)'",
"%",
"(",
"main_output",
",",
"' '",
".",
"join",
"(",
"map",
"(",
"self",
".",
"LocalPathify",
",",
"inputs",
")",
")",
")",
")",
"self",
".",
"WriteLn",
"(",
"'\\t@echo \"%s\"'",
"%",
"quiet_cmd",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide)%s\\n'",
"%",
"command",
")",
"for",
"output",
"in",
"outputs",
"[",
"1",
":",
"]",
":",
"# Make each output depend on the main output, with an empty command",
"# to force make to notice that the mtime has changed.",
"self",
".",
"WriteLn",
"(",
"'%s: %s ;'",
"%",
"(",
"self",
".",
"LocalPathify",
"(",
"output",
")",
",",
"main_output",
")",
")",
"extra_outputs",
"+=",
"outputs",
"self",
".",
"WriteLn",
"(",
")",
"self",
".",
"WriteLn",
"(",
")"
] | [
231,
2
] | [
322,
18
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteRules | (self, rules, extra_sources, extra_outputs) | Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
| Write Makefile code for any 'rules' from the gyp input. | def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn() | [
"def",
"WriteRules",
"(",
"self",
",",
"rules",
",",
"extra_sources",
",",
"extra_outputs",
")",
":",
"if",
"len",
"(",
"rules",
")",
"==",
"0",
":",
"return",
"for",
"rule",
"in",
"rules",
":",
"if",
"len",
"(",
"rule",
".",
"get",
"(",
"'rule_sources'",
",",
"[",
"]",
")",
")",
"==",
"0",
":",
"continue",
"name",
"=",
"make",
".",
"StringToMakefileVariable",
"(",
"'%s_%s'",
"%",
"(",
"self",
".",
"relative_target",
",",
"rule",
"[",
"'rule_name'",
"]",
")",
")",
"self",
".",
"WriteLn",
"(",
"'\\n### Generated for rule \"%s\":'",
"%",
"name",
")",
"self",
".",
"WriteLn",
"(",
"'# \"%s\":'",
"%",
"rule",
")",
"inputs",
"=",
"rule",
".",
"get",
"(",
"'inputs'",
")",
"for",
"rule_source",
"in",
"rule",
".",
"get",
"(",
"'rule_sources'",
",",
"[",
"]",
")",
":",
"(",
"rule_source_dirname",
",",
"rule_source_basename",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"rule_source",
")",
"(",
"rule_source_root",
",",
"rule_source_ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"rule_source_basename",
")",
"outputs",
"=",
"[",
"self",
".",
"ExpandInputRoot",
"(",
"out",
",",
"rule_source_root",
",",
"rule_source_dirname",
")",
"for",
"out",
"in",
"rule",
"[",
"'outputs'",
"]",
"]",
"dirs",
"=",
"set",
"(",
")",
"for",
"out",
"in",
"outputs",
":",
"if",
"not",
"out",
".",
"startswith",
"(",
"'$'",
")",
":",
"print",
"(",
"'WARNING: Rule for target %s writes output to local path %s'",
"%",
"(",
"self",
".",
"target",
",",
"out",
")",
")",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"out",
")",
"if",
"dir",
":",
"dirs",
".",
"add",
"(",
"dir",
")",
"extra_outputs",
"+=",
"outputs",
"if",
"int",
"(",
"rule",
".",
"get",
"(",
"'process_outputs_as_sources'",
",",
"False",
")",
")",
":",
"extra_sources",
".",
"extend",
"(",
"outputs",
")",
"components",
"=",
"[",
"]",
"for",
"component",
"in",
"rule",
"[",
"'action'",
"]",
":",
"component",
"=",
"self",
".",
"ExpandInputRoot",
"(",
"component",
",",
"rule_source_root",
",",
"rule_source_dirname",
")",
"if",
"'$(RULE_SOURCES)'",
"in",
"component",
":",
"component",
"=",
"component",
".",
"replace",
"(",
"'$(RULE_SOURCES)'",
",",
"rule_source",
")",
"components",
".",
"append",
"(",
"component",
")",
"command",
"=",
"gyp",
".",
"common",
".",
"EncodePOSIXShellList",
"(",
"components",
")",
"cd_action",
"=",
"'cd $(gyp_local_path)/%s; '",
"%",
"self",
".",
"path",
"command",
"=",
"cd_action",
"+",
"command",
"if",
"dirs",
":",
"command",
"=",
"'mkdir -p %s'",
"%",
"' '",
".",
"join",
"(",
"dirs",
")",
"+",
"'; '",
"+",
"command",
"# We set up a rule to build the first output, and then set up",
"# a rule for each additional output to depend on the first.",
"outputs",
"=",
"map",
"(",
"self",
".",
"LocalPathify",
",",
"outputs",
")",
"main_output",
"=",
"outputs",
"[",
"0",
"]",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_local_path := $(LOCAL_PATH)'",
"%",
"main_output",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_var_prefix := $(GYP_VAR_PREFIX)'",
"%",
"main_output",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_intermediate_dir := '",
"'$(abspath $(gyp_intermediate_dir))'",
"%",
"main_output",
")",
"self",
".",
"WriteLn",
"(",
"'%s: gyp_shared_intermediate_dir := '",
"'$(abspath $(gyp_shared_intermediate_dir))'",
"%",
"main_output",
")",
"# See explanation in WriteActions.",
"self",
".",
"WriteLn",
"(",
"'%s: export PATH := '",
"'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))'",
"%",
"main_output",
")",
"main_output_deps",
"=",
"self",
".",
"LocalPathify",
"(",
"rule_source",
")",
"if",
"inputs",
":",
"main_output_deps",
"+=",
"' '",
"main_output_deps",
"+=",
"' '",
".",
"join",
"(",
"[",
"self",
".",
"LocalPathify",
"(",
"f",
")",
"for",
"f",
"in",
"inputs",
"]",
")",
"self",
".",
"WriteLn",
"(",
"'%s: %s $(GYP_TARGET_DEPENDENCIES)'",
"%",
"(",
"main_output",
",",
"main_output_deps",
")",
")",
"self",
".",
"WriteLn",
"(",
"'\\t%s\\n'",
"%",
"command",
")",
"for",
"output",
"in",
"outputs",
"[",
"1",
":",
"]",
":",
"# Make each output depend on the main output, with an empty command",
"# to force make to notice that the mtime has changed.",
"self",
".",
"WriteLn",
"(",
"'%s: %s ;'",
"%",
"(",
"output",
",",
"main_output",
")",
")",
"self",
".",
"WriteLn",
"(",
")",
"self",
".",
"WriteLn",
"(",
")"
] | [
325,
2
] | [
410,
18
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteCopies | (self, copies, extra_outputs) | Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
| Write Makefile code for any 'copies' from the gyp input. | def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn() | [
"def",
"WriteCopies",
"(",
"self",
",",
"copies",
",",
"extra_outputs",
")",
":",
"self",
".",
"WriteLn",
"(",
"'### Generated for copy rule.'",
")",
"variable",
"=",
"make",
".",
"StringToMakefileVariable",
"(",
"self",
".",
"relative_target",
"+",
"'_copies'",
")",
"outputs",
"=",
"[",
"]",
"for",
"copy",
"in",
"copies",
":",
"for",
"path",
"in",
"copy",
"[",
"'files'",
"]",
":",
"# The Android build system does not allow generation of files into the",
"# source tree. The destination should start with a variable, which will",
"# typically be $(gyp_intermediate_dir) or",
"# $(gyp_shared_intermediate_dir). Note that we can't use an assertion",
"# because some of the gyp tests depend on this.",
"if",
"not",
"copy",
"[",
"'destination'",
"]",
".",
"startswith",
"(",
"'$'",
")",
":",
"print",
"(",
"'WARNING: Copy rule for target %s writes output to '",
"'local path %s'",
"%",
"(",
"self",
".",
"target",
",",
"copy",
"[",
"'destination'",
"]",
")",
")",
"# LocalPathify() calls normpath, stripping trailing slashes.",
"path",
"=",
"Sourceify",
"(",
"self",
".",
"LocalPathify",
"(",
"path",
")",
")",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"1",
"]",
"output",
"=",
"Sourceify",
"(",
"self",
".",
"LocalPathify",
"(",
"os",
".",
"path",
".",
"join",
"(",
"copy",
"[",
"'destination'",
"]",
",",
"filename",
")",
")",
")",
"self",
".",
"WriteLn",
"(",
"'%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)'",
"%",
"(",
"output",
",",
"path",
")",
")",
"self",
".",
"WriteLn",
"(",
"'\\t@echo Copying: $@'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) mkdir -p $(dir $@)'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) $(ACP) -rpf $< $@'",
")",
"self",
".",
"WriteLn",
"(",
")",
"outputs",
".",
"append",
"(",
"output",
")",
"self",
".",
"WriteLn",
"(",
"'%s = %s'",
"%",
"(",
"variable",
",",
"' '",
".",
"join",
"(",
"map",
"(",
"make",
".",
"QuoteSpaces",
",",
"outputs",
")",
")",
")",
")",
"extra_outputs",
".",
"append",
"(",
"'$(%s)'",
"%",
"variable",
")",
"self",
".",
"WriteLn",
"(",
")"
] | [
413,
2
] | [
450,
18
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteSourceFlags | (self, spec, configs) | Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
| Write out the flags and include paths used to compile source files for
the current target. | def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)') | [
"def",
"WriteSourceFlags",
"(",
"self",
",",
"spec",
",",
"configs",
")",
":",
"for",
"configname",
",",
"config",
"in",
"sorted",
"(",
"configs",
".",
"iteritems",
"(",
")",
")",
":",
"extracted_includes",
"=",
"[",
"]",
"self",
".",
"WriteLn",
"(",
"'\\n# Flags passed to both C and C++ files.'",
")",
"cflags",
",",
"includes_from_cflags",
"=",
"self",
".",
"ExtractIncludesFromCFlags",
"(",
"config",
".",
"get",
"(",
"'cflags'",
",",
"[",
"]",
")",
"+",
"config",
".",
"get",
"(",
"'cflags_c'",
",",
"[",
"]",
")",
")",
"extracted_includes",
".",
"extend",
"(",
"includes_from_cflags",
")",
"self",
".",
"WriteList",
"(",
"cflags",
",",
"'MY_CFLAGS_%s'",
"%",
"configname",
")",
"self",
".",
"WriteList",
"(",
"config",
".",
"get",
"(",
"'defines'",
")",
",",
"'MY_DEFS_%s'",
"%",
"configname",
",",
"prefix",
"=",
"'-D'",
",",
"quoter",
"=",
"make",
".",
"EscapeCppDefine",
")",
"self",
".",
"WriteLn",
"(",
"'\\n# Include paths placed before CFLAGS/CPPFLAGS'",
")",
"includes",
"=",
"list",
"(",
"config",
".",
"get",
"(",
"'include_dirs'",
",",
"[",
"]",
")",
")",
"includes",
".",
"extend",
"(",
"extracted_includes",
")",
"includes",
"=",
"map",
"(",
"Sourceify",
",",
"map",
"(",
"self",
".",
"LocalPathify",
",",
"includes",
")",
")",
"includes",
"=",
"self",
".",
"NormalizeIncludePaths",
"(",
"includes",
")",
"self",
".",
"WriteList",
"(",
"includes",
",",
"'LOCAL_C_INCLUDES_%s'",
"%",
"configname",
")",
"self",
".",
"WriteLn",
"(",
"'\\n# Flags passed to only C++ (and not C) files.'",
")",
"self",
".",
"WriteList",
"(",
"config",
".",
"get",
"(",
"'cflags_cc'",
")",
",",
"'LOCAL_CPPFLAGS_%s'",
"%",
"configname",
")",
"self",
".",
"WriteLn",
"(",
"'\\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '",
"'$(MY_DEFS_$(GYP_CONFIGURATION))'",
")",
"# Undefine ANDROID for host modules",
"# TODO: the source code should not use macro ANDROID to tell if it's host",
"# or target module.",
"if",
"self",
".",
"toolset",
"==",
"'host'",
":",
"self",
".",
"WriteLn",
"(",
"'# Undefine ANDROID for host modules'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_CFLAGS += -UANDROID'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '",
"'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))'",
")",
"# Android uses separate flags for assembly file invocations, but gyp expects",
"# the same CFLAGS to be applied:",
"self",
".",
"WriteLn",
"(",
"'LOCAL_ASFLAGS := $(LOCAL_CFLAGS)'",
")"
] | [
453,
2
] | [
495,
52
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteSources | (self, spec, configs, extra_sources) | Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
| Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
treating them as generated sources. Otherwise the Android build
rules won't pick them up. | def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
# If a source file gets copied, we still need to add the original source
# directory as header search path, for GCC searches headers in the
# directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs) | [
"def",
"WriteSources",
"(",
"self",
",",
"spec",
",",
"configs",
",",
"extra_sources",
")",
":",
"sources",
"=",
"filter",
"(",
"make",
".",
"Compilable",
",",
"spec",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
")",
"generated_not_sources",
"=",
"[",
"x",
"for",
"x",
"in",
"extra_sources",
"if",
"not",
"make",
".",
"Compilable",
"(",
"x",
")",
"]",
"extra_sources",
"=",
"filter",
"(",
"make",
".",
"Compilable",
",",
"extra_sources",
")",
"# Determine and output the C++ extension used by these sources.",
"# We simply find the first C++ file and use that extension.",
"all_sources",
"=",
"sources",
"+",
"extra_sources",
"local_cpp_extension",
"=",
"'.cpp'",
"for",
"source",
"in",
"all_sources",
":",
"(",
"root",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"source",
")",
"if",
"IsCPPExtension",
"(",
"ext",
")",
":",
"local_cpp_extension",
"=",
"ext",
"break",
"if",
"local_cpp_extension",
"!=",
"'.cpp'",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_CPP_EXTENSION := %s'",
"%",
"local_cpp_extension",
")",
"# We need to move any non-generated sources that are coming from the",
"# shared intermediate directory out of LOCAL_SRC_FILES and put them",
"# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files",
"# that don't match our local_cpp_extension, since Android will only",
"# generate Makefile rules for a single LOCAL_CPP_EXTENSION.",
"local_files",
"=",
"[",
"]",
"for",
"source",
"in",
"sources",
":",
"(",
"root",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"source",
")",
"if",
"'$(gyp_shared_intermediate_dir)'",
"in",
"source",
":",
"extra_sources",
".",
"append",
"(",
"source",
")",
"elif",
"'$(gyp_intermediate_dir)'",
"in",
"source",
":",
"extra_sources",
".",
"append",
"(",
"source",
")",
"elif",
"IsCPPExtension",
"(",
"ext",
")",
"and",
"ext",
"!=",
"local_cpp_extension",
":",
"extra_sources",
".",
"append",
"(",
"source",
")",
"else",
":",
"local_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"source",
")",
")",
")",
"# For any generated source, if it is coming from the shared intermediate",
"# directory then we add a Make rule to copy them to the local intermediate",
"# directory first. This is because the Android LOCAL_GENERATED_SOURCES",
"# must be in the local module intermediate directory for the compile rules",
"# to work properly. If the file has the wrong C++ extension, then we add",
"# a rule to copy that to intermediates and use the new version.",
"final_generated_sources",
"=",
"[",
"]",
"# If a source file gets copied, we still need to add the orginal source",
"# directory as header search path, for GCC searches headers in the",
"# directory that contains the source file by default.",
"origin_src_dirs",
"=",
"[",
"]",
"for",
"source",
"in",
"extra_sources",
":",
"local_file",
"=",
"source",
"if",
"not",
"'$(gyp_intermediate_dir)/'",
"in",
"local_file",
":",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"local_file",
")",
"local_file",
"=",
"'$(gyp_intermediate_dir)/'",
"+",
"basename",
"(",
"root",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"local_file",
")",
"if",
"IsCPPExtension",
"(",
"ext",
")",
"and",
"ext",
"!=",
"local_cpp_extension",
":",
"local_file",
"=",
"root",
"+",
"local_cpp_extension",
"if",
"local_file",
"!=",
"source",
":",
"self",
".",
"WriteLn",
"(",
"'%s: %s'",
"%",
"(",
"local_file",
",",
"self",
".",
"LocalPathify",
"(",
"source",
")",
")",
")",
"self",
".",
"WriteLn",
"(",
"'\\tmkdir -p $(@D); cp $< $@'",
")",
"origin_src_dirs",
".",
"append",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"source",
")",
")",
"final_generated_sources",
".",
"append",
"(",
"local_file",
")",
"# We add back in all of the non-compilable stuff to make sure that the",
"# make rules have dependencies on them.",
"final_generated_sources",
".",
"extend",
"(",
"generated_not_sources",
")",
"self",
".",
"WriteList",
"(",
"final_generated_sources",
",",
"'LOCAL_GENERATED_SOURCES'",
")",
"origin_src_dirs",
"=",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"origin_src_dirs",
")",
"origin_src_dirs",
"=",
"map",
"(",
"Sourceify",
",",
"map",
"(",
"self",
".",
"LocalPathify",
",",
"origin_src_dirs",
")",
")",
"self",
".",
"WriteList",
"(",
"origin_src_dirs",
",",
"'GYP_COPIED_SOURCE_ORIGIN_DIRS'",
")",
"self",
".",
"WriteList",
"(",
"local_files",
",",
"'LOCAL_SRC_FILES'",
")",
"# Write out the flags used to compile the source; this must be done last",
"# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.",
"self",
".",
"WriteSourceFlags",
"(",
"spec",
",",
"configs",
")"
] | [
498,
2
] | [
581,
40
] | python | en | ['en', 'en', 'en'] | True |
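
Illustrative aside (not part of the dataset row above): the rename that WriteSources applies to each generated source can be sketched standalone. The helper name and the CPP_EXTENSIONS tuple standing in for IsCPPExtension are assumptions made for this example, not gyp API.

import os

CPP_EXTENSIONS = ('.cc', '.cpp', '.cxx')  # stand-in for gyp's IsCPPExtension check

def relocate_generated_source(source, local_cpp_extension='.cpp'):
    # Mirror of the per-source step: pull the file under $(gyp_intermediate_dir)
    # and normalize its extension to the one used by the rest of the target.
    local_file = source
    if '$(gyp_intermediate_dir)/' not in local_file:
        local_file = '$(gyp_intermediate_dir)/' + os.path.basename(local_file)
    root, ext = os.path.splitext(local_file)
    if ext in CPP_EXTENSIONS and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
    return local_file

print(relocate_generated_source('$(gyp_shared_intermediate_dir)/gen/foo.cc'))
# -> $(gyp_intermediate_dir)/foo.cpp
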
AndroidMkWriter.ComputeAndroidModule | (self, spec) | Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
| Return the Android module name used for a gyp spec. | def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix]) | [
"def",
"ComputeAndroidModule",
"(",
"self",
",",
"spec",
")",
":",
"if",
"int",
"(",
"spec",
".",
"get",
"(",
"'android_unmangled_name'",
",",
"0",
")",
")",
":",
"assert",
"self",
".",
"type",
"!=",
"'shared_library'",
"or",
"self",
".",
"target",
".",
"startswith",
"(",
"'lib'",
")",
"return",
"self",
".",
"target",
"if",
"self",
".",
"type",
"==",
"'shared_library'",
":",
"# For reasons of convention, the Android build system requires that all",
"# shared library modules are named 'libfoo' when generating -l flags.",
"prefix",
"=",
"'lib_'",
"else",
":",
"prefix",
"=",
"''",
"if",
"spec",
"[",
"'toolset'",
"]",
"==",
"'host'",
":",
"suffix",
"=",
"'_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'",
"else",
":",
"suffix",
"=",
"'_gyp'",
"if",
"self",
".",
"path",
":",
"middle",
"=",
"make",
".",
"StringToMakefileVariable",
"(",
"'%s_%s'",
"%",
"(",
"self",
".",
"path",
",",
"self",
".",
"target",
")",
")",
"else",
":",
"middle",
"=",
"make",
".",
"StringToMakefileVariable",
"(",
"self",
".",
"target",
")",
"return",
"''",
".",
"join",
"(",
"[",
"prefix",
",",
"middle",
",",
"suffix",
"]",
")"
] | [
584,
2
] | [
613,
44
] | python | en | ['en', 'en', 'en'] | True |
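
Illustrative aside: the module-name mangling described above (prefix + mangled path/target + toolset suffix) can be approximated standalone. The assumption here is that make.StringToMakefileVariable simply replaces characters unsafe in Make variable names with underscores; the function name below is hypothetical.

import re

def mangle_android_module(path, target, target_type, toolset):
    to_make_var = lambda s: re.sub(r'[^a-zA-Z0-9_]', '_', s)
    prefix = 'lib_' if target_type == 'shared_library' else ''
    suffix = ('_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp' if toolset == 'host'
              else '_gyp')
    middle = to_make_var('%s_%s' % (path, target)) if path else to_make_var(target)
    return prefix + middle + suffix

print(mangle_android_module('net/http', 'http_lib', 'shared_library', 'target'))
# -> lib_net_http_http_lib_gyp
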
AndroidMkWriter.ComputeOutputParts | (self, spec) | Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
| Return the 'output basename' of a gyp spec, split into filename + ext. | def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext) | [
"def",
"ComputeOutputParts",
"(",
"self",
",",
"spec",
")",
":",
"assert",
"self",
".",
"type",
"!=",
"'loadable_module'",
"# TODO: not supported?",
"target",
"=",
"spec",
"[",
"'target_name'",
"]",
"target_prefix",
"=",
"''",
"target_ext",
"=",
"''",
"if",
"self",
".",
"type",
"==",
"'static_library'",
":",
"target",
"=",
"self",
".",
"ComputeAndroidModule",
"(",
"spec",
")",
"target_ext",
"=",
"'.a'",
"elif",
"self",
".",
"type",
"==",
"'shared_library'",
":",
"target",
"=",
"self",
".",
"ComputeAndroidModule",
"(",
"spec",
")",
"target_ext",
"=",
"'.so'",
"elif",
"self",
".",
"type",
"==",
"'none'",
":",
"target_ext",
"=",
"'.stamp'",
"elif",
"self",
".",
"type",
"!=",
"'executable'",
":",
"print",
"(",
"\"ERROR: What output file should be generated?\"",
",",
"\"type\"",
",",
"self",
".",
"type",
",",
"\"target\"",
",",
"target",
")",
"if",
"self",
".",
"type",
"!=",
"'static_library'",
"and",
"self",
".",
"type",
"!=",
"'shared_library'",
":",
"target_prefix",
"=",
"spec",
".",
"get",
"(",
"'product_prefix'",
",",
"target_prefix",
")",
"target",
"=",
"spec",
".",
"get",
"(",
"'product_name'",
",",
"target",
")",
"product_ext",
"=",
"spec",
".",
"get",
"(",
"'product_extension'",
")",
"if",
"product_ext",
":",
"target_ext",
"=",
"'.'",
"+",
"product_ext",
"target_stem",
"=",
"target_prefix",
"+",
"target",
"return",
"(",
"target_stem",
",",
"target_ext",
")"
] | [
616,
2
] | [
649,
36
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.ComputeOutputBasename | (self, spec) | Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
| Return the 'output basename' of a gyp spec. | def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec)) | [
"def",
"ComputeOutputBasename",
"(",
"self",
",",
"spec",
")",
":",
"return",
"''",
".",
"join",
"(",
"self",
".",
"ComputeOutputParts",
"(",
"spec",
")",
")"
] | [
652,
2
] | [
658,
49
] | python | en | ['en', 'haw', 'en'] | True |
AndroidMkWriter.ComputeOutput | (self, spec) | Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
| Return the 'output' (full output path) of a gyp spec. | def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec)) | [
"def",
"ComputeOutput",
"(",
"self",
",",
"spec",
")",
":",
"if",
"self",
".",
"type",
"==",
"'executable'",
":",
"# We install host executables into shared_intermediate_dir so they can be",
"# run by gyp rules that refer to PRODUCT_DIR.",
"path",
"=",
"'$(gyp_shared_intermediate_dir)'",
"elif",
"self",
".",
"type",
"==",
"'shared_library'",
":",
"if",
"self",
".",
"toolset",
"==",
"'host'",
":",
"path",
"=",
"'$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'",
"else",
":",
"path",
"=",
"'$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'",
"else",
":",
"# Other targets just get built into their intermediate dir.",
"if",
"self",
".",
"toolset",
"==",
"'host'",
":",
"path",
"=",
"(",
"'$(call intermediates-dir-for,%s,%s,true,,'",
"'$(GYP_HOST_VAR_PREFIX))'",
"%",
"(",
"self",
".",
"android_class",
",",
"self",
".",
"android_module",
")",
")",
"else",
":",
"path",
"=",
"(",
"'$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'",
"%",
"(",
"self",
".",
"android_class",
",",
"self",
".",
"android_module",
")",
")",
"assert",
"spec",
".",
"get",
"(",
"'product_dir'",
")",
"is",
"None",
"# TODO: not supported?",
"return",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"self",
".",
"ComputeOutputBasename",
"(",
"spec",
")",
")"
] | [
661,
2
] | [
687,
63
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.NormalizeIncludePaths | (self, include_paths) | Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
| Normalize include_paths.
Convert absolute paths to relative to the Android top directory. | def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized | [
"def",
"NormalizeIncludePaths",
"(",
"self",
",",
"include_paths",
")",
":",
"normalized",
"=",
"[",
"]",
"for",
"path",
"in",
"include_paths",
":",
"if",
"path",
"[",
"0",
"]",
"==",
"'/'",
":",
"path",
"=",
"gyp",
".",
"common",
".",
"RelativePath",
"(",
"path",
",",
"self",
".",
"android_top_dir",
")",
"normalized",
".",
"append",
"(",
"path",
")",
"return",
"normalized"
] | [
689,
2
] | [
703,
21
] | python | en | ['en', 'en', 'en'] | False |
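
Illustrative aside: the include-path normalization above, with os.path.relpath standing in for gyp.common.RelativePath (an assumption for the sketch; the real helper may differ in corner cases).

import os

def normalize_include_paths(include_paths, android_top_dir):
    normalized = []
    for path in include_paths:
        if path.startswith('/'):
            path = os.path.relpath(path, android_top_dir)
        normalized.append(path)
    return normalized

print(normalize_include_paths(['/src/android/external/zlib', 'include'],
                              '/src/android'))
# -> ['external/zlib', 'include']
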
AndroidMkWriter.ExtractIncludesFromCFlags | (self, cflags) | Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
| Extract includes "-I..." out from cflags | def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths) | [
"def",
"ExtractIncludesFromCFlags",
"(",
"self",
",",
"cflags",
")",
":",
"clean_cflags",
"=",
"[",
"]",
"include_paths",
"=",
"[",
"]",
"for",
"flag",
"in",
"cflags",
":",
"if",
"flag",
".",
"startswith",
"(",
"'-I'",
")",
":",
"include_paths",
".",
"append",
"(",
"flag",
"[",
"2",
":",
"]",
")",
"else",
":",
"clean_cflags",
".",
"append",
"(",
"flag",
")",
"return",
"(",
"clean_cflags",
",",
"include_paths",
")"
] | [
705,
2
] | [
721,
40
] | python | en | ['en', 'en', 'en'] | True |
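
Quick input/output check of the split described above (a standalone copy of the loop, for illustration only; the function name is made up):

def split_cflags(cflags):
    clean_cflags, include_paths = [], []
    for flag in cflags:
        if flag.startswith('-I'):
            include_paths.append(flag[2:])
        else:
            clean_cflags.append(flag)
    return clean_cflags, include_paths

print(split_cflags(['-O2', '-Ithird_party/zlib', '-Wall']))
# -> (['-O2', '-Wall'], ['third_party/zlib'])
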
AndroidMkWriter.FilterLibraries | (self, libraries) | Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
| Filter the 'libraries' key to separate things that shouldn't be ldflags. | def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags) | [
"def",
"FilterLibraries",
"(",
"self",
",",
"libraries",
")",
":",
"static_lib_modules",
"=",
"[",
"]",
"dynamic_lib_modules",
"=",
"[",
"]",
"ldflags",
"=",
"[",
"]",
"for",
"libs",
"in",
"libraries",
":",
"# Libs can have multiple words.",
"for",
"lib",
"in",
"libs",
".",
"split",
"(",
")",
":",
"# Filter the system libraries, which are added by default by the Android",
"# build system.",
"if",
"(",
"lib",
"==",
"'-lc'",
"or",
"lib",
"==",
"'-lstdc++'",
"or",
"lib",
"==",
"'-lm'",
"or",
"lib",
".",
"endswith",
"(",
"'libgcc.a'",
")",
")",
":",
"continue",
"match",
"=",
"re",
".",
"search",
"(",
"r'([^/]+)\\.a$'",
",",
"lib",
")",
"if",
"match",
":",
"static_lib_modules",
".",
"append",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"continue",
"match",
"=",
"re",
".",
"search",
"(",
"r'([^/]+)\\.so$'",
",",
"lib",
")",
"if",
"match",
":",
"dynamic_lib_modules",
".",
"append",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"continue",
"if",
"lib",
".",
"startswith",
"(",
"'-l'",
")",
":",
"ldflags",
".",
"append",
"(",
"lib",
")",
"return",
"(",
"static_lib_modules",
",",
"dynamic_lib_modules",
",",
"ldflags",
")"
] | [
723,
2
] | [
755,
61
] | python | en | ['en', 'en', 'en'] | True |
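
Illustrative aside: how the classification above sorts library entries. This is a standalone restatement of the loop (function name hypothetical), useful as a sanity check of the two regexes.

import re

def classify_libs(libraries):
    static, dynamic, ldflags = [], [], []
    for libs in libraries:
        for lib in libs.split():
            if lib in ('-lc', '-lstdc++', '-lm') or lib.endswith('libgcc.a'):
                continue  # dropped: added by the Android build system anyway
            m = re.search(r'([^/]+)\.a$', lib)
            if m:
                static.append(m.group(1))
                continue
            m = re.search(r'([^/]+)\.so$', lib)
            if m:
                dynamic.append(m.group(1))
                continue
            if lib.startswith('-l'):
                ldflags.append(lib)
    return static, dynamic, ldflags

print(classify_libs(['out/libfoo.a', 'out/libbar.so', '-lz -lc']))
# -> (['libfoo'], ['libbar'], ['-lz'])
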
AndroidMkWriter.ComputeDeps | (self, spec) | Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
| Compute the dependencies of a gyp spec. | def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) | [
"def",
"ComputeDeps",
"(",
"self",
",",
"spec",
")",
":",
"deps",
"=",
"[",
"]",
"link_deps",
"=",
"[",
"]",
"if",
"'dependencies'",
"in",
"spec",
":",
"deps",
".",
"extend",
"(",
"[",
"target_outputs",
"[",
"dep",
"]",
"for",
"dep",
"in",
"spec",
"[",
"'dependencies'",
"]",
"if",
"target_outputs",
"[",
"dep",
"]",
"]",
")",
"for",
"dep",
"in",
"spec",
"[",
"'dependencies'",
"]",
":",
"if",
"dep",
"in",
"target_link_deps",
":",
"link_deps",
".",
"append",
"(",
"target_link_deps",
"[",
"dep",
"]",
")",
"deps",
".",
"extend",
"(",
"link_deps",
")",
"return",
"(",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"deps",
")",
",",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"link_deps",
")",
")"
] | [
758,
2
] | [
774,
68
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteTargetFlags | (self, spec, configs, link_deps) | Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
| Write Makefile code to specify the link flags and library dependencies. | def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
# Libraries (i.e. -lfoo)
# These must be included even for static libraries as some of them provide
# implicit include paths through the build system.
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)
if self.type != 'static_library':
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
'$(LOCAL_GYP_LIBS)')
# Link dependencies (i.e. other gyp targets this target depends on)
# These need not be included for static libraries as within the gyp build
# we do not use the implicit include path mechanism.
if self.type != 'static_library':
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
else:
static_link_deps = []
shared_link_deps = []
# Only write the lists if they are non-empty.
if static_libs or static_link_deps:
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
if dynamic_libs or shared_link_deps:
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES') | [
"def",
"WriteTargetFlags",
"(",
"self",
",",
"spec",
",",
"configs",
",",
"link_deps",
")",
":",
"# Libraries (i.e. -lfoo)",
"# These must be included even for static libraries as some of them provide",
"# implicit include paths through the build system.",
"libraries",
"=",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"spec",
".",
"get",
"(",
"'libraries'",
",",
"[",
"]",
")",
")",
"static_libs",
",",
"dynamic_libs",
",",
"ldflags_libs",
"=",
"self",
".",
"FilterLibraries",
"(",
"libraries",
")",
"if",
"self",
".",
"type",
"!=",
"'static_library'",
":",
"for",
"configname",
",",
"config",
"in",
"sorted",
"(",
"configs",
".",
"iteritems",
"(",
")",
")",
":",
"ldflags",
"=",
"list",
"(",
"config",
".",
"get",
"(",
"'ldflags'",
",",
"[",
"]",
")",
")",
"self",
".",
"WriteLn",
"(",
"''",
")",
"self",
".",
"WriteList",
"(",
"ldflags",
",",
"'LOCAL_LDFLAGS_%s'",
"%",
"configname",
")",
"self",
".",
"WriteList",
"(",
"ldflags_libs",
",",
"'LOCAL_GYP_LIBS'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '",
"'$(LOCAL_GYP_LIBS)'",
")",
"# Link dependencies (i.e. other gyp targets this target depends on)",
"# These need not be included for static libraries as within the gyp build",
"# we do not use the implicit include path mechanism.",
"if",
"self",
".",
"type",
"!=",
"'static_library'",
":",
"static_link_deps",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"link_deps",
"if",
"x",
"[",
"0",
"]",
"==",
"'static'",
"]",
"shared_link_deps",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"link_deps",
"if",
"x",
"[",
"0",
"]",
"==",
"'shared'",
"]",
"else",
":",
"static_link_deps",
"=",
"[",
"]",
"shared_link_deps",
"=",
"[",
"]",
"# Only write the lists if they are non-empty.",
"if",
"static_libs",
"or",
"static_link_deps",
":",
"self",
".",
"WriteLn",
"(",
"''",
")",
"self",
".",
"WriteList",
"(",
"static_libs",
"+",
"static_link_deps",
",",
"'LOCAL_STATIC_LIBRARIES'",
")",
"self",
".",
"WriteLn",
"(",
"'# Enable grouping to fix circular references'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_GROUP_STATIC_LIBRARIES := true'",
")",
"if",
"dynamic_libs",
"or",
"shared_link_deps",
":",
"self",
".",
"WriteLn",
"(",
"''",
")",
"self",
".",
"WriteList",
"(",
"dynamic_libs",
"+",
"shared_link_deps",
",",
"'LOCAL_SHARED_LIBRARIES'",
")"
] | [
777,
2
] | [
818,
46
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteTarget | (self, spec, configs, deps, link_deps, part_of_all,
write_alias_target) | Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
| Write Makefile code to produce the final target of the gyp spec. | def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
settings = spec.get('aosp_build_settings', {})
if settings:
self.WriteLn('### Set directly by aosp_build_settings.')
for k, v in settings.iteritems():
if isinstance(v, list):
self.WriteList(v, k)
else:
self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
self.WriteLn('')
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
self.WriteLn('LOCAL_CXX_STL := libc++_static')
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=') | [
"def",
"WriteTarget",
"(",
"self",
",",
"spec",
",",
"configs",
",",
"deps",
",",
"link_deps",
",",
"part_of_all",
",",
"write_alias_target",
")",
":",
"self",
".",
"WriteLn",
"(",
"'### Rules for final target.'",
")",
"if",
"self",
".",
"type",
"!=",
"'none'",
":",
"self",
".",
"WriteTargetFlags",
"(",
"spec",
",",
"configs",
",",
"link_deps",
")",
"settings",
"=",
"spec",
".",
"get",
"(",
"'aosp_build_settings'",
",",
"{",
"}",
")",
"if",
"settings",
":",
"self",
".",
"WriteLn",
"(",
"'### Set directly by aosp_build_settings.'",
")",
"for",
"k",
",",
"v",
"in",
"settings",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"self",
".",
"WriteList",
"(",
"v",
",",
"k",
")",
"else",
":",
"self",
".",
"WriteLn",
"(",
"'%s := %s'",
"%",
"(",
"k",
",",
"make",
".",
"QuoteIfNecessary",
"(",
"v",
")",
")",
")",
"self",
".",
"WriteLn",
"(",
"''",
")",
"# Add to the set of targets which represent the gyp 'all' target. We use the",
"# name 'gyp_all_modules' as the Android build system doesn't allow the use",
"# of the Make target 'all' and because 'all_modules' is the equivalent of",
"# the Make target 'all' on Android.",
"if",
"part_of_all",
"and",
"write_alias_target",
":",
"self",
".",
"WriteLn",
"(",
"'# Add target alias to \"gyp_all_modules\" target.'",
")",
"self",
".",
"WriteLn",
"(",
"'.PHONY: gyp_all_modules'",
")",
"self",
".",
"WriteLn",
"(",
"'gyp_all_modules: %s'",
"%",
"self",
".",
"android_module",
")",
"self",
".",
"WriteLn",
"(",
"''",
")",
"# Add an alias from the gyp target name to the Android module name. This",
"# simplifies manual builds of the target, and is required by the test",
"# framework.",
"if",
"self",
".",
"target",
"!=",
"self",
".",
"android_module",
"and",
"write_alias_target",
":",
"self",
".",
"WriteLn",
"(",
"'# Alias gyp target name.'",
")",
"self",
".",
"WriteLn",
"(",
"'.PHONY: %s'",
"%",
"self",
".",
"target",
")",
"self",
".",
"WriteLn",
"(",
"'%s: %s'",
"%",
"(",
"self",
".",
"target",
",",
"self",
".",
"android_module",
")",
")",
"self",
".",
"WriteLn",
"(",
"''",
")",
"# Add the command to trigger build of the target type depending",
"# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY",
"# NOTE: This has to come last!",
"modifier",
"=",
"''",
"if",
"self",
".",
"toolset",
"==",
"'host'",
":",
"modifier",
"=",
"'HOST_'",
"if",
"self",
".",
"type",
"==",
"'static_library'",
":",
"self",
".",
"WriteLn",
"(",
"'include $(BUILD_%sSTATIC_LIBRARY)'",
"%",
"modifier",
")",
"elif",
"self",
".",
"type",
"==",
"'shared_library'",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_PRELINK_MODULE := false'",
")",
"self",
".",
"WriteLn",
"(",
"'include $(BUILD_%sSHARED_LIBRARY)'",
"%",
"modifier",
")",
"elif",
"self",
".",
"type",
"==",
"'executable'",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_CXX_STL := libc++_static'",
")",
"# Executables are for build and test purposes only, so they're installed",
"# to a directory that doesn't get included in the system image.",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)'",
")",
"self",
".",
"WriteLn",
"(",
"'include $(BUILD_%sEXECUTABLE)'",
"%",
"modifier",
")",
"else",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp'",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_UNINSTALLABLE_MODULE := true'",
")",
"if",
"self",
".",
"toolset",
"==",
"'target'",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)'",
")",
"else",
":",
"self",
".",
"WriteLn",
"(",
"'LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)'",
")",
"self",
".",
"WriteLn",
"(",
")",
"self",
".",
"WriteLn",
"(",
"'include $(BUILD_SYSTEM)/base_rules.mk'",
")",
"self",
".",
"WriteLn",
"(",
")",
"self",
".",
"WriteLn",
"(",
"'$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) echo \"Gyp timestamp: $@\"'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) mkdir -p $(dir $@)'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) touch $@'",
")",
"self",
".",
"WriteLn",
"(",
")",
"self",
".",
"WriteLn",
"(",
"'LOCAL_2ND_ARCH_VAR_PREFIX :='",
")"
] | [
821,
2
] | [
897,
50
] | python | en | ['en', 'en', 'en'] | True |
AndroidMkWriter.WriteList | (self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False) | Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
| Write a variable definition that is a list of values. | def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values)) | [
"def",
"WriteList",
"(",
"self",
",",
"value_list",
",",
"variable",
"=",
"None",
",",
"prefix",
"=",
"''",
",",
"quoter",
"=",
"make",
".",
"QuoteIfNecessary",
",",
"local_pathify",
"=",
"False",
")",
":",
"values",
"=",
"''",
"if",
"value_list",
":",
"value_list",
"=",
"[",
"quoter",
"(",
"prefix",
"+",
"l",
")",
"for",
"l",
"in",
"value_list",
"]",
"if",
"local_pathify",
":",
"value_list",
"=",
"[",
"self",
".",
"LocalPathify",
"(",
"l",
")",
"for",
"l",
"in",
"value_list",
"]",
"values",
"=",
"' \\\\\\n\\t'",
"+",
"' \\\\\\n\\t'",
".",
"join",
"(",
"value_list",
")",
"self",
".",
"fp",
".",
"write",
"(",
"'%s :=%s\\n\\n'",
"%",
"(",
"variable",
",",
"values",
")",
")"
] | [
900,
2
] | [
914,
53
] | python | en | ['en', 'en', 'en'] | True |
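
Illustrative aside: what the pretty-printed assignment from WriteList looks like. Standalone sketch (no quoting or LocalPathify handling), matching the 'foo = blaha blahb' example in the docstring:

def write_list(value_list, variable, prefix=''):
    values = ''
    if value_list:
        values = ' \\\n\t' + ' \\\n\t'.join(prefix + v for v in value_list)
    return '%s :=%s\n\n' % (variable, values)

print(write_list(['a', 'b'], 'foo', prefix='blah'), end='')
# Output (continuation lines are tab-indented):
# foo := \
#       blaha \
#       blahb
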
AndroidMkWriter.LocalPathify | (self, path) | Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized. | Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized. | def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
return local_path | [
"def",
"LocalPathify",
"(",
"self",
",",
"path",
")",
":",
"if",
"'$('",
"in",
"path",
"or",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"# path is not a file in the project tree in this case, but calling",
"# normpath is still important for trimming trailing slashes.",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"path",
")",
"local_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$(LOCAL_PATH)'",
",",
"self",
".",
"path",
",",
"path",
")",
"local_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"local_path",
")",
"# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)",
"# - i.e. that the resulting path is still inside the project tree. The",
"# path may legitimately have ended up containing just $(LOCAL_PATH), though,",
"# so we don't look for a slash.",
"assert",
"local_path",
".",
"startswith",
"(",
"'$(LOCAL_PATH)'",
")",
",",
"(",
"'Path %s attempts to escape from gyp path %s !)'",
"%",
"(",
"path",
",",
"self",
".",
"path",
")",
")",
"return",
"local_path"
] | [
921,
2
] | [
937,
21
] | python | en | ['en', 'en', 'en'] | True |
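
Illustrative aside: LocalPathify behaviour for a writer whose self.path is 'net/http'. Standalone sketch; the function name and sample paths are hypothetical.

import os

def local_pathify(subdir, path):
    if '$(' in path or os.path.isabs(path):
        return os.path.normpath(path)
    local_path = os.path.normpath(os.path.join('$(LOCAL_PATH)', subdir, path))
    assert local_path.startswith('$(LOCAL_PATH)'), (
        'Path %s attempts to escape from gyp path %s !)' % (path, subdir))
    return local_path

print(local_pathify('net/http', '../base/foo.cc'))
# -> $(LOCAL_PATH)/net/base/foo.cc
print(local_pathify('net/http', '/abs/include/'))
# -> /abs/include
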
ExpectColumnValuesToBeNormallyDistributed.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
233,
4
] | [
245,
85
] | python | en | ['en', 'error', 'th'] | False |
patch_unittest_diff | (test_filter=None) |
Patches "assertEquals" to throw DiffError.
@:param test_filter: callback to check each test. If not None, it should return True for a test; otherwise that test's EqualsAssertionError will be skipped
|
Patches "assertEquals" to throw DiffError. | def patch_unittest_diff(test_filter=None):
"""
Patches "assertEquals" to throw DiffError.
@:param test_filter: callback to check each test. If not None, it should return True for a test; otherwise that test's EqualsAssertionError will be skipped
"""
if sys.version_info < (2, 7):
return
old = unittest.TestCase.assertEqual
def _patched_equals(self, first, second, msg=None):
try:
old(self, first, second, msg)
return
except AssertionError as e:
if not test_filter or test_filter(self):
error = EqualsAssertionError(first, second, msg, real_exception=e)
if error.can_be_serialized():
from .jb_local_exc_store import store_exception
store_exception(error)
raise
unittest.TestCase.assertEqual = _patched_equals | [
"def",
"patch_unittest_diff",
"(",
"test_filter",
"=",
"None",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"7",
")",
":",
"return",
"old",
"=",
"unittest",
".",
"TestCase",
".",
"assertEqual",
"def",
"_patched_equals",
"(",
"self",
",",
"first",
",",
"second",
",",
"msg",
"=",
"None",
")",
":",
"try",
":",
"old",
"(",
"self",
",",
"first",
",",
"second",
",",
"msg",
")",
"return",
"except",
"AssertionError",
"as",
"e",
":",
"if",
"not",
"test_filter",
"or",
"test_filter",
"(",
"self",
")",
":",
"error",
"=",
"EqualsAssertionError",
"(",
"first",
",",
"second",
",",
"msg",
",",
"real_exception",
"=",
"e",
")",
"if",
"error",
".",
"can_be_serialized",
"(",
")",
":",
"from",
".",
"jb_local_exc_store",
"import",
"store_exception",
"store_exception",
"(",
"error",
")",
"raise",
"unittest",
".",
"TestCase",
".",
"assertEqual",
"=",
"_patched_equals"
] | [
19,
0
] | [
41,
51
] | python | en | ['en', 'error', 'th'] | False |
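
Illustrative aside: the same monkey-patching pattern in a generic form. The real helper stores an EqualsAssertionError for the IDE via jb_local_exc_store; this sketch only records the operands of failed assertEqual calls and re-raises.

import unittest

captured = []
_original_assert_equal = unittest.TestCase.assertEqual

def _patched_assert_equal(self, first, second, msg=None):
    try:
        _original_assert_equal(self, first, second, msg)
    except AssertionError:
        captured.append((first, second))  # hand the diff off to tooling here
        raise

unittest.TestCase.assertEqual = _patched_assert_equal
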
Field.get_value | (self, obj, attr, accessor=None, default=missing_) | Return the value for a given key from an object.
:param object obj: The object to get the value from.
:param str attr: The attribute/key in `obj` to get the value from.
:param callable accessor: A callable used to retrieve the value of `attr` from
the object `obj`. Defaults to `marshmallow.utils.get_value`.
| Return the value for a given key from an object. | def get_value(self, obj, attr, accessor=None, default=missing_):
"""Return the value for a given key from an object.
:param object obj: The object to get the value from.
:param str attr: The attribute/key in `obj` to get the value from.
:param callable accessor: A callable used to retrieve the value of `attr` from
the object `obj`. Defaults to `marshmallow.utils.get_value`.
"""
# NOTE: Use getattr instead of direct attribute access here so that
# subclasses aren't required to define `attribute` member
attribute = getattr(self, "attribute", None)
accessor_func = accessor or utils.get_value
check_key = attr if attribute is None else attribute
return accessor_func(obj, check_key, default) | [
"def",
"get_value",
"(",
"self",
",",
"obj",
",",
"attr",
",",
"accessor",
"=",
"None",
",",
"default",
"=",
"missing_",
")",
":",
"# NOTE: Use getattr instead of direct attribute access here so that",
"# subclasses aren't required to define `attribute` member",
"attribute",
"=",
"getattr",
"(",
"self",
",",
"\"attribute\"",
",",
"None",
")",
"accessor_func",
"=",
"accessor",
"or",
"utils",
".",
"get_value",
"check_key",
"=",
"attr",
"if",
"attribute",
"is",
"None",
"else",
"attribute",
"return",
"accessor_func",
"(",
"obj",
",",
"check_key",
",",
"default",
")"
] | [
206,
4
] | [
219,
53
] | python | en | ['en', 'en', 'en'] | True |
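
Illustrative aside: the `attribute` override that get_value honours, shown through the public Schema API (standard marshmallow 3 usage; the schema and data are made up for the example).

from marshmallow import Schema, fields

class UserSchema(Schema):
    # `attribute` redirects get_value to a different key on the source object.
    name = fields.String(attribute="full_name")

print(UserSchema().dump({"full_name": "Ada Lovelace"}))
# -> {'name': 'Ada Lovelace'}
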
Field._validate | (self, value) | Perform validation on ``value``. Raise a :exc:`ValidationError` if validation
does not succeed.
| Perform validation on ``value``. Raise a :exc:`ValidationError` if validation
does not succeed.
| def _validate(self, value):
"""Perform validation on ``value``. Raise a :exc:`ValidationError` if validation
does not succeed.
"""
errors = []
kwargs = {}
for validator in self.validators:
try:
r = validator(value)
if not isinstance(validator, Validator) and r is False:
raise self.make_error("validator_failed")
except ValidationError as err:
kwargs.update(err.kwargs)
if isinstance(err.messages, dict):
errors.append(err.messages)
else:
errors.extend(err.messages)
if errors:
raise ValidationError(errors, **kwargs) | [
"def",
"_validate",
"(",
"self",
",",
"value",
")",
":",
"errors",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"for",
"validator",
"in",
"self",
".",
"validators",
":",
"try",
":",
"r",
"=",
"validator",
"(",
"value",
")",
"if",
"not",
"isinstance",
"(",
"validator",
",",
"Validator",
")",
"and",
"r",
"is",
"False",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"validator_failed\"",
")",
"except",
"ValidationError",
"as",
"err",
":",
"kwargs",
".",
"update",
"(",
"err",
".",
"kwargs",
")",
"if",
"isinstance",
"(",
"err",
".",
"messages",
",",
"dict",
")",
":",
"errors",
".",
"append",
"(",
"err",
".",
"messages",
")",
"else",
":",
"errors",
".",
"extend",
"(",
"err",
".",
"messages",
")",
"if",
"errors",
":",
"raise",
"ValidationError",
"(",
"errors",
",",
"*",
"*",
"kwargs",
")"
] | [
221,
4
] | [
239,
51
] | python | en | ['en', 'ky', 'en'] | True |
Field.make_error | (self, key: str, **kwargs) | Helper method to make a `ValidationError` with an error message
from ``self.error_messages``.
| Helper method to make a `ValidationError` with an error message
from ``self.error_messages``.
| def make_error(self, key: str, **kwargs) -> ValidationError:
"""Helper method to make a `ValidationError` with an error message
from ``self.error_messages``.
"""
try:
msg = self.error_messages[key]
except KeyError as error:
class_name = self.__class__.__name__
message = (
"ValidationError raised by `{class_name}`, but error key `{key}` does "
"not exist in the `error_messages` dictionary."
).format(class_name=class_name, key=key)
raise AssertionError(message) from error
if isinstance(msg, (str, bytes)):
msg = msg.format(**kwargs)
return ValidationError(msg) | [
"def",
"make_error",
"(",
"self",
",",
"key",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
"->",
"ValidationError",
":",
"try",
":",
"msg",
"=",
"self",
".",
"error_messages",
"[",
"key",
"]",
"except",
"KeyError",
"as",
"error",
":",
"class_name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"message",
"=",
"(",
"\"ValidationError raised by `{class_name}`, but error key `{key}` does \"",
"\"not exist in the `error_messages` dictionary.\"",
")",
".",
"format",
"(",
"class_name",
"=",
"class_name",
",",
"key",
"=",
"key",
")",
"raise",
"AssertionError",
"(",
"message",
")",
"from",
"error",
"if",
"isinstance",
"(",
"msg",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"msg",
"=",
"msg",
".",
"format",
"(",
"*",
"*",
"kwargs",
")",
"return",
"ValidationError",
"(",
"msg",
")"
] | [
241,
4
] | [
256,
35
] | python | en | ['en', 'de', 'en'] | True |
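
Illustrative aside: the intended make_error usage pattern in a custom field (marshmallow 3 style; the field class here is made up for the example).

from marshmallow import fields

class UpperString(fields.String):
    default_error_messages = {"not_upper": "Value must be uppercase."}

    def _deserialize(self, value, attr, data, **kwargs):
        value = super()._deserialize(value, attr, data, **kwargs)
        if value != value.upper():
            raise self.make_error("not_upper")
        return value
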
Field.fail | (self, key: str, **kwargs) | Helper method that raises a `ValidationError` with an error message
from ``self.error_messages``.
.. deprecated:: 3.0.0
Use `make_error <marshmallow.fields.Field.make_error>` instead.
| Helper method that raises a `ValidationError` with an error message
from ``self.error_messages``. | def fail(self, key: str, **kwargs):
"""Helper method that raises a `ValidationError` with an error message
from ``self.error_messages``.
.. deprecated:: 3.0.0
Use `make_error <marshmallow.fields.Field.make_error>` instead.
"""
warnings.warn(
'`Field.fail` is deprecated. Use `raise self.make_error("{}", ...)` instead.'.format(
key
),
RemovedInMarshmallow4Warning,
)
raise self.make_error(key=key, **kwargs) | [
"def",
"fail",
"(",
"self",
",",
"key",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"'`Field.fail` is deprecated. Use `raise self.make_error(\"{}\", ...)` instead.'",
".",
"format",
"(",
"key",
")",
",",
"RemovedInMarshmallow4Warning",
",",
")",
"raise",
"self",
".",
"make_error",
"(",
"key",
"=",
"key",
",",
"*",
"*",
"kwargs",
")"
] | [
258,
4
] | [
271,
48
] | python | en | ['en', 'de', 'en'] | True |
Field._validate_missing | (self, value) | Validate missing values. Raise a :exc:`ValidationError` if
`value` should be considered missing.
| Validate missing values. Raise a :exc:`ValidationError` if
`value` should be considered missing.
| def _validate_missing(self, value):
"""Validate missing values. Raise a :exc:`ValidationError` if
`value` should be considered missing.
"""
if value is missing_:
if hasattr(self, "required") and self.required:
raise self.make_error("required")
if value is None:
if hasattr(self, "allow_none") and self.allow_none is not True:
raise self.make_error("null") | [
"def",
"_validate_missing",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"missing_",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"required\"",
")",
"and",
"self",
".",
"required",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"required\"",
")",
"if",
"value",
"is",
"None",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"allow_none\"",
")",
"and",
"self",
".",
"allow_none",
"is",
"not",
"True",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"null\"",
")"
] | [
273,
4
] | [
282,
45
] | python | en | ['en', 'et', 'en'] | True |
Field.serialize | (
self,
attr: str,
obj: typing.Any,
accessor: typing.Callable[[typing.Any, str, typing.Any], typing.Any] = None,
**kwargs
) | Pulls the value for the given key from the object, applies the
field's formatting and returns the result.
:param attr: The attribute/key to get from the object.
:param obj: The object to access the attribute/key from.
:param accessor: Function used to access values from ``obj``.
:param kwargs: Field-specific keyword arguments.
| Pulls the value for the given key from the object, applies the
field's formatting and returns the result. | def serialize(
self,
attr: str,
obj: typing.Any,
accessor: typing.Callable[[typing.Any, str, typing.Any], typing.Any] = None,
**kwargs
):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result.
:param attr: The attribute/key to get from the object.
:param obj: The object to access the attribute/key from.
:param accessor: Function used to access values from ``obj``.
:param kwargs: Field-specific keyword arguments.
"""
if self._CHECK_ATTRIBUTE:
value = self.get_value(obj, attr, accessor=accessor)
if value is missing_ and hasattr(self, "default"):
default = self.default
value = default() if callable(default) else default
if value is missing_:
return value
else:
value = None
return self._serialize(value, attr, obj, **kwargs) | [
"def",
"serialize",
"(",
"self",
",",
"attr",
":",
"str",
",",
"obj",
":",
"typing",
".",
"Any",
",",
"accessor",
":",
"typing",
".",
"Callable",
"[",
"[",
"typing",
".",
"Any",
",",
"str",
",",
"typing",
".",
"Any",
"]",
",",
"typing",
".",
"Any",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_CHECK_ATTRIBUTE",
":",
"value",
"=",
"self",
".",
"get_value",
"(",
"obj",
",",
"attr",
",",
"accessor",
"=",
"accessor",
")",
"if",
"value",
"is",
"missing_",
"and",
"hasattr",
"(",
"self",
",",
"\"default\"",
")",
":",
"default",
"=",
"self",
".",
"default",
"value",
"=",
"default",
"(",
")",
"if",
"callable",
"(",
"default",
")",
"else",
"default",
"if",
"value",
"is",
"missing_",
":",
"return",
"value",
"else",
":",
"value",
"=",
"None",
"return",
"self",
".",
"_serialize",
"(",
"value",
",",
"attr",
",",
"obj",
",",
"*",
"*",
"kwargs",
")"
] | [
284,
4
] | [
308,
58
] | python | en | ['en', 'en', 'en'] | True |
Field.deserialize | (
self,
value: typing.Any,
attr: str = None,
data: typing.Mapping[str, typing.Any] = None,
**kwargs
) | Deserialize ``value``.
:param value: The value to deserialize.
:param attr: The attribute/key in `data` to deserialize.
:param data: The raw input data passed to `Schema.load`.
:param kwargs: Field-specific keyword arguments.
:raise ValidationError: If an invalid value is passed or if a required value
is missing.
| Deserialize ``value``. | def deserialize(
self,
value: typing.Any,
attr: str = None,
data: typing.Mapping[str, typing.Any] = None,
**kwargs
):
"""Deserialize ``value``.
:param value: The value to deserialize.
:param attr: The attribute/key in `data` to deserialize.
:param data: The raw input data passed to `Schema.load`.
:param kwargs: Field-specific keyword arguments.
:raise ValidationError: If an invalid value is passed or if a required value
is missing.
"""
# Validate required fields, deserialize, then validate
# deserialized value
self._validate_missing(value)
if value is missing_:
_miss = self.missing
return _miss() if callable(_miss) else _miss
if getattr(self, "allow_none", False) is True and value is None:
return None
output = self._deserialize(value, attr, data, **kwargs)
self._validate(output)
return output | [
"def",
"deserialize",
"(",
"self",
",",
"value",
":",
"typing",
".",
"Any",
",",
"attr",
":",
"str",
"=",
"None",
",",
"data",
":",
"typing",
".",
"Mapping",
"[",
"str",
",",
"typing",
".",
"Any",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Validate required fields, deserialize, then validate",
"# deserialized value",
"self",
".",
"_validate_missing",
"(",
"value",
")",
"if",
"value",
"is",
"missing_",
":",
"_miss",
"=",
"self",
".",
"missing",
"return",
"_miss",
"(",
")",
"if",
"callable",
"(",
"_miss",
")",
"else",
"_miss",
"if",
"getattr",
"(",
"self",
",",
"\"allow_none\"",
",",
"False",
")",
"is",
"True",
"and",
"value",
"is",
"None",
":",
"return",
"None",
"output",
"=",
"self",
".",
"_deserialize",
"(",
"value",
",",
"attr",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_validate",
"(",
"output",
")",
"return",
"output"
] | [
310,
4
] | [
336,
21
] | python | da | ['en', 'da', 'it'] | False |
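
Illustrative aside: the order of operations documented above (missing check, missing default, allow_none short-circuit, _deserialize, validators), shown with a stock field. This uses the marshmallow 3 `missing` parameter as in the code above; newer releases spell the same thing `load_default`.

from marshmallow import fields, missing

f = fields.Integer(allow_none=True, missing=0, validate=lambda n: n >= 0)

print(f.deserialize(missing))  # 0    (missing default; validators are not run)
print(f.deserialize(None))     # None (allow_none short-circuits _deserialize)
print(f.deserialize("7"))      # 7    (converted, then validated)
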
Field._bind_to_schema | (self, field_name, schema) | Update field with values from its parent schema. Called by
:meth:`Schema._bind_field <marshmallow.Schema._bind_field>`.
:param str field_name: Field name set in schema.
:param Schema schema: Parent schema.
| Update field with values from its parent schema. Called by
:meth:`Schema._bind_field <marshmallow.Schema._bind_field>`. | def _bind_to_schema(self, field_name, schema):
"""Update field with values from its parent schema. Called by
:meth:`Schema._bind_field <marshmallow.Schema._bind_field>`.
:param str field_name: Field name set in schema.
:param Schema schema: Parent schema.
"""
self.parent = self.parent or schema
self.name = self.name or field_name | [
"def",
"_bind_to_schema",
"(",
"self",
",",
"field_name",
",",
"schema",
")",
":",
"self",
".",
"parent",
"=",
"self",
".",
"parent",
"or",
"schema",
"self",
".",
"name",
"=",
"self",
".",
"name",
"or",
"field_name"
] | [
340,
4
] | [
348,
43
] | python | en | ['en', 'en', 'en'] | True |
Field._serialize | (self, value: typing.Any, attr: str, obj: typing.Any, **kwargs) | Serializes ``value`` to a basic Python datatype. Noop by default.
Concrete :class:`Field` classes should implement this method.
Example: ::
class TitleCase(Field):
def _serialize(self, value, attr, obj, **kwargs):
if not value:
return ''
return str(value).title()
:param value: The value to be serialized.
:param str attr: The attribute or key on the object to be serialized.
:param object obj: The object the value was pulled from.
:param dict kwargs: Field-specific keyword arguments.
:return: The serialized value
| Serializes ``value`` to a basic Python datatype. Noop by default.
Concrete :class:`Field` classes should implement this method. | def _serialize(self, value: typing.Any, attr: str, obj: typing.Any, **kwargs):
"""Serializes ``value`` to a basic Python datatype. Noop by default.
Concrete :class:`Field` classes should implement this method.
Example: ::
class TitleCase(Field):
def _serialize(self, value, attr, obj, **kwargs):
if not value:
return ''
return str(value).title()
:param value: The value to be serialized.
:param str attr: The attribute or key on the object to be serialized.
:param object obj: The object the value was pulled from.
:param dict kwargs: Field-specific keyword arguments.
:return: The serialized value
"""
return value | [
"def",
"_serialize",
"(",
"self",
",",
"value",
":",
"typing",
".",
"Any",
",",
"attr",
":",
"str",
",",
"obj",
":",
"typing",
".",
"Any",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"value"
] | [
350,
4
] | [
368,
20
] | python | en | ['en', 'en', 'en'] | True |
Field._deserialize | (
self,
value: typing.Any,
attr: typing.Optional[str],
data: typing.Optional[typing.Mapping[str, typing.Any]],
**kwargs
) | Deserialize value. Concrete :class:`Field` classes should implement this method.
:param value: The value to be deserialized.
:param attr: The attribute/key in `data` to be deserialized.
:param data: The raw input data passed to the `Schema.load`.
:param kwargs: Field-specific keyword arguments.
:raise ValidationError: In case of formatting or validation failure.
:return: The deserialized value.
.. versionchanged:: 2.0.0
Added ``attr`` and ``data`` parameters.
.. versionchanged:: 3.0.0
Added ``**kwargs`` to signature.
| Deserialize value. Concrete :class:`Field` classes should implement this method. | def _deserialize(
self,
value: typing.Any,
attr: typing.Optional[str],
data: typing.Optional[typing.Mapping[str, typing.Any]],
**kwargs
):
"""Deserialize value. Concrete :class:`Field` classes should implement this method.
:param value: The value to be deserialized.
:param attr: The attribute/key in `data` to be deserialized.
:param data: The raw input data passed to the `Schema.load`.
:param kwargs: Field-specific keyword arguments.
:raise ValidationError: In case of formatting or validation failure.
:return: The deserialized value.
.. versionchanged:: 2.0.0
Added ``attr`` and ``data`` parameters.
.. versionchanged:: 3.0.0
Added ``**kwargs`` to signature.
"""
return value | [
"def",
"_deserialize",
"(",
"self",
",",
"value",
":",
"typing",
".",
"Any",
",",
"attr",
":",
"typing",
".",
"Optional",
"[",
"str",
"]",
",",
"data",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Mapping",
"[",
"str",
",",
"typing",
".",
"Any",
"]",
"]",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"value"
] | [
370,
4
] | [
392,
20
] | python | en | ['en', 'fr', 'en'] | True |
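The two records above are the standard extension points for custom fields: override ``_serialize`` for dumping and ``_deserialize`` for loading. A minimal runnable sketch, with the ``LowerCased`` field and ``UserSchema`` names invented for illustration rather than taken from the dataset: ::

    from marshmallow import Schema, ValidationError, fields

    class LowerCased(fields.Field):
        # Dump side: coerce to a lower-cased string (None passes through).
        def _serialize(self, value, attr, obj, **kwargs):
            if value is None:
                return None
            return str(value).lower()

        # Load side: same coercion, but bad input is reported as a ValidationError.
        def _deserialize(self, value, attr, data, **kwargs):
            if not isinstance(value, str):
                raise ValidationError("Not a valid string.")
            return value.lower()

    class UserSchema(Schema):
        email = LowerCased()

    UserSchema().dump({"email": "Alice@Example.COM"})   # {'email': 'alice@example.com'}
    UserSchema().load({"email": "Alice@Example.COM"})   # {'email': 'alice@example.com'}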
Field.context | (self) | The context dictionary for the parent :class:`Schema`. | The context dictionary for the parent :class:`Schema`. | def context(self):
"""The context dictionary for the parent :class:`Schema`."""
return self.parent.context | [
"def",
"context",
"(",
"self",
")",
":",
"return",
"self",
".",
"parent",
".",
"context"
] | [
397,
4
] | [
399,
34
] | python | en | ['en', 'en', 'en'] | True |
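Because ``context`` simply proxies the parent schema's dict, a bound field can read per-call state while (de)serializing. A hedged sketch, with field, schema, and key names invented for illustration: ::

    from marshmallow import Schema, fields

    class Greeting(fields.Field):
        def _serialize(self, value, attr, obj, **kwargs):
            # self.context is self.parent.context once the field is bound.
            prefix = self.context.get("prefix", "Hello")
            return "{}, {}".format(prefix, value)

    class GreetingSchema(Schema):
        name = Greeting()

    schema = GreetingSchema()
    schema.context["prefix"] = "Hei"
    schema.dump({"name": "Maija"})   # {'name': 'Hei, Maija'}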
Field.root | (self) | Reference to the `Schema` that this field belongs to even if it is buried in a
container field (e.g. `List`).
Return `None` for unbound fields.
| Reference to the `Schema` that this field belongs to even if it is buried in a
container field (e.g. `List`).
Return `None` for unbound fields.
| def root(self):
"""Reference to the `Schema` that this field belongs to even if it is buried in a
container field (e.g. `List`).
Return `None` for unbound fields.
"""
ret = self
while hasattr(ret, "parent"):
ret = ret.parent
return ret if isinstance(ret, SchemaABC) else None | [
"def",
"root",
"(",
"self",
")",
":",
"ret",
"=",
"self",
"while",
"hasattr",
"(",
"ret",
",",
"\"parent\"",
")",
":",
"ret",
"=",
"ret",
".",
"parent",
"return",
"ret",
"if",
"isinstance",
"(",
"ret",
",",
"SchemaABC",
")",
"else",
"None"
] | [
402,
4
] | [
410,
58
] | python | en | ['en', 'en', 'en'] | True |
Nested.schema | (self) | The nested Schema object.
.. versionchanged:: 1.0.0
Renamed from `serializer` to `schema`.
| The nested Schema object. | def schema(self):
"""The nested Schema object.
.. versionchanged:: 1.0.0
Renamed from `serializer` to `schema`.
"""
if not self._schema:
# Inherit context from parent.
context = getattr(self.parent, "context", {})
if callable(self.nested) and not isinstance(self.nested, type):
nested = self.nested()
else:
nested = self.nested
if isinstance(nested, SchemaABC):
self._schema = copy.copy(nested)
self._schema.context.update(context)
# Respect only and exclude passed from parent and re-initialize fields
set_class = self._schema.set_class
if self.only is not None:
if self._schema.only is not None:
original = self._schema.only
else: # only=None -> all fields
original = self._schema.fields.keys()
self._schema.only = set_class(self.only) & set_class(original)
if self.exclude:
original = self._schema.exclude
self._schema.exclude = set_class(self.exclude) | set_class(original)
self._schema._init_fields()
else:
if isinstance(nested, type) and issubclass(nested, SchemaABC):
schema_class = nested
elif not isinstance(nested, (str, bytes)):
raise ValueError(
"`Nested` fields must be passed a "
"`Schema`, not {}.".format(nested.__class__)
)
elif nested == "self":
schema_class = self.root.__class__
else:
schema_class = class_registry.get_class(nested)
self._schema = schema_class(
many=self.many,
only=self.only,
exclude=self.exclude,
context=context,
load_only=self._nested_normalized_option("load_only"),
dump_only=self._nested_normalized_option("dump_only"),
)
return self._schema | [
"def",
"schema",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_schema",
":",
"# Inherit context from parent.",
"context",
"=",
"getattr",
"(",
"self",
".",
"parent",
",",
"\"context\"",
",",
"{",
"}",
")",
"if",
"callable",
"(",
"self",
".",
"nested",
")",
"and",
"not",
"isinstance",
"(",
"self",
".",
"nested",
",",
"type",
")",
":",
"nested",
"=",
"self",
".",
"nested",
"(",
")",
"else",
":",
"nested",
"=",
"self",
".",
"nested",
"if",
"isinstance",
"(",
"nested",
",",
"SchemaABC",
")",
":",
"self",
".",
"_schema",
"=",
"copy",
".",
"copy",
"(",
"nested",
")",
"self",
".",
"_schema",
".",
"context",
".",
"update",
"(",
"context",
")",
"# Respect only and exclude passed from parent and re-initialize fields",
"set_class",
"=",
"self",
".",
"_schema",
".",
"set_class",
"if",
"self",
".",
"only",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_schema",
".",
"only",
"is",
"not",
"None",
":",
"original",
"=",
"self",
".",
"_schema",
".",
"only",
"else",
":",
"# only=None -> all fields",
"original",
"=",
"self",
".",
"_schema",
".",
"fields",
".",
"keys",
"(",
")",
"self",
".",
"_schema",
".",
"only",
"=",
"set_class",
"(",
"self",
".",
"only",
")",
"&",
"set_class",
"(",
"original",
")",
"if",
"self",
".",
"exclude",
":",
"original",
"=",
"self",
".",
"_schema",
".",
"exclude",
"self",
".",
"_schema",
".",
"exclude",
"=",
"set_class",
"(",
"self",
".",
"exclude",
")",
"|",
"set_class",
"(",
"original",
")",
"self",
".",
"_schema",
".",
"_init_fields",
"(",
")",
"else",
":",
"if",
"isinstance",
"(",
"nested",
",",
"type",
")",
"and",
"issubclass",
"(",
"nested",
",",
"SchemaABC",
")",
":",
"schema_class",
"=",
"nested",
"elif",
"not",
"isinstance",
"(",
"nested",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"`Nested` fields must be passed a \"",
"\"`Schema`, not {}.\"",
".",
"format",
"(",
"nested",
".",
"__class__",
")",
")",
"elif",
"nested",
"==",
"\"self\"",
":",
"schema_class",
"=",
"self",
".",
"root",
".",
"__class__",
"else",
":",
"schema_class",
"=",
"class_registry",
".",
"get_class",
"(",
"nested",
")",
"self",
".",
"_schema",
"=",
"schema_class",
"(",
"many",
"=",
"self",
".",
"many",
",",
"only",
"=",
"self",
".",
"only",
",",
"exclude",
"=",
"self",
".",
"exclude",
",",
"context",
"=",
"context",
",",
"load_only",
"=",
"self",
".",
"_nested_normalized_option",
"(",
"\"load_only\"",
")",
",",
"dump_only",
"=",
"self",
".",
"_nested_normalized_option",
"(",
"\"dump_only\"",
")",
",",
")",
"return",
"self",
".",
"_schema"
] | [
497,
4
] | [
546,
27
] | python | en | ['en', 'en', 'en'] | True |
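In use, the property above is what lets ``Nested`` accept a schema class, an instance, a callable, or the registered name ``"self"``, and what merges ``only``/``exclude`` passed from the parent into the nested schema's own settings. A small illustrative sketch, with schema and field names invented: ::

    from marshmallow import Schema, fields

    class AuthorSchema(Schema):
        name = fields.Str()
        email = fields.Str()
        bio = fields.Str()

    class BookSchema(Schema):
        title = fields.Str()
        # Parent-supplied only/exclude are intersected/unioned with the
        # nested schema's own settings before its fields are rebuilt.
        author = fields.Nested(AuthorSchema, only=("name", "email"))
        # "self" resolves to the enclosing schema's class via the registry;
        # excluding the field itself avoids infinite recursion.
        sequel = fields.Nested("self", exclude=("sequel",))

    book = {"title": "Dune",
            "author": {"name": "F. Herbert", "email": "fh@example.com", "bio": "..."}}
    BookSchema().dump(book)   # author is dumped without 'bio'; 'sequel' is simply absent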
Nested._deserialize | (self, value, attr, data, partial=None, **kwargs) | Same as :meth:`Field._deserialize` with additional ``partial`` argument.
:param bool|tuple partial: For nested schemas, the ``partial``
parameter passed to `Schema.load`.
.. versionchanged:: 3.0.0
Add ``partial`` parameter.
| Same as :meth:`Field._deserialize` with additional ``partial`` argument. | def _deserialize(self, value, attr, data, partial=None, **kwargs):
"""Same as :meth:`Field._deserialize` with additional ``partial`` argument.
:param bool|tuple partial: For nested schemas, the ``partial``
parameter passed to `Schema.load`.
.. versionchanged:: 3.0.0
Add ``partial`` parameter.
"""
self._test_collection(value)
return self._load(value, data, partial=partial) | [
"def",
"_deserialize",
"(",
"self",
",",
"value",
",",
"attr",
",",
"data",
",",
"partial",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_test_collection",
"(",
"value",
")",
"return",
"self",
".",
"_load",
"(",
"value",
",",
"data",
",",
"partial",
"=",
"partial",
")"
] | [
579,
4
] | [
589,
55
] | python | en | ['en', 'en', 'en'] | True |
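A brief sketch of the ``partial`` behaviour documented above: when the outer ``load`` is partial, required fields missing from the nested payload stop raising. Schema names here are invented: ::

    from marshmallow import Schema, fields

    class AddressSchema(Schema):
        street = fields.Str(required=True)
        city = fields.Str(required=True)

    class CustomerSchema(Schema):
        name = fields.Str(required=True)
        address = fields.Nested(AddressSchema)

    # partial=True is forwarded to the nested schema, so the missing
    # required 'street' key does not trigger a ValidationError.
    CustomerSchema().load({"name": "Ada", "address": {"city": "Turku"}}, partial=True)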
UUID._validated | (self, value) | Format the value or raise a :exc:`ValidationError` if an error occurs. | Format the value or raise a :exc:`ValidationError` if an error occurs. | def _validated(self, value) -> typing.Optional[uuid.UUID]:
"""Format the value or raise a :exc:`ValidationError` if an error occurs."""
if value is None:
return None
if isinstance(value, uuid.UUID):
return value
try:
if isinstance(value, bytes) and len(value) == 16:
return uuid.UUID(bytes=value)
else:
return uuid.UUID(value)
except (ValueError, AttributeError, TypeError) as error:
raise self.make_error("invalid_uuid") from error | [
"def",
"_validated",
"(",
"self",
",",
"value",
")",
"->",
"typing",
".",
"Optional",
"[",
"uuid",
".",
"UUID",
"]",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"uuid",
".",
"UUID",
")",
":",
"return",
"value",
"try",
":",
"if",
"isinstance",
"(",
"value",
",",
"bytes",
")",
"and",
"len",
"(",
"value",
")",
"==",
"16",
":",
"return",
"uuid",
".",
"UUID",
"(",
"bytes",
"=",
"value",
")",
"else",
":",
"return",
"uuid",
".",
"UUID",
"(",
"value",
")",
"except",
"(",
"ValueError",
",",
"AttributeError",
",",
"TypeError",
")",
"as",
"error",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"invalid_uuid\"",
")",
"from",
"error"
] | [
832,
4
] | [
844,
60
] | python | en | ['en', 'en', 'en'] | True |
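A runnable sketch of what that validation path accepts, namely an existing ``uuid.UUID``, a 16-byte value, or a hex string; the ``TokenSchema`` name is invented: ::

    import uuid
    from marshmallow import Schema, ValidationError, fields

    class TokenSchema(Schema):
        id = fields.UUID()

    u = uuid.uuid4()
    TokenSchema().load({"id": str(u)})     # hex string -> uuid.UUID
    TokenSchema().load({"id": u.bytes})    # exactly 16 raw bytes -> uuid.UUID
    try:
        TokenSchema().load({"id": "not-a-uuid"})
    except ValidationError as err:
        print(err.messages)                # {'id': ['Not a valid UUID.']}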
Number._format_num | (self, value) | Return the number value for value, given this field's `num_type`. | Return the number value for value, given this field's `num_type`. | def _format_num(self, value) -> typing.Any:
"""Return the number value for value, given this field's `num_type`."""
return self.num_type(value) | [
"def",
"_format_num",
"(",
"self",
",",
"value",
")",
"->",
"typing",
".",
"Any",
":",
"return",
"self",
".",
"num_type",
"(",
"value",
")"
] | [
869,
4
] | [
871,
35
] | python | en | ['en', 'en', 'en'] | True |
Number._validated | (self, value) | Format the value or raise a :exc:`ValidationError` if an error occurs. | Format the value or raise a :exc:`ValidationError` if an error occurs. | def _validated(self, value) -> typing.Optional[_T]:
"""Format the value or raise a :exc:`ValidationError` if an error occurs."""
if value is None:
return None
# (value is True or value is False) is ~5x faster than isinstance(value, bool)
if value is True or value is False:
raise self.make_error("invalid", input=value)
try:
return self._format_num(value)
except (TypeError, ValueError) as error:
raise self.make_error("invalid", input=value) from error
except OverflowError as error:
raise self.make_error("too_large", input=value) from error | [
"def",
"_validated",
"(",
"self",
",",
"value",
")",
"->",
"typing",
".",
"Optional",
"[",
"_T",
"]",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"# (value is True or value is False) is ~5x faster than isinstance(value, bool)",
"if",
"value",
"is",
"True",
"or",
"value",
"is",
"False",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"invalid\"",
",",
"input",
"=",
"value",
")",
"try",
":",
"return",
"self",
".",
"_format_num",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"error",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"invalid\"",
",",
"input",
"=",
"value",
")",
"from",
"error",
"except",
"OverflowError",
"as",
"error",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"too_large\"",
",",
"input",
"=",
"value",
")",
"from",
"error"
] | [
873,
4
] | [
885,
70
] | python | en | ['en', 'en', 'en'] | True |
Number._serialize | (
self, value, attr, obj, **kwargs
) | Return a string if `self.as_string=True`, otherwise return this field's `num_type`. | Return a string if `self.as_string=True`, otherwise return this field's `num_type`. | def _serialize(
self, value, attr, obj, **kwargs
) -> typing.Optional[typing.Union[str, _T]]:
"""Return a string if `self.as_string=True`, otherwise return this field's `num_type`."""
if value is None:
return None
ret = self._format_num(value) # type: _T
return self._to_string(ret) if self.as_string else ret | [
"def",
"_serialize",
"(",
"self",
",",
"value",
",",
"attr",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
"->",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Union",
"[",
"str",
",",
"_T",
"]",
"]",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"ret",
"=",
"self",
".",
"_format_num",
"(",
"value",
")",
"# type: _T",
"return",
"self",
".",
"_to_string",
"(",
"ret",
")",
"if",
"self",
".",
"as_string",
"else",
"ret"
] | [
890,
4
] | [
897,
62
] | python | en | ['en', 'en', 'en'] | True |
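Putting the Number records together: booleans are rejected on load even though they are ints, and ``as_string=True`` only changes the dumped representation. A hedged sketch, with schema and field names invented: ::

    from marshmallow import Schema, ValidationError, fields

    class PriceSchema(Schema):
        amount = fields.Float(as_string=True)   # dumped as a string, loaded as a float
        quantity = fields.Integer()

    PriceSchema().dump({"amount": 9.99, "quantity": 3})
    # -> {'amount': '9.99', 'quantity': 3}

    try:
        PriceSchema().load({"amount": "9.99", "quantity": True})
    except ValidationError as err:
        print(err.messages)   # the boolean is rejected: {'quantity': ['Not a valid integer.']}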