identifier (string, 1-155) | parameters (string, 2-6.09k) | docstring (string, 11-63.4k) | docstring_summary (string, 0-63.4k) | function (string, 29-99.8k) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2-7) | docstring_language_predictions (string, 18-23) | is_langid_reliable (2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|
serve_tcp | (
handler,
port,
*,
host=None,
backlog=None,
handler_nursery=None,
task_status=trio.TASK_STATUS_IGNORED,
) | Listen for incoming TCP connections, and for each one start a task
running ``handler(stream)``.
This is a thin convenience wrapper around :func:`open_tcp_listeners` and
:func:`serve_listeners` – see them for full details.
.. warning::
If ``handler`` raises an exception, then this function doesn't do
anything special to catch it – so by default the exception will
propagate out and crash your server. If you don't want this, then catch
exceptions inside your ``handler``, or use a ``handler_nursery`` object
that responds to exceptions in some other way.
When used with ``nursery.start`` you get back the newly opened listeners.
So, for example, if you want to start a server in your test suite and then
connect to it to check that it's working properly, you can use something
like::
from trio.testing import open_stream_to_socket_listener
async with trio.open_nursery() as nursery:
listeners = await nursery.start(serve_tcp, handler, 0)
client_stream = await open_stream_to_socket_listener(listeners[0])
# Then send and receive data on 'client_stream', for example:
await client_stream.send_all(b"GET / HTTP/1.0\\r\\n\\r\\n")
This avoids several common pitfalls:
1. It lets the kernel pick a random open port, so your test suite doesn't
depend on any particular port being open.
2. It waits for the server to be accepting connections on that port before
``start`` returns, so there's no race condition where the incoming
connection arrives before the server is ready.
3. It uses the Listener object to find out which port was picked, so it
can connect to the right place.
Args:
handler: The handler to start for each incoming connection. Passed to
:func:`serve_listeners`.
port: The port to listen on. Use 0 to let the kernel pick an open port.
Passed to :func:`open_tcp_listeners`.
host (str, bytes, or None): The host interface to listen on; use
``None`` to bind to the wildcard address. Passed to
:func:`open_tcp_listeners`.
backlog: The listen backlog, or None to have a good default picked.
Passed to :func:`open_tcp_listeners`.
handler_nursery: The nursery to start handlers in, or None to use an
internal nursery. Passed to :func:`serve_listeners`.
task_status: This function can be used with ``nursery.start``.
Returns:
This function only returns when cancelled.
| Listen for incoming TCP connections, and for each one start a task
running ``handler(stream)``. | async def serve_tcp(
handler,
port,
*,
host=None,
backlog=None,
handler_nursery=None,
task_status=trio.TASK_STATUS_IGNORED,
):
"""Listen for incoming TCP connections, and for each one start a task
running ``handler(stream)``.
This is a thin convenience wrapper around :func:`open_tcp_listeners` and
:func:`serve_listeners` – see them for full details.
.. warning::
If ``handler`` raises an exception, then this function doesn't do
anything special to catch it – so by default the exception will
propagate out and crash your server. If you don't want this, then catch
exceptions inside your ``handler``, or use a ``handler_nursery`` object
that responds to exceptions in some other way.
When used with ``nursery.start`` you get back the newly opened listeners.
So, for example, if you want to start a server in your test suite and then
connect to it to check that it's working properly, you can use something
like::
from trio.testing import open_stream_to_socket_listener
async with trio.open_nursery() as nursery:
listeners = await nursery.start(serve_tcp, handler, 0)
client_stream = await open_stream_to_socket_listener(listeners[0])
# Then send and receive data on 'client_stream', for example:
await client_stream.send_all(b"GET / HTTP/1.0\\r\\n\\r\\n")
This avoids several common pitfalls:
1. It lets the kernel pick a random open port, so your test suite doesn't
depend on any particular port being open.
2. It waits for the server to be accepting connections on that port before
``start`` returns, so there's no race condition where the incoming
connection arrives before the server is ready.
3. It uses the Listener object to find out which port was picked, so it
can connect to the right place.
Args:
handler: The handler to start for each incoming connection. Passed to
:func:`serve_listeners`.
port: The port to listen on. Use 0 to let the kernel pick an open port.
Passed to :func:`open_tcp_listeners`.
host (str, bytes, or None): The host interface to listen on; use
``None`` to bind to the wildcard address. Passed to
:func:`open_tcp_listeners`.
backlog: The listen backlog, or None to have a good default picked.
Passed to :func:`open_tcp_listeners`.
handler_nursery: The nursery to start handlers in, or None to use an
internal nursery. Passed to :func:`serve_listeners`.
task_status: This function can be used with ``nursery.start``.
Returns:
This function only returns when cancelled.
"""
listeners = await trio.open_tcp_listeners(port, host=host, backlog=backlog)
await trio.serve_listeners(
handler, listeners, handler_nursery=handler_nursery, task_status=task_status
) | [
"async",
"def",
"serve_tcp",
"(",
"handler",
",",
"port",
",",
"*",
",",
"host",
"=",
"None",
",",
"backlog",
"=",
"None",
",",
"handler_nursery",
"=",
"None",
",",
"task_status",
"=",
"trio",
".",
"TASK_STATUS_IGNORED",
",",
")",
":",
"listeners",
"=",
"await",
"trio",
".",
"open_tcp_listeners",
"(",
"port",
",",
"host",
"=",
"host",
",",
"backlog",
"=",
"backlog",
")",
"await",
"trio",
".",
"serve_listeners",
"(",
"handler",
",",
"listeners",
",",
"handler_nursery",
"=",
"handler_nursery",
",",
"task_status",
"=",
"task_status",
")"
] | [
145,
0
] | [
220,
5
] | python | en | ['en', 'en', 'en'] | True |
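A minimal runnable sketch of the pattern this docstring describes, using only the public trio APIs referenced above; the echo handler and the choice of port 0 are illustrative:

```python
import trio

async def echo_handler(stream):
    # Illustrative handler: echo bytes back until the peer closes.
    while True:
        data = await stream.receive_some(4096)
        if not data:
            return
        await stream.send_all(data)

async def main():
    async with trio.open_nursery() as nursery:
        # Port 0 lets the kernel pick a free port; nursery.start returns
        # the listeners only once they are accepting connections.
        listeners = await nursery.start(trio.serve_tcp, echo_handler, 0)
        print("listening on", listeners[0].socket.getsockname())
        # serve_tcp only returns when cancelled, so the nursery keeps
        # the server running from here on.

trio.run(main)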
fix_calldef_decls | (decls, enums, cxx_std) |
Sometimes gccxml reports typedefs defined in no namespace.
This happens, for example, in the following situation:
template< typename X>
void ddd(){ typedef typename X::Y YY;}
If I hit this bug again, the right way to fix it may be
different.
|
Sometimes gccxml reports typedefs defined in no namespace.
This happens, for example, in the following situation:
template< typename X>
void ddd(){ typedef typename X::Y YY;}
If I hit this bug again, the right way to fix it may be
different.
| def fix_calldef_decls(decls, enums, cxx_std):
"""
Sometimes gccxml reports typedefs defined in no namespace.
This happens, for example, in the following situation:
template< typename X>
void ddd(){ typedef typename X::Y YY;}
If I hit this bug again, the right way to fix it may be
different.
"""
default_arg_patcher = default_argument_patcher_t(enums, cxx_std)
# decls should be a flat list of all the declarations you want to apply the patch on
for decl in decls:
default_arg_patcher(decl)
if isinstance(decl, declarations.casting_operator_t):
_casting_oper_patcher_(decl) | [
"def",
"fix_calldef_decls",
"(",
"decls",
",",
"enums",
",",
"cxx_std",
")",
":",
"default_arg_patcher",
"=",
"default_argument_patcher_t",
"(",
"enums",
",",
"cxx_std",
")",
"# decls should be flat list of all declarations, you want to apply patch on",
"for",
"decl",
"in",
"decls",
":",
"default_arg_patcher",
"(",
"decl",
")",
"if",
"isinstance",
"(",
"decl",
",",
"declarations",
".",
"casting_operator_t",
")",
":",
"_casting_oper_patcher_",
"(",
"decl",
")"
] | [
248,
0
] | [
262,
40
] | python | en | ['en', 'error', 'th'] | False |
update_unnamed_class | (decls) |
Adds a name to class_t declarations.
If CastXML is being used, the type definitions with an unnamed
class/struct are split across two nodes in the XML tree. For example,
typedef struct {} cls;
produces
<Struct id="_7" name="" context="_1" .../>
<Typedef id="_8" name="cls" type="_7" context="_1" .../>
For each typedef, we look at which class it refers to, and update the name
accordingly. This helps the matcher classes find these declarations.
This was the behaviour with gccxml too, so this is important for
backward compatibility.
If castxml epic version 1 is used, there is even an elaborated type
declaration between the typedef and the struct/class, which also needs to be
taken care of.
Args:
decls (list[declaration_t]): a list of declarations to be patched.
Returns:
None
|
Adds a name to class_t declarations. | def update_unnamed_class(decls):
"""
Adds a name to class_t declarations.
If CastXML is being used, the type definitions with an unnamed
class/struct are split across two nodes in the XML tree. For example,
typedef struct {} cls;
produces
<Struct id="_7" name="" context="_1" .../>
<Typedef id="_8" name="cls" type="_7" context="_1" .../>
For each typedef, we look at which class it refers to, and update the name
accordingly. This helps the matcher classes find these declarations.
This was the behaviour with gccxml too, so this is important for
backward compatibility.
If castxml epic version 1 is used, there is even an elaborated type
declaration between the typedef and the struct/class, which also needs to be
taken care of.
Args:
decls (list[declaration_t]): a list of declarations to be patched.
Returns:
None
"""
for decl in decls:
if isinstance(decl, declarations.typedef_t):
referent = decl.decl_type
if isinstance(referent, declarations.elaborated_t):
referent = referent.base
if not isinstance(referent, declarations.declarated_t):
continue
referent = referent.declaration
if referent.name or not isinstance(referent, declarations.class_t):
continue
referent.name = decl.name | [
"def",
"update_unnamed_class",
"(",
"decls",
")",
":",
"for",
"decl",
"in",
"decls",
":",
"if",
"isinstance",
"(",
"decl",
",",
"declarations",
".",
"typedef_t",
")",
":",
"referent",
"=",
"decl",
".",
"decl_type",
"if",
"isinstance",
"(",
"referent",
",",
"declarations",
".",
"elaborated_t",
")",
":",
"referent",
"=",
"referent",
".",
"base",
"if",
"not",
"isinstance",
"(",
"referent",
",",
"declarations",
".",
"declarated_t",
")",
":",
"continue",
"referent",
"=",
"referent",
".",
"declaration",
"if",
"referent",
".",
"name",
"or",
"not",
"isinstance",
"(",
"referent",
",",
"declarations",
".",
"class_t",
")",
":",
"continue",
"referent",
".",
"name",
"=",
"decl",
".",
"name"
] | [
265,
0
] | [
304,
37
] | python | en | ['en', 'error', 'th'] | False |
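A duck-typed sketch of the referent chase above, using hypothetical stub classes in place of pygccxml's declaration types; every name here is a stand-in for illustration, not the library's real API:

```python
from dataclasses import dataclass

@dataclass
class StubClass:          # stand-in for declarations.class_t
    name: str = ""

@dataclass
class StubDeclarated:     # stand-in for declarations.declarated_t
    declaration: StubClass

@dataclass
class StubTypedef:        # stand-in for declarations.typedef_t
    name: str
    decl_type: StubDeclarated

# typedef struct {} cls;  ->  the unnamed struct borrows the typedef's name.
unnamed = StubClass()
td = StubTypedef(name="cls", decl_type=StubDeclarated(declaration=unnamed))
referent = td.decl_type.declaration
if not referent.name:
    referent.name = td.name
assert unnamed.name == "cls"
```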
test_anonymize_datasource_info_v2_api_custom_subclass | () |
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
Custom v2 API Datasources should continue to be supported.
|
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
Custom v2 API Datasources should continue to be supported.
| def test_anonymize_datasource_info_v2_api_custom_subclass():
"""
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
Custom v2 API Datasources should continue to be supported.
"""
name = "test_pandas_datasource"
yaml_config = f"""
module_name: tests.data_context.fixtures.plugins.my_custom_v2_api_datasource
class_name: MyCustomV2ApiDatasource
"""
config: CommentedMap = yaml.load(yaml_config)
datasource_anonymizer = DatasourceAnonymizer(salt=CONSISTENT_SALT)
anonymized_datasource = datasource_anonymizer.anonymize_datasource_info(
name=name, config=config
)
assert anonymized_datasource == {
"anonymized_class": "c454ace824bf401ea42815c84d0f5717",
"anonymized_name": "2642802d79d90ce6d147b0f9f61c3569",
"parent_class": "PandasDatasource",
} | [
"def",
"test_anonymize_datasource_info_v2_api_custom_subclass",
"(",
")",
":",
"name",
"=",
"\"test_pandas_datasource\"",
"yaml_config",
"=",
"f\"\"\"\nmodule_name: tests.data_context.fixtures.plugins.my_custom_v2_api_datasource\nclass_name: MyCustomV2ApiDatasource\n\"\"\"",
"config",
":",
"CommentedMap",
"=",
"yaml",
".",
"load",
"(",
"yaml_config",
")",
"datasource_anonymizer",
"=",
"DatasourceAnonymizer",
"(",
"salt",
"=",
"CONSISTENT_SALT",
")",
"anonymized_datasource",
"=",
"datasource_anonymizer",
".",
"anonymize_datasource_info",
"(",
"name",
"=",
"name",
",",
"config",
"=",
"config",
")",
"assert",
"anonymized_datasource",
"==",
"{",
"\"anonymized_class\"",
":",
"\"c454ace824bf401ea42815c84d0f5717\"",
",",
"\"anonymized_name\"",
":",
"\"2642802d79d90ce6d147b0f9f61c3569\"",
",",
"\"parent_class\"",
":",
"\"PandasDatasource\"",
",",
"}"
] | [
131,
0
] | [
152,
5
] | python | en | ['en', 'error', 'th'] | False |
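The anonymized values asserted above are 32-character hex digests, consistent with a salted MD5 of the name; the exact recipe is an assumption here, but a sketch of how such stable, salted hashes are typically produced:

```python
import hashlib

def anonymize(salt: str, value: str) -> str:
    # Same salt + same input -> same digest, which is why the test can
    # assert exact values when constructed with CONSISTENT_SALT.
    return hashlib.md5(salt.encode() + value.encode()).hexdigest()

print(anonymize("my-salt", "test_pandas_datasource"))  # 32 hex characters
```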
ExpectTableRowCountToBeBetween.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# Setting up a configuration",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
91,
4
] | [
105,
85
] | python | en | ['en', 'error', 'th'] | False |
check_store_backend_store_backend_id_functionality | (
store_backend: StoreBackend, store_backend_id: str = None
) |
Assertions to check if a store backend is handling reading and writing a store_backend_id appropriately.
Args:
store_backend: Instance of subclass of StoreBackend to test e.g. TupleFilesystemStoreBackend
store_backend_id: Manually input store_backend_id
Returns:
None
|
Assertions to check if a store backend is handling reading and writing a store_backend_id appropriately.
Args:
store_backend: Instance of subclass of StoreBackend to test e.g. TupleFilesystemStoreBackend
store_backend_id: Manually input store_backend_id
Returns:
None
| def check_store_backend_store_backend_id_functionality(
store_backend: StoreBackend, store_backend_id: str = None
) -> None:
"""
Assertions to check if a store backend is handling reading and writing a store_backend_id appropriately.
Args:
store_backend: Instance of subclass of StoreBackend to test e.g. TupleFilesystemStoreBackend
store_backend_id: Manually input store_backend_id
Returns:
None
"""
# Check that store_backend_id exists and can be read
assert store_backend.store_backend_id is not None
store_error_uuid = "00000000-0000-0000-0000-00000000e003"
assert store_backend.store_backend_id != store_error_uuid
if store_backend_id:
assert store_backend.store_backend_id == store_backend_id
# Check that store_backend_id is a valid UUID
assert test_utils.validate_uuid4(store_backend.store_backend_id)
# Check in file stores that the actual file exists
assert store_backend.has_key(key=(".ge_store_backend_id",))
# Check file stores for the file in the correct format
store_backend_id_from_file = store_backend.get(key=(".ge_store_backend_id",))
store_backend_id_file_parser = "store_backend_id = " + pp.Word(pp.hexnums + "-")
parsed_store_backend_id = store_backend_id_file_parser.parseString(
store_backend_id_from_file
)
assert test_utils.validate_uuid4(parsed_store_backend_id[1]) | [
"def",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
":",
"StoreBackend",
",",
"store_backend_id",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"# Check that store_backend_id exists can be read",
"assert",
"store_backend",
".",
"store_backend_id",
"is",
"not",
"None",
"store_error_uuid",
"=",
"\"00000000-0000-0000-0000-00000000e003\"",
"assert",
"store_backend",
".",
"store_backend_id",
"!=",
"store_error_uuid",
"if",
"store_backend_id",
":",
"assert",
"store_backend",
".",
"store_backend_id",
"==",
"store_backend_id",
"# Check that store_backend_id is a valid UUID",
"assert",
"test_utils",
".",
"validate_uuid4",
"(",
"store_backend",
".",
"store_backend_id",
")",
"# Check in file stores that the actual file exists",
"assert",
"store_backend",
".",
"has_key",
"(",
"key",
"=",
"(",
"\".ge_store_backend_id\"",
",",
")",
")",
"# Check file stores for the file in the correct format",
"store_backend_id_from_file",
"=",
"store_backend",
".",
"get",
"(",
"key",
"=",
"(",
"\".ge_store_backend_id\"",
",",
")",
")",
"store_backend_id_file_parser",
"=",
"\"store_backend_id = \"",
"+",
"pp",
".",
"Word",
"(",
"pp",
".",
"hexnums",
"+",
"\"-\"",
")",
"parsed_store_backend_id",
"=",
"store_backend_id_file_parser",
".",
"parseString",
"(",
"store_backend_id_from_file",
")",
"assert",
"test_utils",
".",
"validate_uuid4",
"(",
"parsed_store_backend_id",
"[",
"1",
"]",
")"
] | [
49,
0
] | [
77,
64
] | python | en | ['en', 'error', 'th'] | False |
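test_utils.validate_uuid4 is not shown in this dump; a plausible equivalent, together with the pyparsing grammar used in the function above, might look like this (the validate_uuid4 body is an assumption):

```python
import uuid
import pyparsing as pp

def validate_uuid4(candidate: str) -> bool:
    # uuid.UUID(..., version=4) normalizes the version/variant bits, so a
    # round-trip comparison rejects anything that is not already a UUIDv4.
    try:
        return str(uuid.UUID(candidate, version=4)) == candidate.lower()
    except (ValueError, AttributeError, TypeError):
        return False

# The same grammar as in the function above: a literal prefix followed by
# hex digits and dashes; parseString returns the matched tokens in order.
parser = "store_backend_id = " + pp.Word(pp.hexnums + "-")
tokens = parser.parseString("store_backend_id = 12345678-1234-4321-8765-1234567890ab")
assert validate_uuid4(tokens[1])
```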
test_StoreBackend_id_initialization | (tmp_path_factory) |
What does this test and why?
A StoreBackend should have a store_backend_id property. That store_backend_id should be read and initialized
from an existing persistent store_backend_id during instantiation, or a new store_backend_id should be generated
and persisted. The store_backend_id should be a valid UUIDv4.
If a new store_backend_id cannot be persisted, use an ephemeral store_backend_id.
Persistence should be in a .ge_store_backend_id file for filesystem and blob-stores.
Note: StoreBackend & TupleStoreBackend are abstract classes, so we will test the
concrete classes that inherit from them.
See also test_database_store_backend::test_database_store_backend_id_initialization
|
What does this test and why? | def test_StoreBackend_id_initialization(tmp_path_factory):
"""
What does this test and why?
A StoreBackend should have a store_backend_id property. That store_backend_id should be read and initialized
from an existing persistent store_backend_id during instantiation, or a new store_backend_id should be generated
and persisted. The store_backend_id should be a valid UUIDv4.
If a new store_backend_id cannot be persisted, use an ephemeral store_backend_id.
Persistence should be in a .ge_store_backend_id file for filesystem and blob-stores.
Note: StoreBackend & TupleStoreBackend are abstract classes, so we will test the
concrete classes that inherit from them.
See also test_database_store_backend::test_database_store_backend_id_initialization
"""
# InMemoryStoreBackend
# Initialize without store_backend_id and check that it is generated correctly
in_memory_store_backend = InMemoryStoreBackend()
check_store_backend_store_backend_id_functionality(
store_backend=in_memory_store_backend
)
# Create a new store with the same config and make sure it reports the same store_backend_id
# in_memory_store_backend_duplicate = InMemoryStoreBackend()
# assert in_memory_store_backend.store_backend_id == in_memory_store_backend_duplicate.store_backend_id
# This is not currently implemented for the InMemoryStoreBackend, the store_backend_id is ephemeral since
# there is no place to persist it.
# TupleFilesystemStoreBackend
# Initialize without store_backend_id and check that it is generated correctly
path = "dummy_str"
project_path = str(
tmp_path_factory.mktemp("test_StoreBackend_id_initialization__dir")
)
tuple_filesystem_store_backend = TupleFilesystemStoreBackend(
root_directory=project_path,
base_directory=os.path.join(project_path, path),
)
# Check that store_backend_id is created on instantiation, before being accessed
desired_directory_tree_str = """\
test_StoreBackend_id_initialization__dir0/
dummy_str/
.ge_store_backend_id
"""
assert gen_directory_tree_str(project_path) == desired_directory_tree_str
check_store_backend_store_backend_id_functionality(
store_backend=tuple_filesystem_store_backend
)
assert gen_directory_tree_str(project_path) == desired_directory_tree_str
# Repeat the above with a filepath template
project_path_with_filepath_template = str(
tmp_path_factory.mktemp("test_StoreBackend_id_initialization__dir")
)
tuple_filesystem_store_backend_with_filepath_template = TupleFilesystemStoreBackend(
root_directory=os.path.join(project_path, path),
base_directory=project_path_with_filepath_template,
filepath_template="my_file_{0}",
)
check_store_backend_store_backend_id_functionality(
store_backend=tuple_filesystem_store_backend_with_filepath_template
)
assert (
gen_directory_tree_str(project_path_with_filepath_template)
== """\
test_StoreBackend_id_initialization__dir1/
.ge_store_backend_id
"""
)
# Create a new store with the same config and make sure it reports the same store_backend_id
tuple_filesystem_store_backend_duplicate = TupleFilesystemStoreBackend(
root_directory=project_path,
base_directory=os.path.join(project_path, path),
# filepath_template="my_file_{0}",
)
check_store_backend_store_backend_id_functionality(
store_backend=tuple_filesystem_store_backend_duplicate
)
assert (
tuple_filesystem_store_backend.store_backend_id
== tuple_filesystem_store_backend_duplicate.store_backend_id
)
# TupleS3StoreBackend
# Initialize without store_backend_id and check that it is generated correctly
bucket = "leakybucket"
prefix = "this_is_a_test_prefix"
# create a bucket in Moto's mock AWS environment
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
s3_store_backend = TupleS3StoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
)
check_store_backend_store_backend_id_functionality(store_backend=s3_store_backend)
# Create a new store with the same config and make sure it reports the same store_backend_id
s3_store_backend_duplicate = TupleS3StoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
)
check_store_backend_store_backend_id_functionality(
store_backend=s3_store_backend_duplicate
)
assert (
s3_store_backend.store_backend_id == s3_store_backend_duplicate.store_backend_id
)
# TODO: Fix GCS Testing
# TupleGCSStoreBackend
# Initialize without store_backend_id and check that it is generated correctly
bucket = "leakybucket"
prefix = "this_is_a_test_prefix"
project = "dummy-project"
base_public_path = "http://www.test.com/"
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
gcs_store_backend_with_base_public_path = TupleGCSStoreBackend(
filepath_template=None,
bucket=bucket,
prefix=prefix,
project=project,
base_public_path=base_public_path,
)
gcs_store_backend_with_base_public_path_duplicate = TupleGCSStoreBackend(
filepath_template=None,
bucket=bucket,
prefix=prefix,
project=project,
base_public_path=base_public_path,
)
assert gcs_store_backend_with_base_public_path.store_backend_id is not None
# Currently we don't have a good way to mock GCS functionality
# check_store_backend_store_backend_id_functionality(store_backend=gcs_store_backend_with_base_public_path)
# Create a new store with the same config and make sure it reports the same store_backend_id
assert (
gcs_store_backend_with_base_public_path.store_backend_id
== gcs_store_backend_with_base_public_path_duplicate.store_backend_id
)
store_error_uuid = "00000000-0000-0000-0000-00000000e003"
assert (
gcs_store_backend_with_base_public_path.store_backend_id != store_error_uuid
)
assert (
gcs_store_backend_with_base_public_path_duplicate.store_backend_id
!= store_error_uuid
) | [
"def",
"test_StoreBackend_id_initialization",
"(",
"tmp_path_factory",
")",
":",
"# InMemoryStoreBackend",
"# Initialize without store_backend_id and check that it is generated correctly",
"in_memory_store_backend",
"=",
"InMemoryStoreBackend",
"(",
")",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
"=",
"in_memory_store_backend",
")",
"# Create a new store with the same config and make sure it reports the same store_backend_id",
"# in_memory_store_backend_duplicate = InMemoryStoreBackend()",
"# assert in_memory_store_backend.store_backend_id == in_memory_store_backend_duplicate.store_backend_id",
"# This is not currently implemented for the InMemoryStoreBackend, the store_backend_id is ephemeral since",
"# there is no place to persist it.",
"# TupleFilesystemStoreBackend",
"# Initialize without store_backend_id and check that it is generated correctly",
"path",
"=",
"\"dummy_str\"",
"project_path",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_StoreBackend_id_initialization__dir\"",
")",
")",
"tuple_filesystem_store_backend",
"=",
"TupleFilesystemStoreBackend",
"(",
"root_directory",
"=",
"project_path",
",",
"base_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"path",
")",
",",
")",
"# Check that store_backend_id is created on instantiation, before being accessed",
"desired_directory_tree_str",
"=",
"\"\"\"\\\ntest_StoreBackend_id_initialization__dir0/\n dummy_str/\n .ge_store_backend_id\n\"\"\"",
"assert",
"gen_directory_tree_str",
"(",
"project_path",
")",
"==",
"desired_directory_tree_str",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
"=",
"tuple_filesystem_store_backend",
")",
"assert",
"gen_directory_tree_str",
"(",
"project_path",
")",
"==",
"desired_directory_tree_str",
"# Repeat the above with a filepath template",
"project_path_with_filepath_template",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_StoreBackend_id_initialization__dir\"",
")",
")",
"tuple_filesystem_store_backend_with_filepath_template",
"=",
"TupleFilesystemStoreBackend",
"(",
"root_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"path",
")",
",",
"base_directory",
"=",
"project_path_with_filepath_template",
",",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
")",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
"=",
"tuple_filesystem_store_backend_with_filepath_template",
")",
"assert",
"(",
"gen_directory_tree_str",
"(",
"project_path_with_filepath_template",
")",
"==",
"\"\"\"\\\ntest_StoreBackend_id_initialization__dir1/\n .ge_store_backend_id\n\"\"\"",
")",
"# Create a new store with the same config and make sure it reports the same store_backend_id",
"tuple_filesystem_store_backend_duplicate",
"=",
"TupleFilesystemStoreBackend",
"(",
"root_directory",
"=",
"project_path",
",",
"base_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"path",
")",
",",
"# filepath_template=\"my_file_{0}\",",
")",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
"=",
"tuple_filesystem_store_backend_duplicate",
")",
"assert",
"(",
"tuple_filesystem_store_backend",
".",
"store_backend_id",
"==",
"tuple_filesystem_store_backend_duplicate",
".",
"store_backend_id",
")",
"# TupleS3StoreBackend",
"# Initialize without store_backend_id and check that it is generated correctly",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
"# create a bucket in Moto's mock AWS environment",
"conn",
"=",
"boto3",
".",
"resource",
"(",
"\"s3\"",
",",
"region_name",
"=",
"\"us-east-1\"",
")",
"conn",
".",
"create_bucket",
"(",
"Bucket",
"=",
"bucket",
")",
"s3_store_backend",
"=",
"TupleS3StoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
")",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
"=",
"s3_store_backend",
")",
"# Create a new store with the same config and make sure it reports the same store_backend_id",
"s3_store_backend_duplicate",
"=",
"TupleS3StoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
")",
"check_store_backend_store_backend_id_functionality",
"(",
"store_backend",
"=",
"s3_store_backend_duplicate",
")",
"assert",
"(",
"s3_store_backend",
".",
"store_backend_id",
"==",
"s3_store_backend_duplicate",
".",
"store_backend_id",
")",
"# TODO: Fix GCS Testing",
"# TupleGCSStoreBackend",
"# Initialize without store_backend_id and check that it is generated correctly",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
"project",
"=",
"\"dummy-project\"",
"base_public_path",
"=",
"\"http://www.test.com/\"",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"gcs_store_backend_with_base_public_path",
"=",
"TupleGCSStoreBackend",
"(",
"filepath_template",
"=",
"None",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
"project",
"=",
"project",
",",
"base_public_path",
"=",
"base_public_path",
",",
")",
"gcs_store_backend_with_base_public_path_duplicate",
"=",
"TupleGCSStoreBackend",
"(",
"filepath_template",
"=",
"None",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
"project",
"=",
"project",
",",
"base_public_path",
"=",
"base_public_path",
",",
")",
"assert",
"gcs_store_backend_with_base_public_path",
".",
"store_backend_id",
"is",
"not",
"None",
"# Currently we don't have a good way to mock GCS functionality",
"# check_store_backend_store_backend_id_functionality(store_backend=gcs_store_backend_with_base_public_path)",
"# Create a new store with the same config and make sure it reports the same store_backend_id",
"assert",
"(",
"gcs_store_backend_with_base_public_path",
".",
"store_backend_id",
"==",
"gcs_store_backend_with_base_public_path_duplicate",
".",
"store_backend_id",
")",
"store_error_uuid",
"=",
"\"00000000-0000-0000-0000-00000000e003\"",
"assert",
"(",
"gcs_store_backend_with_base_public_path",
".",
"store_backend_id",
"!=",
"store_error_uuid",
")",
"assert",
"(",
"gcs_store_backend_with_base_public_path_duplicate",
".",
"store_backend_id",
"!=",
"store_error_uuid",
")"
] | [
81,
0
] | [
237,
9
] | python | en | ['en', 'error', 'th'] | False |
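The S3 portions of this test assume Moto's mock AWS environment is already active (via a decorator or fixture not visible in this dump); a self-contained sketch of that pattern, assuming moto < 5:

```python
import boto3
from moto import mock_s3  # moto >= 5 replaces this with `from moto import mock_aws`

@mock_s3
def exercise_mock_bucket():
    # All S3 traffic below hits Moto's in-memory fake, not real AWS.
    conn = boto3.resource("s3", region_name="us-east-1")
    conn.create_bucket(Bucket="leakybucket")
    client = boto3.client("s3", region_name="us-east-1")
    client.put_object(Bucket="leakybucket", Key="prefix/my_file_AAA", Body=b"aaa")
    keys = [obj["Key"] for obj in client.list_objects_v2(
        Bucket="leakybucket", Prefix="prefix")["Contents"]]
    assert keys == ["prefix/my_file_AAA"]

exercise_mock_bucket()
```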
test_TupleS3StoreBackend_with_prefix | () |
What does this test test and why?
We will exercise the store backend's set method twice and then verify
that calling the get and list methods will return the expected keys.
We will also check that the objects are stored on S3 at the expected location,
and that the correct S3 URL for the object can be retrieved.
|
What does this test test and why? | def test_TupleS3StoreBackend_with_prefix():
"""
What does this test test and why?
We will exercise the store backend's set method twice and then verify
that calling the get and list methods will return the expected keys.
We will also check that the objects are stored on S3 at the expected location,
and that the correct S3 URL for the object can be retrieved.
"""
bucket = "leakybucket"
prefix = "this_is_a_test_prefix"
base_public_path = "http://www.test.com/"
# create a bucket in Moto's mock AWS environment
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
my_store = TupleS3StoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
)
# We should be able to list keys, even when empty
keys = my_store.list_keys()
assert len(keys) == 1
my_store.set(("AAA",), "aaa", content_type="text/html; charset=utf-8")
assert my_store.get(("AAA",)) == "aaa"
obj = boto3.client("s3").get_object(Bucket=bucket, Key=prefix + "/my_file_AAA")
assert obj["ContentType"] == "text/html; charset=utf-8"
assert obj["ContentEncoding"] == "utf-8"
my_store.set(("BBB",), "bbb")
assert my_store.get(("BBB",)) == "bbb"
assert set(my_store.list_keys()) == {("AAA",), ("BBB",), (".ge_store_backend_id",)}
assert {
s3_object_info["Key"]
for s3_object_info in boto3.client("s3").list_objects_v2(
Bucket=bucket, Prefix=prefix
)["Contents"]
} == {
"this_is_a_test_prefix/.ge_store_backend_id",
"this_is_a_test_prefix/my_file_AAA",
"this_is_a_test_prefix/my_file_BBB",
}
assert my_store.get_url_for_key(
("AAA",)
) == "https://s3.amazonaws.com/{}/{}/my_file_AAA".format(bucket, prefix)
assert my_store.get_url_for_key(
("BBB",)
) == "https://s3.amazonaws.com/{}/{}/my_file_BBB".format(bucket, prefix)
my_store.remove_key(("BBB",))
with pytest.raises(InvalidKeyError):
my_store.get(("BBB",))
# testing base_public_path
my_new_store = TupleS3StoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
base_public_path=base_public_path,
)
my_new_store.set(("BBB",), "bbb", content_type="text/html; charset=utf-8")
assert (
my_new_store.get_public_url_for_key(("BBB",))
== "http://www.test.com/my_file_BBB"
) | [
"def",
"test_TupleS3StoreBackend_with_prefix",
"(",
")",
":",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
"base_public_path",
"=",
"\"http://www.test.com/\"",
"# create a bucket in Moto's mock AWS environment",
"conn",
"=",
"boto3",
".",
"resource",
"(",
"\"s3\"",
",",
"region_name",
"=",
"\"us-east-1\"",
")",
"conn",
".",
"create_bucket",
"(",
"Bucket",
"=",
"bucket",
")",
"my_store",
"=",
"TupleS3StoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
")",
"# We should be able to list keys, even when empty",
"keys",
"=",
"my_store",
".",
"list_keys",
"(",
")",
"assert",
"len",
"(",
"keys",
")",
"==",
"1",
"my_store",
".",
"set",
"(",
"(",
"\"AAA\"",
",",
")",
",",
"\"aaa\"",
",",
"content_type",
"=",
"\"text/html; charset=utf-8\"",
")",
"assert",
"my_store",
".",
"get",
"(",
"(",
"\"AAA\"",
",",
")",
")",
"==",
"\"aaa\"",
"obj",
"=",
"boto3",
".",
"client",
"(",
"\"s3\"",
")",
".",
"get_object",
"(",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"prefix",
"+",
"\"/my_file_AAA\"",
")",
"assert",
"obj",
"[",
"\"ContentType\"",
"]",
"==",
"\"text/html; charset=utf-8\"",
"assert",
"obj",
"[",
"\"ContentEncoding\"",
"]",
"==",
"\"utf-8\"",
"my_store",
".",
"set",
"(",
"(",
"\"BBB\"",
",",
")",
",",
"\"bbb\"",
")",
"assert",
"my_store",
".",
"get",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"==",
"\"bbb\"",
"assert",
"set",
"(",
"my_store",
".",
"list_keys",
"(",
")",
")",
"==",
"{",
"(",
"\"AAA\"",
",",
")",
",",
"(",
"\"BBB\"",
",",
")",
",",
"(",
"\".ge_store_backend_id\"",
",",
")",
"}",
"assert",
"{",
"s3_object_info",
"[",
"\"Key\"",
"]",
"for",
"s3_object_info",
"in",
"boto3",
".",
"client",
"(",
"\"s3\"",
")",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"bucket",
",",
"Prefix",
"=",
"prefix",
")",
"[",
"\"Contents\"",
"]",
"}",
"==",
"{",
"\"this_is_a_test_prefix/.ge_store_backend_id\"",
",",
"\"this_is_a_test_prefix/my_file_AAA\"",
",",
"\"this_is_a_test_prefix/my_file_BBB\"",
",",
"}",
"assert",
"my_store",
".",
"get_url_for_key",
"(",
"(",
"\"AAA\"",
",",
")",
")",
"==",
"\"https://s3.amazonaws.com/{}/{}/my_file_AAA\"",
".",
"format",
"(",
"bucket",
",",
"prefix",
")",
"assert",
"my_store",
".",
"get_url_for_key",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"==",
"\"https://s3.amazonaws.com/{}/{}/my_file_BBB\"",
".",
"format",
"(",
"bucket",
",",
"prefix",
")",
"my_store",
".",
"remove_key",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"with",
"pytest",
".",
"raises",
"(",
"InvalidKeyError",
")",
":",
"my_store",
".",
"get",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"# testing base_public_path",
"my_new_store",
"=",
"TupleS3StoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
"base_public_path",
"=",
"base_public_path",
",",
")",
"my_new_store",
".",
"set",
"(",
"(",
"\"BBB\"",
",",
")",
",",
"\"bbb\"",
",",
"content_type",
"=",
"\"text/html; charset=utf-8\"",
")",
"assert",
"(",
"my_new_store",
".",
"get_public_url_for_key",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"==",
"\"http://www.test.com/my_file_BBB\"",
")"
] | [
431,
0
] | [
506,
5
] | python | en | ['en', 'error', 'th'] | False |
test_TupleS3StoreBackend_with_empty_prefixes | () |
What does this test test and why?
We will exercise the store backend's set method twice and then verify
that calling the get and list methods will return the expected keys.
We will also check that the objects are stored on S3 at the expected location,
and that the correct S3 URL for the object can be retrieved.
|
What does this test test and why? | def test_TupleS3StoreBackend_with_empty_prefixes():
"""
What does this test test and why?
We will exercise the store backend's set method twice and then verify
that calling the get and list methods will return the expected keys.
We will also check that the objects are stored on S3 at the expected location,
and that the correct S3 URL for the object can be retrieved.
"""
bucket = "leakybucket"
prefix = ""
# create a bucket in Moto's mock AWS environment
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
my_store = TupleS3StoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
)
# We should be able to list keys, even when empty
keys = my_store.list_keys()
assert len(keys) == 1
my_store.set(("AAA",), "aaa", content_type="text/html; charset=utf-8")
assert my_store.get(("AAA",)) == "aaa"
obj = boto3.client("s3").get_object(Bucket=bucket, Key=prefix + "my_file_AAA")
assert my_store._build_s3_object_key(("AAA",)) == "my_file_AAA"
assert obj["ContentType"] == "text/html; charset=utf-8"
assert obj["ContentEncoding"] == "utf-8"
my_store.set(("BBB",), "bbb")
assert my_store.get(("BBB",)) == "bbb"
assert set(my_store.list_keys()) == {("AAA",), ("BBB",), (".ge_store_backend_id",)}
assert {
s3_object_info["Key"]
for s3_object_info in boto3.client("s3").list_objects_v2(
Bucket=bucket, Prefix=prefix
)["Contents"]
} == {"my_file_AAA", "my_file_BBB", ".ge_store_backend_id"}
assert (
my_store.get_url_for_key(("AAA",))
== "https://s3.amazonaws.com/leakybucket/my_file_AAA"
)
assert (
my_store.get_url_for_key(("BBB",))
== "https://s3.amazonaws.com/leakybucket/my_file_BBB"
) | [
"def",
"test_TupleS3StoreBackend_with_empty_prefixes",
"(",
")",
":",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"\"",
"# create a bucket in Moto's mock AWS environment",
"conn",
"=",
"boto3",
".",
"resource",
"(",
"\"s3\"",
",",
"region_name",
"=",
"\"us-east-1\"",
")",
"conn",
".",
"create_bucket",
"(",
"Bucket",
"=",
"bucket",
")",
"my_store",
"=",
"TupleS3StoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
")",
"# We should be able to list keys, even when empty",
"keys",
"=",
"my_store",
".",
"list_keys",
"(",
")",
"assert",
"len",
"(",
"keys",
")",
"==",
"1",
"my_store",
".",
"set",
"(",
"(",
"\"AAA\"",
",",
")",
",",
"\"aaa\"",
",",
"content_type",
"=",
"\"text/html; charset=utf-8\"",
")",
"assert",
"my_store",
".",
"get",
"(",
"(",
"\"AAA\"",
",",
")",
")",
"==",
"\"aaa\"",
"obj",
"=",
"boto3",
".",
"client",
"(",
"\"s3\"",
")",
".",
"get_object",
"(",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"prefix",
"+",
"\"my_file_AAA\"",
")",
"assert",
"my_store",
".",
"_build_s3_object_key",
"(",
"(",
"\"AAA\"",
",",
")",
")",
"==",
"\"my_file_AAA\"",
"assert",
"obj",
"[",
"\"ContentType\"",
"]",
"==",
"\"text/html; charset=utf-8\"",
"assert",
"obj",
"[",
"\"ContentEncoding\"",
"]",
"==",
"\"utf-8\"",
"my_store",
".",
"set",
"(",
"(",
"\"BBB\"",
",",
")",
",",
"\"bbb\"",
")",
"assert",
"my_store",
".",
"get",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"==",
"\"bbb\"",
"assert",
"set",
"(",
"my_store",
".",
"list_keys",
"(",
")",
")",
"==",
"{",
"(",
"\"AAA\"",
",",
")",
",",
"(",
"\"BBB\"",
",",
")",
",",
"(",
"\".ge_store_backend_id\"",
",",
")",
"}",
"assert",
"{",
"s3_object_info",
"[",
"\"Key\"",
"]",
"for",
"s3_object_info",
"in",
"boto3",
".",
"client",
"(",
"\"s3\"",
")",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"bucket",
",",
"Prefix",
"=",
"prefix",
")",
"[",
"\"Contents\"",
"]",
"}",
"==",
"{",
"\"my_file_AAA\"",
",",
"\"my_file_BBB\"",
",",
"\".ge_store_backend_id\"",
"}",
"assert",
"(",
"my_store",
".",
"get_url_for_key",
"(",
"(",
"\"AAA\"",
",",
")",
")",
"==",
"\"https://s3.amazonaws.com/leakybucket/my_file_AAA\"",
")",
"assert",
"(",
"my_store",
".",
"get_url_for_key",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"==",
"\"https://s3.amazonaws.com/leakybucket/my_file_BBB\"",
")"
] | [
698,
0
] | [
751,
5
] | python | en | ['en', 'error', 'th'] | False |
test_TupleGCSStoreBackend_base_public_path | () |
What does this test and why?
The base_public_path parameter allows users to point to a custom DNS when hosting Data Docs.
This test will exercise the get_url_for_key method twice to see that we get the expected URL,
with or without base_public_path.
|
What does this test and why? | def test_TupleGCSStoreBackend_base_public_path():
"""
What does this test and why?
The base_public_path parameter allows users to point to a custom DNS when hosting Data Docs.
This test will exercise the get_url_for_key method twice to see that we get the expected URL,
with or without base_public_path.
"""
bucket = "leakybucket"
prefix = "this_is_a_test_prefix"
project = "dummy-project"
base_public_path = "http://www.test.com/"
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
mock_client = mock_gcs_client.return_value
mock_bucket = mock_client.get_bucket.return_value
mock_blob = mock_bucket.blob.return_value
my_store_with_base_public_path = TupleGCSStoreBackend(
filepath_template=None,
bucket=bucket,
prefix=prefix,
project=project,
base_public_path=base_public_path,
)
my_store_with_base_public_path.set(
("BBB",), b"bbb", content_encoding=None, content_type="image/png"
)
run_id = RunIdentifier("my_run_id", datetime.datetime.utcnow())
key = ValidationResultIdentifier(
ExpectationSuiteIdentifier(expectation_suite_name="my_suite_name"),
run_id,
"my_batch_id",
)
run_time_string = run_id.to_tuple()[1]
url = my_store_with_base_public_path.get_public_url_for_key(key.to_tuple())
assert (
url
== "http://www.test.com/leakybucket"
+ f"/this_is_a_test_prefix/my_suite_name/my_run_id/{run_time_string}/my_batch_id"
) | [
"def",
"test_TupleGCSStoreBackend_base_public_path",
"(",
")",
":",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
"project",
"=",
"\"dummy-project\"",
"base_public_path",
"=",
"\"http://www.test.com/\"",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"mock_client",
"=",
"mock_gcs_client",
".",
"return_value",
"mock_bucket",
"=",
"mock_client",
".",
"get_bucket",
".",
"return_value",
"mock_blob",
"=",
"mock_bucket",
".",
"blob",
".",
"return_value",
"my_store_with_base_public_path",
"=",
"TupleGCSStoreBackend",
"(",
"filepath_template",
"=",
"None",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
"project",
"=",
"project",
",",
"base_public_path",
"=",
"base_public_path",
",",
")",
"my_store_with_base_public_path",
".",
"set",
"(",
"(",
"\"BBB\"",
",",
")",
",",
"b\"bbb\"",
",",
"content_encoding",
"=",
"None",
",",
"content_type",
"=",
"\"image/png\"",
")",
"run_id",
"=",
"RunIdentifier",
"(",
"\"my_run_id\"",
",",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
")",
"key",
"=",
"ValidationResultIdentifier",
"(",
"ExpectationSuiteIdentifier",
"(",
"expectation_suite_name",
"=",
"\"my_suite_name\"",
")",
",",
"run_id",
",",
"\"my_batch_id\"",
",",
")",
"run_time_string",
"=",
"run_id",
".",
"to_tuple",
"(",
")",
"[",
"1",
"]",
"url",
"=",
"my_store_with_base_public_path",
".",
"get_public_url_for_key",
"(",
"key",
".",
"to_tuple",
"(",
")",
")",
"assert",
"(",
"url",
"==",
"\"http://www.test.com/leakybucket\"",
"+",
"f\"/this_is_a_test_prefix/my_suite_name/my_run_id/{run_time_string}/my_batch_id\"",
")"
] | [
754,
0
] | [
798,
5
] | python | en | ['en', 'error', 'th'] | False |
test_TupleGCSStoreBackend | () |
What does this test test and why?
Since no package like moto exists for GCP services, we mock the GCS client
and assert that the store backend makes the right calls for set, get, and list.
TODO: One option may be to have a GCS Store in Docker, which can be used to "actually" run these tests.
|
What does this test test and why? | def test_TupleGCSStoreBackend():
# pytest.importorskip("google-cloud-storage")
"""
What does this test test and why?
Since no package like moto exists for GCP services, we mock the GCS client
and assert that the store backend makes the right calls for set, get, and list.
TODO: One option may be to have a GCS Store in Docker, which can be used to "actually" run these tests.
"""
bucket = "leakybucket"
prefix = "this_is_a_test_prefix"
project = "dummy-project"
base_public_path = "http://www.test.com/"
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
mock_client = mock_gcs_client.return_value
mock_bucket = mock_client.get_bucket.return_value
mock_blob = mock_bucket.blob.return_value
my_store = TupleGCSStoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
project=project,
)
my_store.set(("AAA",), "aaa", content_type="text/html")
mock_gcs_client.assert_called_with("dummy-project")
mock_client.get_bucket.assert_called_with("leakybucket")
mock_bucket.blob.assert_called_with("this_is_a_test_prefix/my_file_AAA")
# mock_bucket.blob.assert_any_call("this_is_a_test_prefix/.ge_store_backend_id")
mock_blob.upload_from_string.assert_called_with(
b"aaa", content_type="text/html"
)
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
mock_client = mock_gcs_client.return_value
mock_bucket = mock_client.get_bucket.return_value
mock_blob = mock_bucket.blob.return_value
my_store_with_no_filepath_template = TupleGCSStoreBackend(
filepath_template=None, bucket=bucket, prefix=prefix, project=project
)
my_store_with_no_filepath_template.set(
("AAA",), b"aaa", content_encoding=None, content_type="image/png"
)
mock_gcs_client.assert_called_with("dummy-project")
mock_client.get_bucket.assert_called_with("leakybucket")
mock_bucket.blob.assert_called_with("this_is_a_test_prefix/AAA")
# mock_bucket.blob.assert_any_call("this_is_a_test_prefix/.ge_store_backend_id")
mock_blob.upload_from_string.assert_called_with(
b"aaa", content_type="image/png"
)
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
mock_client = mock_gcs_client.return_value
mock_bucket = mock_client.get_bucket.return_value
mock_blob = mock_bucket.get_blob.return_value
mock_str = mock_blob.download_as_string.return_value
my_store.get(("BBB",))
mock_gcs_client.assert_called_once_with("dummy-project")
mock_client.get_bucket.assert_called_once_with("leakybucket")
mock_bucket.get_blob.assert_called_once_with(
"this_is_a_test_prefix/my_file_BBB"
)
mock_blob.download_as_string.assert_called_once()
mock_str.decode.assert_called_once_with("utf-8")
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
mock_client = mock_gcs_client.return_value
my_store.list_keys()
mock_client.list_blobs.assert_called_once_with(
"leakybucket", prefix="this_is_a_test_prefix"
)
my_store.remove_key("leakybucket")
from google.cloud.exceptions import NotFound
try:
mock_client.get_bucket.assert_called_once_with("leakybucket")
except NotFound:
pass
with patch("google.cloud.storage.Client", autospec=True) as mock_gcs_client:
mock_gcs_client.side_effect = InvalidKeyError("Hi I am an InvalidKeyError")
with pytest.raises(InvalidKeyError):
my_store.get(("non_existent_key",))
run_id = RunIdentifier("my_run_id", datetime.datetime.utcnow())
key = ValidationResultIdentifier(
ExpectationSuiteIdentifier(expectation_suite_name="my_suite_name"),
run_id,
"my_batch_id",
)
run_time_string = run_id.to_tuple()[1]
url = my_store_with_no_filepath_template.get_url_for_key(key.to_tuple())
assert (
url
== "https://storage.googleapis.com/leakybucket"
+ f"/this_is_a_test_prefix/my_suite_name/my_run_id/{run_time_string}/my_batch_id"
) | [
"def",
"test_TupleGCSStoreBackend",
"(",
")",
":",
"# pytest.importorskip(\"google-cloud-storage\")",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
"project",
"=",
"\"dummy-project\"",
"base_public_path",
"=",
"\"http://www.test.com/\"",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"mock_client",
"=",
"mock_gcs_client",
".",
"return_value",
"mock_bucket",
"=",
"mock_client",
".",
"get_bucket",
".",
"return_value",
"mock_blob",
"=",
"mock_bucket",
".",
"blob",
".",
"return_value",
"my_store",
"=",
"TupleGCSStoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
"project",
"=",
"project",
",",
")",
"my_store",
".",
"set",
"(",
"(",
"\"AAA\"",
",",
")",
",",
"\"aaa\"",
",",
"content_type",
"=",
"\"text/html\"",
")",
"mock_gcs_client",
".",
"assert_called_with",
"(",
"\"dummy-project\"",
")",
"mock_client",
".",
"get_bucket",
".",
"assert_called_with",
"(",
"\"leakybucket\"",
")",
"mock_bucket",
".",
"blob",
".",
"assert_called_with",
"(",
"\"this_is_a_test_prefix/my_file_AAA\"",
")",
"# mock_bucket.blob.assert_any_call(\"this_is_a_test_prefix/.ge_store_backend_id\")",
"mock_blob",
".",
"upload_from_string",
".",
"assert_called_with",
"(",
"b\"aaa\"",
",",
"content_type",
"=",
"\"text/html\"",
")",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"mock_client",
"=",
"mock_gcs_client",
".",
"return_value",
"mock_bucket",
"=",
"mock_client",
".",
"get_bucket",
".",
"return_value",
"mock_blob",
"=",
"mock_bucket",
".",
"blob",
".",
"return_value",
"my_store_with_no_filepath_template",
"=",
"TupleGCSStoreBackend",
"(",
"filepath_template",
"=",
"None",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
"project",
"=",
"project",
")",
"my_store_with_no_filepath_template",
".",
"set",
"(",
"(",
"\"AAA\"",
",",
")",
",",
"b\"aaa\"",
",",
"content_encoding",
"=",
"None",
",",
"content_type",
"=",
"\"image/png\"",
")",
"mock_gcs_client",
".",
"assert_called_with",
"(",
"\"dummy-project\"",
")",
"mock_client",
".",
"get_bucket",
".",
"assert_called_with",
"(",
"\"leakybucket\"",
")",
"mock_bucket",
".",
"blob",
".",
"assert_called_with",
"(",
"\"this_is_a_test_prefix/AAA\"",
")",
"# mock_bucket.blob.assert_any_call(\"this_is_a_test_prefix/.ge_store_backend_id\")",
"mock_blob",
".",
"upload_from_string",
".",
"assert_called_with",
"(",
"b\"aaa\"",
",",
"content_type",
"=",
"\"image/png\"",
")",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"mock_client",
"=",
"mock_gcs_client",
".",
"return_value",
"mock_bucket",
"=",
"mock_client",
".",
"get_bucket",
".",
"return_value",
"mock_blob",
"=",
"mock_bucket",
".",
"get_blob",
".",
"return_value",
"mock_str",
"=",
"mock_blob",
".",
"download_as_string",
".",
"return_value",
"my_store",
".",
"get",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"mock_gcs_client",
".",
"assert_called_once_with",
"(",
"\"dummy-project\"",
")",
"mock_client",
".",
"get_bucket",
".",
"assert_called_once_with",
"(",
"\"leakybucket\"",
")",
"mock_bucket",
".",
"get_blob",
".",
"assert_called_once_with",
"(",
"\"this_is_a_test_prefix/my_file_BBB\"",
")",
"mock_blob",
".",
"download_as_string",
".",
"assert_called_once",
"(",
")",
"mock_str",
".",
"decode",
".",
"assert_called_once_with",
"(",
"\"utf-8\"",
")",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"mock_client",
"=",
"mock_gcs_client",
".",
"return_value",
"my_store",
".",
"list_keys",
"(",
")",
"mock_client",
".",
"list_blobs",
".",
"assert_called_once_with",
"(",
"\"leakybucket\"",
",",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
")",
"my_store",
".",
"remove_key",
"(",
"\"leakybucket\"",
")",
"from",
"google",
".",
"cloud",
".",
"exceptions",
"import",
"NotFound",
"try",
":",
"mock_client",
".",
"get_bucket",
".",
"assert_called_once_with",
"(",
"\"leakybucket\"",
")",
"except",
"NotFound",
":",
"pass",
"with",
"patch",
"(",
"\"google.cloud.storage.Client\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_gcs_client",
":",
"mock_gcs_client",
".",
"side_effect",
"=",
"InvalidKeyError",
"(",
"\"Hi I am an InvalidKeyError\"",
")",
"with",
"pytest",
".",
"raises",
"(",
"InvalidKeyError",
")",
":",
"my_store",
".",
"get",
"(",
"(",
"\"non_existent_key\"",
",",
")",
")",
"run_id",
"=",
"RunIdentifier",
"(",
"\"my_run_id\"",
",",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
")",
"key",
"=",
"ValidationResultIdentifier",
"(",
"ExpectationSuiteIdentifier",
"(",
"expectation_suite_name",
"=",
"\"my_suite_name\"",
")",
",",
"run_id",
",",
"\"my_batch_id\"",
",",
")",
"run_time_string",
"=",
"run_id",
".",
"to_tuple",
"(",
")",
"[",
"1",
"]",
"url",
"=",
"my_store_with_no_filepath_template",
".",
"get_url_for_key",
"(",
"key",
".",
"to_tuple",
"(",
")",
")",
"assert",
"(",
"url",
"==",
"\"https://storage.googleapis.com/leakybucket\"",
"+",
"f\"/this_is_a_test_prefix/my_suite_name/my_run_id/{run_time_string}/my_batch_id\"",
")"
] | [
801,
0
] | [
915,
5
] | python | en | ['en', 'error', 'th'] | False |
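The assertion style above relies on unittest.mock's attribute chaining: every attribute access or call on a MagicMock auto-creates a child mock whose calls can be asserted later. A stripped-down sketch of the same pattern, independent of google-cloud-storage:

```python
from unittest import mock

mock_client_cls = mock.MagicMock()                 # stands in for storage.Client
client = mock_client_cls("dummy-project")          # -> mock_client_cls.return_value
bucket = client.get_bucket("leakybucket")          # -> client.get_bucket.return_value
blob = bucket.blob("this_is_a_test_prefix/my_file_AAA")
blob.upload_from_string(b"aaa", content_type="text/html")

# Mirrors the assertions in the test above.
mock_client_cls.assert_called_with("dummy-project")
client.get_bucket.assert_called_with("leakybucket")
bucket.blob.assert_called_with("this_is_a_test_prefix/my_file_AAA")
blob.upload_from_string.assert_called_with(b"aaa", content_type="text/html")
```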
test_TupleAzureBlobStoreBackend | () |
What does this test test and why?
Since no package like moto exists for Azure-Blob services, we mock the Azure-blob client
and assert that the store backend makes the right calls for set, get, and list.
|
What does this test test and why?
Since no package like moto exists for Azure-Blob services, we mock the Azure-blob client
and assert that the store backend makes the right calls for set, get, and list.
| def test_TupleAzureBlobStoreBackend():
pytest.importorskip("azure-storage-blob")
"""
What does this test test and why?
Since no package like moto exists for Azure-Blob services, we mock the Azure-blob client
and assert that the store backend makes the right calls for set, get, and list.
"""
connection_string = "this_is_a_test_conn_string"
prefix = "this_is_a_test_prefix"
container = "dummy-container"
my_store = TupleAzureBlobStoreBackend(
connection_string=connection_string, prefix=prefix, container=container
)
with patch(
"azure.storage.blob.BlobServiceClient", autospec=True
) as mock_azure_blob_client:
mock_container_client = mock_azure_blob_client.get_container_client.return_value
my_store.set(("AAA",), "aaa")
mock_azure_blob_client.from_connection_string.assert_called_once()
mock_container_client.assert_called_once()
mock_container_client.upload_blob.assert_called_once_with(
name="AAA", data=b"aaa", encoding="utf-8"
)
with patch(
"azure.storage.blob.BlobServiceClient", autospec=True
) as mock_azure_blob_client:
mock_container_client = mock_azure_blob_client.get_container_client.return_value
my_store.set(("BBB",), b"bbb")
mock_azure_blob_client.from_connection_string.assert_called_once()
mock_container_client.assert_called_once()
mock_container_client.upload_blob.assert_called_once_with(
name="AAA", data=b"aaa"
)
with patch(
"azure.storage.blob.BlobServiceClient", autospec=True
) as mock_azure_blob_client:
mock_container_client = mock_azure_blob_client.get_container_client.return_value
my_store.get(("BBB",))
mock_azure_blob_client.from_connection_string.assert_called_once()
mock_container_client.assert_called_once()
mock_container_client.download_blob.assert_called_once_with("BBB")
with patch(
"azure.storage.blob.BlobServiceClient", autospec=True
) as mock_azure_blob_client:
mock_container_client = mock_azure_blob_client.get_container_client.return_value
my_store.list_keys()
mock_azure_blob_client.from_connection_string.assert_called_once()
mock_container_client.assert_called_once()
mock_container_client.list_blobs.assert_called_once_with("this_is_a_prefix") | [
"def",
"test_TupleAzureBlobStoreBackend",
"(",
")",
":",
"pytest",
".",
"importorskip",
"(",
"\"azure-storage-blob\"",
")",
"connection_string",
"=",
"\"this_is_a_test_conn_string\"",
"prefix",
"=",
"\"this_is_a_test_prefix\"",
"container",
"=",
"\"dummy-container\"",
"my_store",
"=",
"TupleAzureBlobStoreBackend",
"(",
"connection_string",
"=",
"connection_string",
",",
"prefix",
"=",
"prefix",
",",
"container",
"=",
"container",
")",
"with",
"patch",
"(",
"\"azure.storage.blob.BlobServiceClient\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_azure_blob_client",
":",
"mock_container_client",
"=",
"mock_azure_blob_client",
".",
"get_container_client",
".",
"return_value",
"my_store",
".",
"set",
"(",
"(",
"\"AAA\"",
",",
")",
",",
"\"aaa\"",
")",
"mock_azure_blob_client",
".",
"from_connection_string",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"upload_blob",
".",
"assert_called_once_with",
"(",
"name",
"=",
"\"AAA\"",
",",
"data",
"=",
"b\"aaa\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"with",
"patch",
"(",
"\"azure.storage.blob.BlobServiceClient\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_azure_blob_client",
":",
"mock_container_client",
"=",
"mock_azure_blob_client",
".",
"get_container_client",
".",
"return_value",
"my_store",
".",
"set",
"(",
"(",
"\"BBB\"",
",",
")",
",",
"b\"bbb\"",
")",
"mock_azure_blob_client",
".",
"from_connection_string",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"upload_blob",
".",
"assert_called_once_with",
"(",
"name",
"=",
"\"AAA\"",
",",
"data",
"=",
"b\"aaa\"",
")",
"with",
"patch",
"(",
"\"azure.storage.blob.BlobServiceClient\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_azure_blob_client",
":",
"mock_container_client",
"=",
"mock_azure_blob_client",
".",
"get_container_client",
".",
"return_value",
"my_store",
".",
"get",
"(",
"(",
"\"BBB\"",
",",
")",
")",
"mock_azure_blob_client",
".",
"from_connection_string",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"download_blob",
".",
"assert_called_once_with",
"(",
"\"BBB\"",
")",
"with",
"patch",
"(",
"\"azure.storage.blob.BlobServiceClient\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_azure_blob_client",
":",
"mock_container_client",
"=",
"mock_azure_blob_client",
".",
"get_container_client",
".",
"return_value",
"my_store",
".",
"list_keys",
"(",
")",
"mock_azure_blob_client",
".",
"from_connection_string",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"assert_called_once",
"(",
")",
"mock_container_client",
".",
"list_blobs",
".",
"assert_called_once_with",
"(",
"\"this_is_a_prefix\"",
")"
] | [
918,
0
] | [
983,
84
] | python | en | ['en', 'error', 'th'] | False |
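The pattern above generalizes: when no emulator like moto exists for a service, wire a fake client in with unittest.mock and assert on the outgoing calls rather than on real I/O. A minimal, self-contained sketch follows; TinyBlobStore and every name in it are hypothetical stand-ins, not the real store backend.

from unittest.mock import MagicMock

# Hypothetical stand-in for a store backend that talks to a container client.
class TinyBlobStore:
    def __init__(self, service_client, container):
        self._container = service_client.get_container_client(container)

    def set(self, name, value):
        data = value if isinstance(value, bytes) else value.encode("utf-8")
        self._container.upload_blob(name=name, data=data)

service_client = MagicMock()  # plays the role of the real blob service client
store = TinyBlobStore(service_client, "dummy-container")
store.set("AAA", "aaa")

# The assertions mirror the test above: verify the forwarded call, not real I/O.
service_client.get_container_client.assert_called_once_with("dummy-container")
service_client.get_container_client.return_value.upload_blob.assert_called_once_with(
    name="AAA", data=b"aaa")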
test_TupleS3StoreBackend_list_over_1000_keys | () |
What does this test test and why?
TupleS3StoreBackend.list_keys() should be able to list over 1000 keys
which is the current limit for boto3.list_objects and boto3.list_objects_v2 methods.
See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/paginators.html
We will create a bucket with over 1000 keys, list them with TupleS3StoreBackend.list_keys()
and make sure all are accounted for.
|
What does this test test and why? | def test_TupleS3StoreBackend_list_over_1000_keys():
"""
What does this test test and why?
TupleS3StoreBackend.list_keys() should be able to list over 1000 keys
which is the current limit for boto3.list_objects and boto3.list_objects_v2 methods.
See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/paginators.html
We will create a bucket with over 1000 keys, list them with TupleS3StoreBackend.list_keys()
and make sure all are accounted for.
"""
bucket = "leakybucket"
prefix = "my_prefix"
# create a bucket in Moto's mock AWS environment
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
# Assert that the bucket is empty before creating store
assert (
boto3.client("s3").list_objects_v2(Bucket=bucket, Prefix=prefix).get("Contents")
is None
)
my_store = TupleS3StoreBackend(
filepath_template="my_file_{0}",
bucket=bucket,
prefix=prefix,
)
# We should be able to list keys, even when empty
# len(keys) == 1 because of the .ge_store_backend_id
keys = my_store.list_keys()
assert len(keys) == 1
# Add more than 1000 keys
max_keys_in_a_single_call = 1000
num_keys_to_add = int(1.2 * max_keys_in_a_single_call)
for key_num in range(num_keys_to_add):
my_store.set(
(f"AAA_{key_num}",),
f"aaa_{key_num}",
content_type="text/html; charset=utf-8",
)
assert my_store.get(("AAA_0",)) == "aaa_0"
assert my_store.get((f"AAA_{num_keys_to_add-1}",)) == f"aaa_{num_keys_to_add-1}"
# Without pagination only list max_keys_in_a_single_call
# This is belt and suspenders to make sure mocking s3 list_objects_v2 implements
# the same limit as the actual s3 api
assert (
len(
boto3.client("s3").list_objects_v2(Bucket=bucket, Prefix=prefix)["Contents"]
)
== max_keys_in_a_single_call
)
# With pagination list all keys
keys = my_store.list_keys()
# len(keys) == num_keys_to_add + 1 because of the .ge_store_backend_id
assert len(keys) == num_keys_to_add + 1 | [
"def",
"test_TupleS3StoreBackend_list_over_1000_keys",
"(",
")",
":",
"bucket",
"=",
"\"leakybucket\"",
"prefix",
"=",
"\"my_prefix\"",
"# create a bucket in Moto's mock AWS environment",
"conn",
"=",
"boto3",
".",
"resource",
"(",
"\"s3\"",
",",
"region_name",
"=",
"\"us-east-1\"",
")",
"conn",
".",
"create_bucket",
"(",
"Bucket",
"=",
"bucket",
")",
"# Assert that the bucket is empty before creating store",
"assert",
"(",
"boto3",
".",
"client",
"(",
"\"s3\"",
")",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"bucket",
",",
"Prefix",
"=",
"prefix",
")",
".",
"get",
"(",
"\"Contents\"",
")",
"is",
"None",
")",
"my_store",
"=",
"TupleS3StoreBackend",
"(",
"filepath_template",
"=",
"\"my_file_{0}\"",
",",
"bucket",
"=",
"bucket",
",",
"prefix",
"=",
"prefix",
",",
")",
"# We should be able to list keys, even when empty",
"# len(keys) == 1 because of the .ge_store_backend_id",
"keys",
"=",
"my_store",
".",
"list_keys",
"(",
")",
"assert",
"len",
"(",
"keys",
")",
"==",
"1",
"# Add more than 1000 keys",
"max_keys_in_a_single_call",
"=",
"1000",
"num_keys_to_add",
"=",
"int",
"(",
"1.2",
"*",
"max_keys_in_a_single_call",
")",
"for",
"key_num",
"in",
"range",
"(",
"num_keys_to_add",
")",
":",
"my_store",
".",
"set",
"(",
"(",
"f\"AAA_{key_num}\"",
",",
")",
",",
"f\"aaa_{key_num}\"",
",",
"content_type",
"=",
"\"text/html; charset=utf-8\"",
",",
")",
"assert",
"my_store",
".",
"get",
"(",
"(",
"\"AAA_0\"",
",",
")",
")",
"==",
"\"aaa_0\"",
"assert",
"my_store",
".",
"get",
"(",
"(",
"f\"AAA_{num_keys_to_add-1}\"",
",",
")",
")",
"==",
"f\"aaa_{num_keys_to_add-1}\"",
"# Without pagination only list max_keys_in_a_single_call",
"# This is belt and suspenders to make sure mocking s3 list_objects_v2 implements",
"# the same limit as the actual s3 api",
"assert",
"(",
"len",
"(",
"boto3",
".",
"client",
"(",
"\"s3\"",
")",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"bucket",
",",
"Prefix",
"=",
"prefix",
")",
"[",
"\"Contents\"",
"]",
")",
"==",
"max_keys_in_a_single_call",
")",
"# With pagination list all keys",
"keys",
"=",
"my_store",
".",
"list_keys",
"(",
")",
"# len(keys) == num_keys_to_add + 1 because of the .ge_store_backend_id",
"assert",
"len",
"(",
"keys",
")",
"==",
"num_keys_to_add",
"+",
"1"
] | [
987,
0
] | [
1048,
43
] | python | en | ['en', 'error', 'th'] | False |
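For reference, the pagination a store backend must implement can be sketched with boto3's built-in paginator, which follows continuation tokens past the 1000-key page limit of list_objects_v2. The bucket and prefix below are placeholders, and the snippet assumes boto3 plus valid (or moto-mocked) credentials.

import boto3

def list_all_keys(bucket, prefix):
    # Each list_objects_v2 page holds at most 1000 keys; the paginator
    # keeps requesting pages until the listing is exhausted.
    paginator = boto3.client("s3").get_paginator("list_objects_v2")
    keys = []
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        keys.extend(obj["Key"] for obj in page.get("Contents", []))
    return keys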
test_GeCloudStoreBackend | () |
What does this test test and why?
Since GeCloudStoreBackend relies on GE Cloud, we mock requests.post, requests.get, and
requests.patch and assert that the right calls are made for set, get, list, and remove_key.
|
What does this test test and why? | def test_GeCloudStoreBackend():
"""
What does this test test and why?
Since GeCloudStoreBackend relies on GE Cloud, we mock requests.post, requests.get, and
requests.patch and assert that the right calls are made for set, get, list, and remove_key.
"""
ge_cloud_base_url = "https://app.greatexpectations.io/"
ge_cloud_credentials = {
"access_token": "1234",
"account_id": "51379b8b-86d3-4fe7-84e9-e1a52f4a414c",
}
ge_cloud_resource_type = "checkpoint"
my_simple_checkpoint_config: CheckpointConfig = CheckpointConfig(
name="my_minimal_simple_checkpoint",
class_name="SimpleCheckpoint",
config_version=1,
)
my_simple_checkpoint_config_serialized = (
my_simple_checkpoint_config.get_schema_class()().dump(
my_simple_checkpoint_config
)
)
# test .set
with patch("requests.post", autospec=True) as mock_post:
my_store_backend = GeCloudStoreBackend(
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_credentials=ge_cloud_credentials,
ge_cloud_resource_type=ge_cloud_resource_type,
)
my_store_backend.set(
("my_checkpoint_name",), my_simple_checkpoint_config_serialized
)
mock_post.assert_called_with(
"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints",
json={
"data": {
"type": "checkpoint",
"attributes": {
"account_id": "51379b8b-86d3-4fe7-84e9-e1a52f4a414c",
"checkpoint_config": OrderedDict(
[
("name", "my_minimal_simple_checkpoint"),
("config_version", 1.0),
("template_name", None),
("module_name", "great_expectations.checkpoint"),
("class_name", "SimpleCheckpoint"),
("run_name_template", None),
("expectation_suite_name", None),
("batch_request", None),
("action_list", []),
("evaluation_parameters", {}),
("runtime_configuration", {}),
("validations", []),
("profilers", []),
("ge_cloud_id", None),
]
),
},
}
},
headers={
"Content-Type": "application/vnd.api+json",
"Authorization": "Bearer 1234",
},
)
# test .get
with patch("requests.get", autospec=True) as mock_get:
my_store_backend = GeCloudStoreBackend(
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_credentials=ge_cloud_credentials,
ge_cloud_resource_type=ge_cloud_resource_type,
)
my_store_backend.get(("0ccac18e-7631-4bdd-8a42-3c35cce574c6",))
mock_get.assert_called_with(
"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints/0ccac18e-7631-4bdd-8a42-3c35cce574c6",
headers={
"Content-Type": "application/vnd.api+json",
"Authorization": "Bearer 1234",
},
)
# test .list_keys
with patch("requests.get", autospec=True) as mock_get:
my_store_backend = GeCloudStoreBackend(
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_credentials=ge_cloud_credentials,
ge_cloud_resource_type=ge_cloud_resource_type,
)
my_store_backend.list_keys()
mock_get.assert_called_with(
"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints",
headers={
"Content-Type": "application/vnd.api+json",
"Authorization": "Bearer 1234",
},
)
# test .remove_key
with patch("requests.patch", autospec=True) as mock_patch:
mock_response = mock_patch.return_value
mock_response.status_code = 200
my_store_backend = GeCloudStoreBackend(
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_credentials=ge_cloud_credentials,
ge_cloud_resource_type=ge_cloud_resource_type,
)
my_store_backend.remove_key(("0ccac18e-7631-4bdd-8a42-3c35cce574c6",))
mock_patch.assert_called_with(
"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints/0ccac18e-7631-4bdd-8a42-3c35cce574c6",
json={
"data": {
"type": "checkpoint",
"id": "0ccac18e-7631-4bdd-8a42-3c35cce574c6",
"attributes": {"deleted": True},
}
},
headers={
"Content-Type": "application/vnd.api+json",
"Authorization": "Bearer 1234",
},
) | [
"def",
"test_GeCloudStoreBackend",
"(",
")",
":",
"ge_cloud_base_url",
"=",
"\"https://app.greatexpectations.io/\"",
"ge_cloud_credentials",
"=",
"{",
"\"access_token\"",
":",
"\"1234\"",
",",
"\"account_id\"",
":",
"\"51379b8b-86d3-4fe7-84e9-e1a52f4a414c\"",
",",
"}",
"ge_cloud_resource_type",
"=",
"\"checkpoint\"",
"my_simple_checkpoint_config",
":",
"CheckpointConfig",
"=",
"CheckpointConfig",
"(",
"name",
"=",
"\"my_minimal_simple_checkpoint\"",
",",
"class_name",
"=",
"\"SimpleCheckpoint\"",
",",
"config_version",
"=",
"1",
",",
")",
"my_simple_checkpoint_config_serialized",
"=",
"(",
"my_simple_checkpoint_config",
".",
"get_schema_class",
"(",
")",
"(",
")",
".",
"dump",
"(",
"my_simple_checkpoint_config",
")",
")",
"# test .set",
"with",
"patch",
"(",
"\"requests.post\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_post",
":",
"my_store_backend",
"=",
"GeCloudStoreBackend",
"(",
"ge_cloud_base_url",
"=",
"ge_cloud_base_url",
",",
"ge_cloud_credentials",
"=",
"ge_cloud_credentials",
",",
"ge_cloud_resource_type",
"=",
"ge_cloud_resource_type",
",",
")",
"my_store_backend",
".",
"set",
"(",
"(",
"\"my_checkpoint_name\"",
",",
")",
",",
"my_simple_checkpoint_config_serialized",
")",
"mock_post",
".",
"assert_called_with",
"(",
"\"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints\"",
",",
"json",
"=",
"{",
"\"data\"",
":",
"{",
"\"type\"",
":",
"\"checkpoint\"",
",",
"\"attributes\"",
":",
"{",
"\"account_id\"",
":",
"\"51379b8b-86d3-4fe7-84e9-e1a52f4a414c\"",
",",
"\"checkpoint_config\"",
":",
"OrderedDict",
"(",
"[",
"(",
"\"name\"",
",",
"\"my_minimal_simple_checkpoint\"",
")",
",",
"(",
"\"config_version\"",
",",
"1.0",
")",
",",
"(",
"\"template_name\"",
",",
"None",
")",
",",
"(",
"\"module_name\"",
",",
"\"great_expectations.checkpoint\"",
")",
",",
"(",
"\"class_name\"",
",",
"\"SimpleCheckpoint\"",
")",
",",
"(",
"\"run_name_template\"",
",",
"None",
")",
",",
"(",
"\"expectation_suite_name\"",
",",
"None",
")",
",",
"(",
"\"batch_request\"",
",",
"None",
")",
",",
"(",
"\"action_list\"",
",",
"[",
"]",
")",
",",
"(",
"\"evaluation_parameters\"",
",",
"{",
"}",
")",
",",
"(",
"\"runtime_configuration\"",
",",
"{",
"}",
")",
",",
"(",
"\"validations\"",
",",
"[",
"]",
")",
",",
"(",
"\"profilers\"",
",",
"[",
"]",
")",
",",
"(",
"\"ge_cloud_id\"",
",",
"None",
")",
",",
"]",
")",
",",
"}",
",",
"}",
"}",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/vnd.api+json\"",
",",
"\"Authorization\"",
":",
"\"Bearer 1234\"",
",",
"}",
",",
")",
"# test .get",
"with",
"patch",
"(",
"\"requests.get\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_get",
":",
"my_store_backend",
"=",
"GeCloudStoreBackend",
"(",
"ge_cloud_base_url",
"=",
"ge_cloud_base_url",
",",
"ge_cloud_credentials",
"=",
"ge_cloud_credentials",
",",
"ge_cloud_resource_type",
"=",
"ge_cloud_resource_type",
",",
")",
"my_store_backend",
".",
"get",
"(",
"(",
"\"0ccac18e-7631-4bdd-8a42-3c35cce574c6\"",
",",
")",
")",
"mock_get",
".",
"assert_called_with",
"(",
"\"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints/0ccac18e-7631-4bdd-8a42-3c35cce574c6\"",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/vnd.api+json\"",
",",
"\"Authorization\"",
":",
"\"Bearer 1234\"",
",",
"}",
",",
")",
"# test .list_keys",
"with",
"patch",
"(",
"\"requests.get\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_get",
":",
"my_store_backend",
"=",
"GeCloudStoreBackend",
"(",
"ge_cloud_base_url",
"=",
"ge_cloud_base_url",
",",
"ge_cloud_credentials",
"=",
"ge_cloud_credentials",
",",
"ge_cloud_resource_type",
"=",
"ge_cloud_resource_type",
",",
")",
"my_store_backend",
".",
"list_keys",
"(",
")",
"mock_get",
".",
"assert_called_with",
"(",
"\"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints\"",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/vnd.api+json\"",
",",
"\"Authorization\"",
":",
"\"Bearer 1234\"",
",",
"}",
",",
")",
"# test .remove_key",
"with",
"patch",
"(",
"\"requests.patch\"",
",",
"autospec",
"=",
"True",
")",
"as",
"mock_patch",
":",
"mock_response",
"=",
"mock_patch",
".",
"return_value",
"mock_response",
".",
"status_code",
"=",
"200",
"my_store_backend",
"=",
"GeCloudStoreBackend",
"(",
"ge_cloud_base_url",
"=",
"ge_cloud_base_url",
",",
"ge_cloud_credentials",
"=",
"ge_cloud_credentials",
",",
"ge_cloud_resource_type",
"=",
"ge_cloud_resource_type",
",",
")",
"my_store_backend",
".",
"remove_key",
"(",
"(",
"\"0ccac18e-7631-4bdd-8a42-3c35cce574c6\"",
",",
")",
")",
"mock_patch",
".",
"assert_called_with",
"(",
"\"https://app.greatexpectations.io/accounts/51379b8b-86d3-4fe7-84e9-e1a52f4a414c/checkpoints/0ccac18e-7631-4bdd-8a42-3c35cce574c6\"",
",",
"json",
"=",
"{",
"\"data\"",
":",
"{",
"\"type\"",
":",
"\"checkpoint\"",
",",
"\"id\"",
":",
"\"0ccac18e-7631-4bdd-8a42-3c35cce574c6\"",
",",
"\"attributes\"",
":",
"{",
"\"deleted\"",
":",
"True",
"}",
",",
"}",
"}",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/vnd.api+json\"",
",",
"\"Authorization\"",
":",
"\"Bearer 1234\"",
",",
"}",
",",
")"
] | [
1051,
0
] | [
1175,
13
] | python | en | ['en', 'error', 'th'] | False |
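The same mock-and-assert style works for any REST-backed store: patch the requests call, invoke the code under test, and compare the exact URL, JSON body, and headers. A small sketch under that assumption; the URL, token, and payload here are made up.

from unittest.mock import patch

import requests

def create_resource(base_url, token, payload):
    return requests.post(
        base_url + "/checkpoints",
        json=payload,
        headers={"Authorization": "Bearer " + token},
    )

with patch("requests.post", autospec=True) as mock_post:
    create_resource("https://example.invalid/accounts/abc", "1234", {"name": "cp"})
    # No network traffic happened; we only inspect the recorded call.
    mock_post.assert_called_with(
        "https://example.invalid/accounts/abc/checkpoints",
        json={"name": "cp"},
        headers={"Authorization": "Bearer 1234"},
    )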
assertDeepAlmostEqual | (expected, actual, *args, **kwargs) |
Assert that two complex structures have almost equal contents.
Compares lists, dicts and tuples recursively. Checks numeric values
using pytest.approx and checks all other values with an equality assertion.
Accepts additional positional and keyword arguments and passes those
intact to pytest.approx() (that's how you specify comparison
precision).
|
Assert that two complex structures have almost equal contents. | def assertDeepAlmostEqual(expected, actual, *args, **kwargs):
"""
Assert that two complex structures have almost equal contents.
Compares lists, dicts and tuples recursively. Checks numeric values
    using pytest.approx and checks all other values with an equality assertion.
    Accepts additional positional and keyword arguments and passes those
intact to pytest.approx() (that's how you specify comparison
precision).
"""
is_root = "__trace" not in kwargs
trace = kwargs.pop("__trace", "ROOT")
try:
# if isinstance(expected, (int, float, long, complex)):
if isinstance(expected, (int, float, complex)):
assert expected == pytest.approx(actual, *args, **kwargs)
elif isinstance(expected, (list, tuple, np.ndarray)):
assert len(expected) == len(actual)
for index in range(len(expected)):
v1, v2 = expected[index], actual[index]
assertDeepAlmostEqual(v1, v2, __trace=repr(index), *args, **kwargs)
elif isinstance(expected, dict):
assert set(expected) == set(actual)
for key in expected:
assertDeepAlmostEqual(
expected[key], actual[key], __trace=repr(key), *args, **kwargs
)
else:
assert expected == actual
except AssertionError as exc:
exc.__dict__.setdefault("traces", []).append(trace)
if is_root:
trace = " -> ".join(reversed(exc.traces))
exc = AssertionError("{}\nTRACE: {}".format(str(exc), trace))
raise exc | [
"def",
"assertDeepAlmostEqual",
"(",
"expected",
",",
"actual",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"is_root",
"=",
"\"__trace\"",
"not",
"in",
"kwargs",
"trace",
"=",
"kwargs",
".",
"pop",
"(",
"\"__trace\"",
",",
"\"ROOT\"",
")",
"try",
":",
"# if isinstance(expected, (int, float, long, complex)):",
"if",
"isinstance",
"(",
"expected",
",",
"(",
"int",
",",
"float",
",",
"complex",
")",
")",
":",
"assert",
"expected",
"==",
"pytest",
".",
"approx",
"(",
"actual",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"expected",
",",
"(",
"list",
",",
"tuple",
",",
"np",
".",
"ndarray",
")",
")",
":",
"assert",
"len",
"(",
"expected",
")",
"==",
"len",
"(",
"actual",
")",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"expected",
")",
")",
":",
"v1",
",",
"v2",
"=",
"expected",
"[",
"index",
"]",
",",
"actual",
"[",
"index",
"]",
"assertDeepAlmostEqual",
"(",
"v1",
",",
"v2",
",",
"__trace",
"=",
"repr",
"(",
"index",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"expected",
",",
"dict",
")",
":",
"assert",
"set",
"(",
"expected",
")",
"==",
"set",
"(",
"actual",
")",
"for",
"key",
"in",
"expected",
":",
"assertDeepAlmostEqual",
"(",
"expected",
"[",
"key",
"]",
",",
"actual",
"[",
"key",
"]",
",",
"__trace",
"=",
"repr",
"(",
"key",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"assert",
"expected",
"==",
"actual",
"except",
"AssertionError",
"as",
"exc",
":",
"exc",
".",
"__dict__",
".",
"setdefault",
"(",
"\"traces\"",
",",
"[",
"]",
")",
".",
"append",
"(",
"trace",
")",
"if",
"is_root",
":",
"trace",
"=",
"\" -> \"",
".",
"join",
"(",
"reversed",
"(",
"exc",
".",
"traces",
")",
")",
"exc",
"=",
"AssertionError",
"(",
"\"{}\\nTRACE: {}\"",
".",
"format",
"(",
"str",
"(",
"exc",
")",
",",
"trace",
")",
")",
"raise",
"exc"
] | [
27,
0
] | [
62,
17
] | python | en | ['en', 'error', 'th'] | False |
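A short usage sketch, assuming the helper above plus pytest and numpy are importable; the data is made up. Extra kwargs flow straight into pytest.approx, so rel sets the relative tolerance, and on failure the raised AssertionError carries a ROOT -> 'scores' -> 0 style trace.

import numpy as np

expected = {"scores": [0.1, 0.2, 0.3], "meta": {"n": 3}}
actual = {"scores": np.array([0.1000001, 0.2, 0.3]), "meta": {"n": 3}}

# Numeric leaves compare via pytest.approx; containers recurse key by key
# and index by index.
assertDeepAlmostEqual(expected, actual, rel=1e-4)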
validate_uuid4 | (uuid_string: str) |
Validate that a UUID string is in fact a valid uuid4.
Happily, the uuid module does the actual checking for us.
It is vital that the 'version' kwarg be passed
to the UUID() call, otherwise any 32-character
hex string is considered valid.
From https://gist.github.com/ShawnMilo/7777304
Args:
uuid_string: string to check whether it is a valid UUID or not
Returns:
True if uuid_string is a valid UUID or False if not
|
Validate that a UUID string is in fact a valid uuid4.
Happily, the uuid module does the actual checking for us.
It is vital that the 'version' kwarg be passed
to the UUID() call, otherwise any 32-character
hex string is considered valid.
From https://gist.github.com/ShawnMilo/7777304 | def validate_uuid4(uuid_string: str) -> bool:
"""
Validate that a UUID string is in fact a valid uuid4.
Happily, the uuid module does the actual checking for us.
It is vital that the 'version' kwarg be passed
to the UUID() call, otherwise any 32-character
hex string is considered valid.
From https://gist.github.com/ShawnMilo/7777304
Args:
uuid_string: string to check whether it is a valid UUID or not
Returns:
True if uuid_string is a valid UUID or False if not
"""
try:
val = uuid.UUID(uuid_string, version=4)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
# If the uuid_string is a valid hex code,
# but an invalid uuid4,
# the UUID.__init__ will convert it to a
# valid uuid4. This is bad for validation purposes.
return val.hex == uuid_string.replace("-", "") | [
"def",
"validate_uuid4",
"(",
"uuid_string",
":",
"str",
")",
"->",
"bool",
":",
"try",
":",
"val",
"=",
"uuid",
".",
"UUID",
"(",
"uuid_string",
",",
"version",
"=",
"4",
")",
"except",
"ValueError",
":",
"# If it's a value error, then the string",
"# is not a valid hex code for a UUID.",
"return",
"False",
"# If the uuid_string is a valid hex code,",
"# but an invalid uuid4,",
"# the UUID.__init__ will convert it to a",
"# valid uuid4. This is bad for validation purposes.",
"return",
"val",
".",
"hex",
"==",
"uuid_string",
".",
"replace",
"(",
"\"-\"",
",",
"\"\"",
")"
] | [
137,
0
] | [
164,
50
] | python | en | ['en', 'error', 'th'] | False |
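To see why the final hex comparison is needed, note that UUID(..., version=4) silently rewrites the version and variant bits rather than rejecting non-v4 input. A quick check, assuming the function above is in scope:

import uuid

assert validate_uuid4(str(uuid.uuid4()))

# A syntactically valid 32-char hex string that is not a v4 UUID:
not_v4 = "12345678123456781234567812345678"
uuid.UUID(not_v4, version=4)        # parses fine (the bits get rewritten)
assert not validate_uuid4(not_v4)   # the round-trip comparison catches it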
CEVAE.__init__ | (self, outcome_dist="studentt", latent_dim=20, hidden_dim=200, num_epochs=50, num_layers=3,
batch_size=100, learning_rate=1e-3, learning_rate_decay=0.1, num_samples=1000, weight_decay=1e-4) |
Initializes CEVAE.
Args:
outcome_dist (str): Outcome distribution as one of: "bernoulli" , "exponential", "laplace", "normal",
and "studentt"
latent_dim (int) : Dimension of the latent variable
hidden_dim (int) : Dimension of hidden layers of fully connected networks
num_epochs (int): Number of training epochs
num_layers (int): Number of hidden layers in fully connected networks
batch_size (int): Batch size
learning_rate (float): Learning rate
learning_rate_decay (float/int): Learning rate decay over all epochs; the per-step decay rate will
depend on batch size and number of epochs such that the initial
learning rate will be learning_rate and the
final learning rate will be learning_rate * learning_rate_decay
num_samples (int) : Number of samples to calculate ITE
weight_decay (float) : Weight decay
|
Initializes CEVAE. | def __init__(self, outcome_dist="studentt", latent_dim=20, hidden_dim=200, num_epochs=50, num_layers=3,
batch_size=100, learning_rate=1e-3, learning_rate_decay=0.1, num_samples=1000, weight_decay=1e-4):
"""
Initializes CEVAE.
Args:
outcome_dist (str): Outcome distribution as one of: "bernoulli" , "exponential", "laplace", "normal",
and "studentt"
latent_dim (int) : Dimension of the latent variable
hidden_dim (int) : Dimension of hidden layers of fully connected networks
num_epochs (int): Number of training epochs
num_layers (int): Number of hidden layers in fully connected networks
batch_size (int): Batch size
    learning_rate (float): Learning rate
learning_rate_decay (float/int): Learning rate decay over all epochs; the per-step decay rate will
depend on batch size and number of epochs such that the initial
learning rate will be learning_rate and the
final learning rate will be learning_rate * learning_rate_decay
num_samples (int) : Number of samples to calculate ITE
weight_decay (float) : Weight decay
"""
self.outcome_dist = outcome_dist
self.latent_dim = latent_dim
self.hidden_dim = hidden_dim
self.num_epochs = num_epochs
self.num_layers = num_layers
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_decay = learning_rate_decay
self.num_samples = num_samples
self.weight_decay = weight_decay | [
"def",
"__init__",
"(",
"self",
",",
"outcome_dist",
"=",
"\"studentt\"",
",",
"latent_dim",
"=",
"20",
",",
"hidden_dim",
"=",
"200",
",",
"num_epochs",
"=",
"50",
",",
"num_layers",
"=",
"3",
",",
"batch_size",
"=",
"100",
",",
"learning_rate",
"=",
"1e-3",
",",
"learning_rate_decay",
"=",
"0.1",
",",
"num_samples",
"=",
"1000",
",",
"weight_decay",
"=",
"1e-4",
")",
":",
"self",
".",
"outcome_dist",
"=",
"outcome_dist",
"self",
".",
"latent_dim",
"=",
"latent_dim",
"self",
".",
"hidden_dim",
"=",
"hidden_dim",
"self",
".",
"num_epochs",
"=",
"num_epochs",
"self",
".",
"num_layers",
"=",
"num_layers",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"learning_rate",
"=",
"learning_rate",
"self",
".",
"learning_rate_decay",
"=",
"learning_rate_decay",
"self",
".",
"num_samples",
"=",
"num_samples",
"self",
".",
"weight_decay",
"=",
"weight_decay"
] | [
37,
4
] | [
67,
40
] | python | en | ['en', 'error', 'th'] | False |
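A hedged instantiation sketch: it assumes the class above is importable as CEVAE and that torch and pyro are installed. The values simply echo the documented defaults except for the outcome distribution, which should match the outcome variable's family.

cevae = CEVAE(
    outcome_dist="normal",   # pick the family matching your outcome variable
    latent_dim=20,
    hidden_dim=200,
    num_epochs=50,
    batch_size=100,
    learning_rate=1e-3,
)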
CEVAE.fit | (self, X, treatment, y, p=None) |
Fits CEVAE.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
|
Fits CEVAE. | def fit(self, X, treatment, y, p=None):
"""
Fits CEVAE.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
self.cevae = CEVAEModel(outcome_dist=self.outcome_dist,
feature_dim=X.shape[-1],
latent_dim=self.latent_dim,
hidden_dim=self.hidden_dim,
num_layers=self.num_layers)
self.cevae.fit(x=torch.tensor(X, dtype=torch.float),
t=torch.tensor(treatment, dtype=torch.float),
y=torch.tensor(y, dtype=torch.float),
num_epochs=self.num_epochs,
batch_size=self.batch_size,
learning_rate=self.learning_rate,
learning_rate_decay=self.learning_rate_decay,
weight_decay=self.weight_decay) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"self",
".",
"cevae",
"=",
"CEVAEModel",
"(",
"outcome_dist",
"=",
"self",
".",
"outcome_dist",
",",
"feature_dim",
"=",
"X",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"latent_dim",
"=",
"self",
".",
"latent_dim",
",",
"hidden_dim",
"=",
"self",
".",
"hidden_dim",
",",
"num_layers",
"=",
"self",
".",
"num_layers",
")",
"self",
".",
"cevae",
".",
"fit",
"(",
"x",
"=",
"torch",
".",
"tensor",
"(",
"X",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
",",
"t",
"=",
"torch",
".",
"tensor",
"(",
"treatment",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
",",
"y",
"=",
"torch",
".",
"tensor",
"(",
"y",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
",",
"num_epochs",
"=",
"self",
".",
"num_epochs",
",",
"batch_size",
"=",
"self",
".",
"batch_size",
",",
"learning_rate",
"=",
"self",
".",
"learning_rate",
",",
"learning_rate_decay",
"=",
"self",
".",
"learning_rate_decay",
",",
"weight_decay",
"=",
"self",
".",
"weight_decay",
")"
] | [
69,
4
] | [
93,
54
] | python | en | ['en', 'error', 'th'] | False |
CEVAE.predict | (self, X, treatment=None, y=None, p=None) |
Calls predict on the fitted CEVAE model.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
Returns:
(np.ndarray): Predictions of treatment effects.
|
Calls predict on the fitted CEVAE model. | def predict(self, X, treatment=None, y=None, p=None):
"""
    Calls predict on the fitted CEVAE model.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
Returns:
(np.ndarray): Predictions of treatment effects.
"""
return self.cevae.ite(torch.tensor(X, dtype=torch.float),
num_samples=self.num_samples,
batch_size=self.batch_size).cpu().numpy() | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"treatment",
"=",
"None",
",",
"y",
"=",
"None",
",",
"p",
"=",
"None",
")",
":",
"return",
"self",
".",
"cevae",
".",
"ite",
"(",
"torch",
".",
"tensor",
"(",
"X",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
",",
"num_samples",
"=",
"self",
".",
"num_samples",
",",
"batch_size",
"=",
"self",
".",
"batch_size",
")",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")"
] | [
95,
4
] | [
106,
71
] | python | en | ['en', 'error', 'th'] | False |
CEVAE.fit_predict | (self, X, treatment, y, p=None) |
Fits the CEVAE model and then predicts.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
Returns:
(np.ndarray): Predictions of treatment effects.
|
Fits the CEVAE model and then predicts. | def fit_predict(self, X, treatment, y, p=None):
"""
Fits the CEVAE model and then predicts.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
Returns:
(np.ndarray): Predictions of treatment effects.
"""
self.fit(X, treatment, y)
return self.predict(X) | [
"def",
"fit_predict",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
")",
":",
"self",
".",
"fit",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"return",
"self",
".",
"predict",
"(",
"X",
")"
] | [
108,
4
] | [
120,
30
] | python | en | ['en', 'error', 'th'] | False |
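End to end, fit_predict ties the pieces together: fit the VAE, then sample individual treatment effects. A sketch on synthetic data, reusing the hypothetical cevae instance from the constructor sketch above; the numbers and the data-generating process are made up.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 10)).astype("float32")
treatment = rng.integers(0, 2, size=500).astype("float32")
y = (X[:, 0] + 0.5 * treatment + rng.normal(scale=0.1, size=500)).astype("float32")

ite = cevae.fit_predict(X, treatment, y)  # one treatment-effect estimate per row
print(ite.mean())                         # crude ATE estimate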
XmlToString | (content, encoding='utf-8', pretty=False) | Converts structured content into an XML string.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
| Converts structured content into an XML string. | def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts) | [
"def",
"XmlToString",
"(",
"content",
",",
"encoding",
"=",
"'utf-8'",
",",
"pretty",
"=",
"False",
")",
":",
"# We create a huge list of all the elements of the file.",
"xml_parts",
"=",
"[",
"'<?xml version=\"1.0\" encoding=\"%s\"?>'",
"%",
"encoding",
"]",
"if",
"pretty",
":",
"xml_parts",
".",
"append",
"(",
"'\\n'",
")",
"_ConstructContentList",
"(",
"xml_parts",
",",
"content",
",",
"pretty",
")",
"# Convert it to a string",
"return",
"''",
".",
"join",
"(",
"xml_parts",
")"
] | [
9,
0
] | [
54,
27
] | python | en | ['en', 'en', 'en'] | True |
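A quick usage sketch of the list-based specification, assuming the function above and its helper below are in scope; the element names are arbitrary.

spec = ['Project', {'ToolsVersion': '4.0'},
        ['ItemGroup',
         ['ClCompile', {'Include': 'main.cc'}]]]

print(XmlToString(spec, pretty=True))
# <?xml version="1.0" encoding="utf-8"?>
# <Project ToolsVersion="4.0">
#   <ItemGroup>
#     <ClCompile Include="main.cc"/>
#   </ItemGroup>
# </Project>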
_ConstructContentList | (xml_parts, specification, pretty, level=0) | Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
| Appends the XML parts corresponding to the specification. | def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
    for at, val in sorted(rest[0].items()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
    all_strings = all(isinstance(y, str) for y in rest)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line) | [
"def",
"_ConstructContentList",
"(",
"xml_parts",
",",
"specification",
",",
"pretty",
",",
"level",
"=",
"0",
")",
":",
"# The first item in a specification is the name of the element.",
"if",
"pretty",
":",
"indentation",
"=",
"' '",
"*",
"level",
"new_line",
"=",
"'\\n'",
"else",
":",
"indentation",
"=",
"''",
"new_line",
"=",
"''",
"name",
"=",
"specification",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"name",
",",
"str",
")",
":",
"raise",
"Exception",
"(",
"'The first item of an EasyXml specification should be '",
"'a string. Specification was '",
"+",
"str",
"(",
"specification",
")",
")",
"xml_parts",
".",
"append",
"(",
"indentation",
"+",
"'<'",
"+",
"name",
")",
"# Optionally in second position is a dictionary of the attributes.",
"rest",
"=",
"specification",
"[",
"1",
":",
"]",
"if",
"rest",
"and",
"isinstance",
"(",
"rest",
"[",
"0",
"]",
",",
"dict",
")",
":",
"for",
"at",
",",
"val",
"in",
"sorted",
"(",
"rest",
"[",
"0",
"]",
".",
"iteritems",
"(",
")",
")",
":",
"xml_parts",
".",
"append",
"(",
"' %s=\"%s\"'",
"%",
"(",
"at",
",",
"_XmlEscape",
"(",
"val",
",",
"attr",
"=",
"True",
")",
")",
")",
"rest",
"=",
"rest",
"[",
"1",
":",
"]",
"if",
"rest",
":",
"xml_parts",
".",
"append",
"(",
"'>'",
")",
"all_strings",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"and",
"isinstance",
"(",
"y",
",",
"str",
")",
",",
"rest",
",",
"True",
")",
"multi_line",
"=",
"not",
"all_strings",
"if",
"multi_line",
"and",
"new_line",
":",
"xml_parts",
".",
"append",
"(",
"new_line",
")",
"for",
"child_spec",
"in",
"rest",
":",
"# If it's a string, append a text node.",
"# Otherwise recurse over that child definition",
"if",
"isinstance",
"(",
"child_spec",
",",
"str",
")",
":",
"xml_parts",
".",
"append",
"(",
"_XmlEscape",
"(",
"child_spec",
")",
")",
"else",
":",
"_ConstructContentList",
"(",
"xml_parts",
",",
"child_spec",
",",
"pretty",
",",
"level",
"+",
"1",
")",
"if",
"multi_line",
"and",
"indentation",
":",
"xml_parts",
".",
"append",
"(",
"indentation",
")",
"xml_parts",
".",
"append",
"(",
"'</%s>%s'",
"%",
"(",
"name",
",",
"new_line",
")",
")",
"else",
":",
"xml_parts",
".",
"append",
"(",
"'/>%s'",
"%",
"new_line",
")"
] | [
57,
0
] | [
102,
39
] | python | en | ['en', 'en', 'en'] | True |
WriteXmlIfChanged | (content, path, encoding='utf-8', pretty=False,
win32=False) | Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
| Writes the XML content to disk, touching the file only if it has changed. | def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
  # Open with an explicit encoding so the bytes on disk match the XML
  # header (the Python-2-era locale re-encode step is not needed here).
  # Get the old content
  try:
    f = open(path, 'r', encoding=encoding)
    existing = f.read()
    f.close()
  except OSError:
    existing = None
  # It has changed, write it
  if existing != xml_string:
    f = open(path, 'w', encoding=encoding)
    f.write(xml_string)
f.close() | [
"def",
"WriteXmlIfChanged",
"(",
"content",
",",
"path",
",",
"encoding",
"=",
"'utf-8'",
",",
"pretty",
"=",
"False",
",",
"win32",
"=",
"False",
")",
":",
"xml_string",
"=",
"XmlToString",
"(",
"content",
",",
"encoding",
",",
"pretty",
")",
"if",
"win32",
"and",
"os",
".",
"linesep",
"!=",
"'\\r\\n'",
":",
"xml_string",
"=",
"xml_string",
".",
"replace",
"(",
"'\\n'",
",",
"'\\r\\n'",
")",
"default_encoding",
"=",
"locale",
".",
"getdefaultlocale",
"(",
")",
"[",
"1",
"]",
"if",
"default_encoding",
".",
"upper",
"(",
")",
"!=",
"encoding",
".",
"upper",
"(",
")",
":",
"xml_string",
"=",
"xml_string",
".",
"decode",
"(",
"default_encoding",
")",
".",
"encode",
"(",
"encoding",
")",
"# Get the old content",
"try",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'r'",
")",
"existing",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"except",
":",
"existing",
"=",
"None",
"# It has changed, write it",
"if",
"existing",
"!=",
"xml_string",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"xml_string",
")",
"f",
".",
"close",
"(",
")"
] | [
105,
0
] | [
135,
13
] | python | en | ['en', 'en', 'en'] | True |
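A small sanity-check sketch, assuming both functions above are importable; the temp path is arbitrary. The second call finds identical content on disk and skips the write, which is what keeps build timestamps stable.

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'settings.props')
content = ['root', {'v': '1'}, ['leaf', 'text']]

WriteXmlIfChanged(content, path, pretty=True)
stamp = os.path.getmtime(path)
WriteXmlIfChanged(content, path, pretty=True)   # unchanged: file is not rewritten
assert os.path.getmtime(path) == stamp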
_XmlEscape | (value, attr=False) | Escape a string for inclusion in XML. | Escape a string for inclusion in XML. | def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value) | [
"def",
"_XmlEscape",
"(",
"value",
",",
"attr",
"=",
"False",
")",
":",
"def",
"replace",
"(",
"match",
")",
":",
"m",
"=",
"match",
".",
"string",
"[",
"match",
".",
"start",
"(",
")",
":",
"match",
".",
"end",
"(",
")",
"]",
"# don't replace single quotes in attrs",
"if",
"attr",
"and",
"m",
"==",
"\"'\"",
":",
"return",
"m",
"return",
"_xml_escape_map",
"[",
"m",
"]",
"return",
"_xml_escape_re",
".",
"sub",
"(",
"replace",
",",
"value",
")"
] | [
153,
0
] | [
161,
43
] | python | en | ['en', 'it', 'en'] | True |
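A behavior sketch, assuming the module-level _xml_escape_map covers the usual XML entities (&, <, >, and both quote characters); the only difference between the two calls is the attr flag's treatment of single quotes.

print(_XmlEscape("a < b & c's"))
# -> a &lt; b &amp; c&apos;s   (text node: single quote escaped)
print(_XmlEscape("a < b & c's", attr=True))
# -> a &lt; b &amp; c's        (attribute value: single quote kept)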
store | () | Store operations | Store operations | def store():
"""Store operations"""
pass | [
"def",
"store",
"(",
")",
":",
"pass"
] | [
7,
0
] | [
9,
8
] | python | en | ['en', 'en', 'en'] | False |
store_list | (directory) | List known Stores. | List known Stores. | def store_list(directory):
"""List known Stores."""
context = toolkit.load_data_context_with_error_handling(directory)
try:
stores = context.list_stores()
if len(stores) == 0:
cli_message("No Stores found")
toolkit.send_usage_message(
data_context=context, event="cli.store.list", success=True
)
return
elif len(stores) == 1:
list_intro_string = "1 Store found:"
else:
list_intro_string = "{} Stores found:".format(len(stores))
cli_message(list_intro_string)
for store in stores:
cli_message("")
cli_message_dict(store)
toolkit.send_usage_message(
data_context=context, event="cli.store.list", success=True
)
except Exception as e:
toolkit.send_usage_message(
data_context=context, event="cli.store.list", success=False
)
raise e | [
"def",
"store_list",
"(",
"directory",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"try",
":",
"stores",
"=",
"context",
".",
"list_stores",
"(",
")",
"if",
"len",
"(",
"stores",
")",
"==",
"0",
":",
"cli_message",
"(",
"\"No Stores found\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.store.list\"",
",",
"success",
"=",
"True",
")",
"return",
"elif",
"len",
"(",
"stores",
")",
"==",
"1",
":",
"list_intro_string",
"=",
"\"1 Store found:\"",
"else",
":",
"list_intro_string",
"=",
"\"{} Stores found:\"",
".",
"format",
"(",
"len",
"(",
"stores",
")",
")",
"cli_message",
"(",
"list_intro_string",
")",
"for",
"store",
"in",
"stores",
":",
"cli_message",
"(",
"\"\"",
")",
"cli_message_dict",
"(",
"store",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.store.list\"",
",",
"success",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.store.list\"",
",",
"success",
"=",
"False",
")",
"raise",
"e"
] | [
19,
0
] | [
50,
15
] | python | en | ['en', 'en', 'en'] | True |
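A sketch of exercising the command with Click's test runner. It assumes store is the group defined above, that the subcommand is registered under the name list, and that a --directory option backs the directory argument (the decorators are not captured in this row, so those names are assumptions).

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(store, ["list", "--directory", "great_expectations"])
print(result.output)   # e.g. "1 Store found:" followed by each store's config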
Finder.__init__ | (self, reader: Optional[BaseReader], retriever: Optional[BaseRetriever]) |
Initialize a Finder instance.
:param reader: Reader instance
:param retriever: Retriever instance
|
Initialize a Finder instance. | def __init__(self, reader: Optional[BaseReader], retriever: Optional[BaseRetriever]):
"""
Initialize a Finder instance.
:param reader: Reader instance
:param retriever: Retriever instance
"""
logger.warning(
"""DEPRECATION WARNINGS:
1. The 'Finder' class will be deprecated in the next Haystack release in
favour of a new `Pipeline` class that supports building custom search pipelines using Haystack components
including Retriever, Readers, and Generators.
For more details, please refer to the issue: https://github.com/deepset-ai/haystack/issues/544
2. The `question` parameter in search requests & results is renamed to `query`."""
)
self.retriever = retriever
self.reader = reader
if self.reader is None and self.retriever is None:
raise AttributeError("Finder: self.reader and self.retriever can not be both None") | [
"def",
"__init__",
"(",
"self",
",",
"reader",
":",
"Optional",
"[",
"BaseReader",
"]",
",",
"retriever",
":",
"Optional",
"[",
"BaseRetriever",
"]",
")",
":",
"logger",
".",
"warning",
"(",
"\"\"\"DEPRECATION WARNINGS: \n 1. The 'Finder' class will be deprecated in the next Haystack release in \n favour of a new `Pipeline` class that supports building custom search pipelines using Haystack components\n including Retriever, Readers, and Generators.\n For more details, please refer to the issue: https://github.com/deepset-ai/haystack/issues/544\n 2. The `question` parameter in search requests & results is renamed to `query`.\"\"\"",
")",
"self",
".",
"retriever",
"=",
"retriever",
"self",
".",
"reader",
"=",
"reader",
"if",
"self",
".",
"reader",
"is",
"None",
"and",
"self",
".",
"retriever",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"Finder: self.reader and self.retriever can not be both None\"",
")"
] | [
23,
4
] | [
41,
95
] | python | en | ['en', 'error', 'th'] | False |
Finder.get_answers | (self, question: str, top_k_reader: int = 1, top_k_retriever: int = 10, filters: Optional[dict] = None, index: str = None) |
Get top k answers for a given question.
:param question: The question string
:param top_k_reader: Number of answers returned by the reader
:param top_k_retriever: Number of text units to be retrieved
:param filters: Limit scope to documents having the given meta data values.
        The format for the dict is ``{"key-1": ["value-1", "value-2"], "key-2": ["value-3"], ...}``
:param index: Index to retrieve documents from
:return:
|
Get top k answers for a given question. | def get_answers(self, question: str, top_k_reader: int = 1, top_k_retriever: int = 10, filters: Optional[dict] = None, index: str = None):
"""
Get top k answers for a given question.
:param question: The question string
:param top_k_reader: Number of answers returned by the reader
:param top_k_retriever: Number of text units to be retrieved
:param filters: Limit scope to documents having the given meta data values.
        The format for the dict is ``{"key-1": ["value-1", "value-2"], "key-2": ["value-3"], ...}``
:param index: Index to retrieve documents from
:return:
"""
logger.warning(
"""DEPRECATION WARNINGS:
1. The 'Finder' class will be deprecated in the next Haystack release in
favour of a new `Pipeline` class that supports building custom search pipelines using Haystack components
including Retriever, Readers, and Generators.
For more details, please refer to the issue: https://github.com/deepset-ai/haystack/issues/544
2. The `question` parameter in search requests & results is renamed to `query`."""
)
if self.retriever is None or self.reader is None:
raise AttributeError("Finder.get_answers requires self.retriever AND self.reader")
# 1) Apply retriever(with optional filters) to get fast candidate documents
documents = self.retriever.retrieve(question, filters=filters, top_k=top_k_retriever, index=index)
logger.info(f"Got {len(documents)} candidates from retriever")
logger.debug(f"Retrieved document IDs: {[doc.id for doc in documents]}")
if len(documents) == 0:
logger.info("Retriever did not return any documents. Skipping reader ...")
empty_result = {"question": question, "answers": []}
return empty_result
# 2) Apply reader to get granular answer(s)
len_chars = sum([len(d.text) for d in documents])
logger.info(f"Reader is looking for detailed answer in {len_chars} chars ...")
results = self.reader.predict(query=question,
documents=documents,
top_k=top_k_reader) # type: Dict[str, Any]
results["question"] = results["query"]
# Add corresponding document_name and more meta data, if an answer contains the document_id
for ans in results["answers"]:
ans["meta"] = {}
for doc in documents:
if doc.id == ans["document_id"]:
ans["meta"] = deepcopy(doc.meta)
return results | [
"def",
"get_answers",
"(",
"self",
",",
"question",
":",
"str",
",",
"top_k_reader",
":",
"int",
"=",
"1",
",",
"top_k_retriever",
":",
"int",
"=",
"10",
",",
"filters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"index",
":",
"str",
"=",
"None",
")",
":",
"logger",
".",
"warning",
"(",
"\"\"\"DEPRECATION WARNINGS: \n 1. The 'Finder' class will be deprecated in the next Haystack release in \n favour of a new `Pipeline` class that supports building custom search pipelines using Haystack components\n including Retriever, Readers, and Generators.\n For more details, please refer to the issue: https://github.com/deepset-ai/haystack/issues/544\n 2. The `question` parameter in search requests & results is renamed to `query`.\"\"\"",
")",
"if",
"self",
".",
"retriever",
"is",
"None",
"or",
"self",
".",
"reader",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"Finder.get_answers requires self.retriever AND self.reader\"",
")",
"# 1) Apply retriever(with optional filters) to get fast candidate documents",
"documents",
"=",
"self",
".",
"retriever",
".",
"retrieve",
"(",
"question",
",",
"filters",
"=",
"filters",
",",
"top_k",
"=",
"top_k_retriever",
",",
"index",
"=",
"index",
")",
"logger",
".",
"info",
"(",
"f\"Got {len(documents)} candidates from retriever\"",
")",
"logger",
".",
"debug",
"(",
"f\"Retrieved document IDs: {[doc.id for doc in documents]}\"",
")",
"if",
"len",
"(",
"documents",
")",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"Retriever did not return any documents. Skipping reader ...\"",
")",
"empty_result",
"=",
"{",
"\"question\"",
":",
"question",
",",
"\"answers\"",
":",
"[",
"]",
"}",
"return",
"empty_result",
"# 2) Apply reader to get granular answer(s)",
"len_chars",
"=",
"sum",
"(",
"[",
"len",
"(",
"d",
".",
"text",
")",
"for",
"d",
"in",
"documents",
"]",
")",
"logger",
".",
"info",
"(",
"f\"Reader is looking for detailed answer in {len_chars} chars ...\"",
")",
"results",
"=",
"self",
".",
"reader",
".",
"predict",
"(",
"query",
"=",
"question",
",",
"documents",
"=",
"documents",
",",
"top_k",
"=",
"top_k_reader",
")",
"# type: Dict[str, Any]",
"results",
"[",
"\"question\"",
"]",
"=",
"results",
"[",
"\"query\"",
"]",
"# Add corresponding document_name and more meta data, if an answer contains the document_id",
"for",
"ans",
"in",
"results",
"[",
"\"answers\"",
"]",
":",
"ans",
"[",
"\"meta\"",
"]",
"=",
"{",
"}",
"for",
"doc",
"in",
"documents",
":",
"if",
"doc",
".",
"id",
"==",
"ans",
"[",
"\"document_id\"",
"]",
":",
"ans",
"[",
"\"meta\"",
"]",
"=",
"deepcopy",
"(",
"doc",
".",
"meta",
")",
"return",
"results"
] | [
43,
4
] | [
93,
22
] | python | en | ['en', 'error', 'th'] | False |
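A usage sketch, assuming a document store has already been populated and reader and retriever instances built elsewhere (for example a FARMReader and an Elasticsearch-backed retriever in Haystack); the question text is arbitrary.

finder = Finder(reader=reader, retriever=retriever)
prediction = finder.get_answers(
    question="Who wrote 'The Art of Computer Programming'?",
    top_k_retriever=10,   # candidate documents fetched by the retriever
    top_k_reader=3,       # answers extracted by the reader
)
for answer in prediction["answers"]:
    print(answer["answer"], answer["meta"])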
Finder.get_answers_via_similar_questions | (self, question: str, top_k_retriever: int = 10, filters: Optional[dict] = None, index: str = None) |
Get top k answers for a given question using only a retriever.
:param question: The question string
:param top_k_retriever: Number of text units to be retrieved
:param filters: Limit scope to documents having the given meta data values.
        The format for the dict is ``{"key-1": ["value-1", "value-2"], "key-2": ["value-3"], ...}``
:param index: Index to retrieve documents from
:return:
|
Get top k answers for a given question using only a retriever. | def get_answers_via_similar_questions(self, question: str, top_k_retriever: int = 10, filters: Optional[dict] = None, index: str = None):
"""
Get top k answers for a given question using only a retriever.
:param question: The question string
:param top_k_retriever: Number of text units to be retrieved
:param filters: Limit scope to documents having the given meta data values.
        The format for the dict is ``{"key-1": ["value-1", "value-2"], "key-2": ["value-3"], ...}``
:param index: Index to retrieve documents from
:return:
"""
if self.retriever is None:
raise AttributeError("Finder.get_answers_via_similar_questions requires self.retriever")
results = {"question": question, "answers": []} # type: Dict[str, Any]
# 1) Apply retriever to match similar questions via cosine similarity of embeddings
documents = self.retriever.retrieve(question, top_k=top_k_retriever, filters=filters, index=index)
# 2) Format response
for doc in documents:
        # TODO: proper calibration of pseudo probabilities
cur_answer = {
"question": doc.question,
"answer": doc.text,
"document_id": doc.id,
"context": doc.text,
"score": doc.score,
"probability": doc.probability,
"offset_start": 0,
"offset_end": len(doc.text),
"meta": doc.meta
}
results["answers"].append(cur_answer)
return results | [
"def",
"get_answers_via_similar_questions",
"(",
"self",
",",
"question",
":",
"str",
",",
"top_k_retriever",
":",
"int",
"=",
"10",
",",
"filters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"index",
":",
"str",
"=",
"None",
")",
":",
"if",
"self",
".",
"retriever",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"Finder.get_answers_via_similar_questions requires self.retriever\"",
")",
"results",
"=",
"{",
"\"question\"",
":",
"question",
",",
"\"answers\"",
":",
"[",
"]",
"}",
"# type: Dict[str, Any]",
"# 1) Apply retriever to match similar questions via cosine similarity of embeddings",
"documents",
"=",
"self",
".",
"retriever",
".",
"retrieve",
"(",
"question",
",",
"top_k",
"=",
"top_k_retriever",
",",
"filters",
"=",
"filters",
",",
"index",
"=",
"index",
")",
"# 2) Format response",
"for",
"doc",
"in",
"documents",
":",
"#TODO proper calibratation of pseudo probabilities",
"cur_answer",
"=",
"{",
"\"question\"",
":",
"doc",
".",
"question",
",",
"\"answer\"",
":",
"doc",
".",
"text",
",",
"\"document_id\"",
":",
"doc",
".",
"id",
",",
"\"context\"",
":",
"doc",
".",
"text",
",",
"\"score\"",
":",
"doc",
".",
"score",
",",
"\"probability\"",
":",
"doc",
".",
"probability",
",",
"\"offset_start\"",
":",
"0",
",",
"\"offset_end\"",
":",
"len",
"(",
"doc",
".",
"text",
")",
",",
"\"meta\"",
":",
"doc",
".",
"meta",
"}",
"results",
"[",
"\"answers\"",
"]",
".",
"append",
"(",
"cur_answer",
")",
"return",
"results"
] | [
95,
4
] | [
133,
22
] | python | en | ['en', 'error', 'th'] | False |
Finder.eval | (
self,
label_index: str,
doc_index: str,
label_origin: str = "gold_label",
top_k_retriever: int = 10,
top_k_reader: int = 10,
return_preds: bool = False,
) |
Evaluation of the whole pipeline by first evaluating the Retriever and then evaluating the Reader on the result
of the Retriever.
Returns a dict containing the following metrics:
- ``"retriever_recall"``: Proportion of questions for which correct document is among retrieved documents
- ``"retriever_map"``: Mean of average precision for each question. Rewards retrievers that give relevant
documents a higher rank. Considers all retrieved relevant documents. Average precision is normalized by
the number of all relevant documents per query.
- ``"retriever_mrr"``: Mean of reciprocal rank for each question. Rewards retrievers that give relevant
documents a higher rank. Only considers the highest ranked relevant document.
- ``"reader_top1_accuracy"``: Proportion of highest ranked predicted answers that overlap with corresponding correct answer
- ``"reader_top1_accuracy_has_answer"``: Proportion of highest ranked predicted answers that overlap
with corresponding correct answer for answerable questions
- ``"reader_top_k_accuracy"``: Proportion of predicted answers that overlap with corresponding correct answer
- ``"reader_topk_accuracy_has_answer"``: Proportion of predicted answers that overlap with corresponding correct answer
for answerable questions
- ``"reader_top1_em"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers
- ``"reader_top1_em_has_answer"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_topk_em"``: Proportion of exact matches of predicted answers with their corresponding correct answers
- ``"reader_topk_em_has_answer"``: Proportion of exact matches of predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_top1_f1"``: Average overlap between highest ranked predicted answers and their corresponding correct answers
- ``"reader_top1_f1_has_answer"``: Average overlap between highest ranked predicted answers and their corresponding
correct answers for answerable questions
- ``"reader_topk_f1"``: Average overlap between predicted answers and their corresponding correct answers
- ``"reader_topk_f1_has_answer"``: Average overlap between predicted answers and their corresponding correct answers
for answerable questions
- ``"reader_top1_no_answer_accuracy"``: Proportion of correct predicting unanswerable question at highest ranked prediction
- ``"reader_topk_no_answer_accuracy"``: Proportion of correct predicting unanswerable question among all predictions
- ``"total_retrieve_time"``: Time retriever needed to retrieve documents for all questions
- ``"avg_retrieve_time"``: Average time needed to retrieve documents for one question
- ``"total_reader_time"``: Time reader needed to extract answer out of retrieved documents for all questions
where the correct document is among the retrieved ones
- ``"avg_reader_time"``: Average time needed to extract answer out of retrieved documents for one question
- ``"total_finder_time"``: Total time for whole pipeline
:param label_index: Elasticsearch index where labeled questions are stored
:type label_index: str
:param doc_index: Elasticsearch index where documents that are used for evaluation are stored
:type doc_index: str
:param label_origin: Only labels with this ``origin`` value (default: ``gold_label``) are used for the evaluation
:type label_origin: str
:param top_k_retriever: How many documents per question to return and pass to reader
:type top_k_retriever: int
:param top_k_reader: How many answers to return per question
:type top_k_reader: int
:param return_preds: Whether to add predictions in the returned dictionary. If True, the returned dictionary
contains the keys "predictions" and "metrics".
:type return_preds: bool
|
(docstring_summary: identical to the opening lines of the docstring above, truncated at "total_finder_time") | def eval(
self,
label_index: str,
doc_index: str,
label_origin: str = "gold_label",
top_k_retriever: int = 10,
top_k_reader: int = 10,
return_preds: bool = False,
):
"""
Evaluation of the whole pipeline by first evaluating the Retriever and then evaluating the Reader on the result
of the Retriever.
Returns a dict containing the following metrics:
- ``"retriever_recall"``: Proportion of questions for which correct document is among retrieved documents
- ``"retriever_map"``: Mean of average precision for each question. Rewards retrievers that give relevant
documents a higher rank. Considers all retrieved relevant documents. Average precision is normalized by
the number of all relevant documents per query.
- ``"retriever_mrr"``: Mean of reciprocal rank for each question. Rewards retrievers that give relevant
documents a higher rank. Only considers the highest ranked relevant document.
- ``"reader_top1_accuracy"``: Proportion of highest ranked predicted answers that overlap with corresponding correct answer
- ``"reader_top1_accuracy_has_answer"``: Proportion of highest ranked predicted answers that overlap
with corresponding correct answer for answerable questions
- ``"reader_top_k_accuracy"``: Proportion of predicted answers that overlap with corresponding correct answer
- ``"reader_topk_accuracy_has_answer"``: Proportion of predicted answers that overlap with corresponding correct answer
for answerable questions
- ``"reader_top1_em"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers
- ``"reader_top1_em_has_answer"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_topk_em"``: Proportion of exact matches of predicted answers with their corresponding correct answers
- ``"reader_topk_em_has_answer"``: Proportion of exact matches of predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_top1_f1"``: Average overlap between highest ranked predicted answers and their corresponding correct answers
- ``"reader_top1_f1_has_answer"``: Average overlap between highest ranked predicted answers and their corresponding
correct answers for answerable questions
- ``"reader_topk_f1"``: Average overlap between predicted answers and their corresponding correct answers
- ``"reader_topk_f1_has_answer"``: Average overlap between predicted answers and their corresponding correct answers
for answerable questions
- ``"reader_top1_no_answer_accuracy"``: Proportion of correct predicting unanswerable question at highest ranked prediction
- ``"reader_topk_no_answer_accuracy"``: Proportion of correct predicting unanswerable question among all predictions
- ``"total_retrieve_time"``: Time retriever needed to retrieve documents for all questions
- ``"avg_retrieve_time"``: Average time needed to retrieve documents for one question
- ``"total_reader_time"``: Time reader needed to extract answer out of retrieved documents for all questions
where the correct document is among the retrieved ones
- ``"avg_reader_time"``: Average time needed to extract answer out of retrieved documents for one question
- ``"total_finder_time"``: Total time for whole pipeline
:param label_index: Elasticsearch index where labeled questions are stored
:type label_index: str
:param doc_index: Elasticsearch index where documents that are used for evaluation are stored
:type doc_index: str
:param label_origin: Only labels with this ``origin`` value (default: ``gold_label``) are used for the evaluation
:type label_origin: str
:param top_k_retriever: How many documents per question to return and pass to reader
:type top_k_retriever: int
:param top_k_reader: How many answers to return per question
:type top_k_reader: int
:param return_preds: Whether to add predictions in the returned dictionary. If True, the returned dictionary
contains the keys "predictions" and "metrics".
:type return_preds: bool
"""
if not self.reader or not self.retriever:
raise Exception("Finder needs to have a reader and retriever for the evaluation.")
finder_start_time = time.time()
# extract all questions for evaluation
filters = {"origin": [label_origin]}
questions = self.retriever.document_store.get_all_labels_aggregated(index=label_index, filters=filters)
counts = defaultdict(float) # type: Dict[str, float]
retrieve_times = []
read_times = []
# retrieve documents
questions_with_docs = []
retriever_start_time = time.time()
for q_idx, question in enumerate(questions):
question_string = question.question
single_retrieve_start = time.time()
retrieved_docs = self.retriever.retrieve(question_string, top_k=top_k_retriever, index=doc_index)
retrieve_times.append(time.time() - single_retrieve_start)
number_relevant_docs = len(set(question.multiple_document_ids))
# check if correct doc among retrieved docs
found_relevant_doc = False
relevant_docs_found = 0
current_avg_precision = 0.0
for doc_idx, doc in enumerate(retrieved_docs):
if doc.id in question.multiple_document_ids:
relevant_docs_found += 1
if not found_relevant_doc:
counts["correct_retrievals"] += 1
counts["summed_reciprocal_rank_retriever"] += 1 / (doc_idx + 1)
current_avg_precision += relevant_docs_found / (doc_idx + 1)
found_relevant_doc = True
if relevant_docs_found == number_relevant_docs:
break
if found_relevant_doc:
all_relevant_docs = len(set(question.multiple_document_ids))
counts["summed_avg_precision_retriever"] += current_avg_precision / all_relevant_docs
if found_relevant_doc:
questions_with_docs.append({
"question": question,
"docs": retrieved_docs
})
retriever_total_time = time.time() - retriever_start_time
counts["number_of_questions"] = q_idx + 1
previous_return_no_answers = self.reader.return_no_answers
self.reader.return_no_answers = True
predictions = []
# extract answers
reader_start_time = time.time()
for q_idx, question_docs in enumerate(questions_with_docs):
if (q_idx + 1) % 100 == 0:
print(f"Processed {q_idx+1} questions.")
question = question_docs["question"] # type: ignore
question_string = question.question
docs = question_docs["docs"] # type: ignore
single_reader_start = time.time()
predicted_answers = self.reader.predict(question_string, docs, top_k=top_k_reader) # type: ignore
read_times.append(time.time() - single_reader_start)
if return_preds:
predictions.append(predicted_answers)
counts = eval_counts_reader(question, predicted_answers, counts)
counts["number_of_has_answer"] = counts["correct_retrievals"] - counts["number_of_no_answer"]
reader_total_time = time.time() - reader_start_time
finder_total_time = time.time() - finder_start_time
self.reader.return_no_answers = previous_return_no_answers # type: ignore
logger.info((f"{counts['correct_readings_topk']} out of {counts['number_of_questions']} questions were correctly"
f" answered {(counts['correct_readings_topk']/counts['number_of_questions']):.2%})."))
logger.info((f"{counts['number_of_questions']-counts['correct_retrievals']} questions could not be answered due "
f"to the retriever."))
logger.info((f"{counts['correct_retrievals']-counts['correct_readings_topk']} questions could not be answered "
f"due to the reader."))
eval_results = self.calc_eval_results(counts)
eval_results["total_retrieve_time"] = retriever_total_time
eval_results["avg_retrieve_time"] = mean(retrieve_times)
eval_results["total_reader_time"] = reader_total_time
eval_results["avg_reader_time"] = mean(read_times)
eval_results["total_finder_time"] = finder_total_time
if return_preds:
return {"metrics": eval_results, "predictions": predictions}
else:
return eval_results | [
"def",
"eval",
"(",
"self",
",",
"label_index",
":",
"str",
",",
"doc_index",
":",
"str",
",",
"label_origin",
":",
"str",
"=",
"\"gold_label\"",
",",
"top_k_retriever",
":",
"int",
"=",
"10",
",",
"top_k_reader",
":",
"int",
"=",
"10",
",",
"return_preds",
":",
"bool",
"=",
"False",
",",
")",
":",
"if",
"not",
"self",
".",
"reader",
"or",
"not",
"self",
".",
"retriever",
":",
"raise",
"Exception",
"(",
"\"Finder needs to have a reader and retriever for the evaluation.\"",
")",
"finder_start_time",
"=",
"time",
".",
"time",
"(",
")",
"# extract all questions for evaluation",
"filters",
"=",
"{",
"\"origin\"",
":",
"[",
"label_origin",
"]",
"}",
"questions",
"=",
"self",
".",
"retriever",
".",
"document_store",
".",
"get_all_labels_aggregated",
"(",
"index",
"=",
"label_index",
",",
"filters",
"=",
"filters",
")",
"counts",
"=",
"defaultdict",
"(",
"float",
")",
"# type: Dict[str, float]",
"retrieve_times",
"=",
"[",
"]",
"read_times",
"=",
"[",
"]",
"# retrieve documents",
"questions_with_docs",
"=",
"[",
"]",
"retriever_start_time",
"=",
"time",
".",
"time",
"(",
")",
"for",
"q_idx",
",",
"question",
"in",
"enumerate",
"(",
"questions",
")",
":",
"question_string",
"=",
"question",
".",
"question",
"single_retrieve_start",
"=",
"time",
".",
"time",
"(",
")",
"retrieved_docs",
"=",
"self",
".",
"retriever",
".",
"retrieve",
"(",
"question_string",
",",
"top_k",
"=",
"top_k_retriever",
",",
"index",
"=",
"doc_index",
")",
"retrieve_times",
".",
"append",
"(",
"time",
".",
"time",
"(",
")",
"-",
"single_retrieve_start",
")",
"number_relevant_docs",
"=",
"len",
"(",
"set",
"(",
"question",
".",
"multiple_document_ids",
")",
")",
"# check if correct doc among retrieved docs",
"found_relevant_doc",
"=",
"False",
"relevant_docs_found",
"=",
"0",
"current_avg_precision",
"=",
"0.0",
"for",
"doc_idx",
",",
"doc",
"in",
"enumerate",
"(",
"retrieved_docs",
")",
":",
"if",
"doc",
".",
"id",
"in",
"question",
".",
"multiple_document_ids",
":",
"relevant_docs_found",
"+=",
"1",
"if",
"not",
"found_relevant_doc",
":",
"counts",
"[",
"\"correct_retrievals\"",
"]",
"+=",
"1",
"counts",
"[",
"\"summed_reciprocal_rank_retriever\"",
"]",
"+=",
"1",
"/",
"(",
"doc_idx",
"+",
"1",
")",
"current_avg_precision",
"+=",
"relevant_docs_found",
"/",
"(",
"doc_idx",
"+",
"1",
")",
"found_relevant_doc",
"=",
"True",
"if",
"relevant_docs_found",
"==",
"number_relevant_docs",
":",
"break",
"if",
"found_relevant_doc",
":",
"all_relevant_docs",
"=",
"len",
"(",
"set",
"(",
"question",
".",
"multiple_document_ids",
")",
")",
"counts",
"[",
"\"summed_avg_precision_retriever\"",
"]",
"+=",
"current_avg_precision",
"/",
"all_relevant_docs",
"if",
"found_relevant_doc",
":",
"questions_with_docs",
".",
"append",
"(",
"{",
"\"question\"",
":",
"question",
",",
"\"docs\"",
":",
"retrieved_docs",
"}",
")",
"retriever_total_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"retriever_start_time",
"counts",
"[",
"\"number_of_questions\"",
"]",
"=",
"q_idx",
"+",
"1",
"previous_return_no_answers",
"=",
"self",
".",
"reader",
".",
"return_no_answers",
"self",
".",
"reader",
".",
"return_no_answers",
"=",
"True",
"predictions",
"=",
"[",
"]",
"# extract answers",
"reader_start_time",
"=",
"time",
".",
"time",
"(",
")",
"for",
"q_idx",
",",
"question_docs",
"in",
"enumerate",
"(",
"questions_with_docs",
")",
":",
"if",
"(",
"q_idx",
"+",
"1",
")",
"%",
"100",
"==",
"0",
":",
"print",
"(",
"f\"Processed {q_idx+1} questions.\"",
")",
"question",
"=",
"question_docs",
"[",
"\"question\"",
"]",
"# type: ignore",
"question_string",
"=",
"question",
".",
"question",
"docs",
"=",
"question_docs",
"[",
"\"docs\"",
"]",
"# type: ignore",
"single_reader_start",
"=",
"time",
".",
"time",
"(",
")",
"predicted_answers",
"=",
"self",
".",
"reader",
".",
"predict",
"(",
"question_string",
",",
"docs",
",",
"top_k",
"=",
"top_k_reader",
")",
"# type: ignore",
"read_times",
".",
"append",
"(",
"time",
".",
"time",
"(",
")",
"-",
"single_reader_start",
")",
"if",
"return_preds",
":",
"predictions",
".",
"append",
"(",
"predicted_answers",
")",
"counts",
"=",
"eval_counts_reader",
"(",
"question",
",",
"predicted_answers",
",",
"counts",
")",
"counts",
"[",
"\"number_of_has_answer\"",
"]",
"=",
"counts",
"[",
"\"correct_retrievals\"",
"]",
"-",
"counts",
"[",
"\"number_of_no_answer\"",
"]",
"reader_total_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"reader_start_time",
"finder_total_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"finder_start_time",
"self",
".",
"reader",
".",
"return_no_answers",
"=",
"previous_return_no_answers",
"# type: ignore",
"logger",
".",
"info",
"(",
"(",
"f\"{counts['correct_readings_topk']} out of {counts['number_of_questions']} questions were correctly\"",
"f\" answered {(counts['correct_readings_topk']/counts['number_of_questions']):.2%}).\"",
")",
")",
"logger",
".",
"info",
"(",
"(",
"f\"{counts['number_of_questions']-counts['correct_retrievals']} questions could not be answered due \"",
"f\"to the retriever.\"",
")",
")",
"logger",
".",
"info",
"(",
"(",
"f\"{counts['correct_retrievals']-counts['correct_readings_topk']} questions could not be answered \"",
"f\"due to the reader.\"",
")",
")",
"eval_results",
"=",
"self",
".",
"calc_eval_results",
"(",
"counts",
")",
"eval_results",
"[",
"\"total_retrieve_time\"",
"]",
"=",
"retriever_total_time",
"eval_results",
"[",
"\"avg_retrieve_time\"",
"]",
"=",
"mean",
"(",
"retrieve_times",
")",
"eval_results",
"[",
"\"total_reader_time\"",
"]",
"=",
"reader_total_time",
"eval_results",
"[",
"\"avg_reader_time\"",
"]",
"=",
"mean",
"(",
"read_times",
")",
"eval_results",
"[",
"\"total_finder_time\"",
"]",
"=",
"finder_total_time",
"if",
"return_preds",
":",
"return",
"{",
"\"metrics\"",
":",
"eval_results",
",",
"\"predictions\"",
":",
"predictions",
"}",
"else",
":",
"return",
"eval_results"
] | [
135,
4
] | [
289,
31
] | python | en | ['en', 'error', 'th'] | False |
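Because the MAP and MRR definitions above are easy to misread, here is a small, self-contained sketch of the per-question quantities Finder.eval accumulates. Inputs are illustrative; it assumes at least one relevant document per question.

# Per-question average precision and reciprocal rank, mirroring the
# accumulation loop in Finder.eval above (standalone illustration).
def average_precision_and_reciprocal_rank(retrieved_ids, relevant_ids):
    relevant = set(relevant_ids)  # assumes at least one relevant document
    found = 0
    avg_precision = 0.0
    reciprocal_rank = 0.0
    for rank, doc_id in enumerate(retrieved_ids, start=1):
        if doc_id in relevant:
            if found == 0:
                reciprocal_rank = 1 / rank  # MRR: only the first relevant hit counts
            found += 1
            avg_precision += found / rank   # precision at each relevant hit
            if found == len(relevant):
                break
    # normalized by the number of all relevant documents, as documented
    return avg_precision / len(relevant), reciprocal_rank

print(average_precision_and_reciprocal_rank(["d3", "d1", "d7"], ["d1", "d7"]))
# -> (0.5833333333333333, 0.5)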
Finder.eval_batch | (
self,
label_index: str,
doc_index: str,
label_origin: str = "gold_label",
top_k_retriever: int = 10,
top_k_reader: int = 10,
batch_size: int = 50,
return_preds: bool = False,
) |
Evaluation of the whole pipeline by first evaluating the Retriever and then evaluating the Reader on the result
of the Retriever. Passes all retrieved question-document pairs to the Reader at once.
Returns a dict containing the following metrics:
- ``"retriever_recall"``: Proportion of questions for which correct document is among retrieved documents
- ``"retriever_map"``: Mean of average precision for each question. Rewards retrievers that give relevant
documents a higher rank. Considers all retrieved relevant documents. Average precision is normalized by
the number of all relevant documents per query.
- ``"retriever_mrr"``: Mean of reciprocal rank for each question. Rewards retrievers that give relevant
documents a higher rank. Only considers the highest ranked relevant document.
- ``"reader_top1_accuracy"``: Proportion of highest ranked predicted answers that overlap with corresponding correct answer
- ``"reader_top1_accuracy_has_answer"``: Proportion of highest ranked predicted answers that overlap
with corresponding correct answer for answerable questions
- ``"reader_top_k_accuracy"``: Proportion of predicted answers that overlap with corresponding correct answer
- ``"reader_topk_accuracy_has_answer"``: Proportion of predicted answers that overlap with corresponding correct answer
for answerable questions
- ``"reader_top1_em"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers
- ``"reader_top1_em_has_answer"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_topk_em"``: Proportion of exact matches of predicted answers with their corresponding correct answers
- ``"reader_topk_em_has_answer"``: Proportion of exact matches of predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_top1_f1"``: Average overlap between highest ranked predicted answers and their corresponding correct answers
- ``"reader_top1_f1_has_answer"``: Average overlap between highest ranked predicted answers and their corresponding
correct answers for answerable questions
- ``"reader_topk_f1"``: Average overlap between predicted answers and their corresponding correct answers
- ``"reader_topk_f1_has_answer"``: Average overlap between predicted answers and their corresponding correct answers
for answerable questions
- ``"reader_top1_no_answer_accuracy"``: Proportion of correct predicting unanswerable question at highest ranked prediction
- ``"reader_topk_no_answer_accuracy"``: Proportion of correct predicting unanswerable question among all predictions
- ``"total_retrieve_time"``: Time retriever needed to retrieve documents for all questions
- ``"avg_retrieve_time"``: Average time needed to retrieve documents for one question
- ``"total_reader_time"``: Time reader needed to extract answer out of retrieved documents for all questions
where the correct document is among the retrieved ones
- ``"avg_reader_time"``: Average time needed to extract answer out of retrieved documents for one question
- ``"total_finder_time"``: Total time for whole pipeline
:param label_index: Elasticsearch index where labeled questions are stored
:type label_index: str
:param doc_index: Elasticsearch index where documents that are used for evaluation are stored
:type doc_index: str
:param label_origin: Only labels with this ``origin`` value (default: ``gold_label``) are used for the evaluation
:type label_origin: str
:param top_k_retriever: How many documents per question to return and pass to reader
:type top_k_retriever: int
:param top_k_reader: How many answers to return per question
:type top_k_reader: int
:param batch_size: Number of samples per batch computed at once
:type batch_size: int
:param return_preds: Whether to add predictions in the returned dictionary. If True, the returned dictionary
contains the keys "predictions" and "metrics".
:type return_preds: bool
|
(docstring_summary: identical to the opening lines of the docstring above, truncated at "total_finder_time") | def eval_batch(
self,
label_index: str,
doc_index: str,
label_origin: str = "gold_label",
top_k_retriever: int = 10,
top_k_reader: int = 10,
batch_size: int = 50,
return_preds: bool = False,
):
"""
Evaluation of the whole pipeline by first evaluating the Retriever and then evaluating the Reader on the result
of the Retriever. Passes all retrieved question-document pairs to the Reader at once.
Returns a dict containing the following metrics:
- ``"retriever_recall"``: Proportion of questions for which correct document is among retrieved documents
- ``"retriever_map"``: Mean of average precision for each question. Rewards retrievers that give relevant
documents a higher rank. Considers all retrieved relevant documents. Average precision is normalized by
the number of all relevant documents per query.
- ``"retriever_mrr"``: Mean of reciprocal rank for each question. Rewards retrievers that give relevant
documents a higher rank. Only considers the highest ranked relevant document.
- ``"reader_top1_accuracy"``: Proportion of highest ranked predicted answers that overlap with corresponding correct answer
- ``"reader_top1_accuracy_has_answer"``: Proportion of highest ranked predicted answers that overlap
with corresponding correct answer for answerable questions
- ``"reader_top_k_accuracy"``: Proportion of predicted answers that overlap with corresponding correct answer
- ``"reader_topk_accuracy_has_answer"``: Proportion of predicted answers that overlap with corresponding correct answer
for answerable questions
- ``"reader_top1_em"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers
- ``"reader_top1_em_has_answer"``: Proportion of exact matches of highest ranked predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_topk_em"``: Proportion of exact matches of predicted answers with their corresponding correct answers
- ``"reader_topk_em_has_answer"``: Proportion of exact matches of predicted answers with their corresponding
correct answers for answerable questions
- ``"reader_top1_f1"``: Average overlap between highest ranked predicted answers and their corresponding correct answers
- ``"reader_top1_f1_has_answer"``: Average overlap between highest ranked predicted answers and their corresponding
correct answers for answerable questions
- ``"reader_topk_f1"``: Average overlap between predicted answers and their corresponding correct answers
- ``"reader_topk_f1_has_answer"``: Average overlap between predicted answers and their corresponding correct answers
for answerable questions
- ``"reader_top1_no_answer_accuracy"``: Proportion of correct predicting unanswerable question at highest ranked prediction
- ``"reader_topk_no_answer_accuracy"``: Proportion of correct predicting unanswerable question among all predictions
- ``"total_retrieve_time"``: Time retriever needed to retrieve documents for all questions
- ``"avg_retrieve_time"``: Average time needed to retrieve documents for one question
- ``"total_reader_time"``: Time reader needed to extract answer out of retrieved documents for all questions
where the correct document is among the retrieved ones
- ``"avg_reader_time"``: Average time needed to extract answer out of retrieved documents for one question
- ``"total_finder_time"``: Total time for whole pipeline
:param label_index: Elasticsearch index where labeled questions are stored
:type label_index: str
:param doc_index: Elasticsearch index where documents that are used for evaluation are stored
:type doc_index: str
:param label_origin: Only labels with this ``origin`` value (default: ``gold_label``) are used for the evaluation
:type label_origin: str
:param top_k_retriever: How many documents per question to return and pass to reader
:type top_k_retriever: int
:param top_k_reader: How many answers to return per question
:type top_k_reader: int
:param batch_size: Number of samples per batch computed at once
:type batch_size: int
:param return_preds: Whether to add predictions in the returned dictionary. If True, the returned dictionary
contains the keys "predictions" and "metrics".
:type return_preds: bool
"""
if not self.reader or not self.retriever:
raise Exception("Finder needs to have a reader and retriever for the evaluation.")
counts = defaultdict(float) # type: Dict[str, float]
finder_start_time = time.time()
# extract all questions for evaluation
filters = {"origin": [label_origin]}
questions = self.retriever.document_store.get_all_labels_aggregated(index=label_index, filters=filters)
number_of_questions = len(questions)
# retrieve documents
retriever_start_time = time.time()
questions_with_docs = self._retrieve_docs(questions, top_k=top_k_retriever, doc_index=doc_index)
retriever_total_time = time.time() - retriever_start_time
questions_with_correct_doc, \
summed_avg_precision_retriever, \
summed_reciprocal_rank_retriever = calculate_average_precision_and_reciprocal_rank(questions_with_docs)
correct_retrievals = len(questions_with_correct_doc)
# extract answers
previous_return_no_answers = self.reader.return_no_answers
self.reader.return_no_answers = True
reader_start_time = time.time()
predictions = self.reader.predict_batch(questions_with_correct_doc,
top_k=top_k_reader, batch_size=batch_size)
reader_total_time = time.time() - reader_start_time
self.reader.return_no_answers = previous_return_no_answers  # restore the reader's original no-answer setting
for pred in predictions:
counts = eval_counts_reader_batch(pred, counts)
finder_total_time = time.time() - finder_start_time
results = calculate_reader_metrics(counts, correct_retrievals)
results["retriever_recall"] = correct_retrievals / number_of_questions
results["retriever_map"] = summed_avg_precision_retriever / number_of_questions
results["retriever_mrr"] = summed_reciprocal_rank_retriever / number_of_questions
results["total_retrieve_time"] = retriever_total_time
results["avg_retrieve_time"] = retriever_total_time / number_of_questions
results["total_reader_time"] = reader_total_time
results["avg_reader_time"] = reader_total_time / correct_retrievals
results["total_finder_time"] = finder_total_time
logger.info((f"{counts['correct_readings_topk']} out of {number_of_questions} questions were correctly "
f"answered ({(counts['correct_readings_topk'] / number_of_questions):.2%})."))
logger.info(f"{number_of_questions - correct_retrievals} questions could not be answered due to the retriever.")
logger.info(f"{correct_retrievals - counts['correct_readings_topk']} questions could not be answered due to the reader.")
if return_preds:
return {"metrics": results, "predictions": predictions}
else:
return results | [
"def",
"eval_batch",
"(",
"self",
",",
"label_index",
":",
"str",
",",
"doc_index",
":",
"str",
",",
"label_origin",
":",
"str",
"=",
"\"gold_label\"",
",",
"top_k_retriever",
":",
"int",
"=",
"10",
",",
"top_k_reader",
":",
"int",
"=",
"10",
",",
"batch_size",
":",
"int",
"=",
"50",
",",
"return_preds",
":",
"bool",
"=",
"False",
",",
")",
":",
"if",
"not",
"self",
".",
"reader",
"or",
"not",
"self",
".",
"retriever",
":",
"raise",
"Exception",
"(",
"\"Finder needs to have a reader and retriever for the evaluation.\"",
")",
"counts",
"=",
"defaultdict",
"(",
"float",
")",
"# type: Dict[str, float]",
"finder_start_time",
"=",
"time",
".",
"time",
"(",
")",
"# extract all questions for evaluation",
"filters",
"=",
"{",
"\"origin\"",
":",
"[",
"label_origin",
"]",
"}",
"questions",
"=",
"self",
".",
"retriever",
".",
"document_store",
".",
"get_all_labels_aggregated",
"(",
"index",
"=",
"label_index",
",",
"filters",
"=",
"filters",
")",
"number_of_questions",
"=",
"len",
"(",
"questions",
")",
"# retrieve documents",
"retriever_start_time",
"=",
"time",
".",
"time",
"(",
")",
"questions_with_docs",
"=",
"self",
".",
"_retrieve_docs",
"(",
"questions",
",",
"top_k",
"=",
"top_k_retriever",
",",
"doc_index",
"=",
"doc_index",
")",
"retriever_total_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"retriever_start_time",
"questions_with_correct_doc",
",",
"summed_avg_precision_retriever",
",",
"summed_reciprocal_rank_retriever",
"=",
"calculate_average_precision_and_reciprocal_rank",
"(",
"questions_with_docs",
")",
"correct_retrievals",
"=",
"len",
"(",
"questions_with_correct_doc",
")",
"# extract answers",
"previous_return_no_answers",
"=",
"self",
".",
"reader",
".",
"return_no_answers",
"self",
".",
"reader",
".",
"return_no_answers",
"=",
"True",
"reader_start_time",
"=",
"time",
".",
"time",
"(",
")",
"predictions",
"=",
"self",
".",
"reader",
".",
"predict_batch",
"(",
"questions_with_correct_doc",
",",
"top_k",
"=",
"top_k_reader",
",",
"batch_size",
"=",
"batch_size",
")",
"reader_total_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"reader_start_time",
"for",
"pred",
"in",
"predictions",
":",
"counts",
"=",
"eval_counts_reader_batch",
"(",
"pred",
",",
"counts",
")",
"finder_total_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"finder_start_time",
"results",
"=",
"calculate_reader_metrics",
"(",
"counts",
",",
"correct_retrievals",
")",
"results",
"[",
"\"retriever_recall\"",
"]",
"=",
"correct_retrievals",
"/",
"number_of_questions",
"results",
"[",
"\"retriever_map\"",
"]",
"=",
"summed_avg_precision_retriever",
"/",
"number_of_questions",
"results",
"[",
"\"retriever_mrr\"",
"]",
"=",
"summed_reciprocal_rank_retriever",
"/",
"number_of_questions",
"results",
"[",
"\"total_retrieve_time\"",
"]",
"=",
"retriever_total_time",
"results",
"[",
"\"avg_retrieve_time\"",
"]",
"=",
"retriever_total_time",
"/",
"number_of_questions",
"results",
"[",
"\"total_reader_time\"",
"]",
"=",
"reader_total_time",
"results",
"[",
"\"avg_reader_time\"",
"]",
"=",
"reader_total_time",
"/",
"correct_retrievals",
"results",
"[",
"\"total_finder_time\"",
"]",
"=",
"finder_total_time",
"logger",
".",
"info",
"(",
"(",
"f\"{counts['correct_readings_topk']} out of {number_of_questions} questions were correctly \"",
"f\"answered ({(counts['correct_readings_topk'] / number_of_questions):.2%}).\"",
")",
")",
"logger",
".",
"info",
"(",
"f\"{number_of_questions - correct_retrievals} questions could not be answered due to the retriever.\"",
")",
"logger",
".",
"info",
"(",
"f\"{correct_retrievals - counts['correct_readings_topk']} questions could not be answered due to the reader.\"",
")",
"if",
"return_preds",
":",
"return",
"{",
"\"metrics\"",
":",
"results",
",",
"\"predictions\"",
":",
"predictions",
"}",
"else",
":",
"return",
"results"
] | [
291,
4
] | [
407,
26
] | python | en | ['en', 'error', 'th'] | False |
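A hedged usage sketch for the batch evaluation above. The import path and constructor match older Haystack releases but are assumptions here; my_reader and my_retriever stand in for pre-built components, and the index names are hypothetical.

from haystack import Finder  # assumed import path for this Finder class

finder = Finder(reader=my_reader, retriever=my_retriever)  # pre-built components
report = finder.eval_batch(
    label_index="eval_labels",    # hypothetical index names
    doc_index="eval_docs",
    top_k_retriever=10,
    top_k_reader=5,
    batch_size=32,
    return_preds=True,
)
metrics = report["metrics"]
print(metrics["retriever_recall"], metrics["retriever_map"], metrics["reader_topk_f1"])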
suite | (ctx) | Expectation Suite operations | Expectation Suite operations | def suite(ctx):
"""Expectation Suite operations"""
directory: str = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
context: DataContext = toolkit.load_data_context_with_error_handling(
directory=directory,
from_cli_upgrade_command=False,
)
# TODO consider moving this all the way up into the CLIState constructor
ctx.obj.data_context = context
usage_stats_prefix = f"cli.suite.{ctx.invoked_subcommand}"
toolkit.send_usage_message(
data_context=context,
event=f"{usage_stats_prefix}.begin",
success=True,
)
ctx.obj.usage_event_end = f"{usage_stats_prefix}.end" | [
"def",
"suite",
"(",
"ctx",
")",
":",
"directory",
":",
"str",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"context",
":",
"DataContext",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"False",
",",
")",
"# TODO consider moving this all the way up in to the CLIState constructor",
"ctx",
".",
"obj",
".",
"data_context",
"=",
"context",
"usage_stats_prefix",
"=",
"f\"cli.suite.{ctx.invoked_subcommand}\"",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"f\"{usage_stats_prefix}.begin\"",
",",
"success",
"=",
"True",
",",
")",
"ctx",
".",
"obj",
".",
"usage_event_end",
"=",
"f\"{usage_stats_prefix}.end\""
] | [
34,
0
] | [
52,
57
] | python | en | ['ca', 'en', 'en'] | True |
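The suite group above relies on a standard click idiom: the group callback stashes shared state on ctx.obj so subcommands can read it. A self-contained sketch of just that idiom follows; the names are illustrative, not the real CLI.

import click

class CLIState:
    def __init__(self):
        self.data_context = None
        self.usage_event_end = None

@click.group()
@click.pass_context
def suite(ctx):
    # the group callback runs before the subcommand and can see its name
    ctx.obj = CLIState()
    ctx.obj.usage_event_end = f"cli.suite.{ctx.invoked_subcommand}.end"

@suite.command()
@click.pass_context
def demo(ctx):
    click.echo(ctx.obj.usage_event_end)  # -> cli.suite.demo.end

if __name__ == "__main__":
    suite()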
suite_new | (
ctx,
expectation_suite,
interactive_flag,
manual_flag,
profile,
batch_request,
no_jupyter,
) |
Create a new Expectation Suite.
Edit in jupyter notebooks, or skip with the --no-jupyter flag.
|
Create a new Expectation Suite.
Edit in jupyter notebooks, or skip with the --no-jupyter flag.
| def suite_new(
ctx,
expectation_suite,
interactive_flag,
manual_flag,
profile,
batch_request,
no_jupyter,
):
"""
Create a new Expectation Suite.
Edit in jupyter notebooks, or skip with the --no-jupyter flag.
"""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
processed_flags: Dict[str, Optional[bool]] = _process_suite_new_flags_and_prompt(
context=context,
usage_event_end=usage_event_end,
interactive_flag=interactive_flag,
manual_flag=manual_flag,
profile=profile,
batch_request=batch_request,
)
_suite_new_workflow(
context=context,
expectation_suite_name=expectation_suite,
interactive=processed_flags["interactive"],
profile=processed_flags["profile"],
no_jupyter=no_jupyter,
usage_event=usage_event_end,
batch_request=batch_request,
) | [
"def",
"suite_new",
"(",
"ctx",
",",
"expectation_suite",
",",
"interactive_flag",
",",
"manual_flag",
",",
"profile",
",",
"batch_request",
",",
"no_jupyter",
",",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"processed_flags",
":",
"Dict",
"[",
"str",
",",
"Optional",
"[",
"bool",
"]",
"]",
"=",
"_process_suite_new_flags_and_prompt",
"(",
"context",
"=",
"context",
",",
"usage_event_end",
"=",
"usage_event_end",
",",
"interactive_flag",
"=",
"interactive_flag",
",",
"manual_flag",
"=",
"manual_flag",
",",
"profile",
"=",
"profile",
",",
"batch_request",
"=",
"batch_request",
",",
")",
"_suite_new_workflow",
"(",
"context",
"=",
"context",
",",
"expectation_suite_name",
"=",
"expectation_suite",
",",
"interactive",
"=",
"processed_flags",
"[",
"\"interactive\"",
"]",
",",
"profile",
"=",
"processed_flags",
"[",
"\"profile\"",
"]",
",",
"no_jupyter",
"=",
"no_jupyter",
",",
"usage_event",
"=",
"usage_event_end",
",",
"batch_request",
"=",
"batch_request",
",",
")"
] | [
105,
0
] | [
138,
5
] | python | en | ['en', 'error', 'th'] | False |
_process_suite_new_flags_and_prompt | (
context: DataContext,
usage_event_end: str,
interactive_flag: bool,
manual_flag: bool,
profile: bool,
batch_request: Optional[str] = None,
) |
Process various optional suite new flags and prompt if there is not enough information from the flags.
Args:
context: Data Context for use in sending error messages if any
usage_event_end: event name for ending usage stats message
interactive_flag: --interactive from the `suite new` CLI command
manual_flag: --manual from the `suite new` CLI command
profile: --profile from the `suite new` CLI command
batch_request: --batch-request from the `suite new` CLI command
Returns:
Dictionary with keys of processed parameters and boolean values e.g.
{"interactive": True, "profile": False}
|
(docstring_summary: identical to the opening of the docstring above, truncated) | def _process_suite_new_flags_and_prompt(
context: DataContext,
usage_event_end: str,
interactive_flag: bool,
manual_flag: bool,
profile: bool,
batch_request: Optional[str] = None,
) -> Dict[str, Optional[bool]]:
"""
Process various optional suite new flags and prompt if there is not enough information from the flags.
Args:
context: Data Context for use in sending error messages if any
usage_event_end: event name for ending usage stats message
interactive_flag: --interactive from the `suite new` CLI command
manual_flag: --manual from the `suite new` CLI command
profile: --profile from the `suite new` CLI command
batch_request: --batch-request from the `suite new` CLI command
Returns:
Dictionary with keys of processed parameters and boolean values e.g.
{"interactive": True, "profile": False}
"""
error_message: Optional[str] = None
# Convert interactive / no-interactive flags to interactive
interactive: Optional[bool] = None
if interactive_flag is True and manual_flag is True:
error_message = """Please choose either --interactive or --manual, you may not choose both."""
elif interactive_flag is False and manual_flag is False:
interactive = None
elif interactive_flag is True and manual_flag is False:
interactive = True
elif interactive_flag is False and manual_flag is True:
interactive = False
if error_message is not None:
cli_message(string=f"<red>{error_message}</red>")
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=False
)
sys.exit(1)
user_provided_any_flag_skip_prompt: bool = any(
((interactive is not None), (profile is True), (batch_request is not None))
)
# Note - explicit check for boolean or None for `interactive: Optional[bool]` is necessary because None indicates
# that a user did not supply either flag.
if user_provided_any_flag_skip_prompt:
# Assume batch needed if user passes --profile
if profile and interactive is None:
cli_message(
"<green>Entering interactive mode since you passed the --profile flag</green>"
)
interactive = True
elif profile and interactive is False:
cli_message(
"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag</yellow>"
)
interactive = True
# Assume batch needed if user passes --batch-request
elif (batch_request is not None) and (interactive is None):
cli_message(
"<green>Entering interactive mode since you passed the --batch-request flag</green>"
)
interactive = True
elif (batch_request is not None) and (interactive is False):
cli_message(
"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag</yellow>"
)
interactive = True
else:
suite_create_method: str = click.prompt(
"""\
How would you like to create your Expectation Suite?
1. Manually, without interacting with a sample batch of data (default)
2. Interactively, with a sample batch of data
3. Automatically, using a profiler
""",
type=click.Choice(["1", "2", "3"]),
show_choices=False,
default="1",
show_default=False,
)
# Default option
if suite_create_method == "":
interactive = False
profile = False
elif suite_create_method == "1":
interactive = False
profile = False
elif suite_create_method == "2":
interactive = True
profile = False
elif suite_create_method == "3":
interactive = True
profile = True
return {"interactive": interactive, "profile": profile} | [
"def",
"_process_suite_new_flags_and_prompt",
"(",
"context",
":",
"DataContext",
",",
"usage_event_end",
":",
"str",
",",
"interactive_flag",
":",
"bool",
",",
"manual_flag",
":",
"bool",
",",
"profile",
":",
"bool",
",",
"batch_request",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
"->",
"Dict",
"[",
"str",
",",
"Optional",
"[",
"bool",
"]",
"]",
":",
"error_message",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
"# Convert interactive / no-interactive flags to interactive",
"interactive",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
"if",
"interactive_flag",
"is",
"True",
"and",
"manual_flag",
"is",
"True",
":",
"error_message",
"=",
"\"\"\"Please choose either --interactive or --manual, you may not choose both.\"\"\"",
"elif",
"interactive_flag",
"is",
"False",
"and",
"manual_flag",
"is",
"False",
":",
"interactive",
"=",
"None",
"elif",
"interactive_flag",
"is",
"True",
"and",
"manual_flag",
"is",
"False",
":",
"interactive",
"=",
"True",
"elif",
"interactive_flag",
"is",
"False",
"and",
"manual_flag",
"is",
"True",
":",
"interactive",
"=",
"False",
"if",
"error_message",
"is",
"not",
"None",
":",
"cli_message",
"(",
"string",
"=",
"f\"<red>{error_message}</red>\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"user_provided_any_flag_skip_prompt",
":",
"bool",
"=",
"any",
"(",
"(",
"(",
"interactive",
"is",
"not",
"None",
")",
",",
"(",
"profile",
"is",
"True",
")",
",",
"(",
"batch_request",
"is",
"not",
"None",
")",
")",
")",
"# Note - explicit check for boolean or None for `interactive: Optional[bool]` is necessary because None indicates",
"# that a user did not supply either flag.",
"if",
"user_provided_any_flag_skip_prompt",
":",
"# Assume batch needed if user passes --profile",
"if",
"profile",
"and",
"interactive",
"is",
"None",
":",
"cli_message",
"(",
"\"<green>Entering interactive mode since you passed the --profile flag</green>\"",
")",
"interactive",
"=",
"True",
"elif",
"profile",
"and",
"interactive",
"is",
"False",
":",
"cli_message",
"(",
"\"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag</yellow>\"",
")",
"interactive",
"=",
"True",
"# Assume batch needed if user passes --batch-request",
"elif",
"(",
"batch_request",
"is",
"not",
"None",
")",
"and",
"(",
"interactive",
"is",
"None",
")",
":",
"cli_message",
"(",
"\"<green>Entering interactive mode since you passed the --batch-request flag</green>\"",
")",
"interactive",
"=",
"True",
"elif",
"(",
"batch_request",
"is",
"not",
"None",
")",
"and",
"(",
"interactive",
"is",
"False",
")",
":",
"cli_message",
"(",
"\"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag</yellow>\"",
")",
"interactive",
"=",
"True",
"else",
":",
"suite_create_method",
":",
"str",
"=",
"click",
".",
"prompt",
"(",
"\"\"\"\\\nHow would you like to create your Expectation Suite?\n 1. Manually, without interacting with a sample batch of data (default)\n 2. Interactively, with a sample batch of data\n 3. Automatically, using a profiler\n\"\"\"",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"\"1\"",
",",
"\"2\"",
",",
"\"3\"",
"]",
")",
",",
"show_choices",
"=",
"False",
",",
"default",
"=",
"\"1\"",
",",
"show_default",
"=",
"False",
",",
")",
"# Default option",
"if",
"suite_create_method",
"==",
"\"\"",
":",
"interactive",
"=",
"False",
"profile",
"=",
"False",
"elif",
"suite_create_method",
"==",
"\"1\"",
":",
"interactive",
"=",
"False",
"profile",
"=",
"False",
"elif",
"suite_create_method",
"==",
"\"2\"",
":",
"interactive",
"=",
"True",
"profile",
"=",
"False",
"elif",
"suite_create_method",
"==",
"\"3\"",
":",
"interactive",
"=",
"True",
"profile",
"=",
"True",
"return",
"{",
"\"interactive\"",
":",
"interactive",
",",
"\"profile\"",
":",
"profile",
"}"
] | [
141,
0
] | [
240,
59
] | python | en | ['en', 'error', 'th'] | False |
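To make the branchy resolution above easier to scan, here is a pure-function restatement of the same rules. It is a sketch of the documented behavior, not the library code: it raises instead of printing and exiting, and returns None for interactive where the real function would fall through to the prompt.

from typing import Optional

def resolve_suite_new_flags(interactive_flag: bool, manual_flag: bool,
                            profile: bool, batch_request: Optional[str]) -> dict:
    if interactive_flag and manual_flag:
        raise ValueError("--interactive and --manual are mutually exclusive")
    # three-valued: True, False, or None when neither flag was passed
    interactive: Optional[bool] = True if interactive_flag else (False if manual_flag else None)
    if profile or batch_request is not None:
        interactive = True  # --profile / --batch-request force interactive mode
    return {"interactive": interactive, "profile": profile}

print(resolve_suite_new_flags(False, True, True, None))
# -> {'interactive': True, 'profile': True}  (--manual overridden by --profile)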
suite_edit | (
ctx,
expectation_suite,
interactive_flag,
manual_flag,
datasource_name,
batch_request,
no_jupyter,
) |
Edit an existing Expectation Suite.
The SUITE argument is required. This is the name you gave to the suite
when you created it.
The edit command will help you specify a batch interactively. Or you can
specify them manually by providing --batch-request in valid JSON format.
Read more about specifying batches of data in the documentation: https://docs.greatexpectations.io/
|
Edit an existing Expectation Suite. | def suite_edit(
ctx,
expectation_suite,
interactive_flag,
manual_flag,
datasource_name,
batch_request,
no_jupyter,
):
"""
Edit an existing Expectation Suite.
The SUITE argument is required. This is the name you gave to the suite
when you created it.
The edit command will help you specify a batch interactively. Or you can
specify one manually by providing --batch-request with the path to a file containing a valid JSON batch request.
Read more about specifying batches of data in the documentation: https://docs.greatexpectations.io/
"""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
interactive: bool = _process_suite_edit_flags_and_prompt(
context=context,
usage_event_end=usage_event_end,
interactive_flag=interactive_flag,
manual_flag=manual_flag,
datasource_name=datasource_name,
batch_request=batch_request,
)
additional_batch_request_args: Optional[
Dict[str, Union[str, int, Dict[str, Any]]]
] = {"limit": 1000}
_suite_edit_workflow(
context=context,
expectation_suite_name=expectation_suite,
profile=False,
usage_event=usage_event_end,
interactive=interactive,
no_jupyter=no_jupyter,
create_if_not_exist=False,
datasource_name=datasource_name,
batch_request=batch_request,
additional_batch_request_args=additional_batch_request_args,
suppress_usage_message=False,
assume_yes=False,
) | [
"def",
"suite_edit",
"(",
"ctx",
",",
"expectation_suite",
",",
"interactive_flag",
",",
"manual_flag",
",",
"datasource_name",
",",
"batch_request",
",",
"no_jupyter",
",",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"interactive",
":",
"bool",
"=",
"_process_suite_edit_flags_and_prompt",
"(",
"context",
"=",
"context",
",",
"usage_event_end",
"=",
"usage_event_end",
",",
"interactive_flag",
"=",
"interactive_flag",
",",
"manual_flag",
"=",
"manual_flag",
",",
"datasource_name",
"=",
"datasource_name",
",",
"batch_request",
"=",
"batch_request",
",",
")",
"additional_batch_request_args",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"int",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"]",
"=",
"{",
"\"limit\"",
":",
"1000",
"}",
"_suite_edit_workflow",
"(",
"context",
"=",
"context",
",",
"expectation_suite_name",
"=",
"expectation_suite",
",",
"profile",
"=",
"False",
",",
"usage_event",
"=",
"usage_event_end",
",",
"interactive",
"=",
"interactive",
",",
"no_jupyter",
"=",
"no_jupyter",
",",
"create_if_not_exist",
"=",
"False",
",",
"datasource_name",
"=",
"datasource_name",
",",
"batch_request",
"=",
"batch_request",
",",
"additional_batch_request_args",
"=",
"additional_batch_request_args",
",",
"suppress_usage_message",
"=",
"False",
",",
"assume_yes",
"=",
"False",
",",
")"
] | [
391,
0
] | [
440,
5
] | python | en | ['en', 'error', 'th'] | False |
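For the --batch-request option above, a hedged sketch of what such a JSON file might contain. The keys shown are the usual v3 BatchRequest fields, but exact requirements depend on the configured datasource, so treat every value as a placeholder.

import json

batch_request = {
    "datasource_name": "my_datasource",  # hypothetical names throughout
    "data_connector_name": "default_inferred_data_connector_name",
    "data_asset_name": "yellow_tripdata_sample",
}
with open("batch_request.json", "w") as f:
    json.dump(batch_request, f, indent=2)
# then, e.g.: great_expectations suite edit my_suite --batch-request batch_request.json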
_process_suite_edit_flags_and_prompt | (
context: DataContext,
usage_event_end: str,
interactive_flag: bool,
manual_flag: bool,
datasource_name: Optional[str] = None,
batch_request: Optional[str] = None,
) |
Process various optional suite edit flags and prompt if there is not enough information from the flags.
Args:
context: Data Context for use in sending error messages if any
usage_event_end: event name for ending usage stats message
interactive_flag: --interactive from the `suite edit` CLI command
manual_flag: --manual from the `suite edit` CLI command
datasource_name: --datasource-name from the `suite edit` CLI command
batch_request: --batch-request from the `suite edit` CLI command
Returns:
boolean of whether to enter interactive mode
|
(docstring_summary: identical to the opening of the docstring above, truncated) | def _process_suite_edit_flags_and_prompt(
context: DataContext,
usage_event_end: str,
interactive_flag: bool,
manual_flag: bool,
datasource_name: Optional[str] = None,
batch_request: Optional[str] = None,
) -> bool:
"""
Process various optional suite edit flags and prompt if there is not enough information from the flags.
Args:
context: Data Context for use in sending error messages if any
usage_event_end: event name for ending usage stats message
interactive_flag: --interactive from the `suite edit` CLI command
manual_flag: --manual from the `suite edit` CLI command
datasource_name: --datasource-name from the `suite edit` CLI command
batch_request: --batch-request from the `suite edit` CLI command
Returns:
boolean of whether to enter interactive mode
"""
error_message: Optional[str] = None
# Convert interactive / no-interactive flags to interactive
interactive: Optional[bool] = None
if interactive_flag is True and manual_flag is True:
error_message = """Please choose either --interactive or --manual, you may not choose both."""
elif interactive_flag is False and manual_flag is False:
interactive = None
elif interactive_flag is True and manual_flag is False:
interactive = True
elif interactive_flag is False and manual_flag is True:
interactive = False
if (datasource_name is not None) and (batch_request is not None):
error_message = """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \
options can be used.
"""
if error_message is not None:
cli_message(string=f"<red>{error_message}</red>")
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=False
)
sys.exit(1)
user_provided_any_flag_skip_prompt: bool = any(
(
(interactive is not None),
(datasource_name is not None),
(batch_request is not None),
)
)
# Note - explicit check for boolean or None for `interactive: Optional[bool]` is necessary because None indicates
# that a user did not supply either flag.
if user_provided_any_flag_skip_prompt:
if datasource_name is not None:
if interactive is None:
cli_message(
"<green>Entering interactive mode since you passed the --datasource-name flag</green>"
)
elif interactive is False:
cli_message(
"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag</yellow>"
)
interactive = True
elif batch_request is not None:
if interactive is None:
cli_message(
"<green>Entering interactive mode since you passed the --batch-request flag</green>"
)
elif interactive is False:
cli_message(
"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag</yellow>"
)
interactive = True
else:
suite_edit_method: str = click.prompt(
"""\
How would you like to edit your Expectation Suite?
1. Manually, without interacting with a sample batch of data (default)
2. Interactively, with a sample batch of data
""",
type=click.Choice(["1", "2"]),
show_choices=False,
default="1",
show_default=False,
)
# Default option
if suite_edit_method == "":
interactive = False
if suite_edit_method == "1":
interactive = False
elif suite_edit_method == "2":
interactive = True
return interactive | [
"def",
"_process_suite_edit_flags_and_prompt",
"(",
"context",
":",
"DataContext",
",",
"usage_event_end",
":",
"str",
",",
"interactive_flag",
":",
"bool",
",",
"manual_flag",
":",
"bool",
",",
"datasource_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_request",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
"->",
"bool",
":",
"error_message",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
"# Convert interactive / no-interactive flags to interactive",
"interactive",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
"if",
"interactive_flag",
"is",
"True",
"and",
"manual_flag",
"is",
"True",
":",
"error_message",
"=",
"\"\"\"Please choose either --interactive or --manual, you may not choose both.\"\"\"",
"elif",
"interactive_flag",
"is",
"False",
"and",
"manual_flag",
"is",
"False",
":",
"interactive",
"=",
"None",
"elif",
"interactive_flag",
"is",
"True",
"and",
"manual_flag",
"is",
"False",
":",
"interactive",
"=",
"True",
"elif",
"interactive_flag",
"is",
"False",
"and",
"manual_flag",
"is",
"True",
":",
"interactive",
"=",
"False",
"if",
"(",
"datasource_name",
"is",
"not",
"None",
")",
"and",
"(",
"batch_request",
"is",
"not",
"None",
")",
":",
"error_message",
"=",
"\"\"\"Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \\\noptions can be used.\n\"\"\"",
"if",
"error_message",
"is",
"not",
"None",
":",
"cli_message",
"(",
"string",
"=",
"f\"<red>{error_message}</red>\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"user_provided_any_flag_skip_prompt",
":",
"bool",
"=",
"any",
"(",
"(",
"(",
"interactive",
"is",
"not",
"None",
")",
",",
"(",
"datasource_name",
"is",
"not",
"None",
")",
",",
"(",
"batch_request",
"is",
"not",
"None",
")",
",",
")",
")",
"# Note - explicit check for boolean or None for `interactive: Optional[bool]` is necessary because None indicates",
"# that a user did not supply either flag.",
"if",
"user_provided_any_flag_skip_prompt",
":",
"if",
"datasource_name",
"is",
"not",
"None",
":",
"if",
"interactive",
"is",
"None",
":",
"cli_message",
"(",
"\"<green>Entering interactive mode since you passed the --datasource-name flag</green>\"",
")",
"elif",
"interactive",
"is",
"False",
":",
"cli_message",
"(",
"\"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag</yellow>\"",
")",
"interactive",
"=",
"True",
"elif",
"batch_request",
"is",
"not",
"None",
":",
"if",
"interactive",
"is",
"None",
":",
"cli_message",
"(",
"\"<green>Entering interactive mode since you passed the --batch-request flag</green>\"",
")",
"elif",
"interactive",
"is",
"False",
":",
"cli_message",
"(",
"\"<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag</yellow>\"",
")",
"interactive",
"=",
"True",
"else",
":",
"suite_edit_method",
":",
"str",
"=",
"click",
".",
"prompt",
"(",
"\"\"\"\\\nHow would you like to edit your Expectation Suite?\n 1. Manually, without interacting with a sample batch of data (default)\n 2. Interactively, with a sample batch of data\n\"\"\"",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"\"1\"",
",",
"\"2\"",
"]",
")",
",",
"show_choices",
"=",
"False",
",",
"default",
"=",
"\"1\"",
",",
"show_default",
"=",
"False",
",",
")",
"# Default option",
"if",
"suite_edit_method",
"==",
"\"\"",
":",
"interactive",
"=",
"False",
"if",
"suite_edit_method",
"==",
"\"1\"",
":",
"interactive",
"=",
"False",
"elif",
"suite_edit_method",
"==",
"\"2\"",
":",
"interactive",
"=",
"True",
"return",
"interactive"
] | [
443,
0
] | [
541,
22
] | python | en | ['en', 'error', 'th'] | False |
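A minimal standalone sketch of the flag-to-``interactive`` conversion performed at the top of this function (the helper name is illustrative, not part of the CLI)::

    from typing import Optional

    def flags_to_interactive(interactive_flag: bool, manual_flag: bool) -> Optional[bool]:
        # None signals "neither flag was passed", so the caller falls back to a prompt.
        if interactive_flag and manual_flag:
            raise ValueError("Please choose either --interactive or --manual, you may not choose both.")
        if not interactive_flag and not manual_flag:
            return None
        return interactive_flag  # True for --interactive, False for --manual

    assert flags_to_interactive(False, False) is None
    assert flags_to_interactive(True, False) is True
    assert flags_to_interactive(False, True) is False
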
suite_demo | (ctx) | This command is not supported in the v3 (Batch Request) API. | This command is not supported in the v3 (Batch Request) API. | def suite_demo(ctx):
"""This command is not supported in the v3 (Batch Request) API."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
)
cli_message(
string="This command is not supported in the v3 (Batch Request) API. Please use `suite new` instead."
) | [
"def",
"suite_demo",
"(",
"ctx",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"cli_message",
"(",
"string",
"=",
"\"This command is not supported in the v3 (Batch Request) API. Please use `suite new` instead.\"",
")"
] | [
695,
0
] | [
704,
5
] | python | en | ['en', 'en', 'en'] | True |
suite_delete | (ctx, suite) |
Delete an Expectation Suite from the Expectation Store.
|
Delete an Expectation Suite from the Expectation Store.
| def suite_delete(ctx, suite):
"""
Delete an Expectation Suite from the Expectation Store.
"""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
suite_names: List[str] = context.list_expectation_suite_names()
except Exception as e:
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=False
)
raise e
if not suite_names:
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
suppress_usage_message=False,
message="<red>No expectation suites found in the project.</red>",
)
if suite not in suite_names:
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
suppress_usage_message=False,
message=f"<red>No expectation suite named {suite} found.</red>",
)
if not (
ctx.obj.assume_yes
or toolkit.confirm_proceed_or_exit(
exit_on_no=False, data_context=context, usage_stats_event=usage_event_end
)
):
cli_message(string=f"Suite `{suite}` was not deleted.")
sys.exit(0)
context.delete_expectation_suite(suite)
cli_message(string=f"Deleted the expectation suite named: {suite}")
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
) | [
"def",
"suite_delete",
"(",
"ctx",
",",
"suite",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"suite_names",
":",
"List",
"[",
"str",
"]",
"=",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"False",
")",
"raise",
"e",
"if",
"not",
"suite_names",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"suppress_usage_message",
"=",
"False",
",",
"message",
"=",
"\"<red>No expectation suites found in the project.</red>\"",
",",
")",
"if",
"suite",
"not",
"in",
"suite_names",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"suppress_usage_message",
"=",
"False",
",",
"message",
"=",
"f\"<red>No expectation suite named {suite} found.</red>\"",
",",
")",
"if",
"not",
"(",
"ctx",
".",
"obj",
".",
"assume_yes",
"or",
"toolkit",
".",
"confirm_proceed_or_exit",
"(",
"exit_on_no",
"=",
"False",
",",
"data_context",
"=",
"context",
",",
"usage_stats_event",
"=",
"usage_event_end",
")",
")",
":",
"cli_message",
"(",
"string",
"=",
"f\"Suite `{suite}` was not deleted.\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"context",
".",
"delete_expectation_suite",
"(",
"suite",
")",
"cli_message",
"(",
"string",
"=",
"f\"Deleted the expectation suite named: {suite}\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")"
] | [
710,
0
] | [
752,
5
] | python | en | ['en', 'error', 'th'] | False |
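The same deletion can be done programmatically with the two ``DataContext`` methods this command calls. A minimal sketch, assuming a configured project on disk; the import path and suite name are illustrative and may vary by great_expectations version::

    from great_expectations.data_context import DataContext  # import path may vary by version

    context = DataContext()
    if "my_suite" in context.list_expectation_suite_names():
        context.delete_expectation_suite("my_suite")
        print("Deleted the expectation suite named: my_suite")
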
suite_list | (ctx) | List existing Expectation Suites. | List existing Expectation Suites. | def suite_list(ctx):
"""List existing Expectation Suites."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
suite_names: List[str] = context.list_expectation_suite_names()
except Exception as e:
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=False
)
raise e
suite_names_styled: List[str] = [
f" - <cyan>{suite_name}</cyan>" for suite_name in suite_names
]
if len(suite_names_styled) == 0:
cli_message(string="No Expectation Suites found")
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
)
return
list_intro_string: str
if len(suite_names_styled) == 1:
list_intro_string = "1 Expectation Suite found:"
else:
list_intro_string = f"{len(suite_names_styled)} Expectation Suites found:"
cli_message_list(
string_list=suite_names_styled, list_intro_string=list_intro_string
)
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
) | [
"def",
"suite_list",
"(",
"ctx",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"suite_names",
":",
"List",
"[",
"str",
"]",
"=",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"False",
")",
"raise",
"e",
"suite_names_styled",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"f\" - <cyan>{suite_name}</cyan>\"",
"for",
"suite_name",
"in",
"suite_names",
"]",
"if",
"len",
"(",
"suite_names_styled",
")",
"==",
"0",
":",
"cli_message",
"(",
"string",
"=",
"\"No Expectation Suites found\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"return",
"list_intro_string",
":",
"str",
"if",
"len",
"(",
"suite_names_styled",
")",
"==",
"1",
":",
"list_intro_string",
"=",
"\"1 Expectation Suite found:\"",
"else",
":",
"list_intro_string",
"=",
"f\"{len(suite_names_styled)} Expectation Suites found:\"",
"cli_message_list",
"(",
"string_list",
"=",
"suite_names_styled",
",",
"list_intro_string",
"=",
"list_intro_string",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")"
] | [
757,
0
] | [
789,
5
] | python | en | ['ca', 'en', 'en'] | True |
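A sketch of the same listing outside the CLI, reusing the command's singular/plural intro-string rule (import path is an assumption, as above)::

    from great_expectations.data_context import DataContext  # import path may vary by version

    context = DataContext()
    names = context.list_expectation_suite_names()
    if not names:
        print("No Expectation Suites found")
    else:
        plural = "" if len(names) == 1 else "s"
        print(f"{len(names)} Expectation Suite{plural} found:")
        for name in names:
            print(f" - {name}")
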
ColumnsExistProfiler._profile | (cls, dataset, configuration=None) |
This function will take a dataset and add expectations that each column present exists.
Args:
dataset (great_expectations.dataset): The dataset to profile and to which to add expectations.
configuration: Configuration for select profilers.
|
This function will take a dataset and add expectations that each column present exists. | def _profile(cls, dataset, configuration=None):
"""
This function will take a dataset and add expectations that each column present exists.
Args:
dataset (great_expectations.dataset): The dataset to profile and to which to add expectations.
configuration: Configuration for select profilers.
"""
if not hasattr(dataset, "get_table_columns"):
warnings.warn("No columns list found in dataset; no profiling performed.")
raise NotImplementedError(
"ColumnsExistProfiler._profile is not implemented for data assests without the table_columns property"
)
table_columns = dataset.get_table_columns()
if table_columns is None:
warnings.warn("No columns list found in dataset; no profiling performed.")
raise NotImplementedError(
"ColumnsExistProfiler._profile is not implemented for data assests without the table_columns property"
)
create_multiple_expectations(dataset, table_columns, "expect_column_to_exist")
return dataset.get_expectation_suite(suppress_warnings=True) | [
"def",
"_profile",
"(",
"cls",
",",
"dataset",
",",
"configuration",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"dataset",
",",
"\"get_table_columns\"",
")",
":",
"warnings",
".",
"warn",
"(",
"\"No columns list found in dataset; no profiling performed.\"",
")",
"raise",
"NotImplementedError",
"(",
"\"ColumnsExistProfiler._profile is not implemented for data assests without the table_columns property\"",
")",
"table_columns",
"=",
"dataset",
".",
"get_table_columns",
"(",
")",
"if",
"table_columns",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"No columns list found in dataset; no profiling performed.\"",
")",
"raise",
"NotImplementedError",
"(",
"\"ColumnsExistProfiler._profile is not implemented for data assests without the table_columns property\"",
")",
"create_multiple_expectations",
"(",
"dataset",
",",
"table_columns",
",",
"\"expect_column_to_exist\"",
")",
"return",
"dataset",
".",
"get_expectation_suite",
"(",
"suppress_warnings",
"=",
"True",
")"
] | [
9,
4
] | [
34,
68
] | python | en | ['en', 'error', 'th'] | False |
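A sketch of the profiler against an in-memory Pandas dataset; this assumes the legacy ``PandasDataset`` wrapper (which provides the ``get_table_columns`` method required above) and the import paths shown, which may differ by version::

    import pandas as pd
    from great_expectations.dataset import PandasDataset         # assumed import path
    from great_expectations.profile import ColumnsExistProfiler  # assumed import path

    ds = PandasDataset(pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}))
    suite = ColumnsExistProfiler._profile(ds)
    print([exp.expectation_type for exp in suite.expectations])
    # expected: ['expect_column_to_exist', 'expect_column_to_exist']
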
ExpectColumnMostCommonValueToBeInSet.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) | Validates that the user has provided a value set and that the configuration has been initialized | Validates that the user has provided a value set and that the configuration has been initialized | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""Validating that user has inputted a value set and that configuration has been initialized"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
try:
assert "value_set" in configuration.kwargs, "value_set is required"
assert isinstance(
configuration.kwargs["value_set"], (list, set, dict)
), "value_set must be a list or a set"
if isinstance(configuration.kwargs["value_set"], dict):
assert (
"$PARAMETER" in configuration.kwargs["value_set"]
), 'Evaluation Parameter dict for value_set_kwarg must have "$PARAMETER" key'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"if",
"configuration",
"is",
"None",
":",
"configuration",
"=",
"self",
".",
"configuration",
"try",
":",
"assert",
"\"value_set\"",
"in",
"configuration",
".",
"kwargs",
",",
"\"value_set is required\"",
"assert",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"value_set\"",
"]",
",",
"(",
"list",
",",
"set",
",",
"dict",
")",
")",
",",
"\"value_set must be a list or a set\"",
"if",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"value_set\"",
"]",
",",
"dict",
")",
":",
"assert",
"(",
"\"$PARAMETER\"",
"in",
"configuration",
".",
"kwargs",
"[",
"\"value_set\"",
"]",
")",
",",
"'Evaluation Parameter dict for value_set_kwarg must have \"$PARAMETER\" key'",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"InvalidExpectationConfigurationError",
"(",
"str",
"(",
"e",
")",
")",
"return",
"True"
] | [
96,
4
] | [
112,
19
] | python | en | ['en', 'en', 'en'] | True |
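Configurations that pass this validation come in two shapes: a literal value set, or an evaluation-parameter dict keyed by ``"$PARAMETER"``. A sketch (the import path and parameter name are illustrative)::

    from great_expectations.core import ExpectationConfiguration  # import path may vary by version

    ok_literal = ExpectationConfiguration(
        expectation_type="expect_column_most_common_value_to_be_in_set",
        kwargs={"column": "status", "value_set": ["active", "inactive"]},
    )
    ok_parameter = ExpectationConfiguration(
        expectation_type="expect_column_most_common_value_to_be_in_set",
        kwargs={"column": "status", "value_set": {"$PARAMETER": "shared_value_set"}},
    )
    # Omitting value_set entirely would raise InvalidExpectationConfigurationError.
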
open_process | (
command, *, stdin=None, stdout=None, stderr=None, **options
) | r"""Execute a child program in a new process.
After construction, you can interact with the child process by writing
data to its `~Process.stdin` stream (a `~trio.abc.SendStream`), reading
data from its `~Process.stdout` and/or `~Process.stderr` streams (both
`~trio.abc.ReceiveStream`\s), sending it signals using
`~Process.terminate`, `~Process.kill`, or `~Process.send_signal`, and
waiting for it to exit using `~Process.wait`. See `Process` for details.
Each standard stream is only available if you specify that a pipe should
be created for it. For example, if you pass ``stdin=subprocess.PIPE``, you
can write to the `~Process.stdin` stream, else `~Process.stdin` will be
``None``.
Args:
command (list or str): The command to run. Typically this is a
sequence of strings such as ``['ls', '-l', 'directory with spaces']``,
where the first element names the executable to invoke and the other
elements specify its arguments. With ``shell=True`` in the
``**options``, or on Windows, ``command`` may alternatively
be a string, which will be parsed following platform-dependent
:ref:`quoting rules <subprocess-quoting>`.
stdin: Specifies what the child process's standard input
stream should connect to: output written by the parent
(``subprocess.PIPE``), nothing (``subprocess.DEVNULL``),
or an open file (pass a file descriptor or something whose
``fileno`` method returns one). If ``stdin`` is unspecified,
the child process will have the same standard input stream
as its parent.
stdout: Like ``stdin``, but for the child process's standard output
stream.
stderr: Like ``stdin``, but for the child process's standard error
stream. An additional value ``subprocess.STDOUT`` is supported,
which causes the child's standard output and standard error
messages to be intermixed on a single standard output stream,
attached to whatever the ``stdout`` option says to attach it to.
**options: Other :ref:`general subprocess options <subprocess-options>`
are also accepted.
Returns:
A new `Process` object.
Raises:
OSError: if the process spawning fails, for example because the
specified command could not be found.
| r"""Execute a child program in a new process. | async def open_process(
command, *, stdin=None, stdout=None, stderr=None, **options
) -> Process:
r"""Execute a child program in a new process.
After construction, you can interact with the child process by writing
data to its `~Process.stdin` stream (a `~trio.abc.SendStream`), reading
data from its `~Process.stdout` and/or `~Process.stderr` streams (both
`~trio.abc.ReceiveStream`\s), sending it signals using
`~Process.terminate`, `~Process.kill`, or `~Process.send_signal`, and
waiting for it to exit using `~Process.wait`. See `Process` for details.
Each standard stream is only available if you specify that a pipe should
be created for it. For example, if you pass ``stdin=subprocess.PIPE``, you
can write to the `~Process.stdin` stream, else `~Process.stdin` will be
``None``.
Args:
command (list or str): The command to run. Typically this is a
sequence of strings such as ``['ls', '-l', 'directory with spaces']``,
where the first element names the executable to invoke and the other
elements specify its arguments. With ``shell=True`` in the
``**options``, or on Windows, ``command`` may alternatively
be a string, which will be parsed following platform-dependent
:ref:`quoting rules <subprocess-quoting>`.
stdin: Specifies what the child process's standard input
stream should connect to: output written by the parent
(``subprocess.PIPE``), nothing (``subprocess.DEVNULL``),
or an open file (pass a file descriptor or something whose
``fileno`` method returns one). If ``stdin`` is unspecified,
the child process will have the same standard input stream
as its parent.
stdout: Like ``stdin``, but for the child process's standard output
stream.
stderr: Like ``stdin``, but for the child process's standard error
stream. An additional value ``subprocess.STDOUT`` is supported,
which causes the child's standard output and standard error
messages to be intermixed on a single standard output stream,
attached to whatever the ``stdout`` option says to attach it to.
**options: Other :ref:`general subprocess options <subprocess-options>`
are also accepted.
Returns:
A new `Process` object.
Raises:
OSError: if the process spawning fails, for example because the
specified command could not be found.
"""
for key in ("universal_newlines", "text", "encoding", "errors", "bufsize"):
if options.get(key):
raise TypeError(
"trio.Process only supports communicating over "
"unbuffered byte streams; the '{}' option is not supported".format(key)
)
if os.name == "posix":
if isinstance(command, str) and not options.get("shell"):
raise TypeError(
"command must be a sequence (not a string) if shell=False "
"on UNIX systems"
)
if not isinstance(command, str) and options.get("shell"):
raise TypeError(
"command must be a string (not a sequence) if shell=True "
"on UNIX systems"
)
trio_stdin = None # type: Optional[SendStream]
trio_stdout = None # type: Optional[ReceiveStream]
trio_stderr = None # type: Optional[ReceiveStream]
if stdin == subprocess.PIPE:
trio_stdin, stdin = create_pipe_to_child_stdin()
if stdout == subprocess.PIPE:
trio_stdout, stdout = create_pipe_from_child_output()
if stderr == subprocess.STDOUT:
# If we created a pipe for stdout, pass the same pipe for
# stderr. If stdout was some non-pipe thing (DEVNULL or a
# given FD), pass the same thing. If stdout was passed as
# None, keep stderr as STDOUT to allow subprocess to dup
# our stdout. Regardless of which of these is applicable,
# don't create a new Trio stream for stderr -- if stdout
# is piped, stderr will be intermixed on the stdout stream.
if stdout is not None:
stderr = stdout
elif stderr == subprocess.PIPE:
trio_stderr, stderr = create_pipe_from_child_output()
try:
popen = await trio.to_thread.run_sync(
partial(
subprocess.Popen,
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
**options,
)
)
finally:
# Close the parent's handle for each child side of a pipe;
# we want the child to have the only copy, so that when
# it exits we can read EOF on our side.
if trio_stdin is not None:
os.close(stdin)
if trio_stdout is not None:
os.close(stdout)
if trio_stderr is not None:
os.close(stderr)
return Process._create(popen, trio_stdin, trio_stdout, trio_stderr) | [
"async",
"def",
"open_process",
"(",
"command",
",",
"*",
",",
"stdin",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"*",
"*",
"options",
")",
"->",
"Process",
":",
"for",
"key",
"in",
"(",
"\"universal_newlines\"",
",",
"\"text\"",
",",
"\"encoding\"",
",",
"\"errors\"",
",",
"\"bufsize\"",
")",
":",
"if",
"options",
".",
"get",
"(",
"key",
")",
":",
"raise",
"TypeError",
"(",
"\"trio.Process only supports communicating over \"",
"\"unbuffered byte streams; the '{}' option is not supported\"",
".",
"format",
"(",
"key",
")",
")",
"if",
"os",
".",
"name",
"==",
"\"posix\"",
":",
"if",
"isinstance",
"(",
"command",
",",
"str",
")",
"and",
"not",
"options",
".",
"get",
"(",
"\"shell\"",
")",
":",
"raise",
"TypeError",
"(",
"\"command must be a sequence (not a string) if shell=False \"",
"\"on UNIX systems\"",
")",
"if",
"not",
"isinstance",
"(",
"command",
",",
"str",
")",
"and",
"options",
".",
"get",
"(",
"\"shell\"",
")",
":",
"raise",
"TypeError",
"(",
"\"command must be a string (not a sequence) if shell=True \"",
"\"on UNIX systems\"",
")",
"trio_stdin",
"=",
"None",
"# type: Optional[SendStream]",
"trio_stdout",
"=",
"None",
"# type: Optional[ReceiveStream]",
"trio_stderr",
"=",
"None",
"# type: Optional[ReceiveStream]",
"if",
"stdin",
"==",
"subprocess",
".",
"PIPE",
":",
"trio_stdin",
",",
"stdin",
"=",
"create_pipe_to_child_stdin",
"(",
")",
"if",
"stdout",
"==",
"subprocess",
".",
"PIPE",
":",
"trio_stdout",
",",
"stdout",
"=",
"create_pipe_from_child_output",
"(",
")",
"if",
"stderr",
"==",
"subprocess",
".",
"STDOUT",
":",
"# If we created a pipe for stdout, pass the same pipe for",
"# stderr. If stdout was some non-pipe thing (DEVNULL or a",
"# given FD), pass the same thing. If stdout was passed as",
"# None, keep stderr as STDOUT to allow subprocess to dup",
"# our stdout. Regardless of which of these is applicable,",
"# don't create a new Trio stream for stderr -- if stdout",
"# is piped, stderr will be intermixed on the stdout stream.",
"if",
"stdout",
"is",
"not",
"None",
":",
"stderr",
"=",
"stdout",
"elif",
"stderr",
"==",
"subprocess",
".",
"PIPE",
":",
"trio_stderr",
",",
"stderr",
"=",
"create_pipe_from_child_output",
"(",
")",
"try",
":",
"popen",
"=",
"await",
"trio",
".",
"to_thread",
".",
"run_sync",
"(",
"partial",
"(",
"subprocess",
".",
"Popen",
",",
"command",
",",
"stdin",
"=",
"stdin",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"*",
"*",
"options",
",",
")",
")",
"finally",
":",
"# Close the parent's handle for each child side of a pipe;",
"# we want the child to have the only copy, so that when",
"# it exits we can read EOF on our side.",
"if",
"trio_stdin",
"is",
"not",
"None",
":",
"os",
".",
"close",
"(",
"stdin",
")",
"if",
"trio_stdout",
"is",
"not",
"None",
":",
"os",
".",
"close",
"(",
"stdout",
")",
"if",
"trio_stderr",
"is",
"not",
"None",
":",
"os",
".",
"close",
"(",
"stderr",
")",
"return",
"Process",
".",
"_create",
"(",
"popen",
",",
"trio_stdin",
",",
"trio_stdout",
",",
"trio_stderr",
")"
] | [
278,
0
] | [
390,
71
] | python | en | ['en', 'en', 'en'] | True |
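A minimal Unix sketch of `open_process` with both pipes, echoing one line through ``cat``::

    import subprocess
    import trio

    async def main():
        proc = await trio.open_process(
            ["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE
        )
        async with proc:
            await proc.stdin.send_all(b"hello\n")
            await proc.stdin.aclose()                    # EOF lets cat exit
            print(await proc.stdout.receive_some(1024))  # b'hello\n'

    trio.run(main)
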
run_process | (
command,
*,
stdin=b"",
capture_stdout=False,
capture_stderr=False,
check=True,
deliver_cancel=None,
**options,
) | Run ``command`` in a subprocess, wait for it to complete, and
return a :class:`subprocess.CompletedProcess` instance describing
the results.
If cancelled, :func:`run_process` terminates the subprocess and
waits for it to exit before propagating the cancellation, like
:meth:`Process.aclose`.
**Input:** The subprocess's standard input stream is set up to
receive the bytes provided as ``stdin``. Once the given input has
been fully delivered, or if none is provided, the subprocess will
receive end-of-file when reading from its standard input.
Alternatively, if you want the subprocess to read its
standard input from the same place as the parent Trio process, you
can pass ``stdin=None``.
**Output:** By default, any output produced by the subprocess is
passed through to the standard output and error streams of the
parent Trio process. If you would like to capture this output and
do something with it, you can pass ``capture_stdout=True`` to
capture the subprocess's standard output, and/or
``capture_stderr=True`` to capture its standard error. Captured
data is provided as the
:attr:`~subprocess.CompletedProcess.stdout` and/or
:attr:`~subprocess.CompletedProcess.stderr` attributes of the
returned :class:`~subprocess.CompletedProcess` object. The value
for any stream that was not captured will be ``None``.
If you want to capture both stdout and stderr while keeping them
separate, pass ``capture_stdout=True, capture_stderr=True``.
If you want to capture both stdout and stderr but mixed together
in the order they were printed, use: ``capture_stdout=True, stderr=subprocess.STDOUT``.
This directs the child's stderr into its stdout, so the combined
output will be available in the `~subprocess.CompletedProcess.stdout`
attribute.
**Error checking:** If the subprocess exits with a nonzero status
code, indicating failure, :func:`run_process` raises a
:exc:`subprocess.CalledProcessError` exception rather than
returning normally. The captured outputs are still available as
the :attr:`~subprocess.CalledProcessError.stdout` and
:attr:`~subprocess.CalledProcessError.stderr` attributes of that
exception. To disable this behavior, so that :func:`run_process`
returns normally even if the subprocess exits abnormally, pass
``check=False``.
Args:
command (list or str): The command to run. Typically this is a
sequence of strings such as ``['ls', '-l', 'directory with spaces']``,
where the first element names the executable to invoke and the other
elements specify its arguments. With ``shell=True`` in the
``**options``, or on Windows, ``command`` may alternatively
be a string, which will be parsed following platform-dependent
:ref:`quoting rules <subprocess-quoting>`.
stdin (:obj:`bytes`, file descriptor, or None): The bytes to provide to
the subprocess on its standard input stream, or ``None`` if the
subprocess's standard input should come from the same place as
the parent Trio process's standard input. As is the case with
the :mod:`subprocess` module, you can also pass a
file descriptor or an object with a ``fileno()`` method,
in which case the subprocess's standard input will come from
that file.
capture_stdout (bool): If true, capture the bytes that the subprocess
writes to its standard output stream and return them in the
:attr:`~subprocess.CompletedProcess.stdout` attribute
of the returned :class:`~subprocess.CompletedProcess` object.
capture_stderr (bool): If true, capture the bytes that the subprocess
writes to its standard error stream and return them in the
:attr:`~subprocess.CompletedProcess.stderr` attribute
of the returned :class:`~subprocess.CompletedProcess` object.
check (bool): If false, don't validate that the subprocess exits
successfully. You should be sure to check the
``returncode`` attribute of the returned object if you pass
``check=False``, so that errors don't pass silently.
deliver_cancel (async function or None): If `run_process` is cancelled,
then it needs to kill the child process. There are multiple ways to
do this, so we let you customize it.
If you pass None (the default), then the behavior depends on the
platform:
- On Windows, Trio calls ``TerminateProcess``, which should kill the
process immediately.
- On Unix-likes, the default behavior is to send a ``SIGTERM``, wait
5 seconds, and send a ``SIGKILL``.
Alternatively, you can customize this behavior by passing in an
arbitrary async function, which will be called with the `Process`
object as an argument. For example, the default Unix behavior could
be implemented like this::
async def my_deliver_cancel(process):
process.send_signal(signal.SIGTERM)
await trio.sleep(5)
process.send_signal(signal.SIGKILL)
When the process actually exits, the ``deliver_cancel`` function
will automatically be cancelled – so if the process exits after
``SIGTERM``, then we'll never reach the ``SIGKILL``.
In any case, `run_process` will always wait for the child process to
exit before raising `Cancelled`.
**options: :func:`run_process` also accepts any :ref:`general subprocess
options <subprocess-options>` and passes them on to the
:class:`~trio.Process` constructor. This includes the
``stdout`` and ``stderr`` options, which provide additional
redirection possibilities such as ``stderr=subprocess.STDOUT``,
``stdout=subprocess.DEVNULL``, or file descriptors.
Returns:
A :class:`subprocess.CompletedProcess` instance describing the
return code and outputs.
Raises:
UnicodeError: if ``stdin`` is specified as a Unicode string, rather
than bytes
ValueError: if multiple redirections are specified for the same
stream, e.g., both ``capture_stdout=True`` and
``stdout=subprocess.DEVNULL``
subprocess.CalledProcessError: if ``check=False`` is not passed
and the process exits with a nonzero exit status
OSError: if an error is encountered starting or communicating with
the process
.. note:: The child process runs in the same process group as the parent
Trio process, so a Ctrl+C will be delivered simultaneously to both
parent and child. If you don't want this behavior, consult your
platform's documentation for starting child processes in a different
process group.
| Run ``command`` in a subprocess, wait for it to complete, and
return a :class:`subprocess.CompletedProcess` instance describing
the results. | async def run_process(
command,
*,
stdin=b"",
capture_stdout=False,
capture_stderr=False,
check=True,
deliver_cancel=None,
**options,
):
"""Run ``command`` in a subprocess, wait for it to complete, and
return a :class:`subprocess.CompletedProcess` instance describing
the results.
If cancelled, :func:`run_process` terminates the subprocess and
waits for it to exit before propagating the cancellation, like
:meth:`Process.aclose`.
**Input:** The subprocess's standard input stream is set up to
receive the bytes provided as ``stdin``. Once the given input has
been fully delivered, or if none is provided, the subprocess will
receive end-of-file when reading from its standard input.
Alternatively, if you want the subprocess to read its
standard input from the same place as the parent Trio process, you
can pass ``stdin=None``.
**Output:** By default, any output produced by the subprocess is
passed through to the standard output and error streams of the
parent Trio process. If you would like to capture this output and
do something with it, you can pass ``capture_stdout=True`` to
capture the subprocess's standard output, and/or
``capture_stderr=True`` to capture its standard error. Captured
data is provided as the
:attr:`~subprocess.CompletedProcess.stdout` and/or
:attr:`~subprocess.CompletedProcess.stderr` attributes of the
returned :class:`~subprocess.CompletedProcess` object. The value
for any stream that was not captured will be ``None``.
If you want to capture both stdout and stderr while keeping them
separate, pass ``capture_stdout=True, capture_stderr=True``.
If you want to capture both stdout and stderr but mixed together
in the order they were printed, use: ``capture_stdout=True, stderr=subprocess.STDOUT``.
This directs the child's stderr into its stdout, so the combined
output will be available in the `~subprocess.CompletedProcess.stdout`
attribute.
**Error checking:** If the subprocess exits with a nonzero status
code, indicating failure, :func:`run_process` raises a
:exc:`subprocess.CalledProcessError` exception rather than
returning normally. The captured outputs are still available as
the :attr:`~subprocess.CalledProcessError.stdout` and
:attr:`~subprocess.CalledProcessError.stderr` attributes of that
exception. To disable this behavior, so that :func:`run_process`
returns normally even if the subprocess exits abnormally, pass
``check=False``.
Args:
command (list or str): The command to run. Typically this is a
sequence of strings such as ``['ls', '-l', 'directory with spaces']``,
where the first element names the executable to invoke and the other
elements specify its arguments. With ``shell=True`` in the
``**options``, or on Windows, ``command`` may alternatively
be a string, which will be parsed following platform-dependent
:ref:`quoting rules <subprocess-quoting>`.
stdin (:obj:`bytes`, file descriptor, or None): The bytes to provide to
the subprocess on its standard input stream, or ``None`` if the
subprocess's standard input should come from the same place as
the parent Trio process's standard input. As is the case with
the :mod:`subprocess` module, you can also pass a
file descriptor or an object with a ``fileno()`` method,
in which case the subprocess's standard input will come from
that file.
capture_stdout (bool): If true, capture the bytes that the subprocess
writes to its standard output stream and return them in the
:attr:`~subprocess.CompletedProcess.stdout` attribute
of the returned :class:`~subprocess.CompletedProcess` object.
capture_stderr (bool): If true, capture the bytes that the subprocess
writes to its standard error stream and return them in the
:attr:`~subprocess.CompletedProcess.stderr` attribute
of the returned :class:`~subprocess.CompletedProcess` object.
check (bool): If false, don't validate that the subprocess exits
successfully. You should be sure to check the
``returncode`` attribute of the returned object if you pass
``check=False``, so that errors don't pass silently.
deliver_cancel (async function or None): If `run_process` is cancelled,
then it needs to kill the child process. There are multiple ways to
do this, so we let you customize it.
If you pass None (the default), then the behavior depends on the
platform:
- On Windows, Trio calls ``TerminateProcess``, which should kill the
process immediately.
- On Unix-likes, the default behavior is to send a ``SIGTERM``, wait
5 seconds, and send a ``SIGKILL``.
Alternatively, you can customize this behavior by passing in an
arbitrary async function, which will be called with the `Process`
object as an argument. For example, the default Unix behavior could
be implemented like this::
async def my_deliver_cancel(process):
process.send_signal(signal.SIGTERM)
await trio.sleep(5)
process.send_signal(signal.SIGKILL)
When the process actually exits, the ``deliver_cancel`` function
will automatically be cancelled – so if the process exits after
``SIGTERM``, then we'll never reach the ``SIGKILL``.
In any case, `run_process` will always wait for the child process to
exit before raising `Cancelled`.
**options: :func:`run_process` also accepts any :ref:`general subprocess
options <subprocess-options>` and passes them on to the
:class:`~trio.Process` constructor. This includes the
``stdout`` and ``stderr`` options, which provide additional
redirection possibilities such as ``stderr=subprocess.STDOUT``,
``stdout=subprocess.DEVNULL``, or file descriptors.
Returns:
A :class:`subprocess.CompletedProcess` instance describing the
return code and outputs.
Raises:
UnicodeError: if ``stdin`` is specified as a Unicode string, rather
than bytes
ValueError: if multiple redirections are specified for the same
stream, e.g., both ``capture_stdout=True`` and
``stdout=subprocess.DEVNULL``
subprocess.CalledProcessError: if ``check=False`` is not passed
and the process exits with a nonzero exit status
OSError: if an error is encountered starting or communicating with
the process
.. note:: The child process runs in the same process group as the parent
Trio process, so a Ctrl+C will be delivered simultaneously to both
parent and child. If you don't want this behavior, consult your
platform's documentation for starting child processes in a different
process group.
"""
if isinstance(stdin, str):
raise UnicodeError("process stdin must be bytes, not str")
if stdin == subprocess.PIPE:
raise ValueError(
"stdin=subprocess.PIPE doesn't make sense since the pipe "
"is internal to run_process(); pass the actual data you "
"want to send over that pipe instead"
)
if isinstance(stdin, (bytes, bytearray, memoryview)):
input = stdin
options["stdin"] = subprocess.PIPE
else:
# stdin should be something acceptable to Process
# (None, DEVNULL, a file descriptor, etc) and Process
# will raise if it's not
input = None
options["stdin"] = stdin
if capture_stdout:
if "stdout" in options:
raise ValueError("can't specify both stdout and capture_stdout")
options["stdout"] = subprocess.PIPE
if capture_stderr:
if "stderr" in options:
raise ValueError("can't specify both stderr and capture_stderr")
options["stderr"] = subprocess.PIPE
if deliver_cancel is None:
if os.name == "nt":
deliver_cancel = _windows_deliver_cancel
else:
assert os.name == "posix"
deliver_cancel = _posix_deliver_cancel
stdout_chunks = []
stderr_chunks = []
async with await open_process(command, **options) as proc:
async def feed_input():
async with proc.stdin:
try:
await proc.stdin.send_all(input)
except trio.BrokenResourceError:
pass
async def read_output(stream, chunks):
async with stream:
async for chunk in stream:
chunks.append(chunk)
async with trio.open_nursery() as nursery:
if proc.stdin is not None:
nursery.start_soon(feed_input)
if proc.stdout is not None:
nursery.start_soon(read_output, proc.stdout, stdout_chunks)
if proc.stderr is not None:
nursery.start_soon(read_output, proc.stderr, stderr_chunks)
try:
await proc.wait()
except trio.Cancelled:
with trio.CancelScope(shield=True):
killer_cscope = trio.CancelScope(shield=True)
async def killer():
with killer_cscope:
await deliver_cancel(proc)
nursery.start_soon(killer)
await proc.wait()
killer_cscope.cancel()
raise
stdout = b"".join(stdout_chunks) if proc.stdout is not None else None
stderr = b"".join(stderr_chunks) if proc.stderr is not None else None
if proc.returncode and check:
raise subprocess.CalledProcessError(
proc.returncode, proc.args, output=stdout, stderr=stderr
)
else:
return subprocess.CompletedProcess(proc.args, proc.returncode, stdout, stderr) | [
"async",
"def",
"run_process",
"(",
"command",
",",
"*",
",",
"stdin",
"=",
"b\"\"",
",",
"capture_stdout",
"=",
"False",
",",
"capture_stderr",
"=",
"False",
",",
"check",
"=",
"True",
",",
"deliver_cancel",
"=",
"None",
",",
"*",
"*",
"options",
",",
")",
":",
"if",
"isinstance",
"(",
"stdin",
",",
"str",
")",
":",
"raise",
"UnicodeError",
"(",
"\"process stdin must be bytes, not str\"",
")",
"if",
"stdin",
"==",
"subprocess",
".",
"PIPE",
":",
"raise",
"ValueError",
"(",
"\"stdin=subprocess.PIPE doesn't make sense since the pipe \"",
"\"is internal to run_process(); pass the actual data you \"",
"\"want to send over that pipe instead\"",
")",
"if",
"isinstance",
"(",
"stdin",
",",
"(",
"bytes",
",",
"bytearray",
",",
"memoryview",
")",
")",
":",
"input",
"=",
"stdin",
"options",
"[",
"\"stdin\"",
"]",
"=",
"subprocess",
".",
"PIPE",
"else",
":",
"# stdin should be something acceptable to Process",
"# (None, DEVNULL, a file descriptor, etc) and Process",
"# will raise if it's not",
"input",
"=",
"None",
"options",
"[",
"\"stdin\"",
"]",
"=",
"stdin",
"if",
"capture_stdout",
":",
"if",
"\"stdout\"",
"in",
"options",
":",
"raise",
"ValueError",
"(",
"\"can't specify both stdout and capture_stdout\"",
")",
"options",
"[",
"\"stdout\"",
"]",
"=",
"subprocess",
".",
"PIPE",
"if",
"capture_stderr",
":",
"if",
"\"stderr\"",
"in",
"options",
":",
"raise",
"ValueError",
"(",
"\"can't specify both stderr and capture_stderr\"",
")",
"options",
"[",
"\"stderr\"",
"]",
"=",
"subprocess",
".",
"PIPE",
"if",
"deliver_cancel",
"is",
"None",
":",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"deliver_cancel",
"=",
"_windows_deliver_cancel",
"else",
":",
"assert",
"os",
".",
"name",
"==",
"\"posix\"",
"deliver_cancel",
"=",
"_posix_deliver_cancel",
"stdout_chunks",
"=",
"[",
"]",
"stderr_chunks",
"=",
"[",
"]",
"async",
"with",
"await",
"open_process",
"(",
"command",
",",
"*",
"*",
"options",
")",
"as",
"proc",
":",
"async",
"def",
"feed_input",
"(",
")",
":",
"async",
"with",
"proc",
".",
"stdin",
":",
"try",
":",
"await",
"proc",
".",
"stdin",
".",
"send_all",
"(",
"input",
")",
"except",
"trio",
".",
"BrokenResourceError",
":",
"pass",
"async",
"def",
"read_output",
"(",
"stream",
",",
"chunks",
")",
":",
"async",
"with",
"stream",
":",
"async",
"for",
"chunk",
"in",
"stream",
":",
"chunks",
".",
"append",
"(",
"chunk",
")",
"async",
"with",
"trio",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"if",
"proc",
".",
"stdin",
"is",
"not",
"None",
":",
"nursery",
".",
"start_soon",
"(",
"feed_input",
")",
"if",
"proc",
".",
"stdout",
"is",
"not",
"None",
":",
"nursery",
".",
"start_soon",
"(",
"read_output",
",",
"proc",
".",
"stdout",
",",
"stdout_chunks",
")",
"if",
"proc",
".",
"stderr",
"is",
"not",
"None",
":",
"nursery",
".",
"start_soon",
"(",
"read_output",
",",
"proc",
".",
"stderr",
",",
"stderr_chunks",
")",
"try",
":",
"await",
"proc",
".",
"wait",
"(",
")",
"except",
"trio",
".",
"Cancelled",
":",
"with",
"trio",
".",
"CancelScope",
"(",
"shield",
"=",
"True",
")",
":",
"killer_cscope",
"=",
"trio",
".",
"CancelScope",
"(",
"shield",
"=",
"True",
")",
"async",
"def",
"killer",
"(",
")",
":",
"with",
"killer_cscope",
":",
"await",
"deliver_cancel",
"(",
"proc",
")",
"nursery",
".",
"start_soon",
"(",
"killer",
")",
"await",
"proc",
".",
"wait",
"(",
")",
"killer_cscope",
".",
"cancel",
"(",
")",
"raise",
"stdout",
"=",
"b\"\"",
".",
"join",
"(",
"stdout_chunks",
")",
"if",
"proc",
".",
"stdout",
"is",
"not",
"None",
"else",
"None",
"stderr",
"=",
"b\"\"",
".",
"join",
"(",
"stderr_chunks",
")",
"if",
"proc",
".",
"stderr",
"is",
"not",
"None",
"else",
"None",
"if",
"proc",
".",
"returncode",
"and",
"check",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"proc",
".",
"returncode",
",",
"proc",
".",
"args",
",",
"output",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
")",
"else",
":",
"return",
"subprocess",
".",
"CompletedProcess",
"(",
"proc",
".",
"args",
",",
"proc",
".",
"returncode",
",",
"stdout",
",",
"stderr",
")"
] | [
418,
0
] | [
649,
86
] | python | en | ['en', 'en', 'en'] | True |
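Typical Unix usage, showing both the default ``check=True`` behavior and opting out of it::

    import trio

    async def main():
        # Capture stdout; a nonzero exit status would raise subprocess.CalledProcessError.
        result = await trio.run_process(["echo", "hi"], capture_stdout=True)
        print(result.returncode, result.stdout)   # 0 b'hi\n'

        # With check=False, failures are returned instead of raised.
        result = await trio.run_process(["false"], check=False)
        assert result.returncode != 0

    trio.run(main)
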
Process.returncode | (self) | The exit status of the process (an integer), or ``None`` if it's
still running.
By convention, a return code of zero indicates success. On
UNIX, negative values indicate termination due to a signal,
e.g., -11 if terminated by signal 11 (``SIGSEGV``). On
Windows, a process that exits due to a call to
:meth:`Process.terminate` will have an exit status of 1.
Unlike the standard library `subprocess.Popen.returncode`, you don't
have to call `poll` or `wait` to update this attribute; it's
automatically updated as needed, and will always give you the latest
information.
| The exit status of the process (an integer), or ``None`` if it's
still running. | def returncode(self):
"""The exit status of the process (an integer), or ``None`` if it's
still running.
By convention, a return code of zero indicates success. On
UNIX, negative values indicate termination due to a signal,
e.g., -11 if terminated by signal 11 (``SIGSEGV``). On
Windows, a process that exits due to a call to
:meth:`Process.terminate` will have an exit status of 1.
Unlike the standard library `subprocess.Popen.returncode`, you don't
have to call `poll` or `wait` to update this attribute; it's
automatically updated as needed, and will always give you the latest
information.
"""
result = self._proc.poll()
if result is not None:
self._close_pidfd()
return result | [
"def",
"returncode",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"_proc",
".",
"poll",
"(",
")",
"if",
"result",
"is",
"not",
"None",
":",
"self",
".",
"_close_pidfd",
"(",
")",
"return",
"result"
] | [
161,
4
] | [
180,
21
] | python | en | ['en', 'en', 'en'] | True |
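A quick demonstration that the attribute refreshes on its own, with no explicit ``poll()`` call::

    import trio

    async def main():
        proc = await trio.open_process(["sleep", "0.2"])
        print(proc.returncode)   # None: still running
        await proc.wait()
        print(proc.returncode)   # 0: updated automatically once reaped

    trio.run(main)
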
Process.aclose | (self) | Close any pipes we have to the process (both input and output)
and wait for it to exit.
If cancelled, kills the process and waits for it to finish
exiting before propagating the cancellation.
| Close any pipes we have to the process (both input and output)
and wait for it to exit. | async def aclose(self):
"""Close any pipes we have to the process (both input and output)
and wait for it to exit.
If cancelled, kills the process and waits for it to finish
exiting before propagating the cancellation.
"""
with trio.CancelScope(shield=True):
if self.stdin is not None:
await self.stdin.aclose()
if self.stdout is not None:
await self.stdout.aclose()
if self.stderr is not None:
await self.stderr.aclose()
try:
await self.wait()
finally:
if self._proc.returncode is None:
self.kill()
with trio.CancelScope(shield=True):
await self.wait() | [
"async",
"def",
"aclose",
"(",
"self",
")",
":",
"with",
"trio",
".",
"CancelScope",
"(",
"shield",
"=",
"True",
")",
":",
"if",
"self",
".",
"stdin",
"is",
"not",
"None",
":",
"await",
"self",
".",
"stdin",
".",
"aclose",
"(",
")",
"if",
"self",
".",
"stdout",
"is",
"not",
"None",
":",
"await",
"self",
".",
"stdout",
".",
"aclose",
"(",
")",
"if",
"self",
".",
"stderr",
"is",
"not",
"None",
":",
"await",
"self",
".",
"stderr",
".",
"aclose",
"(",
")",
"try",
":",
"await",
"self",
".",
"wait",
"(",
")",
"finally",
":",
"if",
"self",
".",
"_proc",
".",
"returncode",
"is",
"None",
":",
"self",
".",
"kill",
"(",
")",
"with",
"trio",
".",
"CancelScope",
"(",
"shield",
"=",
"True",
")",
":",
"await",
"self",
".",
"wait",
"(",
")"
] | [
182,
4
] | [
202,
37
] | python | en | ['en', 'en', 'en'] | True |
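Because ``aclose`` backstops cancellation with ``kill()``, wrapping a process in ``async with`` inside a cancel scope gives clean teardown. A Unix sketch::

    import trio

    async def main():
        with trio.move_on_after(0.5):
            async with await trio.open_process(["sleep", "60"]) as proc:
                await proc.wait()
        # The deadline expired: aclose() killed the child and reaped it.
        print(proc.returncode)   # -9 (SIGKILL) on Unix

    trio.run(main)
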
Process.wait | (self) | Block until the process exits.
Returns:
The exit status of the process; see :attr:`returncode`.
| Block until the process exits. | async def wait(self):
"""Block until the process exits.
Returns:
The exit status of the process; see :attr:`returncode`.
"""
async with self._wait_lock:
if self.poll() is None:
if self._pidfd is not None:
await trio.lowlevel.wait_readable(self._pidfd)
else:
await wait_child_exiting(self)
# We have to use .wait() here, not .poll(), because on macOS
# (and maybe other systems, who knows), there's a race
# condition inside the kernel that creates a tiny window where
# kqueue reports that the process has exited, but
# waitpid(WNOHANG) can't yet reap it. So this .wait() may
# actually block for a tiny fraction of a second.
self._proc.wait()
self._close_pidfd()
assert self._proc.returncode is not None
return self._proc.returncode | [
"async",
"def",
"wait",
"(",
"self",
")",
":",
"async",
"with",
"self",
".",
"_wait_lock",
":",
"if",
"self",
".",
"poll",
"(",
")",
"is",
"None",
":",
"if",
"self",
".",
"_pidfd",
"is",
"not",
"None",
":",
"await",
"trio",
".",
"lowlevel",
".",
"wait_readable",
"(",
"self",
".",
"_pidfd",
")",
"else",
":",
"await",
"wait_child_exiting",
"(",
"self",
")",
"# We have to use .wait() here, not .poll(), because on macOS",
"# (and maybe other systems, who knows), there's a race",
"# condition inside the kernel that creates a tiny window where",
"# kqueue reports that the process has exited, but",
"# waitpid(WNOHANG) can't yet reap it. So this .wait() may",
"# actually block for a tiny fraction of a second.",
"self",
".",
"_proc",
".",
"wait",
"(",
")",
"self",
".",
"_close_pidfd",
"(",
")",
"assert",
"self",
".",
"_proc",
".",
"returncode",
"is",
"not",
"None",
"return",
"self",
".",
"_proc",
".",
"returncode"
] | [
209,
4
] | [
230,
36
] | python | en | ['en', 'ca', 'en'] | True |
Process.poll | (self) | Returns the exit status of the process (an integer), or ``None`` if
it's still running.
Note that on Trio (unlike the standard library `subprocess.Popen`),
``process.poll()`` and ``process.returncode`` always give the same
result. See `returncode` for more details. This method is only
included to make it easier to port code from `subprocess`.
| Returns the exit status of the process (an integer), or ``None`` if
it's still running. | def poll(self):
"""Returns the exit status of the process (an integer), or ``None`` if
it's still running.
Note that on Trio (unlike the standard library `subprocess.Popen`),
``process.poll()`` and ``process.returncode`` always give the same
result. See `returncode` for more details. This method is only
included to make it easier to port code from `subprocess`.
"""
return self.returncode | [
"def",
"poll",
"(",
"self",
")",
":",
"return",
"self",
".",
"returncode"
] | [
232,
4
] | [
242,
30
] | python | en | ['en', 'en', 'en'] | True |
Process.send_signal | (self, sig) | Send signal ``sig`` to the process.
On UNIX, ``sig`` may be any signal defined in the
:mod:`signal` module, such as ``signal.SIGINT`` or
``signal.SIGTERM``. On Windows, it may be anything accepted by
the standard library :meth:`subprocess.Popen.send_signal`.
| Send signal ``sig`` to the process. | def send_signal(self, sig):
"""Send signal ``sig`` to the process.
On UNIX, ``sig`` may be any signal defined in the
:mod:`signal` module, such as ``signal.SIGINT`` or
``signal.SIGTERM``. On Windows, it may be anything accepted by
the standard library :meth:`subprocess.Popen.send_signal`.
"""
self._proc.send_signal(sig) | [
"def",
"send_signal",
"(",
"self",
",",
"sig",
")",
":",
"self",
".",
"_proc",
".",
"send_signal",
"(",
"sig",
")"
] | [
244,
4
] | [
252,
35
] | python | en | ['en', 'en', 'en'] | True |
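For example, on Unix::

    import signal
    import trio

    async def main():
        proc = await trio.open_process(["sleep", "60"])
        proc.send_signal(signal.SIGINT)   # equivalent to pressing Ctrl+C in the child
        print(await proc.wait())          # -2 on Unix: terminated by SIGINT

    trio.run(main)
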
Process.terminate | (self) | Terminate the process, politely if possible.
On UNIX, this is equivalent to
``send_signal(signal.SIGTERM)``; by convention this requests
graceful termination, but a misbehaving or buggy process might
ignore it. On Windows, :meth:`terminate` forcibly terminates the
process in the same manner as :meth:`kill`.
| Terminate the process, politely if possible. | def terminate(self):
"""Terminate the process, politely if possible.
On UNIX, this is equivalent to
``send_signal(signal.SIGTERM)``; by convention this requests
graceful termination, but a misbehaving or buggy process might
ignore it. On Windows, :meth:`terminate` forcibly terminates the
process in the same manner as :meth:`kill`.
"""
self._proc.terminate() | [
"def",
"terminate",
"(",
"self",
")",
":",
"self",
".",
"_proc",
".",
"terminate",
"(",
")"
] | [
254,
4
] | [
263,
30
] | python | en | ['en', 'en', 'en'] | True |
Process.kill | (self) | Immediately terminate the process.
On UNIX, this is equivalent to
``send_signal(signal.SIGKILL)``. On Windows, it calls
``TerminateProcess``. In both cases, the process cannot
prevent itself from being killed, but the termination will be
delivered asynchronously; use :meth:`wait` if you want to
ensure the process is actually dead before proceeding.
| Immediately terminate the process. | def kill(self):
"""Immediately terminate the process.
On UNIX, this is equivalent to
``send_signal(signal.SIGKILL)``. On Windows, it calls
``TerminateProcess``. In both cases, the process cannot
prevent itself from being killed, but the termination will be
delivered asynchronously; use :meth:`wait` if you want to
ensure the process is actually dead before proceeding.
"""
self._proc.kill() | [
"def",
"kill",
"(",
"self",
")",
":",
"self",
".",
"_proc",
".",
"kill",
"(",
")"
] | [
265,
4
] | [
275,
25
] | python | en | ['en', 'en', 'en'] | True |
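One way to combine the three methods above into a graceful-then-forceful shutdown, mirroring the default Unix ``deliver_cancel`` behavior described under `run_process` (the helper name is illustrative)::

    import trio

    async def shutdown(proc, grace_period=5):
        proc.terminate()                       # polite request first
        with trio.move_on_after(grace_period):
            await proc.wait()
            return
        proc.kill()                            # grace period expired; force it
        await proc.wait()
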
BaseLearner.bootstrap | (self, X, treatment, y, p=None, size=10000) | Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population. | Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population. | def bootstrap(self, X, treatment, y, p=None, size=10000):
"""Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population."""
idxs = np.random.choice(np.arange(0, X.shape[0]), size=size)
X_b = X[idxs]
if p is not None:
p_b = {group: _p[idxs] for group, _p in p.items()}
else:
p_b = None
treatment_b = treatment[idxs]
y_b = y[idxs]
self.fit(X=X_b, treatment=treatment_b, y=y_b, p=p_b)
return self.predict(X=X, p=p) | [
"def",
"bootstrap",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"size",
"=",
"10000",
")",
":",
"idxs",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
",",
"size",
"=",
"size",
")",
"X_b",
"=",
"X",
"[",
"idxs",
"]",
"if",
"p",
"is",
"not",
"None",
":",
"p_b",
"=",
"{",
"group",
":",
"_p",
"[",
"idxs",
"]",
"for",
"group",
",",
"_p",
"in",
"p",
".",
"items",
"(",
")",
"}",
"else",
":",
"p_b",
"=",
"None",
"treatment_b",
"=",
"treatment",
"[",
"idxs",
"]",
"y_b",
"=",
"y",
"[",
"idxs",
"]",
"self",
".",
"fit",
"(",
"X",
"=",
"X_b",
",",
"treatment",
"=",
"treatment_b",
",",
"y",
"=",
"y_b",
",",
"p",
"=",
"p_b",
")",
"return",
"self",
".",
"predict",
"(",
"X",
"=",
"X",
",",
"p",
"=",
"p",
")"
] | [
32,
4
] | [
45,
37
] | python | en | ['en', 'en', 'en'] | True |
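A sketch of using repeated ``bootstrap`` calls to build percentile confidence intervals for per-unit treatment effects; the ``BaseSRegressor`` import path and the synthetic data are assumptions, not part of the source above::

    import numpy as np
    from sklearn.linear_model import LinearRegression
    from causalml.inference.meta import BaseSRegressor  # assumed import path

    rng = np.random.default_rng(0)
    X = rng.normal(size=(2000, 3))
    treatment = rng.choice(["control", "treatment"], size=2000)
    y = X[:, 0] + (treatment == "treatment") * 1.0 + rng.normal(size=2000)

    learner = BaseSRegressor(LinearRegression(), control_name="control")
    learner.fit(X, treatment, y)
    draws = np.stack([learner.bootstrap(X, treatment, y, size=1000) for _ in range(50)])
    lower, upper = np.percentile(draws, [2.5, 97.5], axis=0)  # 95% CI per unit
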
BaseLearner._format_p | (p, t_groups) | Format propensity scores into a dictionary of {treatment group: propensity scores}.
Args:
p (np.ndarray, pd.Series, or dict): propensity scores
t_groups (list): treatment group names.
Returns:
dict of {treatment group: propensity scores}
| Format propensity scores into a dictionary of {treatment group: propensity scores}. | def _format_p(p, t_groups):
"""Format propensity scores into a dictionary of {treatment group: propensity scores}.
Args:
p (np.ndarray, pd.Series, or dict): propensity scores
t_groups (list): treatment group names.
Returns:
dict of {treatment group: propensity scores}
"""
check_p_conditions(p, t_groups)
if isinstance(p, (np.ndarray, pd.Series)):
treatment_name = t_groups[0]
p = {treatment_name: convert_pd_to_np(p)}
elif isinstance(p, dict):
p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}
return p | [
"def",
"_format_p",
"(",
"p",
",",
"t_groups",
")",
":",
"check_p_conditions",
"(",
"p",
",",
"t_groups",
")",
"if",
"isinstance",
"(",
"p",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"treatment_name",
"=",
"t_groups",
"[",
"0",
"]",
"p",
"=",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"p",
")",
"}",
"elif",
"isinstance",
"(",
"p",
",",
"dict",
")",
":",
"p",
"=",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"_p",
")",
"for",
"treatment_name",
",",
"_p",
"in",
"p",
".",
"items",
"(",
")",
"}",
"return",
"p"
] | [
48,
4
] | [
66,
16
] | python | en | ['en', 'en', 'en'] | True |
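The normalization is easiest to see with a bare array, which gets wrapped under the single treatment-group name (assuming ``_format_p`` is exposed as a static method and the import path shown)::

    import numpy as np
    from causalml.inference.meta.base import BaseLearner  # assumed import path

    p = np.full(5, 0.5)
    print(BaseLearner._format_p(p, t_groups=["treatment"]))
    # expected: {'treatment': array([0.5, 0.5, 0.5, 0.5, 0.5])}
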
BaseLearner._set_propensity_models | (self, X, treatment, y) | Set self.propensity and self.propensity_models.
It trains propensity models for all treatment groups, saves them in self.propensity_models, and
saves propensity scores in self.propensity in dictionaries with treatment groups as keys.
It will use self.model_p if available to train propensity models. Otherwise, it will use a default
PropensityModel (i.e. ElasticNetPropensityModel).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
| Set self.propensity and self.propensity_models. | def _set_propensity_models(self, X, treatment, y):
"""Set self.propensity and self.propensity_models.
    It trains propensity models for all treatment groups, saves them in self.propensity_models, and
    saves propensity scores in self.propensity in dictionaries with treatment groups as keys.
It will use self.model_p if available to train propensity models. Otherwise, it will use a default
PropensityModel (i.e. ElasticNetPropensityModel).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
"""
logger.info('Generating propensity score')
p = dict()
p_model = dict()
for group in self.t_groups:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
X_filt = X[mask]
w_filt = (treatment_filt == group).astype(int)
w = (treatment == group).astype(int)
propensity_model = self.model_p if hasattr(self, 'model_p') else None
p[group], p_model[group] = compute_propensity_score(X=X_filt, treatment=w_filt,
p_model=propensity_model,
X_pred=X, treatment_pred=w)
self.propensity_model = p_model
self.propensity = p | [
"def",
"_set_propensity_models",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
")",
":",
"logger",
".",
"info",
"(",
"'Generating propensity score'",
")",
"p",
"=",
"dict",
"(",
")",
"p_model",
"=",
"dict",
"(",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"X_filt",
"=",
"X",
"[",
"mask",
"]",
"w_filt",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"w",
"=",
"(",
"treatment",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"propensity_model",
"=",
"self",
".",
"model_p",
"if",
"hasattr",
"(",
"self",
",",
"'model_p'",
")",
"else",
"None",
"p",
"[",
"group",
"]",
",",
"p_model",
"[",
"group",
"]",
"=",
"compute_propensity_score",
"(",
"X",
"=",
"X_filt",
",",
"treatment",
"=",
"w_filt",
",",
"p_model",
"=",
"propensity_model",
",",
"X_pred",
"=",
"X",
",",
"treatment_pred",
"=",
"w",
")",
"self",
".",
"propensity_model",
"=",
"p_model",
"self",
".",
"propensity",
"=",
"p"
] | [
68,
4
] | [
96,
27
] | python | en | ['en', 'en', 'en'] | True |
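A self-contained sketch of the per-group filtering idea used in _set_propensity_models: for each treatment group, only that group's rows and control rows are kept, a binary indicator is fitted, and the fitted model then scores the full matrix (mirroring X_pred=X above). The data and the logistic model are illustrative assumptions, not the library's compute_propensity_score:

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
treatment = rng.choice(['control', 'treatment_a', 'treatment_b'], size=200)
control_name = 'control'

p, p_model = {}, {}
for group in ['treatment_a', 'treatment_b']:
    mask = (treatment == group) | (treatment == control_name)
    w_filt = (treatment[mask] == group).astype(int)   # 1 = treated, 0 = control
    model = LogisticRegression().fit(X[mask], w_filt)
    p[group] = model.predict_proba(X)[:, 1]           # score every row
    p_model[group] = model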
BaseLearner.get_importance | (self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
test_size=0.3, random_state=None) |
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on the estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if no estimator is provided, it uses lightgbm's LGBMRegressor, with "gain" as the
importance type
- permutation (calculates importance based on the mean decrease in accuracy when a feature column is permuted;
the estimator can be of any form)
Hint: for permutation, downsample the data for better performance, especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
|
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method. | def get_importance(self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
test_size=0.3, random_state=None):
"""
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on the estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if no estimator is provided, it uses lightgbm's LGBMRegressor, with "gain" as the
importance type
- permutation (calculates importance based on the mean decrease in accuracy when a feature column is permuted;
the estimator can be of any form)
Hint: for permutation, downsample the data for better performance, especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
"""
explainer = Explainer(method=method, control_name=self.control_name,
X=X, tau=tau, model_tau=model_tau_feature,
features=features, classes=self._classes, normalize=normalize,
test_size=test_size, random_state=random_state)
return explainer.get_importance() | [
"def",
"get_importance",
"(",
"self",
",",
"X",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"method",
"=",
"'auto'",
",",
"normalize",
"=",
"True",
",",
"test_size",
"=",
"0.3",
",",
"random_state",
"=",
"None",
")",
":",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"method",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
"normalize",
"=",
"normalize",
",",
"test_size",
"=",
"test_size",
",",
"random_state",
"=",
"random_state",
")",
"return",
"explainer",
".",
"get_importance",
"(",
")"
] | [
98,
4
] | [
129,
41
] | python | en | ['en', 'error', 'th'] | False |
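These records appear to come from causalml's meta-learner base class; assuming so, a hedged usage sketch with one concrete subclass (the class name, import path, fit_predict signature, and data below are assumptions for illustration, not confirmed by this record):

import numpy as np
from causalml.inference.meta import BaseSRegressor  # assumed import path
from lightgbm import LGBMRegressor

rng = np.random.default_rng(42)
X = rng.normal(size=(500, 5))
treatment = rng.choice(['control', 'treatment_a'], size=500)
y = rng.normal(size=500)

learner = BaseSRegressor(LGBMRegressor(), control_name='control')
tau = learner.fit_predict(X, treatment, y)        # estimated effects

importance = learner.get_importance(X=X, tau=tau,
                                    method='permutation',
                                    random_state=42)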
BaseLearner.get_shap_values | (self, X=None, model_tau_feature=None, tau=None, features=None) |
Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
|
Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
| def get_shap_values(self, X=None, model_tau_feature=None, tau=None, features=None):
"""
Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
"""
explainer = Explainer(method='shapley', control_name=self.control_name,
X=X, tau=tau, model_tau=model_tau_feature,
features=features, classes=self._classes)
return explainer.get_shap_values() | [
"def",
"get_shap_values",
"(",
"self",
",",
"X",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"features",
"=",
"None",
")",
":",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"'shapley'",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"classes",
"=",
"self",
".",
"_classes",
")",
"return",
"explainer",
".",
"get_shap_values",
"(",
")"
] | [
131,
4
] | [
143,
42
] | python | en | ['en', 'error', 'th'] | False |
BaseLearner.plot_importance | (self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
test_size=0.3, random_state=None) |
Builds a model (using X to predict estimated/actual tau), and then plots feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on the estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if no estimator is provided, it uses lightgbm's LGBMRegressor, with "gain" as the
importance type
- permutation (calculates importance based on the mean decrease in accuracy when a feature column is permuted;
the estimator can be of any form)
Hint: for permutation, downsample the data for better performance, especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
|
Builds a model (using X to predict estimated/actual tau), and then plots feature importances
based on a specified method. | def plot_importance(self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
test_size=0.3, random_state=None):
"""
Builds a model (using X to predict estimated/actual tau), and then plots feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on the estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if no estimator is provided, it uses lightgbm's LGBMRegressor, with "gain" as the
importance type
- permutation (calculates importance based on the mean decrease in accuracy when a feature column is permuted;
the estimator can be of any form)
Hint: for permutation, downsample the data for better performance, especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
"""
explainer = Explainer(method=method, control_name=self.control_name,
X=X, tau=tau, model_tau=model_tau_feature,
features=features, classes=self._classes, normalize=normalize,
test_size=test_size, random_state=random_state)
explainer.plot_importance() | [
"def",
"plot_importance",
"(",
"self",
",",
"X",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"method",
"=",
"'auto'",
",",
"normalize",
"=",
"True",
",",
"test_size",
"=",
"0.3",
",",
"random_state",
"=",
"None",
")",
":",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"method",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
"normalize",
"=",
"normalize",
",",
"test_size",
"=",
"test_size",
",",
"random_state",
"=",
"random_state",
")",
"explainer",
".",
"plot_importance",
"(",
")"
] | [
145,
4
] | [
176,
35
] | python | en | ['en', 'error', 'th'] | False |
BaseLearner.plot_shap_values | (self, X=None, tau=None, model_tau_feature=None, features=None, shap_dict=None, **kwargs) |
Plots distribution of shapley values.
If shapley values have been pre-computed, pass it through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix. Required if shap_dict is None.
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
|
Plots distribution of shapley values. | def plot_shap_values(self, X=None, tau=None, model_tau_feature=None, features=None, shap_dict=None, **kwargs):
"""
Plots distribution of shapley values.
If shapley values have been pre-computed, pass it through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix. Required if shap_dict is None.
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
"""
override_checks = False if shap_dict is None else True
explainer = Explainer(method='shapley', control_name=self.control_name,
X=X, tau=tau, model_tau=model_tau_feature,
features=features, override_checks=override_checks, classes=self._classes)
explainer.plot_shap_values(shap_dict=shap_dict) | [
"def",
"plot_shap_values",
"(",
"self",
",",
"X",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"shap_dict",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"override_checks",
"=",
"False",
"if",
"shap_dict",
"is",
"None",
"else",
"True",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"'shapley'",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"override_checks",
"=",
"override_checks",
",",
"classes",
"=",
"self",
".",
"_classes",
")",
"explainer",
".",
"plot_shap_values",
"(",
"shap_dict",
"=",
"shap_dict",
")"
] | [
178,
4
] | [
197,
55
] | python | en | ['en', 'error', 'th'] | False |
BaseLearner.plot_shap_dependence | (self, treatment_group, feature_idx, X, tau, model_tau_feature=None, features=None,
shap_dict=None, interaction_idx='auto', **kwargs) |
Plots dependency of shapley values for a specified feature, colored by an interaction feature.
If shapley values have been pre-computed, pass it through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
This plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects.
Args:
treatment_group (str or int): name of treatment group to create dependency plot on
feature_idx (str or int): feature index / name to create dependency plot on
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
interaction_idx (optional, str or int): feature index / name used in coloring scheme as interaction feature.
If "auto" then shap.common.approximate_interactions is used to pick what seems to be the
strongest interaction (note that to find the true strongest interaction you need to compute
the SHAP interaction values).
|
Plots dependency of shapley values for a specified feature, colored by an interaction feature. | def plot_shap_dependence(self, treatment_group, feature_idx, X, tau, model_tau_feature=None, features=None,
shap_dict=None, interaction_idx='auto', **kwargs):
"""
Plots dependency of shapley values for a specified feature, colored by an interaction feature.
If shapley values have been pre-computed, pass it through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
This plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects.
Args:
treatment_group (str or int): name of treatment group to create dependency plot on
feature_idx (str or int): feature index / name to create dependency plot on
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
interaction_idx (optional, str or int): feature index / name used in coloring scheme as interaction feature.
If "auto" then shap.common.approximate_interactions is used to pick what seems to be the
strongest interaction (note that to find the true strongest interaction you need to compute
the SHAP interaction values).
"""
override_checks = False if shap_dict is None else True
explainer = Explainer(method='shapley', control_name=self.control_name,
X=X, tau=tau, model_tau=model_tau_feature,
features=features, override_checks=override_checks,
classes=self._classes)
explainer.plot_shap_dependence(treatment_group=treatment_group,
feature_idx=feature_idx,
shap_dict=shap_dict,
interaction_idx=interaction_idx,
**kwargs) | [
"def",
"plot_shap_dependence",
"(",
"self",
",",
"treatment_group",
",",
"feature_idx",
",",
"X",
",",
"tau",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"shap_dict",
"=",
"None",
",",
"interaction_idx",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"override_checks",
"=",
"False",
"if",
"shap_dict",
"is",
"None",
"else",
"True",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"'shapley'",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"override_checks",
"=",
"override_checks",
",",
"classes",
"=",
"self",
".",
"_classes",
")",
"explainer",
".",
"plot_shap_dependence",
"(",
"treatment_group",
"=",
"treatment_group",
",",
"feature_idx",
"=",
"feature_idx",
",",
"shap_dict",
"=",
"shap_dict",
",",
"interaction_idx",
"=",
"interaction_idx",
",",
"*",
"*",
"kwargs",
")"
] | [
199,
4
] | [
235,
48
] | python | en | ['en', 'error', 'th'] | False |
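Continuing the same hedged causalml sketch (learner, X, and tau fitted as in the get_importance example above), one shap_dict can be computed once and reused across the plotting helpers to avoid recomputing shapley values:

shap_dict = learner.get_shap_values(X=X, tau=tau)
learner.plot_shap_values(X=X, tau=tau, shap_dict=shap_dict)
learner.plot_shap_dependence(treatment_group='treatment_a',  # assumed key
                             feature_idx=0,
                             X=X, tau=tau,
                             shap_dict=shap_dict)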
VimLexer.is_in | (self, w, mapping) | r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
| r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like:: | def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w:
return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False | [
"def",
"is_in",
"(",
"self",
",",
"w",
",",
"mapping",
")",
":",
"p",
"=",
"bisect",
"(",
"mapping",
",",
"(",
"w",
",",
")",
")",
"if",
"p",
">",
"0",
":",
"if",
"mapping",
"[",
"p",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"w",
"[",
":",
"len",
"(",
"mapping",
"[",
"p",
"-",
"1",
"]",
"[",
"0",
"]",
")",
"]",
"and",
"mapping",
"[",
"p",
"-",
"1",
"]",
"[",
"1",
"]",
"[",
":",
"len",
"(",
"w",
")",
"]",
"==",
"w",
":",
"return",
"True",
"if",
"p",
"<",
"len",
"(",
"mapping",
")",
":",
"return",
"mapping",
"[",
"p",
"]",
"[",
"0",
"]",
"==",
"w",
"[",
":",
"len",
"(",
"mapping",
"[",
"p",
"]",
"[",
"0",
"]",
")",
"]",
"and",
"mapping",
"[",
"p",
"]",
"[",
"1",
"]",
"[",
":",
"len",
"(",
"w",
")",
"]",
"==",
"w",
"return",
"False"
] | [
130,
4
] | [
151,
20
] | python | cy | ['en', 'cy', 'hi'] | False |
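The abbreviation test above relies on a list of (minimal_prefix, full_command) pairs sorted for bisect; a self-contained sketch of the same matching logic (the mapping entries are illustrative, not the real VimL keyword tables):

from bisect import bisect

mapping = [('ab', 'abbreviate'), ('ec', 'echo'), ('se', 'set')]

def is_keyword(w, mapping):
    p = bisect(mapping, (w,))
    for i in (p - 1, p):                      # only the neighbors can match
        if 0 <= i < len(mapping):
            short, full = mapping[i]
            # w must extend the minimal prefix and itself be a prefix of
            # the full command: 'abbre' matches ('ab', 'abbreviate')
            if short == w[:len(short)] and full[:len(w)] == w:
                return True
    return False

assert is_keyword('ab', mapping) and is_keyword('abbre', mapping)
assert not is_keyword('abc', mapping)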
_sql_session_rollback | (self, attr) |
Inject SQLDocumentStore at runtime to do a session rollback each time it is called. This allows catching
errors where an intended operation is still in a transaction, but not committed to the database.
|
Inject SQLDocumentStore at runtime to do a session rollback each time it is called. This allows catching
errors where an intended operation is still in a transaction, but not committed to the database.
| def _sql_session_rollback(self, attr):
"""
Inject SQLDocumentStore at runtime to do a session rollback each time it is called. This allows catching
errors where an intended operation is still in a transaction, but not committed to the database.
"""
method = object.__getattribute__(self, attr)
if callable(method):
try:
self.session.rollback()
except AttributeError:
pass
return method | [
"def",
"_sql_session_rollback",
"(",
"self",
",",
"attr",
")",
":",
"method",
"=",
"object",
".",
"__getattribute__",
"(",
"self",
",",
"attr",
")",
"if",
"callable",
"(",
"method",
")",
":",
"try",
":",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"method"
] | [
28,
0
] | [
40,
17
] | python | en | ['en', 'error', 'th'] | False |
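The helper above is written to replace a class's __getattribute__ wholesale, so every attribute access triggers a rollback before the access proceeds. A self-contained sketch of the injection (FakeStore and _Session are stand-ins; the real target is SQLDocumentStore):

class _Session:
    def rollback(self):
        print('session rolled back')

class FakeStore:
    def __init__(self):
        self.session = _Session()
    def write_documents(self):
        return 'ok'

# inject the hook class-wide, as a test suite would do at runtime
FakeStore.__getattribute__ = _sql_session_rollback

store = FakeStore()
store.write_documents()   # prints 'session rolled back' before running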
project | () | Project operations | Project operations | def project():
"""Project operations"""
pass | [
"def",
"project",
"(",
")",
":",
"pass"
] | [
14,
0
] | [
16,
8
] | python | en | ['en', 'en', 'en'] | False |
project_check_config | (ctx) | Check a config for validity and help with migrations. | Check a config for validity and help with migrations. | def project_check_config(ctx):
"""Check a config for validity and help with migrations."""
cli_message("Checking your config files for validity...\n")
directory = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
is_config_ok, error_message, context = do_config_check(directory)
if context:
toolkit.send_usage_message(
data_context=context, event="cli.project.check_config", success=True
)
if not is_config_ok:
cli_message("Unfortunately, your config appears to be invalid:\n")
cli_message("<red>{}</red>".format(error_message))
sys.exit(1)
cli_message("<green>Your config file appears valid!</green>") | [
"def",
"project_check_config",
"(",
"ctx",
")",
":",
"cli_message",
"(",
"\"Checking your config files for validity...\\n\"",
")",
"directory",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"is_config_ok",
",",
"error_message",
",",
"context",
"=",
"do_config_check",
"(",
"directory",
")",
"if",
"context",
":",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.project.check_config\"",
",",
"success",
"=",
"True",
")",
"if",
"not",
"is_config_ok",
":",
"cli_message",
"(",
"\"Unfortunately, your config appears to be invalid:\\n\"",
")",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"error_message",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"cli_message",
"(",
"\"<green>Your config file appears valid!</green>\"",
")"
] | [
21,
0
] | [
37,
65
] | python | en | ['en', 'en', 'en'] | True |
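A hedged sketch of exercising the command via Click's test runner; the root group's import path and the registered command name 'check-config' are assumptions, since the decorators are not shown in this record:

from click.testing import CliRunner
from great_expectations.cli import cli   # assumed import path

runner = CliRunner()
result = runner.invoke(cli, ['project', 'check-config'])
print(result.exit_code)   # 0 if the config validates, 1 otherwise
print(result.output)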
project_upgrade | (ctx) | Upgrade a project after installing the next Great Expectations major version. | Upgrade a project after installing the next Great Expectations major version. | def project_upgrade(ctx):
"""Upgrade a project after installing the next Great Expectations major version."""
cli_message("\nChecking project...")
cli_message(SECTION_SEPARATOR)
directory = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
if load_data_context_with_error_handling(
directory=directory, from_cli_upgrade_command=True
):
up_to_date_message = (
"Your project is up-to-date - no further upgrade is necessary.\n"
)
cli_message(f"<green>{up_to_date_message}</green>")
sys.exit(0) | [
"def",
"project_upgrade",
"(",
"ctx",
")",
":",
"cli_message",
"(",
"\"\\nChecking project...\"",
")",
"cli_message",
"(",
"SECTION_SEPARATOR",
")",
"directory",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"if",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"True",
")",
":",
"up_to_date_message",
"=",
"(",
"\"Your project is up-to-date - no further upgrade is necessary.\\n\"",
")",
"cli_message",
"(",
"f\"<green>{up_to_date_message}</green>\"",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | [
42,
0
] | [
56,
19
] | python | en | ['en', 'en', 'en'] | True |
binary_classification_loss | (concat_true, concat_pred) |
Implements a classification (binary cross-entropy) loss function for the DragonNet architecture.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): binary cross-entropy loss
|
Implements a classification (binary cross-entropy) loss function for the DragonNet architecture. | def binary_classification_loss(concat_true, concat_pred):
"""
Implements a classification (binary cross-entropy) loss function for the DragonNet architecture.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): binary cross-entropy loss
"""
t_true = concat_true[:, 1]
t_pred = concat_pred[:, 2]
t_pred = (t_pred + 0.001) / 1.002
losst = tf.reduce_sum(K.binary_crossentropy(t_true, t_pred))
return losst | [
"def",
"binary_classification_loss",
"(",
"concat_true",
",",
"concat_pred",
")",
":",
"t_true",
"=",
"concat_true",
"[",
":",
",",
"1",
"]",
"t_pred",
"=",
"concat_pred",
"[",
":",
",",
"2",
"]",
"t_pred",
"=",
"(",
"t_pred",
"+",
"0.001",
")",
"/",
"1.002",
"losst",
"=",
"tf",
".",
"reduce_sum",
"(",
"K",
".",
"binary_crossentropy",
"(",
"t_true",
",",
"t_pred",
")",
")",
"return",
"losst"
] | [
6,
0
] | [
23,
16
] | python | en | ['en', 'error', 'th'] | False |
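The (t_pred + 0.001) / 1.002 rescaling maps raw propensities from [0, 1] into roughly [0.000998, 0.999002], so the log terms stay finite even for saturated predictions. A numpy check of the same computation with illustrative values:

import numpy as np

t_true = np.array([1.0, 0.0, 1.0])
t_pred = np.array([1.0, 0.0, 0.7])        # raw head output, may hit 0/1

t_pred = (t_pred + 0.001) / 1.002         # squeeze away exact 0 and 1
bce = -(t_true * np.log(t_pred) + (1 - t_true) * np.log(1 - t_pred))
print(bce.sum())                          # finite, approximately 0.36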
regression_loss | (concat_true, concat_pred) |
Implements a regression (squared error) loss function for the DragonNet architecture.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): aggregated regression loss
|
Implements a regression (squared error) loss function for the DragonNet architecture. | def regression_loss(concat_true, concat_pred):
"""
Implements a regression (squared error) loss function for the DragonNet architecture.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): aggregated regression loss
"""
y_true = concat_true[:, 0]
t_true = concat_true[:, 1]
y0_pred = concat_pred[:, 0]
y1_pred = concat_pred[:, 1]
loss0 = tf.reduce_sum((1. - t_true) * tf.square(y_true - y0_pred))
loss1 = tf.reduce_sum(t_true * tf.square(y_true - y1_pred))
return loss0 + loss1 | [
"def",
"regression_loss",
"(",
"concat_true",
",",
"concat_pred",
")",
":",
"y_true",
"=",
"concat_true",
"[",
":",
",",
"0",
"]",
"t_true",
"=",
"concat_true",
"[",
":",
",",
"1",
"]",
"y0_pred",
"=",
"concat_pred",
"[",
":",
",",
"0",
"]",
"y1_pred",
"=",
"concat_pred",
"[",
":",
",",
"1",
"]",
"loss0",
"=",
"tf",
".",
"reduce_sum",
"(",
"(",
"1.",
"-",
"t_true",
")",
"*",
"tf",
".",
"square",
"(",
"y_true",
"-",
"y0_pred",
")",
")",
"loss1",
"=",
"tf",
".",
"reduce_sum",
"(",
"t_true",
"*",
"tf",
".",
"square",
"(",
"y_true",
"-",
"y1_pred",
")",
")",
"return",
"loss0",
"+",
"loss1"
] | [
26,
0
] | [
47,
24
] | python | en | ['en', 'error', 'th'] | False |
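Only the factual outcome head is penalized per sample: (1 - t) selects y0 errors for control rows and t selects y1 errors for treated rows, so each head learns from the units it can actually be checked against. A numpy sketch with assumed values:

import numpy as np

y_true  = np.array([2.0, 1.0, 3.0])
t_true  = np.array([1.0, 0.0, 1.0])
y0_pred = np.array([1.5, 1.2, 2.0])
y1_pred = np.array([2.5, 0.5, 3.0])

loss0 = ((1.0 - t_true) * (y_true - y0_pred) ** 2).sum()  # controls only
loss1 = (t_true * (y_true - y1_pred) ** 2).sum()          # treated only
print(loss0 + loss1)   # 0.04 + 0.25 = 0.29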
dragonnet_loss_binarycross | (concat_true, concat_pred) |
Implements regression + classification loss in one wrapper function.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): aggregated regression + classification loss
|
Implements regression + classification loss in one wrapper function. | def dragonnet_loss_binarycross(concat_true, concat_pred):
"""
Implements regression + classification loss in one wrapper function.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): aggregated regression + classification loss
"""
return regression_loss(concat_true, concat_pred) + binary_classification_loss(concat_true, concat_pred) | [
"def",
"dragonnet_loss_binarycross",
"(",
"concat_true",
",",
"concat_pred",
")",
":",
"return",
"regression_loss",
"(",
"concat_true",
",",
"concat_pred",
")",
"+",
"binary_classification_loss",
"(",
"concat_true",
",",
"concat_pred",
")"
] | [
50,
0
] | [
62,
107
] | python | en | ['en', 'error', 'th'] | False |
treatment_accuracy | (concat_true, concat_pred) |
Returns keras' binary_accuracy between treatment and prediction of propensity.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): binary accuracy
|
Returns keras' binary_accuracy between treatment and prediction of propensity. | def treatment_accuracy(concat_true, concat_pred):
"""
Returns keras' binary_accuracy between treatment and prediction of propensity.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): binary accuracy
"""
t_true = concat_true[:, 1]
t_pred = concat_pred[:, 2]
return binary_accuracy(t_true, t_pred) | [
"def",
"treatment_accuracy",
"(",
"concat_true",
",",
"concat_pred",
")",
":",
"t_true",
"=",
"concat_true",
"[",
":",
",",
"1",
"]",
"t_pred",
"=",
"concat_pred",
"[",
":",
",",
"2",
"]",
"return",
"binary_accuracy",
"(",
"t_true",
",",
"t_pred",
")"
] | [
65,
0
] | [
79,
42
] | python | en | ['en', 'error', 'th'] | False |
track_epsilon | (concat_true, concat_pred) |
Tracks the mean absolute value of epsilon.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): mean absolute value of epsilon
|
Tracks the mean absolute value of epsilon. | def track_epsilon(concat_true, concat_pred):
"""
Tracks the mean absolute value of epsilon.
Args:
- concat_true (tf.tensor): tensor of true samples, with shape (n_samples, 2)
Each row in concat_true is comprised of (y, treatment)
- concat_pred (tf.tensor): tensor of predictions, with shape (n_samples, 4)
Each row in concat_pred is comprised of (y0, y1, propensity, epsilon)
Returns:
- (float): mean absolute value of epsilon
"""
epsilons = concat_pred[:, 3]
return tf.abs(tf.reduce_mean(epsilons)) | [
"def",
"track_epsilon",
"(",
"concat_true",
",",
"concat_pred",
")",
":",
"epsilons",
"=",
"concat_pred",
"[",
":",
",",
"3",
"]",
"return",
"tf",
".",
"abs",
"(",
"tf",
".",
"reduce_mean",
"(",
"epsilons",
")",
")"
] | [
82,
0
] | [
95,
43
] | python | en | ['en', 'error', 'th'] | False |
make_tarreg_loss | (ratio=1., dragonnet_loss=dragonnet_loss_binarycross) |
Given a specified loss function, returns the same loss function with targeted regularization.
Args:
ratio (float): weight assigned to the targeted regularization loss component
dragonnet_loss (function): a loss function
Returns:
(function): loss function with targeted regularization, weighted by specified ratio
|
Given a specified loss function, returns the same loss function with targeted regularization. | def make_tarreg_loss(ratio=1., dragonnet_loss=dragonnet_loss_binarycross):
"""
Given a specified loss function, returns the same loss function with targeted regularization.
Args:
ratio (float): weight assigned to the targeted regularization loss component
dragonnet_loss (function): a loss function
Returns:
(function): loss function with targeted regularization, weighted by specified ratio
"""
def tarreg_ATE_unbounded_domain_loss(concat_true, concat_pred):
"""
Returns the loss function (specified in outer function) with targeted regularization.
"""
vanilla_loss = dragonnet_loss(concat_true, concat_pred)
y_true = concat_true[:, 0]
t_true = concat_true[:, 1]
y0_pred = concat_pred[:, 0]
y1_pred = concat_pred[:, 1]
t_pred = concat_pred[:, 2]
epsilons = concat_pred[:, 3]
t_pred = (t_pred + 0.01) / 1.02
# t_pred = tf.clip_by_value(t_pred,0.01, 0.99,name='t_pred')
y_pred = t_true * y1_pred + (1 - t_true) * y0_pred
h = t_true / t_pred - (1 - t_true) / (1 - t_pred)
y_pert = y_pred + epsilons * h
targeted_regularization = tf.reduce_sum(tf.square(y_true - y_pert))
# final
loss = vanilla_loss + ratio * targeted_regularization
return loss
return tarreg_ATE_unbounded_domain_loss | [
"def",
"make_tarreg_loss",
"(",
"ratio",
"=",
"1.",
",",
"dragonnet_loss",
"=",
"dragonnet_loss_binarycross",
")",
":",
"def",
"tarreg_ATE_unbounded_domain_loss",
"(",
"concat_true",
",",
"concat_pred",
")",
":",
"\"\"\"\n Returns the loss function (specified in outer function) with targeted regularization.\n \"\"\"",
"vanilla_loss",
"=",
"dragonnet_loss",
"(",
"concat_true",
",",
"concat_pred",
")",
"y_true",
"=",
"concat_true",
"[",
":",
",",
"0",
"]",
"t_true",
"=",
"concat_true",
"[",
":",
",",
"1",
"]",
"y0_pred",
"=",
"concat_pred",
"[",
":",
",",
"0",
"]",
"y1_pred",
"=",
"concat_pred",
"[",
":",
",",
"1",
"]",
"t_pred",
"=",
"concat_pred",
"[",
":",
",",
"2",
"]",
"epsilons",
"=",
"concat_pred",
"[",
":",
",",
"3",
"]",
"t_pred",
"=",
"(",
"t_pred",
"+",
"0.01",
")",
"/",
"1.02",
"# t_pred = tf.clip_by_value(t_pred,0.01, 0.99,name='t_pred')",
"y_pred",
"=",
"t_true",
"*",
"y1_pred",
"+",
"(",
"1",
"-",
"t_true",
")",
"*",
"y0_pred",
"h",
"=",
"t_true",
"/",
"t_pred",
"-",
"(",
"1",
"-",
"t_true",
")",
"/",
"(",
"1",
"-",
"t_pred",
")",
"y_pert",
"=",
"y_pred",
"+",
"epsilons",
"*",
"h",
"targeted_regularization",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"y_true",
"-",
"y_pert",
")",
")",
"# final",
"loss",
"=",
"vanilla_loss",
"+",
"ratio",
"*",
"targeted_regularization",
"return",
"loss",
"return",
"tarreg_ATE_unbounded_domain_loss"
] | [
98,
0
] | [
136,
43
] | python | en | ['en', 'error', 'th'] | False |
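The extra term perturbs the factual prediction along the "clever covariate" h = t/e(x) - (1-t)/(1-e(x)) and penalizes the residual of the perturbed prediction, which nudges the network toward a TMLE-style solution for the ATE. A numpy sketch of one evaluation of the regularizer (all values illustrative):

import numpy as np

y_true  = np.array([2.0, 1.0])
t_true  = np.array([1.0, 0.0])
y0_pred = np.array([1.5, 1.2])
y1_pred = np.array([2.5, 0.5])
t_pred  = (np.array([0.8, 0.3]) + 0.01) / 1.02   # squeezed propensity
epsilon = 0.05                                    # the trainable scalar

y_pred = t_true * y1_pred + (1 - t_true) * y0_pred
h = t_true / t_pred - (1 - t_true) / (1 - t_pred)
y_pert = y_pred + epsilon * h
targeted_reg = ((y_true - y_pert) ** 2).sum()     # added as ratio * this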
EpsilonLayer.__init__ | (self) |
Inherits from keras' Layer class.
|
Inherits from keras' Layer class.
| def __init__(self):
"""
Inherits from keras' Layer class.
"""
super(EpsilonLayer, self).__init__() | [
"def",
"__init__",
"(",
"self",
")",
":",
"super",
"(",
"EpsilonLayer",
",",
"self",
")",
".",
"__init__",
"(",
")"
] | [
143,
4
] | [
147,
44
] | python | en | ['en', 'error', 'th'] | False |
EpsilonLayer.build | (self, input_shape) |
Creates a trainable weight variable for this layer.
|
Creates a trainable weight variable for this layer.
| def build(self, input_shape):
"""
Creates a trainable weight variable for this layer.
"""
self.epsilon = self.add_weight(name='epsilon',
shape=[1, 1],
initializer='RandomNormal',
trainable=True)
super(EpsilonLayer, self).build(input_shape) | [
"def",
"build",
"(",
"self",
",",
"input_shape",
")",
":",
"self",
".",
"epsilon",
"=",
"self",
".",
"add_weight",
"(",
"name",
"=",
"'epsilon'",
",",
"shape",
"=",
"[",
"1",
",",
"1",
"]",
",",
"initializer",
"=",
"'RandomNormal'",
",",
"trainable",
"=",
"True",
")",
"super",
"(",
"EpsilonLayer",
",",
"self",
")",
".",
"build",
"(",
"input_shape",
")"
] | [
149,
4
] | [
157,
52
] | python | en | ['en', 'error', 'th'] | False |
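A minimal wiring sketch for the layer (the layer sizes and overall architecture are assumptions). Note that EpsilonLayer's call method is not shown in these records; the sketch assumes it broadcasts the scalar weight over the batch, e.g. self.epsilon * tf.ones_like(inputs)[:, 0:1]:

import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Dense, Input
from tensorflow.keras.models import Model

inputs = Input(shape=(25,))
x = Dense(200, activation='elu')(inputs)
y0 = Dense(1)(x)                        # control-outcome head
y1 = Dense(1)(x)                        # treated-outcome head
t = Dense(1, activation='sigmoid')(x)   # propensity head
eps = EpsilonLayer()(t)                 # contributes one trainable scalar

model = Model(inputs=inputs, outputs=Concatenate(axis=1)([y0, y1, t, eps]))
model.compile(optimizer='adam', loss=make_tarreg_loss(ratio=1.0),
              metrics=[treatment_accuracy, track_epsilon])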
test_glob_reader_generator | (basic_pandas_datasource, tmp_path_factory) | Provides an example of how the glob generator works: we specify our own
names for data_assets, and an associated glob; the generator
will take care of providing batches consisting of one file per
batch corresponding to the glob. | Provides an example of how the glob generator works: we specify our own
names for data_assets, and an associated glob; the generator
will take care of providing batches consisting of one file per
batch corresponding to the glob. | def test_glob_reader_generator(basic_pandas_datasource, tmp_path_factory):
"""Provides an example of how glob generator works: we specify our own
names for data_assets, and an associated glob; the generator
will take care of providing batches consisting of one file per
batch corresponding to the glob."""
basedir = str(tmp_path_factory.mktemp("test_glob_reader_generator"))
with open(os.path.join(basedir, "f1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f2.csv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f3.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f4.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f5.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f6.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f7.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f8.parquet"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f9.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f0.json"), "w") as outfile:
outfile.write("\n\n\n")
g2 = GlobReaderBatchKwargsGenerator(
base_directory=basedir,
datasource=basic_pandas_datasource,
asset_globs={"blargs": {"glob": "*.blarg"}, "fs": {"glob": "f*"}},
)
g2_assets = g2.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(g2_assets["names"]) == {("blargs", "path"), ("fs", "path")}
blargs_kwargs = [x["path"] for x in g2.get_iterator(data_asset_name="blargs")]
real_blargs = [
os.path.join(basedir, "f1.blarg"),
os.path.join(basedir, "f3.blarg"),
os.path.join(basedir, "f4.blarg"),
os.path.join(basedir, "f5.blarg"),
os.path.join(basedir, "f6.blarg"),
]
for kwargs in real_blargs:
assert kwargs in blargs_kwargs
assert len(blargs_kwargs) == len(real_blargs) | [
"def",
"test_glob_reader_generator",
"(",
"basic_pandas_datasource",
",",
"tmp_path_factory",
")",
":",
"basedir",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_glob_reader_generator\"",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f1.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f2.csv\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f3.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f4.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f5.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f6.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f7.xls\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f8.parquet\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f9.xls\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f0.json\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"g2",
"=",
"GlobReaderBatchKwargsGenerator",
"(",
"base_directory",
"=",
"basedir",
",",
"datasource",
"=",
"basic_pandas_datasource",
",",
"asset_globs",
"=",
"{",
"\"blargs\"",
":",
"{",
"\"glob\"",
":",
"\"*.blarg\"",
"}",
",",
"\"fs\"",
":",
"{",
"\"glob\"",
":",
"\"f*\"",
"}",
"}",
",",
")",
"g2_assets",
"=",
"g2",
".",
"get_available_data_asset_names",
"(",
")",
"# Use set in test to avoid order issues",
"assert",
"set",
"(",
"g2_assets",
"[",
"\"names\"",
"]",
")",
"==",
"{",
"(",
"\"blargs\"",
",",
"\"path\"",
")",
",",
"(",
"\"fs\"",
",",
"\"path\"",
")",
"}",
"blargs_kwargs",
"=",
"[",
"x",
"[",
"\"path\"",
"]",
"for",
"x",
"in",
"g2",
".",
"get_iterator",
"(",
"data_asset_name",
"=",
"\"blargs\"",
")",
"]",
"real_blargs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f1.blarg\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f3.blarg\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f4.blarg\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f5.blarg\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f6.blarg\"",
")",
",",
"]",
"for",
"kwargs",
"in",
"real_blargs",
":",
"assert",
"kwargs",
"in",
"blargs_kwargs",
"assert",
"len",
"(",
"blargs_kwargs",
")",
"==",
"len",
"(",
"real_blargs",
")"
] | [
63,
0
] | [
113,
49
] | python | en | ['en', 'en', 'en'] | True |
test_file_kwargs_generator_extensions | (tmp_path_factory) | csv, xls, parquet, json should be recognized file extensions | csv, xls, parquet, json should be recognized file extensions | def test_file_kwargs_generator_extensions(tmp_path_factory):
"""csv, xls, parquet, json should be recognized file extensions"""
basedir = str(tmp_path_factory.mktemp("test_file_kwargs_generator_extensions"))
# Do not include: invalid extension
with open(os.path.join(basedir, "f1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
# Include
with open(os.path.join(basedir, "f2.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Do not include: valid subdir, but no valid files in it
os.mkdir(os.path.join(basedir, "f3"))
with open(os.path.join(basedir, "f3", "f3_1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f3", "f3_2.blarg"), "w") as outfile:
outfile.write("\n\n\n")
# Include: valid subdir with valid files
os.mkdir(os.path.join(basedir, "f4"))
with open(os.path.join(basedir, "f4", "f4_1.csv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f4", "f4_2.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Do not include: valid extension, but dot prefix
with open(os.path.join(basedir, ".f5.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Include: valid extensions
with open(os.path.join(basedir, "f6.tsv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f7.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f8.parquet"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f9.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f0.json"), "w") as outfile:
outfile.write("\n\n\n")
g1 = SubdirReaderBatchKwargsGenerator(datasource="foo", base_directory=basedir)
g1_assets = g1.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(g1_assets["names"]) == {
("f7", "file"),
("f4", "directory"),
("f6", "file"),
("f0", "file"),
("f2", "file"),
("f9", "file"),
("f8", "file"),
} | [
"def",
"test_file_kwargs_generator_extensions",
"(",
"tmp_path_factory",
")",
":",
"basedir",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_file_kwargs_generator_extensions\"",
")",
")",
"# Do not include: invalid extension",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f1.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"# Include",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f2.csv\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"# Do not include: valid subdir, but no valid files in it",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f3\"",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f3\"",
",",
"\"f3_1.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f3\"",
",",
"\"f3_2.blarg\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"# Include: valid subdir with valid files",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f4\"",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f4\"",
",",
"\"f4_1.csv\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f4\"",
",",
"\"f4_2.csv\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"# Do not include: valid extension, but dot prefix",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\".f5.csv\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"# Include: valid extensions",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f6.tsv\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f7.xls\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f8.parquet\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f9.xls\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"f0.json\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\\n\\n\"",
")",
"g1",
"=",
"SubdirReaderBatchKwargsGenerator",
"(",
"datasource",
"=",
"\"foo\"",
",",
"base_directory",
"=",
"basedir",
")",
"g1_assets",
"=",
"g1",
".",
"get_available_data_asset_names",
"(",
")",
"# Use set in test to avoid order issues",
"assert",
"set",
"(",
"g1_assets",
"[",
"\"names\"",
"]",
")",
"==",
"{",
"(",
"\"f7\"",
",",
"\"file\"",
")",
",",
"(",
"\"f4\"",
",",
"\"directory\"",
")",
",",
"(",
"\"f6\"",
",",
"\"file\"",
")",
",",
"(",
"\"f0\"",
",",
"\"file\"",
")",
",",
"(",
"\"f2\"",
",",
"\"file\"",
")",
",",
"(",
"\"f9\"",
",",
"\"file\"",
")",
",",
"(",
"\"f8\"",
",",
"\"file\"",
")",
",",
"}"
] | [
116,
0
] | [
166,
5
] | python | en | ['en', 'fr', 'en'] | True |
__init__ | (self, project_config, context_root_dir=None, runtime_environment=None) | DataContext constructor
Args:
context_root_dir: location to look for the ``great_expectations.yml`` file. If None, searches for the file \
based on conventions for project subdirectories.
runtime_environment: a dictionary of config variables that
override both those set in config_variables.yml and the environment
Returns:
None
| DataContext constructor | def __init__(self, project_config, context_root_dir=None, runtime_environment=None):
"""DataContext constructor
Args:
context_root_dir: location to look for the ``great_expectations.yml`` file. If None, searches for the file \
based on conventions for project subdirectories.
runtime_environment: a dictionary of config variables that
override both those set in config_variables.yml and the environment
Returns:
None
"""
if not BaseDataContext.validate_config(project_config):
raise ge_exceptions.InvalidConfigError(
"Your project_config is not valid. Try using the CLI check-config command."
)
self._project_config = project_config
self._apply_global_config_overrides()
if context_root_dir is not None:
context_root_dir = os.path.abspath(context_root_dir)
self._context_root_directory = context_root_dir
self.runtime_environment = runtime_environment or {}
# Init plugin support
if self.plugins_directory is not None and os.path.exists(
self.plugins_directory
):
sys.path.append(self.plugins_directory)
# We want to have directories set up before initializing usage statistics so that we can obtain a context instance id
self._in_memory_instance_id = (
None # This variable *may* be used in case we cannot save an instance id
)
# Init stores
self._stores = dict()
self._init_stores(self.project_config_with_variables_substituted.stores)
# Init data_context_id
self._data_context_id = self._construct_data_context_id()
# Override the project_config data_context_id if an expectations_store was already set up
self._project_config.anonymous_usage_statistics.data_context_id = (
self._data_context_id
)
self._initialize_usage_statistics(
self._project_config.anonymous_usage_statistics
)
# Store cached datasources but don't init them
self._cached_datasources = {}
# Build the datasources we know about and have access to
self._init_datasources(self.project_config_with_variables_substituted)
# Init validation operators
# NOTE - 20200522 - JPC - A consistent approach to lazy loading for plugins will be useful here, harmonizing
# the way that execution environments (AKA datasources), validation operators, site builders and other
# plugins are built.
self.validation_operators = {}
# NOTE - 20210112 - Alex Sherstinsky - Validation Operators are planned to be deprecated.
if (
"validation_operators" in self.get_config().commented_map
and self._project_config.validation_operators
):
for (
validation_operator_name,
validation_operator_config,
) in self._project_config.validation_operators.items():
self.add_validation_operator(
validation_operator_name,
validation_operator_config,
)
self._evaluation_parameter_dependencies_compiled = False
self._evaluation_parameter_dependencies = {} | [
"def",
"__init__",
"(",
"self",
",",
"project_config",
",",
"context_root_dir",
"=",
"None",
",",
"runtime_environment",
"=",
"None",
")",
":",
"if",
"not",
"BaseDataContext",
".",
"validate_config",
"(",
"project_config",
")",
":",
"raise",
"ge_exceptions",
".",
"InvalidConfigError",
"(",
"\"Your project_config is not valid. Try using the CLI check-config command.\"",
")",
"self",
".",
"_project_config",
"=",
"project_config",
"self",
".",
"_apply_global_config_overrides",
"(",
")",
"if",
"context_root_dir",
"is",
"not",
"None",
":",
"context_root_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"context_root_dir",
")",
"self",
".",
"_context_root_directory",
"=",
"context_root_dir",
"self",
".",
"runtime_environment",
"=",
"runtime_environment",
"or",
"{",
"}",
"# Init plugin support",
"if",
"self",
".",
"plugins_directory",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"plugins_directory",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"self",
".",
"plugins_directory",
")",
"# We want to have directories set up before initializing usage statistics so that we can obtain a context instance id",
"self",
".",
"_in_memory_instance_id",
"=",
"(",
"None",
"# This variable *may* be used in case we cannot save an instance id",
")",
"# Init stores",
"self",
".",
"_stores",
"=",
"dict",
"(",
")",
"self",
".",
"_init_stores",
"(",
"self",
".",
"project_config_with_variables_substituted",
".",
"stores",
")",
"# Init data_context_id",
"self",
".",
"_data_context_id",
"=",
"self",
".",
"_construct_data_context_id",
"(",
")",
"# Override the project_config data_context_id if an expectations_store was already set up",
"self",
".",
"_project_config",
".",
"anonymous_usage_statistics",
".",
"data_context_id",
"=",
"(",
"self",
".",
"_data_context_id",
")",
"self",
".",
"_initialize_usage_statistics",
"(",
"self",
".",
"_project_config",
".",
"anonymous_usage_statistics",
")",
"# Store cached datasources but don't init them",
"self",
".",
"_cached_datasources",
"=",
"{",
"}",
"# Build the datasources we know about and have access to",
"self",
".",
"_init_datasources",
"(",
"self",
".",
"project_config_with_variables_substituted",
")",
"# Init validation operators",
"# NOTE - 20200522 - JPC - A consistent approach to lazy loading for plugins will be useful here, harmonizing",
"# the way that execution environments (AKA datasources), validation operators, site builders and other",
"# plugins are built.",
"self",
".",
"validation_operators",
"=",
"{",
"}",
"# NOTE - 20210112 - Alex Sherstinsky - Validation Operators are planned to be deprecated.",
"if",
"(",
"\"validation_operators\"",
"in",
"self",
".",
"get_config",
"(",
")",
".",
"commented_map",
"and",
"self",
".",
"_project_config",
".",
"validation_operators",
")",
":",
"for",
"(",
"validation_operator_name",
",",
"validation_operator_config",
",",
")",
"in",
"self",
".",
"_project_config",
".",
"validation_operators",
".",
"items",
"(",
")",
":",
"self",
".",
"add_validation_operator",
"(",
"validation_operator_name",
",",
"validation_operator_config",
",",
")",
"self",
".",
"_evaluation_parameter_dependencies_compiled",
"=",
"False",
"self",
".",
"_evaluation_parameter_dependencies",
"=",
"{",
"}"
] | [
298,
4
] | [
375,
52
] | python | en | ['en', 'lb', 'en'] | False |
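A hedged construction sketch for the context above; InMemoryStoreBackendDefaults and the exact config fields are assumptions for illustration, and real projects normally load great_expectations.yml from disk instead:

from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
    DataContextConfig,
    InMemoryStoreBackendDefaults,
)

project_config = DataContextConfig(
    store_backend_defaults=InMemoryStoreBackendDefaults(),
)
context = BaseDataContext(
    project_config=project_config,
    runtime_environment={'my_var': 'overrides config_variables.yml'},
)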
_init_stores | (self, store_configs) | Initialize all Stores for this DataContext.
Stores are a good fit for reading/writing objects that:
1. follow a clear key-value pattern, and
2. are usually edited programmatically, using the Context
Note that stores do NOT manage plugins.
| Initialize all Stores for this DataContext. | def _init_stores(self, store_configs):
"""Initialize all Stores for this DataContext.
Stores are a good fit for reading/writing objects that:
1. follow a clear key-value pattern, and
2. are usually edited programmatically, using the Context
Note that stores do NOT manage plugins.
"""
for store_name, store_config in store_configs.items():
self._build_store_from_config(store_name, store_config) | [
"def",
"_init_stores",
"(",
"self",
",",
"store_configs",
")",
":",
"for",
"store_name",
",",
"store_config",
"in",
"store_configs",
".",
"items",
"(",
")",
":",
"self",
".",
"_build_store_from_config",
"(",
"store_name",
",",
"store_config",
")"
] | [
410,
4
] | [
420,
67
] | python | en | ['en', 'en', 'en'] | True |
_construct_data_context_id | (self) |
Choose the id of the currently-configured expectations store, if available and a persistent store.
If not, it should choose the id stored in DataContextConfig.
Returns:
UUID to use as the data_context_id
|
Choose the id of the currently-configured expectations store, if available and a persistent store.
If not, it should choose the id stored in DataContextConfig.
Returns:
UUID to use as the data_context_id
| def _construct_data_context_id(self) -> str:
"""
Choose the id of the currently-configured expectations store, if available and a persistent store.
If not, it should choose the id stored in DataContextConfig.
Returns:
UUID to use as the data_context_id
"""
# Choose the id of the currently-configured expectations store, if it is a persistent store
expectations_store = self._stores[
self.project_config_with_variables_substituted.expectations_store_name
]
if isinstance(expectations_store.store_backend, TupleStoreBackend):
# suppress_warnings since a warning will already have been issued during the store creation if there was an invalid store config
return expectations_store.store_backend_id_warnings_suppressed
# Otherwise choose the id stored in the project_config
else:
return (
self.project_config_with_variables_substituted.anonymous_usage_statistics.data_context_id
) | [function tokens omitted] | [539, 4] | [559, 13] | python | en | ['en', 'error', 'th'] | False
_initialize_usage_statistics | (
self, usage_statistics_config: AnonymizedUsageStatisticsConfig
) | Initialize the usage statistics system. | Initialize the usage statistics system. | def _initialize_usage_statistics(
self, usage_statistics_config: AnonymizedUsageStatisticsConfig
):
"""Initialize the usage statistics system."""
if not usage_statistics_config.enabled:
logger.info("Usage statistics is disabled; skipping initialization.")
self._usage_statistics_handler = None
return
self._usage_statistics_handler = UsageStatisticsHandler(
data_context=self,
data_context_id=self._data_context_id,
usage_statistics_url=usage_statistics_config.usage_statistics_url,
) | [function tokens omitted] | [561, 4] | [574, 9] | python | en | ['en', 'en', 'en'] | True
add_store | (self, store_name, store_config) | Add a new Store to the DataContext and (for convenience) return the instantiated Store object.
Args:
store_name (str): a key for the new Store in self._stores
store_config (dict): a config for the Store to add
Returns:
store (Store)
| Add a new Store to the DataContext and (for convenience) return the instantiated Store object. | def add_store(self, store_name, store_config):
"""Add a new Store to the DataContext and (for convenience) return the instantiated Store object.
Args:
store_name (str): a key for the new Store in self._stores
store_config (dict): a config for the Store to add
Returns:
store (Store)
"""
self._project_config["stores"][store_name] = store_config
return self._build_store_from_config(store_name, store_config) | [function tokens omitted] | [576, 4] | [588, 70] | python | en | ['en', 'en', 'en'] | True
add_validation_operator | (
self, validation_operator_name, validation_operator_config
) | Add a new ValidationOperator to the DataContext and (for convenience) return the instantiated object.
Args:
validation_operator_name (str): a key for the new ValidationOperator in self._validation_operators
validation_operator_config (dict): a config for the ValidationOperator to add
Returns:
validation_operator (ValidationOperator)
| Add a new ValidationOperator to the DataContext and (for convenience) return the instantiated object. | def add_validation_operator(
self, validation_operator_name, validation_operator_config
):
"""Add a new ValidationOperator to the DataContext and (for convenience) return the instantiated object.
Args:
validation_operator_name (str): a key for the new ValidationOperator in self._validation_operators
validation_operator_config (dict): a config for the ValidationOperator to add
Returns:
validation_operator (ValidationOperator)
"""
self._project_config["validation_operators"][
validation_operator_name
] = validation_operator_config
config = self.project_config_with_variables_substituted.validation_operators[
validation_operator_name
]
module_name = "great_expectations.validation_operators"
new_validation_operator = instantiate_class_from_config(
config=config,
runtime_environment={
"data_context": self,
"name": validation_operator_name,
},
config_defaults={"module_name": module_name},
)
if not new_validation_operator:
raise ge_exceptions.ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=config["class_name"],
)
self.validation_operators[validation_operator_name] = new_validation_operator
return new_validation_operator | [function tokens omitted] | [590, 4] | [625, 38] | python | en | ['en', 'en', 'en'] | True
get_site_names | (self) | Get a list of configured site names. | Get a list of configured site names. | def get_site_names(self) -> List[str]:
"""Get a list of configured site names."""
return list(
self.project_config_with_variables_substituted.data_docs_sites.keys()
) | [function tokens omitted] | [643, 4] | [647, 9] | python | en | ['en', 'en', 'en'] | True
get_docs_sites_urls | (
self,
resource_identifier=None,
site_name: Optional[str] = None,
only_if_exists=True,
site_names: Optional[List[str]] = None,
) |
Get URLs for a resource for all data docs sites.
This function will return URLs for any configured site even if the sites
have not been built yet.
Args:
resource_identifier (object): optional. It can be an identifier of
ExpectationSuites, ValidationResults, and other resources that
have typed identifiers. If not provided, the method will return
the URLs of the index page.
site_name: Optionally specify which site to open. If not specified,
return all URLs in the project.
site_names: Optionally specify which sites are active. Sites not in
this list are not processed, even if specified in site_name.
Returns:
list: a list of URLs. Each item is the URL for the resource for a
data docs site
|
Get URLs for a resource for all data docs sites. | def get_docs_sites_urls(
self,
resource_identifier=None,
site_name: Optional[str] = None,
only_if_exists=True,
site_names: Optional[List[str]] = None,
) -> List[Dict[str, str]]:
"""
Get URLs for a resource for all data docs sites.
This function will return URLs for any configured site even if the sites
have not been built yet.
Args:
resource_identifier (object): optional. It can be an identifier of
ExpectationSuites, ValidationResults, and other resources that
have typed identifiers. If not provided, the method will return
the URLs of the index page.
site_name: Optionally specify which site to open. If not specified,
return all URLs in the project.
site_names: Optionally specify which sites are active. Sites not in
this list are not processed, even if specified in site_name.
Returns:
list: a list of URLs. Each item is the URL for the resource for a
data docs site
"""
unfiltered_sites = (
self.project_config_with_variables_substituted.data_docs_sites
)
# Filter out sites that are not in site_names
sites = (
{k: v for k, v in unfiltered_sites.items() if k in site_names}
if site_names
else unfiltered_sites
)
if not sites:
logger.debug("Found no data_docs_sites.")
return []
logger.debug(f"Found {len(sites)} data_docs_sites.")
if site_name:
if site_name not in sites.keys():
raise ge_exceptions.DataContextError(
f"Could not find site named {site_name}. Please check your configurations"
)
site = sites[site_name]
site_builder = self._load_site_builder_from_site_config(site)
url = site_builder.get_resource_url(
resource_identifier=resource_identifier, only_if_exists=only_if_exists
)
return [{"site_name": site_name, "site_url": url}]
site_urls = []
for _site_name, site_config in sites.items():
site_builder = self._load_site_builder_from_site_config(site_config)
url = site_builder.get_resource_url(
resource_identifier=resource_identifier, only_if_exists=only_if_exists
)
site_urls.append({"site_name": _site_name, "site_url": url})
return site_urls | [function tokens omitted] | [649, 4] | [712, 24] | python | en | ['en', 'error', 'th'] | False
open_data_docs | (
self,
resource_identifier: Optional[str] = None,
site_name: Optional[str] = None,
only_if_exists: Optional[bool] = True,
) |
A stdlib cross-platform way to open a file in a browser.
Args:
resource_identifier: ExpectationSuiteIdentifier,
ValidationResultIdentifier or any other type's identifier. The
argument is optional - when not supplied, the method returns the
URL of the index page.
site_name: Optionally specify which site to open. If not specified,
open all docs found in the project.
only_if_exists: Optionally specify flag to pass to "self.get_docs_sites_urls()".
|
A stdlib cross-platform way to open a file in a browser. | def open_data_docs(
self,
resource_identifier: Optional[str] = None,
site_name: Optional[str] = None,
only_if_exists: Optional[bool] = True,
) -> None:
"""
A stdlib cross-platform way to open a file in a browser.
Args:
resource_identifier: ExpectationSuiteIdentifier,
ValidationResultIdentifier or any other type's identifier. The
argument is optional - when not supplied, the method returns the
URL of the index page.
site_name: Optionally specify which site to open. If not specified,
open all docs found in the project.
only_if_exists: Optionally specify flag to pass to "self.get_docs_sites_urls()".
"""
data_docs_urls: List[Dict[str, str]] = self.get_docs_sites_urls(
resource_identifier=resource_identifier,
site_name=site_name,
only_if_exists=only_if_exists,
)
urls_to_open: List[str] = [site["site_url"] for site in data_docs_urls]
for url in urls_to_open:
if url is not None:
logger.debug(f"Opening Data Docs found here: {url}")
webbrowser.open(url) | [function tokens omitted] | [735, 4] | [763, 36] | python | en | ['en', 'error', 'th'] | False
root_directory | (self) | The root directory for configuration objects in the data context; the location in which
``great_expectations.yml`` is located. | The root directory for configuration objects in the data context; the location in which
``great_expectations.yml`` is located. | def root_directory(self):
"""The root directory for configuration objects in the data context; the location in which
``great_expectations.yml`` is located."""
return self._context_root_directory | [function tokens omitted] | [766, 4] | [769, 43] | python | en | ['en', 'en', 'en'] | True
plugins_directory | (self) | The directory in which custom plugin modules should be placed. | The directory in which custom plugin modules should be placed. | def plugins_directory(self):
"""The directory in which custom plugin modules should be placed."""
return self._normalize_absolute_or_relative_path(
self.project_config_with_variables_substituted.plugins_directory
) | [function tokens omitted] | [772, 4] | [776, 9] | python | en | ['en', 'en', 'en'] | True
stores | (self) | A single holder for all Stores in this context | A single holder for all Stores in this context | def stores(self):
"""A single holder for all Stores in this context"""
return self._stores | [function tokens omitted] | [791, 4] | [793, 27] | python | en | ['en', 'en', 'en'] | True
datasources | (self) | A single holder for all Datasources in this context | A single holder for all Datasources in this context | def datasources(self) -> Dict[str, Union[LegacyDatasource, BaseDatasource]]:
"""A single holder for all Datasources in this context"""
return self._cached_datasources | [function tokens omitted] | [796, 4] | [798, 39] | python | en | ['en', 'en', 'en'] | True
_load_config_variables_file | (self) | Get all config variables from the default location. | Get all config variables from the default location. | def _load_config_variables_file(self):
"""Get all config variables from the default location."""
config_variables_file_path = self.get_config().config_variables_file_path
if config_variables_file_path:
try:
# If the user specifies the config variable path with an environment variable, we want to substitute it
defined_path = substitute_config_variable(
config_variables_file_path, dict(os.environ)
)
if not os.path.isabs(defined_path):
# A BaseDataContext will not have a root directory; in that case use the current directory
# for any non-absolute path
root_directory = self.root_directory or os.curdir
else:
root_directory = ""
var_path = os.path.join(root_directory, defined_path)
with open(var_path) as config_variables_file:
return yaml.load(config_variables_file) or {}
except OSError as e:
if e.errno != errno.ENOENT:
raise
logger.debug("Generating empty config variables file.")
return {}
else:
return {} | [function tokens omitted] | [878, 4] | [902, 21] | python | en | ['en', 'en', 'en'] | True
escape_all_config_variables | (
self,
value: Union[str, dict, list],
dollar_sign_escape_string: str = DOLLAR_SIGN_ESCAPE_STRING,
skip_if_substitution_variable: bool = True,
) |
Replace all `$` characters with the DOLLAR_SIGN_ESCAPE_STRING
Args:
value: config variable value
dollar_sign_escape_string: replaces instances of `$`
skip_if_substitution_variable: skip if the value is of the form ${MYVAR} or $MYVAR
Returns:
input value with all `$` characters replaced with the escape string
|
Replace all `$` characters with the DOLLAR_SIGN_ESCAPE_STRING | def escape_all_config_variables(
self,
value: Union[str, dict, list],
dollar_sign_escape_string: str = DOLLAR_SIGN_ESCAPE_STRING,
skip_if_substitution_variable: bool = True,
) -> Union[str, dict, list]:
"""
Replace all `$` characters with the DOLLAR_SIGN_ESCAPE_STRING
Args:
value: config variable value
dollar_sign_escape_string: replaces instances of `$`
skip_if_substitution_variable: skip if the value is of the form ${MYVAR} or $MYVAR
Returns:
input value with all `$` characters replaced with the escape string
"""
if isinstance(value, (dict, OrderedDict)):
return {
k: self.escape_all_config_variables(
v, dollar_sign_escape_string, skip_if_substitution_variable
)
for k, v in value.items()
}
elif isinstance(value, list):
return [
self.escape_all_config_variables(
v, dollar_sign_escape_string, skip_if_substitution_variable
)
for v in value
]
if skip_if_substitution_variable:
if parse_substitution_variable(value) is None:
return value.replace("$", dollar_sign_escape_string)
else:
return value
else:
return value.replace("$", dollar_sign_escape_string) | [
"def",
"escape_all_config_variables",
"(",
"self",
",",
"value",
":",
"Union",
"[",
"str",
",",
"dict",
",",
"list",
"]",
",",
"dollar_sign_escape_string",
":",
"str",
"=",
"DOLLAR_SIGN_ESCAPE_STRING",
",",
"skip_if_substitution_variable",
":",
"bool",
"=",
"True",
",",
")",
"->",
"Union",
"[",
"str",
",",
"dict",
",",
"list",
"]",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
"or",
"isinstance",
"(",
"value",
",",
"OrderedDict",
")",
":",
"return",
"{",
"k",
":",
"self",
".",
"escape_all_config_variables",
"(",
"v",
",",
"dollar_sign_escape_string",
",",
"skip_if_substitution_variable",
")",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
"}",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"[",
"self",
".",
"escape_all_config_variables",
"(",
"v",
",",
"dollar_sign_escape_string",
",",
"skip_if_substitution_variable",
")",
"for",
"v",
"in",
"value",
"]",
"if",
"skip_if_substitution_variable",
":",
"if",
"parse_substitution_variable",
"(",
"value",
")",
"is",
"None",
":",
"return",
"value",
".",
"replace",
"(",
"\"$\"",
",",
"dollar_sign_escape_string",
")",
"else",
":",
"return",
"value",
"else",
":",
"return",
"value",
".",
"replace",
"(",
"\"$\"",
",",
"dollar_sign_escape_string",
")"
] | [
927,
4
] | [
966,
64
] | python | en | ['en', 'error', 'th'] | False |
save_config_variable | (
self, config_variable_name, value, skip_if_substitution_variable: bool = True
) | r"""Save config variable value
Escapes $ unless they are used in substitution variables e.g. the $ characters in ${SOME_VAR} or $SOME_VAR are not escaped
Args:
config_variable_name: name of the property
value: the value to save for the property
skip_if_substitution_variable: set to False to escape $ in values in substitution variable form e.g. ${SOME_VAR} -> r"\${SOME_VAR}" or $SOME_VAR -> r"\$SOME_VAR"
Returns:
None
| r"""Save config variable value
Escapes $ unless they are used in substitution variables e.g. the $ characters in ${SOME_VAR} or $SOME_VAR are not escaped | def save_config_variable(
self, config_variable_name, value, skip_if_substitution_variable: bool = True
):
r"""Save config variable value
Escapes $ unless they are used in substitution variables e.g. the $ characters in ${SOME_VAR} or $SOME_VAR are not escaped
Args:
config_variable_name: name of the property
value: the value to save for the property
skip_if_substitution_variable: set to False to escape $ in values in substitution variable form e.g. ${SOME_VAR} -> r"\${SOME_VAR}" or $SOME_VAR -> r"\$SOME_VAR"
Returns:
None
"""
config_variables = self._load_config_variables_file()
value = self.escape_all_config_variables(
value,
self.DOLLAR_SIGN_ESCAPE_STRING,
skip_if_substitution_variable=skip_if_substitution_variable,
)
config_variables[config_variable_name] = value
config_variables_filepath = self.get_config().config_variables_file_path
if not config_variables_filepath:
raise ge_exceptions.InvalidConfigError(
"'config_variables_file_path' property is not found in config - setting it is required to use this feature"
)
config_variables_filepath = os.path.join(
self.root_directory, config_variables_filepath
)
os.makedirs(os.path.dirname(config_variables_filepath), exist_ok=True)
if not os.path.isfile(config_variables_filepath):
logger.info(
"Creating new substitution_variables file at {config_variables_filepath}".format(
config_variables_filepath=config_variables_filepath
)
)
with open(config_variables_filepath, "w") as template:
template.write(CONFIG_VARIABLES_TEMPLATE)
with open(config_variables_filepath, "w") as config_variables_file:
yaml.dump(config_variables, config_variables_file) | [function tokens omitted] | [968, 4] | [1010, 62] | python | en | ['nl', 'en', 'en'] | True
delete_datasource | (self, datasource_name: str) | Delete a data source
Args:
datasource_name: The name of the datasource to delete.
Raises:
ValueError: If the datasource name isn't provided or cannot be found.
| Delete a data source
Args:
datasource_name: The name of the datasource to delete. | def delete_datasource(self, datasource_name: str):
"""Delete a data source
Args:
datasource_name: The name of the datasource to delete.
Raises:
ValueError: If the datasource name isn't provided or cannot be found.
"""
if datasource_name is None:
raise ValueError("Datasource names must be a datasource name")
else:
datasource = self.get_datasource(datasource_name=datasource_name)
if datasource:
# remove key until we have a delete method on project_config
# self.project_config_with_variables_substituted.datasources[
# datasource_name].remove()
del self._project_config["datasources"][datasource_name]
del self._cached_datasources[datasource_name]
else:
raise ValueError(f"Datasource {datasource_name} not found") | [
"def",
"delete_datasource",
"(",
"self",
",",
"datasource_name",
":",
"str",
")",
":",
"if",
"datasource_name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Datasource names must be a datasource name\"",
")",
"else",
":",
"datasource",
"=",
"self",
".",
"get_datasource",
"(",
"datasource_name",
"=",
"datasource_name",
")",
"if",
"datasource",
":",
"# remove key until we have a delete method on project_config",
"# self.project_config_with_variables_substituted.datasources[",
"# datasource_name].remove()",
"del",
"self",
".",
"_project_config",
"[",
"\"datasources\"",
"]",
"[",
"datasource_name",
"]",
"del",
"self",
".",
"_cached_datasources",
"[",
"datasource_name",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Datasource {datasource_name} not found\"",
")"
] | [
1012,
4
] | [
1031,
75
] | python | en | ['en', 'it', 'en'] | True |
get_available_data_asset_names | (
self, datasource_names=None, batch_kwargs_generator_names=None
) | Inspect datasource and batch kwargs generators to provide available data_asset objects.
Args:
datasource_names: list of datasources for which to provide available data_asset_name objects. If None, \
return available data assets for all datasources.
batch_kwargs_generator_names: list of batch kwargs generators for which to provide available
data_asset_name objects.
Returns:
data_asset_names (dict): Dictionary describing available data assets
::
{
datasource_name: {
batch_kwargs_generator_name: [ data_asset_1, data_asset_2, ... ]
...
}
...
}
| Inspect datasource and batch kwargs generators to provide available data_asset objects. | def get_available_data_asset_names(
self, datasource_names=None, batch_kwargs_generator_names=None
):
"""Inspect datasource and batch kwargs generators to provide available data_asset objects.
Args:
datasource_names: list of datasources for which to provide available data_asset_name objects. If None, \
return available data assets for all datasources.
batch_kwargs_generator_names: list of batch kwargs generators for which to provide available
data_asset_name objects.
Returns:
data_asset_names (dict): Dictionary describing available data assets
::
{
datasource_name: {
batch_kwargs_generator_name: [ data_asset_1, data_asset_2, ... ]
...
}
...
}
"""
data_asset_names = {}
if datasource_names is None:
datasource_names = [
datasource["name"] for datasource in self.list_datasources()
]
elif isinstance(datasource_names, str):
datasource_names = [datasource_names]
elif not isinstance(datasource_names, list):
raise ValueError(
"Datasource names must be a datasource name, list of datasource names or None (to list all datasources)"
)
if batch_kwargs_generator_names is not None:
if isinstance(batch_kwargs_generator_names, str):
batch_kwargs_generator_names = [batch_kwargs_generator_names]
if len(batch_kwargs_generator_names) == len(
datasource_names
): # Iterate over both together
for idx, datasource_name in enumerate(datasource_names):
datasource = self.get_datasource(datasource_name)
data_asset_names[
datasource_name
] = datasource.get_available_data_asset_names(
batch_kwargs_generator_names[idx]
)
elif len(batch_kwargs_generator_names) == 1:
datasource = self.get_datasource(datasource_names[0])
data_asset_names[
datasource_names[0]
] = datasource.get_available_data_asset_names(
batch_kwargs_generator_names
)
else:
raise ValueError(
"If providing batch kwargs generator, you must either specify one for each datasource or only "
"one datasource."
)
else: # generator_names is None
for datasource_name in datasource_names:
try:
datasource = self.get_datasource(datasource_name)
data_asset_names[
datasource_name
] = datasource.get_available_data_asset_names()
except ValueError:
# handle the edge case of a non-existent datasource
data_asset_names[datasource_name] = {}
return data_asset_names | [function tokens omitted] | [1033, 4] | [1107, 31] | python | en | ['en', 'en', 'en'] | True
build_batch_kwargs | (
self,
datasource,
batch_kwargs_generator,
data_asset_name=None,
partition_id=None,
**kwargs,
) | Builds batch kwargs using the provided datasource, batch kwargs generator, and batch_parameters.
Args:
datasource (str): the name of the datasource for which to build batch_kwargs
batch_kwargs_generator (str): the name of the batch kwargs generator to use to build batch_kwargs
data_asset_name (str): an optional data asset name to pass as a batch_parameter
**kwargs: additional batch_parameters
Returns:
BatchKwargs
| Builds batch kwargs using the provided datasource, batch kwargs generator, and batch_parameters. | def build_batch_kwargs(
self,
datasource,
batch_kwargs_generator,
data_asset_name=None,
partition_id=None,
**kwargs,
):
"""Builds batch kwargs using the provided datasource, batch kwargs generator, and batch_parameters.
Args:
datasource (str): the name of the datasource for which to build batch_kwargs
batch_kwargs_generator (str): the name of the batch kwargs generator to use to build batch_kwargs
data_asset_name (str): an optional data asset name to pass as a batch_parameter
**kwargs: additional batch_parameters
Returns:
BatchKwargs
"""
if kwargs.get("name"):
if data_asset_name:
raise ValueError(
"Cannot provide both 'name' and 'data_asset_name'. Please use 'data_asset_name' only."
)
warnings.warn(
"name is being deprecated as a batch_parameter. Please use data_asset_name instead.",
DeprecationWarning,
)
data_asset_name = kwargs.pop("name")
datasource_obj = self.get_datasource(datasource)
batch_kwargs = datasource_obj.build_batch_kwargs(
batch_kwargs_generator=batch_kwargs_generator,
data_asset_name=data_asset_name,
partition_id=partition_id,
**kwargs,
)
return batch_kwargs | [function tokens omitted] | [1109, 4] | [1146, 27] | python | en | ['en', 'en', 'en'] | True
_get_batch_v2 | (
self,
batch_kwargs: Union[dict, BatchKwargs],
expectation_suite_name: Union[str, ExpectationSuite],
data_asset_type=None,
batch_parameters=None,
) | Build a batch of data using batch_kwargs, and return a DataAsset with expectation_suite_name attached. If
batch_parameters are included, they will be available as attributes of the batch.
Args:
batch_kwargs: the batch_kwargs to use; must include a datasource key
expectation_suite_name: The ExpectationSuite or the name of the expectation_suite to get
data_asset_type: the type of data_asset to build, with associated expectation implementations. This can
generally be inferred from the datasource.
batch_parameters: optional parameters to store as the reference description of the batch. They should
reflect parameters that would provide the passed BatchKwargs.
Returns:
DataAsset
| Build a batch of data using batch_kwargs, and return a DataAsset with expectation_suite_name attached. If
batch_parameters are included, they will be available as attributes of the batch.
Args:
batch_kwargs: the batch_kwargs to use; must include a datasource key
expectation_suite_name: The ExpectationSuite or the name of the expectation_suite to get
data_asset_type: the type of data_asset to build, with associated expectation implementations. This can
generally be inferred from the datasource.
batch_parameters: optional parameters to store as the reference description of the batch. They should
reflect parameters that would provide the passed BatchKwargs.
Returns:
DataAsset
| def _get_batch_v2(
self,
batch_kwargs: Union[dict, BatchKwargs],
expectation_suite_name: Union[str, ExpectationSuite],
data_asset_type=None,
batch_parameters=None,
) -> DataAsset:
"""Build a batch of data using batch_kwargs, and return a DataAsset with expectation_suite_name attached. If
batch_parameters are included, they will be available as attributes of the batch.
Args:
batch_kwargs: the batch_kwargs to use; must include a datasource key
expectation_suite_name: The ExpectationSuite or the name of the expectation_suite to get
data_asset_type: the type of data_asset to build, with associated expectation implementations. This can
generally be inferred from the datasource.
batch_parameters: optional parameters to store as the reference description of the batch. They should
reflect parameters that would provide the passed BatchKwargs.
Returns:
DataAsset
"""
if isinstance(batch_kwargs, dict):
batch_kwargs = BatchKwargs(batch_kwargs)
if not isinstance(batch_kwargs, BatchKwargs):
raise ge_exceptions.BatchKwargsError(
"BatchKwargs must be a BatchKwargs object or dictionary."
)
if not isinstance(
expectation_suite_name, (ExpectationSuite, ExpectationSuiteIdentifier, str)
):
raise ge_exceptions.DataContextError(
"expectation_suite_name must be an ExpectationSuite, "
"ExpectationSuiteIdentifier or string."
)
if isinstance(expectation_suite_name, ExpectationSuite):
expectation_suite = expectation_suite_name
elif isinstance(expectation_suite_name, ExpectationSuiteIdentifier):
expectation_suite = self.get_expectation_suite(
expectation_suite_name.expectation_suite_name
)
else:
expectation_suite = self.get_expectation_suite(expectation_suite_name)
datasource = self.get_datasource(batch_kwargs.get("datasource"))
batch = datasource.get_batch(
batch_kwargs=batch_kwargs, batch_parameters=batch_parameters
)
if data_asset_type is None:
data_asset_type = datasource.config.get("data_asset_type")
validator = BridgeValidator(
batch=batch,
expectation_suite=expectation_suite,
expectation_engine=data_asset_type,
)
return validator.get_dataset() | [function tokens omitted] | [1148, 4] | [1203, 38] | python | en | ['en', 'en', 'en'] | True
_get_batch_v3 | (
self,
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[Union[IDDict, dict]] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) | Get exactly one batch, based on a variety of flexible input types.
Args:
datasource_name
data_connector_name
data_asset_name
batch_request
batch_data
data_connector_query
batch_identifiers
batch_filter_parameters
limit
index
custom_filter_function
batch_spec_passthrough
sampling_method
sampling_kwargs
splitter_method
splitter_kwargs
**kwargs
Returns:
(Batch) The requested batch
This method does not require typed or nested inputs.
Instead, it is intended to help the user pick the right parameters.
This method attempts to return exactly one batch.
If 0 or more than 1 batches would be returned, it raises an error.
| Get exactly one batch, based on a variety of flexible input types. | def _get_batch_v3(
self,
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[Union[IDDict, dict]] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) -> Union[Batch, DataAsset]:
"""Get exactly one batch, based on a variety of flexible input types.
Args:
datasource_name
data_connector_name
data_asset_name
batch_request
batch_data
data_connector_query
batch_identifiers
batch_filter_parameters
limit
index
custom_filter_function
batch_spec_passthrough
sampling_method
sampling_kwargs
splitter_method
splitter_kwargs
**kwargs
Returns:
(Batch) The requested batch
This method does not require typed or nested inputs.
Instead, it is intended to help the user pick the right parameters.
This method attempts to return exactly one batch.
If 0 or more than 1 batches would be returned, it raises an error.
"""
batch_list: List[Batch] = self.get_batch_list(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
batch_request=batch_request,
batch_data=batch_data,
data_connector_query=data_connector_query,
batch_identifiers=batch_identifiers,
limit=limit,
index=index,
custom_filter_function=custom_filter_function,
batch_spec_passthrough=batch_spec_passthrough,
sampling_method=sampling_method,
sampling_kwargs=sampling_kwargs,
splitter_method=splitter_method,
splitter_kwargs=splitter_kwargs,
runtime_parameters=runtime_parameters,
query=query,
path=path,
batch_filter_parameters=batch_filter_parameters,
**kwargs,
)
# NOTE: Alex 20201202 - The check below is duplicate of code in Datasource.get_single_batch_from_batch_request()
warnings.warn(
"get_batch will be deprecated for the V3 Batch Request API in a future version of GE. Please use"
"get_batch_list instead.",
DeprecationWarning,
)
if len(batch_list) != 1:
raise ValueError(
f"Got {len(batch_list)} batches instead of a single batch. If you would like to use a BatchRequest to "
f"return multiple batches, please use get_batch_list directly instead of calling get_batch"
)
return batch_list[0] | [function tokens omitted] | [1205, 4] | [1298, 28] | python | en | ['en', 'en', 'en'] | True
run_validation_operator | (
self,
validation_operator_name: str,
assets_to_validate: List,
run_id: Optional[Union[str, RunIdentifier]] = None,
evaluation_parameters: Optional[dict] = None,
run_name: Optional[str] = None,
run_time: Optional[Union[str, datetime.datetime]] = None,
result_format: Optional[Union[str, dict]] = None,
**kwargs,
) |
Run a validation operator to validate data assets and to perform the business logic around
validation that the operator implements.
Args:
validation_operator_name: name of the operator, as it appears in the context's config file
assets_to_validate: a list that specifies the data assets that the operator will validate. The members of
the list can be either batches, or a tuple that will allow the operator to fetch the batch:
(batch_kwargs, expectation_suite_name)
run_name: The run_name for the validation; if None, a default value will be used
**kwargs: Additional kwargs to pass to the validation operator
Returns:
ValidationOperatorResult
|
Run a validation operator to validate data assets and to perform the business logic around
validation that the operator implements. | def run_validation_operator(
self,
validation_operator_name: str,
assets_to_validate: List,
run_id: Optional[Union[str, RunIdentifier]] = None,
evaluation_parameters: Optional[dict] = None,
run_name: Optional[str] = None,
run_time: Optional[Union[str, datetime.datetime]] = None,
result_format: Optional[Union[str, dict]] = None,
**kwargs,
):
"""
Run a validation operator to validate data assets and to perform the business logic around
validation that the operator implements.
Args:
validation_operator_name: name of the operator, as it appears in the context's config file
assets_to_validate: a list that specifies the data assets that the operator will validate. The members of
the list can be either batches, or a tuple that will allow the operator to fetch the batch:
(batch_kwargs, expectation_suite_name)
run_name: The run_name for the validation; if None, a default value will be used
**kwargs: Additional kwargs to pass to the validation operator
Returns:
ValidationOperatorResult
"""
result_format = result_format or {"result_format": "SUMMARY"}
if not assets_to_validate:
raise ge_exceptions.DataContextError(
"No batches of data were passed in. These are required"
)
for batch in assets_to_validate:
if not isinstance(batch, (tuple, DataAsset, Validator)):
raise ge_exceptions.DataContextError(
"Batches are required to be of type DataAsset or Validator"
)
try:
validation_operator = self.validation_operators[validation_operator_name]
except KeyError:
raise ge_exceptions.DataContextError(
f"No validation operator `{validation_operator_name}` was found in your project. Please verify this in your great_expectations.yml"
)
if run_id is None and run_name is None:
run_name = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
logger.info("Setting run_name to: {}".format(run_name))
if evaluation_parameters is None:
return validation_operator.run(
assets_to_validate=assets_to_validate,
run_id=run_id,
run_name=run_name,
run_time=run_time,
result_format=result_format,
**kwargs,
)
else:
return validation_operator.run(
assets_to_validate=assets_to_validate,
run_id=run_id,
evaluation_parameters=evaluation_parameters,
run_name=run_name,
run_time=run_time,
result_format=result_format,
**kwargs,
) | [
"def",
"run_validation_operator",
"(",
"self",
",",
"validation_operator_name",
":",
"str",
",",
"assets_to_validate",
":",
"List",
",",
"run_id",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"RunIdentifier",
"]",
"]",
"=",
"None",
",",
"evaluation_parameters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"run_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"run_time",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"datetime",
".",
"datetime",
"]",
"]",
"=",
"None",
",",
"result_format",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"dict",
"]",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"result_format",
"=",
"result_format",
"or",
"{",
"\"result_format\"",
":",
"\"SUMMARY\"",
"}",
"if",
"not",
"assets_to_validate",
":",
"raise",
"ge_exceptions",
".",
"DataContextError",
"(",
"\"No batches of data were passed in. These are required\"",
")",
"for",
"batch",
"in",
"assets_to_validate",
":",
"if",
"not",
"isinstance",
"(",
"batch",
",",
"(",
"tuple",
",",
"DataAsset",
",",
"Validator",
")",
")",
":",
"raise",
"ge_exceptions",
".",
"DataContextError",
"(",
"\"Batches are required to be of type DataAsset or Validator\"",
")",
"try",
":",
"validation_operator",
"=",
"self",
".",
"validation_operators",
"[",
"validation_operator_name",
"]",
"except",
"KeyError",
":",
"raise",
"ge_exceptions",
".",
"DataContextError",
"(",
"f\"No validation operator `{validation_operator_name}` was found in your project. Please verify this in your great_expectations.yml\"",
")",
"if",
"run_id",
"is",
"None",
"and",
"run_name",
"is",
"None",
":",
"run_name",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"datetime",
".",
"timezone",
".",
"utc",
")",
".",
"strftime",
"(",
"\"%Y%m%dT%H%M%S.%fZ\"",
")",
"logger",
".",
"info",
"(",
"\"Setting run_name to: {}\"",
".",
"format",
"(",
"run_name",
")",
")",
"if",
"evaluation_parameters",
"is",
"None",
":",
"return",
"validation_operator",
".",
"run",
"(",
"assets_to_validate",
"=",
"assets_to_validate",
",",
"run_id",
"=",
"run_id",
",",
"run_name",
"=",
"run_name",
",",
"run_time",
"=",
"run_time",
",",
"result_format",
"=",
"result_format",
",",
"*",
"*",
"kwargs",
",",
")",
"else",
":",
"return",
"validation_operator",
".",
"run",
"(",
"assets_to_validate",
"=",
"assets_to_validate",
",",
"run_id",
"=",
"run_id",
",",
"evaluation_parameters",
"=",
"evaluation_parameters",
",",
"run_name",
"=",
"run_name",
",",
"run_time",
"=",
"run_time",
",",
"result_format",
"=",
"result_format",
",",
"*",
"*",
"kwargs",
",",
")"
] | [
1304,
4
] | [
1372,
13
] | python | en | ['en', 'error', 'th'] | False |
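A minimal usage sketch for the `run_validation_operator` record above, assuming a V2-style project whose great_expectations.yml defines a validation operator named `action_list_operator` (the conventional default); the datasource, file path, and suite name are illustrative:

import great_expectations as ge

context = ge.DataContext()  # loads the project's great_expectations.yml
batch = context.get_batch(
    batch_kwargs={"datasource": "my_datasource", "path": "data/my_file.csv"},
    expectation_suite_name="my_suite",
)
# run_id and run_name are omitted, so a UTC timestamp run_name such as
# "20210101T000000.000000Z" is generated, per the implementation above
results = context.run_validation_operator(
    validation_operator_name="action_list_operator",
    assets_to_validate=[batch],
)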
_get_data_context_version | (self, arg1: Any, **kwargs) |
arg1: the first positional argument (can take on various types)
**kwargs: variable arguments
First check:
Returns "v3" if the "0.13" entities are specified in the **kwargs.
Otherwise:
Returns None if no datasources have been configured (or if there is an exception while getting the datasource).
Returns "v3" if the datasource is a subclass of the BaseDatasource class.
Returns "v2" if the datasource is an instance of the LegacyDatasource class.
|
arg1: the first positional argument (can take on various types) | def _get_data_context_version(self, arg1: Any, **kwargs) -> Optional[str]:
"""
arg1: the first positional argument (can take on various types)
**kwargs: variable arguments
First check:
Returns "v3" if the "0.13" entities are specified in the **kwargs.
Otherwise:
Returns None if no datasources have been configured (or if there is an exception while getting the datasource).
Returns "v3" if the datasource is a subclass of the BaseDatasource class.
Returns "v2" if the datasource is an instance of the LegacyDatasource class.
"""
if {
"datasource_name",
"data_connector_name",
"data_asset_name",
"batch_request",
"batch_data",
}.intersection(set(kwargs.keys())):
return "v3"
if not self.datasources:
return None
api_version: Optional[str] = None
datasource_name: Any
if "datasource_name" in kwargs:
datasource_name = kwargs.pop("datasource_name", None)
else:
datasource_name = arg1
try:
datasource: Union[LegacyDatasource, BaseDatasource] = self.get_datasource(
datasource_name=datasource_name
)
if issubclass(type(datasource), BaseDatasource):
api_version = "v3"
except (ValueError, TypeError):
if "batch_kwargs" in kwargs:
batch_kwargs = kwargs.get("batch_kwargs", None)
else:
batch_kwargs = arg1
if isinstance(batch_kwargs, dict):
datasource_name = batch_kwargs.get("datasource")
if datasource_name is not None:
try:
datasource: Union[
LegacyDatasource, BaseDatasource
] = self.get_datasource(datasource_name=datasource_name)
if isinstance(datasource, LegacyDatasource):
api_version = "v2"
except (ValueError, TypeError):
pass
return api_version | [
"def",
"_get_data_context_version",
"(",
"self",
",",
"arg1",
":",
"Any",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"if",
"{",
"\"datasource_name\"",
",",
"\"data_connector_name\"",
",",
"\"data_asset_name\"",
",",
"\"batch_request\"",
",",
"\"batch_data\"",
",",
"}",
".",
"intersection",
"(",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
":",
"return",
"\"v3\"",
"if",
"not",
"self",
".",
"datasources",
":",
"return",
"None",
"api_version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
"datasource_name",
":",
"Any",
"if",
"\"datasource_name\"",
"in",
"kwargs",
":",
"datasource_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"datasource_name\"",
",",
"None",
")",
"else",
":",
"datasource_name",
"=",
"arg1",
"try",
":",
"datasource",
":",
"Union",
"[",
"LegacyDatasource",
",",
"BaseDatasource",
"]",
"=",
"self",
".",
"get_datasource",
"(",
"datasource_name",
"=",
"datasource_name",
")",
"if",
"issubclass",
"(",
"type",
"(",
"datasource",
")",
",",
"BaseDatasource",
")",
":",
"api_version",
"=",
"\"v3\"",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"if",
"\"batch_kwargs\"",
"in",
"kwargs",
":",
"batch_kwargs",
"=",
"kwargs",
".",
"get",
"(",
"\"batch_kwargs\"",
",",
"None",
")",
"else",
":",
"batch_kwargs",
"=",
"arg1",
"if",
"isinstance",
"(",
"batch_kwargs",
",",
"dict",
")",
":",
"datasource_name",
"=",
"batch_kwargs",
".",
"get",
"(",
"\"datasource\"",
")",
"if",
"datasource_name",
"is",
"not",
"None",
":",
"try",
":",
"datasource",
":",
"Union",
"[",
"LegacyDatasource",
",",
"BaseDatasource",
"]",
"=",
"self",
".",
"get_datasource",
"(",
"datasource_name",
"=",
"datasource_name",
")",
"if",
"isinstance",
"(",
"datasource",
",",
"LegacyDatasource",
")",
":",
"api_version",
"=",
"\"v2\"",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"return",
"api_version"
] | [
1374,
4
] | [
1429,
26
] | python | en | ['en', 'error', 'th'] | False |
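A sketch of how the version detection above behaves for a few call shapes, reusing the `context` from the earlier sketch; the datasource names are hypothetical and assume one V3 (`BaseDatasource`) and one V2 (`LegacyDatasource`) datasource are configured:

# Any "0.13-style" keyword short-circuits to the V3 API before datasources are inspected:
context._get_data_context_version(None, data_connector_name="my_connector")  # -> "v3"

# Otherwise the class of the named datasource decides:
context._get_data_context_version("my_v3_datasource")                        # -> "v3"

# A batch_kwargs-style dict falls back to its "datasource" key:
context._get_data_context_version({"datasource": "my_v2_datasource"})        # -> "v2"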
get_batch | (
self, arg1: Any = None, arg2: Any = None, arg3: Any = None, **kwargs
) | Get exactly one batch, based on a variety of flexible input types.
The method `get_batch` is the main user-facing method for getting batches; it supports both the new (V3) and the
Legacy (V2) Datasource schemas. The version-specific implementations are contained in "_get_batch_v2()" and
"_get_batch_v3()", respectively, both of which are in the present module.
For the V3 API parameters, please refer to the signature and parameter description of method "_get_batch_v3()".
For the Legacy usage, please refer to the signature and parameter description of the method "_get_batch_v2()".
Args:
arg1: the first positional argument (can take on various types)
arg2: the second positional argument (can take on various types)
arg3: the third positional argument (can take on various types)
**kwargs: variable arguments
Returns:
Batch (V3) or DataAsset (V2) -- the requested batch
Processing Steps:
1. Determine the version (possible values are "v3" or "v2").
2. Convert the positional arguments to the appropriate named arguments, based on the version.
3. Package the remaining arguments as variable keyword arguments (applies only to V3).
4. Call the version-specific method ("_get_batch_v3()" or "_get_batch_v2()") with the appropriate arguments.
| Get exactly one batch, based on a variety of flexible input types.
The method `get_batch` is the main user-facing method for getting batches; it supports both the new (V3) and the
Legacy (V2) Datasource schemas. The version-specific implementations are contained in "_get_batch_v2()" and
"_get_batch_v3()", respectively, both of which are in the present module. | def get_batch(
self, arg1: Any = None, arg2: Any = None, arg3: Any = None, **kwargs
) -> Union[Batch, DataAsset]:
"""Get exactly one batch, based on a variety of flexible input types.
The method `get_batch` is the main user-facing method for getting batches; it supports both the new (V3) and the
Legacy (V2) Datasource schemas. The version-specific implementations are contained in "_get_batch_v2()" and
"_get_batch_v3()", respectively, both of which are in the present module.
For the V3 API parameters, please refer to the signature and parameter description of method "_get_batch_v3()".
For the Legacy usage, please refer to the signature and parameter description of the method "_get_batch_v2()".
Args:
arg1: the first positional argument (can take on various types)
arg2: the second positional argument (can take on various types)
arg3: the third positional argument (can take on various types)
**kwargs: variable arguments
Returns:
Batch (V3) or DataAsset (V2) -- the requested batch
Processing Steps:
1. Determine the version (possible values are "v3" or "v2").
2. Convert the positional arguments to the appropriate named arguments, based on the version.
3. Package the remaining arguments as variable keyword arguments (applies only to V3).
4. Call the version-specific method ("_get_batch_v3()" or "_get_batch_v2()") with the appropriate arguments.
"""
api_version: Optional[str] = self._get_data_context_version(arg1=arg1, **kwargs)
if api_version == "v3":
if "datasource_name" in kwargs:
datasource_name = kwargs.pop("datasource_name", None)
else:
datasource_name = arg1
if "data_connector_name" in kwargs:
data_connector_name = kwargs.pop("data_connector_name", None)
else:
data_connector_name = arg2
if "data_asset_name" in kwargs:
data_asset_name = kwargs.pop("data_asset_name", None)
else:
data_asset_name = arg3
return self._get_batch_v3(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
**kwargs,
)
if "batch_kwargs" in kwargs:
batch_kwargs = kwargs.get("batch_kwargs", None)
else:
batch_kwargs = arg1
if "expectation_suite_name" in kwargs:
expectation_suite_name = kwargs.get("expectation_suite_name", None)
else:
expectation_suite_name = arg2
if "data_asset_type" in kwargs:
data_asset_type = kwargs.get("data_asset_type", None)
else:
data_asset_type = arg3
batch_parameters = kwargs.get("batch_parameters")
return self._get_batch_v2(
batch_kwargs=batch_kwargs,
expectation_suite_name=expectation_suite_name,
data_asset_type=data_asset_type,
batch_parameters=batch_parameters,
) | [
"def",
"get_batch",
"(",
"self",
",",
"arg1",
":",
"Any",
"=",
"None",
",",
"arg2",
":",
"Any",
"=",
"None",
",",
"arg3",
":",
"Any",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"Union",
"[",
"Batch",
",",
"DataAsset",
"]",
":",
"api_version",
":",
"Optional",
"[",
"str",
"]",
"=",
"self",
".",
"_get_data_context_version",
"(",
"arg1",
"=",
"arg1",
",",
"*",
"*",
"kwargs",
")",
"if",
"api_version",
"==",
"\"v3\"",
":",
"if",
"\"datasource_name\"",
"in",
"kwargs",
":",
"datasource_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"datasource_name\"",
",",
"None",
")",
"else",
":",
"datasource_name",
"=",
"arg1",
"if",
"\"data_connector_name\"",
"in",
"kwargs",
":",
"data_connector_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"data_connector_name\"",
",",
"None",
")",
"else",
":",
"data_connector_name",
"=",
"arg2",
"if",
"\"data_asset_name\"",
"in",
"kwargs",
":",
"data_asset_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"data_asset_name\"",
",",
"None",
")",
"else",
":",
"data_asset_name",
"=",
"arg3",
"return",
"self",
".",
"_get_batch_v3",
"(",
"datasource_name",
"=",
"datasource_name",
",",
"data_connector_name",
"=",
"data_connector_name",
",",
"data_asset_name",
"=",
"data_asset_name",
",",
"*",
"*",
"kwargs",
",",
")",
"if",
"\"batch_kwargs\"",
"in",
"kwargs",
":",
"batch_kwargs",
"=",
"kwargs",
".",
"get",
"(",
"\"batch_kwargs\"",
",",
"None",
")",
"else",
":",
"batch_kwargs",
"=",
"arg1",
"if",
"\"expectation_suite_name\"",
"in",
"kwargs",
":",
"expectation_suite_name",
"=",
"kwargs",
".",
"get",
"(",
"\"expectation_suite_name\"",
",",
"None",
")",
"else",
":",
"expectation_suite_name",
"=",
"arg2",
"if",
"\"data_asset_type\"",
"in",
"kwargs",
":",
"data_asset_type",
"=",
"kwargs",
".",
"get",
"(",
"\"data_asset_type\"",
",",
"None",
")",
"else",
":",
"data_asset_type",
"=",
"arg3",
"batch_parameters",
"=",
"kwargs",
".",
"get",
"(",
"\"batch_parameters\"",
")",
"return",
"self",
".",
"_get_batch_v2",
"(",
"batch_kwargs",
"=",
"batch_kwargs",
",",
"expectation_suite_name",
"=",
"expectation_suite_name",
",",
"data_asset_type",
"=",
"data_asset_type",
",",
"batch_parameters",
"=",
"batch_parameters",
",",
")"
] | [
1431,
4
] | [
1497,
9
] | python | en | ['en', 'en', 'en'] | True |
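A sketch of the two call shapes that `get_batch` dispatches on, reusing the `context` from the first sketch; all names are illustrative:

# V3 call: positional args map to (datasource_name, data_connector_name, data_asset_name)
batch = context.get_batch(
    "my_v3_datasource", "my_data_connector", "my_data_asset",
)

# Legacy V2 call: positional args map to (batch_kwargs, expectation_suite_name, data_asset_type)
data_asset = context.get_batch(
    {"datasource": "my_v2_datasource", "path": "data/my_file.csv"},
    "my_suite",
)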
get_batch_list | (
self,
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[Union[IDDict, dict]] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) | Get the list of zero or more batches, based on a variety of flexible input types.
This method applies only to the new (V3) Datasource schema.
Args:
datasource_name
data_connector_name
data_asset_name
batch_request
batch_data
query
path
runtime_parameters
data_connector_query
batch_identifiers
batch_filter_parameters
limit
index
custom_filter_function
sampling_method
sampling_kwargs
splitter_method
splitter_kwargs
batch_spec_passthrough
**kwargs
Returns:
(List[Batch]) The requested list of batches
Like `get_batch`, this is a main user-facing API for getting batches.
In contrast to virtually all other methods in the class, it does not require typed or nested inputs.
Instead, this method is intended to help the user pick the right parameters.
This method attempts to return any number of batches, including an empty list.
| Get the list of zero or more batches, based on a variety of flexible input types.
This method applies only to the new (V3) Datasource schema. | def get_batch_list(
self,
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[Union[IDDict, dict]] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) -> List[Batch]:
"""Get the list of zero or more batches, based on a variety of flexible input types.
This method applies only to the new (V3) Datasource schema.
Args:
datasource_name
data_connector_name
data_asset_name
batch_request
batch_data
query
path
runtime_parameters
data_connector_query
batch_identifiers
batch_filter_parameters
limit
index
custom_filter_function
sampling_method
sampling_kwargs
splitter_method
splitter_kwargs
batch_spec_passthrough
**kwargs
Returns:
(List[Batch]) The requested list of batches
Like `get_batch`, this is a main user-facing API for getting batches.
In contrast to virtually all other methods in the class, it does not require typed or nested inputs.
Instead, this method is intended to help the user pick the right parameters.
This method attempts to return any number of batches, including an empty list.
"""
datasource_name: str
if batch_request:
if not isinstance(batch_request, BatchRequest):
raise TypeError(
f"batch_request must be an instance of BatchRequest object, not {type(batch_request)}"
)
datasource_name = batch_request.datasource_name
datasource: Datasource = cast(Datasource, self.datasources[datasource_name])
if len([arg for arg in [batch_data, query, path] if arg is not None]) > 1:
raise ValueError("Must provide only one of batch_data, query, or path.")
if any(
[
batch_data is not None
and runtime_parameters
and "batch_data" in runtime_parameters,
query and runtime_parameters and "query" in runtime_parameters,
path and runtime_parameters and "path" in runtime_parameters,
]
):
raise ValueError(
"If batch_data, query, or path arguments are provided, the same keys cannot appear in the "
"runtime_parameters argument."
)
if batch_request:
# TODO: Raise a warning if any parameters besides batch_requests are specified
return datasource.get_batch_list_from_batch_request(
batch_request=batch_request
)
elif any([batch_data is not None, query, path, runtime_parameters]):
runtime_parameters = runtime_parameters or {}
if batch_data is not None:
runtime_parameters["batch_data"] = batch_data
elif query is not None:
runtime_parameters["query"] = query
elif path is not None:
runtime_parameters["path"] = path
if batch_identifiers is None:
batch_identifiers = kwargs
else:
# Raise a warning if kwargs exist
pass
batch_request = RuntimeBatchRequest(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
batch_spec_passthrough=batch_spec_passthrough,
runtime_parameters=runtime_parameters,
batch_identifiers=batch_identifiers,
)
else:
if data_connector_query is None:
if (
batch_filter_parameters is not None
and batch_identifiers is not None
):
raise ValueError(
'Must provide either "batch_filter_parameters" or "batch_identifiers", not both.'
)
elif batch_filter_parameters is None and batch_identifiers is not None:
logger.warning(
'Attempting to build data_connector_query but "batch_identifiers" was provided '
'instead of "batch_filter_parameters". The "batch_identifiers" key on '
'data_connector_query has been renamed to "batch_filter_parameters". Please update '
'your code. Falling back on provided "batch_identifiers".'
)
batch_filter_parameters = batch_identifiers
elif batch_filter_parameters is None and batch_identifiers is None:
batch_filter_parameters = kwargs
else:
# Raise a warning if kwargs exist
pass
data_connector_query_params: dict = {
"batch_filter_parameters": batch_filter_parameters,
"limit": limit,
"index": index,
"custom_filter_function": custom_filter_function,
}
data_connector_query = IDDict(data_connector_query_params)
else:
# Raise a warning if batch_filter_parameters or kwargs exist
data_connector_query = IDDict(data_connector_query)
if batch_spec_passthrough is None:
batch_spec_passthrough = {}
if sampling_method is not None:
sampling_params: dict = {
"sampling_method": sampling_method,
}
if sampling_kwargs is not None:
sampling_params["sampling_kwargs"] = sampling_kwargs
batch_spec_passthrough.update(sampling_params)
if splitter_method is not None:
splitter_params: dict = {
"splitter_method": splitter_method,
}
if splitter_kwargs is not None:
splitter_params["splitter_kwargs"] = splitter_kwargs
batch_spec_passthrough.update(splitter_params)
batch_request: BatchRequest = BatchRequest(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
data_connector_query=data_connector_query,
batch_spec_passthrough=batch_spec_passthrough,
)
return datasource.get_batch_list_from_batch_request(batch_request=batch_request) | [
"def",
"get_batch_list",
"(",
"self",
",",
"datasource_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"data_connector_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"data_asset_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"*",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"BatchRequest",
",",
"RuntimeBatchRequest",
"]",
"]",
"=",
"None",
",",
"batch_data",
":",
"Optional",
"[",
"Any",
"]",
"=",
"None",
",",
"data_connector_query",
":",
"Optional",
"[",
"Union",
"[",
"IDDict",
",",
"dict",
"]",
"]",
"=",
"None",
",",
"batch_identifiers",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"limit",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"index",
":",
"Optional",
"[",
"Union",
"[",
"int",
",",
"list",
",",
"tuple",
",",
"slice",
",",
"str",
"]",
"]",
"=",
"None",
",",
"custom_filter_function",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
",",
"batch_spec_passthrough",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"sampling_method",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"sampling_kwargs",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"splitter_method",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"splitter_kwargs",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"runtime_parameters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"query",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_filter_parameters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
",",
")",
"->",
"List",
"[",
"Batch",
"]",
":",
"datasource_name",
":",
"str",
"if",
"batch_request",
":",
"if",
"not",
"isinstance",
"(",
"batch_request",
",",
"BatchRequest",
")",
":",
"raise",
"TypeError",
"(",
"f\"batch_request must be an instance of BatchRequest object, not {type(batch_request)}\"",
")",
"datasource_name",
"=",
"batch_request",
".",
"datasource_name",
"datasource",
":",
"Datasource",
"=",
"cast",
"(",
"Datasource",
",",
"self",
".",
"datasources",
"[",
"datasource_name",
"]",
")",
"if",
"len",
"(",
"[",
"arg",
"for",
"arg",
"in",
"[",
"batch_data",
",",
"query",
",",
"path",
"]",
"if",
"arg",
"is",
"not",
"None",
"]",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Must provide only one of batch_data, query, or path.\"",
")",
"if",
"any",
"(",
"[",
"batch_data",
"is",
"not",
"None",
"and",
"runtime_parameters",
"and",
"\"batch_data\"",
"in",
"runtime_parameters",
",",
"query",
"and",
"runtime_parameters",
"and",
"\"query\"",
"in",
"runtime_parameters",
",",
"path",
"and",
"runtime_parameters",
"and",
"\"path\"",
"in",
"runtime_parameters",
",",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"If batch_data, query, or path arguments are provided, the same keys cannot appear in the \"",
"\"runtime_parameters argument.\"",
")",
"if",
"batch_request",
":",
"# TODO: Raise a warning if any parameters besides batch_requests are specified",
"return",
"datasource",
".",
"get_batch_list_from_batch_request",
"(",
"batch_request",
"=",
"batch_request",
")",
"elif",
"any",
"(",
"[",
"batch_data",
"is",
"not",
"None",
",",
"query",
",",
"path",
",",
"runtime_parameters",
"]",
")",
":",
"runtime_parameters",
"=",
"runtime_parameters",
"or",
"{",
"}",
"if",
"batch_data",
"is",
"not",
"None",
":",
"runtime_parameters",
"[",
"\"batch_data\"",
"]",
"=",
"batch_data",
"elif",
"query",
"is",
"not",
"None",
":",
"runtime_parameters",
"[",
"\"query\"",
"]",
"=",
"query",
"elif",
"path",
"is",
"not",
"None",
":",
"runtime_parameters",
"[",
"\"path\"",
"]",
"=",
"path",
"if",
"batch_identifiers",
"is",
"None",
":",
"batch_identifiers",
"=",
"kwargs",
"else",
":",
"# Raise a warning if kwargs exist",
"pass",
"batch_request",
"=",
"RuntimeBatchRequest",
"(",
"datasource_name",
"=",
"datasource_name",
",",
"data_connector_name",
"=",
"data_connector_name",
",",
"data_asset_name",
"=",
"data_asset_name",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
"runtime_parameters",
"=",
"runtime_parameters",
",",
"batch_identifiers",
"=",
"batch_identifiers",
",",
")",
"else",
":",
"if",
"data_connector_query",
"is",
"None",
":",
"if",
"(",
"batch_filter_parameters",
"is",
"not",
"None",
"and",
"batch_identifiers",
"is",
"not",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Must provide either \"batch_filter_parameters\" or \"batch_identifiers\", not both.'",
")",
"elif",
"batch_filter_parameters",
"is",
"None",
"and",
"batch_identifiers",
"is",
"not",
"None",
":",
"logger",
".",
"warning",
"(",
"'Attempting to build data_connector_query but \"batch_identifiers\" was provided '",
"'instead of \"batch_filter_parameters\". The \"batch_identifiers\" key on '",
"'data_connector_query has been renamed to \"batch_filter_parameters\". Please update '",
"'your code. Falling back on provided \"batch_identifiers\".'",
")",
"batch_filter_parameters",
"=",
"batch_identifiers",
"elif",
"batch_filter_parameters",
"is",
"None",
"and",
"batch_identifiers",
"is",
"None",
":",
"batch_filter_parameters",
"=",
"kwargs",
"else",
":",
"# Raise a warning if kwargs exist",
"pass",
"data_connector_query_params",
":",
"dict",
"=",
"{",
"\"batch_filter_parameters\"",
":",
"batch_filter_parameters",
",",
"\"limit\"",
":",
"limit",
",",
"\"index\"",
":",
"index",
",",
"\"custom_filter_function\"",
":",
"custom_filter_function",
",",
"}",
"data_connector_query",
"=",
"IDDict",
"(",
"data_connector_query_params",
")",
"else",
":",
"# Raise a warning if batch_filter_parameters or kwargs exist",
"data_connector_query",
"=",
"IDDict",
"(",
"data_connector_query",
")",
"if",
"batch_spec_passthrough",
"is",
"None",
":",
"batch_spec_passthrough",
"=",
"{",
"}",
"if",
"sampling_method",
"is",
"not",
"None",
":",
"sampling_params",
":",
"dict",
"=",
"{",
"\"sampling_method\"",
":",
"sampling_method",
",",
"}",
"if",
"sampling_kwargs",
"is",
"not",
"None",
":",
"sampling_params",
"[",
"\"sampling_kwargs\"",
"]",
"=",
"sampling_kwargs",
"batch_spec_passthrough",
".",
"update",
"(",
"sampling_params",
")",
"if",
"splitter_method",
"is",
"not",
"None",
":",
"splitter_params",
":",
"dict",
"=",
"{",
"\"splitter_method\"",
":",
"splitter_method",
",",
"}",
"if",
"splitter_kwargs",
"is",
"not",
"None",
":",
"splitter_params",
"[",
"\"splitter_kwargs\"",
"]",
"=",
"splitter_kwargs",
"batch_spec_passthrough",
".",
"update",
"(",
"splitter_params",
")",
"batch_request",
":",
"BatchRequest",
"=",
"BatchRequest",
"(",
"datasource_name",
"=",
"datasource_name",
",",
"data_connector_name",
"=",
"data_connector_name",
",",
"data_asset_name",
"=",
"data_asset_name",
",",
"data_connector_query",
"=",
"data_connector_query",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
")",
"return",
"datasource",
".",
"get_batch_list_from_batch_request",
"(",
"batch_request",
"=",
"batch_request",
")"
] | [
1499,
4
] | [
1678,
88
] | python | en | ['en', 'en', 'en'] | True |
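A sketch of `get_batch_list` with both a typed `BatchRequest` and the flat-argument form, reusing the `context` from the first sketch; note that in the flat form, leftover keyword arguments become `batch_filter_parameters`, per the fallback logic above. Names are illustrative:

from great_expectations.core.batch import BatchRequest

request = BatchRequest(
    datasource_name="my_v3_datasource",
    data_connector_name="my_data_connector",
    data_asset_name="my_data_asset",
)
batches = context.get_batch_list(batch_request=request)

# Flat form; year="2020" lands in batch_filter_parameters:
batches = context.get_batch_list(
    "my_v3_datasource", "my_data_connector", "my_data_asset", year="2020",
)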
get_validator | (
self,
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_request_list: List[
Optional[Union[BatchRequest, RuntimeBatchRequest]]
] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[Union[IDDict, dict]] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
expectation_suite_name: Optional[str] = None,
expectation_suite: Optional[ExpectationSuite] = None,
create_expectation_suite_with_name: Optional[str] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) |
This method applies only to the new (V3) Datasource schema.
|
This method applies only to the new (V3) Datasource schema.
| def get_validator(
self,
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_request_list: List[
Optional[Union[BatchRequest, RuntimeBatchRequest]]
] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[Union[IDDict, dict]] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
expectation_suite_name: Optional[str] = None,
expectation_suite: Optional[ExpectationSuite] = None,
create_expectation_suite_with_name: Optional[str] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) -> Validator:
"""
This method applies only to the new (V3) Datasource schema.
"""
if (
sum(
bool(x)
for x in [
expectation_suite is not None,
expectation_suite_name is not None,
create_expectation_suite_with_name is not None,
]
)
!= 1
):
raise ValueError(
"Exactly one of expectation_suite_name, expectation_suite, or create_expectation_suite_with_name must be specified"
)
if expectation_suite_name is not None:
expectation_suite = self.get_expectation_suite(expectation_suite_name)
if create_expectation_suite_with_name is not None:
expectation_suite = self.create_expectation_suite(
expectation_suite_name=create_expectation_suite_with_name
)
if (
sum(
bool(x)
for x in [batch_request is not None, batch_request_list is not None]
)
> 1
):
raise ValueError(
"Only one of batch_request or batch_request_list may be specified"
)
if not batch_request_list:
batch_request_list = [batch_request]
batch_list: List = []
for batch_request in batch_request_list:
batch_list.extend(
self.get_batch_list(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
batch_request=batch_request,
batch_data=batch_data,
data_connector_query=data_connector_query,
batch_identifiers=batch_identifiers,
limit=limit,
index=index,
custom_filter_function=custom_filter_function,
batch_spec_passthrough=batch_spec_passthrough,
sampling_method=sampling_method,
sampling_kwargs=sampling_kwargs,
splitter_method=splitter_method,
splitter_kwargs=splitter_kwargs,
runtime_parameters=runtime_parameters,
query=query,
path=path,
batch_filter_parameters=batch_filter_parameters,
**kwargs,
)
)
# We get a single batch_definition so we can get the execution_engine here. All batches will share the same one
# So the batch itself doesn't matter. But we use -1 because that will be the latest batch loaded.
batch_definition = batch_list[-1].batch_definition
execution_engine = self.datasources[
batch_definition.datasource_name
].execution_engine
validator = Validator(
execution_engine=execution_engine,
interactive_evaluation=True,
expectation_suite=expectation_suite,
data_context=self,
batches=batch_list,
)
return validator | [
"def",
"get_validator",
"(",
"self",
",",
"datasource_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"data_connector_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"data_asset_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"*",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"BatchRequest",
",",
"RuntimeBatchRequest",
"]",
"]",
"=",
"None",
",",
"batch_request_list",
":",
"List",
"[",
"Optional",
"[",
"Union",
"[",
"BatchRequest",
",",
"RuntimeBatchRequest",
"]",
"]",
"]",
"=",
"None",
",",
"batch_data",
":",
"Optional",
"[",
"Any",
"]",
"=",
"None",
",",
"data_connector_query",
":",
"Optional",
"[",
"Union",
"[",
"IDDict",
",",
"dict",
"]",
"]",
"=",
"None",
",",
"batch_identifiers",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"limit",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"index",
":",
"Optional",
"[",
"Union",
"[",
"int",
",",
"list",
",",
"tuple",
",",
"slice",
",",
"str",
"]",
"]",
"=",
"None",
",",
"custom_filter_function",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
",",
"expectation_suite_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"expectation_suite",
":",
"Optional",
"[",
"ExpectationSuite",
"]",
"=",
"None",
",",
"create_expectation_suite_with_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_spec_passthrough",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"sampling_method",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"sampling_kwargs",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"splitter_method",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"splitter_kwargs",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"runtime_parameters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"query",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_filter_parameters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
",",
")",
"->",
"Validator",
":",
"if",
"(",
"sum",
"(",
"bool",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"expectation_suite",
"is",
"not",
"None",
",",
"expectation_suite_name",
"is",
"not",
"None",
",",
"create_expectation_suite_with_name",
"is",
"not",
"None",
",",
"]",
")",
"!=",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Exactly one of expectation_suite_name, expectation_suite, or create_expectation_suite_with_name must be specified\"",
")",
"if",
"expectation_suite_name",
"is",
"not",
"None",
":",
"expectation_suite",
"=",
"self",
".",
"get_expectation_suite",
"(",
"expectation_suite_name",
")",
"if",
"create_expectation_suite_with_name",
"is",
"not",
"None",
":",
"expectation_suite",
"=",
"self",
".",
"create_expectation_suite",
"(",
"expectation_suite_name",
"=",
"create_expectation_suite_with_name",
")",
"if",
"(",
"sum",
"(",
"bool",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"batch_request",
"is",
"not",
"None",
",",
"batch_request_list",
"is",
"not",
"None",
"]",
")",
">",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Only one of batch_request or batch_request_list may be specified\"",
")",
"if",
"not",
"batch_request_list",
":",
"batch_request_list",
"=",
"[",
"batch_request",
"]",
"batch_list",
":",
"List",
"=",
"[",
"]",
"for",
"batch_request",
"in",
"batch_request_list",
":",
"batch_list",
".",
"extend",
"(",
"self",
".",
"get_batch_list",
"(",
"datasource_name",
"=",
"datasource_name",
",",
"data_connector_name",
"=",
"data_connector_name",
",",
"data_asset_name",
"=",
"data_asset_name",
",",
"batch_request",
"=",
"batch_request",
",",
"batch_data",
"=",
"batch_data",
",",
"data_connector_query",
"=",
"data_connector_query",
",",
"batch_identifiers",
"=",
"batch_identifiers",
",",
"limit",
"=",
"limit",
",",
"index",
"=",
"index",
",",
"custom_filter_function",
"=",
"custom_filter_function",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
"sampling_method",
"=",
"sampling_method",
",",
"sampling_kwargs",
"=",
"sampling_kwargs",
",",
"splitter_method",
"=",
"splitter_method",
",",
"splitter_kwargs",
"=",
"splitter_kwargs",
",",
"runtime_parameters",
"=",
"runtime_parameters",
",",
"query",
"=",
"query",
",",
"path",
"=",
"path",
",",
"batch_filter_parameters",
"=",
"batch_filter_parameters",
",",
"*",
"*",
"kwargs",
",",
")",
")",
"# We get a single batch_definition so we can get the execution_engine here. All batches will share the same one",
"# So the batch itself doesn't matter. But we use -1 because that will be the latest batch loaded.",
"batch_definition",
"=",
"batch_list",
"[",
"-",
"1",
"]",
".",
"batch_definition",
"execution_engine",
"=",
"self",
".",
"datasources",
"[",
"batch_definition",
".",
"datasource_name",
"]",
".",
"execution_engine",
"validator",
"=",
"Validator",
"(",
"execution_engine",
"=",
"execution_engine",
",",
"interactive_evaluation",
"=",
"True",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"data_context",
"=",
"self",
",",
"batches",
"=",
"batch_list",
",",
")",
"return",
"validator"
] | [
1680,
4
] | [
1792,
24
] | python | en | ['en', 'error', 'th'] | False |
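A sketch of `get_validator`, which requires exactly one of the three expectation-suite arguments, per the check above; names are illustrative and the column is hypothetical:

validator = context.get_validator(
    datasource_name="my_v3_datasource",
    data_connector_name="my_data_connector",
    data_asset_name="my_data_asset",
    # exactly one of create_expectation_suite_with_name=, expectation_suite_name=,
    # or expectation_suite= must be given
    create_expectation_suite_with_name="my_new_suite",
)
validator.expect_column_values_to_not_be_null("id")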
add_datasource | (
self, name, initialize=True, **kwargs
) | Add a new datasource to the data context, with configuration provided as kwargs.
Args:
name: the name for the new datasource to add
initialize: if False, add the datasource to the config, but do not
initialize it, for example if a user needs to debug database connectivity.
kwargs (keyword arguments): the configuration for the new datasource
Returns:
datasource (Datasource)
| Add a new datasource to the data context, with configuration provided as kwargs.
Args:
name: the name for the new datasource to add
initialize: if False, add the datasource to the config, but do not
initialize it, for example if a user needs to debug database connectivity.
kwargs (keyword arguments): the configuration for the new datasource | def add_datasource(
self, name, initialize=True, **kwargs
) -> Optional[Dict[str, Union[LegacyDatasource, BaseDatasource]]]:
"""Add a new datasource to the data context, with configuration provided as kwargs.
Args:
name: the name for the new datasource to add
initialize: if False, add the datasource to the config, but do not
initialize it, for example if a user needs to debug database connectivity.
kwargs (keyword arguments): the configuration for the new datasource
Returns:
datasource (Datasource)
"""
logger.debug("Starting BaseDataContext.add_datasource for %s" % name)
module_name = kwargs.get("module_name", "great_expectations.datasource")
verify_dynamic_loading_support(module_name=module_name)
class_name = kwargs.get("class_name")
datasource_class = load_class(module_name=module_name, class_name=class_name)
# For any class that should be loaded, it may control its configuration construction
# by implementing a classmethod called build_configuration
config: Union[CommentedMap, dict]
if hasattr(datasource_class, "build_configuration"):
config = datasource_class.build_configuration(**kwargs)
else:
config = kwargs
return self._instantiate_datasource_from_config_and_update_project_config(
name=name,
config=config,
initialize=initialize,
) | [
"def",
"add_datasource",
"(",
"self",
",",
"name",
",",
"initialize",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"Union",
"[",
"LegacyDatasource",
",",
"BaseDatasource",
"]",
"]",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Starting BaseDataContext.add_datasource for %s\"",
"%",
"name",
")",
"module_name",
"=",
"kwargs",
".",
"get",
"(",
"\"module_name\"",
",",
"\"great_expectations.datasource\"",
")",
"verify_dynamic_loading_support",
"(",
"module_name",
"=",
"module_name",
")",
"class_name",
"=",
"kwargs",
".",
"get",
"(",
"\"class_name\"",
")",
"datasource_class",
"=",
"load_class",
"(",
"module_name",
"=",
"module_name",
",",
"class_name",
"=",
"class_name",
")",
"# For any class that should be loaded, it may control its configuration construction",
"# by implementing a classmethod called build_configuration",
"config",
":",
"Union",
"[",
"CommentedMap",
",",
"dict",
"]",
"if",
"hasattr",
"(",
"datasource_class",
",",
"\"build_configuration\"",
")",
":",
"config",
"=",
"datasource_class",
".",
"build_configuration",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"config",
"=",
"kwargs",
"return",
"self",
".",
"_instantiate_datasource_from_config_and_update_project_config",
"(",
"name",
"=",
"name",
",",
"config",
"=",
"config",
",",
"initialize",
"=",
"initialize",
",",
")"
] | [
1803,
4
] | [
1835,
9
] | python | en | ['en', 'en', 'en'] | True |
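A sketch of `add_datasource`; `PandasDatasource` is a legacy class shipped in the default `great_expectations.datasource` module, and classes like it supply the `build_configuration` classmethod that the implementation above checks for:

context.add_datasource(
    "my_pandas_datasource",
    class_name="PandasDatasource",  # module_name defaults to "great_expectations.datasource"
)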