| Column | Type |
| --- | --- |
| identifier | string (length 1–155) |
| parameters | string (length 2–6.09k) |
| docstring | string (length 11–63.4k) |
| docstring_summary | string (length 0–63.4k) |
| function | string (length 29–99.8k) |
| function_tokens | sequence of strings |
| start_point | sequence (line, column) |
| end_point | sequence (line, column) |
| language | string (1 class) |
| docstring_language | string (length 2–7) |
| docstring_language_predictions | string (length 18–23) |
| is_langid_reliable | string (2 values) |

Each record below lists these twelve columns in order, separated by `|`.
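Records in this layout can be consumed programmatically; a minimal sketch of reading one record with the `datasets` library follows. The dataset path `"org/code-docstring-corpus"` is a hypothetical placeholder, not this dataset's real identifier.

```python
from datasets import load_dataset

# Hypothetical dataset path - substitute the real one for this corpus.
ds = load_dataset("org/code-docstring-corpus", split="train")

row = ds[0]
print(row["identifier"])                      # e.g. "Time._deserialize"
print(row["docstring_summary"])               # one-line version of the docstring
print(row["start_point"], row["end_point"])   # (line, column) span in the source file
```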
Time._deserialize | (self, value, attr, data, **kwargs) | Deserialize an ISO8601-formatted time to a :class:`datetime.time` object. | Deserialize an ISO8601-formatted time to a :class:`datetime.time` object. | def _deserialize(self, value, attr, data, **kwargs):
"""Deserialize an ISO8601-formatted time to a :class:`datetime.time` object."""
if not value: # falsy values are invalid
raise self.make_error("invalid")
try:
return utils.from_iso_time(value)
except (AttributeError, TypeError, ValueError) as error:
raise self.make_error("invalid") from error | [
"def",
"_deserialize",
"(",
"self",
",",
"value",
",",
"attr",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"value",
":",
"# falsy values are invalid",
"raise",
"self",
".",
"make_error",
"(",
"\"invalid\"",
")",
"try",
":",
"return",
"utils",
".",
"from_iso_time",
"(",
"value",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
",",
"ValueError",
")",
"as",
"error",
":",
"raise",
"self",
".",
"make_error",
"(",
"\"invalid\"",
")",
"from",
"error"
] | [
1311,
4
] | [
1318,
55
] | python | en | ['en', 'en', 'en'] | True |
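This first record is marshmallow's `Time` field. For context, a short sketch of the same behavior through the public `Field.deserialize` API — assuming a recent marshmallow 3.x release; the exact error text in the comment is illustrative and may vary by version:

```python
import datetime
from marshmallow import ValidationError, fields

time_field = fields.Time()

# A valid ISO8601 time string round-trips to a datetime.time object.
assert time_field.deserialize("14:30:05") == datetime.time(14, 30, 5)

try:
    time_field.deserialize("")  # falsy values are invalid, per the code above
except ValidationError as err:
    print(err.messages)  # e.g. ['Not a valid time.']
```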
test_suite_demo_on_context_with_no_datasources | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context
) |
We call the "suite demo" command on a data context that has no datasources
configured.
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
|
We call the "suite demo" command on a data context that has no datasources
configured. | def test_suite_demo_on_context_with_no_datasources(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
We call the "suite demo" command on a data context that has no datasources
configured.
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_root_dir = empty_data_context.root_directory
root_dir = project_root_dir
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 1
assert "No datasources found in the context" in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_demo_on_context_with_no_datasources",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"root_dir",
"=",
"project_root_dir",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"No datasources found in the context\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
38,
0
] | [
70,
5
] | python | en | ['en', 'error', 'th'] | False |
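The `mock_webbrowser` and `mock_subprocess` arguments in these records are injected by `mock.patch` decorators that sit above each test in the original file, outside the extracted `function` span. A sketch of that wiring under stated assumptions — the patch targets below are illustrative, not the suite's actual ones, and the `great_expectations.cli` import mirrors what the surrounding tests appear to use:

```python
from unittest import mock

from click.testing import CliRunner
from great_expectations.cli import cli  # assumed import, as in the tests above

# Decorators apply bottom-up, so the innermost patch ("webbrowser.open")
# becomes the first mock argument. Real targets would point at whatever
# modules the GE CLI actually imports webbrowser/subprocess from.
@mock.patch("subprocess.call")
@mock.patch("webbrowser.open")
def test_demo_with_no_datasources(mock_webbrowser, mock_subprocess, tmp_path):
    result = CliRunner(mix_stderr=False).invoke(
        cli, ["suite", "demo", "-d", str(tmp_path)]
    )
    # Neither Data Docs nor jupyter should have been opened.
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
```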
test_suite_demo_enter_existing_suite_name_as_arg | (
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
) |
We call the "suite demo" command with the name of an existing expectation
suite in the --suite argument
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
|
We call the "suite demo" command with the name of an existing expectation
suite in the --suite argument | def test_suite_demo_enter_existing_suite_name_as_arg(
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
):
"""
We call the "suite demo" command with the name of an existing expectation
suite in the --suite argument
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
not_so_empty_data_context = data_context_parameterized_expectation_suite
project_root_dir = not_so_empty_data_context.root_directory
os.mkdir(os.path.join(project_root_dir, "uncommitted"))
context = DataContext(project_root_dir)
existing_suite_name = "my_dag_node.default"
assert context.list_expectation_suite_names() == [existing_suite_name]
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"demo",
"-d",
project_root_dir,
"--suite",
existing_suite_name,
"--no-view",
],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 1
assert (
f"An expectation suite named `{existing_suite_name}` already exists." in stdout
)
assert (
f"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`"
in stdout
)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_demo_enter_existing_suite_name_as_arg",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
")",
":",
"not_so_empty_data_context",
"=",
"data_context_parameterized_expectation_suite",
"project_root_dir",
"=",
"not_so_empty_data_context",
".",
"root_directory",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_root_dir",
",",
"\"uncommitted\"",
")",
")",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"existing_suite_name",
"=",
"\"my_dag_node.default\"",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"existing_suite_name",
"]",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"project_root_dir",
",",
"\"--suite\"",
",",
"existing_suite_name",
",",
"\"--no-view\"",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"(",
"f\"An expectation suite named `{existing_suite_name}` already exists.\"",
"in",
"stdout",
")",
"assert",
"(",
"f\"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`\"",
"in",
"stdout",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
75,
0
] | [
131,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite | (
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
) |
We call the "suite demo" command without the suite name argument
The command should:
- prompt us to enter the name of the expectation suite that will be
created. We answer the prompt with the name of an existing expectation suite.
- display an error message and let us retry until we answer
with a name that is not "taken".
- create an example suite
- NOT open jupyter
- open DataDocs to the new example suite page
|
We call the "suite demo" command without the suite name argument | def test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite(
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
):
"""
We call the "suite demo" command without the suite name argument
The command should:
- prompt us to enter the name of the expectation suite that will be
created. We answer the prompt with the name of an existing expectation suite.
- display an error message and let us retry until we answer
with a name that is not "taken".
- create an example suite
- NOT open jupyter
- open DataDocs to the new example suite page
"""
not_so_empty_data_context = data_context_parameterized_expectation_suite
root_dir = not_so_empty_data_context.root_directory
os.mkdir(os.path.join(root_dir, "uncommitted"))
runner = CliRunner(mix_stderr=False)
csv_path = os.path.join(filesystem_csv_2, "f1.csv")
existing_suite_name = "my_dag_node.default"
context = DataContext(root_dir)
assert context.list_expectation_suite_names() == [existing_suite_name]
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input=f"{csv_path}\n{existing_suite_name}\nmy_new_suite\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert (
f"An expectation suite named `{existing_suite_name}` already exists." in stdout
)
assert (
f"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`"
in stdout
)
assert "Enter the path" in stdout
assert "Name the new Expectation Suite [f1.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here"
in stdout
)
assert "open a notebook for you now" not in stdout
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_subprocess.call_count == 0
assert mock_webbrowser.call_count == 1
foo = os.path.join(
root_dir, "uncommitted/data_docs/local_site/validations/my_new_suite/"
)
assert f"file://{foo}" in mock_webbrowser.call_args[0][0]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
"filesystem_csv_2",
",",
")",
":",
"not_so_empty_data_context",
"=",
"data_context_parameterized_expectation_suite",
"root_dir",
"=",
"not_so_empty_data_context",
".",
"root_directory",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
")",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"existing_suite_name",
"=",
"\"my_dag_node.default\"",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"existing_suite_name",
"]",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"f\"{csv_path}\\n{existing_suite_name}\\nmy_new_suite\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"f\"An expectation suite named `{existing_suite_name}` already exists.\"",
"in",
"stdout",
")",
"assert",
"(",
"f\"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`\"",
"in",
"stdout",
")",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new Expectation Suite [f1.warning]\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here\"",
"in",
"stdout",
")",
"assert",
"\"open a notebook for you now\"",
"not",
"in",
"stdout",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"foo",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted/data_docs/local_site/validations/my_new_suite/\"",
")",
"assert",
"f\"file://{foo}\"",
"in",
"mock_webbrowser",
".",
"call_args",
"[",
"0",
"]",
"[",
"0",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
136,
0
] | [
212,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_new_creates_empty_suite | (
mock_webbroser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
) |
Running "suite new" should:
- make an empty suite
- open jupyter
- NOT open data docs
|
Running "suite new" should:
- make an empty suite
- open jupyter
- NOT open data docs
| def test_suite_new_creates_empty_suite(
mock_webbroser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
):
"""
Running "suite new" should:
- make an empty suite
- open jupyter
- NOT open data docs
"""
project_root_dir = data_context_parameterized_expectation_suite.root_directory
os.mkdir(os.path.join(project_root_dir, "uncommitted"))
root_dir = project_root_dir
os.chdir(root_dir)
runner = CliRunner(mix_stderr=False)
csv = os.path.join(filesystem_csv_2, "f1.csv")
result = runner.invoke(
cli,
["suite", "new", "-d", root_dir, "--suite", "foo"],
input=f"{csv}\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Enter the path" in stdout
assert "Name the new expectation suite" not in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
not in stdout
)
assert "Generating example Expectation Suite..." not in stdout
assert "The following Data Docs sites were built" not in stdout
assert (
"Great Expectations will create a new Expectation Suite 'foo' and store it here"
in stdout
)
assert (
"Because you requested an empty suite, we'll open a notebook for you now to edit it!"
in stdout
)
expected_suite_path = os.path.join(root_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
expected_notebook = os.path.join(root_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook)
context = DataContext(root_dir)
assert "foo" in context.list_expectation_suite_names()
suite = context.get_expectation_suite("foo")
assert suite.expectations == []
citations = suite.get_citations()
citations[0].pop("citation_date", None)
citations[0].pop("interactive", None)
assert filter_properties_dict(properties=citations[0], clean_falsy=True) == {
"batch_kwargs": {
"data_asset_name": "f1",
"datasource": "mydatasource",
"path": csv,
"reader_method": "read_csv",
},
"comment": "New suite added via CLI",
}
assert mock_subprocess.call_count == 1
call_args = mock_subprocess.call_args[0][0]
assert call_args[0] == "jupyter"
assert call_args[1] == "notebook"
assert expected_notebook in call_args[2]
assert mock_webbroser.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_new_creates_empty_suite",
"(",
"mock_webbroser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
"filesystem_csv_2",
",",
")",
":",
"project_root_dir",
"=",
"data_context_parameterized_expectation_suite",
".",
"root_directory",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_root_dir",
",",
"\"uncommitted\"",
")",
")",
"root_dir",
"=",
"project_root_dir",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"new\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo\"",
"]",
",",
"input",
"=",
"f\"{csv}\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new expectation suite\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"not",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"not",
"in",
"stdout",
"assert",
"\"The following Data Docs sites were built\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will create a new Expectation Suite 'foo' and store it here\"",
"in",
"stdout",
")",
"assert",
"(",
"\"Because you requested an empty suite, we'll open a notebook for you now to edit it!\"",
"in",
"stdout",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"expected_notebook",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"assert",
"\"foo\"",
"in",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo\"",
")",
"assert",
"suite",
".",
"expectations",
"==",
"[",
"]",
"citations",
"=",
"suite",
".",
"get_citations",
"(",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"citation_date\"",
",",
"None",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"interactive\"",
",",
"None",
")",
"assert",
"filter_properties_dict",
"(",
"properties",
"=",
"citations",
"[",
"0",
"]",
",",
"clean_falsy",
"=",
"True",
")",
"==",
"{",
"\"batch_kwargs\"",
":",
"{",
"\"data_asset_name\"",
":",
"\"f1\"",
",",
"\"datasource\"",
":",
"\"mydatasource\"",
",",
"\"path\"",
":",
"csv",
",",
"\"reader_method\"",
":",
"\"read_csv\"",
",",
"}",
",",
"\"comment\"",
":",
"\"New suite added via CLI\"",
",",
"}",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"call_args",
"=",
"mock_subprocess",
".",
"call_args",
"[",
"0",
"]",
"[",
"0",
"]",
"assert",
"call_args",
"[",
"0",
"]",
"==",
"\"jupyter\"",
"assert",
"call_args",
"[",
"1",
"]",
"==",
"\"notebook\"",
"assert",
"expected_notebook",
"in",
"call_args",
"[",
"2",
"]",
"assert",
"mock_webbroser",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
217,
0
] | [
297,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_new_empty_with_no_jupyter | (
mock_webbroser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
) |
Running "suite new --no-jupyter" should:
- make an empty suite
- NOT open jupyter
- NOT open data docs
|
Running "suite new --no-jupyter" should:
- make an empty suite
- NOT open jupyter
- NOT open data docs
| def test_suite_new_empty_with_no_jupyter(
mock_webbroser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
):
"""
Running "suite new --no-jupyter" should:
- make an empty suite
- NOT open jupyter
- NOT open data docs
"""
os.mkdir(
os.path.join(
data_context_parameterized_expectation_suite.root_directory, "uncommitted"
)
)
root_dir = data_context_parameterized_expectation_suite.root_directory
runner = CliRunner(mix_stderr=False)
csv = os.path.join(filesystem_csv_2, "f1.csv")
result = runner.invoke(
cli,
["suite", "new", "-d", root_dir, "--suite", "foo", "--no-jupyter"],
input=f"{csv}\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Enter the path" in stdout
assert "Name the new expectation suite" not in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
not in stdout
)
assert "Generating example Expectation Suite..." not in stdout
assert "The following Data Docs sites were built" not in stdout
assert (
"Great Expectations will create a new Expectation Suite 'foo' and store it here"
in stdout
)
assert "open a notebook for you now" not in stdout
expected_suite_path = os.path.join(root_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
expected_notebook = os.path.join(root_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook)
context = DataContext(root_dir)
assert "foo" in context.list_expectation_suite_names()
suite = context.get_expectation_suite("foo")
assert suite.expectations == []
citations = suite.get_citations()
citations[0].pop("citation_date", None)
citations[0].pop("interactive", None)
assert filter_properties_dict(properties=citations[0], clean_falsy=True) == {
"batch_kwargs": {
"data_asset_name": "f1",
"datasource": "mydatasource",
"path": csv,
"reader_method": "read_csv",
},
"comment": "New suite added via CLI",
}
assert mock_subprocess.call_count == 0
assert mock_webbroser.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_new_empty_with_no_jupyter",
"(",
"mock_webbroser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
"filesystem_csv_2",
",",
")",
":",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_context_parameterized_expectation_suite",
".",
"root_directory",
",",
"\"uncommitted\"",
")",
")",
"root_dir",
"=",
"data_context_parameterized_expectation_suite",
".",
"root_directory",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"new\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo\"",
",",
"\"--no-jupyter\"",
"]",
",",
"input",
"=",
"f\"{csv}\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new expectation suite\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"not",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"not",
"in",
"stdout",
"assert",
"\"The following Data Docs sites were built\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will create a new Expectation Suite 'foo' and store it here\"",
"in",
"stdout",
")",
"assert",
"\"open a notebook for you now\"",
"not",
"in",
"stdout",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"expected_notebook",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"assert",
"\"foo\"",
"in",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo\"",
")",
"assert",
"suite",
".",
"expectations",
"==",
"[",
"]",
"citations",
"=",
"suite",
".",
"get_citations",
"(",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"citation_date\"",
",",
"None",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"interactive\"",
",",
"None",
")",
"assert",
"filter_properties_dict",
"(",
"properties",
"=",
"citations",
"[",
"0",
"]",
",",
"clean_falsy",
"=",
"True",
")",
"==",
"{",
"\"batch_kwargs\"",
":",
"{",
"\"data_asset_name\"",
":",
"\"f1\"",
",",
"\"datasource\"",
":",
"\"mydatasource\"",
",",
"\"path\"",
":",
"csv",
",",
"\"reader_method\"",
":",
"\"read_csv\"",
",",
"}",
",",
"\"comment\"",
":",
"\"New suite added via CLI\"",
",",
"}",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_webbroser",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
302,
0
] | [
376,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_demo_one_datasource_without_generator_without_suite_name_argument | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
) |
We call the "suite demo" command without the suite name argument
The command should:
- NOT prompt us to choose a datasource (because there is only one)
- prompt us only to enter the path (the datasource has no generator
configured, so there is no list of available data assets to choose from).
- We enter the path of the file we want the command to use as the batch to
create the expectation suite.
- prompt us to enter the name of the expectation suite that will be
created
- open Data Docs
- NOT open jupyter
|
We call the "suite demo" command without the suite name argument | def test_suite_demo_one_datasource_without_generator_without_suite_name_argument(
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
"""
We call the "suite demo" command without the suite name argument
The command should:
- NOT prompt us to choose a datasource (because there is only one)
- prompt us only to enter the path (the datasource has no generator
configured, so there is no list of available data assets to choose from).
- We enter the path of the file we want the command to use as the batch to
create the expectation suite.
- prompt us to enter the name of the expectation suite that will be
created
- open Data Docs
- NOT open jupyter
"""
empty_data_context.add_datasource(
"my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
)
context = empty_data_context
root_dir = context.root_directory
context = DataContext(root_dir)
runner = CliRunner(mix_stderr=False)
csv_path = os.path.join(filesystem_csv_2, "f1.csv")
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input=f"{csv_path}\nmy_new_suite\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Enter the path" in stdout
assert "Name the new Expectation Suite [f1.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
obs_urls = context.get_docs_sites_urls()
assert len(obs_urls) == 1
assert (
"great_expectations/uncommitted/data_docs/local_site/index.html"
in obs_urls[0]["site_url"]
)
expected_index_path = os.path.join(
root_dir, "uncommitted", "data_docs", "local_site", "index.html"
)
assert os.path.isfile(expected_index_path)
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 1
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_demo_one_datasource_without_generator_without_suite_name_argument",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
",",
"filesystem_csv_2",
")",
":",
"empty_data_context",
".",
"add_datasource",
"(",
"\"my_datasource\"",
",",
"module_name",
"=",
"\"great_expectations.datasource\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
",",
")",
"context",
"=",
"empty_data_context",
"root_dir",
"=",
"context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"f\"{csv_path}\\nmy_new_suite\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new Expectation Suite [f1.warning]\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"obs_urls",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"assert",
"len",
"(",
"obs_urls",
")",
"==",
"1",
"assert",
"(",
"\"great_expectations/uncommitted/data_docs/local_site/index.html\"",
"in",
"obs_urls",
"[",
"0",
"]",
"[",
"\"site_url\"",
"]",
")",
"expected_index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"data_docs\"",
",",
"\"local_site\"",
",",
"\"index.html\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_index_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
381,
0
] | [
456,
5
] | python | en | ['en', 'error', 'th'] | False |
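The `input=` strings in these records answer the CLI's interactive prompts, one line per prompt, with a bare newline accepting a prompt's default. A self-contained illustration of that mechanism with a toy Click command:

```python
import click
from click.testing import CliRunner

@click.command()
def ask():
    path = click.prompt("Enter the path")  # consumes the first input line
    name = click.prompt("Name the new Expectation Suite", default="f1.warning")
    click.echo(f"{path} -> {name}")

# "data/f1.csv" answers the first prompt; the trailing bare newline
# accepts the [f1.warning] default for the second.
result = CliRunner().invoke(ask, input="data/f1.csv\n\n")
assert "data/f1.csv -> f1.warning" in result.output
```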
test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
) |
We call the "suite demo" command without the suite name argument
- The data context has two datasources - we choose one of them.
- It has a generator configured. We choose to use the generator and select a
generator asset from the list.
- The command should prompt us to enter the name of the expectation suite
that will be created.
- open Data Docs
- NOT open jupyter
|
We call the "suite demo" command without the suite name argument | def test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
):
"""
We call the "suite demo" command without the suite name argument
- The data context has two datasources - we choose one of them.
- It has a generator configured. We choose to use the generator and select a
generator asset from the list.
- The command should prompt us to enter the name of the expectation suite
that will be created.
- open Data Docs
- NOT open jupyter
"""
root_dir = (
site_builder_data_context_v013_with_html_store_titanic_random.root_directory
)
os.chdir(root_dir)
context = DataContext(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input="1\n1\n1\nmy_new_suite\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert (
"""Select a datasource
1. mydatasource
2. random
3. titanic"""
in stdout
)
assert (
"""Which data would you like to use?
1. random (directory)
2. titanic (directory)"""
in stdout
)
assert "Name the new Expectation Suite [random.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
obs_urls = context.get_docs_sites_urls()
assert len(obs_urls) == 2
assert (
"great_expectations/uncommitted/data_docs/local_site/index.html"
in obs_urls[0]["site_url"]
)
expected_index_path = os.path.join(
root_dir, "uncommitted", "data_docs", "local_site", "index.html"
)
assert os.path.isfile(expected_index_path)
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_v013_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"(",
"site_builder_data_context_v013_with_html_store_titanic_random",
".",
"root_directory",
")",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"1\\n1\\n1\\nmy_new_suite\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"\"\"Select a datasource\n 1. mydatasource\n 2. random\n 3. titanic\"\"\"",
"in",
"stdout",
")",
"assert",
"(",
"\"\"\"Which data would you like to use?\n 1. random (directory)\n 2. titanic (directory)\"\"\"",
"in",
"stdout",
")",
"assert",
"\"Name the new Expectation Suite [random.warning]\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"obs_urls",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"assert",
"len",
"(",
"obs_urls",
")",
"==",
"2",
"assert",
"(",
"\"great_expectations/uncommitted/data_docs/local_site/index.html\"",
"in",
"obs_urls",
"[",
"0",
"]",
"[",
"\"site_url\"",
"]",
")",
"expected_index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"data_docs\"",
",",
"\"local_site\"",
",",
"\"index.html\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_index_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
461,
0
] | [
543,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
) |
We call the "suite demo" command with the suite name argument
- The data context has two datasources - we choose one of them.
- It has a generator configured. We choose to use the generator and select
a generator asset from the list.
- open Data Docs
- NOT open jupyter
|
We call the "suite demo" command with the suite name argument | def test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
):
"""
We call the "suite demo" command with the suite name argument
- The data context has two datasources - we choose one of them.
- It has a generator configured. We choose to use the generator and select
a generator asset from the list.
- open Data Docs
- NOT open jupyter
"""
root_dir = (
site_builder_data_context_v013_with_html_store_titanic_random.root_directory
)
os.chdir(root_dir)
context = DataContext(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="2\n1\n1\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Select a datasource" in stdout
assert "Which data would you like to use" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:"
in stdout
)
obs_urls = context.get_docs_sites_urls()
assert len(obs_urls) == 2
assert (
"great_expectations/uncommitted/data_docs/local_site/index.html"
in obs_urls[0]["site_url"]
)
expected_index_path = os.path.join(
root_dir, "uncommitted", "data_docs", "local_site", "index.html"
)
assert os.path.isfile(expected_index_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_v013_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"(",
"site_builder_data_context_v013_with_html_store_titanic_random",
".",
"root_directory",
")",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
"]",
",",
"input",
"=",
"\"2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Select a datasource\"",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:\"",
"in",
"stdout",
")",
"obs_urls",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"assert",
"len",
"(",
"obs_urls",
")",
"==",
"2",
"assert",
"(",
"\"great_expectations/uncommitted/data_docs/local_site/index.html\"",
"in",
"obs_urls",
"[",
"0",
"]",
"[",
"\"site_url\"",
"]",
")",
"expected_index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"data_docs\"",
",",
"\"local_site\"",
",",
"\"index.html\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_index_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
548,
0
] | [
615,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_without_suite_name_raises_error | () | This is really only testing Click's missing-argument handling | This is really only testing Click's missing-argument handling | def test_suite_edit_without_suite_name_raises_error():
"""This is really only testing click missing arguments"""
runner = CliRunner(mix_stderr=False)
result = runner.invoke(cli, "suite edit", catch_exceptions=False)
assert result.exit_code == 2
assert (
'Error: Missing argument "SUITE".' in result.stderr
or "Error: Missing argument 'SUITE'." in result.stderr
) | [
"def",
"test_suite_edit_without_suite_name_raises_error",
"(",
")",
":",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"\"suite edit\"",
",",
"catch_exceptions",
"=",
"False",
")",
"assert",
"result",
".",
"exit_code",
"==",
"2",
"assert",
"(",
"'Error: Missing argument \"SUITE\".'",
"in",
"result",
".",
"stderr",
"or",
"\"Error: Missing argument 'SUITE'.\"",
"in",
"result",
".",
"stderr",
")"
] | [
618,
0
] | [
626,
5
] | python | en | ['en', 'en', 'en'] | True |
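The two-branch assertion above exists because the quoting of the argument name in Click's usage error changed across Click releases. A toy reproduction of the underlying behavior:

```python
import click
from click.testing import CliRunner

@click.command()
@click.argument("suite")
def edit(suite):
    """Toy stand-in for `great_expectations suite edit SUITE`."""
    click.echo(f"editing {suite}")

result = CliRunner(mix_stderr=False).invoke(edit)  # no SUITE supplied
assert result.exit_code == 2                       # Click's usage-error exit code
assert "Missing argument" in result.stderr         # "SUITE" vs 'SUITE' varies by version
```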
test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context
) |
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
|
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
| def test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "foo", "-d", project_dir, "--batch-kwargs", "'{foobar}'"],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "Please check that your batch_kwargs are valid JSON." in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"\"'{foobar}'\"",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Please check that your batch_kwargs are valid JSON.\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
631,
0
] | [
660,
5
] | python | en | ['en', 'error', 'th'] | False |
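For reference, the failing `--batch-kwargs` value above, `'{foobar}'`, is not parseable JSON, which is exactly what the CLI's error message reports. A quick check with the standard library:

```python
import json

json.loads('{"table": "fake", "datasource": "source"}')  # valid: parses to a dict

try:
    json.loads("'{foobar}'")  # the literal value passed in the test above
except json.JSONDecodeError as err:
    print(err)  # e.g. "Expecting value: line 1 column 1 (char 0)"
```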
test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context
) |
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
|
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
| def test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
context.add_datasource("source", class_name="PandasDatasource")
runner = CliRunner(mix_stderr=False)
batch_kwargs = '{"table": "fake", "datasource": "source"}'
result = runner.invoke(
cli,
["suite", "edit", "foo", "-d", project_dir, "--batch-kwargs", batch_kwargs],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "To continue editing this suite" not in stdout
assert "Please check that your batch_kwargs are able to load a batch." in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"context",
".",
"add_datasource",
"(",
"\"source\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"batch_kwargs",
"=",
"'{\"table\": \"fake\", \"datasource\": \"source\"}'",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"batch_kwargs",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"To continue editing this suite\"",
"not",
"in",
"stdout",
"assert",
"\"Please check that your batch_kwargs are able to load a batch.\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
665,
0
] | [
698,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_with_non_existent_suite_name_raises_error | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context
) |
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
|
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
| def test_suite_edit_with_non_existent_suite_name_raises_error(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
assert not empty_data_context.list_expectation_suites()
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
"suite edit not_a_real_suite -d {}".format(project_dir),
catch_exceptions=False,
)
assert result.exit_code == 1
assert "Could not find a suite named `not_a_real_suite`." in result.output
assert "by running `great_expectations suite list`" in result.output
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_with_non_existent_suite_name_raises_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"assert",
"not",
"empty_data_context",
".",
"list_expectation_suites",
"(",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"\"suite edit not_a_real_suite -d {}\"",
".",
"format",
"(",
"project_dir",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Could not find a suite named `not_a_real_suite`.\"",
"in",
"result",
".",
"output",
"assert",
"\"by running `great_expectations suite list`\"",
"in",
"result",
".",
"output",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
703,
0
] | [
731,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_with_non_existent_datasource_shows_helpful_error_message | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context
) |
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
|
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
| def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
assert context.list_expectation_suites()[0].expectation_suite_name == "foo"
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite edit foo -d {project_dir} --datasource not_real",
catch_exceptions=False,
)
assert result.exit_code == 1
assert (
"Unable to load datasource `not_real` -- no configuration found or invalid configuration."
in result.output
)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_with_non_existent_datasource_shows_helpful_error_message",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"assert",
"context",
".",
"list_expectation_suites",
"(",
")",
"[",
"0",
"]",
".",
"expectation_suite_name",
"==",
"\"foo\"",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"suite edit foo -d {project_dir} --datasource not_real\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"(",
"\"Unable to load datasource `not_real` -- no configuration found or invalid configuration.\"",
"in",
"result",
".",
"output",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
736,
0
] | [
768,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
) |
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
First, we call the "suite new" command to create the expectation suite our test
will edit - this step is just setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has two datasources - we choose one of them. It has a generator
configured. We choose to use the generator and select a generator asset from the list.
The command should:
- NOT open Data Docs
- open jupyter
|
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch. | def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
):
"""
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
First, we call the "suite new" command to create the expectation suite our test
will edit - this step is just setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has two datasources - we choose one of them. It has a generator
configured. We choose to use the generator and select a generator asset from the list.
The command should:
- NOT open Data Docs
- open jupyter
"""
root_dir = (
site_builder_data_context_v013_with_html_store_titanic_random.root_directory
)
os.chdir(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
mock_webbrowser.reset_mock()
mock_subprocess.reset_mock()
# remove the citations from the suite
context = DataContext(root_dir)
suite = context.get_expectation_suite("foo_suite")
assert isinstance(suite, ExpectationSuite)
suite.meta.pop("citations", None)
context.save_expectation_suite(suite)
# Actual testing really starts here
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo_suite",
"-d",
root_dir,
],
input="2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "A batch of data is required to edit the suite" in stdout
assert "Select a datasource" in stdout
assert "Which data would you like to use" in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_v013_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"(",
"site_builder_data_context_v013_with_html_store_titanic_random",
".",
"root_directory",
")",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
"]",
",",
"input",
"=",
"\"2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"# remove the citations from the suite",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo_suite\"",
")",
"assert",
"isinstance",
"(",
"suite",
",",
"ExpectationSuite",
")",
"suite",
".",
"meta",
".",
"pop",
"(",
"\"citations\"",
",",
"None",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"# Actual testing really starts here",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo_suite\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"]",
",",
"input",
"=",
"\"2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"\"A batch of data is required to edit the suite\"",
"in",
"stdout",
"assert",
"\"Select a datasource\"",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
773,
0
] | [
856,
5
] | python | en | ['en', 'error', 'th'] | False |
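
A note on the interactive input used by these tests: CliRunner feeds the `input` string to the command one line per prompt, so "2\n1\n1\n\n" answers four prompts in order (here, the second datasource, then two menu selections, then an empty line to accept a default). A minimal, self-contained sketch of the same pattern against a hypothetical two-prompt Click command (the command and prompt texts are illustrative, not Great Expectations code; this assumes a Click version whose CliRunner still accepts mix_stderr, as these tests do):

import click
from click.testing import CliRunner

@click.command()
def pick():
    # Each click.prompt() consumes one line of the runner's `input` string.
    datasource = click.prompt("Select a datasource", type=int)
    asset = click.prompt("Which data would you like to use", type=int)
    click.echo(f"chose datasource {datasource}, asset {asset}")

runner = CliRunner(mix_stderr=False)
result = runner.invoke(pick, input="2\n1\n")
assert result.exit_code == 0
assert "chose datasource 2, asset 1" in result.output
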
test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
) |
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch.
First, we call the "suite new" command to create the expectation suite our
test will edit - this step is just a setup.
We call the "suite edit" command without any optional arguments.
The command should:
- NOT open Data Docs
- NOT open jupyter
|
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch. | def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
):
"""
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch.
First, we call the "suite new" command to create the expectation suite our
test will edit - this step is just a setup.
We call the "suite edit" command without any optional arguments.
The command should:
- NOT open Data Docs
- NOT open jupyter
"""
root_dir = (
site_builder_data_context_v013_with_html_store_titanic_random.root_directory
)
os.chdir(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="2\n1\n1\n\n",
catch_exceptions=False,
)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert result.exit_code == 0
context = DataContext(root_dir)
suite = context.get_expectation_suite("foo_suite")
assert isinstance(suite, ExpectationSuite)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "foo_suite", "-d", root_dir],
input="2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_v013_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"(",
"site_builder_data_context_v013_with_html_store_titanic_random",
".",
"root_directory",
")",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
"]",
",",
"input",
"=",
"\"2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo_suite\"",
")",
"assert",
"isinstance",
"(",
"suite",
",",
"ExpectationSuite",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo_suite\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"\"Select a datasource\"",
"not",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"not",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
861,
0
] | [
929,
5
] | python | en | ['en', 'error', 'th'] | False |
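
The second invocation above asks no questions because "suite demo" records the batch kwargs in the suite's meta under "citations", and "suite edit" reuses them. A simplified sketch of that lookup, written as an illustration of the idea rather than Great Expectations' actual implementation:

def batch_kwargs_from_citations(suite_meta):
    # Keep only citations that actually carry batch_kwargs.
    citations = [c for c in suite_meta.get("citations", []) if c.get("batch_kwargs")]
    if not citations:
        return None
    # Prefer the most recent citation when several are present.
    latest = sorted(citations, key=lambda c: c.get("citation_date", ""))[-1]
    return latest["batch_kwargs"]

meta = {"citations": [{"citation_date": "2021-01-01",
                       "batch_kwargs": {"datasource": "random", "path": "f1.csv"}}]}
assert batch_kwargs_from_citations(meta) == {"datasource": "random", "path": "f1.csv"}
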
test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
) |
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs).
First, we call the "suite new" command to create the expectation suite our test
will edit - this step is just a setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has two datasources - we choose one of them. It has a generator
configured. We choose to use the generator and select a generator asset from the list.
The command should:
- NOT open Data Docs
- open jupyter
|
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs). | def test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_v013_with_html_store_titanic_random,
):
"""
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs).
First, we call the "suite new" command to create the expectation suite our test
will edit - this step is just a setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has two datasources - we choose one of them. It has a generator
configured. We choose to use the generator and select a generator asset from the list.
The command should:
- NOT open Data Docs
- open jupyter
"""
root_dir = (
site_builder_data_context_v013_with_html_store_titanic_random.root_directory
)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite", "--no-view"],
input="2\n1\n1\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:"
in stdout
)
batch_kwargs = {
"datasource": "random",
"path": str(
os.path.join(
os.path.abspath(os.path.join(root_dir, os.pardir)),
"data",
"random",
"f1.csv",
)
),
}
batch_kwargs_arg_str = json.dumps(batch_kwargs)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo_suite",
"-d",
root_dir,
"--batch-kwargs",
batch_kwargs_arg_str,
],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_v013_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"(",
"site_builder_data_context_v013_with_html_store_titanic_random",
".",
"root_directory",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
",",
"\"--no-view\"",
"]",
",",
"input",
"=",
"\"2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:\"",
"in",
"stdout",
")",
"batch_kwargs",
"=",
"{",
"\"datasource\"",
":",
"\"random\"",
",",
"\"path\"",
":",
"str",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"os",
".",
"pardir",
")",
")",
",",
"\"data\"",
",",
"\"random\"",
",",
"\"f1.csv\"",
",",
")",
")",
",",
"}",
"batch_kwargs_arg_str",
"=",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo_suite\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--batch-kwargs\"",
",",
"batch_kwargs_arg_str",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Select a datasource\"",
"not",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"not",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
934,
0
] | [
1028,
5
] | python | en | ['en', 'error', 'th'] | False |
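
The --batch-kwargs value travels as one JSON string, which is why the test builds it with json.dumps instead of string concatenation. A small sketch of constructing the argument (the paths are illustrative):

import json
import os

batch_kwargs = {
    "datasource": "random",
    "path": os.path.abspath(os.path.join("data", "random", "f1.csv")),
}
argv = ["suite", "edit", "foo_suite", "--batch-kwargs", json.dumps(batch_kwargs)]
# json.dumps guarantees the CLI can parse the value back with json.loads.
assert json.loads(argv[-1]) == batch_kwargs
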
test_suite_edit_on_exsiting_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error | (
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
) |
Given:
- the suite foo exists
- a datasource exists
- and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user should see a nice error and the program halts before notebook
compilation.
- NOT open Data Docs
- NOT open jupyter
|
Given:
- the suite foo exists
- a datasource exists
- and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}' | def test_suite_edit_on_exsiting_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error(
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
):
"""
Given:
- the suite foo exists
- a datasource exists
- and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user should see a nice error and the program halts before notebook
compilation.
- NOT open Data Docs
- NOT open jupyter
'"""
project_dir = titanic_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
batch_kwargs = {"path": "../data/Titanic.csv"}
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo",
"-d",
project_dir,
"--batch-kwargs",
json.dumps(batch_kwargs),
],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "Please check that your batch_kwargs are able to load a batch." in stdout
assert "Unable to load datasource `None`" in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_on_exsiting_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"titanic_data_context",
",",
")",
":",
"project_dir",
"=",
"titanic_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"batch_kwargs",
"=",
"{",
"\"path\"",
":",
"\"../data/Titanic.csv\"",
"}",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Please check that your batch_kwargs are able to load a batch.\"",
"in",
"stdout",
"assert",
"\"Unable to load datasource `None`\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1033,
0
] | [
1083,
5
] | python | en | ['en', 'error', 'th'] | False |
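
The contract this test pins down is that path-only batch kwargs are rejected once a datasource has to be resolved. Expressed as a small validation step, it might look like the following hedged sketch (the function is hypothetical; only the message fragments mirror the assertions):

def validate_batch_kwargs(batch_kwargs):
    # Without a "datasource" key there is nothing to resolve the path against.
    if "datasource" not in batch_kwargs:
        raise ValueError(
            "Unable to load datasource `None`. "
            "Please check that your batch_kwargs are able to load a batch."
        )
    return batch_kwargs

try:
    validate_batch_kwargs({"path": "../data/Titanic.csv"})
except ValueError as error:
    assert "Unable to load datasource `None`" in str(error)
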
test_suite_edit_on_exsiting_suite_one_datasources_with_datasource_arg_and_batch_kwargs | (
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
) |
Given:
- the suite foo exists
- a datasource bar exists
- and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user gets a working notebook
- NOT open Data Docs
- open jupyter
|
Given:
- the suite foo exists
- a datasource bar exists
- and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}' | def test_suite_edit_on_exsiting_suite_one_datasources_with_datasource_arg_and_batch_kwargs(
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
):
"""
Given:
- the suite foo exists
- a datasource bar exists
- and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user gets a working notebook
- NOT open Data Docs
- open jupyter
"""
project_dir = titanic_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
batch_kwargs = {"path": os.path.join(project_dir, "../", "data", "Titanic.csv")}
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo",
"-d",
project_dir,
"--batch-kwargs",
json.dumps(batch_kwargs),
"--datasource",
"mydatasource",
],
catch_exceptions=False,
)
stdout = result.output
assert stdout == ""
assert result.exit_code == 0
expected_notebook_path = os.path.join(project_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(project_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_on_exsiting_suite_one_datasources_with_datasource_arg_and_batch_kwargs",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"titanic_data_context",
",",
")",
":",
"project_dir",
"=",
"titanic_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"batch_kwargs",
"=",
"{",
"\"path\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"../\"",
",",
"\"data\"",
",",
"\"Titanic.csv\"",
")",
"}",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
",",
"\"--datasource\"",
",",
"\"mydatasource\"",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"stdout",
"==",
"\"\"",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"expectations\"",
",",
"\"foo.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1088,
0
] | [
1143,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
) |
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
First, we call the "suite new" command to create the expectation suite our test
will edit - this step is just a setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has one datasource. The datasource has no generators
configured. The command prompts us to enter the file path.
|
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch. | def test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations(
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
"""
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
First, we call the "suite new" command to create the expectation suite our test
will edit - this step is just a setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has one datasource. The datasource has no generators
configured. The command prompts us to enter the file path.
"""
empty_data_context.add_datasource(
"my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
)
not_so_empty_data_context = empty_data_context
project_root_dir = not_so_empty_data_context.root_directory
root_dir = project_root_dir
os.chdir(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input="{:s}\nmy_new_suite\n\n".format(os.path.join(filesystem_csv_2, "f1.csv")),
catch_exceptions=False,
)
stdout = result.stdout
assert mock_webbrowser.call_count == 1
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert result.exit_code == 0
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
# remove the citations from the suite
context = DataContext(project_root_dir)
suite = context.get_expectation_suite("my_new_suite")
suite.meta.pop("citations")
context.save_expectation_suite(suite)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "my_new_suite", "-d", root_dir],
input="{:s}\n\n".format(os.path.join(filesystem_csv_2, "f1.csv")),
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
assert "Enter the path" in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_my_new_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
",",
"filesystem_csv_2",
")",
":",
"empty_data_context",
".",
"add_datasource",
"(",
"\"my_datasource\"",
",",
"module_name",
"=",
"\"great_expectations.datasource\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
",",
")",
"not_so_empty_data_context",
"=",
"empty_data_context",
"project_root_dir",
"=",
"not_so_empty_data_context",
".",
"root_directory",
"root_dir",
"=",
"project_root_dir",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"{:s}\\nmy_new_suite\\n\\n\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:\"",
"in",
"stdout",
")",
"# remove the citations from the suite",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"my_new_suite\"",
")",
"suite",
".",
"meta",
".",
"pop",
"(",
"\"citations\"",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"my_new_suite\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"{:s}\\n\\n\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"\"Select a datasource\"",
"not",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"not",
"in",
"stdout",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_my_new_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
1148,
0
] | [
1227,
5
] | python | en | ['en', 'error', 'th'] | False |
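
One detail worth noticing: this test removes citations with suite.meta.pop("citations"), while the earlier multi-datasource test used suite.meta.pop("citations", None). The two-argument form is the safe one when the key may already be gone:

meta = {"citations": []}
meta.pop("citations", None)  # removes the key
meta.pop("citations", None)  # no-op, still fine
try:
    meta.pop("citations")    # the one-argument form raises once the key is gone
except KeyError:
    pass
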
test_suite_scaffold_on_context_with_no_datasource_raises_error | (
mock_subprocess, mock_emit, caplog, empty_data_context_stats_enabled
) |
We call the "suite scaffold" command on a context with no datasource
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
|
We call the "suite scaffold" command on a context with no datasource | def test_suite_scaffold_on_context_with_no_datasource_raises_error(
mock_subprocess, mock_emit, caplog, empty_data_context_stats_enabled
):
"""
We call the "suite scaffold" command on a context with no datasource
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
"""
context = empty_data_context_stats_enabled
root_dir = context.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", "foop", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert (
"No datasources found in the context. To add a datasource, run `great_expectations datasource new`"
in stdout
)
assert mock_subprocess.call_count == 0
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_scaffold_on_context_with_no_datasource_raises_error",
"(",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"empty_data_context_stats_enabled",
")",
":",
"context",
"=",
"empty_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"\"foop\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"(",
"\"No datasources found in the context. To add a datasource, run `great_expectations datasource new`\"",
"in",
"stdout",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
1424,
0
] | [
1469,
5
] | python | en | ['en', 'error', 'th'] | False |
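
The telemetry assertions rely on unittest.mock recording every call it receives. A self-contained sketch of the same assertion style, reusing the exact event payload shapes the test checks:

from unittest import mock

emit = mock.Mock()
emit({"event_payload": {}, "event": "data_context.__init__", "success": True})
emit({"event": "cli.suite.scaffold", "event_payload": {"api_version": "v2"},
      "success": False})

assert emit.call_count == 2
assert emit.call_args_list == [
    mock.call({"event_payload": {}, "event": "data_context.__init__",
               "success": True}),
    mock.call({"event": "cli.suite.scaffold",
               "event_payload": {"api_version": "v2"}, "success": False}),
]
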
test_suite_scaffold_on_existing_suite_raises_error | (
mock_emit, caplog, empty_data_context_stats_enabled
) |
We call the "suite scaffold" command with an existing suite
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
|
We call the "suite scaffold" command with an existing suite | def test_suite_scaffold_on_existing_suite_raises_error(
mock_emit, caplog, empty_data_context_stats_enabled
):
"""
We call the "suite scaffold" command with an existing suite
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
"""
context = empty_data_context_stats_enabled
root_dir = context.root_directory
suite = context.create_expectation_suite("foop")
context.save_expectation_suite(suite)
assert context.list_expectation_suite_names() == ["foop"]
mock_emit.reset_mock()
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", "foop", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "An expectation suite named `foop` already exists." in stdout
assert (
"If you intend to edit the suite please use `great_expectations suite edit foop`."
in stdout
)
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_scaffold_on_existing_suite_raises_error",
"(",
"mock_emit",
",",
"caplog",
",",
"empty_data_context_stats_enabled",
")",
":",
"context",
"=",
"empty_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"suite",
"=",
"context",
".",
"create_expectation_suite",
"(",
"\"foop\"",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"\"foop\"",
"]",
"mock_emit",
".",
"reset_mock",
"(",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"\"foop\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"An expectation suite named `foop` already exists.\"",
"in",
"stdout",
"assert",
"(",
"\"If you intend to edit the suite please use `great_expectations suite edit foop`.\"",
"in",
"stdout",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
1475,
0
] | [
1524,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_scaffold_creates_notebook_and_opens_jupyter | (
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
) |
We call the "suite scaffold" command
The command should:
- create a new notebook
- open the notebook in jupyter
- send a DataContext init success message
- send a scaffold success message
|
We call the "suite scaffold" command | def test_suite_scaffold_creates_notebook_and_opens_jupyter(
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
):
"""
We call the "suite scaffold" command
The command should:
- create a new notebook
- open the notebook in jupyter
- send a DataContext init success message
- send a scaffold success message
"""
context = titanic_data_context_stats_enabled
root_dir = context.root_directory
suite_name = "foop"
expected_notebook_path = os.path.join(
root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb"
)
assert not os.path.isfile(expected_notebook_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", suite_name, "-d", root_dir],
input="1\n1\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert os.path.isfile(expected_notebook_path)
assert mock_subprocess.call_count == 1
assert mock_subprocess.call_args_list == [
mock.call(["jupyter", "notebook", expected_notebook_path])
]
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_scaffold_creates_notebook_and_opens_jupyter",
"(",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"titanic_data_context_stats_enabled",
")",
":",
"context",
"=",
"titanic_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"suite_name",
"=",
"\"foop\"",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"context",
".",
"GE_EDIT_NOTEBOOK_DIR",
",",
"f\"scaffold_{suite_name}.ipynb\"",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"suite_name",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"1\\n1\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert",
"mock_subprocess",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"[",
"\"jupyter\"",
",",
"\"notebook\"",
",",
"expected_notebook_path",
"]",
")",
"]",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1531,
0
] | [
1583,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_scaffold_creates_notebook_with_no_jupyter_flag | (
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
) |
We call the "suite scaffold --no-jupyter"
The command should:
- create a new notebook
- NOT open the notebook in jupyter
- tell the user to open the notebook
- send a DataContext init success message
- send a scaffold success message
|
We call the "suite scaffold --no-jupyter" | def test_suite_scaffold_creates_notebook_with_no_jupyter_flag(
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
):
"""
We call the "suite scaffold --no-jupyter"
The command should:
- create a new notebook
- NOT open the notebook in jupyter
- tell the user to open the notebook
- send a DataContext init success message
- send a scaffold success message
"""
context = titanic_data_context_stats_enabled
root_dir = context.root_directory
suite_name = "foop"
expected_notebook_path = os.path.join(
root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb"
)
assert not os.path.isfile(expected_notebook_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", suite_name, "-d", root_dir, "--no-jupyter"],
input="1\n1\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert os.path.isfile(expected_notebook_path)
assert (
f"To continue scaffolding this suite, run `jupyter notebook {expected_notebook_path}`"
in stdout
)
assert mock_subprocess.call_count == 0
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_scaffold_creates_notebook_with_no_jupyter_flag",
"(",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"titanic_data_context_stats_enabled",
")",
":",
"context",
"=",
"titanic_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"suite_name",
"=",
"\"foop\"",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"context",
".",
"GE_EDIT_NOTEBOOK_DIR",
",",
"f\"scaffold_{suite_name}.ipynb\"",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"suite_name",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--no-jupyter\"",
"]",
",",
"input",
"=",
"\"1\\n1\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"assert",
"(",
"f\"To continue scaffolding this suite, run `jupyter notebook {expected_notebook_path}`\"",
"in",
"stdout",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1590,
0
] | [
1645,
5
] | python | en | ['en', 'error', 'th'] | False |
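
Taken together, the two scaffold tests pin down a single branch: launch jupyter unless --no-jupyter was passed. A hedged sketch of that behavior (the function name and structure are illustrative, not the CLI's actual code):

import subprocess

def open_scaffold_notebook(notebook_path, jupyter=True):
    if jupyter:
        # Matches mock.call(["jupyter", "notebook", expected_notebook_path]).
        subprocess.call(["jupyter", "notebook", notebook_path])
    else:
        print(
            f"To continue scaffolding this suite, run "
            f"`jupyter notebook {notebook_path}`"
        )
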
ManageUserView.get_object | (self) | Retrieve and return the authenticated user | Retrieve and return the authenticated user | def get_object(self):
"""Retrieve and return authentication user"""
return self.request.user | [
"def",
"get_object",
"(",
"self",
")",
":",
"return",
"self",
".",
"request",
".",
"user"
] | [
24,
4
] | [
26,
32
] | python | en | ['en', 'en', 'en'] | True |
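
For context, this get_object override is the standard Django REST Framework pattern for a "me" endpoint: instead of looking an object up by pk, the view returns whoever is authenticated. A sketch of the view it typically sits in (the permission classes are an assumption; the serializer is defined elsewhere in the app):

from rest_framework import generics, permissions

class ManageUserView(generics.RetrieveUpdateAPIView):
    """Manage the authenticated user."""
    # serializer_class would point at a user serializer defined elsewhere.
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        # No pk lookup: the endpoint always operates on the requester.
        return self.request.user
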
registerNoTpl | (name, cl) | Register a class without a template
It may seem pointless to register classes without a template (and it wasn't
useful until the SmartPointer template was generated), but those classes
can be used as template arguments of classes with templates.
| Register a class without a template | def registerNoTpl(name, cl):
"""Register a class without a template
It may seem pointless to register classes without a template (and it wasn't
useful until the SmartPointer template was generated), but those classes
can be used as template arguments of classes with templates.
"""
itkTemplate.__templates__[normalizeName(name)] = cl | [
"def",
"registerNoTpl",
"(",
"name",
",",
"cl",
")",
":",
"itkTemplate",
".",
"__templates__",
"[",
"normalizeName",
"(",
"name",
")",
"]",
"=",
"cl"
] | [
37,
0
] | [
44,
55
] | python | en | ['en', 'en', 'en'] | True |
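
A minimal mimic of what this registry does: map a normalized C++ name to a Python class so the class can later be resolved as a template argument (all names here are illustrative):

templates = {}

def register_no_tpl(name, cl):
    # Same normalization as the real code: drop spaces and pointer markers.
    templates[name.replace(" ", "").replace("*", "")] = cl

class FakeOffset2:
    pass

register_no_tpl("itk::Offset< 2 >", FakeOffset2)
assert templates["itk::Offset<2>"] is FakeOffset2
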
normalizeName | (name) | Normalize the class name to remove ambiguity
This function removes the white spaces in the name, and also
removes the pointer declaration "*" (it has no meaning in Python) | Normalize the class name to remove ambiguity | def normalizeName(name):
"""Normalize the class name to remove ambiguity
This function removes the white spaces in the name, and also
removes the pointer declaration "*" (it has no meaning in Python) """
name = name.replace(" ", "")
name = name.replace("*", "")
return name | [
"def",
"normalizeName",
"(",
"name",
")",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"\"*\"",
",",
"\"\"",
")",
"return",
"name"
] | [
47,
0
] | [
56,
15
] | python | en | ['en', 'en', 'en'] | True |
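
A quick check of what this normalization does to a typical C++ type string. Interior spaces collapse too ("unsigned char" becomes "unsignedchar"), which is harmless because both sides of every lookup are normalized the same way:

def normalizeName(name):  # copied from the record above for a self-contained check
    name = name.replace(" ", "")
    name = name.replace("*", "")
    return name

assert normalizeName("itk::Image< unsigned char, 2 > *") == "itk::Image<unsignedchar,2>"
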
itkTemplate.__add__ | (self, paramSetString, cl) | Add a new argument set and the resulting class to the template.
paramSetString is the C++ string which defines the parameters set.
cl is the class which corresponds to the couple template-argument set.
| Add a new argument set and the resulting class to the template. | def __add__(self, paramSetString, cl):
"""Add a new argument set and the resulting class to the template.
paramSetString is the C++ string which defines the parameters set.
cl is the class which corresponds to the couple template-argument set.
"""
# recreate the full name and normalize it to avoid ambiguity
normFullName = normalizeName(
self.__name__ + "<" + paramSetString + ">")
# the full class should not be already registered. If it is, there is a
# problem somewhere so warn the user so he can fix the problem
if normFullName in itkTemplate.__templates__:
message = (
"Template %s\n already defined as %s\n is redefined "
"as %s") % (normFullName, self.__templates__[normFullName], cl)
warnings.warn(message)
# register the class
itkTemplate.__templates__[normFullName] = cl
# __find_param__ will parse the paramSetString and produce a list of
# the same parameters transformed in corresponding python classes.
# we transform this list in tuple to make it usable as key of the dict
param = tuple(self.__find_param__(paramSetString))
# once again, warn the user if the tuple of parameter is already
# defined so he can fix the problem
if param in self.__template__:
message = "Warning: template already defined '%s'" % normFullName
warnings.warn(message)
# and register the parameter tuple
self.__template__[param] = cl
# add in __class_to_template__ dictionary
itkTemplate.__class_to_template__[cl] = (self, param)
# now populate the template
# 2 cases:
# - the template is a SmartPointer. In that case, the attribute name
# will be the full real name of the class without the itk prefix and
# _Pointer suffix
# - the template is not a SmartPointer. In that case, we keep only the
# end of the real class name which is a short string describing the
# template arguments (for example IUC2)
if cl.__name__.startswith("itk"):
if cl.__name__.endswith("_Pointer"):
# it's a SmartPointer
attributeName = cl.__name__[len("itk"):-len("_Pointer")]
else:
# it's not a SmartPointer
# we need to know the size of the name to keep only the suffix
# short name does not contain :: and nested namespace
# itk::Numerics::Sample -> itkSample
shortNameSize = len(re.sub(r':.*:', '', self.__name__))
attributeName = cl.__name__[shortNameSize:]
elif cl.__name__.startswith("vcl_complex"):
raise AttributeError('former vcl_complex, handling ' + str(cl.__name__))
# C++ name is likely to be std::complex here, instead of the
# expected vcl_complex
attributeName = cl.__name__[len("vcl_complex"):]
else:
shortName = re.sub(r':.*:', '', self.__name__)
if not cl.__name__.startswith(shortName):
shortName = re.sub(r'.*::', '', self.__name__)
attributeName = cl.__name__[len(shortName):]
if attributeName[0].isdigit():
# the attribute name can't start with a number
# add a single x before it to build a valid name.
# Adding an underscore would hide the attributeName in IPython
attributeName = "x" + attributeName
# add the attribute to this object
self.__dict__[attributeName] = cl | [
"def",
"__add__",
"(",
"self",
",",
"paramSetString",
",",
"cl",
")",
":",
"# recreate the full name and normalize it to avoid ambiguity",
"normFullName",
"=",
"normalizeName",
"(",
"self",
".",
"__name__",
"+",
"\"<\"",
"+",
"paramSetString",
"+",
"\">\"",
")",
"# the full class should not be already registered. If it is, there is a",
"# problem somewhere so warn the user so he can fix the problem",
"if",
"normFullName",
"in",
"itkTemplate",
".",
"__templates__",
":",
"message",
"=",
"(",
"\"Template %s\\n already defined as %s\\n is redefined \"",
"\"as %s\"",
")",
"%",
"(",
"normFullName",
",",
"self",
".",
"__templates__",
"[",
"normFullName",
"]",
",",
"cl",
")",
"warnings",
".",
"warn",
"(",
"message",
")",
"# register the class",
"itkTemplate",
".",
"__templates__",
"[",
"normFullName",
"]",
"=",
"cl",
"# __find_param__ will parse the paramSetString and produce a list of",
"# the same parameters transformed in corresponding python classes.",
"# we transform this list in tuple to make it usable as key of the dict",
"param",
"=",
"tuple",
"(",
"self",
".",
"__find_param__",
"(",
"paramSetString",
")",
")",
"# once again, warn the user if the tuple of parameter is already",
"# defined so he can fix the problem",
"if",
"param",
"in",
"self",
".",
"__template__",
":",
"message",
"=",
"\"Warning: template already defined '%s'\"",
"%",
"normFullName",
"warnings",
".",
"warn",
"(",
"message",
")",
"# and register the parameter tuple",
"self",
".",
"__template__",
"[",
"param",
"]",
"=",
"cl",
"# add in __class_to_template__ dictionary",
"itkTemplate",
".",
"__class_to_template__",
"[",
"cl",
"]",
"=",
"(",
"self",
",",
"param",
")",
"# now populate the template",
"# 2 cases:",
"# - the template is a SmartPointer. In that case, the attribute name",
"# will be the full real name of the class without the itk prefix and",
"# _Pointer suffix",
"# - the template is not a SmartPointer. In that case, we keep only the",
"# end of the real class name which is a short string discribing the",
"# template arguments (for example IUC2)",
"if",
"cl",
".",
"__name__",
".",
"startswith",
"(",
"\"itk\"",
")",
":",
"if",
"cl",
".",
"__name__",
".",
"endswith",
"(",
"\"_Pointer\"",
")",
":",
"# it's a SmartPointer",
"attributeName",
"=",
"cl",
".",
"__name__",
"[",
"len",
"(",
"\"itk\"",
")",
":",
"-",
"len",
"(",
"\"_Pointer\"",
")",
"]",
"else",
":",
"# it's not a SmartPointer",
"# we need to now the size of the name to keep only the suffix",
"# short name does not contain :: and nested namespace",
"# itk::Numerics::Sample -> itkSample",
"shortNameSize",
"=",
"len",
"(",
"re",
".",
"sub",
"(",
"r':.*:'",
",",
"''",
",",
"self",
".",
"__name__",
")",
")",
"attributeName",
"=",
"cl",
".",
"__name__",
"[",
"shortNameSize",
":",
"]",
"elif",
"cl",
".",
"__name__",
".",
"startswith",
"(",
"\"vcl_complex\"",
")",
":",
"raise",
"AttributeError",
"(",
"'former vcl_complex, handling '",
"+",
"str",
"(",
"cl",
".",
"__name__",
")",
")",
"# C++ name is likely to be std::complex here, instead of the",
"# expected vcl_complex",
"attributeName",
"=",
"cl",
".",
"__name__",
"[",
"len",
"(",
"\"vcl_complex\"",
")",
":",
"]",
"else",
":",
"shortName",
"=",
"re",
".",
"sub",
"(",
"r':.*:'",
",",
"''",
",",
"self",
".",
"__name__",
")",
"if",
"not",
"cl",
".",
"__name__",
".",
"startswith",
"(",
"shortName",
")",
":",
"shortName",
"=",
"re",
".",
"sub",
"(",
"r'.*::'",
",",
"''",
",",
"self",
".",
"__name__",
")",
"attributeName",
"=",
"cl",
".",
"__name__",
"[",
"len",
"(",
"shortName",
")",
":",
"]",
"if",
"attributeName",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"# the attribute name can't start with a number",
"# add a single x before it to build a valid name.",
"# Adding an underscore would hide the attributeName in IPython",
"attributeName",
"=",
"\"x\"",
"+",
"attributeName",
"# add the attribute to this object",
"self",
".",
"__dict__",
"[",
"attributeName",
"]",
"=",
"cl"
] | [
95,
4
] | [
170,
41
] | python | en | ['en', 'en', 'en'] | True |
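
The attribute-name derivation in the non-pointer branch can be traced with one concrete pair of names, chosen to match the IUC2 example in the comments (the exact wrapped class name is an assumption about ITK's wrapping):

import re

template_name = "itk::ImageFileReader"
wrapped_class_name = "itkImageFileReaderIUC2"  # reader for Image<unsigned char, 2>

# r':.*:' greedily removes everything from the first colon to the last,
# turning "itk::ImageFileReader" into "itkImageFileReader".
short = re.sub(r':.*:', '', template_name)
attribute = wrapped_class_name[len(short):]
assert attribute == "IUC2"  # exposed to users as itk.ImageFileReader.IUC2
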
itkTemplate.__find_param__ | (self, paramSetString) | Find the parameters of the template.
paramSetString is the C++ string which defines the parameters set.
__find_param__ returns a list of itk classes, itkCType, and/or numbers
which correspond to the parameters described in paramSetString.
The parameters MUST have been registered before calling this method,
or __find_param__ will return a string and not the wanted object, and
will display a warning. Registration order is important.
This method is not static only to be able to display the template name
in the warning.
| Find the parameters of the template. | def __find_param__(self, paramSetString):
"""Find the parameters of the template.
paramSetString is the C++ string which defines the parameters set.
__find_param__ returns a list of itk classes, itkCType, and/or numbers
which correspond to the parameters described in paramSetString.
The parameters MUST have been registered before calling this method,
or __find_param__ will return a string and not the wanted object, and
will display a warning. Registration order is important.
This method is not static only to be able to display the template name
in the warning.
"""
# split the string in a list of parameters
paramStrings = []
inner = 0
part = paramSetString.split(",")
for elt in part:
if inner == 0:
paramStrings.append(elt)
else:
paramStrings[-1] += "," + elt
inner += elt.count("<") - elt.count(">")
# convert all string parameters into classes (if possible)
parameters = []
for param in paramStrings:
# the parameter needs to be normalized several times below
# do it once here
param = param.strip()
paramNorm = normalizeName(param)
if paramNorm in itkTemplate.__templates__:
# the parameter is registered.
# just get the real class from the dictionary
param = itkTemplate.__templates__[paramNorm]
elif itkCType.GetCType(param):
# the parameter is a c type
# just get the itkCtype instance
param = itkCType.GetCType(param)
elif paramNorm.isdigit():
# the parameter is a number
# convert the string to a number !
param = int(param)
elif paramNorm == "true":
param = True
elif paramNorm == "false":
param = False
else:
# unable to convert the parameter
# use it without changes, but display a warning message, to
# incite developer to fix the problem
message = (
"Warning: Unknown parameter '%s' in "
"template '%s'" % (param, self.__name__))
warnings.warn(message)
parameters.append(param)
return parameters | [
"def",
"__find_param__",
"(",
"self",
",",
"paramSetString",
")",
":",
"# split the string in a list of parameters",
"paramStrings",
"=",
"[",
"]",
"inner",
"=",
"0",
"part",
"=",
"paramSetString",
".",
"split",
"(",
"\",\"",
")",
"for",
"elt",
"in",
"part",
":",
"if",
"inner",
"==",
"0",
":",
"paramStrings",
".",
"append",
"(",
"elt",
")",
"else",
":",
"paramStrings",
"[",
"-",
"1",
"]",
"+=",
"\",\"",
"+",
"elt",
"inner",
"+=",
"elt",
".",
"count",
"(",
"\"<\"",
")",
"-",
"elt",
".",
"count",
"(",
"\">\"",
")",
"# convert all string parameters into classes (if possible)",
"parameters",
"=",
"[",
"]",
"for",
"param",
"in",
"paramStrings",
":",
"# the parameter need to be normalized several time below",
"# do it once here",
"param",
"=",
"param",
".",
"strip",
"(",
")",
"paramNorm",
"=",
"normalizeName",
"(",
"param",
")",
"if",
"paramNorm",
"in",
"itkTemplate",
".",
"__templates__",
":",
"# the parameter is registered.",
"# just get the really class form the dictionary",
"param",
"=",
"itkTemplate",
".",
"__templates__",
"[",
"paramNorm",
"]",
"elif",
"itkCType",
".",
"GetCType",
"(",
"param",
")",
":",
"# the parameter is a c type",
"# just get the itkCtype instance",
"param",
"=",
"itkCType",
".",
"GetCType",
"(",
"param",
")",
"elif",
"paramNorm",
".",
"isdigit",
"(",
")",
":",
"# the parameter is a number",
"# convert the string to a number !",
"param",
"=",
"int",
"(",
"param",
")",
"elif",
"paramNorm",
"==",
"\"true\"",
":",
"param",
"=",
"True",
"elif",
"paramNorm",
"==",
"\"false\"",
":",
"param",
"=",
"False",
"else",
":",
"# unable to convert the parameter",
"# use it without changes, but display a warning message, to",
"# incite developer to fix the problem",
"message",
"=",
"(",
"\"Warning: Unknown parameter '%s' in \"",
"\"template '%s'\"",
"%",
"(",
"param",
",",
"self",
".",
"__name__",
")",
")",
"warnings",
".",
"warn",
"(",
"message",
")",
"parameters",
".",
"append",
"(",
"param",
")",
"return",
"parameters"
] | [
172,
4
] | [
236,
25
] | python | en | ['en', 'en', 'en'] | True |
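
The comma-splitting loop at the top of __find_param__ is worth isolating: it keeps commas inside nested "<...>" from breaking one parameter into several. A self-contained sketch of just that loop:

def split_template_params(param_set):
    # Split on commas, then re-join pieces that fall inside nested "<...>".
    parts, inner = [], 0
    for elt in param_set.split(","):
        if inner == 0:
            parts.append(elt)
        else:
            parts[-1] += "," + elt
        inner += elt.count("<") - elt.count(">")
    return [p.strip() for p in parts]

assert split_template_params("itk::Image<float,3>, 3") == ["itk::Image<float,3>", "3"]
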
itkTemplate.__getitem__ | (self, parameters) | Return the class which corresponds to the given template parameters.
parameters can be:
- a single parameter (Ex: itk.Index[2])
- a list of elements (Ex: itk.Image[itk.UC, 2])
| Return the class which corresponds to the given template parameters. | def __getitem__(self, parameters):
"""Return the class which corresponds to the given template parameters.
parameters can be:
- a single parameter (Ex: itk.Index[2])
- a list of elements (Ex: itk.Image[itk.UC, 2])
"""
parameters_type = type(parameters)
if not parameters_type is tuple and not parameters_type is list:
# parameters is a single element.
# include it in a list to manage the 2 cases in the same way
parameters = [parameters]
cleanParameters = []
for param in parameters:
# In the case of itk class instance, get the class
name = param.__class__.__name__
isclass = inspect.isclass(param)
if not isclass and name[:3] == 'itk' and name != "itkCType":
param = param.__class__
# append the parameter to the list. If it's not a supported type,
# it is not in the dictionary and we will raise an exception below
cleanParameters.append(param)
try:
return(self.__template__[tuple(cleanParameters)])
except:
self._LoadModules()
try:
return(self.__template__[tuple(cleanParameters)])
except:
raise KeyError(
'itkTemplate : No template %s for the %s class' %
(str(parameters), self.__name__)) | [
"def",
"__getitem__",
"(",
"self",
",",
"parameters",
")",
":",
"parameters_type",
"=",
"type",
"(",
"parameters",
")",
"if",
"not",
"parameters_type",
"is",
"tuple",
"and",
"not",
"parameters_type",
"is",
"list",
":",
"# parameters is a single element.",
"# include it in a list to manage the 2 cases in the same way",
"parameters",
"=",
"[",
"parameters",
"]",
"cleanParameters",
"=",
"[",
"]",
"for",
"param",
"in",
"parameters",
":",
"# In the case of itk class instance, get the class",
"name",
"=",
"param",
".",
"__class__",
".",
"__name__",
"isclass",
"=",
"inspect",
".",
"isclass",
"(",
"param",
")",
"if",
"not",
"isclass",
"and",
"name",
"[",
":",
"3",
"]",
"==",
"'itk'",
"and",
"name",
"!=",
"\"itkCType\"",
":",
"param",
"=",
"param",
".",
"__class__",
"# append the parameter to the list. If it's not a supported type,",
"# it is not in the dictionary and we will raise an exception below",
"cleanParameters",
".",
"append",
"(",
"param",
")",
"try",
":",
"return",
"(",
"self",
".",
"__template__",
"[",
"tuple",
"(",
"cleanParameters",
")",
"]",
")",
"except",
":",
"self",
".",
"_LoadModules",
"(",
")",
"try",
":",
"return",
"(",
"self",
".",
"__template__",
"[",
"tuple",
"(",
"cleanParameters",
")",
"]",
")",
"except",
":",
"raise",
"KeyError",
"(",
"'itkTemplate : No template %s for the %s class'",
"%",
"(",
"str",
"(",
"parameters",
")",
",",
"self",
".",
"__name__",
")",
")"
] | [
238,
4
] | [
273,
53
] | python | en | ['en', 'en', 'en'] | True |
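A toy stand-in for the lookup behaviour of __getitem__ above: a single parameter is wrapped in a list, and instantiations live in a dict keyed by parameter tuples. TinyTemplate and its contents are illustrative assumptions, not ITK code:

class TinyTemplate:
    def __init__(self, name, instantiations):
        self.__name__ = name
        self.__template__ = instantiations  # {(params, ...): wrapped class}

    def __getitem__(self, parameters):
        if not isinstance(parameters, (tuple, list)):
            parameters = [parameters]  # single parameter -> one-element list
        try:
            return self.__template__[tuple(parameters)]
        except KeyError:
            raise KeyError("itkTemplate : No template %s for the %s class"
                           % (str(parameters), self.__name__))

Image = TinyTemplate("Image", {("UC", 2): "itkImageUC2"})
print(Image["UC", 2])  # -> itkImageUC2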
itkTemplate.__getattr__ | (self, attr) | Support for lazy loading. | Support for lazy loading. | def __getattr__(self, attr):
"""Support for lazy loading."""
self._LoadModules()
return object.__getattribute__(self, attr) | [
"def",
"__getattr__",
"(",
"self",
",",
"attr",
")",
":",
"self",
".",
"_LoadModules",
"(",
")",
"return",
"object",
".",
"__getattribute__",
"(",
"self",
",",
"attr",
")"
] | [
278,
4
] | [
281,
50
] | python | en | ['en', 'en', 'en'] | True |
itkTemplate._LoadModules | (self) | Loads all the modules that may not have been loaded by the lazy loading system.
If multiple modules use the same object, the lazy loading system is only going to
load the module to which the object belongs. The other modules will be loaded only when necessary.
| Loads all the modules that may not have been loaded by the lazy loading system. | def _LoadModules(self):
"""Loads all the modules that may not have been loaded by the lazy loading system.
If multiple modules use the same object, the lazy loading system is only going to
load the module to which the object belongs. The other modules will be loaded only when necessary.
"""
name=self.__name__.split('::')[-1] # Remove 'itk::' or 'itk::Function::'
modules = itkBase.lazy_attributes[name]
for module in modules:
# find the module's name in sys.modules, or create a new module so named
if sys.version_info >= (3, 4):
this_module = sys.modules.setdefault(module, types.ModuleType(module))
else:
this_module = sys.modules.setdefault(module, imp.new_module(module))
namespace = {}
if not hasattr(this_module, '__templates_loaded'):
itkBase.LoadModule(module, namespace) | [
"def",
"_LoadModules",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"__name__",
".",
"split",
"(",
"'::'",
")",
"[",
"-",
"1",
"]",
"# Remove 'itk::' or 'itk::Function::'",
"modules",
"=",
"itkBase",
".",
"lazy_attributes",
"[",
"name",
"]",
"for",
"module",
"in",
"modules",
":",
"# find the module's name in sys.modules, or create a new module so named",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"4",
")",
":",
"this_module",
"=",
"sys",
".",
"modules",
".",
"setdefault",
"(",
"module",
",",
"types",
".",
"ModuleType",
"(",
"module",
")",
")",
"else",
":",
"this_module",
"=",
"sys",
".",
"modules",
".",
"setdefault",
"(",
"module",
",",
"imp",
".",
"new_module",
"(",
"module",
")",
")",
"namespace",
"=",
"{",
"}",
"if",
"not",
"hasattr",
"(",
"this_module",
",",
"'__templates_loaded'",
")",
":",
"itkBase",
".",
"LoadModule",
"(",
"module",
",",
"namespace",
")"
] | [
284,
4
] | [
300,
53
] | python | en | ['en', 'en', 'en'] | True |
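The sys.modules.setdefault(...) call above is the crux of _LoadModules: it reuses an already-imported module, or registers an empty placeholder under that name, so later loads attach their attributes to one shared module object. A small demonstration (the module name is made up):

import sys
import types

mod = sys.modules.setdefault("my_lazy_module", types.ModuleType("my_lazy_module"))
again = sys.modules.setdefault("my_lazy_module", types.ModuleType("my_lazy_module"))
assert mod is again  # the second call returns the existing entry, not a new module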
itkTemplate.__dir__ | (self) | Returns the list of the attributes available in the current template.
This loads all the modules that might be required by this template first,
and then returns the list of attributes. It is used when dir() is called
or when it tries to autocomplete attribute names.
| Returns the list of the attributes available in the current template. | def __dir__(self):
"""Returns the list of the attributes available in the current template.
This loads all the modules that might be required by this template first,
and then returns the list of attributes. It is used when dir() is called
or when it tries to autocomplete attribute names.
"""
self._LoadModules()
def get_attrs(obj):
if not hasattr(obj, '__dict__'):
return [] # slots only
if sys.version_info >= (3, 0):
dict_types = (dict, types.MappingProxyType)
else:
dict_types = (dict, types.DictProxyType)
if not isinstance(obj.__dict__, dict_types):
raise TypeError("%s.__dict__ is not a dictionary"
"" % obj.__name__)
return obj.__dict__.keys()
def dir2(obj):
attrs = set()
if not hasattr(obj, '__bases__'):
# obj is an instance
if not hasattr(obj, '__class__'):
# slots
return sorted(get_attrs(obj))
klass = obj.__class__
attrs.update(get_attrs(klass))
else:
# obj is a class
klass = obj
for cls in klass.__bases__:
attrs.update(get_attrs(cls))
attrs.update(dir2(cls))
attrs.update(get_attrs(obj))
return list(attrs)
return dir2(self) | [
"def",
"__dir__",
"(",
"self",
")",
":",
"self",
".",
"_LoadModules",
"(",
")",
"def",
"get_attrs",
"(",
"obj",
")",
":",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'__dict__'",
")",
":",
"return",
"[",
"]",
"# slots only",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"0",
")",
":",
"dict_types",
"=",
"(",
"dict",
",",
"types",
".",
"MappingProxyType",
")",
"else",
":",
"dict_types",
"=",
"(",
"dict",
",",
"types",
".",
"DictProxyType",
")",
"if",
"not",
"isinstance",
"(",
"obj",
".",
"__dict__",
",",
"dict_types",
")",
":",
"raise",
"TypeError",
"(",
"\"%s.__dict__ is not a dictionary\"",
"\"\"",
"%",
"obj",
".",
"__name__",
")",
"return",
"obj",
".",
"__dict__",
".",
"keys",
"(",
")",
"def",
"dir2",
"(",
"obj",
")",
":",
"attrs",
"=",
"set",
"(",
")",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'__bases__'",
")",
":",
"# obj is an instance",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'__class__'",
")",
":",
"# slots",
"return",
"sorted",
"(",
"get_attrs",
"(",
"obj",
")",
")",
"klass",
"=",
"obj",
".",
"__class__",
"attrs",
".",
"update",
"(",
"get_attrs",
"(",
"klass",
")",
")",
"else",
":",
"# obj is a class",
"klass",
"=",
"obj",
"for",
"cls",
"in",
"klass",
".",
"__bases__",
":",
"attrs",
".",
"update",
"(",
"get_attrs",
"(",
"cls",
")",
")",
"attrs",
".",
"update",
"(",
"dir2",
"(",
"cls",
")",
")",
"attrs",
".",
"update",
"(",
"get_attrs",
"(",
"obj",
")",
")",
"return",
"list",
"(",
"attrs",
")",
"return",
"dir2",
"(",
"self",
")"
] | [
302,
4
] | [
342,
25
] | python | en | ['en', 'en', 'en'] | True |
itkTemplate.New | (self, *args, **kwargs) | Instantiate the template with a type implied from its input.
Template type specification can be avoided by assuming that the type's
first template argument should have the same type as its primary input.
This is generally true. If it is not true, then specify the types
explicitly.
For example, instead of the explicit type specification::
median = itk.MedianImageFilter[ImageType, ImageType].New()
median.SetInput(reader.GetOutput())
call::
median = itk.MedianImageFilter.New(Input=reader.GetOutput())
or, the shortened::
median = itk.MedianImageFilter.New(reader.GetOutput())
or:
median = itk.MedianImageFilter.New(reader) | Instantiate the template with a type implied from its input. | def New(self, *args, **kwargs):
"""Instantiate the template with a type implied from its input.
Template type specification can be avoided by assuming that the type's
first template argument should have the same type as its primary input.
This is generally true. If it is not true, then specify the types
explicitly.
For example, instead of the explicit type specification::
median = itk.MedianImageFilter[ImageType, ImageType].New()
median.SetInput(reader.GetOutput())
call::
median = itk.MedianImageFilter.New(Input=reader.GetOutput())
or, the shortened::
median = itk.MedianImageFilter.New(reader.GetOutput())
or:
median = itk.MedianImageFilter.New(reader)"""
import itk
keys = self.keys()
cur = itk.auto_pipeline.current
if self.__name__ == "itk::ImageFileReader":
return self._NewImageFileReader(*args, **kwargs)
primary_input_methods = ('Input', 'InputImage', 'Input1')
if len(args) != 0:
# try to find a type suitable for the primary input provided
input_type = output(args[0]).__class__
keys = [k for k in keys if k[0] == input_type]
elif set(primary_input_methods).intersection(kwargs.keys()):
for method in primary_input_methods:
if method in kwargs:
input_type = output(kwargs[method]).__class__
keys = [k for k in keys if k[0] == input_type]
break
elif cur is not None and len(cur) != 0:
# try to find a type suitable for the input provided
input_type = output(cur).__class__
keys = [k for k in keys if k[0] == input_type]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
return self[list(keys)[0]].New(*args, **kwargs) | [
"def",
"New",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"itk",
"keys",
"=",
"self",
".",
"keys",
"(",
")",
"cur",
"=",
"itk",
".",
"auto_pipeline",
".",
"current",
"if",
"self",
".",
"__name__",
"==",
"\"itk::ImageFileReader\"",
":",
"return",
"self",
".",
"_NewImageFileReader",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"primary_input_methods",
"=",
"(",
"'Input'",
",",
"'InputImage'",
",",
"'Input1'",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"0",
":",
"# try to find a type suitable for the primary input provided",
"input_type",
"=",
"output",
"(",
"args",
"[",
"0",
"]",
")",
".",
"__class__",
"keys",
"=",
"[",
"k",
"for",
"k",
"in",
"keys",
"if",
"k",
"[",
"0",
"]",
"==",
"input_type",
"]",
"elif",
"set",
"(",
"primary_input_methods",
")",
".",
"intersection",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
":",
"for",
"method",
"in",
"primary_input_methods",
":",
"if",
"method",
"in",
"kwargs",
":",
"input_type",
"=",
"output",
"(",
"kwargs",
"[",
"method",
"]",
")",
".",
"__class__",
"keys",
"=",
"[",
"k",
"for",
"k",
"in",
"keys",
"if",
"k",
"[",
"0",
"]",
"==",
"input_type",
"]",
"break",
"elif",
"cur",
"is",
"not",
"None",
"and",
"len",
"(",
"cur",
")",
"!=",
"0",
":",
"# try to find a type suitable for the input provided",
"input_type",
"=",
"output",
"(",
"cur",
")",
".",
"__class__",
"keys",
"=",
"[",
"k",
"for",
"k",
"in",
"keys",
"if",
"k",
"[",
"0",
"]",
"==",
"input_type",
"]",
"if",
"len",
"(",
"keys",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"No suitable template parameter can be found.\"",
")",
"return",
"self",
"[",
"list",
"(",
"keys",
")",
"[",
"0",
"]",
"]",
".",
"New",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
354,
4
] | [
401,
55
] | python | en | ['en', 'en', 'en'] | True |
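New() narrows the instantiation keys by matching the first template parameter against the type of the primary input. A reduced sketch of just that filtering step, with made-up type names standing in for wrapped ITK classes:

instantiations = {
    ("ImageUC2", "ImageUC2"): "MedianImageFilterIUC2IUC2",
    ("ImageF3", "ImageF3"): "MedianImageFilterIF3IF3",
}

def pick_key(input_type):
    # Keep only instantiations whose first parameter matches the input's type.
    keys = [k for k in instantiations if k[0] == input_type]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    return keys[0]

assert instantiations[pick_key("ImageF3")] == "MedianImageFilterIF3IF3"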
itkTemplate.GetTypes | (self) | Helper method which prints out the available template parameters. | Helper method which prints out the available template parameters. | def GetTypes(self):
"""Helper method which prints out the available template parameters."""
print("<itkTemplate %s>" % self.__name__)
print("Options:")
for tp in self.GetTypesAsList():
print(" " + str(tp).replace("(", "[").replace(")", "]")) | [
"def",
"GetTypes",
"(",
"self",
")",
":",
"print",
"(",
"\"<itkTemplate %s>\"",
"%",
"self",
".",
"__name__",
")",
"print",
"(",
"\"Options:\"",
")",
"for",
"tp",
"in",
"self",
".",
"GetTypesAsList",
"(",
")",
":",
"print",
"(",
"\" \"",
"+",
"str",
"(",
"tp",
")",
".",
"replace",
"(",
"\"(\"",
",",
"\"[\"",
")",
".",
"replace",
"(",
"\")\"",
",",
"\"]\"",
")",
")"
] | [
508,
4
] | [
514,
69
] | python | en | ['en', 'en', 'en'] | True |
itkTemplate.GetTypesAsList | (self) | Helper method which returns the available template parameters. | Helper method which returns the available template parameters. | def GetTypesAsList(self):
"""Helper method which returns the available template parameters."""
# Make a list of allowed types, and sort them
ctypes = []
classes = []
others = []
for key_tuple in self.__template__:
key = str(key_tuple)
if "itkCType" in key:
ctypes.append(key)
elif "class" in key:
classes.append(key)
else:
others.append(key)
# Sort the lists
ctypes = sorted(ctypes)
classes = sorted(classes)
others = sorted(others)
return ctypes + classes + others | [
"def",
"GetTypesAsList",
"(",
"self",
")",
":",
"# Make a list of allowed types, and sort them",
"ctypes",
"=",
"[",
"]",
"classes",
"=",
"[",
"]",
"others",
"=",
"[",
"]",
"for",
"key_tuple",
"in",
"self",
".",
"__template__",
":",
"key",
"=",
"str",
"(",
"key_tuple",
")",
"if",
"\"itkCType\"",
"in",
"key",
":",
"ctypes",
".",
"append",
"(",
"key",
")",
"elif",
"\"class\"",
"in",
"key",
":",
"classes",
".",
"append",
"(",
"key",
")",
"else",
":",
"others",
".",
"append",
"(",
"key",
")",
"# Sort the lists",
"ctypes",
"=",
"sorted",
"(",
"ctypes",
")",
"classes",
"=",
"sorted",
"(",
"classes",
")",
"others",
"=",
"sorted",
"(",
"others",
")",
"return",
"ctypes",
"+",
"classes",
"+",
"others"
] | [
516,
4
] | [
537,
40
] | python | en | ['en', 'en', 'en'] | True |
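GetTypesAsList orders keys by bucketing their string forms: itkCType entries first, then class entries, then everything else, each bucket sorted. The same grouping run on made-up key strings:

keys = ["(3,)", "(<class 'itkImageF2'>,)", "(<itkCType unsigned char>, 2)"]
ctypes, classes, others = [], [], []
for key in keys:
    if "itkCType" in key:
        ctypes.append(key)
    elif "class" in key:
        classes.append(key)
    else:
        others.append(key)
print(sorted(ctypes) + sorted(classes) + sorted(others))
# ["(<itkCType unsigned char>, 2)", "(<class 'itkImageF2'>,)", "(3,)"]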
State.__init__ | (self, state_name: str, state_type: str) |
Abstract class to describe the base for Steps.
:param state_name: step's symbolic name.
:param state_type: step's type.
|
Abstract class to describe the base for Steps. | def __init__(self, state_name: str, state_type: str):
"""
Abstract class to describe the base for Steps.
:param state_name: step's symbolic name.
:param state_type: step's type.
"""
self._name: str = state_name
self._type: str = state_type | [
"def",
"__init__",
"(",
"self",
",",
"state_name",
":",
"str",
",",
"state_type",
":",
"str",
")",
":",
"self",
".",
"_name",
":",
"str",
"=",
"state_name",
"self",
".",
"_type",
":",
"str",
"=",
"state_type"
] | [
5,
4
] | [
13,
36
] | python | en | ['en', 'error', 'th'] | False |
State.state_as_map | (self) |
Return object as a map.
:return: dictionary containing aws-relevant json properties.
|
Return object as a map. | def state_as_map(self) -> {}:
"""
Return object as a map.
:return: dictionary containing aws-relevant json properties.
"""
data = {"Type": self._type}
return data | [
"def",
"state_as_map",
"(",
"self",
")",
"->",
"{",
"}",
":",
"data",
"=",
"{",
"\"Type\"",
":",
"self",
".",
"_type",
"}",
"return",
"data"
] | [
16,
4
] | [
23,
19
] | python | en | ['en', 'error', 'th'] | False |
StateMachine.__init__ | (self, states: [State], startAt: str) |
State machine definition.
:param states: array of states.
:param startAt: name of the starting state.
|
State machine definition. | def __init__(self, states: [State], startAt: str):
"""
State machine definition.
:param states: array of states.
:param startAt: name of the starting state.
"""
if not states:
raise Exception("You should provide at least one state in the argument array.")
self._states = states
if not startAt:
raise Exception("You should provide a starting step as argument.")
self._startAt = startAt | [
"def",
"__init__",
"(",
"self",
",",
"states",
":",
"[",
"State",
"]",
",",
"startAt",
":",
"str",
")",
":",
"if",
"not",
"states",
":",
"raise",
"Exception",
"(",
"\"You should provide at least one state in the argument array.\"",
")",
"self",
".",
"_states",
"=",
"states",
"if",
"not",
"startAt",
":",
"raise",
"Exception",
"(",
"\"You should provide a starting step as argument.\"",
")",
"self",
".",
"_startAt",
"=",
"startAt"
] | [
27,
4
] | [
41,
31
] | python | en | ['en', 'error', 'th'] | False |
StateMachine.get_as_map | (self) |
Return object as a map.
:return: dictionary containing aws-relevant json properties.
|
Return object as a map. | def get_as_map(self) -> {}:
"""
Return object as a map.
:return: dictionary containing aws-relevant json properties.
"""
data = {}
data["StartAt"] = self._startAt
data["States"] = self.__states_as_map()
return data | [
"def",
"get_as_map",
"(",
"self",
")",
"->",
"{",
"}",
":",
"data",
"=",
"{",
"}",
"data",
"[",
"\"StartAt\"",
"]",
"=",
"self",
".",
"_startAt",
"data",
"[",
"\"States\"",
"]",
"=",
"self",
".",
"__states_as_map",
"(",
")",
"return",
"data"
] | [
43,
4
] | [
54,
19
] | python | en | ['en', 'error', 'th'] | False |
StateMachine.__states_as_map | (self) |
Convert all of the states into maps.
:return: map of states.
|
Convert all of the states into maps. | def __states_as_map(self) -> {}:
"""
Convert all of the states into maps.
:return: map of states.
"""
states = {}
for state in self._states:
states[state._name] = state.state_as_map()
return states | [
"def",
"__states_as_map",
"(",
"self",
")",
"->",
"{",
"}",
":",
"states",
"=",
"{",
"}",
"for",
"state",
"in",
"self",
".",
"_states",
":",
"states",
"[",
"state",
".",
"_name",
"]",
"=",
"state",
".",
"state_as_map",
"(",
")",
"return",
"states"
] | [
56,
4
] | [
66,
21
] | python | en | ['en', 'error', 'th'] | False |
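Putting the pieces together: assuming the State subclasses from the records nearby are importable from one module, get_as_map nests each state's state_as_map() under its name, matching the Amazon States Language layout. The exact per-state fields depend on each subclass's state_as_map override, which this file only shows for the base class:

wait = WaitState("Pause", next_state="Done", seconds=5)
done = SucceedState("Done")
machine = StateMachine(states=[wait, done], startAt="Pause")
print(machine.get_as_map())
# e.g. {'StartAt': 'Pause', 'States': {'Pause': {...}, 'Done': {...}}}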
ParallelState.__init__ | (self, state_name: str, branches: [StateMachine], next_step: str) |
Responsible for parallel execution of its branches.
:param branches: array of branches to be executed in parallel
:param next_step: next step to be executed after all the branches finish
|
Responsible for parallel execution of its branches. | def __init__(self, state_name: str, branches: [StateMachine], next_step: str):
"""
Responsible for parallel execution of its branches.
:param branches: array of branches to be executed in parallel
:param next_step: next step to be executed after all the branches finish
"""
super().__init__(state_name, "Parallel")
if not next_step:
raise Exception("You should provide a valid next step as argument.")
self._next = next_step
if len(branches) < 1:
raise Exception("You should provide at least one branch to the parallel task.")
self._branches = branches | [
"def",
"__init__",
"(",
"self",
",",
"state_name",
":",
"str",
",",
"branches",
":",
"[",
"StateMachine",
"]",
",",
"next_step",
":",
"str",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"state_name",
",",
"\"Parallel\"",
")",
"if",
"not",
"next_step",
":",
"raise",
"Exception",
"(",
"\"You should provide a valid next step as argument.\"",
")",
"self",
".",
"_next",
"=",
"next_step",
"if",
"len",
"(",
"branches",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"\"You should provide at least one branch to the parallel task.\"",
")",
"self",
".",
"_branches",
"=",
"branches"
] | [
70,
4
] | [
84,
33
] | python | en | ['en', 'error', 'th'] | False |
WaitState.__init__ | (self, state_name: str, next_state: str, seconds: int) |
State used for waiting.
:param next_state: step to be executed after this one.
:param seconds: time in seconds to be spent waiting in this step.
|
State used for waiting. | def __init__(self, state_name: str, next_state: str, seconds: int):
"""
State used for waiting.
:param next_state: step to be executed after this one.
:param seconds: time in seconds to be spent waiting in this step.
"""
super().__init__(state_name, "Wait")
if not next_state:
raise Exception("You should provide a valid next step as argument.")
self._next = next_state
if seconds < 0:
raise Exception("Wait time cannot be less than 0 seconds")
self._seconds = seconds | [
"def",
"__init__",
"(",
"self",
",",
"state_name",
":",
"str",
",",
"next_state",
":",
"str",
",",
"seconds",
":",
"int",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"state_name",
",",
"\"Wait\"",
")",
"if",
"not",
"next_state",
":",
"raise",
"Exception",
"(",
"\"You should provide a valid next step as argument.\"",
")",
"self",
".",
"_next",
"=",
"next_state",
"if",
"seconds",
"<",
"0",
":",
"raise",
"Exception",
"(",
"\"Wait time cannot be less than 0 seconds\"",
")",
"self",
".",
"_seconds",
"=",
"seconds"
] | [
100,
4
] | [
115,
31
] | python | en | ['en', 'error', 'th'] | False |
SucceedState.__init__ | (self, state_name: str) |
Terminal state.
|
Terminal state.
| def __init__(self, state_name: str):
"""
Terminal state.
"""
super().__init__(state_name, "Succeed") | [
"def",
"__init__",
"(",
"self",
",",
"state_name",
":",
"str",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"state_name",
",",
"\"Succeed\"",
")"
] | [
125,
4
] | [
129,
47
] | python | en | ['en', 'error', 'th'] | False |
FailState.__init__ | (self, state_name: str, error: str, cause: str) |
Terminal state that fails current scope.
:param error: error name.
:param cause: human-readable message.
|
Terminal state that fails current scope. | def __init__(self, state_name: str, error: str, cause: str):
"""
Terminal state that fails current scope.
:param error: error name.
:param cause: human-readable message.
"""
super().__init__(state_name, "Fail")
self._error = error
self._cause = cause | [
"def",
"__init__",
"(",
"self",
",",
"state_name",
":",
"str",
",",
"error",
":",
"str",
",",
"cause",
":",
"str",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"state_name",
",",
"\"Fail\"",
")",
"self",
".",
"_error",
"=",
"error",
"self",
".",
"_cause",
"=",
"cause"
] | [
136,
4
] | [
145,
27
] | python | en | ['en', 'error', 'th'] | False |
TaskState.__init__ | (
self,
state_name: str,
lambda_arn: str,
next_step: str = "",
timeout: int = 60,
is_end_state: bool = False,
) |
Task state class.
:param timeout: if the step runs longer than timeout - state fails with States.Timeout.
:param lambda_arn: arn of the lambda function to be executed within the step.
:param next_step: next step to be executed after the current one.
:param is_end_state: if set to True, this is a terminal state.
|
Task state class. | def __init__(
self,
state_name: str,
lambda_arn: str,
next_step: str = "",
timeout: int = 60,
is_end_state: bool = False,
):
"""
Task state class.
:param timeout: if the step runs longer than timeout - state fails with States.Timeout.
:param lambda_arn: arn of the lambda function to be executed within the step.
:param next_step: next step to be executed after the current one.
:param is_end_state: if set to True, this is a terminal state.
"""
super().__init__(state_name, "Task")
self._resource: str = lambda_arn
self._next: str = next_step
self._end: bool = is_end_state
if timeout <= 0:
raise Exception("Timeout value should be a positive value.")
self._timeout = timeout | [
"def",
"__init__",
"(",
"self",
",",
"state_name",
":",
"str",
",",
"lambda_arn",
":",
"str",
",",
"next_step",
":",
"str",
"=",
"\"\"",
",",
"timeout",
":",
"int",
"=",
"60",
",",
"is_end_state",
":",
"bool",
"=",
"False",
",",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"state_name",
",",
"\"Task\"",
")",
"self",
".",
"_resource",
":",
"str",
"=",
"lambda_arn",
"self",
".",
"_next",
":",
"str",
"=",
"next_step",
"self",
".",
"_end",
":",
"bool",
"=",
"is_end_state",
"if",
"timeout",
"<=",
"0",
":",
"raise",
"Exception",
"(",
"\"Timeout value should be a positive value.\"",
")",
"self",
".",
"_timeout",
"=",
"timeout"
] | [
155,
4
] | [
178,
31
] | python | en | ['en', 'error', 'th'] | False |
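For reference, the JSON a TaskState is meant to serialize into follows the Amazon States Language. A hand-written fragment with a placeholder Lambda ARN (field names per the ASL spec; the exact serializer output is not shown in this file):

task_fragment = {
    "CheckInput": {
        "Type": "Task",
        "Resource": "arn:aws:lambda:us-east-1:123456789012:function:check",  # placeholder ARN
        "TimeoutSeconds": 60,
        "Next": "Done",
    }
}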
StepMachine.__init__ | (self, name: str, states: [State], startAt: str, comment: str = "") |
:param name: step machine symbolic name.
:param comment: optional comment.
|
:param name: step machine symbolic name.
:param comment: optional comment.
| def __init__(self, name: str, states: [State], startAt: str, comment: str = ""):
"""
:param name: step machine symbolic name.
:param comment: optional comment.
"""
super().__init__(states, startAt)
if len(name.strip()) < 2:
raise Exception("Step machine name should have at least two characters.")
self._name = name
self._comment = comment
self.state_machine_arn = None | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"states",
":",
"[",
"State",
"]",
",",
"startAt",
":",
"str",
",",
"comment",
":",
"str",
"=",
"\"\"",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"states",
",",
"startAt",
")",
"if",
"len",
"(",
"name",
".",
"strip",
"(",
")",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"\"Step machine name should have at least two characters.\"",
")",
"self",
".",
"_name",
"=",
"name",
"self",
".",
"_comment",
"=",
"comment",
"self",
".",
"state_machine_arn",
"=",
"None"
] | [
198,
4
] | [
210,
37
] | python | en | ['en', 'error', 'th'] | False |
StepMachine.get_as_aws_json | (self) |
Convert object to json string.
:return: json-aws state machine definition.
|
Convert object to json string. | def get_as_aws_json(self) -> str:
"""
Convert object to json string.
:return: json-aws state machine definition.
"""
return json.dumps(self.get_as_map()) | [
"def",
"get_as_aws_json",
"(",
"self",
")",
"->",
"str",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"get_as_map",
"(",
")",
")"
] | [
220,
4
] | [
227,
44
] | python | en | ['en', 'error', 'th'] | False |
StepMachine.execute | (self, client, role_arn: str, state_input: str = None) |
Execute the state machine defined by the current object.
:param client: boto3.client object.
:param role_arn: arn of the role.
:param state_input: optional input.
:return: execution arn.
|
Execute the state machine defined by the current object. | def execute(self, client, role_arn: str, state_input: str = None) -> str:
"""
Execute the state machine defined by the current object.
:param client: boto3.client object.
:param role_arn: arn of the role.
:param state_input: optional input.
:return: execution arn.
"""
if not self.state_machine_arn:
self.state_machine_arn = self.__create(client, role_arn)
try:
execution_response = client.start_execution(
stateMachineArn=self.state_machine_arn, input=json.dumps(state_input)
)
except Exception as ex:
print("error during execution - ", ex)
return ""
execution_arn = execution_response.get("executionArn")
return execution_arn | [
"def",
"execute",
"(",
"self",
",",
"client",
",",
"role_arn",
":",
"str",
",",
"state_input",
":",
"str",
"=",
"None",
")",
"->",
"str",
":",
"if",
"not",
"self",
".",
"state_machine_arn",
":",
"self",
".",
"state_machine_arn",
"=",
"self",
".",
"__create",
"(",
"client",
",",
"role_arn",
")",
"try",
":",
"execution_response",
"=",
"client",
".",
"start_execution",
"(",
"stateMachineArn",
"=",
"self",
".",
"state_machine_arn",
",",
"input",
"=",
"json",
".",
"dumps",
"(",
"state_input",
")",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"\"error during execution - \"",
",",
"ex",
")",
"return",
"\"\"",
"execution_arn",
"=",
"execution_response",
".",
"get",
"(",
"\"executionArn\"",
")",
"return",
"execution_arn"
] | [
229,
4
] | [
249,
28
] | python | en | ['en', 'error', 'th'] | False |
StepMachine.__create | (self, client, role_arn) |
Create the state machine, requires object state.
:param client: boto3.client object.
:param role_arn: arn of the role.
:return: arn of the created state machine.
|
Create the state machine, requires object state. | def __create(self, client, role_arn) -> str:
"""
Create the state machine, requires object state.
:param client: boto3.client object.
:param role_arn: arn of the role.
:return: arn of the created state machine.
"""
try:
response = client.create_state_machine(
name=self._name, definition=self.get_as_map(), roleArn=role_arn
)
self.state_machine_arn = response["stateMachineArn"]
except Exception as ex:
print("error: state machine not created - ", ex)
return ""
return response["stateMachineArn"] | [
"def",
"__create",
"(",
"self",
",",
"client",
",",
"role_arn",
")",
"->",
"str",
":",
"try",
":",
"response",
"=",
"client",
".",
"create_state_machine",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"definition",
"=",
"self",
".",
"get_as_map",
"(",
")",
",",
"roleArn",
"=",
"role_arn",
")",
"self",
".",
"state_machine_arn",
"=",
"response",
"[",
"\"stateMachineArn\"",
"]",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"\"error: state machine not created - \"",
",",
"ex",
")",
"return",
"\"\"",
"return",
"response",
"[",
"\"stateMachineArn\"",
"]"
] | [
251,
4
] | [
267,
42
] | python | en | ['en', 'error', 'th'] | False |
StepMachine.delete | (self, client, sm_arn: str = None) |
Delete the local step machine if no arn is specified.
:param client: boto3.client object.
:param sm_arn: optional state machine arn; if not specified, the object's own state machine is deleted.
:return:
|
Delete the local step machine if no arn is specified. | def delete(self, client, sm_arn: str = None) -> str:
"""
Delete the local step machine if no arn is specified.
:param client: boto3.client object.
:param sm_arn: optional state machine arn; if not specified, the object's own state machine is deleted.
:return:
"""
arn = sm_arn
if not arn:
if not self.state_machine_arn:
raise Exception("Argument arn is empty and object has no created state machine.")
arn = self.state_machine_arn
try:
response = client.delete_state_machine(stateMachineArn=arn)
return response
except Exception as ex:
print("error: state machine was not deleted - ", ex) | [
"def",
"delete",
"(",
"self",
",",
"client",
",",
"sm_arn",
":",
"str",
"=",
"None",
")",
"->",
"str",
":",
"arn",
"=",
"sm_arn",
"if",
"not",
"arn",
":",
"if",
"not",
"self",
".",
"state_machine_arn",
":",
"raise",
"Exception",
"(",
"\"Argument arn is empty and object has no created state machine.\"",
")",
"arn",
"=",
"self",
".",
"state_machine_arn",
"try",
":",
"response",
"=",
"client",
".",
"delete_state_machine",
"(",
"stateMachineArn",
"=",
"arn",
")",
"return",
"response",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"\"error: state machine was not deleted - \"",
",",
"ex",
")"
] | [
269,
4
] | [
287,
64
] | python | en | ['en', 'error', 'th'] | False |
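An end-to-end sketch of the StepMachine lifecycle (created on first execute, then deleted). It is left commented out because it needs AWS credentials and a real IAM role; the ARN below is a placeholder:

# import boto3
# client = boto3.client("stepfunctions")
# sm = StepMachine("demo-machine", states=[SucceedState("Done")], startAt="Done")
# execution_arn = sm.execute(client,
#                            role_arn="arn:aws:iam::123456789012:role/sfn-role",  # placeholder
#                            state_input={"hello": "world"})
# sm.delete(client)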
_Replacement_write_data | (writer, data, is_attrib=False) | Writes datachars to writer. | Writes datachars to writer. | def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data) | [
"def",
"_Replacement_write_data",
"(",
"writer",
",",
"data",
",",
"is_attrib",
"=",
"False",
")",
":",
"data",
"=",
"data",
".",
"replace",
"(",
"\"&\"",
",",
"\"&\"",
")",
".",
"replace",
"(",
"\"<\"",
",",
"\"<\"",
")",
"data",
"=",
"data",
".",
"replace",
"(",
"\"\\\"\"",
",",
"\""\"",
")",
".",
"replace",
"(",
"\">\"",
",",
"\">\"",
")",
"if",
"is_attrib",
":",
"data",
"=",
"data",
".",
"replace",
"(",
"\"\\r\"",
",",
"\"
\"",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"
\"",
")",
".",
"replace",
"(",
"\"\\t\"",
",",
"\"	\"",
")",
"writer",
".",
"write",
"(",
"data",
")"
] | [
15,
0
] | [
24,
20
] | python | en | ['en', 'sn', 'en'] | True |
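The attribute branch above escapes carriage returns, newlines, and tabs as numeric character references so they survive inside XML attribute values; note that & is replaced first so the later entities are not double-escaped. Spot-checking the same substitutions:

data = 'a<b & "c"\n'
data = data.replace("&", "&amp;").replace("<", "&lt;")
data = data.replace('"', "&quot;").replace(">", "&gt;")
data = data.replace("\r", "&#13;").replace("\n", "&#10;").replace("\t", "&#9;")
print(data)  # a&lt;b &amp; &quot;c&quot;&#10;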
BaseRetriever.retrieve | (self, query: str, filters: dict = None, top_k: int = 10, index: str = None) |
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
|
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query. | def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
pass | [
"def",
"retrieve",
"(",
"self",
",",
"query",
":",
"str",
",",
"filters",
":",
"dict",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"pass"
] | [
18,
4
] | [
28,
12
] | python | en | ['en', 'error', 'th'] | False |
BaseRetriever.timing | (self, fn) | Wrapper method used to time functions. | Wrapper method used to time functions. | def timing(self, fn):
"""Wrapper method used to time functions. """
@wraps(fn)
def wrapper(*args, **kwargs):
if "retrieve_time" not in self.__dict__:
self.retrieve_time = 0
tic = perf_counter()
ret = fn(*args, **kwargs)
toc = perf_counter()
self.retrieve_time += toc - tic
return ret
return wrapper | [
"def",
"timing",
"(",
"self",
",",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"\"retrieve_time\"",
"not",
"in",
"self",
".",
"__dict__",
":",
"self",
".",
"retrieve_time",
"=",
"0",
"tic",
"=",
"perf_counter",
"(",
")",
"ret",
"=",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"toc",
"=",
"perf_counter",
"(",
")",
"self",
".",
"retrieve_time",
"+=",
"toc",
"-",
"tic",
"return",
"ret",
"return",
"wrapper"
] | [
30,
4
] | [
41,
22
] | python | en | ['en', 'en', 'en'] | True |
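The timing wrapper accumulates wall-clock time across calls on the owning instance. A self-contained demonstration of the same pattern (the class name and sleep duration are illustrative):

from functools import wraps
from time import perf_counter, sleep

class Timed:
    def timing(self, fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if "retrieve_time" not in self.__dict__:
                self.retrieve_time = 0
            tic = perf_counter()
            ret = fn(*args, **kwargs)
            self.retrieve_time += perf_counter() - tic  # accumulate, not overwrite
            return ret
        return wrapper

t = Timed()
slow = t.timing(lambda: sleep(0.01))
slow(); slow()
print(f"accumulated: {t.retrieve_time:.3f}s")  # roughly 0.02s over two calls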
BaseRetriever.eval | (
self,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
top_k: int = 10,
open_domain: bool = False,
return_preds: bool = False,
) |
Performs evaluation on the Retriever.
Retriever is evaluated based on whether it finds the correct document given the query string and at which
position in the ranking of documents the correct document is.
| Returns a dict containing the following metrics:
- "recall": Proportion of questions for which correct document is among retrieved documents
- "mrr": Mean of reciprocal rank. Rewards retrievers that give relevant documents a higher rank.
Only considers the highest ranked relevant document.
- "map": Mean of average precision for each question. Rewards retrievers that give relevant
documents a higher rank. Considers all retrieved relevant documents. If ``open_domain=True``,
average precision is normalized by the number of retrieved relevant documents per query.
If ``open_domain=False``, average precision is normalized by the number of all relevant documents
per query.
:param label_index: Index/Table in DocumentStore where labeled questions are stored
:param doc_index: Index/Table in DocumentStore where documents that are used for evaluation are stored
:param top_k: How many documents to return per query
:param open_domain: If ``True``, retrieval will be evaluated by checking if the answer string to a question is
contained in the retrieved docs (common approach in open-domain QA).
If ``False``, retrieval uses a stricter evaluation that checks if the retrieved document ids
are within ids explicitly stated in the labels.
:param return_preds: Whether to add predictions in the returned dictionary. If True, the returned dictionary
contains the keys "predictions" and "metrics".
|
Performs evaluation on the Retriever.
Retriever is evaluated based on whether it finds the correct document given the query string and at which
position in the ranking of documents the correct document is. | def eval(
self,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
top_k: int = 10,
open_domain: bool = False,
return_preds: bool = False,
) -> dict:
"""
Performs evaluation on the Retriever.
Retriever is evaluated based on whether it finds the correct document given the query string and at which
position in the ranking of documents the correct document is.
| Returns a dict containing the following metrics:
- "recall": Proportion of questions for which correct document is among retrieved documents
- "mrr": Mean of reciprocal rank. Rewards retrievers that give relevant documents a higher rank.
Only considers the highest ranked relevant document.
- "map": Mean of average precision for each question. Rewards retrievers that give relevant
documents a higher rank. Considers all retrieved relevant documents. If ``open_domain=True``,
average precision is normalized by the number of retrieved relevant documents per query.
If ``open_domain=False``, average precision is normalized by the number of all relevant documents
per query.
:param label_index: Index/Table in DocumentStore where labeled questions are stored
:param doc_index: Index/Table in DocumentStore where documents that are used for evaluation are stored
:param top_k: How many documents to return per query
:param open_domain: If ``True``, retrieval will be evaluated by checking if the answer string to a question is
contained in the retrieved docs (common approach in open-domain QA).
If ``False``, retrieval uses a stricter evaluation that checks if the retrieved document ids
are within ids explicitly stated in the labels.
:param return_preds: Whether to add predictions in the returned dictionary. If True, the returned dictionary
contains the keys "predictions" and "metrics".
"""
# Extract all questions for evaluation
filters = {"origin": [label_origin]}
timed_retrieve = self.timing(self.retrieve)
labels = self.document_store.get_all_labels_aggregated(index=label_index, filters=filters)
correct_retrievals = 0
summed_avg_precision = 0.0
summed_reciprocal_rank = 0.0
# Collect questions and corresponding answers/document_ids in a dict
question_label_dict = {}
for label in labels:
if open_domain:
question_label_dict[label.question] = label.multiple_answers
else:
deduplicated_doc_ids = list(set([str(x) for x in label.multiple_document_ids]))
question_label_dict[label.question] = deduplicated_doc_ids
predictions = []
# Option 1: Open-domain evaluation by checking if the answer string is in the retrieved docs
logger.info("Performing eval queries...")
if open_domain:
for question, gold_answers in tqdm(question_label_dict.items()):
retrieved_docs = timed_retrieve(question, top_k=top_k, index=doc_index)
if return_preds:
predictions.append({"question": question, "retrieved_docs": retrieved_docs})
# check if correct doc in retrieved docs
found_relevant_doc = False
relevant_docs_found = 0
current_avg_precision = 0.0
for doc_idx, doc in enumerate(retrieved_docs):
for gold_answer in gold_answers:
if gold_answer in doc.text:
relevant_docs_found += 1
if not found_relevant_doc:
correct_retrievals += 1
summed_reciprocal_rank += 1 / (doc_idx + 1)
current_avg_precision += relevant_docs_found / (doc_idx + 1)
found_relevant_doc = True
break
if found_relevant_doc:
summed_avg_precision += current_avg_precision / relevant_docs_found
# Option 2: Strict evaluation by document ids that are listed in the labels
else:
for question, gold_ids in tqdm(question_label_dict.items()):
retrieved_docs = timed_retrieve(question, top_k=top_k, index=doc_index)
if return_preds:
predictions.append({"question": question, "retrieved_docs": retrieved_docs})
# check if correct doc in retrieved docs
found_relevant_doc = False
relevant_docs_found = 0
current_avg_precision = 0.0
for doc_idx, doc in enumerate(retrieved_docs):
for gold_id in gold_ids:
if str(doc.id) == gold_id:
relevant_docs_found += 1
if not found_relevant_doc:
correct_retrievals += 1
summed_reciprocal_rank += 1 / (doc_idx + 1)
current_avg_precision += relevant_docs_found / (doc_idx + 1)
found_relevant_doc = True
break
if found_relevant_doc:
all_relevant_docs = len(set(gold_ids))
summed_avg_precision += current_avg_precision / all_relevant_docs
# Metrics
number_of_questions = len(question_label_dict)
recall = correct_retrievals / number_of_questions
mean_reciprocal_rank = summed_reciprocal_rank / number_of_questions
mean_avg_precision = summed_avg_precision / number_of_questions
logger.info((f"For {correct_retrievals} out of {number_of_questions} questions ({recall:.2%}), the answer was in"
f" the top-{top_k} candidate passages selected by the retriever."))
metrics = {
"recall": recall,
"map": mean_avg_precision,
"mrr": mean_reciprocal_rank,
"retrieve_time": self.retrieve_time,
"n_questions": number_of_questions,
"top_k": top_k
}
if return_preds:
return {"metrics": metrics, "predictions": predictions}
else:
return metrics | [
"def",
"eval",
"(",
"self",
",",
"label_index",
":",
"str",
"=",
"\"label\"",
",",
"doc_index",
":",
"str",
"=",
"\"eval_document\"",
",",
"label_origin",
":",
"str",
"=",
"\"gold_label\"",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"open_domain",
":",
"bool",
"=",
"False",
",",
"return_preds",
":",
"bool",
"=",
"False",
",",
")",
"->",
"dict",
":",
"# Extract all questions for evaluation",
"filters",
"=",
"{",
"\"origin\"",
":",
"[",
"label_origin",
"]",
"}",
"timed_retrieve",
"=",
"self",
".",
"timing",
"(",
"self",
".",
"retrieve",
")",
"labels",
"=",
"self",
".",
"document_store",
".",
"get_all_labels_aggregated",
"(",
"index",
"=",
"label_index",
",",
"filters",
"=",
"filters",
")",
"correct_retrievals",
"=",
"0",
"summed_avg_precision",
"=",
"0.0",
"summed_reciprocal_rank",
"=",
"0.0",
"# Collect questions and corresponding answers/document_ids in a dict",
"question_label_dict",
"=",
"{",
"}",
"for",
"label",
"in",
"labels",
":",
"if",
"open_domain",
":",
"question_label_dict",
"[",
"label",
".",
"question",
"]",
"=",
"label",
".",
"multiple_answers",
"else",
":",
"deduplicated_doc_ids",
"=",
"list",
"(",
"set",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"label",
".",
"multiple_document_ids",
"]",
")",
")",
"question_label_dict",
"[",
"label",
".",
"question",
"]",
"=",
"deduplicated_doc_ids",
"predictions",
"=",
"[",
"]",
"# Option 1: Open-domain evaluation by checking if the answer string is in the retrieved docs",
"logger",
".",
"info",
"(",
"\"Performing eval queries...\"",
")",
"if",
"open_domain",
":",
"for",
"question",
",",
"gold_answers",
"in",
"tqdm",
"(",
"question_label_dict",
".",
"items",
"(",
")",
")",
":",
"retrieved_docs",
"=",
"timed_retrieve",
"(",
"question",
",",
"top_k",
"=",
"top_k",
",",
"index",
"=",
"doc_index",
")",
"if",
"return_preds",
":",
"predictions",
".",
"append",
"(",
"{",
"\"question\"",
":",
"question",
",",
"\"retrieved_docs\"",
":",
"retrieved_docs",
"}",
")",
"# check if correct doc in retrieved docs",
"found_relevant_doc",
"=",
"False",
"relevant_docs_found",
"=",
"0",
"current_avg_precision",
"=",
"0.0",
"for",
"doc_idx",
",",
"doc",
"in",
"enumerate",
"(",
"retrieved_docs",
")",
":",
"for",
"gold_answer",
"in",
"gold_answers",
":",
"if",
"gold_answer",
"in",
"doc",
".",
"text",
":",
"relevant_docs_found",
"+=",
"1",
"if",
"not",
"found_relevant_doc",
":",
"correct_retrievals",
"+=",
"1",
"summed_reciprocal_rank",
"+=",
"1",
"/",
"(",
"doc_idx",
"+",
"1",
")",
"current_avg_precision",
"+=",
"relevant_docs_found",
"/",
"(",
"doc_idx",
"+",
"1",
")",
"found_relevant_doc",
"=",
"True",
"break",
"if",
"found_relevant_doc",
":",
"summed_avg_precision",
"+=",
"current_avg_precision",
"/",
"relevant_docs_found",
"# Option 2: Strict evaluation by document ids that are listed in the labels",
"else",
":",
"for",
"question",
",",
"gold_ids",
"in",
"tqdm",
"(",
"question_label_dict",
".",
"items",
"(",
")",
")",
":",
"retrieved_docs",
"=",
"timed_retrieve",
"(",
"question",
",",
"top_k",
"=",
"top_k",
",",
"index",
"=",
"doc_index",
")",
"if",
"return_preds",
":",
"predictions",
".",
"append",
"(",
"{",
"\"question\"",
":",
"question",
",",
"\"retrieved_docs\"",
":",
"retrieved_docs",
"}",
")",
"# check if correct doc in retrieved docs",
"found_relevant_doc",
"=",
"False",
"relevant_docs_found",
"=",
"0",
"current_avg_precision",
"=",
"0.0",
"for",
"doc_idx",
",",
"doc",
"in",
"enumerate",
"(",
"retrieved_docs",
")",
":",
"for",
"gold_id",
"in",
"gold_ids",
":",
"if",
"str",
"(",
"doc",
".",
"id",
")",
"==",
"gold_id",
":",
"relevant_docs_found",
"+=",
"1",
"if",
"not",
"found_relevant_doc",
":",
"correct_retrievals",
"+=",
"1",
"summed_reciprocal_rank",
"+=",
"1",
"/",
"(",
"doc_idx",
"+",
"1",
")",
"current_avg_precision",
"+=",
"relevant_docs_found",
"/",
"(",
"doc_idx",
"+",
"1",
")",
"found_relevant_doc",
"=",
"True",
"break",
"if",
"found_relevant_doc",
":",
"all_relevant_docs",
"=",
"len",
"(",
"set",
"(",
"gold_ids",
")",
")",
"summed_avg_precision",
"+=",
"current_avg_precision",
"/",
"all_relevant_docs",
"# Metrics",
"number_of_questions",
"=",
"len",
"(",
"question_label_dict",
")",
"recall",
"=",
"correct_retrievals",
"/",
"number_of_questions",
"mean_reciprocal_rank",
"=",
"summed_reciprocal_rank",
"/",
"number_of_questions",
"mean_avg_precision",
"=",
"summed_avg_precision",
"/",
"number_of_questions",
"logger",
".",
"info",
"(",
"(",
"f\"For {correct_retrievals} out of {number_of_questions} questions ({recall:.2%}), the answer was in\"",
"f\" the top-{top_k} candidate passages selected by the retriever.\"",
")",
")",
"metrics",
"=",
"{",
"\"recall\"",
":",
"recall",
",",
"\"map\"",
":",
"mean_avg_precision",
",",
"\"mrr\"",
":",
"mean_reciprocal_rank",
",",
"\"retrieve_time\"",
":",
"self",
".",
"retrieve_time",
",",
"\"n_questions\"",
":",
"number_of_questions",
",",
"\"top_k\"",
":",
"top_k",
"}",
"if",
"return_preds",
":",
"return",
"{",
"\"metrics\"",
":",
"metrics",
",",
"\"predictions\"",
":",
"predictions",
"}",
"else",
":",
"return",
"metrics"
] | [
43,
4
] | [
168,
26
] | python | en | ['en', 'error', 'th'] | False |
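A worked example of the rank metrics accumulated above, for one query whose relevant hits sit at ranks 2 and 4 among the retrieved documents (open-domain normalization, i.e. dividing by the number of retrieved relevant docs):

relevant_ranks = [2, 4]  # 1-based positions of relevant docs among the retrieved
reciprocal_rank = 1 / relevant_ranks[0]  # 1/2 = 0.5, first relevant hit only
avg_precision = sum((i + 1) / rank
                    for i, rank in enumerate(relevant_ranks)) / len(relevant_ranks)
# precision@2 = 1/2, precision@4 = 2/4 -> AP = (0.5 + 0.5) / 2 = 0.5
print(reciprocal_rank, avg_precision)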
ExpectColumnSumToBeBetween.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
100,
4
] | [
112,
85
] | python | en | ['en', 'error', 'th'] | False |
pluralize | (singular_ge_noun) |
Pluralizes a Great Expectations singular noun
|
Pluralizes a Great Expectations singular noun
| def pluralize(singular_ge_noun):
"""
Pluralizes a Great Expectations singular noun
"""
try:
return SINGULAR_TO_PLURAL_LOOKUP_DICT[singular_ge_noun.lower()]
except KeyError:
raise GreatExpectationsError(
f"Unable to pluralize '{singular_ge_noun}'. Please update "
f"great_expectations.util.SINGULAR_TO_PLURAL_LOOKUP_DICT"
) | [
"def",
"pluralize",
"(",
"singular_ge_noun",
")",
":",
"try",
":",
"return",
"SINGULAR_TO_PLURAL_LOOKUP_DICT",
"[",
"singular_ge_noun",
".",
"lower",
"(",
")",
"]",
"except",
"KeyError",
":",
"raise",
"GreatExpectationsError",
"(",
"f\"Unable to pluralize '{singular_ge_noun}'. Please update \"",
"f\"great_expectations.util.SINGULAR_TO_PLURAL_LOOKUP_DICT\"",
")"
] | [
72,
0
] | [
82,
9
] | python | en | ['en', 'error', 'th'] | False |
singularize | (plural_ge_noun) |
Singularizes a Great Expectations plural noun
|
Singularizes a Great Expectations plural noun
| def singularize(plural_ge_noun):
"""
Singularizes a Great Expectations plural noun
"""
try:
return PLURAL_TO_SINGULAR_LOOKUP_DICT[plural_ge_noun.lower()]
except KeyError:
raise GreatExpectationsError(
f"Unable to singularize '{plural_ge_noun}'. Please update "
f"great_expectations.util.PLURAL_TO_SINGULAR_LOOKUP_DICT."
) | [
"def",
"singularize",
"(",
"plural_ge_noun",
")",
":",
"try",
":",
"return",
"PLURAL_TO_SINGULAR_LOOKUP_DICT",
"[",
"plural_ge_noun",
".",
"lower",
"(",
")",
"]",
"except",
"KeyError",
":",
"raise",
"GreatExpectationsError",
"(",
"f\"Unable to singularize '{plural_ge_noun}'. Please update \"",
"f\"great_expectations.util.PLURAL_TO_SINGULAR_LOOKUP_DICT.\"",
")"
] | [
85,
0
] | [
95,
9
] | python | en | ['en', 'error', 'th'] | False |
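Both helpers are plain lookups into a pair of inverse dictionaries. A minimal sketch with made-up entries (the real tables live in great_expectations.util):

SINGULAR_TO_PLURAL_LOOKUP_DICT = {"datasource": "datasources", "checkpoint": "checkpoints"}
PLURAL_TO_SINGULAR_LOOKUP_DICT = {v: k for k, v in SINGULAR_TO_PLURAL_LOOKUP_DICT.items()}

assert SINGULAR_TO_PLURAL_LOOKUP_DICT["checkpoint"] == "checkpoints"
assert PLURAL_TO_SINGULAR_LOOKUP_DICT["datasources"] == "datasource"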
underscore | (word: str) |
**Borrowed from inflection.underscore**
Make an underscored, lowercase form from the expression in the string.
Example::
>>> underscore("DeviceType")
'device_type'
As a rule of thumb you can think of :func:`underscore` as the inverse of
:func:`camelize`, though there are cases where that does not hold::
>>> camelize(underscore("IOError"))
'IoError'
|
**Borrowed from inflection.underscore**
Make an underscored, lowercase form from the expression in the string. | def underscore(word: str) -> str:
"""
**Borrowed from inflection.underscore**
Make an underscored, lowercase form from the expression in the string.
Example::
>>> underscore("DeviceType")
'device_type'
As a rule of thumb you can think of :func:`underscore` as the inverse of
:func:`camelize`, though there are cases where that does not hold::
>>> camelize(underscore("IOError"))
'IoError'
"""
word = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", word)
word = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", word)
word = word.replace("-", "_")
return word.lower() | [
"def",
"underscore",
"(",
"word",
":",
"str",
")",
"->",
"str",
":",
"word",
"=",
"re",
".",
"sub",
"(",
"r\"([A-Z]+)([A-Z][a-z])\"",
",",
"r\"\\1_\\2\"",
",",
"word",
")",
"word",
"=",
"re",
".",
"sub",
"(",
"r\"([a-z\\d])([A-Z])\"",
",",
"r\"\\1_\\2\"",
",",
"word",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
"return",
"word",
".",
"lower",
"(",
")"
] | [
98,
0
] | [
118,
23
] | python | en | ['en', 'error', 'th'] | False |
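Tracing the two regex passes in underscore on a harder input shows why both are needed: the first splits an acronym from the following word, the second splits ordinary camelCase boundaries:

import re

word = "HTTPResponseCode"
step1 = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", word)  # 'HTTP_ResponseCode'
step2 = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", step1)     # 'HTTP_Response_Code'
print(step2.replace("-", "_").lower())                   # http_response_code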
get_currently_executing_function_call_arguments | (
include_module_name: bool = False, include_caller_names: bool = False, **kwargs
) |
:param include_module_name: bool If True, module name will be determined and included in output dictionary (default is False)
:param include_caller_names: bool If True, arguments, such as "self" and "cls", if present, will be included in output dictionary (default is False)
:param kwargs:
:return: dict Output dictionary, consisting of call arguments as attribute "name: value" pairs.
Example usage:
# Gather the call arguments of the present function (include the "module_name" and add the "class_name"), filter
# out the Falsy values, and set the instance "_config" variable equal to the resulting dictionary.
self._config = get_currently_executing_function_call_arguments(
include_module_name=True,
**{
"class_name": self.__class__.__name__,
},
)
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
|
:param include_module_name: bool If True, module name will be determined and included in output dictionary (default is False)
:param include_caller_names: bool If True, arguments, such as "self" and "cls", if present, will be included in output dictionary (default is False)
:param kwargs:
:return: dict Output dictionary, consisting of call arguments as attribute "name: value" pairs. | def get_currently_executing_function_call_arguments(
include_module_name: bool = False, include_caller_names: bool = False, **kwargs
) -> dict:
"""
:param include_module_name: bool If True, module name will be determined and included in output dictionary (default is False)
:param include_caller_names: bool If True, arguments, such as "self" and "cls", if present, will be included in output dictionary (default is False)
:param kwargs:
:return: dict Output dictionary, consisting of call arguments as attribute "name: value" pairs.
Example usage:
# Gather the call arguments of the present function (include the "module_name" and add the "class_name"), filter
# out the Falsy values, and set the instance "_config" variable equal to the resulting dictionary.
self._config = get_currently_executing_function_call_arguments(
include_module_name=True,
**{
"class_name": self.__class__.__name__,
},
)
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
"""
cf: FrameType = currentframe()
fb: FrameType = cf.f_back
argvs: ArgInfo = getargvalues(fb)
fc: CodeType = fb.f_code
cur_func_obj: Callable = [
referer
for referer in get_referrers(fc)
if getattr(referer, "__code__", None) is fc
and getclosurevars(referer).nonlocals.items() <= fb.f_locals.items()
][0]
cur_mod = getmodule(cur_func_obj)
sig: Signature = signature(cur_func_obj)
params: dict = {}
var_positional: dict = {}
var_keyword: dict = {}
for key, param in sig.parameters.items():
val: Any = argvs.locals[key]
params[key] = val
if param.kind == Parameter.VAR_POSITIONAL:
var_positional[key] = val
elif param.kind == Parameter.VAR_KEYWORD:
var_keyword[key] = val
bound_args: BoundArguments = sig.bind(**params)
call_args: OrderedDict = bound_args.arguments
call_args_dict: dict = dict(call_args)
for key, value in var_positional.items():
call_args_dict[key] = value
for key, value in var_keyword.items():
call_args_dict.pop(key)
call_args_dict.update(value)
if include_module_name:
call_args_dict.update({"module_name": cur_mod.__name__})
if not include_caller_names:
if call_args.get("cls"):
call_args_dict.pop("cls", None)
if call_args.get("self"):
call_args_dict.pop("self", None)
call_args_dict.update(**kwargs)
return call_args_dict | [
"def",
"get_currently_executing_function_call_arguments",
"(",
"include_module_name",
":",
"bool",
"=",
"False",
",",
"include_caller_names",
":",
"bool",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"cf",
":",
"FrameType",
"=",
"currentframe",
"(",
")",
"fb",
":",
"FrameType",
"=",
"cf",
".",
"f_back",
"argvs",
":",
"ArgInfo",
"=",
"getargvalues",
"(",
"fb",
")",
"fc",
":",
"CodeType",
"=",
"fb",
".",
"f_code",
"cur_func_obj",
":",
"Callable",
"=",
"[",
"referer",
"for",
"referer",
"in",
"get_referrers",
"(",
"fc",
")",
"if",
"getattr",
"(",
"referer",
",",
"\"__code__\"",
",",
"None",
")",
"is",
"fc",
"and",
"getclosurevars",
"(",
"referer",
")",
".",
"nonlocals",
".",
"items",
"(",
")",
"<=",
"fb",
".",
"f_locals",
".",
"items",
"(",
")",
"]",
"[",
"0",
"]",
"cur_mod",
"=",
"getmodule",
"(",
"cur_func_obj",
")",
"sig",
":",
"Signature",
"=",
"signature",
"(",
"cur_func_obj",
")",
"params",
":",
"dict",
"=",
"{",
"}",
"var_positional",
":",
"dict",
"=",
"{",
"}",
"var_keyword",
":",
"dict",
"=",
"{",
"}",
"for",
"key",
",",
"param",
"in",
"sig",
".",
"parameters",
".",
"items",
"(",
")",
":",
"val",
":",
"Any",
"=",
"argvs",
".",
"locals",
"[",
"key",
"]",
"params",
"[",
"key",
"]",
"=",
"val",
"if",
"param",
".",
"kind",
"==",
"Parameter",
".",
"VAR_POSITIONAL",
":",
"var_positional",
"[",
"key",
"]",
"=",
"val",
"elif",
"param",
".",
"kind",
"==",
"Parameter",
".",
"VAR_KEYWORD",
":",
"var_keyword",
"[",
"key",
"]",
"=",
"val",
"bound_args",
":",
"BoundArguments",
"=",
"sig",
".",
"bind",
"(",
"*",
"*",
"params",
")",
"call_args",
":",
"OrderedDict",
"=",
"bound_args",
".",
"arguments",
"call_args_dict",
":",
"dict",
"=",
"dict",
"(",
"call_args",
")",
"for",
"key",
",",
"value",
"in",
"var_positional",
".",
"items",
"(",
")",
":",
"call_args_dict",
"[",
"key",
"]",
"=",
"value",
"for",
"key",
",",
"value",
"in",
"var_keyword",
".",
"items",
"(",
")",
":",
"call_args_dict",
".",
"pop",
"(",
"key",
")",
"call_args_dict",
".",
"update",
"(",
"value",
")",
"if",
"include_module_name",
":",
"call_args_dict",
".",
"update",
"(",
"{",
"\"module_name\"",
":",
"cur_mod",
".",
"__name__",
"}",
")",
"if",
"not",
"include_caller_names",
":",
"if",
"call_args",
".",
"get",
"(",
"\"cls\"",
")",
":",
"call_args_dict",
".",
"pop",
"(",
"\"cls\"",
",",
"None",
")",
"if",
"call_args",
".",
"get",
"(",
"\"self\"",
")",
":",
"call_args_dict",
".",
"pop",
"(",
"\"self\"",
",",
"None",
")",
"call_args_dict",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"return",
"call_args_dict"
] | [
186,
0
] | [
251,
25
] | python | en | ['en', 'error', 'th'] | False |
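A minimal sketch of the call-argument-capture pattern shown in the record above, using only the standard-library inspect module. The configure function is a hypothetical caller, and the sketch deliberately omits the *args/**kwargs and self/cls handling that the full helper performs.

import inspect

def capture_call_arguments() -> dict:
    # Grab the caller's frame and map its named parameters to the
    # values currently bound in that frame.
    caller_frame = inspect.currentframe().f_back
    arg_info = inspect.getargvalues(caller_frame)
    return {name: arg_info.locals[name] for name in arg_info.args}

def configure(host, port=5432, debug=False):  # hypothetical caller
    return capture_call_arguments()

print(configure("localhost", port=5433))
# {'host': 'localhost', 'port': 5433, 'debug': False}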
verify_dynamic_loading_support | (module_name: str, package_name: str = None) |
:param module_name: a possibly-relative name of a module
:param package_name: the name of the package to which the given module belongs
|
:param module_name: a possibly-relative name of a module
:param package_name: the name of the package to which the given module belongs
| def verify_dynamic_loading_support(module_name: str, package_name: str = None) -> None:
"""
:param module_name: a possibly-relative name of a module
:param package_name: the name of the package to which the given module belongs
"""
try:
# noinspection PyUnresolvedReferences
module_spec: importlib.machinery.ModuleSpec = importlib.util.find_spec(
module_name, package=package_name
)
except ModuleNotFoundError:
module_spec = None
if not module_spec:
if not package_name:
package_name = ""
message: str = f"""No module named "{package_name + module_name}" could be found in the repository. Please \
make sure that the file, corresponding to this package and module, exists and that dynamic loading of code modules, \
templates, and assets is supported in your execution environment. This error is unrecoverable.
"""
raise FileNotFoundError(message) | [
"def",
"verify_dynamic_loading_support",
"(",
"module_name",
":",
"str",
",",
"package_name",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"try",
":",
"# noinspection PyUnresolvedReferences",
"module_spec",
":",
"importlib",
".",
"machinery",
".",
"ModuleSpec",
"=",
"importlib",
".",
"util",
".",
"find_spec",
"(",
"module_name",
",",
"package",
"=",
"package_name",
")",
"except",
"ModuleNotFoundError",
":",
"module_spec",
"=",
"None",
"if",
"not",
"module_spec",
":",
"if",
"not",
"package_name",
":",
"package_name",
"=",
"\"\"",
"message",
":",
"str",
"=",
"f\"\"\"No module named \"{package_name + module_name}\" could be found in the repository. Please \\\nmake sure that the file, corresponding to this package and module, exists and that dynamic loading of code modules, \\\ntemplates, and assets is supported in your execution environment. This error is unrecoverable.\n \"\"\"",
"raise",
"FileNotFoundError",
"(",
"message",
")"
] | [
254,
0
] | [
273,
40
] | python | en | ['en', 'error', 'th'] | False |
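A usage sketch for the loadability check above; it probes importlib.util.find_spec directly, mirroring what the helper does before raising. The module names are purely illustrative.

import importlib.util

for name in ("json", "no_such_module_xyz"):  # illustrative names
    try:
        spec = importlib.util.find_spec(name)
    except ModuleNotFoundError:
        spec = None
    # A None spec is the condition the helper above converts into a
    # FileNotFoundError with a descriptive message.
    print(name, "loadable" if spec is not None else "missing")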
import_library_module | (module_name: str) |
:param module_name: a fully-qualified name of a module (e.g., "great_expectations.dataset.sqlalchemy_dataset")
:return: the imported module object, or None if the module cannot be imported
|
:param module_name: a fully-qualified name of a module (e.g., "great_expectations.dataset.sqlalchemy_dataset")
:return: the imported module object, or None if the module cannot be imported
| def import_library_module(module_name: str) -> Optional[ModuleType]:
"""
:param module_name: a fully-qualified name of a module (e.g., "great_expectations.dataset.sqlalchemy_dataset")
:return: the imported module object, or None if the module cannot be imported
"""
module_obj: Optional[ModuleType]
try:
module_obj = importlib.import_module(module_name)
except ImportError:
module_obj = None
return module_obj | [
"def",
"import_library_module",
"(",
"module_name",
":",
"str",
")",
"->",
"Optional",
"[",
"ModuleType",
"]",
":",
"module_obj",
":",
"Optional",
"[",
"ModuleType",
"]",
"try",
":",
"module_obj",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"except",
"ImportError",
":",
"module_obj",
"=",
"None",
"return",
"module_obj"
] | [
276,
0
] | [
288,
21
] | python | en | ['en', 'error', 'th'] | False |
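A guarded optional-import sketch in the spirit of the helper above. The import path is an assumption; adjust it to wherever the helper actually lives.

from great_expectations.util import import_library_module  # assumed path

sqlalchemy_module = import_library_module(module_name="sqlalchemy")
if sqlalchemy_module is None:
    # Dependency absent: degrade gracefully instead of raising.
    print("sqlalchemy not installed; SQL-backed features disabled")
else:
    print("loaded", sqlalchemy_module.__name__)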
_convert_to_dataset_class | (df, dataset_class, expectation_suite=None, profiler=None) |
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
Args:
df: the DataFrame object to convert
dataset_class: the class to which to convert the existing DataFrame
expectation_suite: the expectation suite that should be attached to the resulting dataset
profiler: the profiler to use to generate baseline expectations, if any
Returns:
A new Dataset object
|
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite | def _convert_to_dataset_class(df, dataset_class, expectation_suite=None, profiler=None):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
Args:
df: the DataFrame object to convert
dataset_class: the class to which to convert the existing DataFrame
expectation_suite: the expectation suite that should be attached to the resulting dataset
profiler: the profiler to use to generate baseline expectations, if any
Returns:
A new Dataset object
"""
if expectation_suite is not None:
# Create a dataset of the new class type, and manually initialize expectations according to
# the provided expectation suite
new_df = dataset_class.from_dataset(df)
new_df._initialize_expectations(expectation_suite)
else:
# Instantiate the new Dataset with default expectations
new_df = dataset_class.from_dataset(df)
if profiler is not None:
new_df.profile(profiler)
return new_df | [
"def",
"_convert_to_dataset_class",
"(",
"df",
",",
"dataset_class",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
")",
":",
"if",
"expectation_suite",
"is",
"not",
"None",
":",
"# Create a dataset of the new class type, and manually initialize expectations according to",
"# the provided expectation suite",
"new_df",
"=",
"dataset_class",
".",
"from_dataset",
"(",
"df",
")",
"new_df",
".",
"_initialize_expectations",
"(",
"expectation_suite",
")",
"else",
":",
"# Instantiate the new Dataset with default expectations",
"new_df",
"=",
"dataset_class",
".",
"from_dataset",
"(",
"df",
")",
"if",
"profiler",
"is",
"not",
"None",
":",
"new_df",
".",
"profile",
"(",
"profiler",
")",
"return",
"new_df"
] | [
322,
0
] | [
347,
17
] | python | en | ['en', 'error', 'th'] | False |
_load_and_convert_to_dataset_class | (
df, class_name, module_name, expectation_suite=None, profiler=None
) |
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
Args:
df: the DataFrame object to convert
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
expectation_suite: the expectation suite that should be attached to the resulting dataset
profiler: the profiler to use to generate baseline expectations, if any
Returns:
A new Dataset object
|
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite | def _load_and_convert_to_dataset_class(
df, class_name, module_name, expectation_suite=None, profiler=None
):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
Args:
df: the DataFrame object to convert
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
expectation_suite: the expectation suite that should be attached to the resulting dataset
profiler: the profiler to use to generate baseline expectations, if any
Returns:
A new Dataset object
"""
verify_dynamic_loading_support(module_name=module_name)
dataset_class = load_class(class_name, module_name)
return _convert_to_dataset_class(df, dataset_class, expectation_suite, profiler) | [
"def",
"_load_and_convert_to_dataset_class",
"(",
"df",
",",
"class_name",
",",
"module_name",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
")",
":",
"verify_dynamic_loading_support",
"(",
"module_name",
"=",
"module_name",
")",
"dataset_class",
"=",
"load_class",
"(",
"class_name",
",",
"module_name",
")",
"return",
"_convert_to_dataset_class",
"(",
"df",
",",
"dataset_class",
",",
"expectation_suite",
",",
"profiler",
")"
] | [
350,
0
] | [
368,
84
] | python | en | ['en', 'error', 'th'] | False |
read_csv | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_csv and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
| Read a file using Pandas read_csv and return a great_expectations dataset. | def read_csv(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_csv and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_csv(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"read_csv",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
371,
0
] | [
412,
9
] | python | en | ['en', 'en', 'en'] | True |
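A usage sketch for the CSV wrapper above, assuming it is exposed at the package top level; the file path and column name are placeholders. Extra positional and keyword arguments are forwarded verbatim to pandas.read_csv.

import great_expectations as ge

# "titanic.csv" and "PassengerId" are placeholders for a real file/column.
df = ge.read_csv("titanic.csv", nrows=100)  # nrows passes through to pandas
result = df.expect_column_values_to_not_be_null("PassengerId")
print(result.success)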
read_json | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
accessor_func=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_json and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
accessor_func (Callable): functions to transform the json object in the file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
| Read a file using Pandas read_json and return a great_expectations dataset. | def read_json(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
accessor_func=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_json and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
accessor_func (Callable): functions to transform the json object in the file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
if accessor_func is not None:
json_obj = json.load(open(filename, "rb"))
json_obj = accessor_func(json_obj)
df = pd.read_json(json.dumps(json_obj), *args, **kwargs)
else:
df = pd.read_json(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"read_json",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"accessor_func",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"if",
"accessor_func",
"is",
"not",
"None",
":",
"json_obj",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
")",
"json_obj",
"=",
"accessor_func",
"(",
"json_obj",
")",
"df",
"=",
"pd",
".",
"read_json",
"(",
"json",
".",
"dumps",
"(",
"json_obj",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"df",
"=",
"pd",
".",
"read_json",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
415,
0
] | [
465,
9
] | python | en | ['en', 'en', 'en'] | True |
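A sketch of the accessor_func hook above, which unwraps a nested payload before it reaches pandas. The file name and the "results" key are assumptions about the payload shape.

import great_expectations as ge

def unwrap(payload):
    # Pull the record list out of a wrapper such as
    # {"results": [...], "next_page": ...}.
    return payload["results"]

df = ge.read_json("api_dump.json", accessor_func=unwrap)
print(len(df))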
read_excel | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported
| Read a file using Pandas read_excel and return a great_expectations dataset. | def read_excel(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported
"""
import pandas as pd
try:
df = pd.read_excel(filename, *args, **kwargs)
except ImportError:
raise ImportError(
"Pandas now requires 'openpyxl' as an optional-dependency to read Excel files. Please use pip or conda to install openpyxl and try again"
)
if dataset_class is None:
verify_dynamic_loading_support(module_name=module_name)
dataset_class = load_class(class_name=class_name, module_name=module_name)
if isinstance(df, dict):
for key in df:
df[key] = _convert_to_dataset_class(
df=df[key],
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
df = _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
return df | [
"def",
"read_excel",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"try",
":",
"df",
"=",
"pd",
".",
"read_excel",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Pandas now requires 'openpyxl' as an optional-dependency to read Excel files. Please use pip or conda to install openpyxl and try again\"",
")",
"if",
"dataset_class",
"is",
"None",
":",
"verify_dynamic_loading_support",
"(",
"module_name",
"=",
"module_name",
")",
"dataset_class",
"=",
"load_class",
"(",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
")",
"if",
"isinstance",
"(",
"df",
",",
"dict",
")",
":",
"for",
"key",
"in",
"df",
":",
"df",
"[",
"key",
"]",
"=",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
"[",
"key",
"]",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"df",
"=",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"return",
"df"
] | [
468,
0
] | [
520,
13
] | python | en | ['en', 'en', 'en'] | True |
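A sketch of the multi-worksheet behavior documented above: with sheet_name=None, pandas returns a dict of DataFrames and the wrapper converts each one. The file name is a placeholder, and openpyxl must be installed.

import great_expectations as ge

sheets = ge.read_excel("report.xlsx", sheet_name=None)  # placeholder path
if isinstance(sheets, dict):
    # One converted dataset per worksheet, keyed by sheet name.
    for name, dataset in sheets.items():
        print(name, len(dataset))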
read_table | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_table and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
| Read a file using Pandas read_table and return a great_expectations dataset. | def read_table(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_table and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_table(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"read_table",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"df",
"=",
"pd",
".",
"read_table",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
523,
0
] | [
564,
9
] | python | en | ['en', 'en', 'en'] | True |
read_feather | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_feather and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
| Read a file using Pandas read_feather and return a great_expectations dataset. | def read_feather(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_feather and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_feather(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"read_feather",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"df",
"=",
"pd",
".",
"read_feather",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
567,
0
] | [
608,
9
] | python | en | ['en', 'en', 'en'] | True |
read_parquet | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_parquet and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
| Read a file using Pandas read_parquet and return a great_expectations dataset. | def read_parquet(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_parquet and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_parquet(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"read_parquet",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"df",
"=",
"pd",
".",
"read_parquet",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
611,
0
] | [
652,
9
] | python | en | ['en', 'en', 'en'] | True |
from_pandas | (
pandas_df,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
) | Read a Pandas data frame and return a great_expectations dataset.
Args:
pandas_df (Pandas df): Pandas data frame
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string) = None: path to great_expectations expectation suite file
profiler (profiler class) = None: The profiler that should
be run on the dataset to establish a baseline expectation suite.
Returns:
great_expectations dataset
| Read a Pandas data frame and return a great_expectations dataset. | def from_pandas(
pandas_df,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
):
"""Read a Pandas data frame and return a great_expectations dataset.
Args:
pandas_df (Pandas df): Pandas data frame
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string) = None: path to great_expectations expectation suite file
profiler (profiler class) = None: The profiler that should
be run on the dataset to establish a baseline expectation suite.
Returns:
great_expectations dataset
"""
if dataset_class is not None:
return _convert_to_dataset_class(
df=pandas_df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=pandas_df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"from_pandas",
"(",
"pandas_df",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
")",
":",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"pandas_df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"pandas_df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
655,
0
] | [
692,
9
] | python | en | ['en', 'en', 'en'] | True |
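A sketch converting an in-memory frame with the helper above, assuming the top-level alias; with no expectation_suite or profiler the result simply starts from an empty suite.

import pandas as pd
import great_expectations as ge

raw = pd.DataFrame({"id": [1, 2, 3], "amount": [10.0, 15.5, None]})
dataset = ge.from_pandas(raw)

# The converted object accepts expectations directly; the null in
# "amount" makes this check fail.
print(dataset.expect_column_values_to_not_be_null("amount").success)  # False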
read_pickle | (
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
) | Read a file using Pandas read_pickle and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
| Read a file using Pandas read_pickle and return a great_expectations dataset. | def read_pickle(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_pickle and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_pickle(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
) | [
"def",
"read_pickle",
"(",
"filename",
",",
"class_name",
"=",
"\"PandasDataset\"",
",",
"module_name",
"=",
"\"great_expectations.dataset\"",
",",
"dataset_class",
"=",
"None",
",",
"expectation_suite",
"=",
"None",
",",
"profiler",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"import",
"pandas",
"as",
"pd",
"df",
"=",
"pd",
".",
"read_pickle",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"dataset_class",
"is",
"not",
"None",
":",
"return",
"_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"dataset_class",
"=",
"dataset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")",
"else",
":",
"return",
"_load_and_convert_to_dataset_class",
"(",
"df",
"=",
"df",
",",
"class_name",
"=",
"class_name",
",",
"module_name",
"=",
"module_name",
",",
"expectation_suite",
"=",
"expectation_suite",
",",
"profiler",
"=",
"profiler",
",",
")"
] | [
695,
0
] | [
736,
9
] | python | en | ['en', 'en', 'en'] | True |
validate | (
data_asset,
expectation_suite=None,
data_asset_name=None,
expectation_suite_name=None,
data_context=None,
data_asset_class_name=None,
data_asset_module_name="great_expectations.dataset",
data_asset_class=None,
*args,
**kwargs,
) | Validate the provided data asset. Validate can accept an optional data_asset_name to apply, data_context to use
to fetch an expectation_suite if one is not provided, and data_asset_class_name/data_asset_module_name or
data_asset_class to use to provide custom expectations.
Args:
data_asset: the asset to validate
expectation_suite: the suite to use, or None to fetch one using a DataContext
data_asset_name: the name of the data asset to use
expectation_suite_name: the name of the expectation_suite to use
data_context: data context to use to fetch an expectation suite, or the path from which to obtain one
data_asset_class_name: the name of a class to dynamically load a DataAsset class
data_asset_module_name: the name of the module to dynamically load a DataAsset class
data_asset_class: a class to use; overrides data_asset_class_name/data_asset_module_name if provided
*args:
**kwargs:
Returns:
| Validate the provided data asset. Validate can accept an optional data_asset_name to apply, data_context to use
to fetch an expectation_suite if one is not provided, and data_asset_class_name/data_asset_module_name or
data_asset_class to use to provide custom expectations. | def validate(
data_asset,
expectation_suite=None,
data_asset_name=None,
expectation_suite_name=None,
data_context=None,
data_asset_class_name=None,
data_asset_module_name="great_expectations.dataset",
data_asset_class=None,
*args,
**kwargs,
):
"""Validate the provided data asset. Validate can accept an optional data_asset_name to apply, data_context to use
to fetch an expectation_suite if one is not provided, and data_asset_class_name/data_asset_module_name or
data_asset_class to use to provide custom expectations.
Args:
data_asset: the asset to validate
expectation_suite: the suite to use, or None to fetch one using a DataContext
data_asset_name: the name of the data asset to use
expectation_suite_name: the name of the expectation_suite to use
data_context: data context to use to fetch an expectation suite, or the path from which to obtain one
data_asset_class_name: the name of a class to dynamically load a DataAsset class
data_asset_module_name: the name of the module to dynamically load a DataAsset class
data_asset_class: a class to use; overrides data_asset_class_name/data_asset_module_name if provided
*args:
**kwargs:
Returns:
"""
# Get an expectation suite if not provided
if expectation_suite is None and data_context is None:
raise ValueError(
"Either an expectation suite or a DataContext is required for validation."
)
if expectation_suite is None:
logger.info("Using expectation suite from DataContext.")
# Allow data_context to be a string, and try loading it from path in that case
if isinstance(data_context, str):
from great_expectations.data_context import DataContext
data_context = DataContext(data_context)
expectation_suite = data_context.get_expectation_suite(
expectation_suite_name=expectation_suite_name
)
else:
if isinstance(expectation_suite, dict):
expectation_suite = expectationSuiteSchema.load(expectation_suite)
if data_asset_name is not None:
raise ValueError(
"When providing an expectation suite, data_asset_name cannot also be provided."
)
if expectation_suite_name is not None:
raise ValueError(
"When providing an expectation suite, expectation_suite_name cannot also be provided."
)
logger.info(
"Validating data_asset_name %s with expectation_suite_name %s"
% (data_asset_name, expectation_suite.expectation_suite_name)
)
# If the object is already a DataAsset type, then this is purely a convenience method
# and no conversion is needed; try to run validate on the given object
if data_asset_class_name is None and data_asset_class is None:
return data_asset.validate(
expectation_suite=expectation_suite,
data_context=data_context,
*args,
**kwargs,
)
# Otherwise, try to convert and validate the dataset
if data_asset_class is None:
verify_dynamic_loading_support(module_name=data_asset_module_name)
data_asset_class = load_class(data_asset_class_name, data_asset_module_name)
import pandas as pd
from great_expectations.dataset import Dataset, PandasDataset
if data_asset_class is None:
# Guess the GE data_asset_type based on the type of the data_asset
if isinstance(data_asset, pd.DataFrame):
data_asset_class = PandasDataset
# Add other data_asset_type conditions here as needed
# Otherwise, we will convert for the user to a subclass of the
# existing class to enable new expectations, but only for datasets
if not isinstance(data_asset, (Dataset, pd.DataFrame)):
raise ValueError(
"The validate util method only supports dataset validations, including custom subclasses. For other data "
"asset types, use the object's own validate method."
)
if not issubclass(type(data_asset), data_asset_class):
if isinstance(data_asset, pd.DataFrame) and issubclass(
data_asset_class, PandasDataset
):
pass # This is a special type of allowed coercion
else:
raise ValueError(
"The validate util method only supports validation for subtypes of the provided data_asset_type."
)
data_asset_ = _convert_to_dataset_class(
data_asset, dataset_class=data_asset_class, expectation_suite=expectation_suite
)
return data_asset_.validate(*args, data_context=data_context, **kwargs) | [
"def",
"validate",
"(",
"data_asset",
",",
"expectation_suite",
"=",
"None",
",",
"data_asset_name",
"=",
"None",
",",
"expectation_suite_name",
"=",
"None",
",",
"data_context",
"=",
"None",
",",
"data_asset_class_name",
"=",
"None",
",",
"data_asset_module_name",
"=",
"\"great_expectations.dataset\"",
",",
"data_asset_class",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"# Get an expectation suite if not provided",
"if",
"expectation_suite",
"is",
"None",
"and",
"data_context",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Either an expectation suite or a DataContext is required for validation.\"",
")",
"if",
"expectation_suite",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"Using expectation suite from DataContext.\"",
")",
"# Allow data_context to be a string, and try loading it from path in that case",
"if",
"isinstance",
"(",
"data_context",
",",
"str",
")",
":",
"from",
"great_expectations",
".",
"data_context",
"import",
"DataContext",
"data_context",
"=",
"DataContext",
"(",
"data_context",
")",
"expectation_suite",
"=",
"data_context",
".",
"get_expectation_suite",
"(",
"expectation_suite_name",
"=",
"expectation_suite_name",
")",
"else",
":",
"if",
"isinstance",
"(",
"expectation_suite",
",",
"dict",
")",
":",
"expectation_suite",
"=",
"expectationSuiteSchema",
".",
"load",
"(",
"expectation_suite",
")",
"if",
"data_asset_name",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"When providing an expectation suite, data_asset_name cannot also be provided.\"",
")",
"if",
"expectation_suite_name",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"When providing an expectation suite, expectation_suite_name cannot also be provided.\"",
")",
"logger",
".",
"info",
"(",
"\"Validating data_asset_name %s with expectation_suite_name %s\"",
"%",
"(",
"data_asset_name",
",",
"expectation_suite",
".",
"expectation_suite_name",
")",
")",
"# If the object is already a DataAsset type, then this is purely a convenience method",
"# and no conversion is needed; try to run validate on the given object",
"if",
"data_asset_class_name",
"is",
"None",
"and",
"data_asset_class",
"is",
"None",
":",
"return",
"data_asset",
".",
"validate",
"(",
"expectation_suite",
"=",
"expectation_suite",
",",
"data_context",
"=",
"data_context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
"# Otherwise, try to convert and validate the dataset",
"if",
"data_asset_class",
"is",
"None",
":",
"verify_dynamic_loading_support",
"(",
"module_name",
"=",
"data_asset_module_name",
")",
"data_asset_class",
"=",
"load_class",
"(",
"data_asset_class_name",
",",
"data_asset_module_name",
")",
"import",
"pandas",
"as",
"pd",
"from",
"great_expectations",
".",
"dataset",
"import",
"Dataset",
",",
"PandasDataset",
"if",
"data_asset_class",
"is",
"None",
":",
"# Guess the GE data_asset_type based on the type of the data_asset",
"if",
"isinstance",
"(",
"data_asset",
",",
"pd",
".",
"DataFrame",
")",
":",
"data_asset_class",
"=",
"PandasDataset",
"# Add other data_asset_type conditions here as needed",
"# Otherwise, we will convert for the user to a subclass of the",
"# existing class to enable new expectations, but only for datasets",
"if",
"not",
"isinstance",
"(",
"data_asset",
",",
"(",
"Dataset",
",",
"pd",
".",
"DataFrame",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"The validate util method only supports dataset validations, including custom subclasses. For other data \"",
"\"asset types, use the object's own validate method.\"",
")",
"if",
"not",
"issubclass",
"(",
"type",
"(",
"data_asset",
")",
",",
"data_asset_class",
")",
":",
"if",
"isinstance",
"(",
"data_asset",
",",
"pd",
".",
"DataFrame",
")",
"and",
"issubclass",
"(",
"data_asset_class",
",",
"PandasDataset",
")",
":",
"pass",
"# This is a special type of allowed coercion",
"else",
":",
"raise",
"ValueError",
"(",
"\"The validate util method only supports validation for subtypes of the provided data_asset_type.\"",
")",
"data_asset_",
"=",
"_convert_to_dataset_class",
"(",
"data_asset",
",",
"dataset_class",
"=",
"data_asset_class",
",",
"expectation_suite",
"=",
"expectation_suite",
")",
"return",
"data_asset_",
".",
"validate",
"(",
"*",
"args",
",",
"data_context",
"=",
"data_context",
",",
"*",
"*",
"kwargs",
")"
] | [
739,
0
] | [
848,
75
] | python | en | ['en', 'en', 'en'] | True |
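A sketch of the dict-suite path above: a plain dict is loaded through expectationSuiteSchema before validation. This assumes the schema accepts just a name plus an expectations list; the asset is pre-converted so no class coercion is involved.

import pandas as pd
import great_expectations as ge

dataset = ge.from_pandas(pd.DataFrame({"x": [1, 2, 3]}))
suite = {
    "expectation_suite_name": "demo_suite",  # hypothetical suite name
    "expectations": [
        {
            "expectation_type": "expect_column_values_to_be_between",
            "kwargs": {"column": "x", "min_value": 0, "max_value": 10},
        }
    ],
}
results = ge.validate(dataset, expectation_suite=suite)
print(results.success)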
gen_directory_tree_str | (startpath) | Print the structure of a directory as a tree:
Ex:
project_dir0/
AAA/
BBB/
aaa.txt
bbb.txt
#Note: files and directories are sorted alphabetically, so that this method can be used for testing.
| Print the structure of a directory as a tree: | def gen_directory_tree_str(startpath):
"""Print the structure of directory as a tree:
Ex:
project_dir0/
AAA/
BBB/
aaa.txt
bbb.txt
#Note: files and directories are sorted alphabetically, so that this method can be used for testing.
"""
output_str = ""
tuples = list(os.walk(startpath))
tuples.sort()
for root, dirs, files in tuples:
level = root.replace(startpath, "").count(os.sep)
indent = " " * 4 * level
output_str += "{}{}/\n".format(indent, os.path.basename(root))
subindent = " " * 4 * (level + 1)
files.sort()
for f in files:
output_str += "{}{}\n".format(subindent, f)
return output_str | [
"def",
"gen_directory_tree_str",
"(",
"startpath",
")",
":",
"output_str",
"=",
"\"\"",
"tuples",
"=",
"list",
"(",
"os",
".",
"walk",
"(",
"startpath",
")",
")",
"tuples",
".",
"sort",
"(",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"tuples",
":",
"level",
"=",
"root",
".",
"replace",
"(",
"startpath",
",",
"\"\"",
")",
".",
"count",
"(",
"os",
".",
"sep",
")",
"indent",
"=",
"\" \"",
"*",
"4",
"*",
"level",
"output_str",
"+=",
"\"{}{}/\\n\"",
".",
"format",
"(",
"indent",
",",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
")",
"subindent",
"=",
"\" \"",
"*",
"4",
"*",
"(",
"level",
"+",
"1",
")",
"files",
".",
"sort",
"(",
")",
"for",
"f",
"in",
"files",
":",
"output_str",
"+=",
"\"{}{}\\n\"",
".",
"format",
"(",
"subindent",
",",
"f",
")",
"return",
"output_str"
] | [
852,
0
] | [
880,
21
] | python | en | ['en', 'en', 'en'] | True |
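A self-contained demo of the tree renderer above, assuming the function is in scope; the temporary layout mirrors the docstring's example, and the sorting makes the output deterministic.

import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "AAA", "BBB"))
    open(os.path.join(root, "AAA", "aaa.txt"), "w").close()
    # Prints the temp dir, then AAA/ with aaa.txt beneath it, then BBB/,
    # indented four spaces per level.
    print(gen_directory_tree_str(root))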
lint_code | (code: str) | Lint strings of code passed in. Optional dependency "black" must be installed. | Lint strings of code passed in. Optional dependency "black" must be installed. | def lint_code(code: str) -> str:
"""Lint strings of code passed in. Optional dependency "black" must be installed."""
try:
import black
black_file_mode = black.FileMode()
if not isinstance(code, str):
raise TypeError
try:
linted_code = black.format_file_contents(
code, fast=True, mode=black_file_mode
)
return linted_code
except (black.NothingChanged, RuntimeError):
return code
except ImportError:
logger.warning(
"Please install the optional dependency 'black' to enable linting. Returning input with no changes."
)
return code | [
"def",
"lint_code",
"(",
"code",
":",
"str",
")",
"->",
"str",
":",
"try",
":",
"import",
"black",
"black_file_mode",
"=",
"black",
".",
"FileMode",
"(",
")",
"if",
"not",
"isinstance",
"(",
"code",
",",
"str",
")",
":",
"raise",
"TypeError",
"try",
":",
"linted_code",
"=",
"black",
".",
"format_file_contents",
"(",
"code",
",",
"fast",
"=",
"True",
",",
"mode",
"=",
"black_file_mode",
")",
"return",
"linted_code",
"except",
"(",
"black",
".",
"NothingChanged",
",",
"RuntimeError",
")",
":",
"return",
"code",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"Please install the optional dependency 'black' to enable linting. Returning input with no changes.\"",
")",
"return",
"code"
] | [
883,
0
] | [
902,
19
] | python | en | ['en', 'af', 'en'] | True |
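A sketch of the linting helper above, assuming it is in scope and black is installed; without black it logs a warning and returns the input unchanged.

messy = "x=1;y=2\nprint( x+y )\n"
print(lint_code(messy))
# With black available this prints:
# x = 1
# y = 2
# print(x + y)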
filter_properties_dict | (
properties: dict,
keep_fields: Optional[list] = None,
delete_fields: Optional[list] = None,
clean_nulls: Optional[bool] = True,
clean_falsy: Optional[bool] = False,
keep_falsy_numerics: Optional[bool] = True,
inplace: Optional[bool] = False,
) | Filter the entries of the source dictionary according to directives concerning the existing keys and values.
Args:
properties: source dictionary to be filtered according to the supplied filtering directives
keep_fields: list of keys that must be retained, with the understanding that all other entries will be deleted
delete_fields: list of keys that must be deleted, with the understanding that all other entries will be retained
clean_nulls: If True, then in addition to other filtering directives, delete entries whose values are None
clean_falsy: If True, then in addition to other filtering directives, delete entries whose values are Falsy
(If the "clean_falsy" argument is specified as "True", then "clean_nulls" is assumed to be "True" as well.)
inplace: If True, then modify the source properties dictionary; otherwise, make a copy for filtering purposes
keep_falsy_numerics: If True, then in addition to other filtering directives, do not delete zero-valued numerics
Returns:
The (possibly) filtered properties dictionary (or None if no entries remain after filtering is performed)
| Filter the entries of the source dictionary according to directives concerning the existing keys and values. | def filter_properties_dict(
properties: dict,
keep_fields: Optional[list] = None,
delete_fields: Optional[list] = None,
clean_nulls: Optional[bool] = True,
clean_falsy: Optional[bool] = False,
keep_falsy_numerics: Optional[bool] = True,
inplace: Optional[bool] = False,
) -> Optional[dict]:
"""Filter the entries of the source dictionary according to directives concerning the existing keys and values.
Args:
properties: source dictionary to be filtered according to the supplied filtering directives
keep_fields: list of keys that must be retained, with the understanding that all other entries will be deleted
delete_fields: list of keys that must be deleted, with the understanding that all other entries will be retained
clean_nulls: If True, then in addition to other filtering directives, delete entries whose values are None
clean_falsy: If True, then in addition to other filtering directives, delete entries whose values are Falsy
(If the "clean_falsy" argument is specified as "True", then "clean_nulls" is assumed to be "True" as well.)
inplace: If True, then modify the source properties dictionary; otherwise, make a copy for filtering purposes
keep_falsy_numerics: If True, then in addition to other filtering directives, do not delete zero-valued numerics
Returns:
The (possibly) filtered properties dictionary (or None if no entries remain after filtering is performed)
"""
if keep_fields and delete_fields:
raise ValueError(
"Only one of keep_fields and delete_fields filtering directives can be specified."
)
if clean_falsy:
clean_nulls = True
if not inplace:
properties = copy.deepcopy(properties)
keys_for_deletion: list = []
if keep_fields:
keys_for_deletion.extend(
[key for key, value in properties.items() if key not in keep_fields]
)
if delete_fields:
keys_for_deletion.extend(
[key for key, value in properties.items() if key in delete_fields]
)
if clean_nulls:
keys_for_deletion.extend(
[
key
for key, value in properties.items()
if not (
(keep_fields and key in keep_fields)
or (delete_fields and key in delete_fields)
or value is not None
)
]
)
if clean_falsy:
if keep_falsy_numerics:
keys_for_deletion.extend(
[
key
for key, value in properties.items()
if not (
(keep_fields and key in keep_fields)
or (delete_fields and key in delete_fields)
or is_numeric(value=value)
or value
)
]
)
else:
keys_for_deletion.extend(
[
key
for key, value in properties.items()
if not (
(keep_fields and key in keep_fields)
or (delete_fields and key in delete_fields)
or value
)
]
)
keys_for_deletion = list(set(keys_for_deletion))
for key in keys_for_deletion:
del properties[key]
if inplace:
return None
return properties | [
"def",
"filter_properties_dict",
"(",
"properties",
":",
"dict",
",",
"keep_fields",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"delete_fields",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"clean_nulls",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
"clean_falsy",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
"keep_falsy_numerics",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
"inplace",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
")",
"->",
"Optional",
"[",
"dict",
"]",
":",
"if",
"keep_fields",
"and",
"delete_fields",
":",
"raise",
"ValueError",
"(",
"\"Only one of keep_fields and delete_fields filtering directives can be specified.\"",
")",
"if",
"clean_falsy",
":",
"clean_nulls",
"=",
"True",
"if",
"not",
"inplace",
":",
"properties",
"=",
"copy",
".",
"deepcopy",
"(",
"properties",
")",
"keys_for_deletion",
":",
"list",
"=",
"[",
"]",
"if",
"keep_fields",
":",
"keys_for_deletion",
".",
"extend",
"(",
"[",
"key",
"for",
"key",
",",
"value",
"in",
"properties",
".",
"items",
"(",
")",
"if",
"key",
"not",
"in",
"keep_fields",
"]",
")",
"if",
"delete_fields",
":",
"keys_for_deletion",
".",
"extend",
"(",
"[",
"key",
"for",
"key",
",",
"value",
"in",
"properties",
".",
"items",
"(",
")",
"if",
"key",
"in",
"delete_fields",
"]",
")",
"if",
"clean_nulls",
":",
"keys_for_deletion",
".",
"extend",
"(",
"[",
"key",
"for",
"key",
",",
"value",
"in",
"properties",
".",
"items",
"(",
")",
"if",
"not",
"(",
"(",
"keep_fields",
"and",
"key",
"in",
"keep_fields",
")",
"or",
"(",
"delete_fields",
"and",
"key",
"in",
"delete_fields",
")",
"or",
"value",
"is",
"not",
"None",
")",
"]",
")",
"if",
"clean_falsy",
":",
"if",
"keep_falsy_numerics",
":",
"keys_for_deletion",
".",
"extend",
"(",
"[",
"key",
"for",
"key",
",",
"value",
"in",
"properties",
".",
"items",
"(",
")",
"if",
"not",
"(",
"(",
"keep_fields",
"and",
"key",
"in",
"keep_fields",
")",
"or",
"(",
"delete_fields",
"and",
"key",
"in",
"delete_fields",
")",
"or",
"is_numeric",
"(",
"value",
"=",
"value",
")",
"or",
"value",
")",
"]",
")",
"else",
":",
"keys_for_deletion",
".",
"extend",
"(",
"[",
"key",
"for",
"key",
",",
"value",
"in",
"properties",
".",
"items",
"(",
")",
"if",
"not",
"(",
"(",
"keep_fields",
"and",
"key",
"in",
"keep_fields",
")",
"or",
"(",
"delete_fields",
"and",
"key",
"in",
"delete_fields",
")",
"or",
"value",
")",
"]",
")",
"keys_for_deletion",
"=",
"list",
"(",
"set",
"(",
"keys_for_deletion",
")",
")",
"for",
"key",
"in",
"keys_for_deletion",
":",
"del",
"properties",
"[",
"key",
"]",
"if",
"inplace",
":",
"return",
"None",
"return",
"properties"
] | [
905,
0
] | [
1000,
21
] | python | en | ['en', 'en', 'en'] | True |
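A worked sketch of the falsy-filtering rules above, assuming the function is in scope: clean_falsy drops the empty string and None, while keep_falsy_numerics (the default) preserves the zero-valued numeric.

props = {"name": "demo", "batch_size": 0, "comment": "", "tags": None}
filtered = filter_properties_dict(properties=props, clean_falsy=True)
print(filtered)  # {'name': 'demo', 'batch_size': 0}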
is_sane_slack_webhook | (url: str) | Really basic sanity checking. | Really basic sanity checking. | def is_sane_slack_webhook(url: str) -> bool:
"""Really basic sanity checking."""
if url is None:
return False
return url.strip().startswith("https://hooks.slack.com/") | [
"def",
"is_sane_slack_webhook",
"(",
"url",
":",
"str",
")",
"->",
"bool",
":",
"if",
"url",
"is",
"None",
":",
"return",
"False",
"return",
"url",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"\"https://hooks.slack.com/\"",
")"
] | [
1038,
0
] | [
1043,
61
] | python | en | ['en', 'fil', 'en'] | True |
generate_library_json_from_registered_expectations | () | Generate the JSON object used to populate the public gallery | Generate the JSON object used to populate the public gallery | def generate_library_json_from_registered_expectations():
"""Generate the JSON object used to populate the public gallery"""
library_json = {}
for expectation_name, expectation in _registered_expectations.items():
report_object = expectation().run_diagnostics()
library_json[expectation_name] = report_object
return library_json | [
"def",
"generate_library_json_from_registered_expectations",
"(",
")",
":",
"library_json",
"=",
"{",
"}",
"for",
"expectation_name",
",",
"expectation",
"in",
"_registered_expectations",
".",
"items",
"(",
")",
":",
"report_object",
"=",
"expectation",
"(",
")",
".",
"run_diagnostics",
"(",
")",
"library_json",
"[",
"expectation_name",
"]",
"=",
"report_object",
"return",
"library_json"
] | [
1050,
0
] | [
1058,
23
] | python | en | ['en', 'en', 'en'] | True |
compute_iou | (box_1, box_2) |
This function takes a pair of bounding boxes and returns their
intersection-over-union (IoU).
|
This function takes a pair of bounding boxes and returns intersection-over-
union (IoU) of two bounding boxes.
| def compute_iou(box_1, box_2):
"""
This function takes a pair of bounding boxes and returns intersection-over-
union (IoU) of two bounding boxes.
"""
tl_row_1, tl_col_1, br_row_1, br_col_1 = box_1
tl_row_2, tl_col_2, br_row_2, br_col_2 = box_2
assert tl_row_1 < br_row_1
assert tl_col_1 < br_col_1
assert tl_row_2 < br_row_2
assert tl_col_2 < br_col_2
# Compute area of each respective box
area_1 = (br_row_1 - tl_row_1) * (br_col_1 - tl_col_1)
area_2 = (br_row_2 - tl_row_2) * (br_col_2 - tl_col_2)
# Compute area of intersection
tl_row_i = max(tl_row_1, tl_row_2)
tl_col_i = max(tl_col_1, tl_col_2)
br_row_i = min(br_row_1, br_row_2)
br_col_i = min(br_col_1, br_col_2)
if (br_row_i < tl_row_i) or (br_col_i < tl_col_i):
intersection_area = 0
else:
intersection_area = (br_row_i - tl_row_i) * (br_col_i - tl_col_i)
# Compute area of union
union_area = area_1 + area_2 - intersection_area
iou = intersection_area / union_area
assert (iou >= 0) and (iou <= 1.0)
return iou | [
"def",
"compute_iou",
"(",
"box_1",
",",
"box_2",
")",
":",
"tl_row_1",
",",
"tl_col_1",
",",
"br_row_1",
",",
"br_col_1",
"=",
"box_1",
"tl_row_2",
",",
"tl_col_2",
",",
"br_row_2",
",",
"br_col_2",
"=",
"box_2",
"assert",
"tl_row_1",
"<",
"br_row_1",
"assert",
"tl_col_1",
"<",
"br_col_1",
"assert",
"tl_row_2",
"<",
"br_row_2",
"assert",
"tl_col_2",
"<",
"br_col_2",
"# Compute area of each respective box",
"area_1",
"=",
"(",
"br_row_1",
"-",
"tl_row_1",
")",
"*",
"(",
"br_col_1",
"-",
"tl_col_1",
")",
"area_2",
"=",
"(",
"br_row_2",
"-",
"tl_row_2",
")",
"*",
"(",
"br_col_2",
"-",
"tl_col_2",
")",
"# Compute area of intersection",
"tl_row_i",
"=",
"max",
"(",
"tl_row_1",
",",
"tl_row_2",
")",
"tl_col_i",
"=",
"max",
"(",
"tl_col_1",
",",
"tl_col_2",
")",
"br_row_i",
"=",
"min",
"(",
"br_row_1",
",",
"br_row_2",
")",
"br_col_i",
"=",
"min",
"(",
"br_col_1",
",",
"br_col_2",
")",
"if",
"(",
"br_row_i",
"<",
"tl_row_i",
")",
"or",
"(",
"br_col_i",
"<",
"tl_col_i",
")",
":",
"intersection_area",
"=",
"0",
"else",
":",
"intersection_area",
"=",
"(",
"br_row_i",
"-",
"tl_row_i",
")",
"*",
"(",
"br_col_i",
"-",
"tl_col_i",
")",
"# Compute area of union",
"union_area",
"=",
"area_1",
"+",
"area_2",
"-",
"intersection_area",
"iou",
"=",
"intersection_area",
"/",
"union_area",
"assert",
"(",
"iou",
">=",
"0",
")",
"and",
"(",
"iou",
"<=",
"1.0",
")",
"return",
"iou"
] | [
10,
0
] | [
43,
14
] | python | en | ['en', 'error', 'th'] | False |
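A worked example for compute_iou above, assuming it is in scope; boxes are (tl_row, tl_col, br_row, br_col) and the coordinates are made up:

box_1 = (0, 0, 2, 2)  # area 4
box_2 = (1, 1, 3, 3)  # area 4, overlapping box_1 in the unit square (1,1)-(2,2)

# intersection = 1, union = 4 + 4 - 1 = 7, so IoU = 1/7
print(compute_iou(box_1, box_2))  # ~0.142857

# disjoint boxes have zero intersection, hence IoU 0.0
print(compute_iou((0, 0, 1, 1), (5, 5, 6, 6)))
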
compute_counts | (preds, ground_truths, iou_thr=0.5, conf_thr=0.5) |
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<ground_truths> is a dictionary containing ground truth bounding boxes for a
collection of images.
|
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<ground_truths> is a dictionary containing ground truth bounding boxes for a
collection of images.
| def compute_counts(preds, ground_truths, iou_thr=0.5, conf_thr=0.5):
"""
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<ground_truths> is a dictionary containing ground truth bounding boxes for a
collection of images.
"""
TP = 0
FP = 0
FN = 0
for pred_file, pred in preds.items():
# Keep track of true-positive/false-positive for each file
TP_file = 0
FP_file = 0
ground_truth_orig = ground_truths[pred_file]
ground_truth = copy.copy(ground_truth_orig)
for bbox_pred in pred:
# Discard predicted bounding boxes with low confidence scores
if bbox_pred[4] < conf_thr:
continue
for bbox_gt in ground_truth:
# See if it matched any ground-truth boxes
iou = compute_iou(bbox_pred[:4], bbox_gt)
if iou > iou_thr:
# Count it as a true-positive-match
TP_file += 1
# Remove this ground-truth so it doesn't get double-counted for a diff match
ground_truth.remove(bbox_gt)
break
else:
# There were no true-matches for this prediction,
# so count it as a false-positive
FP_file += 1
# False negatives: any bboxes we missed
FN_file = len(ground_truth_orig) - TP_file
# Accumulate values for each file
TP += TP_file
FP += FP_file
FN += FN_file
return TP, FP, FN | [
"def",
"compute_counts",
"(",
"preds",
",",
"ground_truths",
",",
"iou_thr",
"=",
"0.5",
",",
"conf_thr",
"=",
"0.5",
")",
":",
"TP",
"=",
"0",
"FP",
"=",
"0",
"FN",
"=",
"0",
"for",
"pred_file",
",",
"pred",
"in",
"preds",
".",
"items",
"(",
")",
":",
"# Keep track of true-positive/false-positive for each file",
"TP_file",
"=",
"0",
"FP_file",
"=",
"0",
"ground_truth_orig",
"=",
"ground_truths",
"[",
"pred_file",
"]",
"ground_truth",
"=",
"copy",
".",
"copy",
"(",
"ground_truth_orig",
")",
"for",
"bbox_pred",
"in",
"pred",
":",
"# Discard predicted bounding boxes with low confidence scores",
"if",
"bbox_pred",
"[",
"4",
"]",
"<",
"conf_thr",
":",
"continue",
"for",
"bbox_gt",
"in",
"ground_truth",
":",
"# See if it matched any ground-truth boxes",
"iou",
"=",
"compute_iou",
"(",
"bbox_pred",
"[",
":",
"4",
"]",
",",
"bbox_gt",
")",
"if",
"iou",
">",
"iou_thr",
":",
"# Count it as a true-positive-match",
"TP_file",
"+=",
"1",
"# Remove this ground-truth so it doesn't get double-counted for a diff match",
"ground_truth",
".",
"remove",
"(",
"bbox_gt",
")",
"break",
"else",
":",
"# There were no true-matches for this prediction,",
"# so count it as a false-positive",
"FP_file",
"+=",
"1",
"# False negatives: any bboxes we missed",
"FN_file",
"=",
"len",
"(",
"ground_truth_orig",
")",
"-",
"TP_file",
"# Accumulate values for each file",
"TP",
"+=",
"TP_file",
"FP",
"+=",
"FP_file",
"FN",
"+=",
"FN_file",
"return",
"TP",
",",
"FP",
",",
"FN"
] | [
46,
0
] | [
96,
21
] | python | en | ['en', 'error', 'th'] | False |
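A small end-to-end check of the counting logic above, assuming compute_iou and compute_counts are in scope (the function also relies on a module-level `import copy` not shown in this record); the file name and boxes are made up:

preds = {
    "img_001.jpg": [
        [0, 0, 10, 10, 0.90],    # matches the first ground-truth box
        [50, 50, 60, 60, 0.30],  # discarded: confidence below conf_thr
    ]
}
ground_truths = {
    "img_001.jpg": [
        [0, 0, 10, 10],
        [20, 20, 30, 30],        # never predicted -> false negative
    ]
}

tp, fp, fn = compute_counts(preds, ground_truths, iou_thr=0.5, conf_thr=0.5)
print(tp, fp, fn)  # expected: 1 0 1
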
main | () |
Load training data.
|
Load training data.
| def main():
args = parse_args()
'''
Load training data.
'''
with args.preds_folder.joinpath('preds_train.json').open('r') as f:
preds_train = json.load(f)
with args.splits_folder.joinpath('annotations_train.json').open('r') as f:
ground_truths_train = json.load(f)
if args.done_tweaking:
"""
Load test data.
"""
with args.preds_folder.joinpath('preds_test.json').open('r') as f:
preds_test = json.load(f)
with args.splits_folder.joinpath('annotations_test.json').open('r') as f:
ground_truths_test = json.load(f)
# Plot all curves on the same figure
fig, ax = plt.subplots()
ax.set_title('Precision-recall for RedLights2011_Medium (train set)')
# Different iou_thresholds to try:
for iou_thr in [0.25, 0.5, 0.75]:
# Different confidence thresholds to try
confidence_thrs = np.linspace(start=0.5, stop=1)
tp_train = np.zeros(len(confidence_thrs))
fp_train = np.zeros(len(confidence_thrs))
fn_train = np.zeros(len(confidence_thrs))
for i, conf_thr in enumerate(confidence_thrs):
tp_train[i], fp_train[i], fn_train[i] = compute_counts(
preds_train, ground_truths_train, iou_thr=iou_thr, conf_thr=conf_thr
)
# Plot training set PR curves
precision_train = tp_train / (tp_train + fp_train)
recall_train = tp_train / (tp_train + fn_train)
ax.plot(recall_train, precision_train, marker='o',
label=f'iou_thr={iou_thr}')
ax.set_xlabel('recall')
ax.set_ylabel('precision')
fig.legend()
plt.show()
if args.done_tweaking:
# Plot test set precision-recall curves
print('Plotting test set PR curves.')
# Plot all curves on the same figure
fig, ax = plt.subplots()
ax.set_title('Precision-recall for RedLights2011_Medium (test set)')
# Different iou_thresholds to try:
for iou_thr in [0.25, 0.5, 0.75]:
# Different confidence thresholds to try
confidence_thrs = np.linspace(start=0.5, stop=1)
tp_test = np.zeros(len(confidence_thrs))
fp_test = np.zeros(len(confidence_thrs))
fn_test = np.zeros(len(confidence_thrs))
for i, conf_thr in enumerate(confidence_thrs):
tp_test[i], fp_test[i], fn_test[i] = compute_counts(
preds_test, ground_truths_test, iou_thr=iou_thr, conf_thr=conf_thr
)
# Plot testing set PR curves
precision_test = tp_test / (tp_test + fp_test)
recall_test = tp_test / (tp_test + fn_test)
ax.plot(recall_test, precision_test, marker='o',
label=f'iou_thr={iou_thr}')
ax.set_xlabel('recall')
ax.set_ylabel('precision')
fig.legend()
plt.show() | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"with",
"args",
".",
"preds_folder",
".",
"joinpath",
"(",
"'preds_train.json'",
")",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"preds_train",
"=",
"json",
".",
"load",
"(",
"f",
")",
"with",
"args",
".",
"splits_folder",
".",
"joinpath",
"(",
"'annotations_train.json'",
")",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"ground_truths_train",
"=",
"json",
".",
"load",
"(",
"f",
")",
"if",
"args",
".",
"done_tweaking",
":",
"\"\"\"\n Load test data.\n \"\"\"",
"with",
"args",
".",
"preds_folder",
".",
"joinpath",
"(",
"'preds_test.json'",
")",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"preds_test",
"=",
"json",
".",
"load",
"(",
"f",
")",
"with",
"args",
".",
"splits_folder",
".",
"joinpath",
"(",
"'annotations_test.json'",
")",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"ground_truths_test",
"=",
"json",
".",
"load",
"(",
"f",
")",
"# Plot all curves on the same figure",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"ax",
".",
"set_title",
"(",
"'Precision-recall for RedLights2011_Medium (train set)'",
")",
"# Different iou_thresholds to try:",
"for",
"iou_thr",
"in",
"[",
"0.25",
",",
"0.5",
",",
"0.75",
"]",
":",
"# Different confidence thresholds to try",
"confidence_thrs",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"0.5",
",",
"stop",
"=",
"1",
")",
"tp_train",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"confidence_thrs",
")",
")",
"fp_train",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"confidence_thrs",
")",
")",
"fn_train",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"confidence_thrs",
")",
")",
"for",
"i",
",",
"conf_thr",
"in",
"enumerate",
"(",
"confidence_thrs",
")",
":",
"tp_train",
"[",
"i",
"]",
",",
"fp_train",
"[",
"i",
"]",
",",
"fn_train",
"[",
"i",
"]",
"=",
"compute_counts",
"(",
"preds_train",
",",
"ground_truths_train",
",",
"iou_thr",
"=",
"iou_thr",
",",
"conf_thr",
"=",
"conf_thr",
")",
"# Plot training set PR curves",
"precision_train",
"=",
"tp_train",
"/",
"(",
"tp_train",
"+",
"fp_train",
")",
"recall_train",
"=",
"tp_train",
"/",
"(",
"tp_train",
"+",
"fn_train",
")",
"ax",
".",
"plot",
"(",
"recall_train",
",",
"precision_train",
",",
"marker",
"=",
"'o'",
",",
"label",
"=",
"f'iou_thr={iou_thr}'",
")",
"ax",
".",
"set_xlabel",
"(",
"'recall'",
")",
"ax",
".",
"set_ylabel",
"(",
"'precision'",
")",
"fig",
".",
"legend",
"(",
")",
"plt",
".",
"show",
"(",
")",
"if",
"args",
".",
"done_tweaking",
":",
"# Plot test set precision-recall curves",
"print",
"(",
"'Plotting test set PR curves.'",
")",
"# Plot all curves on the same figure",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"ax",
".",
"set_title",
"(",
"'Precision-recall for RedLights2011_Medium (test set)'",
")",
"# Different iou_thresholds to try:",
"for",
"iou_thr",
"in",
"[",
"0.25",
",",
"0.5",
",",
"0.75",
"]",
":",
"# Different confidence thresholds to try",
"confidence_thrs",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"0.5",
",",
"stop",
"=",
"1",
")",
"tp_test",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"confidence_thrs",
")",
")",
"fp_test",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"confidence_thrs",
")",
")",
"fn_test",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"confidence_thrs",
")",
")",
"for",
"i",
",",
"conf_thr",
"in",
"enumerate",
"(",
"confidence_thrs",
")",
":",
"tp_test",
"[",
"i",
"]",
",",
"fp_test",
"[",
"i",
"]",
",",
"fn_test",
"[",
"i",
"]",
"=",
"compute_counts",
"(",
"preds_test",
",",
"ground_truths_test",
",",
"iou_thr",
"=",
"iou_thr",
",",
"conf_thr",
"=",
"conf_thr",
")",
"# Plot testing set PR curves",
"precision_test",
"=",
"tp_test",
"/",
"(",
"tp_test",
"+",
"fp_test",
")",
"recall_test",
"=",
"tp_test",
"/",
"(",
"tp_test",
"+",
"fn_test",
")",
"ax",
".",
"plot",
"(",
"recall_test",
",",
"precision_test",
",",
"marker",
"=",
"'o'",
",",
"label",
"=",
"f'iou_thr={iou_thr}'",
")",
"ax",
".",
"set_xlabel",
"(",
"'recall'",
")",
"ax",
".",
"set_ylabel",
"(",
"'precision'",
")",
"fig",
".",
"legend",
"(",
")",
"plt",
".",
"show",
"(",
")"
] | [
125,
0
] | [
206,
18
] | python | en | ['en', 'error', 'th'] | False |
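For reference, the element-wise ratios plotted above are the standard precision and recall; a quick numeric sanity check on made-up counts:

import numpy as np

tp = np.array([8.0, 3.0])
fp = np.array([2.0, 1.0])
fn = np.array([4.0, 9.0])

precision = tp / (tp + fp)  # [0.8, 0.75]
recall = tp / (tp + fn)     # [0.6667, 0.25]
print(precision, recall)
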
_library_not_loaded_test | (
tmp_path_factory, cli_input, library_name, library_import_name, my_caplog
) |
This test requires that a library is NOT installed. It tests that:
- a helpful error message is returned to install the missing library
- the expected tree structure is in place
- the config yml contains an empty dict in its datasource entry
|
This test requires that a library is NOT installed. It tests that:
- a helpful error message is returned to install the missing library
- the expected tree structure is in place
- the config yml contains an empty dict in its datasource entry
| def _library_not_loaded_test(
tmp_path_factory, cli_input, library_name, library_import_name, my_caplog
):
"""
This test requires that a library is NOT installed. It tests that:
- a helpful error message is returned to install the missing library
- the expected tree structure is in place
- the config yml contains an empty dict in its datasource entry
"""
basedir = tmp_path_factory.mktemp("test_cli_init_diff")
basedir = str(basedir)
os.chdir(basedir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli, ["init", "--no-view"], input=cli_input, catch_exceptions=False
)
stdout = result.output
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new Datasource a short name" in stdout
assert (
"""Next, we will configure database credentials and store them in the `my_db` section
of this config file: great_expectations/uncommitted/config_variables.yml"""
in stdout
)
assert (
f"""Great Expectations relies on the library `{library_import_name}` to connect to your data, \
but the package `{library_name}` containing this library is not installed.
Would you like Great Expectations to try to execute `pip install {library_name}` for you?"""
in stdout
)
assert (
f"""\nOK, exiting now.
- Please execute `pip install {library_name}` before trying again."""
in stdout
)
assert "Profiling" not in stdout
assert "Building" not in stdout
assert "Data Docs" not in stdout
assert "Great Expectations is now set up" not in stdout
assert result.exit_code == 1
assert os.path.isdir(os.path.join(basedir, "great_expectations"))
config_path = os.path.join(basedir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path))
assert config["datasources"] == dict()
obs_tree = gen_directory_tree_str(os.path.join(basedir, "great_expectations"))
assert (
obs_tree
== """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
expectations/
.ge_store_backend_id
notebooks/
pandas/
validation_playground.ipynb
spark/
validation_playground.ipynb
sql/
validation_playground.ipynb
plugins/
custom_data_docs/
renderers/
styles/
data_docs_custom_styles.css
views/
uncommitted/
config_variables.yml
data_docs/
validations/
.ge_store_backend_id
"""
)
assert_no_logging_messages_or_tracebacks(my_caplog, result) | [
"def",
"_library_not_loaded_test",
"(",
"tmp_path_factory",
",",
"cli_input",
",",
"library_name",
",",
"library_import_name",
",",
"my_caplog",
")",
":",
"basedir",
"=",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_cli_init_diff\"",
")",
"basedir",
"=",
"str",
"(",
"basedir",
")",
"os",
".",
"chdir",
"(",
"basedir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"init\"",
",",
"\"--no-view\"",
"]",
",",
"input",
"=",
"cli_input",
",",
"catch_exceptions",
"=",
"False",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"\"Always know what to expect from your data\"",
"in",
"stdout",
"assert",
"\"What data would you like Great Expectations to connect to\"",
"in",
"stdout",
"assert",
"\"Which database backend are you using\"",
"in",
"stdout",
"assert",
"\"Give your new Datasource a short name\"",
"in",
"stdout",
"assert",
"(",
"\"\"\"Next, we will configure database credentials and store them in the `my_db` section\nof this config file: great_expectations/uncommitted/config_variables.yml\"\"\"",
"in",
"stdout",
")",
"assert",
"(",
"f\"\"\"Great Expectations relies on the library `{library_import_name}` to connect to your data, \\\nbut the package `{library_name}` containing this library is not installed.\n Would you like Great Expectations to try to execute `pip install {library_name}` for you?\"\"\"",
"in",
"stdout",
")",
"assert",
"(",
"f\"\"\"\\nOK, exiting now.\n - Please execute `pip install {library_name}` before trying again.\"\"\"",
"in",
"stdout",
")",
"assert",
"\"Profiling\"",
"not",
"in",
"stdout",
"assert",
"\"Building\"",
"not",
"in",
"stdout",
"assert",
"\"Data Docs\"",
"not",
"in",
"stdout",
"assert",
"\"Great Expectations is now set up\"",
"not",
"in",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"great_expectations\"",
")",
")",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"great_expectations/great_expectations.yml\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"config_path",
")",
"config",
"=",
"yaml",
".",
"load",
"(",
"open",
"(",
"config_path",
")",
")",
"assert",
"config",
"[",
"\"datasources\"",
"]",
"==",
"dict",
"(",
")",
"obs_tree",
"=",
"gen_directory_tree_str",
"(",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"great_expectations\"",
")",
")",
"assert",
"(",
"obs_tree",
"==",
"\"\"\"\\\ngreat_expectations/\n .gitignore\n great_expectations.yml\n checkpoints/\n expectations/\n .ge_store_backend_id\n notebooks/\n pandas/\n validation_playground.ipynb\n spark/\n validation_playground.ipynb\n sql/\n validation_playground.ipynb\n plugins/\n custom_data_docs/\n renderers/\n styles/\n data_docs_custom_styles.css\n views/\n uncommitted/\n config_variables.yml\n data_docs/\n validations/\n .ge_store_backend_id\n\"\"\"",
")",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
",",
"result",
")"
] | [
14,
0
] | [
99,
63
] | python | en | ['en', 'error', 'th'] | False |
test_init_install_sqlalchemy | (caplog, tmp_path_factory) | WARNING: THIS TEST IS AWFUL AND WE HATE IT. | WARNING: THIS TEST IS AWFUL AND WE HATE IT. | def test_init_install_sqlalchemy(caplog, tmp_path_factory):
"""WARNING: THIS TEST IS AWFUL AND WE HATE IT."""
# This test is as much about changing the entire test environment with side effects as it is about actually testing
# the observed behavior.
library_import_name = "sqlalchemy"
library_name = "sqlalchemy"
cli_input = "\n\n2\nn\n"
basedir = tmp_path_factory.mktemp("test_cli_init_diff")
basedir = str(basedir)
os.chdir(basedir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli, ["init", "--no-view"], input=cli_input, catch_exceptions=False
)
stdout = result.output
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert (
f"""Great Expectations relies on the library `{library_import_name}` to connect to your data, \
but the package `{library_name}` containing this library is not installed.
Would you like Great Expectations to try to execute `pip install {library_name}` for you?"""
in stdout
)
# NOW, IN AN EVIL KNOWN ONLY TO SLEEPLESS PROGRAMMERS, WE USE OUR UTILITY TO INSTALL SQLALCHEMY
_ = execute_shell_command_with_progress_polling("pip install sqlalchemy") | [
"def",
"test_init_install_sqlalchemy",
"(",
"caplog",
",",
"tmp_path_factory",
")",
":",
"# This test is as much about changing the entire test environment with side effects as it is about actually testing",
"# the observed behavior.",
"library_import_name",
"=",
"\"sqlalchemy\"",
"library_name",
"=",
"\"sqlalchemy\"",
"cli_input",
"=",
"\"\\n\\n2\\nn\\n\"",
"basedir",
"=",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_cli_init_diff\"",
")",
"basedir",
"=",
"str",
"(",
"basedir",
")",
"os",
".",
"chdir",
"(",
"basedir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"init\"",
",",
"\"--no-view\"",
"]",
",",
"input",
"=",
"cli_input",
",",
"catch_exceptions",
"=",
"False",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"\"Always know what to expect from your data\"",
"in",
"stdout",
"assert",
"\"What data would you like Great Expectations to connect to\"",
"in",
"stdout",
"assert",
"(",
"f\"\"\"Great Expectations relies on the library `{library_import_name}` to connect to your data, \\\nbut the package `{library_name}` containing this library is not installed.\n Would you like Great Expectations to try to execute `pip install {library_name}` for you?\"\"\"",
"in",
"stdout",
")",
"# NOW, IN AN EVIL KNOWN ONLY TO SLEEPLESS PROGRAMMERS, WE USE OUR UTILITY TO INSTALL SQLALCHEMY",
"_",
"=",
"execute_shell_command_with_progress_polling",
"(",
"\"pip install sqlalchemy\"",
")"
] | [
106,
0
] | [
135,
77
] | python | en | ['en', 'en', 'en'] | True |
ExpectAlphabeticalColumnNameCountToEqual4.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# # Setting up a configuration
try:
assert "user_input" in configuration.kwargs, "user_input is required"
assert isinstance(
configuration.kwargs["user_input"], str
), "user_input must be a string"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
super().validate_configuration(configuration)
return True | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# # Setting up a configuration",
"try",
":",
"assert",
"\"user_input\"",
"in",
"configuration",
".",
"kwargs",
",",
"\"user_input is required\"",
"assert",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"user_input\"",
"]",
",",
"str",
")",
",",
"\"user_input must be a string\"",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"InvalidExpectationConfigurationError",
"(",
"str",
"(",
"e",
")",
")",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"return",
"True"
] | [
173,
4
] | [
194,
19
] | python | en | ['en', 'error', 'th'] | False |
cli_message_list | (string_list, list_intro_string=None) | Simple util function for displaying simple lists in cli | Simple util function for displaying simple lists in cli | def cli_message_list(string_list, list_intro_string=None):
"""Simple util function for displaying simple lists in cli"""
if list_intro_string:
cli_message(list_intro_string)
for string in string_list:
cli_message(string) | [
"def",
"cli_message_list",
"(",
"string_list",
",",
"list_intro_string",
"=",
"None",
")",
":",
"if",
"list_intro_string",
":",
"cli_message",
"(",
"list_intro_string",
")",
"for",
"string",
"in",
"string_list",
":",
"cli_message",
"(",
"string",
")"
] | [
47,
0
] | [
52,
27
] | python | en | ['en', 'en', 'en'] | True |
action_list_to_string | (action_list) | Util function for turning an action list into a pretty string | Util function for turning an action list into a pretty string | def action_list_to_string(action_list):
"""Util function for turning an action list into a pretty string"""
action_list_string = ""
for idx, action in enumerate(action_list):
action_list_string += "{} ({})".format(
action["name"], action["action"]["class_name"]
)
if idx == len(action_list) - 1:
continue
action_list_string += " => "
return action_list_string | [
"def",
"action_list_to_string",
"(",
"action_list",
")",
":",
"action_list_string",
"=",
"\"\"",
"for",
"idx",
",",
"action",
"in",
"enumerate",
"(",
"action_list",
")",
":",
"action_list_string",
"+=",
"\"{} ({})\"",
".",
"format",
"(",
"action",
"[",
"\"name\"",
"]",
",",
"action",
"[",
"\"action\"",
"]",
"[",
"\"class_name\"",
"]",
")",
"if",
"idx",
"==",
"len",
"(",
"action_list",
")",
"-",
"1",
":",
"continue",
"action_list_string",
"+=",
"\" => \"",
"return",
"action_list_string"
] | [
55,
0
] | [
65,
29
] | python | en | ['en', 'en', 'en'] | True |
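An illustration of the string built above, assuming the function is in scope; the action names and class names are assumptions modeled on the stock Great Expectations actions used elsewhere in this dump:

action_list = [
    {"name": "store_validation_result",
     "action": {"class_name": "StoreValidationResultAction"}},
    {"name": "update_data_docs",
     "action": {"class_name": "UpdateDataDocsAction"}},
]
print(action_list_to_string(action_list))
# store_validation_result (StoreValidationResultAction) => update_data_docs (UpdateDataDocsAction)
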
cli_message_dict | (
dict_, indent=3, bullet_char="-", message_list=None, recursion_flag=False
) | Util function for displaying nested dicts representing ge objects in cli | Util function for displaying nested dicts representing ge objects in cli | def cli_message_dict(
dict_, indent=3, bullet_char="-", message_list=None, recursion_flag=False
):
"""Util function for displaying nested dicts representing ge objects in cli"""
if message_list is None:
message_list = []
if dict_.get("name"):
name = dict_.pop("name")
message = "{}<cyan>name:</cyan> {}".format(" " * indent, name)
message_list.append(message)
if dict_.get("module_name"):
module_name = dict_.pop("module_name")
message = "{}<cyan>module_name:</cyan> {}".format(" " * indent, module_name)
message_list.append(message)
if dict_.get("class_name"):
class_name = dict_.pop("class_name")
message = "{}<cyan>class_name:</cyan> {}".format(" " * indent, class_name)
message_list.append(message)
if dict_.get("action_list"):
action_list = dict_.pop("action_list")
action_list_string = action_list_to_string(action_list)
message = "{}<cyan>action_list:</cyan> {}".format(
" " * indent, action_list_string
)
message_list.append(message)
sorted_keys = sorted(dict_.keys())
for key in sorted_keys:
if key == "password":
message = "{}<cyan>password:</cyan> ******".format(" " * indent)
message_list.append(message)
continue
if isinstance(dict_[key], dict):
message = "{}<cyan>{}:</cyan>".format(" " * indent, key)
message_list.append(message)
cli_message_dict(
dict_[key],
indent=indent + 2,
message_list=message_list,
recursion_flag=True,
)
else:
message = "{}<cyan>{}:</cyan> {}".format(" " * indent, key, str(dict_[key]))
message_list.append(message)
if not recursion_flag:
if bullet_char and indent > 1:
first = message_list[0]
new_first = first[:1] + bullet_char + first[2:]
message_list[0] = new_first
cli_message_list(message_list) | [
"def",
"cli_message_dict",
"(",
"dict_",
",",
"indent",
"=",
"3",
",",
"bullet_char",
"=",
"\"-\"",
",",
"message_list",
"=",
"None",
",",
"recursion_flag",
"=",
"False",
")",
":",
"if",
"message_list",
"is",
"None",
":",
"message_list",
"=",
"[",
"]",
"if",
"dict_",
".",
"get",
"(",
"\"name\"",
")",
":",
"name",
"=",
"dict_",
".",
"pop",
"(",
"\"name\"",
")",
"message",
"=",
"\"{}<cyan>name:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"name",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"dict_",
".",
"get",
"(",
"\"module_name\"",
")",
":",
"module_name",
"=",
"dict_",
".",
"pop",
"(",
"\"module_name\"",
")",
"message",
"=",
"\"{}<cyan>module_name:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"module_name",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"dict_",
".",
"get",
"(",
"\"class_name\"",
")",
":",
"class_name",
"=",
"dict_",
".",
"pop",
"(",
"\"class_name\"",
")",
"message",
"=",
"\"{}<cyan>class_name:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"class_name",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"dict_",
".",
"get",
"(",
"\"action_list\"",
")",
":",
"action_list",
"=",
"dict_",
".",
"pop",
"(",
"\"action_list\"",
")",
"action_list_string",
"=",
"action_list_to_string",
"(",
"action_list",
")",
"message",
"=",
"\"{}<cyan>action_list:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"action_list_string",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"sorted_keys",
"=",
"sorted",
"(",
"dict_",
".",
"keys",
"(",
")",
")",
"for",
"key",
"in",
"sorted_keys",
":",
"if",
"key",
"==",
"\"password\"",
":",
"message",
"=",
"\"{}<cyan>password:</cyan> ******\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"continue",
"if",
"isinstance",
"(",
"dict_",
"[",
"key",
"]",
",",
"dict",
")",
":",
"message",
"=",
"\"{}<cyan>{}:</cyan>\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"key",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"cli_message_dict",
"(",
"dict_",
"[",
"key",
"]",
",",
"indent",
"=",
"indent",
"+",
"2",
",",
"message_list",
"=",
"message_list",
",",
"recursion_flag",
"=",
"True",
",",
")",
"else",
":",
"message",
"=",
"\"{}<cyan>{}:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"key",
",",
"str",
"(",
"dict_",
"[",
"key",
"]",
")",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"not",
"recursion_flag",
":",
"if",
"bullet_char",
"and",
"indent",
">",
"1",
":",
"first",
"=",
"message_list",
"[",
"0",
"]",
"new_first",
"=",
"first",
"[",
":",
"1",
"]",
"+",
"bullet_char",
"+",
"first",
"[",
"2",
":",
"]",
"message_list",
"[",
"0",
"]",
"=",
"new_first",
"cli_message_list",
"(",
"message_list",
")"
] | [
68,
0
] | [
116,
38
] | python | en | ['en', 'en', 'en'] | True |
library_install_load_check | (
python_import_name: str, pip_library_name: str
) |
Dynamically load a module from strings, attempt a pip install or raise a helpful error.
:return: None if the library is already loadable; 0 if the install succeeded and the library loads; otherwise a non-zero exit status
Args:
pip_library_name: name of the library to load
python_import_name (str): a module to import to verify installation
|
Dynamically load a module from strings, attempt a pip install or raise a helpful error. | def library_install_load_check(
python_import_name: str, pip_library_name: str
) -> Optional[int]:
"""
Dynamically load a module from strings, attempt a pip install or raise a helpful error.
:return: None if the library is already loadable; 0 if the install succeeded and the library loads; otherwise a non-zero exit status
Args:
pip_library_name: name of the library to load
python_import_name (str): a module to import to verify installation
"""
if is_library_loadable(library_name=python_import_name):
return None
confirm_prompt: str = f"""Great Expectations relies on the library `{python_import_name}` to connect to your data, \
but the package `{pip_library_name}` containing this library is not installed.
Would you like Great Expectations to try to execute `pip install {pip_library_name}` for you?"""
continuation_message: str = f"""\nOK, exiting now.
- Please execute `pip install {pip_library_name}` before trying again."""
pip_install_confirmed = toolkit.confirm_proceed_or_exit(
confirm_prompt=confirm_prompt,
continuation_message=continuation_message,
exit_on_no=True,
exit_code=1,
)
if not pip_install_confirmed:
cli_message(continuation_message)
sys.exit(1)
status_code: int = execute_shell_command_with_progress_polling(
f"pip install {pip_library_name}"
)
# project_distribution: Distribution = get_project_distribution()
# if project_distribution:
# project_name: str = project_distribution.metadata['Name']
# version: str = project_distribution.metadata['Version']
#
# pkg_resources.working_set = pkg_resources.WorkingSet._build_master()
working_set: WorkingSet = pkg_resources.working_set
# noinspection SpellCheckingInspection
distr: Distribution = pkg_resources.get_distribution(dist=pip_library_name)
pkg_resources.WorkingSet.add_entry(self=working_set, entry=distr.key)
library_loadable: bool = is_library_loadable(library_name=python_import_name)
if status_code == 0 and library_loadable:
return 0
if not library_loadable:
cli_message(
f"""<red>ERROR: Great Expectations relies on the library `{pip_library_name}` to connect to your data.</red>
- Please execute `pip install {pip_library_name}` before trying again."""
)
return 1
return status_code | [
"def",
"library_install_load_check",
"(",
"python_import_name",
":",
"str",
",",
"pip_library_name",
":",
"str",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"if",
"is_library_loadable",
"(",
"library_name",
"=",
"python_import_name",
")",
":",
"return",
"None",
"confirm_prompt",
":",
"str",
"=",
"f\"\"\"Great Expectations relies on the library `{python_import_name}` to connect to your data, \\\nbut the package `{pip_library_name}` containing this library is not installed.\n Would you like Great Expectations to try to execute `pip install {pip_library_name}` for you?\"\"\"",
"continuation_message",
":",
"str",
"=",
"f\"\"\"\\nOK, exiting now.\n - Please execute `pip install {pip_library_name}` before trying again.\"\"\"",
"pip_install_confirmed",
"=",
"toolkit",
".",
"confirm_proceed_or_exit",
"(",
"confirm_prompt",
"=",
"confirm_prompt",
",",
"continuation_message",
"=",
"continuation_message",
",",
"exit_on_no",
"=",
"True",
",",
"exit_code",
"=",
"1",
",",
")",
"if",
"not",
"pip_install_confirmed",
":",
"cli_message",
"(",
"continuation_message",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"status_code",
":",
"int",
"=",
"execute_shell_command_with_progress_polling",
"(",
"f\"pip install {pip_library_name}\"",
")",
"# project_distribution: Distribution = get_project_distribution()",
"# if project_distribution:",
"# project_name: str = project_distribution.metadata['Name']",
"# version: str = project_distribution.metadata['Version']",
"#",
"# pkg_resources.working_set = pkg_resources.WorkingSet._build_master()",
"working_set",
":",
"WorkingSet",
"=",
"pkg_resources",
".",
"working_set",
"# noinspection SpellCheckingInspection",
"distr",
":",
"Distribution",
"=",
"pkg_resources",
".",
"get_distribution",
"(",
"dist",
"=",
"pip_library_name",
")",
"pkg_resources",
".",
"WorkingSet",
".",
"add_entry",
"(",
"self",
"=",
"working_set",
",",
"entry",
"=",
"distr",
".",
"key",
")",
"library_loadable",
":",
"bool",
"=",
"is_library_loadable",
"(",
"library_name",
"=",
"python_import_name",
")",
"if",
"status_code",
"==",
"0",
"and",
"library_loadable",
":",
"return",
"0",
"if",
"not",
"library_loadable",
":",
"cli_message",
"(",
"f\"\"\"<red>ERROR: Great Expectations relies on the library `{pip_library_name}` to connect to your data.</red>\n - Please execute `pip install {pip_library_name}` before trying again.\"\"\"",
")",
"return",
"1",
"return",
"status_code"
] | [
155,
0
] | [
214,
22
] | python | en | ['en', 'error', 'th'] | False |
get_batch_request | (
datasource: BaseDatasource,
additional_batch_request_args: Optional[Dict[str, Any]] = None,
) |
This method manages the interaction with the user necessary to obtain a batch_request for a batch of a data asset.
To build the batch_request, this method needs datasource_name, data_connector_name, and data_asset_name,
which it combines into a batch_request dictionary.
All three arguments are optional. If they are present, the method uses their values. Otherwise, the method
prompts the user to enter them interactively. Since it is possible for any of these three components to be
passed to this method as empty values and filled in by interacting with the user, this method
returns these components' values in case they changed.
If the datasource has data connectors, the method lets the user choose a name from that list (note: if there are
multiple data connectors, the user has to choose one first).
# :param datasource:
# :param additional_batch_request_args:
# :return: batch_request
|
This method manages the interaction with the user necessary to obtain a batch_request for a batch of a data asset. | def get_batch_request(
datasource: BaseDatasource,
additional_batch_request_args: Optional[Dict[str, Any]] = None,
) -> Dict[str, Union[str, Dict[str, Any]]]:
"""
This method manages the interaction with the user necessary to obtain a batch_request for a batch of a data asset.
To build the batch_request, this method needs datasource_name, data_connector_name, and data_asset_name,
which it combines into a batch_request dictionary.
All three arguments are optional. If they are present, the method uses their values. Otherwise, the method
prompts the user to enter them interactively. Since it is possible for any of these three components to be
passed to this method as empty values and filled in by interacting with the user, this method
returns these components' values in case they changed.
If the datasource has data connectors, the method lets the user choose a name from that list (note: if there are
multiple data connectors, the user has to choose one first).
# :param datasource:
# :param additional_batch_request_args:
# :return: batch_request
"""
available_data_asset_names_by_data_connector_dict: Dict[
str, List[str]
] = datasource.get_available_data_asset_names()
data_connector_name: Optional[str] = select_data_connector_name(
available_data_asset_names_by_data_connector_dict=available_data_asset_names_by_data_connector_dict,
)
batch_request: Dict[str, Union[str, int, Dict[str, Any]]] = {
"datasource_name": datasource.name,
"data_connector_name": data_connector_name,
}
data_asset_name: str
if isinstance(datasource, Datasource):
msg_prompt_enter_data_asset_name: str = f'\nWhich data asset (accessible by data connector "{data_connector_name}") would you like to use?\n'
data_asset_name = _get_data_asset_name_from_data_connector(
datasource=datasource,
data_connector_name=data_connector_name,
msg_prompt_enter_data_asset_name=msg_prompt_enter_data_asset_name,
)
elif isinstance(datasource, SimpleSqlalchemyDatasource):
msg_prompt_enter_data_asset_name: str = (
"\nWhich table would you like to use? (Choose one)\n"
)
data_asset_name = _get_data_asset_name_for_simple_sqlalchemy_datasource(
datasource=datasource,
data_connector_name=data_connector_name,
msg_prompt_enter_data_asset_name=msg_prompt_enter_data_asset_name,
)
else:
raise ge_exceptions.DataContextError(
"Datasource {:s} of unsupported type {:s} was encountered.".format(
datasource.name, str(type(datasource))
)
)
batch_request.update(
{
"data_asset_name": data_asset_name,
}
)
if additional_batch_request_args and isinstance(
additional_batch_request_args, dict
):
batch_request.update(additional_batch_request_args)
batch_spec_passthrough: Dict[str, Union[str, Dict[str, Any]]] = batch_request.get(
"batch_spec_passthrough"
)
if batch_spec_passthrough is None:
batch_spec_passthrough = {}
batch_spec_passthrough.update(_get_batch_spec_passthrough(datasource=datasource))
batch_request["batch_spec_passthrough"] = batch_spec_passthrough
filter_properties_dict(properties=batch_request, clean_falsy=True, inplace=True)
return batch_request | [
"def",
"get_batch_request",
"(",
"datasource",
":",
"BaseDatasource",
",",
"additional_batch_request_args",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
":",
"available_data_asset_names_by_data_connector_dict",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"datasource",
".",
"get_available_data_asset_names",
"(",
")",
"data_connector_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"select_data_connector_name",
"(",
"available_data_asset_names_by_data_connector_dict",
"=",
"available_data_asset_names_by_data_connector_dict",
",",
")",
"batch_request",
":",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"int",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"=",
"{",
"\"datasource_name\"",
":",
"datasource",
".",
"name",
",",
"\"data_connector_name\"",
":",
"data_connector_name",
",",
"}",
"data_asset_name",
":",
"str",
"if",
"isinstance",
"(",
"datasource",
",",
"Datasource",
")",
":",
"msg_prompt_enter_data_asset_name",
":",
"str",
"=",
"f'\\nWhich data asset (accessible by data connector \"{data_connector_name}\") would you like to use?\\n'",
"data_asset_name",
"=",
"_get_data_asset_name_from_data_connector",
"(",
"datasource",
"=",
"datasource",
",",
"data_connector_name",
"=",
"data_connector_name",
",",
"msg_prompt_enter_data_asset_name",
"=",
"msg_prompt_enter_data_asset_name",
",",
")",
"elif",
"isinstance",
"(",
"datasource",
",",
"SimpleSqlalchemyDatasource",
")",
":",
"msg_prompt_enter_data_asset_name",
":",
"str",
"=",
"(",
"\"\\nWhich table would you like to use? (Choose one)\\n\"",
")",
"data_asset_name",
"=",
"_get_data_asset_name_for_simple_sqlalchemy_datasource",
"(",
"datasource",
"=",
"datasource",
",",
"data_connector_name",
"=",
"data_connector_name",
",",
"msg_prompt_enter_data_asset_name",
"=",
"msg_prompt_enter_data_asset_name",
",",
")",
"else",
":",
"raise",
"ge_exceptions",
".",
"DataContextError",
"(",
"\"Datasource {:s} of unsupported type {:s} was encountered.\"",
".",
"format",
"(",
"datasource",
".",
"name",
",",
"str",
"(",
"type",
"(",
"datasource",
")",
")",
")",
")",
"batch_request",
".",
"update",
"(",
"{",
"\"data_asset_name\"",
":",
"data_asset_name",
",",
"}",
")",
"if",
"additional_batch_request_args",
"and",
"isinstance",
"(",
"additional_batch_request_args",
",",
"dict",
")",
":",
"batch_request",
".",
"update",
"(",
"additional_batch_request_args",
")",
"batch_spec_passthrough",
":",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"=",
"batch_request",
".",
"get",
"(",
"\"batch_spec_passthrough\"",
")",
"if",
"batch_spec_passthrough",
"is",
"None",
":",
"batch_spec_passthrough",
"=",
"{",
"}",
"batch_spec_passthrough",
".",
"update",
"(",
"_get_batch_spec_passthrough",
"(",
"datasource",
"=",
"datasource",
")",
")",
"batch_request",
"[",
"\"batch_spec_passthrough\"",
"]",
"=",
"batch_spec_passthrough",
"filter_properties_dict",
"(",
"properties",
"=",
"batch_request",
",",
"clean_falsy",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"return",
"batch_request"
] | [
31,
0
] | [
112,
24
] | python | en | ['en', 'error', 'th'] | False |
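For orientation, a sketch of the dictionary get_batch_request returns. The key names follow the code above; the values, including the passthrough option, are hypothetical:

batch_request = {
    "datasource_name": "my_datasource",
    "data_connector_name": "my_data_connector",
    "data_asset_name": "my_table",
    # kept only when non-empty: the final filter_properties_dict call
    # strips falsy entries such as an empty batch_spec_passthrough
    "batch_spec_passthrough": {"create_temp_table": True},
}
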
percents | (x, y) | Return what percentage y is of x | Return what percentage y is of x | def percents(x, y):
""" Return what percentage y is of x"""
oneprc = x/100
result = y/oneprc
return int(result) | [
"def",
"percents",
"(",
"x",
",",
"y",
")",
":",
"oneprc",
"=",
"x",
"/",
"100",
"result",
"=",
"y",
"/",
"oneprc",
"return",
"int",
"(",
"result",
")"
] | [
0,
0
] | [
4,
22
] | python | bg | ['bg', 'ru', 'bg'] | True |
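A quick check of the arithmetic above, assuming percents is in scope: the function answers "what percentage of x is y".

assert percents(200, 50) == 25   # 50 is 25% of 200
assert percents(50, 200) == 400  # 200 is 400% of 50
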
test_simple_checkpoint_default_properties_with_no_optional_arguments | (
empty_data_context,
store_validation_result_action,
store_eval_parameter_action,
update_data_docs_action,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
) | This demonstrates the simplest possible usage. | This demonstrates the simplest possible usage. | def test_simple_checkpoint_default_properties_with_no_optional_arguments(
empty_data_context,
store_validation_result_action,
store_eval_parameter_action,
update_data_docs_action,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
"""This demonstrates the simplest possible usage."""
checkpoint_config = SimpleCheckpointConfigurator(
"my_minimal_simple_checkpoint", empty_data_context
).build()
assert isinstance(checkpoint_config, CheckpointConfig)
assert checkpoint_config.name == "my_minimal_simple_checkpoint"
assert checkpoint_config.action_list == [
store_validation_result_action,
store_eval_parameter_action,
update_data_docs_action,
]
assert checkpoint_config.config_version == 1.0
assert checkpoint_config.class_name == "Checkpoint"
assert checkpoint_config.evaluation_parameters == {}
assert checkpoint_config.runtime_configuration == {}
assert checkpoint_config.validations == []
checkpoint_from_store = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_checkpoint(
"my_minimal_simple_checkpoint"
)
checkpoint_config = checkpoint_from_store.config
assert checkpoint_config.name == "my_minimal_simple_checkpoint"
assert checkpoint_config.action_list == [
store_validation_result_action,
store_eval_parameter_action,
update_data_docs_action,
]
assert checkpoint_config.config_version == 1.0
assert checkpoint_config.class_name == "Checkpoint"
assert checkpoint_config.evaluation_parameters == {}
assert checkpoint_config.runtime_configuration == {}
assert checkpoint_config.validations == [] | [
"def",
"test_simple_checkpoint_default_properties_with_no_optional_arguments",
"(",
"empty_data_context",
",",
"store_validation_result_action",
",",
"store_eval_parameter_action",
",",
"update_data_docs_action",
",",
"titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates",
",",
")",
":",
"checkpoint_config",
"=",
"SimpleCheckpointConfigurator",
"(",
"\"my_minimal_simple_checkpoint\"",
",",
"empty_data_context",
")",
".",
"build",
"(",
")",
"assert",
"isinstance",
"(",
"checkpoint_config",
",",
"CheckpointConfig",
")",
"assert",
"checkpoint_config",
".",
"name",
"==",
"\"my_minimal_simple_checkpoint\"",
"assert",
"checkpoint_config",
".",
"action_list",
"==",
"[",
"store_validation_result_action",
",",
"store_eval_parameter_action",
",",
"update_data_docs_action",
",",
"]",
"assert",
"checkpoint_config",
".",
"config_version",
"==",
"1.0",
"assert",
"checkpoint_config",
".",
"class_name",
"==",
"\"Checkpoint\"",
"assert",
"checkpoint_config",
".",
"evaluation_parameters",
"==",
"{",
"}",
"assert",
"checkpoint_config",
".",
"runtime_configuration",
"==",
"{",
"}",
"assert",
"checkpoint_config",
".",
"validations",
"==",
"[",
"]",
"checkpoint_from_store",
"=",
"titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates",
".",
"get_checkpoint",
"(",
"\"my_minimal_simple_checkpoint\"",
")",
"checkpoint_config",
"=",
"checkpoint_from_store",
".",
"config",
"assert",
"checkpoint_config",
".",
"name",
"==",
"\"my_minimal_simple_checkpoint\"",
"assert",
"checkpoint_config",
".",
"action_list",
"==",
"[",
"store_validation_result_action",
",",
"store_eval_parameter_action",
",",
"update_data_docs_action",
",",
"]",
"assert",
"checkpoint_config",
".",
"config_version",
"==",
"1.0",
"assert",
"checkpoint_config",
".",
"class_name",
"==",
"\"Checkpoint\"",
"assert",
"checkpoint_config",
".",
"evaluation_parameters",
"==",
"{",
"}",
"assert",
"checkpoint_config",
".",
"runtime_configuration",
"==",
"{",
"}",
"assert",
"checkpoint_config",
".",
"validations",
"==",
"[",
"]"
] | [
110,
0
] | [
149,
46
] | python | en | ['en', 'en', 'en'] | True |
test_simple_checkpoint_notify_with_all_has_data_docs_action_with_none_specified | (
empty_data_context,
slack_notification_action,
webhook,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
) |
The underlying SlackNotificationAction and SlackRenderer default to
including links to all sites if the key notify_with is not present. We are
intentionally hiding this from users of SimpleCheckpoint by having a default
of "all" that sets the configuration appropriately.
|
The underlying SlackNotificationAction and SlackRenderer default to
including links to all sites if the key notify_with is not present. We are
intentionally hiding this from users of SimpleCheckpoint by having a default
of "all" that sets the configuration appropriately.
| def test_simple_checkpoint_notify_with_all_has_data_docs_action_with_none_specified(
empty_data_context,
slack_notification_action,
webhook,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
"""
The underlying SlackNotificationAction and SlackRenderer default to
including links to all sites if the key notify_with is not present. We are
intentionally hiding this from users of SimpleCheckpoint by having a default
of "all" that sets the configuration appropriately.
"""
checkpoint_config = SimpleCheckpointConfigurator(
"foo", empty_data_context, slack_webhook=webhook, notify_with="all"
).build()
# set the config to include all sites
slack_notification_action["action"]["notify_with"] = None
assert slack_notification_action in checkpoint_config.action_list
checkpoint_from_store = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_checkpoint(
"my_simple_checkpoint_with_slack_and_notify_with_all"
)
checkpoint_config = checkpoint_from_store.config
assert slack_notification_action in checkpoint_config.action_list | [
"def",
"test_simple_checkpoint_notify_with_all_has_data_docs_action_with_none_specified",
"(",
"empty_data_context",
",",
"slack_notification_action",
",",
"webhook",
",",
"titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates",
",",
")",
":",
"checkpoint_config",
"=",
"SimpleCheckpointConfigurator",
"(",
"\"foo\"",
",",
"empty_data_context",
",",
"slack_webhook",
"=",
"webhook",
",",
"notify_with",
"=",
"\"all\"",
")",
".",
"build",
"(",
")",
"# set the config to include all sites",
"slack_notification_action",
"[",
"\"action\"",
"]",
"[",
"\"notify_with\"",
"]",
"=",
"None",
"assert",
"slack_notification_action",
"in",
"checkpoint_config",
".",
"action_list",
"checkpoint_from_store",
"=",
"titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates",
".",
"get_checkpoint",
"(",
"\"my_simple_checkpoint_with_slack_and_notify_with_all\"",
")",
"checkpoint_config",
"=",
"checkpoint_from_store",
".",
"config",
"assert",
"slack_notification_action",
"in",
"checkpoint_config",
".",
"action_list"
] | [
228,
0
] | [
252,
69
] | python | en | ['en', 'error', 'th'] | False |
FlattenFilter | (node) | Returns a flattened list of the node's children, recursing into Filter nodes. | Returns a flattened list of the node's children, recursing into Filter nodes. | def FlattenFilter(node):
"""Returns a flattened list of the node's children, recursing into Filter nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list | [
"def",
"FlattenFilter",
"(",
"node",
")",
":",
"node_list",
"=",
"[",
"]",
"if",
"(",
"node",
".",
"attributes",
"and",
"node",
".",
"getAttribute",
"(",
"'Name'",
")",
"==",
"'_excluded_files'",
")",
":",
"# We don't add the \"_excluded_files\" filter.",
"return",
"[",
"]",
"for",
"current",
"in",
"node",
".",
"childNodes",
":",
"if",
"current",
".",
"nodeName",
"==",
"'Filter'",
":",
"node_list",
".",
"extend",
"(",
"FlattenFilter",
"(",
"current",
")",
")",
"else",
":",
"node_list",
".",
"append",
"(",
"current",
")",
"return",
"node_list"
] | [
94,
0
] | [
109,
18
] | python | en | ['en', 'en', 'en'] | True |
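A minidom-based illustration of the flattening above, assuming FlattenFilter is in scope; the XML is made up but mimics .vcproj structure:

from xml.dom.minidom import parseString

doc = parseString(
    '<Files>'
    '<Filter Name="src"><File RelativePath="a.cc"/></Filter>'
    '<Filter Name="_excluded_files"><File RelativePath="b.cc"/></Filter>'
    '<File RelativePath="c.cc"/>'
    '</Files>'
)
flat = FlattenFilter(doc.documentElement)
print([n.getAttribute("RelativePath") for n in flat])  # ['a.cc', 'c.cc']
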
AbsoluteNode | (node) | Makes all the properties we know about in this node absolute. | Makes all the properties we know about in this node absolute. | def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name) | [
"def",
"AbsoluteNode",
"(",
"node",
")",
":",
"if",
"node",
".",
"attributes",
":",
"for",
"(",
"name",
",",
"value",
")",
"in",
"node",
".",
"attributes",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"[",
"'InheritedPropertySheets'",
",",
"'RelativePath'",
",",
"'AdditionalIncludeDirectories'",
",",
"'IntermediateDirectory'",
",",
"'OutputDirectory'",
",",
"'AdditionalLibraryDirectories'",
"]",
":",
"# We want to fix up these paths",
"path_list",
"=",
"value",
".",
"split",
"(",
"';'",
")",
"new_list",
"=",
"FixFilenames",
"(",
"path_list",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"ARGUMENTS",
"[",
"1",
"]",
")",
")",
"node",
".",
"setAttribute",
"(",
"name",
",",
"';'",
".",
"join",
"(",
"new_list",
")",
")",
"if",
"not",
"value",
":",
"node",
".",
"removeAttribute",
"(",
"name",
")"
] | [
127,
0
] | [
140,
34
] | python | en | ['en', 'en', 'en'] | True |
CleanupVcproj | (node) | For each sub node, we call this function recursively. | For each sub node, we call this function recursively. | def CleanupVcproj(node):
"""For each sub node, we call this function recursively."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
# Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
# Fix all the semicolon separated attributes to be sorted, and we also
# remove the dups.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node) | [
"def",
"CleanupVcproj",
"(",
"node",
")",
":",
"for",
"sub_node",
"in",
"node",
".",
"childNodes",
":",
"AbsoluteNode",
"(",
"sub_node",
")",
"CleanupVcproj",
"(",
"sub_node",
")",
"# Normalize the node, and remove all extranous whitespaces.",
"for",
"sub_node",
"in",
"node",
".",
"childNodes",
":",
"if",
"sub_node",
".",
"nodeType",
"==",
"Node",
".",
"TEXT_NODE",
":",
"sub_node",
".",
"data",
"=",
"sub_node",
".",
"data",
".",
"replace",
"(",
"\"\\r\"",
",",
"\"\"",
")",
"sub_node",
".",
"data",
"=",
"sub_node",
".",
"data",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"sub_node",
".",
"data",
"=",
"sub_node",
".",
"data",
".",
"rstrip",
"(",
")",
"# Fix all the semicolon separated attributes to be sorted, and we also",
"# remove the dups.",
"if",
"node",
".",
"attributes",
":",
"for",
"(",
"name",
",",
"value",
")",
"in",
"node",
".",
"attributes",
".",
"items",
"(",
")",
":",
"sorted_list",
"=",
"sorted",
"(",
"value",
".",
"split",
"(",
"';'",
")",
")",
"unique_list",
"=",
"[",
"]",
"for",
"i",
"in",
"sorted_list",
":",
"if",
"not",
"unique_list",
".",
"count",
"(",
"i",
")",
":",
"unique_list",
".",
"append",
"(",
"i",
")",
"node",
".",
"setAttribute",
"(",
"name",
",",
"';'",
".",
"join",
"(",
"unique_list",
")",
")",
"if",
"not",
"value",
":",
"node",
".",
"removeAttribute",
"(",
"name",
")",
"if",
"node",
".",
"childNodes",
":",
"node",
".",
"normalize",
"(",
")",
"# For each node, take a copy, and remove it from the list.",
"node_array",
"=",
"[",
"]",
"while",
"node",
".",
"childNodes",
"and",
"node",
".",
"childNodes",
"[",
"0",
"]",
":",
"# Take a copy of the node and remove it from the list.",
"current",
"=",
"node",
".",
"childNodes",
"[",
"0",
"]",
"node",
".",
"removeChild",
"(",
"current",
")",
"# If the child is a filter, we want to append all its children",
"# to this same list.",
"if",
"current",
".",
"nodeName",
"==",
"'Filter'",
":",
"node_array",
".",
"extend",
"(",
"FlattenFilter",
"(",
"current",
")",
")",
"else",
":",
"node_array",
".",
"append",
"(",
"current",
")",
"# Sort the list.",
"node_array",
".",
"sort",
"(",
"CmpNode",
"(",
")",
")",
"# Insert the nodes in the correct order.",
"for",
"new_node",
"in",
"node_array",
":",
"# But don't append empty tool node.",
"if",
"new_node",
".",
"nodeName",
"==",
"'Tool'",
":",
"if",
"new_node",
".",
"attributes",
"and",
"new_node",
".",
"attributes",
".",
"length",
"==",
"1",
":",
"# This one was empty.",
"continue",
"if",
"new_node",
".",
"nodeName",
"==",
"'UserMacro'",
":",
"continue",
"node",
".",
"appendChild",
"(",
"new_node",
")"
] | [
143,
0
] | [
199,
30
] | python | en | ['en', 'en', 'en'] | True |
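One detail worth flagging in CleanupVcproj: node_array.sort(CmpNode()) uses Python 2's comparator-style sort, which Python 3 removed. A minimal sketch of the Python 3 adaptation, assuming CmpNode() is a callable returning -1/0/1 like a classic cmp function:

import functools

def sort_nodes_py3(node_array, cmp_node):
    # cmp_to_key wraps a two-argument comparator into a key function,
    # which is the only form list.sort accepts under Python 3.
    node_array.sort(key=functools.cmp_to_key(cmp_node))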
main | (argv) | Main function of this vcproj prettifier. | Main function of this vcproj prettifier. | def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
# Check that we have at least 1 parameter (the vcproj path).
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
# First thing we need to do is find the Configuration nodes and merge them
# with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
# Finally, we use the pretty-print function to print the vcproj back to the
# user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0 | [
"def",
"main",
"(",
"argv",
")",
":",
"global",
"ARGUMENTS",
"ARGUMENTS",
"=",
"argv",
"# check if we have exactly 1 parameter.",
"if",
"len",
"(",
"argv",
")",
"<",
"2",
":",
"print",
"(",
"'Usage: %s \"c:\\\\path\\\\to\\\\vcproj.vcproj\" [key1=value1] '",
"'[key2=value2]'",
"%",
"argv",
"[",
"0",
"]",
")",
"return",
"1",
"# Parse the keys",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"len",
"(",
"argv",
")",
")",
":",
"(",
"key",
",",
"value",
")",
"=",
"argv",
"[",
"i",
"]",
".",
"split",
"(",
"'='",
")",
"REPLACEMENTS",
"[",
"key",
"]",
"=",
"value",
"# Open the vcproj and parse the xml.",
"dom",
"=",
"parse",
"(",
"argv",
"[",
"1",
"]",
")",
"# First thing we need to do is find the Configuration Node and merge them",
"# with the vsprops they include.",
"for",
"configuration_node",
"in",
"GetConfiguationNodes",
"(",
"dom",
".",
"documentElement",
")",
":",
"# Get the property sheets associated with this configuration.",
"vsprops",
"=",
"configuration_node",
".",
"getAttribute",
"(",
"'InheritedPropertySheets'",
")",
"# Fix the filenames to be absolute.",
"vsprops_list",
"=",
"FixFilenames",
"(",
"vsprops",
".",
"strip",
"(",
")",
".",
"split",
"(",
"';'",
")",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"argv",
"[",
"1",
"]",
")",
")",
"# Extend the list of vsprops with all vsprops contained in the current",
"# vsprops.",
"for",
"current_vsprops",
"in",
"vsprops_list",
":",
"vsprops_list",
".",
"extend",
"(",
"GetChildrenVsprops",
"(",
"current_vsprops",
")",
")",
"# Now that we have all the vsprops, we need to merge them.",
"for",
"current_vsprops",
"in",
"vsprops_list",
":",
"MergeProperties",
"(",
"configuration_node",
",",
"parse",
"(",
"current_vsprops",
")",
".",
"documentElement",
")",
"# Now that everything is merged, we need to cleanup the xml.",
"CleanupVcproj",
"(",
"dom",
".",
"documentElement",
")",
"# Finally, we use the prett xml function to print the vcproj back to the",
"# user.",
"#print dom.toprettyxml(newl=\"\\n\")",
"PrettyPrintNode",
"(",
"dom",
".",
"documentElement",
")",
"return",
"0"
] | [
278,
0
] | [
324,
10
] | python | en | ['en', 'en', 'en'] | True |
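For context, a sketch of how this prettifier is invoked, following the usage string printed by main (the paths and the macro substitution below are illustrative, not from the original project):

import sys

# Equivalent to: python pretty_vcproj.py "c:\path\to\project.vcproj" "$(SolutionDir)=c:\src"
# Each trailing key=value argument lands in REPLACEMENTS before the vcproj is parsed.
sys.exit(main(['pretty_vcproj.py',
               r'c:\path\to\project.vcproj',
               r'$(SolutionDir)=c:\src']))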
venv | (request) |
Prepares a virtual environment for unittest; no extra packages are required.
:rtype: virtual_environments.VirtualEnvDescription
|
Prepares a virtual environment for unittest; no extra packages are required.
:rtype: virtual_environments.VirtualEnvDescription
| def venv(request):
"""
Prepares a virtual environment for unittest; no extra packages are required.
:rtype: virtual_environments.VirtualEnvDescription
"""
return virtual_environments.prepare_virtualenv() | [
"def",
"venv",
"(",
"request",
")",
":",
"return",
"virtual_environments",
".",
"prepare_virtualenv",
"(",
")"
] | [
14,
0
] | [
19,
52
] | python | en | ['en', 'error', 'th'] | False |
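A sketch of how a pytest test would consume this fixture; the test body is illustrative, and VirtualEnvDescription exposing a bin directory is inferred from its use elsewhere in this suite:

def test_uses_prepared_virtualenv(venv):
    # pytest injects the fixture by name; venv.bin points at the
    # environment's scripts directory, where console entry points live.
    assert venv.bin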
IRHTTPMappingGroup.finalize | (self, ir: 'IR', aconf: Config) |
Finalize a MappingGroup based on the attributes of its Mappings. Core elements get lifted into
the Group so we can more easily build Envoy routes; host-redirect and shadow get handled, etc.
:param ir: the IR we're working from
:param aconf: the Config we're working from
:return: a list of the IRClusters this Group uses
|
Finalize a MappingGroup based on the attributes of its Mappings. Core elements get lifted into
the Group so we can more easily build Envoy routes; host-redirect and shadow get handled, etc. | def finalize(self, ir: 'IR', aconf: Config) -> List[IRCluster]:
"""
Finalize a MappingGroup based on the attributes of its Mappings. Core elements get lifted into
the Group so we can more easily build Envoy routes; host-redirect and shadow get handled, etc.
:param ir: the IR we're working from
:param aconf: the Config we're working from
:return: a list of the IRClusters this Group uses
"""
add_request_headers: Dict[str, Any] = {}
add_response_headers: Dict[str, Any] = {}
metadata_labels: Dict[str, str] = {}
for mapping in sorted(self.mappings, key=lambda m: m.route_weight):
# if verbose:
# self.ir.logger.debug("%s mapping %s" % (self, mapping.as_json()))
for k in mapping.keys():
if k.startswith('_') or mapping.skip_key(k) or (k in IRHTTPMappingGroup.DoNotFlattenKeys):
# if verbose:
# self.ir.logger.debug("%s: don't flatten %s" % (self, k))
continue
# if verbose:
# self.ir.logger.debug("%s: flatten %s" % (self, k))
self[k] = mapping[k]
add_request_headers.update(mapping.get('add_request_headers', {}))
add_response_headers.update(mapping.get('add_response_headers', {}))
# Should we have higher weights win over lower if there are conflicts?
# Should we disallow conflicts?
metadata_labels.update(mapping.get('metadata_labels') or {})
if add_request_headers:
self.add_request_headers = add_request_headers
if add_response_headers:
self.add_response_headers = add_response_headers
if metadata_labels:
self.metadata_labels = metadata_labels
if self.get('load_balancer', None) is None:
self['load_balancer'] = ir.ambassador_module.load_balancer
# if verbose:
# self.ir.logger.debug("%s after flattening %s" % (self, self.as_json()))
total_weight = 0.0
unspecified_mappings = 0
# If no rewrite was given at all, default the rewrite to "/", so e.g., if we map
# /prefix1/ to the service service1, then http://ambassador.example.com/prefix1/foo/bar
# would effectively be written to http://service1/foo/bar
#
# If they did give a rewrite, leave it alone so that the Envoy config can correctly
# handle an empty rewrite as no rewriting at all.
if 'rewrite' not in self:
self.rewrite = "/"
# OK. Save some typing with local variables for default labels and our labels...
labels: Dict[str, Any] = self.get('labels', None)
if self.get('keepalive', None) is None:
keepalive_default = ir.ambassador_module.get('keepalive', None)
if keepalive_default:
self['keepalive'] = keepalive_default
if not labels:
# No labels. Use the default label domain to see if we have some valid defaults.
defaults = ir.ambassador_module.get_default_labels()
if defaults:
domain = ir.ambassador_module.get_default_label_domain()
self.labels = {
domain: [
{
'defaults': defaults
}
]
}
else:
# Walk all the domains in our labels, and prepend the defaults, if any.
# ir.logger.info("%s: labels %s" % (self.as_json(), labels))
for domain in labels.keys():
defaults = ir.ambassador_module.get_default_labels(domain)
ir.logger.debug("%s: defaults %s" % (domain, defaults))
if defaults:
ir.logger.debug("%s: labels %s" % (domain, labels[domain]))
for label in labels[domain]:
ir.logger.debug("%s: label %s" % (domain, label))
lkeys = label.keys()
if len(lkeys) > 1:
err = RichStatus.fromError("label has multiple entries (%s) instead of just one" %
lkeys)
aconf.post_error(err, self)
lkey = list(lkeys)[0]
if lkey.startswith('v0_ratelimit_'):
# Don't prepend defaults, as this was imported from a V0 rate_limit.
continue
label[lkey] = defaults + label[lkey]
if self.shadows:
# Only one shadow is supported right now.
shadow = self.shadows[0]
# The shadow is an IRMapping. Save the cluster for it.
shadow.cluster = self.add_cluster_for_mapping(shadow, marker='shadow')
# We don't need a cluster for host_redirect: it's just a name to redirect to.
redir = self.get('host_redirect', None)
if not redir:
for mapping in self.mappings:
mapping.cluster = self.add_cluster_for_mapping(mapping, mapping.cluster_tag)
self.logger.debug(f"Normalizing weights in mappings now...")
if not self.normalize_weights_in_mappings():
self.post_error(f"Could not normalize mapping weights, ignoring...")
return []
return list([ mapping.cluster for mapping in self.mappings ])
else:
# Flatten the case_sensitive field for host_redirect if it exists
if 'case_sensitive' in redir:
self['case_sensitive'] = redir['case_sensitive']
return [] | [
"def",
"finalize",
"(",
"self",
",",
"ir",
":",
"'IR'",
",",
"aconf",
":",
"Config",
")",
"->",
"List",
"[",
"IRCluster",
"]",
":",
"add_request_headers",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"add_response_headers",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"metadata_labels",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"{",
"}",
"for",
"mapping",
"in",
"sorted",
"(",
"self",
".",
"mappings",
",",
"key",
"=",
"lambda",
"m",
":",
"m",
".",
"route_weight",
")",
":",
"# if verbose:",
"# self.ir.logger.debug(\"%s mapping %s\" % (self, mapping.as_json()))",
"for",
"k",
"in",
"mapping",
".",
"keys",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"'_'",
")",
"or",
"mapping",
".",
"skip_key",
"(",
"k",
")",
"or",
"(",
"k",
"in",
"IRHTTPMappingGroup",
".",
"DoNotFlattenKeys",
")",
":",
"# if verbose:",
"# self.ir.logger.debug(\"%s: don't flatten %s\" % (self, k))",
"continue",
"# if verbose:",
"# self.ir.logger.debug(\"%s: flatten %s\" % (self, k))",
"self",
"[",
"k",
"]",
"=",
"mapping",
"[",
"k",
"]",
"add_request_headers",
".",
"update",
"(",
"mapping",
".",
"get",
"(",
"'add_request_headers'",
",",
"{",
"}",
")",
")",
"add_response_headers",
".",
"update",
"(",
"mapping",
".",
"get",
"(",
"'add_response_headers'",
",",
"{",
"}",
")",
")",
"# Should we have higher weights win over lower if there are conflicts?",
"# Should we disallow conflicts?",
"metadata_labels",
".",
"update",
"(",
"mapping",
".",
"get",
"(",
"'metadata_labels'",
")",
"or",
"{",
"}",
")",
"if",
"add_request_headers",
":",
"self",
".",
"add_request_headers",
"=",
"add_request_headers",
"if",
"add_response_headers",
":",
"self",
".",
"add_response_headers",
"=",
"add_response_headers",
"if",
"metadata_labels",
":",
"self",
".",
"metadata_labels",
"=",
"metadata_labels",
"if",
"self",
".",
"get",
"(",
"'load_balancer'",
",",
"None",
")",
"is",
"None",
":",
"self",
"[",
"'load_balancer'",
"]",
"=",
"ir",
".",
"ambassador_module",
".",
"load_balancer",
"# if verbose:",
"# self.ir.logger.debug(\"%s after flattening %s\" % (self, self.as_json()))",
"total_weight",
"=",
"0.0",
"unspecified_mappings",
"=",
"0",
"# If no rewrite was given at all, default the rewrite to \"/\", so /, so e.g., if we map",
"# /prefix1/ to the service service1, then http://ambassador.example.com/prefix1/foo/bar",
"# would effectively be written to http://service1/foo/bar",
"#",
"# If they did give a rewrite, leave it alone so that the Envoy config can correctly",
"# handle an empty rewrite as no rewriting at all.",
"if",
"'rewrite'",
"not",
"in",
"self",
":",
"self",
".",
"rewrite",
"=",
"\"/\"",
"# OK. Save some typing with local variables for default labels and our labels...",
"labels",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"self",
".",
"get",
"(",
"'labels'",
",",
"None",
")",
"if",
"self",
".",
"get",
"(",
"'keepalive'",
",",
"None",
")",
"is",
"None",
":",
"keepalive_default",
"=",
"ir",
".",
"ambassador_module",
".",
"get",
"(",
"'keepalive'",
",",
"None",
")",
"if",
"keepalive_default",
":",
"self",
"[",
"'keepalive'",
"]",
"=",
"keepalive_default",
"if",
"not",
"labels",
":",
"# No labels. Use the default label domain to see if we have some valid defaults.",
"defaults",
"=",
"ir",
".",
"ambassador_module",
".",
"get_default_labels",
"(",
")",
"if",
"defaults",
":",
"domain",
"=",
"ir",
".",
"ambassador_module",
".",
"get_default_label_domain",
"(",
")",
"self",
".",
"labels",
"=",
"{",
"domain",
":",
"[",
"{",
"'defaults'",
":",
"defaults",
"}",
"]",
"}",
"else",
":",
"# Walk all the domains in our labels, and prepend the defaults, if any.",
"# ir.logger.info(\"%s: labels %s\" % (self.as_json(), labels))",
"for",
"domain",
"in",
"labels",
".",
"keys",
"(",
")",
":",
"defaults",
"=",
"ir",
".",
"ambassador_module",
".",
"get_default_labels",
"(",
"domain",
")",
"ir",
".",
"logger",
".",
"debug",
"(",
"\"%s: defaults %s\"",
"%",
"(",
"domain",
",",
"defaults",
")",
")",
"if",
"defaults",
":",
"ir",
".",
"logger",
".",
"debug",
"(",
"\"%s: labels %s\"",
"%",
"(",
"domain",
",",
"labels",
"[",
"domain",
"]",
")",
")",
"for",
"label",
"in",
"labels",
"[",
"domain",
"]",
":",
"ir",
".",
"logger",
".",
"debug",
"(",
"\"%s: label %s\"",
"%",
"(",
"domain",
",",
"label",
")",
")",
"lkeys",
"=",
"label",
".",
"keys",
"(",
")",
"if",
"len",
"(",
"lkeys",
")",
">",
"1",
":",
"err",
"=",
"RichStatus",
".",
"fromError",
"(",
"\"label has multiple entries (%s) instead of just one\"",
"%",
"lkeys",
")",
"aconf",
".",
"post_error",
"(",
"err",
",",
"self",
")",
"lkey",
"=",
"list",
"(",
"lkeys",
")",
"[",
"0",
"]",
"if",
"lkey",
".",
"startswith",
"(",
"'v0_ratelimit_'",
")",
":",
"# Don't prepend defaults, as this was imported from a V0 rate_limit.",
"continue",
"label",
"[",
"lkey",
"]",
"=",
"defaults",
"+",
"label",
"[",
"lkey",
"]",
"if",
"self",
".",
"shadows",
":",
"# Only one shadow is supported right now.",
"shadow",
"=",
"self",
".",
"shadows",
"[",
"0",
"]",
"# The shadow is an IRMapping. Save the cluster for it.",
"shadow",
".",
"cluster",
"=",
"self",
".",
"add_cluster_for_mapping",
"(",
"shadow",
",",
"marker",
"=",
"'shadow'",
")",
"# We don't need a cluster for host_redirect: it's just a name to redirect to.",
"redir",
"=",
"self",
".",
"get",
"(",
"'host_redirect'",
",",
"None",
")",
"if",
"not",
"redir",
":",
"for",
"mapping",
"in",
"self",
".",
"mappings",
":",
"mapping",
".",
"cluster",
"=",
"self",
".",
"add_cluster_for_mapping",
"(",
"mapping",
",",
"mapping",
".",
"cluster_tag",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Normalizing weights in mappings now...\"",
")",
"if",
"not",
"self",
".",
"normalize_weights_in_mappings",
"(",
")",
":",
"self",
".",
"post_error",
"(",
"f\"Could not normalize mapping weights, ignoring...\"",
")",
"return",
"[",
"]",
"return",
"list",
"(",
"[",
"mapping",
".",
"cluster",
"for",
"mapping",
"in",
"self",
".",
"mappings",
"]",
")",
"else",
":",
"# Flatten the case_sensitive field for host_redirect if it exists",
"if",
"'case_sensitive'",
"in",
"redir",
":",
"self",
"[",
"'case_sensitive'",
"]",
"=",
"redir",
"[",
"'case_sensitive'",
"]",
"return",
"[",
"]"
] | [
281,
4
] | [
420,
21
] | python | en | ['en', 'error', 'th'] | False |
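To make the defaults-prepending step in finalize concrete, here is a small worked sketch with made-up label data; the shapes below are illustrative, not Ambassador's exact schema:

defaults = [{'source_cluster': {'key': 'source_cluster'}}]
labels = {'ambassador': [{'request_rate': [{'generic_key': {'value': 'foo'}}]}]}

for domain in labels:
    for label in labels[domain]:
        lkey = list(label.keys())[0]
        if not lkey.startswith('v0_ratelimit_'):
            # Prepend the domain-wide defaults, mirroring
            # label[lkey] = defaults + label[lkey] above.
            label[lkey] = defaults + label[lkey]

# labels['ambassador'][0]['request_rate'] now starts with the default descriptor.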
venv | (request) | Virtual environment fixture providing PyLint at the minimal and maximal supported versions
for a given Python version.
* the minimal supported PyLint version is 1.9
* Python 2.7 is supported up to PyLint 1.9
* Python 3.4+ is supported through to the latest, but 1.9 is not supported by Python 3
| Virtual environment fixture providing PyLint at the minimal and maximal supported versions
for a given Python version. | def venv(request):
"""Virtual environment fixture with PyLint of the minimal and maximal supported version
for a given python version.
* the minimal supported PyLint version is 1.9
* Python 2.7 is supported up to PyLint 1.9
* Python 3.4+ is supported through to the latest, but 1.9 is not supported by python 3
"""
if sys.version_info < (2, 7) or (3, ) <= sys.version_info < (3, 4):
pytest.skip("PyLint integration requires Python 2.7 or 3.4+")
return virtual_environments.prepare_virtualenv([request.param]) | [
"def",
"venv",
"(",
"request",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"7",
")",
"or",
"(",
"3",
",",
")",
"<=",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"4",
")",
":",
"pytest",
".",
"skip",
"(",
"\"PyLint integration requires Python 2.7 or 3.4+\"",
")",
"return",
"virtual_environments",
".",
"prepare_virtualenv",
"(",
"[",
"request",
".",
"param",
"]",
")"
] | [
15,
0
] | [
26,
67
] | python | en | ['en', 'en', 'en'] | True |
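Since the fixture reads request.param, it is presumably parametrized with the PyLint requirement strings to install. A hedged sketch of that wiring — the exact version pins are assumptions mirroring the 1.9/latest bounds in the docstring, and virtual_environments is the suite's own helper module:

import pytest

@pytest.fixture(scope='module', params=['pylint==1.9.*', 'pylint'])
def venv_parametrized(request):
    # Each param is a pip requirement handed to the virtualenv builder,
    # so the suite runs once per supported PyLint version.
    return virtual_environments.prepare_virtualenv([request.param])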
run | (venv, filename) | Execute PyLint with the TeamCityReporter.
:param VirtualEnvDescription venv: virtual environment to run the test in
:param filename: filename to inspect
:rtype: str
:return: captured STDOUT
| Execute PyLint with the TeamCityReporter. | def run(venv, filename):
"""Execute PyLint with the TeamCityReporter.
:param VirtualEnvDescription venv: virtual environment to run the test in
:param filename: filename to inspect
:rtype: str
:return: captured STDOUT
"""
command = ' '.join([os.path.join(venv.bin, 'pylint'), '--output-format', 'teamcity.pylint_reporter.TeamCityReporter', filename])
return run_command(command) | [
"def",
"run",
"(",
"venv",
",",
"filename",
")",
":",
"command",
"=",
"' '",
".",
"join",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"venv",
".",
"bin",
",",
"'pylint'",
")",
",",
"'--output-format'",
",",
"'teamcity.pylint_reporter.TeamCityReporter'",
",",
"filename",
"]",
")",
"return",
"run_command",
"(",
"command",
")"
] | [
68,
0
] | [
78,
31
] | python | en | ['en', 'en', 'en'] | True |
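Putting run together with the fixture, a typical assertion checks that PyLint's output was converted into TeamCity service messages; the sample file name below is illustrative, while the ##teamcity[ prefix is the standard marker for all TeamCity service messages:

def test_reports_service_messages(venv):
    output = run(venv, 'tests/guinea-pigs/pylint/some_file.py')
    # The TeamCityReporter replaces PyLint's plain-text output with
    # TeamCity service messages, all of which share this prefix.
    assert '##teamcity[' in output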