| column | dtype |
|---|---|
| identifier | string (lengths 1-155) |
| parameters | string (lengths 2-6.09k) |
| docstring | string (lengths 11-63.4k) |
| docstring_summary | string (lengths 0-63.4k) |
| function | string (lengths 29-99.8k) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 class) |
| docstring_language | string (lengths 2-7) |
| docstring_language_predictions | string (lengths 18-23) |
| is_langid_reliable | string (2 classes) |
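
The rows below can also be consumed programmatically; a minimal sketch using the Hugging Face `datasets` library (the Hub path is a hypothetical placeholder, not this dataset's real identifier):

```python
# Minimal sketch of loading rows like the ones below. The Hub path is a
# hypothetical placeholder; substitute the dataset's actual identifier.
from datasets import load_dataset

ds = load_dataset("org/codesearch-python", split="train")  # hypothetical path
row = ds[0]
print(row["identifier"])         # e.g. "test_suite_demo_on_context_with_no_datasources"
print(row["docstring_summary"])  # first lines of the docstring
print(row["function"][:200])     # raw source of the extracted test function
```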

identifier: test_suite_demo_on_context_with_no_datasources

```python
def test_suite_demo_on_context_with_no_datasources(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
    """
    We call the "suite demo" command on a data context that has no datasources
    configured.
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    project_root_dir = empty_data_context.root_directory
    root_dir = project_root_dir
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir],
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 1
    assert "No datasources found in the context" in stdout
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_demo_on_context_with_no_datasources",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"root_dir",
"=",
"project_root_dir",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"No datasources found in the context\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [38, 0], end_point: [70, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False
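
These extracted functions reference module-level names (`os`, `cli`, `CliRunner`, `DataContext`, the assertion helpers) and receive `mock_webbrowser`/`mock_subprocess` from patch decorators that sit outside the extracted span. A plausible reconstruction of that scaffolding follows; the import paths and patch targets are assumptions inferred from the identifiers, not part of the dataset rows:

```python
# Assumed module-level scaffolding for the tests in these rows. Import paths
# and patch targets are inferred from the identifiers used in the functions.
import os
from unittest import mock

from click.testing import CliRunner

from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.core import ExpectationSuite
from great_expectations.util import filter_properties_dict

# With stacked mock.patch decorators, the bottom-most patch supplies the
# first mock argument, matching the (mock_webbrowser, mock_subprocess) order.
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_example(mock_webbrowser, mock_subprocess, caplog, empty_data_context):
    ...
```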

identifier: test_suite_demo_enter_existing_suite_name_as_arg

```python
def test_suite_demo_enter_existing_suite_name_as_arg(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    data_context_parameterized_expectation_suite,
):
    """
    We call the "suite demo" command with the name of an existing expectation
    suite in the --suite argument
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    not_so_empty_data_context = data_context_parameterized_expectation_suite
    project_root_dir = not_so_empty_data_context.root_directory
    os.mkdir(os.path.join(project_root_dir, "uncommitted"))
    context = DataContext(project_root_dir)
    existing_suite_name = "my_dag_node.default"
    assert context.list_expectation_suite_names() == [existing_suite_name]
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        [
            "suite",
            "demo",
            "-d",
            project_root_dir,
            "--suite",
            existing_suite_name,
            "--no-view",
        ],
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 1
    assert (
        f"An expectation suite named `{existing_suite_name}` already exists." in stdout
    )
    assert (
        f"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`"
        in stdout
    )
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
        allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
    )
```
"def",
"test_suite_demo_enter_existing_suite_name_as_arg",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
")",
":",
"not_so_empty_data_context",
"=",
"data_context_parameterized_expectation_suite",
"project_root_dir",
"=",
"not_so_empty_data_context",
".",
"root_directory",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_root_dir",
",",
"\"uncommitted\"",
")",
")",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"existing_suite_name",
"=",
"\"my_dag_node.default\"",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"existing_suite_name",
"]",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"project_root_dir",
",",
"\"--suite\"",
",",
"existing_suite_name",
",",
"\"--no-view\"",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"(",
"f\"An expectation suite named `{existing_suite_name}` already exists.\"",
"in",
"stdout",
")",
"assert",
"(",
"f\"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`\"",
"in",
"stdout",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
start_point: [75, 0], end_point: [131, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite

```python
def test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    data_context_parameterized_expectation_suite,
    filesystem_csv_2,
):
    """
    We call the "suite demo" command without the suite name argument
    The command should:
    - prompt us to enter the name of the expectation suite that will be
    created. We answer the prompt with the name of an existing expectation suite.
    - display an error message and let us retry until we answer
    with a name that is not "taken".
    - create an example suite
    - NOT open jupyter
    - open DataDocs to the new example suite page
    """
    not_so_empty_data_context = data_context_parameterized_expectation_suite
    root_dir = not_so_empty_data_context.root_directory
    os.mkdir(os.path.join(root_dir, "uncommitted"))
    runner = CliRunner(mix_stderr=False)
    csv_path = os.path.join(filesystem_csv_2, "f1.csv")
    existing_suite_name = "my_dag_node.default"
    context = DataContext(root_dir)
    assert context.list_expectation_suite_names() == [existing_suite_name]
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir],
        input=f"{csv_path}\n{existing_suite_name}\nmy_new_suite\n\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert (
        f"An expectation suite named `{existing_suite_name}` already exists." in stdout
    )
    assert (
        f"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`"
        in stdout
    )
    assert "Enter the path" in stdout
    assert "Name the new Expectation Suite [f1.warning]" in stdout
    assert (
        "Great Expectations will choose a couple of columns and generate expectations"
        in stdout
    )
    assert "Generating example Expectation Suite..." in stdout
    assert "Building" in stdout
    assert "The following Data Docs sites will be built" in stdout
    assert (
        "Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here"
        in stdout
    )
    assert "open a notebook for you now" not in stdout
    expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 1
    foo = os.path.join(
        root_dir, "uncommitted/data_docs/local_site/validations/my_new_suite/"
    )
    assert f"file://{foo}" in mock_webbrowser.call_args[0][0]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
        allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
    )
```
"def",
"test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
"filesystem_csv_2",
",",
")",
":",
"not_so_empty_data_context",
"=",
"data_context_parameterized_expectation_suite",
"root_dir",
"=",
"not_so_empty_data_context",
".",
"root_directory",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
")",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"existing_suite_name",
"=",
"\"my_dag_node.default\"",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"existing_suite_name",
"]",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"f\"{csv_path}\\n{existing_suite_name}\\nmy_new_suite\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"f\"An expectation suite named `{existing_suite_name}` already exists.\"",
"in",
"stdout",
")",
"assert",
"(",
"f\"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`\"",
"in",
"stdout",
")",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new Expectation Suite [f1.warning]\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here\"",
"in",
"stdout",
")",
"assert",
"\"open a notebook for you now\"",
"not",
"in",
"stdout",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"foo",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted/data_docs/local_site/validations/my_new_suite/\"",
")",
"assert",
"f\"file://{foo}\"",
"in",
"mock_webbrowser",
".",
"call_args",
"[",
"0",
"]",
"[",
"0",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
start_point: [136, 0], end_point: [212, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False
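
The interactive tests above drive the CLI by concatenating one answer per prompt into the `input` string; an empty segment (a bare `\n`, or the trailing `\n\n`) accepts a prompt's default. A minimal, self-contained illustration with a hypothetical command:

```python
# Hypothetical command illustrating how CliRunner feeds "input" to prompts:
# each newline-terminated segment answers one click.prompt(); an empty
# segment accepts the prompt's default.
import click
from click.testing import CliRunner

@click.command()
def demo():
    path = click.prompt("Enter the path")                         # consumes "data.csv"
    name = click.prompt("Name the suite", default="f1.warning")   # consumes "" -> default
    click.echo(f"{name}: {path}")

result = CliRunner().invoke(demo, input="data.csv\n\n")
assert result.exit_code == 0
assert "f1.warning: data.csv" in result.output
```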

identifier: test_suite_new_creates_empty_suite

```python
def test_suite_new_creates_empty_suite(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    data_context_parameterized_expectation_suite,
    filesystem_csv_2,
):
    """
    Running "suite new" should:
    - make an empty suite
    - open jupyter
    - NOT open data docs
    """
    project_root_dir = data_context_parameterized_expectation_suite.root_directory
    os.mkdir(os.path.join(project_root_dir, "uncommitted"))
    root_dir = project_root_dir
    os.chdir(root_dir)
    runner = CliRunner(mix_stderr=False)
    csv = os.path.join(filesystem_csv_2, "f1.csv")
    result = runner.invoke(
        cli,
        ["suite", "new", "-d", root_dir, "--suite", "foo"],
        input=f"{csv}\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "Enter the path" in stdout
    assert "Name the new expectation suite" not in stdout
    assert (
        "Great Expectations will choose a couple of columns and generate expectations"
        not in stdout
    )
    assert "Generating example Expectation Suite..." not in stdout
    assert "The following Data Docs sites were built" not in stdout
    assert (
        "Great Expectations will create a new Expectation Suite 'foo' and store it here"
        in stdout
    )
    assert (
        "Because you requested an empty suite, we'll open a notebook for you now to edit it!"
        in stdout
    )
    expected_suite_path = os.path.join(root_dir, "expectations", "foo.json")
    assert os.path.isfile(expected_suite_path)
    expected_notebook = os.path.join(root_dir, "uncommitted", "edit_foo.ipynb")
    assert os.path.isfile(expected_notebook)
    context = DataContext(root_dir)
    assert "foo" in context.list_expectation_suite_names()
    suite = context.get_expectation_suite("foo")
    assert suite.expectations == []
    citations = suite.get_citations()
    citations[0].pop("citation_date", None)
    citations[0].pop("interactive", None)
    assert filter_properties_dict(properties=citations[0], clean_falsy=True) == {
        "batch_kwargs": {
            "data_asset_name": "f1",
            "datasource": "mydatasource",
            "path": csv,
            "reader_method": "read_csv",
        },
        "comment": "New suite added via CLI",
    }
    assert mock_subprocess.call_count == 1
    call_args = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook in call_args[2]
    assert mock_webbrowser.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
        allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
    )
```
"def",
"test_suite_new_creates_empty_suite",
"(",
"mock_webbroser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
"filesystem_csv_2",
",",
")",
":",
"project_root_dir",
"=",
"data_context_parameterized_expectation_suite",
".",
"root_directory",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_root_dir",
",",
"\"uncommitted\"",
")",
")",
"root_dir",
"=",
"project_root_dir",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"new\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo\"",
"]",
",",
"input",
"=",
"f\"{csv}\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new expectation suite\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"not",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"not",
"in",
"stdout",
"assert",
"\"The following Data Docs sites were built\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will create a new Expectation Suite 'foo' and store it here\"",
"in",
"stdout",
")",
"assert",
"(",
"\"Because you requested an empty suite, we'll open a notebook for you now to edit it!\"",
"in",
"stdout",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"expected_notebook",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"assert",
"\"foo\"",
"in",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo\"",
")",
"assert",
"suite",
".",
"expectations",
"==",
"[",
"]",
"citations",
"=",
"suite",
".",
"get_citations",
"(",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"citation_date\"",
",",
"None",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"interactive\"",
",",
"None",
")",
"assert",
"filter_properties_dict",
"(",
"properties",
"=",
"citations",
"[",
"0",
"]",
",",
"clean_falsy",
"=",
"True",
")",
"==",
"{",
"\"batch_kwargs\"",
":",
"{",
"\"data_asset_name\"",
":",
"\"f1\"",
",",
"\"datasource\"",
":",
"\"mydatasource\"",
",",
"\"path\"",
":",
"csv",
",",
"\"reader_method\"",
":",
"\"read_csv\"",
",",
"}",
",",
"\"comment\"",
":",
"\"New suite added via CLI\"",
",",
"}",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"call_args",
"=",
"mock_subprocess",
".",
"call_args",
"[",
"0",
"]",
"[",
"0",
"]",
"assert",
"call_args",
"[",
"0",
"]",
"==",
"\"jupyter\"",
"assert",
"call_args",
"[",
"1",
"]",
"==",
"\"notebook\"",
"assert",
"expected_notebook",
"in",
"call_args",
"[",
"2",
"]",
"assert",
"mock_webbroser",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
start_point: [217, 0], end_point: [297, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False
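
The citation assertion above first pops the volatile `citation_date`/`interactive` fields, then compares only truthy properties. A simplified sketch of the `clean_falsy` behavior it relies on (an approximation; the real `filter_properties_dict` has more options):

```python
# Simplified sketch (assumption: approximates filter_properties_dict with
# clean_falsy=True): drop falsy values so empty/None fields don't break the
# equality check against the expected citation.
def filter_falsy(properties: dict) -> dict:
    return {key: value for key, value in properties.items() if value}

citation = {
    "comment": "New suite added via CLI",
    "batch_markers": None,   # falsy -> removed
    "profiler_config": {},   # falsy -> removed
}
assert filter_falsy(citation) == {"comment": "New suite added via CLI"}
```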

identifier: test_suite_new_empty_with_no_jupyter

```python
def test_suite_new_empty_with_no_jupyter(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    data_context_parameterized_expectation_suite,
    filesystem_csv_2,
):
    """
    Running "suite new --no-jupyter" should:
    - make an empty suite
    - NOT open jupyter
    - NOT open data docs
    """
    os.mkdir(
        os.path.join(
            data_context_parameterized_expectation_suite.root_directory, "uncommitted"
        )
    )
    root_dir = data_context_parameterized_expectation_suite.root_directory
    runner = CliRunner(mix_stderr=False)
    csv = os.path.join(filesystem_csv_2, "f1.csv")
    result = runner.invoke(
        cli,
        ["suite", "new", "-d", root_dir, "--suite", "foo", "--no-jupyter"],
        input=f"{csv}\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "Enter the path" in stdout
    assert "Name the new expectation suite" not in stdout
    assert (
        "Great Expectations will choose a couple of columns and generate expectations"
        not in stdout
    )
    assert "Generating example Expectation Suite..." not in stdout
    assert "The following Data Docs sites were built" not in stdout
    assert (
        "Great Expectations will create a new Expectation Suite 'foo' and store it here"
        in stdout
    )
    assert "open a notebook for you now" not in stdout
    expected_suite_path = os.path.join(root_dir, "expectations", "foo.json")
    assert os.path.isfile(expected_suite_path)
    expected_notebook = os.path.join(root_dir, "uncommitted", "edit_foo.ipynb")
    assert os.path.isfile(expected_notebook)
    context = DataContext(root_dir)
    assert "foo" in context.list_expectation_suite_names()
    suite = context.get_expectation_suite("foo")
    assert suite.expectations == []
    citations = suite.get_citations()
    citations[0].pop("citation_date", None)
    citations[0].pop("interactive", None)
    assert filter_properties_dict(properties=citations[0], clean_falsy=True) == {
        "batch_kwargs": {
            "data_asset_name": "f1",
            "datasource": "mydatasource",
            "path": csv,
            "reader_method": "read_csv",
        },
        "comment": "New suite added via CLI",
    }
    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
        allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
    )
```
"def",
"test_suite_new_empty_with_no_jupyter",
"(",
"mock_webbroser",
",",
"mock_subprocess",
",",
"caplog",
",",
"data_context_parameterized_expectation_suite",
",",
"filesystem_csv_2",
",",
")",
":",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_context_parameterized_expectation_suite",
".",
"root_directory",
",",
"\"uncommitted\"",
")",
")",
"root_dir",
"=",
"data_context_parameterized_expectation_suite",
".",
"root_directory",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"new\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo\"",
",",
"\"--no-jupyter\"",
"]",
",",
"input",
"=",
"f\"{csv}\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new expectation suite\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"not",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"not",
"in",
"stdout",
"assert",
"\"The following Data Docs sites were built\"",
"not",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will create a new Expectation Suite 'foo' and store it here\"",
"in",
"stdout",
")",
"assert",
"\"open a notebook for you now\"",
"not",
"in",
"stdout",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"expected_notebook",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"assert",
"\"foo\"",
"in",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo\"",
")",
"assert",
"suite",
".",
"expectations",
"==",
"[",
"]",
"citations",
"=",
"suite",
".",
"get_citations",
"(",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"citation_date\"",
",",
"None",
")",
"citations",
"[",
"0",
"]",
".",
"pop",
"(",
"\"interactive\"",
",",
"None",
")",
"assert",
"filter_properties_dict",
"(",
"properties",
"=",
"citations",
"[",
"0",
"]",
",",
"clean_falsy",
"=",
"True",
")",
"==",
"{",
"\"batch_kwargs\"",
":",
"{",
"\"data_asset_name\"",
":",
"\"f1\"",
",",
"\"datasource\"",
":",
"\"mydatasource\"",
",",
"\"path\"",
":",
"csv",
",",
"\"reader_method\"",
":",
"\"read_csv\"",
",",
"}",
",",
"\"comment\"",
":",
"\"New suite added via CLI\"",
",",
"}",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_webbroser",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
start_point: [302, 0], end_point: [376, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_demo_one_datasource_without_generator_without_suite_name_argument

```python
def test_suite_demo_one_datasource_without_generator_without_suite_name_argument(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
    """
    We call the "suite demo" command without the suite name argument
    The command should:
    - NOT prompt us to choose a datasource (because there is only one)
    - prompt us only to enter the path (the datasource has no generator
    configured, so there is no list of available data assets to choose from).
    We enter the path of the file we want the command to use as the batch to
    create the expectation suite.
    - prompt us to enter the name of the expectation suite that will be
    created
    - open Data Docs
    - NOT open jupyter
    """
    empty_data_context.add_datasource(
        "my_datasource",
        module_name="great_expectations.datasource",
        class_name="PandasDatasource",
    )
    context = empty_data_context
    root_dir = context.root_directory
    context = DataContext(root_dir)
    runner = CliRunner(mix_stderr=False)
    csv_path = os.path.join(filesystem_csv_2, "f1.csv")
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir],
        input=f"{csv_path}\nmy_new_suite\n\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "Enter the path" in stdout
    assert "Name the new Expectation Suite [f1.warning]" in stdout
    assert (
        "Great Expectations will choose a couple of columns and generate expectations"
        in stdout
    )
    assert (
        "Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
        in stdout
    )
    assert "Generating example Expectation Suite..." in stdout
    assert "Building" in stdout
    assert "The following Data Docs sites will be built" in stdout
    obs_urls = context.get_docs_sites_urls()
    assert len(obs_urls) == 1
    assert (
        "great_expectations/uncommitted/data_docs/local_site/index.html"
        in obs_urls[0]["site_url"]
    )
    expected_index_path = os.path.join(
        root_dir, "uncommitted", "data_docs", "local_site", "index.html"
    )
    assert os.path.isfile(expected_index_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_webbrowser.call_count == 1
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_demo_one_datasource_without_generator_without_suite_name_argument",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
",",
"filesystem_csv_2",
")",
":",
"empty_data_context",
".",
"add_datasource",
"(",
"\"my_datasource\"",
",",
"module_name",
"=",
"\"great_expectations.datasource\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
",",
")",
"context",
"=",
"empty_data_context",
"root_dir",
"=",
"context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"f\"{csv_path}\\nmy_new_suite\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"assert",
"\"Name the new Expectation Suite [f1.warning]\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"obs_urls",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"assert",
"len",
"(",
"obs_urls",
")",
"==",
"1",
"assert",
"(",
"\"great_expectations/uncommitted/data_docs/local_site/index.html\"",
"in",
"obs_urls",
"[",
"0",
"]",
"[",
"\"site_url\"",
"]",
")",
"expected_index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"data_docs\"",
",",
"\"local_site\"",
",",
"\"index.html\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_index_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [381, 0], end_point: [456, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument

```python
def test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    site_builder_data_context_with_html_store_titanic_random,
):
    """
    We call the "suite demo" command without the suite name argument
    - The data context has two datasources - we choose one of them.
    - It has a generator configured. We choose to use the generator and select a
    generator asset from the list.
    - The command should prompt us to enter the name of the expectation suite
    that will be created.
    - open Data Docs
    - NOT open jupyter
    """
    root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
    os.chdir(root_dir)
    context = DataContext(root_dir)
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir],
        input="\n1\n1\n1\nmy_new_suite\n\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert (
        """Select a datasource
    1. mydatasource
    2. random
    3. titanic"""
        in stdout
    )
    assert (
        """Which data would you like to use?
    1. random (directory)
    2. titanic (directory)"""
        in stdout
    )
    assert "Name the new Expectation Suite [random.warning]" in stdout
    assert (
        "Great Expectations will choose a couple of columns and generate expectations"
        in stdout
    )
    assert (
        "Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
        in stdout
    )
    assert "Generating example Expectation Suite..." in stdout
    assert "Building" in stdout
    assert "The following Data Docs sites will be built" in stdout
    obs_urls = context.get_docs_sites_urls()
    assert len(obs_urls) == 2
    assert (
        "great_expectations/uncommitted/data_docs/local_site/index.html"
        in obs_urls[0]["site_url"]
    )
    expected_index_path = os.path.join(
        root_dir, "uncommitted", "data_docs", "local_site", "index.html"
    )
    assert os.path.isfile(expected_index_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_webbrowser.call_count == 2
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"site_builder_data_context_with_html_store_titanic_random",
".",
"root_directory",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"\\n1\\n1\\n1\\nmy_new_suite\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"\"\"Select a datasource\n 1. mydatasource\n 2. random\n 3. titanic\"\"\"",
"in",
"stdout",
")",
"assert",
"(",
"\"\"\"Which data would you like to use?\n 1. random (directory)\n 2. titanic (directory)\"\"\"",
"in",
"stdout",
")",
"assert",
"\"Name the new Expectation Suite [random.warning]\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"obs_urls",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"assert",
"len",
"(",
"obs_urls",
")",
"==",
"2",
"assert",
"(",
"\"great_expectations/uncommitted/data_docs/local_site/index.html\"",
"in",
"obs_urls",
"[",
"0",
"]",
"[",
"\"site_url\"",
"]",
")",
"expected_index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"data_docs\"",
",",
"\"local_site\"",
",",
"\"index.html\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_index_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [461, 0], end_point: [540, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument

```python
def test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    site_builder_data_context_with_html_store_titanic_random,
):
    """
    We call the "suite demo" command with the suite name argument
    - The data context has two datasources - we choose one of them.
    - It has a generator configured. We choose to use the generator and select
    a generator asset from the list.
    - open Data Docs
    - NOT open jupyter
    """
    root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
    os.chdir(root_dir)
    context = DataContext(root_dir)
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
        input="\n2\n1\n1\n\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "Select a datasource" in stdout
    assert "Which data would you like to use" in stdout
    assert (
        "Great Expectations will choose a couple of columns and generate expectations"
        in stdout
    )
    assert "Generating example Expectation Suite..." in stdout
    assert "Building" in stdout
    assert "The following Data Docs sites will be built" in stdout
    assert (
        "Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:"
        in stdout
    )
    obs_urls = context.get_docs_sites_urls()
    assert len(obs_urls) == 2
    assert (
        "great_expectations/uncommitted/data_docs/local_site/index.html"
        in obs_urls[0]["site_url"]
    )
    expected_index_path = os.path.join(
        root_dir, "uncommitted", "data_docs", "local_site", "index.html"
    )
    assert os.path.isfile(expected_index_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_webbrowser.call_count == 2
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"site_builder_data_context_with_html_store_titanic_random",
".",
"root_directory",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
"]",
",",
"input",
"=",
"\"\\n2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Select a datasource\"",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will choose a couple of columns and generate expectations\"",
"in",
"stdout",
")",
"assert",
"\"Generating example Expectation Suite...\"",
"in",
"stdout",
"assert",
"\"Building\"",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built\"",
"in",
"stdout",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:\"",
"in",
"stdout",
")",
"obs_urls",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"assert",
"len",
"(",
"obs_urls",
")",
"==",
"2",
"assert",
"(",
"\"great_expectations/uncommitted/data_docs/local_site/index.html\"",
"in",
"obs_urls",
"[",
"0",
"]",
"[",
"\"site_url\"",
"]",
")",
"expected_index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"data_docs\"",
",",
"\"local_site\"",
",",
"\"index.html\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_index_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [545, 0], end_point: [609, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_edit_without_suite_name_raises_error

```python
def test_suite_edit_without_suite_name_raises_error():
    """This is really only testing Click missing arguments."""
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(cli, "suite edit", catch_exceptions=False)
    assert result.exit_code == 2
    assert (
        'Error: Missing argument "SUITE".' in result.stderr
        or "Error: Missing argument 'SUITE'." in result.stderr
    )
```
"def",
"test_suite_edit_without_suite_name_raises_error",
"(",
")",
":",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"\"suite edit\"",
",",
"catch_exceptions",
"=",
"False",
")",
"assert",
"result",
".",
"exit_code",
"==",
"2",
"assert",
"(",
"'Error: Missing argument \"SUITE\".'",
"in",
"result",
".",
"stderr",
"or",
"\"Error: Missing argument 'SUITE'.\"",
"in",
"result",
".",
"stderr",
")"
start_point: [612, 0], end_point: [620, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'en', 'en'], is_langid_reliable: True
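
The exit code 2 and the two accepted quote styles above come from Click itself: invoking a command without a required argument aborts with a usage error on stderr, and the quoting of the argument name changed across Click releases. A minimal illustration, assuming a Click version that supports `CliRunner(mix_stderr=...)` as these tests do:

```python
# Minimal illustration of Click's missing-argument behavior: usage errors
# exit with code 2 and report the argument on stderr. The name's quoting
# ("SUITE" vs 'SUITE') varies across Click releases, hence the dual assert
# in the test above.
import click
from click.testing import CliRunner

@click.command()
@click.argument("suite")
def edit(suite):
    click.echo(f"editing {suite}")

result = CliRunner(mix_stderr=False).invoke(edit)
assert result.exit_code == 2
assert "Missing argument" in result.stderr
```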

identifier: test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error

```python
def test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    project_dir = empty_data_context.root_directory
    context = DataContext(project_dir)
    context.create_expectation_suite("foo")
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "edit", "foo", "-d", project_dir, "--batch-kwargs", "'{foobar}'"],
        catch_exceptions=False,
    )
    stdout = result.output
    assert result.exit_code == 1
    assert "Please check that your batch_kwargs are valid JSON." in stdout
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"\"'{foobar}'\"",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Please check that your batch_kwargs are valid JSON.\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [625, 0], end_point: [654, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False
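
The helpful error above implies the CLI validates `--batch-kwargs` with a JSON parse before trying to load a batch. A minimal sketch of that step (an assumption about the implementation, not extracted code):

```python
# Sketch (assumption) of the validation the "suite edit" command performs on
# --batch-kwargs before loading a batch: parse as JSON, fail with the
# friendly message the test asserts on.
import json
import sys

def parse_batch_kwargs(raw: str) -> dict:
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        print("Please check that your batch_kwargs are valid JSON.")
        sys.exit(1)

assert parse_batch_kwargs('{"table": "fake", "datasource": "source"}')["table"] == "fake"
# parse_batch_kwargs("'{foobar}'") would print the message and exit with code 1.
```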

identifier: test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error

```python
def test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    project_dir = empty_data_context.root_directory
    context = DataContext(project_dir)
    context.create_expectation_suite("foo")
    context.add_datasource("source", class_name="PandasDatasource")
    runner = CliRunner(mix_stderr=False)
    batch_kwargs = '{"table": "fake", "datasource": "source"}'
    result = runner.invoke(
        cli,
        ["suite", "edit", "foo", "-d", project_dir, "--batch-kwargs", batch_kwargs],
        catch_exceptions=False,
    )
    stdout = result.output
    assert result.exit_code == 1
    assert "To continue editing this suite" not in stdout
    assert "Please check that your batch_kwargs are able to load a batch." in stdout
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"context",
".",
"add_datasource",
"(",
"\"source\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"batch_kwargs",
"=",
"'{\"table\": \"fake\", \"datasource\": \"source\"}'",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"batch_kwargs",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"To continue editing this suite\"",
"not",
"in",
"stdout",
"assert",
"\"Please check that your batch_kwargs are able to load a batch.\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [659, 0], end_point: [692, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_edit_with_non_existent_suite_name_raises_error

```python
def test_suite_edit_with_non_existent_suite_name_raises_error(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    project_dir = empty_data_context.root_directory
    assert not empty_data_context.list_expectation_suites()
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        f"suite edit not_a_real_suite -d {project_dir}",
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    assert "Could not find a suite named `not_a_real_suite`." in result.output
    assert "by running `great_expectations suite list`" in result.output
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_edit_with_non_existent_suite_name_raises_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"assert",
"not",
"empty_data_context",
".",
"list_expectation_suites",
"(",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"\"suite edit not_a_real_suite -d {}\"",
".",
"format",
"(",
"project_dir",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Could not find a suite named `not_a_real_suite`.\"",
"in",
"result",
".",
"output",
"assert",
"\"by running `great_expectations suite list`\"",
"in",
"result",
".",
"output",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [697, 0], end_point: [725, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_edit_with_non_existent_datasource_shows_helpful_error_message

```python
def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    project_dir = empty_data_context.root_directory
    context = DataContext(project_dir)
    context.create_expectation_suite("foo")
    assert context.list_expectation_suites()[0].expectation_suite_name == "foo"
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        f"suite edit foo -d {project_dir} --datasource not_real",
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    assert (
        "Unable to load datasource `not_real` -- no configuration found or invalid configuration."
        in result.output
    )
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 0
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_edit_with_non_existent_datasource_shows_helpful_error_message",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
")",
":",
"project_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"assert",
"context",
".",
"list_expectation_suites",
"(",
")",
"[",
"0",
"]",
".",
"expectation_suite_name",
"==",
"\"foo\"",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"suite edit foo -d {project_dir} --datasource not_real\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"(",
"\"Unable to load datasource `not_real` -- no configuration found or invalid configuration.\"",
"in",
"result",
".",
"output",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
start_point: [730, 0], end_point: [762, 5], language: python, docstring_language: en, docstring_language_predictions: ['en', 'error', 'th'], is_langid_reliable: False

identifier: test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations

```python
def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    site_builder_data_context_with_html_store_titanic_random,
):
    """
    Here we verify that the "suite edit" command helps the user to specify the batch
    kwargs when it is called without the optional arguments that specify the batch.
    First, we call the "suite demo" command to create the expectation suite our test
    will edit - this step is just a setup.
    We call the "suite edit" command without any optional arguments. This means that
    the command will help us specify the batch kwargs interactively.
    The data context has two datasources - we choose one of them. It has a generator
    configured. We choose to use the generator and select a generator asset from the list.
    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
    os.chdir(root_dir)
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
        input="\n2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert mock_webbrowser.call_count == 2
    assert mock_subprocess.call_count == 0
    mock_webbrowser.reset_mock()
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(root_dir)
    suite = context.get_expectation_suite("foo_suite")
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations")
    context.save_expectation_suite(suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        [
            "suite",
            "edit",
            "foo_suite",
            "-d",
            root_dir,
        ],
        input="\n2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert "Which data would you like to use" in stdout
    expected_notebook_path = os.path.join(
        root_dir, "uncommitted", "edit_foo_suite.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 1
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
```
"def",
"test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"site_builder_data_context_with_html_store_titanic_random",
".",
"root_directory",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
"]",
",",
"input",
"=",
"\"\\n2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"# remove the citations from the suite",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo_suite\"",
")",
"assert",
"isinstance",
"(",
"suite",
",",
"ExpectationSuite",
")",
"suite",
".",
"meta",
".",
"pop",
"(",
"\"citations\"",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"# Actual testing really starts here",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo_suite\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"]",
",",
"input",
"=",
"\"\\n2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"\"A batch of data is required to edit the suite\"",
"in",
"stdout",
"assert",
"\"Select a datasource\"",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
767,
0
] | [
847,
5
] | python | en | ['en', 'error', 'th'] | False |
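These CLI tests drive interactive prompts by piping newline-separated answers through Click's CliRunner (the input="\n2\n1\n1\n\n" strings above answer one prompt per line). A minimal, self-contained sketch of that pattern - the "pick" command and its prompt are hypothetical stand-ins, not part of the suite under test:

import click
from click.testing import CliRunner

@click.command()
def pick():
    # Hypothetical command: one prompt, echoed back.
    choice = click.prompt("Select a datasource", default="1")
    click.echo(f"picked {choice}")

runner = CliRunner(mix_stderr=False)
# Each newline in `input` answers one interactive prompt, in order.
result = runner.invoke(pick, input="2\n", catch_exceptions=False)
assert result.exit_code == 0
assert "picked 2" in result.output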
test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
) |
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch.
First, we call the "suite demo" command to create the expectation suite our
test will edit - this step is just setup.
We call the "suite edit" command without any optional arguments.
The command should:
- NOT open Data Docs
- NOT open jupyter
|
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch. | def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch.
First, we call the "suite demo" command to create the expectation suite our
test will edit - this step is just setup.
We call the "suite edit" command without any optional arguments.
The command should:
- NOT open Data Docs
- NOT open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
os.chdir(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert result.exit_code == 0
context = DataContext(root_dir)
suite = context.get_expectation_suite("foo_suite")
assert isinstance(suite, ExpectationSuite)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "foo_suite", "-d", root_dir],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"site_builder_data_context_with_html_store_titanic_random",
".",
"root_directory",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
"]",
",",
"input",
"=",
"\"\\n2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"2",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"foo_suite\"",
")",
"assert",
"isinstance",
"(",
"suite",
",",
"ExpectationSuite",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo_suite\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"\\n2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"\"Select a datasource\"",
"not",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"not",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
852,
0
] | [
917,
5
] | python | en | ['en', 'error', 'th'] | False |
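The two tests above hinge on the "citations" entry in the suite's meta: when present, "suite edit" can reuse the recorded batch_kwargs instead of prompting. A hedged sketch of that mechanic - the field names below mirror the tests, not the full Great Expectations schema:

suite_meta = {
    "citations": [
        {"batch_kwargs": {"datasource": "random", "path": "data/random/f1.csv"}},
    ]
}

# With citations present, the most recent batch_kwargs can be reused directly.
citations = suite_meta.get("citations") or []
batch_kwargs = citations[-1]["batch_kwargs"] if citations else None
assert batch_kwargs is not None

# Popping "citations" (as the first test does) forces the interactive prompts.
suite_meta.pop("citations")
assert suite_meta.get("citations") is None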
test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg | (
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
) |
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs).
First, we call the "suite demo" command to create the expectation suite our test
will edit - this step is just setup. The data context has two datasources - during
setup we choose one of them, use its configured generator, and select a generator
asset from the list.
We then call the "suite edit" command with the --batch-kwargs argument, so the
command skips the interactive prompts and uses the batch we specified.
The command should:
- NOT open Data Docs
- open jupyter
|
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs). | def test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs).
First, we call the "suite demo" command to create the expectation suite our test
will edit - this step is just setup. The data context has two datasources - during
setup we choose one of them, use its configured generator, and select a generator
asset from the list.
We then call the "suite edit" command with the --batch-kwargs argument, so the
command skips the interactive prompts and uses the batch we specified.
The command should:
- NOT open Data Docs
- open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite", "--no-view"],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:"
in stdout
)
batch_kwargs = {
"datasource": "random",
"path": str(
os.path.join(
os.path.abspath(os.path.join(root_dir, os.pardir)),
"data",
"random",
"f1.csv",
)
),
}
batch_kwargs_arg_str = json.dumps(batch_kwargs)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo_suite",
"-d",
root_dir,
"--batch-kwargs",
batch_kwargs_arg_str,
],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"site_builder_data_context_with_html_store_titanic_random",
",",
")",
":",
"root_dir",
"=",
"site_builder_data_context_with_html_store_titanic_random",
".",
"root_directory",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--suite\"",
",",
"\"foo_suite\"",
",",
"\"--no-view\"",
"]",
",",
"input",
"=",
"\"\\n2\\n1\\n1\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:\"",
"in",
"stdout",
")",
"batch_kwargs",
"=",
"{",
"\"datasource\"",
":",
"\"random\"",
",",
"\"path\"",
":",
"str",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"os",
".",
"pardir",
")",
")",
",",
"\"data\"",
",",
"\"random\"",
",",
"\"f1.csv\"",
",",
")",
")",
",",
"}",
"batch_kwargs_arg_str",
"=",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo_suite\"",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--batch-kwargs\"",
",",
"batch_kwargs_arg_str",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Select a datasource\"",
"not",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"not",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"foo_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
922,
0
] | [
1013,
5
] | python | en | ['en', 'error', 'th'] | False |
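The --batch-kwargs value above is just a JSON-serialized dict; a small sketch of the round trip (datasource name and path are illustrative only):

import json
import os

batch_kwargs = {
    "datasource": "random",
    "path": os.path.join("data", "random", "f1.csv"),
}
arg = json.dumps(batch_kwargs)           # the string handed to --batch-kwargs
assert json.loads(arg) == batch_kwargs   # what the command parses back out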
test_suite_edit_on_exsiting_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error | (
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
) |
Given:
- the suite foo exists
- a datasource exists
- and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user should see a nice error and the program halts before notebook
compilation.
- NOT open Data Docs
- NOT open jupyter
|
Given:
- the suite foo exists
- a datasource exists
- and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}' | def test_suite_edit_on_exsiting_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error(
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
):
"""
Given:
- the suite foo exists
- a datasource exists
- and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user should see a nice error and the program halts before notebook
compilation.
- NOT open Data Docs
- NOT open jupyter
'"""
project_dir = titanic_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
batch_kwargs = {"path": "../data/Titanic.csv"}
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo",
"-d",
project_dir,
"--batch-kwargs",
json.dumps(batch_kwargs),
],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "Please check that your batch_kwargs are able to load a batch." in stdout
assert "Unable to load datasource `None`" in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_on_exsiting_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"titanic_data_context",
",",
")",
":",
"project_dir",
"=",
"titanic_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"batch_kwargs",
"=",
"{",
"\"path\"",
":",
"\"../data/Titanic.csv\"",
"}",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Please check that your batch_kwargs are able to load a batch.\"",
"in",
"stdout",
"assert",
"\"Unable to load datasource `None`\"",
"in",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1018,
0
] | [
1068,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_on_exsiting_suite_one_datasources_with_datasource_arg_and_batch_kwargs | (
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
) |
Given:
- the suite foo exists
- a datasource bar exists
- and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user gets a working notebook
- NOT open Data Docs
- open jupyter
|
Given:
- the suite foo exists
- a datasource bar exists
- and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}' | def test_suite_edit_on_exsiting_suite_one_datasources_with_datasource_arg_and_batch_kwargs(
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
):
"""
Given:
- the suite foo exists
- a datasource bar exists
- and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user gets a working notebook
- NOT open Data Docs
- open jupyter
"""
project_dir = titanic_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
batch_kwargs = {"path": os.path.join(project_dir, "../", "data", "Titanic.csv")}
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo",
"-d",
project_dir,
"--batch-kwargs",
json.dumps(batch_kwargs),
"--datasource",
"mydatasource",
],
catch_exceptions=False,
)
stdout = result.output
assert stdout == ""
assert result.exit_code == 0
expected_notebook_path = os.path.join(project_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(project_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_edit_on_exsiting_suite_one_datasources_with_datasource_arg_and_batch_kwargs",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"titanic_data_context",
",",
")",
":",
"project_dir",
"=",
"titanic_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_dir",
")",
"context",
".",
"create_expectation_suite",
"(",
"\"foo\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"batch_kwargs",
"=",
"{",
"\"path\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"../\"",
",",
"\"data\"",
",",
"\"Titanic.csv\"",
")",
"}",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"foo\"",
",",
"\"-d\"",
",",
"project_dir",
",",
"\"--batch-kwargs\"",
",",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
",",
"\"--datasource\"",
",",
"\"mydatasource\"",
",",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"stdout",
"==",
"\"\"",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"uncommitted\"",
",",
"\"edit_foo.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"expectations\"",
",",
"\"foo.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1073,
0
] | [
1128,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations | (
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
) |
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
First, we call the "suite demo" command to create the expectation suite our test
will edit - this step is just setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has one datasource. The datasource has no generators
configured. The command prompts us to enter the file path.
|
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch. | def test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations(
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
"""
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
First, we call the "suite demo" command to create the expectation suite our test
will edit - this step is just setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has one datasource. The datasource has no generators
configured. The command prompts us to enter the file path.
"""
empty_data_context.add_datasource(
"my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
)
not_so_empty_data_context = empty_data_context
project_root_dir = not_so_empty_data_context.root_directory
root_dir = project_root_dir
os.chdir(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input="{:s}\nmy_new_suite\n\n".format(os.path.join(filesystem_csv_2, "f1.csv")),
catch_exceptions=False,
)
stdout = result.stdout
assert mock_webbrowser.call_count == 1
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert result.exit_code == 0
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
# remove the citations from the suite
context = DataContext(project_root_dir)
suite = context.get_expectation_suite("my_new_suite")
suite.meta.pop("citations")
context.save_expectation_suite(suite)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "my_new_suite", "-d", root_dir],
input="{:s}\n\n".format(os.path.join(filesystem_csv_2, "f1.csv")),
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
assert "Enter the path" in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_my_new_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations",
"(",
"mock_webbrowser",
",",
"mock_subprocess",
",",
"caplog",
",",
"empty_data_context",
",",
"filesystem_csv_2",
")",
":",
"empty_data_context",
".",
"add_datasource",
"(",
"\"my_datasource\"",
",",
"module_name",
"=",
"\"great_expectations.datasource\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
",",
")",
"not_so_empty_data_context",
"=",
"empty_data_context",
"project_root_dir",
"=",
"not_so_empty_data_context",
".",
"root_directory",
"root_dir",
"=",
"project_root_dir",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"demo\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"{:s}\\nmy_new_suite\\n\\n\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"mock_subprocess",
".",
"reset_mock",
"(",
")",
"mock_webbrowser",
".",
"reset_mock",
"(",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:\"",
"in",
"stdout",
")",
"# remove the citations from the suite",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"\"my_new_suite\"",
")",
"suite",
".",
"meta",
".",
"pop",
"(",
"\"citations\"",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"edit\"",
",",
"\"my_new_suite\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"{:s}\\n\\n\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filesystem_csv_2",
",",
"\"f1.csv\"",
")",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"\"Select a datasource\"",
"not",
"in",
"stdout",
"assert",
"\"Which data would you like to use\"",
"not",
"in",
"stdout",
"assert",
"\"Enter the path\"",
"in",
"stdout",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_my_new_suite.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"expected_suite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"expectations\"",
",",
"\"my_new_suite.json\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_suite_path",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"0",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
1133,
0
] | [
1212,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_scaffold_on_context_with_no_datasource_raises_error | (
mock_subprocess, mock_emit, caplog, empty_data_context_stats_enabled
) |
We call the "suite scaffold" command on a context with no datasource
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
|
We call the "suite scaffold" command on a context with no datasource | def test_suite_scaffold_on_context_with_no_datasource_raises_error(
mock_subprocess, mock_emit, caplog, empty_data_context_stats_enabled
):
"""
We call the "suite scaffold" command on a context with no datasource
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
"""
context = empty_data_context_stats_enabled
root_dir = context.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", "foop", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert (
"No datasources found in the context. To add a datasource, run `great_expectations datasource new`"
in stdout
)
assert mock_subprocess.call_count == 0
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_scaffold_on_context_with_no_datasource_raises_error",
"(",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"empty_data_context_stats_enabled",
")",
":",
"context",
"=",
"empty_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"\"foop\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"(",
"\"No datasources found in the context. To add a datasource, run `great_expectations datasource new`\"",
"in",
"stdout",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
1409,
0
] | [
1454,
5
] | python | en | ['en', 'error', 'th'] | False |
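The telemetry assertions above follow a standard unittest.mock pattern: every call to the patched emitter is recorded and later compared against an expected list. A self-contained sketch with a hypothetical emitter:

from unittest import mock

emitter = mock.Mock()
emitter({"event": "data_context.__init__", "success": True})
emitter({"event": "cli.suite.scaffold", "success": False})

assert emitter.call_count == 2
assert emitter.call_args_list == [
    mock.call({"event": "data_context.__init__", "success": True}),
    mock.call({"event": "cli.suite.scaffold", "success": False}),
]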
test_suite_scaffold_on_existing_suite_raises_error | (
mock_emit, caplog, empty_data_context_stats_enabled
) |
We call the "suite scaffold" command with an existing suite
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
|
We call the "suite scaffold" command with an existing suite | def test_suite_scaffold_on_existing_suite_raises_error(
mock_emit, caplog, empty_data_context_stats_enabled
):
"""
We call the "suite scaffold" command with an existing suite
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
"""
context = empty_data_context_stats_enabled
root_dir = context.root_directory
suite = context.create_expectation_suite("foop")
context.save_expectation_suite(suite)
assert context.list_expectation_suite_names() == ["foop"]
mock_emit.reset_mock()
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", "foop", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "An expectation suite named `foop` already exists." in stdout
assert (
"If you intend to edit the suite please use `great_expectations suite edit foop`."
in stdout
)
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_suite_scaffold_on_existing_suite_raises_error",
"(",
"mock_emit",
",",
"caplog",
",",
"empty_data_context_stats_enabled",
")",
":",
"context",
"=",
"empty_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"suite",
"=",
"context",
".",
"create_expectation_suite",
"(",
"\"foop\"",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"\"foop\"",
"]",
"mock_emit",
".",
"reset_mock",
"(",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"\"foop\"",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"An expectation suite named `foop` already exists.\"",
"in",
"stdout",
"assert",
"(",
"\"If you intend to edit the suite please use `great_expectations suite edit foop`.\"",
"in",
"stdout",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
1460,
0
] | [
1509,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_scaffold_creates_notebook_and_opens_jupyter | (
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
) |
We call the "suite scaffold" command
The command should:
- create a new notebook
- open the notebook in jupyter
- send a DataContext init success message
- send a scaffold success message
|
We call the "suite scaffold" command | def test_suite_scaffold_creates_notebook_and_opens_jupyter(
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
):
"""
We call the "suite scaffold" command
The command should:
- create a new notebook
- open the notebook in jupyter
- send a DataContext init success message
- send a scaffold success message
"""
context = titanic_data_context_stats_enabled
root_dir = context.root_directory
suite_name = "foop"
expected_notebook_path = os.path.join(
root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb"
)
assert not os.path.isfile(expected_notebook_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", suite_name, "-d", root_dir],
input="1\n1\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert os.path.isfile(expected_notebook_path)
assert mock_subprocess.call_count == 1
assert mock_subprocess.call_args_list == [
mock.call(["jupyter", "notebook", expected_notebook_path])
]
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_scaffold_creates_notebook_and_opens_jupyter",
"(",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"titanic_data_context_stats_enabled",
")",
":",
"context",
"=",
"titanic_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"suite_name",
"=",
"\"foop\"",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"context",
".",
"GE_EDIT_NOTEBOOK_DIR",
",",
"f\"scaffold_{suite_name}.ipynb\"",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"suite_name",
",",
"\"-d\"",
",",
"root_dir",
"]",
",",
"input",
"=",
"\"1\\n1\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert",
"mock_subprocess",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"[",
"\"jupyter\"",
",",
"\"notebook\"",
",",
"expected_notebook_path",
"]",
")",
"]",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1516,
0
] | [
1568,
5
] | python | en | ['en', 'error', 'th'] | False |
test_suite_scaffold_creates_notebook_with_no_jupyter_flag | (
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
) |
We call the "suite scaffold --no-jupyter"
The command should:
- create a new notebook
- NOT open the notebook in jupyter
- tell the user to open the notebook
- send a DataContext init success message
- send a scaffold success message
|
We call the "suite scaffold --no-jupyter" | def test_suite_scaffold_creates_notebook_with_no_jupyter_flag(
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
):
"""
We call the "suite scaffold --no-jupyter"
The command should:
- create a new notebook
- NOT open the notebook in jupyter
- tell the user to open the notebook
- send a DataContext init success message
- send a scaffold success message
"""
context = titanic_data_context_stats_enabled
root_dir = context.root_directory
suite_name = "foop"
expected_notebook_path = os.path.join(
root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb"
)
assert not os.path.isfile(expected_notebook_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", suite_name, "-d", root_dir, "--no-jupyter"],
input="1\n1\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert os.path.isfile(expected_notebook_path)
assert (
f"To continue scaffolding this suite, run `jupyter notebook {expected_notebook_path}`"
in stdout
)
assert mock_subprocess.call_count == 0
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
) | [
"def",
"test_suite_scaffold_creates_notebook_with_no_jupyter_flag",
"(",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"titanic_data_context_stats_enabled",
")",
":",
"context",
"=",
"titanic_data_context_stats_enabled",
"root_dir",
"=",
"context",
".",
"root_directory",
"suite_name",
"=",
"\"foop\"",
"expected_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"context",
".",
"GE_EDIT_NOTEBOOK_DIR",
",",
"f\"scaffold_{suite_name}.ipynb\"",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"suite\"",
",",
"\"scaffold\"",
",",
"suite_name",
",",
"\"-d\"",
",",
"root_dir",
",",
"\"--no-jupyter\"",
"]",
",",
"input",
"=",
"\"1\\n1\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"assert",
"(",
"f\"To continue scaffolding this suite, run `jupyter notebook {expected_notebook_path}`\"",
"in",
"stdout",
")",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"0",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.suite.scaffold\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v2\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
"allowed_deprecation_message",
"=",
"VALIDATION_OPERATORS_DEPRECATION_MESSAGE",
",",
")"
] | [
1575,
0
] | [
1630,
5
] | python | en | ['en', 'error', 'th'] | False |
EnvoyStatsMgr.update_log_levels | (self, last_attempt: float, level: Optional[str]=None) |
Heavy lifting around updating the Envoy log levels.
You MUST hold the update lock when calling this method.
You MUST NOT hold the access lock when calling this method.
update_log_levels does all the work of talking to Envoy and computing
the new log levels, then grabs the access_lock just long enough to update the data
structures for others to look at.
|
Heavy lifting around updating the Envoy log levels. | def update_log_levels(self, last_attempt: float, level: Optional[str]=None) -> bool:
"""
Heavy lifting around updating the Envoy log levels.
You MUST hold the update lock when calling this method.
You MUST NOT hold the access lock when calling this method.
update_log_levels does all the work of talking to Envoy and computing
the new log levels, then grabs the access_lock just long enough to update the data
structures for others to look at.
"""
# self.logger.info("updating levels")
text = self.fetch_log_levels(level)
if not text:
# Ew.
with self.access_lock:
# EnvoyStats is immutable, so...
new_stats = EnvoyStats(
max_live_age=self.stats.max_live_age,
max_ready_age=self.stats.max_ready_age,
created=self.stats.created,
last_update=self.stats.last_update,
last_attempt=last_attempt, # THIS IS A CHANGE
update_errors=self.stats.update_errors + 1, # THIS IS A CHANGE
requests=self.stats.requests,
clusters=self.stats.clusters,
envoy=self.stats.envoy
)
self.stats = new_stats
return False
levels: Dict[str, Dict[str, bool]] = {}
for line in text.split("\n"):
if not line:
continue
if line.startswith(' '):
( logtype, level ) = line[2:].split(": ")
x = levels.setdefault(level, {})
x[logtype] = True
# self.logger.info("levels: %s" % levels)
loginfo: Dict[str, Union[str, List[str]]]
if len(levels.keys()) == 1:
loginfo = { 'all': list(levels.keys())[0] }
else:
loginfo = { x: list(levels[x].keys()) for x in levels.keys() }
with self.access_lock:
self.loginfo = loginfo
# self.logger.info("loginfo: %s" % self.loginfo)
return True | [
"def",
"update_log_levels",
"(",
"self",
",",
"last_attempt",
":",
"float",
",",
"level",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"bool",
":",
"# self.logger.info(\"updating levels\")",
"text",
"=",
"self",
".",
"fetch_log_levels",
"(",
"level",
")",
"if",
"not",
"text",
":",
"# Ew.",
"with",
"self",
".",
"access_lock",
":",
"# EnvoyStats is immutable, so...",
"new_stats",
"=",
"EnvoyStats",
"(",
"max_live_age",
"=",
"self",
".",
"stats",
".",
"max_live_age",
",",
"max_ready_age",
"=",
"self",
".",
"stats",
".",
"max_ready_age",
",",
"created",
"=",
"self",
".",
"stats",
".",
"created",
",",
"last_update",
"=",
"self",
".",
"stats",
".",
"last_update",
",",
"last_attempt",
"=",
"last_attempt",
",",
"# THIS IS A CHANGE",
"update_errors",
"=",
"self",
".",
"stats",
".",
"update_errors",
"+",
"1",
",",
"# THIS IS A CHANGE",
"requests",
"=",
"self",
".",
"stats",
".",
"requests",
",",
"clusters",
"=",
"self",
".",
"stats",
".",
"clusters",
",",
"envoy",
"=",
"self",
".",
"stats",
".",
"envoy",
")",
"self",
".",
"stats",
"=",
"new_stats",
"return",
"False",
"levels",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"bool",
"]",
"]",
"=",
"{",
"}",
"for",
"line",
"in",
"text",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"not",
"line",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"' '",
")",
":",
"(",
"logtype",
",",
"level",
")",
"=",
"line",
"[",
"2",
":",
"]",
".",
"split",
"(",
"\": \"",
")",
"x",
"=",
"levels",
".",
"setdefault",
"(",
"level",
",",
"{",
"}",
")",
"x",
"[",
"logtype",
"]",
"=",
"True",
"# self.logger.info(\"levels: %s\" % levels)",
"loginfo",
":",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"if",
"len",
"(",
"levels",
".",
"keys",
"(",
")",
")",
"==",
"1",
":",
"loginfo",
"=",
"{",
"'all'",
":",
"list",
"(",
"levels",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"}",
"else",
":",
"loginfo",
"=",
"{",
"x",
":",
"list",
"(",
"levels",
"[",
"x",
"]",
".",
"keys",
"(",
")",
")",
"for",
"x",
"in",
"levels",
".",
"keys",
"(",
")",
"}",
"with",
"self",
".",
"access_lock",
":",
"self",
".",
"loginfo",
"=",
"loginfo",
"# self.logger.info(\"loginfo: %s\" % self.loginfo)",
"return",
"True"
] | [
203,
4
] | [
263,
23
] | python | en | ['en', 'error', 'th'] | False |
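A runnable sketch of the level-parsing loop in update_log_levels, applied to a made-up fragment of Envoy's /logging output (real output lists many more loggers):

text = "active loggers:\n  admin: info\n  http: debug\n  router: info\n"

levels = {}
for line in text.split("\n"):
    if line.startswith("  "):
        logtype, level = line[2:].split(": ")
        levels.setdefault(level, {})[logtype] = True

# One distinct level collapses to {'all': <level>}; otherwise map level -> loggers.
if len(levels.keys()) == 1:
    loginfo = {"all": list(levels.keys())[0]}
else:
    loginfo = {lvl: list(levels[lvl].keys()) for lvl in levels.keys()}

assert loginfo == {"info": ["admin", "router"], "debug": ["http"]}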
EnvoyStatsMgr.get_stats | (self) |
Get the current Envoy stats object, safely.
You MUST NOT hold the access_lock when calling this method.
|
Get the current Envoy stats object, safely. | def get_stats(self) -> EnvoyStats:
"""
Get the current Envoy stats object, safely.
You MUST NOT hold the access_lock when calling this method.
"""
with self.access_lock:
return self.stats | [
"def",
"get_stats",
"(",
"self",
")",
"->",
"EnvoyStats",
":",
"with",
"self",
".",
"access_lock",
":",
"return",
"self",
".",
"stats"
] | [
265,
4
] | [
273,
29
] | python | en | ['en', 'error', 'th'] | False |
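get_stats works because EnvoyStats objects are treated as immutable: handing out the current reference under the access lock is safe, since readers cannot mutate it and writers only ever swap in a whole new object. A minimal sketch of that pattern, using a hypothetical frozen dataclass:

import threading
from dataclasses import dataclass

@dataclass(frozen=True)
class Stats:
    requests: int

class Holder:
    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._stats = Stats(requests=0)

    def get(self) -> Stats:
        with self._lock:
            return self._stats        # safe: snapshots are never mutated

    def replace(self, new: Stats) -> None:
        with self._lock:
            self._stats = new         # writers swap in a whole new object

holder = Holder()
holder.replace(Stats(requests=42))
assert holder.get().requests == 42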
EnvoyStatsMgr.update_envoy_stats | (self, last_attempt: float) |
Heavy lifting around updating the Envoy stats.
You MUST hold the update lock when calling this method.
You MUST NOT hold the access lock when calling this method.
update_envoy_stats does all the work of talking to Envoy and computing
new stats, then grabs the access_lock just long enough to update the data
structures for others to look at.
|
Heavy lifting around updating the Envoy stats. | def update_envoy_stats(self, last_attempt: float) -> None:
"""
Heavy lifting around updating the Envoy stats.
You MUST hold the update lock when calling this method.
You MUST NOT hold the access lock when calling this method.
update_envoy_stats does all the work of talking to Envoy and computing
new stats, then grabs the access_lock just long enough to update the data
structures for others to look at.
"""
text = self.fetch_envoy_stats()
if not text:
# EnvoyStats is immutable, so...
new_stats = EnvoyStats(
max_live_age=self.stats.max_live_age,
max_ready_age=self.stats.max_ready_age,
created=self.stats.created,
last_update=self.stats.last_update,
last_attempt=last_attempt, # THIS IS A CHANGE
update_errors=self.stats.update_errors + 1, # THIS IS A CHANGE
requests=self.stats.requests,
clusters=self.stats.clusters,
envoy=self.stats.envoy
)
with self.access_lock:
self.stats = new_stats
return
# Parse stats into a hierarchy.
envoy_stats: Dict[str, Any] = {} # Ew.
for line in text.split("\n"):
if not line:
continue
# self.logger.info('line: %s' % line)
key, value = line.split(":")
keypath = key.split('.')
node = envoy_stats
for key in keypath[:-1]:
if key not in node:
node[key] = {}
node = node[key]
value = value.strip()
# Skip histograms for the moment.
# if value.startswith("P0("):
# continue
# # for field in value.split(' '):
# # if field.startswith('P95('):
# # value = field.split(',')
try:
node[keypath[-1]] = int(value)
except:
continue
# Now dig into clusters a bit more.
requests_info = {}
active_clusters = {}
if ("http" in envoy_stats) and ("ingress_http" in envoy_stats["http"]):
ingress_stats = envoy_stats["http"]["ingress_http"]
requests_total = ingress_stats.get("downstream_rq_total", 0)
requests_4xx = ingress_stats.get('downstream_rq_4xx', 0)
requests_5xx = ingress_stats.get('downstream_rq_5xx', 0)
requests_bad = requests_4xx + requests_5xx
requests_ok = requests_total - requests_bad
requests_info = {
"total": requests_total,
"4xx": requests_4xx,
"5xx": requests_5xx,
"bad": requests_bad,
"ok": requests_ok,
}
if "cluster" in envoy_stats:
for cluster_name in envoy_stats['cluster']:
cluster = envoy_stats['cluster'][cluster_name]
# # Toss any _%d -- that's madness with our Istio code at the moment.
# cluster_name = re.sub('_\d+$', '', cluster_name)
# mapping_name = active_cluster_map[cluster_name]
# active_mappings[mapping_name] = {}
# self.logger.info("cluster %s stats: %s" % (cluster_name, cluster))
healthy_percent: Optional[int]
healthy_members = cluster['membership_healthy']
total_members = cluster['membership_total']
healthy_percent = percentage(healthy_members, total_members)
update_attempts = cluster['update_attempt']
update_successes = cluster['update_success']
update_percent = percentage(update_successes, update_attempts)
# Weird.
# upstream_ok = cluster.get('upstream_rq_2xx', 0)
# upstream_total = cluster.get('upstream_rq_pending_total', 0)
upstream_total = cluster.get('upstream_rq_completed', 0)
upstream_4xx = cluster.get('upstream_rq_4xx', 0)
upstream_5xx = cluster.get('upstream_rq_5xx', 0)
upstream_bad = upstream_5xx # used to include 4XX here, but that seems wrong.
upstream_ok = upstream_total - upstream_bad
# self.logger.info("%s total %s bad %s ok %s" % (cluster_name, upstream_total, upstream_bad, upstream_ok))
if upstream_total > 0:
healthy_percent = percentage(upstream_ok, upstream_total)
# self.logger.debug("cluster %s is %d%% healthy" % (cluster_name, healthy_percent))
else:
healthy_percent = None
# self.logger.debug("cluster %s has had no requests" % cluster_name)
active_clusters[cluster_name] = {
'healthy_members': healthy_members,
'total_members': total_members,
'healthy_percent': healthy_percent,
'update_attempts': update_attempts,
'update_successes': update_successes,
'update_percent': update_percent,
'upstream_ok': upstream_ok,
'upstream_4xx': upstream_4xx,
'upstream_5xx': upstream_5xx,
'upstream_bad': upstream_bad
}
# OK, we're now officially finished with all the hard stuff.
last_update = time.time()
# Finally, set up the new EnvoyStats.
new_stats = EnvoyStats(
max_live_age=self.stats.max_live_age,
max_ready_age=self.stats.max_ready_age,
created=self.stats.created,
last_update=last_update, # THIS IS A CHANGE
last_attempt=last_attempt, # THIS IS A CHANGE
update_errors=self.stats.update_errors,
requests=requests_info, # THIS IS A CHANGE
clusters=active_clusters, # THIS IS A CHANGE
envoy=envoy_stats # THIS IS A CHANGE
)
# Make sure we hold the access_lock while messing with self.stats!
with self.access_lock:
self.stats = new_stats | [
"def",
"update_envoy_stats",
"(",
"self",
",",
"last_attempt",
":",
"float",
")",
"->",
"None",
":",
"text",
"=",
"self",
".",
"fetch_envoy_stats",
"(",
")",
"if",
"not",
"text",
":",
"# EnvoyStats is immutable, so...",
"new_stats",
"=",
"EnvoyStats",
"(",
"max_live_age",
"=",
"self",
".",
"stats",
".",
"max_live_age",
",",
"max_ready_age",
"=",
"self",
".",
"stats",
".",
"max_ready_age",
",",
"created",
"=",
"self",
".",
"stats",
".",
"created",
",",
"last_update",
"=",
"self",
".",
"stats",
".",
"last_update",
",",
"last_attempt",
"=",
"last_attempt",
",",
"# THIS IS A CHANGE",
"update_errors",
"=",
"self",
".",
"stats",
".",
"update_errors",
"+",
"1",
",",
"# THIS IS A CHANGE",
"requests",
"=",
"self",
".",
"stats",
".",
"requests",
",",
"clusters",
"=",
"self",
".",
"stats",
".",
"clusters",
",",
"envoy",
"=",
"self",
".",
"stats",
".",
"envoy",
")",
"with",
"self",
".",
"access_lock",
":",
"self",
".",
"stats",
"=",
"new_stats",
"return",
"# Parse stats into a hierarchy.",
"envoy_stats",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"# Ew.",
"for",
"line",
"in",
"text",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"not",
"line",
":",
"continue",
"# self.logger.info('line: %s' % line)",
"key",
",",
"value",
"=",
"line",
".",
"split",
"(",
"\":\"",
")",
"keypath",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"node",
"=",
"envoy_stats",
"for",
"key",
"in",
"keypath",
"[",
":",
"-",
"1",
"]",
":",
"if",
"key",
"not",
"in",
"node",
":",
"node",
"[",
"key",
"]",
"=",
"{",
"}",
"node",
"=",
"node",
"[",
"key",
"]",
"value",
"=",
"value",
".",
"strip",
"(",
")",
"# Skip histograms for the moment.",
"# if value.startswith(\"P0(\"):",
"# continue",
"# # for field in value.split(' '):",
"# # if field.startswith('P95('):",
"# # value = field.split(',')",
"try",
":",
"node",
"[",
"keypath",
"[",
"-",
"1",
"]",
"]",
"=",
"int",
"(",
"value",
")",
"except",
":",
"continue",
"# Now dig into clusters a bit more.",
"requests_info",
"=",
"{",
"}",
"active_clusters",
"=",
"{",
"}",
"if",
"(",
"\"http\"",
"in",
"envoy_stats",
")",
"and",
"(",
"\"ingress_http\"",
"in",
"envoy_stats",
"[",
"\"http\"",
"]",
")",
":",
"ingress_stats",
"=",
"envoy_stats",
"[",
"\"http\"",
"]",
"[",
"\"ingress_http\"",
"]",
"requests_total",
"=",
"ingress_stats",
".",
"get",
"(",
"\"downstream_rq_total\"",
",",
"0",
")",
"requests_4xx",
"=",
"ingress_stats",
".",
"get",
"(",
"'downstream_rq_4xx'",
",",
"0",
")",
"requests_5xx",
"=",
"ingress_stats",
".",
"get",
"(",
"'downstream_rq_5xx'",
",",
"0",
")",
"requests_bad",
"=",
"requests_4xx",
"+",
"requests_5xx",
"requests_ok",
"=",
"requests_total",
"-",
"requests_bad",
"requests_info",
"=",
"{",
"\"total\"",
":",
"requests_total",
",",
"\"4xx\"",
":",
"requests_4xx",
",",
"\"5xx\"",
":",
"requests_5xx",
",",
"\"bad\"",
":",
"requests_bad",
",",
"\"ok\"",
":",
"requests_ok",
",",
"}",
"if",
"\"cluster\"",
"in",
"envoy_stats",
":",
"for",
"cluster_name",
"in",
"envoy_stats",
"[",
"'cluster'",
"]",
":",
"cluster",
"=",
"envoy_stats",
"[",
"'cluster'",
"]",
"[",
"cluster_name",
"]",
"# # Toss any _%d -- that's madness with our Istio code at the moment.",
"# cluster_name = re.sub('_\\d+$', '', cluster_name)",
"# mapping_name = active_cluster_map[cluster_name]",
"# active_mappings[mapping_name] = {}",
"# self.logger.info(\"cluster %s stats: %s\" % (cluster_name, cluster))",
"healthy_percent",
":",
"Optional",
"[",
"int",
"]",
"healthy_members",
"=",
"cluster",
"[",
"'membership_healthy'",
"]",
"total_members",
"=",
"cluster",
"[",
"'membership_total'",
"]",
"healthy_percent",
"=",
"percentage",
"(",
"healthy_members",
",",
"total_members",
")",
"update_attempts",
"=",
"cluster",
"[",
"'update_attempt'",
"]",
"update_successes",
"=",
"cluster",
"[",
"'update_success'",
"]",
"update_percent",
"=",
"percentage",
"(",
"update_successes",
",",
"update_attempts",
")",
"# Weird.",
"# upstream_ok = cluster.get('upstream_rq_2xx', 0)",
"# upstream_total = cluster.get('upstream_rq_pending_total', 0)",
"upstream_total",
"=",
"cluster",
".",
"get",
"(",
"'upstream_rq_completed'",
",",
"0",
")",
"upstream_4xx",
"=",
"cluster",
".",
"get",
"(",
"'upstream_rq_4xx'",
",",
"0",
")",
"upstream_5xx",
"=",
"cluster",
".",
"get",
"(",
"'upstream_rq_5xx'",
",",
"0",
")",
"upstream_bad",
"=",
"upstream_5xx",
"# used to include 4XX here, but that seems wrong.",
"upstream_ok",
"=",
"upstream_total",
"-",
"upstream_bad",
"# self.logger.info(\"%s total %s bad %s ok %s\" % (cluster_name, upstream_total, upstream_bad, upstream_ok))",
"if",
"upstream_total",
">",
"0",
":",
"healthy_percent",
"=",
"percentage",
"(",
"upstream_ok",
",",
"upstream_total",
")",
"# self.logger.debug(\"cluster %s is %d%% healthy\" % (cluster_name, healthy_percent))",
"else",
":",
"healthy_percent",
"=",
"None",
"# self.logger.debug(\"cluster %s has had no requests\" % cluster_name)",
"active_clusters",
"[",
"cluster_name",
"]",
"=",
"{",
"'healthy_members'",
":",
"healthy_members",
",",
"'total_members'",
":",
"total_members",
",",
"'healthy_percent'",
":",
"healthy_percent",
",",
"'update_attempts'",
":",
"update_attempts",
",",
"'update_successes'",
":",
"update_successes",
",",
"'update_percent'",
":",
"update_percent",
",",
"'upstream_ok'",
":",
"upstream_ok",
",",
"'upstream_4xx'",
":",
"upstream_4xx",
",",
"'upstream_5xx'",
":",
"upstream_5xx",
",",
"'upstream_bad'",
":",
"upstream_bad",
"}",
"# OK, we're now officially finished with all the hard stuff.",
"last_update",
"=",
"time",
".",
"time",
"(",
")",
"# Finally, set up the new EnvoyStats.",
"new_stats",
"=",
"EnvoyStats",
"(",
"max_live_age",
"=",
"self",
".",
"stats",
".",
"max_live_age",
",",
"max_ready_age",
"=",
"self",
".",
"stats",
".",
"max_ready_age",
",",
"created",
"=",
"self",
".",
"stats",
".",
"created",
",",
"last_update",
"=",
"last_update",
",",
"# THIS IS A CHANGE",
"last_attempt",
"=",
"last_attempt",
",",
"# THIS IS A CHANGE",
"update_errors",
"=",
"self",
".",
"stats",
".",
"update_errors",
",",
"requests",
"=",
"requests_info",
",",
"# THIS IS A CHANGE",
"clusters",
"=",
"active_clusters",
",",
"# THIS IS A CHANGE",
"envoy",
"=",
"envoy_stats",
"# THIS IS A CHANGE",
")",
"# Make sure we hold the access_lock while messing with self.stats!",
"with",
"self",
".",
"access_lock",
":",
"self",
".",
"stats",
"=",
"new_stats"
] | [
287,
4
] | [
451,
34
] | python | en | ['en', 'error', 'th'] | False |
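Two pieces of update_envoy_stats are worth isolating: the dotted-keypath parse that builds a nested dict from Envoy's flat stats text, and the percentage() helper it calls. percentage() is defined elsewhere in this module; the version below is an assumed stand-in that guards the zero-denominator case:

def percentage(part: int, whole: int) -> int:
    # Assumed behavior, not the module's actual implementation.
    return int((part * 100) / whole) if whole > 0 else 0

text = "cluster.foo.membership_healthy: 3\ncluster.foo.membership_total: 4"

stats = {}
for line in text.split("\n"):
    key, value = line.split(":")
    node = stats
    keypath = key.split(".")
    for k in keypath[:-1]:
        node = node.setdefault(k, {})   # descend, creating levels as needed
    node[keypath[-1]] = int(value.strip())

cluster = stats["cluster"]["foo"]
assert percentage(cluster["membership_healthy"], cluster["membership_total"]) == 75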
EnvoyStatsMgr.update | (self) |
Update the Envoy stats object, including our take on Envoy's loglevel and
lower-level statistics.
You MUST NOT hold the update lock when calling this method.
You MUST NOT hold the access lock when calling this method.
The first thing that update_envoy_stats does is to acquire the update_lock.
If it cannot do so immediately, it assumes that another update is already
running, and returns without doing anything further.
update_envoy_stats uses update_log_levels and update_envoy_stats to do all
the heavy lifting around talking to Envoy, managing the access_lock, and
actually writing new data into the Envoy stats object.
|
Update the Envoy stats object, including our take on Envoy's loglevel and
lower-level statistics. | def update(self) -> None:
"""
Update the Envoy stats object, including our take on Envoy's loglevel and
lower-level statistics.
You MUST NOT hold the update lock when calling this method.
You MUST NOT hold the access lock when calling this method.
The first thing that update_envoy_stats does is to acquire the update_lock.
If it cannot do so immediately, it assumes that another update is already
running, and returns without doing anything further.
update_envoy_stats uses update_log_levels and update_envoy_stats to do all
the heavy lifting around talking to Envoy, managing the access_lock, and
actually writing new data into the Envoy stats object.
"""
# self.logger.info("updating estats")
# First up, try bailing early.
if not self.update_lock.acquire(blocking=False):
self.logger.warning("EStats update: skipping due to lock contention")
return
# If here, we have the lock. Make sure it gets released!
try:
# Remember when we started.
last_attempt = time.time()
self.update_log_levels(last_attempt)
self.update_envoy_stats(last_attempt)
except Exception as e:
self.logger.exception("could not update Envoy stats: %s" % e)
finally:
self.update_lock.release() | [
"def",
"update",
"(",
"self",
")",
"->",
"None",
":",
"# self.logger.info(\"updating estats\")",
"# First up, try bailing early.",
"if",
"not",
"self",
".",
"update_lock",
".",
"acquire",
"(",
"blocking",
"=",
"False",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"EStats update: skipping due to lock contention\"",
")",
"return",
"# If here, we have the lock. Make sure it gets released!",
"try",
":",
"# Remember when we started.",
"last_attempt",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"update_log_levels",
"(",
"last_attempt",
")",
"self",
".",
"update_envoy_stats",
"(",
"last_attempt",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"\"could not update Envoy stats: %s\"",
"%",
"e",
")",
"finally",
":",
"self",
".",
"update_lock",
".",
"release",
"(",
")"
] | [
455,
4
] | [
489,
38
] | python | en | ['en', 'error', 'th'] | False |
TestTopologicallySorted.test_Valid | (self) | Test that sorting works on a valid graph with one possible order. | Test that sorting works on a valid graph with one possible order. | def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b']) | [
"def",
"test_Valid",
"(",
"self",
")",
":",
"graph",
"=",
"{",
"'a'",
":",
"[",
"'b'",
",",
"'c'",
"]",
",",
"'b'",
":",
"[",
"]",
",",
"'c'",
":",
"[",
"'d'",
"]",
",",
"'d'",
":",
"[",
"'b'",
"]",
",",
"}",
"def",
"GetEdge",
"(",
"node",
")",
":",
"return",
"tuple",
"(",
"graph",
"[",
"node",
"]",
")",
"self",
".",
"assertEqual",
"(",
"gyp",
".",
"common",
".",
"TopologicallySorted",
"(",
"graph",
".",
"keys",
"(",
")",
",",
"GetEdge",
")",
",",
"[",
"'a'",
",",
"'c'",
",",
"'d'",
",",
"'b'",
"]",
")"
] | [
14,
2
] | [
26,
27
] | python | en | ['en', 'en', 'en'] | True |
TestTopologicallySorted.test_Cycle | (self) | Test that an exception is thrown on a cyclic graph. | Test that an exception is thrown on a cyclic graph. | def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge) | [
"def",
"test_Cycle",
"(",
"self",
")",
":",
"graph",
"=",
"{",
"'a'",
":",
"[",
"'b'",
"]",
",",
"'b'",
":",
"[",
"'c'",
"]",
",",
"'c'",
":",
"[",
"'d'",
"]",
",",
"'d'",
":",
"[",
"'a'",
"]",
",",
"}",
"def",
"GetEdge",
"(",
"node",
")",
":",
"return",
"tuple",
"(",
"graph",
"[",
"node",
"]",
")",
"self",
".",
"assertRaises",
"(",
"gyp",
".",
"common",
".",
"CycleError",
",",
"gyp",
".",
"common",
".",
"TopologicallySorted",
",",
"graph",
".",
"keys",
"(",
")",
",",
"GetEdge",
")"
] | [
28,
2
] | [
40,
28
] | python | en | ['en', 'en', 'en'] | True |
checkpoint | (ctx) |
Checkpoint operations
A Checkpoint is a bundle of one or more batches of data with one or more
Expectation Suites.
A Checkpoint can be as simple as one batch of data paired with one
Expectation Suite.
A Checkpoint can be as complex as many batches of data across different
datasources paired with one or more Expectation Suites each.
|
Checkpoint operations | def checkpoint(ctx):
"""
Checkpoint operations
A Checkpoint is a bundle of one or more batches of data with one or more
Expectation Suites.
A Checkpoint can be as simple as one batch of data paired with one
Expectation Suite.
A Checkpoint can be as complex as many batches of data across different
datasources paired with one or more Expectation Suites each.
"""
directory: str = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
context: DataContext = toolkit.load_data_context_with_error_handling(
directory=directory,
from_cli_upgrade_command=False,
)
# TODO consider moving this all the way up in to the CLIState constructor
ctx.obj.data_context = context
usage_stats_prefix = f"cli.checkpoint.{ctx.invoked_subcommand}"
toolkit.send_usage_message(
data_context=context,
event=f"{usage_stats_prefix}.begin",
success=True,
)
ctx.obj.usage_event_end = f"{usage_stats_prefix}.end" | [
"def",
"checkpoint",
"(",
"ctx",
")",
":",
"directory",
":",
"str",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"context",
":",
"DataContext",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"False",
",",
")",
"# TODO consider moving this all the way up in to the CLIState constructor",
"ctx",
".",
"obj",
".",
"data_context",
"=",
"context",
"usage_stats_prefix",
"=",
"f\"cli.checkpoint.{ctx.invoked_subcommand}\"",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"f\"{usage_stats_prefix}.begin\"",
",",
"success",
"=",
"True",
",",
")",
"ctx",
".",
"obj",
".",
"usage_event_end",
"=",
"f\"{usage_stats_prefix}.end\""
] | [
57,
0
] | [
86,
57
] | python | en | ['en', 'error', 'th'] | False |
checkpoint_new | (ctx, name, jupyter) | Create a new Checkpoint for easy deployments.
NAME is the name of the Checkpoint to create.
| Create a new Checkpoint for easy deployments. | def checkpoint_new(ctx, name, jupyter):
"""Create a new Checkpoint for easy deployments.
NAME is the name of the Checkpoint to create.
"""
_checkpoint_new(ctx=ctx, checkpoint_name=name, jupyter=jupyter) | [
"def",
"checkpoint_new",
"(",
"ctx",
",",
"name",
",",
"jupyter",
")",
":",
"_checkpoint_new",
"(",
"ctx",
"=",
"ctx",
",",
"checkpoint_name",
"=",
"name",
",",
"jupyter",
"=",
"jupyter",
")"
] | [
98,
0
] | [
103,
67
] | python | en | ['en', 'en', 'en'] | True |
checkpoint_list | (ctx) | List configured checkpoints. | List configured checkpoints. | def checkpoint_list(ctx):
"""List configured checkpoints."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
checkpoints: List[str] = context.list_checkpoints()
if not checkpoints:
cli_message(
"No Checkpoints found.\n"
" - Use the command `great_expectations checkpoint new` to create one."
)
toolkit.send_usage_message(context, event=usage_event_end, success=True)
sys.exit(0)
number_found: int = len(checkpoints)
plural: str = "s" if number_found > 1 else ""
message: str = f"Found {number_found} Checkpoint{plural}."
pretty_list: list = [f" - <cyan>{cp}</cyan>" for cp in checkpoints]
cli_message_list(pretty_list, list_intro_string=message)
toolkit.send_usage_message(context, event=usage_event_end, success=True) | [
"def",
"checkpoint_list",
"(",
"ctx",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"checkpoints",
":",
"List",
"[",
"str",
"]",
"=",
"context",
".",
"list_checkpoints",
"(",
")",
"if",
"not",
"checkpoints",
":",
"cli_message",
"(",
"\"No Checkpoints found.\\n\"",
"\" - Use the command `great_expectations checkpoint new` to create one.\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"number_found",
":",
"int",
"=",
"len",
"(",
"checkpoints",
")",
"plural",
":",
"str",
"=",
"\"s\"",
"if",
"number_found",
">",
"1",
"else",
"\"\"",
"message",
":",
"str",
"=",
"f\"Found {number_found} Checkpoint{plural}.\"",
"pretty_list",
":",
"list",
"=",
"[",
"f\" - <cyan>{cp}</cyan>\"",
"for",
"cp",
"in",
"checkpoints",
"]",
"cli_message_list",
"(",
"pretty_list",
",",
"list_intro_string",
"=",
"message",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")"
] | [
173,
0
] | [
192,
76
] | python | en | ['en', 'en', 'en'] | True |
checkpoint_delete | (ctx, checkpoint) | Delete a Checkpoint. | Delete a Checkpoint. | def checkpoint_delete(ctx, checkpoint):
"""Delete a Checkpoint."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
toolkit.delete_checkpoint(
context=context,
checkpoint_name=checkpoint,
usage_event=usage_event_end,
assume_yes=ctx.obj.assume_yes,
)
toolkit.send_usage_message(context, event=usage_event_end, success=True)
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
message=f"<red>{e}</red>",
)
return
cli_message(f'Checkpoint "{checkpoint}" deleted.')
sys.exit(0) | [
"def",
"checkpoint_delete",
"(",
"ctx",
",",
"checkpoint",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"toolkit",
".",
"delete_checkpoint",
"(",
"context",
"=",
"context",
",",
"checkpoint_name",
"=",
"checkpoint",
",",
"usage_event",
"=",
"usage_event_end",
",",
"assume_yes",
"=",
"ctx",
".",
"obj",
".",
"assume_yes",
",",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{e}</red>\"",
",",
")",
"return",
"cli_message",
"(",
"f'Checkpoint \"{checkpoint}\" deleted.'",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | [
198,
0
] | [
220,
15
] | python | en | ['en', 'it', 'en'] | True |
checkpoint_run | (ctx, checkpoint) | Run a Checkpoint. | Run a Checkpoint. | def checkpoint_run(ctx, checkpoint):
"""Run a Checkpoint."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
result: CheckpointResult = toolkit.run_checkpoint(
context=context,
checkpoint_name=checkpoint,
usage_event=usage_event_end,
)
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
message=f"<red>{e}</red>",
)
return
if not result["success"]:
cli_message(string="Validation failed!")
toolkit.send_usage_message(context, event=usage_event_end, success=True)
print_validation_operator_results_details(result=result)
sys.exit(1)
cli_message("Validation succeeded!")
toolkit.send_usage_message(context, event=usage_event_end, success=True)
print_validation_operator_results_details(result=result)
sys.exit(0) | [
"def",
"checkpoint_run",
"(",
"ctx",
",",
"checkpoint",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"result",
":",
"CheckpointResult",
"=",
"toolkit",
".",
"run_checkpoint",
"(",
"context",
"=",
"context",
",",
"checkpoint_name",
"=",
"checkpoint",
",",
"usage_event",
"=",
"usage_event_end",
",",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{e}</red>\"",
",",
")",
"return",
"if",
"not",
"result",
"[",
"\"success\"",
"]",
":",
"cli_message",
"(",
"string",
"=",
"\"Validation failed!\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"print_validation_operator_results_details",
"(",
"result",
"=",
"result",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"cli_message",
"(",
"\"Validation succeeded!\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"print_validation_operator_results_details",
"(",
"result",
"=",
"result",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | [
226,
0
] | [
254,
15
] | python | en | ['ro', 'gd', 'en'] | False |
checkpoint_script | (ctx, checkpoint) |
Create a python script to run a Checkpoint.
Checkpoints can be run directly without this script using the
`great_expectations Checkpoint run` command.
This script is provided for those who wish to run Checkpoints via python.
|
Create a python script to run a Checkpoint. | def checkpoint_script(ctx, checkpoint):
"""
Create a python script to run a Checkpoint.
Checkpoints can be run directly without this script using the
`great_expectations Checkpoint run` command.
This script is provided for those who wish to run Checkpoints via python.
"""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
toolkit.validate_checkpoint(
context=context, checkpoint_name=checkpoint, usage_event=usage_event_end
)
script_name: str = f"run_{checkpoint}.py"
script_path: str = os.path.join(
context.root_directory, context.GE_UNCOMMITTED_DIR, script_name
)
if os.path.isfile(script_path):
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
message=f"""<red>Warning! A script named {script_name} already exists and this command will not overwrite it.</red>
- Existing file path: {script_path}""",
)
_write_checkpoint_script_to_disk(
context_directory=context.root_directory,
checkpoint_name=checkpoint,
script_path=script_path,
)
cli_message(
f"""<green>A python script was created that runs the Checkpoint named: `{checkpoint}`</green>
- The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`
- The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`"""
)
toolkit.send_usage_message(context, event=usage_event_end, success=True) | [
"def",
"checkpoint_script",
"(",
"ctx",
",",
"checkpoint",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"toolkit",
".",
"validate_checkpoint",
"(",
"context",
"=",
"context",
",",
"checkpoint_name",
"=",
"checkpoint",
",",
"usage_event",
"=",
"usage_event_end",
")",
"script_name",
":",
"str",
"=",
"f\"run_{checkpoint}.py\"",
"script_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"context",
".",
"GE_UNCOMMITTED_DIR",
",",
"script_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"script_path",
")",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"\"\"<red>Warning! A script named {script_name} already exists and this command will not overwrite it.</red>\n - Existing file path: {script_path}\"\"\"",
",",
")",
"_write_checkpoint_script_to_disk",
"(",
"context_directory",
"=",
"context",
".",
"root_directory",
",",
"checkpoint_name",
"=",
"checkpoint",
",",
"script_path",
"=",
"script_path",
",",
")",
"cli_message",
"(",
"f\"\"\"<green>A python script was created that runs the Checkpoint named: `{checkpoint}`</green>\n - The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`\n - The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`\"\"\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")"
] | [
289,
0
] | [
328,
76
] | python | en | ['en', 'error', 'th'] | False |
Formatter.get_style_defs | (self, arg='') |
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
|
Return the style definitions for the current style as a string. | def get_style_defs(self, arg=''):
"""
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
"""
return '' | [
"def",
"get_style_defs",
"(",
"self",
",",
"arg",
"=",
"''",
")",
":",
"return",
"''"
] | [
76,
4
] | [
84,
17
] | python | en | ['en', 'error', 'th'] | False |
Formatter.format | (self, tokensource, outfile) |
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
|
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
| def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile) | [
"def",
"format",
"(",
"self",
",",
"tokensource",
",",
"outfile",
")",
":",
"if",
"self",
".",
"encoding",
":",
"# wrap the outfile in a StreamWriter",
"outfile",
"=",
"codecs",
".",
"lookup",
"(",
"self",
".",
"encoding",
")",
"[",
"3",
"]",
"(",
"outfile",
")",
"return",
"self",
".",
"format_unencoded",
"(",
"tokensource",
",",
"outfile",
")"
] | [
86,
4
] | [
94,
58
] | python | en | ['en', 'error', 'th'] | False |
PandasDatasource.build_configuration | (
cls,
data_asset_type=None,
batch_kwargs_generators=None,
boto3_options=None,
reader_method=None,
reader_options=None,
limit=None,
**kwargs
) |
Build a full configuration object for a datasource, potentially including generators with defaults.
Args:
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: Generator configuration dictionary
boto3_options: Optional dictionary with key-value pairs to pass to boto3 during instantiation.
reader_method: Optional default reader_method for generated batches
reader_options: Optional default reader_options for generated batches
limit: Optional default limit for generated batches
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
|
Build a full configuration object for a datasource, potentially including generators with defaults. | def build_configuration(
cls,
data_asset_type=None,
batch_kwargs_generators=None,
boto3_options=None,
reader_method=None,
reader_options=None,
limit=None,
**kwargs
):
"""
Build a full configuration object for a datasource, potentially including generators with defaults.
Args:
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: Generator configuration dictionary
boto3_options: Optional dictionary with key-value pairs to pass to boto3 during instantiation.
reader_method: Optional default reader_method for generated batches
reader_options: Optional default reader_options for generated batches
limit: Optional default limit for generated batches
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
"""
if data_asset_type is None:
data_asset_type = {
"class_name": "PandasDataset",
"module_name": "great_expectations.dataset",
}
else:
data_asset_type = classConfigSchema.dump(ClassConfig(**data_asset_type))
configuration = kwargs
configuration["data_asset_type"] = data_asset_type
if batch_kwargs_generators:
configuration["batch_kwargs_generators"] = batch_kwargs_generators
if boto3_options is not None:
if isinstance(boto3_options, dict):
configuration.update(boto3_options)
else:
raise ValueError(
"boto3_options must be a dictionary of key-value pairs to pass to boto3 upon "
"initialization."
)
configuration["boto3_options"] = boto3_options
if reader_options is not None:
if isinstance(reader_options, dict):
configuration.update(reader_options)
else:
raise ValueError(
"boto3_options must be a dictionary of key-value pairs to pass to boto3 upon "
"initialization."
)
if reader_method is not None:
configuration["reader_method"] = reader_method
if limit is not None:
configuration["limit"] = limit
return configuration | [
"def",
"build_configuration",
"(",
"cls",
",",
"data_asset_type",
"=",
"None",
",",
"batch_kwargs_generators",
"=",
"None",
",",
"boto3_options",
"=",
"None",
",",
"reader_method",
"=",
"None",
",",
"reader_options",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"data_asset_type",
"is",
"None",
":",
"data_asset_type",
"=",
"{",
"\"class_name\"",
":",
"\"PandasDataset\"",
",",
"\"module_name\"",
":",
"\"great_expectations.dataset\"",
",",
"}",
"else",
":",
"data_asset_type",
"=",
"classConfigSchema",
".",
"dump",
"(",
"ClassConfig",
"(",
"*",
"*",
"data_asset_type",
")",
")",
"configuration",
"=",
"kwargs",
"configuration",
"[",
"\"data_asset_type\"",
"]",
"=",
"data_asset_type",
"if",
"batch_kwargs_generators",
":",
"configuration",
"[",
"\"batch_kwargs_generators\"",
"]",
"=",
"batch_kwargs_generators",
"if",
"boto3_options",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"boto3_options",
",",
"dict",
")",
":",
"configuration",
".",
"update",
"(",
"boto3_options",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"boto3_options must be a dictionary of key-value pairs to pass to boto3 upon \"",
"\"initialization.\"",
")",
"configuration",
"[",
"\"boto3_options\"",
"]",
"=",
"boto3_options",
"if",
"reader_options",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"reader_options",
",",
"dict",
")",
":",
"configuration",
".",
"update",
"(",
"reader_options",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"boto3_options must be a dictionary of key-value pairs to pass to boto3 upon \"",
"\"initialization.\"",
")",
"if",
"reader_method",
"is",
"not",
"None",
":",
"configuration",
"[",
"\"reader_method\"",
"]",
"=",
"reader_method",
"if",
"limit",
"is",
"not",
"None",
":",
"configuration",
"[",
"\"limit\"",
"]",
"=",
"limit",
"return",
"configuration"
] | [
39,
4
] | [
104,
28
] | python | en | ['en', 'error', 'th'] | False |
PandasDatasource._infer_default_options | (self, reader_fn: Callable, reader_options: dict) |
Allows reader options to be customized based on file context before loading to a DataFrame
Args:
reader_method (str): pandas reader method
reader_options: Current options and defaults set to pass to the reader method
Returns:
dict: A copy of the reader options post-inference
|
Allows reader options to be customized based on file context before loading to a DataFrame | def _infer_default_options(self, reader_fn: Callable, reader_options: dict) -> dict:
"""
Allows reader options to be customized based on file context before loading to a DataFrame
Args:
reader_method (str): pandas reader method
reader_options: Current options and defaults set to pass to the reader method
Returns:
dict: A copy of the reader options post-inference
"""
while isinstance(reader_fn, partial):
# reader_fn might be partial so need to unwrap to get underlying method
reader_fn = reader_fn.func
name = reader_fn.__name__
if name == "read_parquet":
return {}
if name == "read_excel":
return {}
else:
return {"encoding": "utf-8"} | [
"def",
"_infer_default_options",
"(",
"self",
",",
"reader_fn",
":",
"Callable",
",",
"reader_options",
":",
"dict",
")",
"->",
"dict",
":",
"while",
"isinstance",
"(",
"reader_fn",
",",
"partial",
")",
":",
"# reader_fn might be partial so need to unwrap to get underlying method",
"reader_fn",
"=",
"reader_fn",
".",
"func",
"name",
"=",
"reader_fn",
".",
"__name__",
"if",
"name",
"==",
"\"read_parquet\"",
":",
"return",
"{",
"}",
"if",
"name",
"==",
"\"read_excel\"",
":",
"return",
"{",
"}",
"else",
":",
"return",
"{",
"\"encoding\"",
":",
"\"utf-8\"",
"}"
] | [
291,
4
] | [
311,
40
] | python | en | ['en', 'error', 'th'] | False |
PandasDatasource._get_reader_fn | (self, reader_method=None, path=None) | Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the
correct reader_method.
Args:
reader_method (str): the name of the reader method to use, if available.
path (str): the to use to guess
Returns:
ReaderMethod to use for the filepath
| Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the
correct reader_method. | def _get_reader_fn(self, reader_method=None, path=None):
"""Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the
correct reader_method.
Args:
reader_method (str): the name of the reader method to use, if available.
path (str): the to use to guess
Returns:
ReaderMethod to use for the filepath
"""
if reader_method is None and path is None:
raise BatchKwargsError(
"Unable to determine pandas reader function without reader_method or path.",
{"reader_method": reader_method},
)
reader_options = None
if reader_method is None:
path_guess = self.guess_reader_method_from_path(path)
reader_method = path_guess["reader_method"]
reader_options = path_guess.get(
"reader_options"
) # This may not be there; use None in that case
try:
reader_fn = getattr(pd, reader_method)
if reader_options:
reader_fn = partial(reader_fn, **reader_options)
return reader_fn
except AttributeError:
raise BatchKwargsError(
"Unable to find reader_method %s in pandas." % reader_method,
{"reader_method": reader_method},
) | [
"def",
"_get_reader_fn",
"(",
"self",
",",
"reader_method",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"reader_method",
"is",
"None",
"and",
"path",
"is",
"None",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to determine pandas reader function without reader_method or path.\"",
",",
"{",
"\"reader_method\"",
":",
"reader_method",
"}",
",",
")",
"reader_options",
"=",
"None",
"if",
"reader_method",
"is",
"None",
":",
"path_guess",
"=",
"self",
".",
"guess_reader_method_from_path",
"(",
"path",
")",
"reader_method",
"=",
"path_guess",
"[",
"\"reader_method\"",
"]",
"reader_options",
"=",
"path_guess",
".",
"get",
"(",
"\"reader_options\"",
")",
"# This may not be there; use None in that case",
"try",
":",
"reader_fn",
"=",
"getattr",
"(",
"pd",
",",
"reader_method",
")",
"if",
"reader_options",
":",
"reader_fn",
"=",
"partial",
"(",
"reader_fn",
",",
"*",
"*",
"reader_options",
")",
"return",
"reader_fn",
"except",
"AttributeError",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to find reader_method %s in pandas.\"",
"%",
"reader_method",
",",
"{",
"\"reader_method\"",
":",
"reader_method",
"}",
",",
")"
] | [
313,
4
] | [
348,
13
] | python | en | ['en', 'en', 'en'] | True |
norm_to_pixel_s1 | (pose_3d, camera, norm) |
pose_3d: 3d joints with absolute location in the camera coordinate system (meters)
pose_3d.shape = [T, K, N], e.g. [1500, 17, 3]
pose_2d: 2d joints with pixel location in the images coordinate system (pixels)
pose_3d.shape = [T, K, M], e.g. [1500, 17, 2]
return: normed_3d: root joint contain relative [x,y] offset and absolute depth of root Z. others joints are normed 3d joints in pixel unit
normed_2d: zero-center root with resize into a fixed bbox
|
pose_3d: 3d joints with absolute location in the camera coordinate system (meters)
pose_3d.shape = [T, K, N], e.g. [1500, 17, 3]
pose_2d: 2d joints with pixel location in the images coordinate system (pixels)
pose_3d.shape = [T, K, M], e.g. [1500, 17, 2]
return: normed_3d: root joint contain relative [x,y] offset and absolute depth of root Z. others joints are normed 3d joints in pixel unit
normed_2d: zero-center root with resize into a fixed bbox
| def norm_to_pixel_s1(pose_3d, camera, norm):
"""
pose_3d: 3d joints with absolute location in the camera coordinate system (meters)
pose_3d.shape = [T, K, N], e.g. [1500, 17, 3]
pose_2d: 2d joints with pixel location in the images coordinate system (pixels)
pose_3d.shape = [T, K, M], e.g. [1500, 17, 2]
return: normed_3d: root joint contain relative [x,y] offset and absolute depth of root Z. others joints are normed 3d joints in pixel unit
normed_2d: zero-center root with resize into a fixed bbox
"""
# stage1: linear project 3d X,Y to pixel unit, corresponding scale Z to keep the same 3d scale
pose3d_root_Z = pose_3d[:, 0:1, 2:3].copy()
camera = np.repeat(camera[np.newaxis, :], pose3d_root_Z.shape[0], axis=0)
if norm == 'lcn':
ratio1 = week_perspective_scale(camera[:,np.newaxis], pose3d_root_Z)+1 #[T,1,1] project depth as the same scale with XY
else:
ratio1 = week_perspective_scale(camera[:,np.newaxis], pose3d_root_Z) #[T,1,1] project depth as the same scale with XY
pose3d_pixel = np.zeros_like(pose_3d)
if norm == 'weak_proj':
pose3d_root = np.repeat(pose3d_root_Z, 17, axis=-2) # (T,17,1) # For weak perspective projection
pose3d_pixel[..., :2] = pose_3d[..., :2]/pose3d_root * camera[:, np.newaxis, :2] + camera[:, np.newaxis, 2:4]
else:
pose3d_pixel[..., :2] = wrap(project_to_2d_linear, pose_3d.copy(), camera) # Keep all depth from each joints, projected 2d xy are more precise.
pose3d_relative_depth = minus_root(pose_3d[..., 2:3]) # Make root depth=0
pose3d_stage1_depth = pose3d_relative_depth * ratio1 # Root_depth=0 [2000,17,1]
pose3d_pixel[..., 2:3] = pose3d_stage1_depth.copy()
return pose3d_pixel, ratio1 | [
"def",
"norm_to_pixel_s1",
"(",
"pose_3d",
",",
"camera",
",",
"norm",
")",
":",
"# stage1: linear project 3d X,Y to pixel unit, corresponding scale Z to keep the same 3d scale",
"pose3d_root_Z",
"=",
"pose_3d",
"[",
":",
",",
"0",
":",
"1",
",",
"2",
":",
"3",
"]",
".",
"copy",
"(",
")",
"camera",
"=",
"np",
".",
"repeat",
"(",
"camera",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
",",
"pose3d_root_Z",
".",
"shape",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"if",
"norm",
"==",
"'lcn'",
":",
"ratio1",
"=",
"week_perspective_scale",
"(",
"camera",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"pose3d_root_Z",
")",
"+",
"1",
"#[T,1,1] project depth as the same scale with XY",
"else",
":",
"ratio1",
"=",
"week_perspective_scale",
"(",
"camera",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"pose3d_root_Z",
")",
"#[T,1,1] project depth as the same scale with XY",
"pose3d_pixel",
"=",
"np",
".",
"zeros_like",
"(",
"pose_3d",
")",
"if",
"norm",
"==",
"'weak_proj'",
":",
"pose3d_root",
"=",
"np",
".",
"repeat",
"(",
"pose3d_root_Z",
",",
"17",
",",
"axis",
"=",
"-",
"2",
")",
"# (T,17,1) # For weak perspective projection",
"pose3d_pixel",
"[",
"...",
",",
":",
"2",
"]",
"=",
"pose_3d",
"[",
"...",
",",
":",
"2",
"]",
"/",
"pose3d_root",
"*",
"camera",
"[",
":",
",",
"np",
".",
"newaxis",
",",
":",
"2",
"]",
"+",
"camera",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"2",
":",
"4",
"]",
"else",
":",
"pose3d_pixel",
"[",
"...",
",",
":",
"2",
"]",
"=",
"wrap",
"(",
"project_to_2d_linear",
",",
"pose_3d",
".",
"copy",
"(",
")",
",",
"camera",
")",
"# Keep all depth from each joints, projected 2d xy are more precise.",
"pose3d_relative_depth",
"=",
"minus_root",
"(",
"pose_3d",
"[",
"...",
",",
"2",
":",
"3",
"]",
")",
"# Make root depth=0",
"pose3d_stage1_depth",
"=",
"pose3d_relative_depth",
"*",
"ratio1",
"# Root_depth=0 [2000,17,1]",
"pose3d_pixel",
"[",
"...",
",",
"2",
":",
"3",
"]",
"=",
"pose3d_stage1_depth",
".",
"copy",
"(",
")",
"return",
"pose3d_pixel",
",",
"ratio1"
] | [
30,
0
] | [
57,
31
] | python | en | ['en', 'error', 'th'] | False |
start_thread_soon | (fn, deliver) | Runs ``deliver(outcome.capture(fn))`` in a worker thread.
Generally ``fn`` does some blocking work, and ``deliver`` delivers the
result back to whoever is interested.
This is a low-level, no-frills interface, very similar to using
`threading.Thread` to spawn a thread directly. The main difference is
that this function tries to re-use threads when possible, so it can be
a bit faster than `threading.Thread`.
Worker threads have the `~threading.Thread.daemon` flag set, which means
that if your main thread exits, worker threads will automatically be
killed. If you want to make sure that your ``fn`` runs to completion, then
you should make sure that the main thread remains alive until ``deliver``
is called.
It is safe to call this function simultaneously from multiple threads.
Args:
fn (sync function): Performs arbitrary blocking work.
deliver (sync function): Takes the `outcome.Outcome` of ``fn``, and
delivers it. *Must not block.*
Because worker threads are cached and reused for multiple calls, neither
function should mutate thread-level state, like `threading.local` objects
– or if they do, they should be careful to revert their changes before
returning.
Note:
The split between ``fn`` and ``deliver`` serves two purposes. First,
it's convenient, since most callers need something like this anyway.
Second, it avoids a small race condition that could cause too many
threads to be spawned. Consider a program that wants to run several
jobs sequentially on a thread, so the main thread submits a job, waits
for it to finish, submits another job, etc. In theory, this program
should only need one worker thread. But what could happen is:
1. Worker thread: First job finishes, and calls ``deliver``.
2. Main thread: receives notification that the job finished, and calls
``start_thread_soon``.
3. Main thread: sees that no worker threads are marked idle, so spawns
a second worker thread.
4. Original worker thread: marks itself as idle.
To avoid this, threads mark themselves as idle *before* calling
``deliver``.
Is this potential extra thread a major problem? Maybe not, but it's
easy enough to avoid, and we figure that if the user is trying to
limit how many threads they're using then it's polite to respect that.
| Runs ``deliver(outcome.capture(fn))`` in a worker thread. | def start_thread_soon(fn, deliver):
"""Runs ``deliver(outcome.capture(fn))`` in a worker thread.
Generally ``fn`` does some blocking work, and ``deliver`` delivers the
result back to whoever is interested.
This is a low-level, no-frills interface, very similar to using
`threading.Thread` to spawn a thread directly. The main difference is
that this function tries to re-use threads when possible, so it can be
a bit faster than `threading.Thread`.
Worker threads have the `~threading.Thread.daemon` flag set, which means
that if your main thread exits, worker threads will automatically be
killed. If you want to make sure that your ``fn`` runs to completion, then
you should make sure that the main thread remains alive until ``deliver``
is called.
It is safe to call this function simultaneously from multiple threads.
Args:
fn (sync function): Performs arbitrary blocking work.
deliver (sync function): Takes the `outcome.Outcome` of ``fn``, and
delivers it. *Must not block.*
Because worker threads are cached and reused for multiple calls, neither
function should mutate thread-level state, like `threading.local` objects
– or if they do, they should be careful to revert their changes before
returning.
Note:
The split between ``fn`` and ``deliver`` serves two purposes. First,
it's convenient, since most callers need something like this anyway.
Second, it avoids a small race condition that could cause too many
threads to be spawned. Consider a program that wants to run several
jobs sequentially on a thread, so the main thread submits a job, waits
for it to finish, submits another job, etc. In theory, this program
should only need one worker thread. But what could happen is:
1. Worker thread: First job finishes, and calls ``deliver``.
2. Main thread: receives notification that the job finished, and calls
``start_thread_soon``.
3. Main thread: sees that no worker threads are marked idle, so spawns
a second worker thread.
4. Original worker thread: marks itself as idle.
To avoid this, threads mark themselves as idle *before* calling
``deliver``.
Is this potential extra thread a major problem? Maybe not, but it's
easy enough to avoid, and we figure that if the user is trying to
limit how many threads they're using then it's polite to respect that.
"""
THREAD_CACHE.start_thread_soon(fn, deliver) | [
"def",
"start_thread_soon",
"(",
"fn",
",",
"deliver",
")",
":",
"THREAD_CACHE",
".",
"start_thread_soon",
"(",
"fn",
",",
"deliver",
")"
] | [
107,
0
] | [
167,
47
] | python | en | ['en', 'en', 'en'] | True |
Cache.add | (self, rsrc: Cacheable,
on_delete: Optional[DeletionHandler]=None) |
Adds an entry to the cache, if it's not already present. If
on_delete is not None, it will called when rsrc is removed from
the cache.
|
Adds an entry to the cache, if it's not already present. If
on_delete is not None, it will called when rsrc is removed from
the cache.
| def add(self, rsrc: Cacheable,
on_delete: Optional[DeletionHandler]=None) -> None:
"""
Adds an entry to the cache, if it's not already present. If
on_delete is not None, it will called when rsrc is removed from
the cache.
"""
key = rsrc.cache_key
if not key:
self.logger.info(f"CACHE: ignore, no cache_key: {rsrc}")
elif key in self.cache:
# self.logger.info(f"CACHE: ignore, already present: {rsrc}")
pass
else:
self.logger.debug(f"CACHE: adding {key}: {rsrc}, on_delete {self.fn_name(on_delete)}")
self.cache[key] = (rsrc, on_delete) | [
"def",
"add",
"(",
"self",
",",
"rsrc",
":",
"Cacheable",
",",
"on_delete",
":",
"Optional",
"[",
"DeletionHandler",
"]",
"=",
"None",
")",
"->",
"None",
":",
"key",
"=",
"rsrc",
".",
"cache_key",
"if",
"not",
"key",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: ignore, no cache_key: {rsrc}\"",
")",
"elif",
"key",
"in",
"self",
".",
"cache",
":",
"# self.logger.info(f\"CACHE: ignore, already present: {rsrc}\")",
"pass",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: adding {key}: {rsrc}, on_delete {self.fn_name(on_delete)}\"",
")",
"self",
".",
"cache",
"[",
"key",
"]",
"=",
"(",
"rsrc",
",",
"on_delete",
")"
] | [
57,
4
] | [
75,
47
] | python | en | ['en', 'error', 'th'] | False |
Cache.link | (self, owner: Cacheable, owned: Cacheable) |
Adds a link to the cache. Links are directional from the owner to
the owned. The basic idea is that if the owner changes, all the owned
things get invalidated. Both the owner and the owned must be in the
cache.
|
Adds a link to the cache. Links are directional from the owner to
the owned. The basic idea is that if the owner changes, all the owned
things get invalidated. Both the owner and the owned must be in the
cache.
| def link(self, owner: Cacheable, owned: Cacheable) -> None:
"""
Adds a link to the cache. Links are directional from the owner to
the owned. The basic idea is that if the owner changes, all the owned
things get invalidated. Both the owner and the owned must be in the
cache.
"""
owner_key = owner.cache_key
owned_key = owned.cache_key
if not owner_key:
self.logger.info(f"CACHE: cannot link, owner has no key: {owner}")
return
if not owned_key:
self.logger.info(f"CACHE: cannot link, owned has no key: {owned}")
return
if owner_key not in self.cache:
self.logger.info(f"CACHE: cannot link, owner not cached: {owner}")
return
if owned_key not in self.cache:
self.logger.info(f"CACHE: cannot link, owned not cached: {owned}")
return
# self.logger.info(f"CACHE: linking {owner_key} -> {owned_key}")
links = self.links.setdefault(owner_key, set())
links.update([ owned_key ]) | [
"def",
"link",
"(",
"self",
",",
"owner",
":",
"Cacheable",
",",
"owned",
":",
"Cacheable",
")",
"->",
"None",
":",
"owner_key",
"=",
"owner",
".",
"cache_key",
"owned_key",
"=",
"owned",
".",
"cache_key",
"if",
"not",
"owner_key",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: cannot link, owner has no key: {owner}\"",
")",
"return",
"if",
"not",
"owned_key",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: cannot link, owned has no key: {owned}\"",
")",
"return",
"if",
"owner_key",
"not",
"in",
"self",
".",
"cache",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: cannot link, owner not cached: {owner}\"",
")",
"return",
"if",
"owned_key",
"not",
"in",
"self",
".",
"cache",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: cannot link, owned not cached: {owned}\"",
")",
"return",
"# self.logger.info(f\"CACHE: linking {owner_key} -> {owned_key}\")",
"links",
"=",
"self",
".",
"links",
".",
"setdefault",
"(",
"owner_key",
",",
"set",
"(",
")",
")",
"links",
".",
"update",
"(",
"[",
"owned_key",
"]",
")"
] | [
77,
4
] | [
107,
35
] | python | en | ['en', 'error', 'th'] | False |
Cache.invalidate | (self, key: str) |
Recursively invalidate the entry named by 'key' and everything to which it
is linked.
|
Recursively invalidate the entry named by 'key' and everything to which it
is linked.
| def invalidate(self, key: str) -> None:
"""
Recursively invalidate the entry named by 'key' and everything to which it
is linked.
"""
# We use worklist to keep track of things to consider: for starters,
# it just has our key in it, and as we find owned things, we add them
# to the worklist to consider.
#
# Note that word "consider". If you want to invalidate something from
# the cache that isn't in the cache, that's not an error -- it'll be
# silently ignored. That helps with dangling links (e.g. if two Mappings
# both link to the same Group, and you invalidate the first Mapping, the
# second will have a dangling link to the now-invalidated Group, and that
# needs to not break anything).
self.invalidate_calls += 1
worklist = [ key ]
# Under the hood, "invalidating" something from this cache is really
# deleting it, so we'll use "to_delete" for the set of things we're going
# to, y'knom, delete. We find all the resources we're going to work with
# before deleting any of them, because I get paranoid about modifying a
# data structure while I'm trying to traverse it.
to_delete: Dict[str, CacheEntry] = {}
# Keep going until we have nothing else to do.
while worklist:
# Pop off the first thing...
key = worklist.pop(0)
# ...and check if it's in the cache.
if key in self.cache:
# It is, good. We can append it to our set of things to delete...
rsrc, on_delete = self.cache[key]
self.logger.debug(f"CACHE: DEL {key}: will delete {rsrc}")
if key not in to_delete:
# We haven't seen this key, so remember to delete it...
to_delete[key] = (rsrc, on_delete)
# ...and then toss all of its linked objects on our list to
# consider.
if key in self.links:
for owned in sorted(self.links[key]):
self.logger.debug(f"CACHE: DEL {key}: will check owned {owned}")
worklist.append(owned)
# (If we have seen the key already, just ignore it and go to the next
# key in the worklist. This is important to not get stuck if we somehow
# get a circular link list.)
# OK, we have a set of things to delete. Get to it.
for key, rdh in to_delete.items():
self.logger.debug(f"CACHE: DEL {key}: smiting!")
self.invalidated_objects += 1
del(self.cache[key])
if key in self.links:
del(self.links[key])
rsrc, on_delete = rdh
if on_delete:
self.logger.debug(f"CACHE: DEL {key}: calling {self.fn_name(on_delete)}")
on_delete(rsrc) | [
"def",
"invalidate",
"(",
"self",
",",
"key",
":",
"str",
")",
"->",
"None",
":",
"# We use worklist to keep track of things to consider: for starters, ",
"# it just has our key in it, and as we find owned things, we add them",
"# to the worklist to consider.",
"#",
"# Note that word \"consider\". If you want to invalidate something from ",
"# the cache that isn't in the cache, that's not an error -- it'll be",
"# silently ignored. That helps with dangling links (e.g. if two Mappings",
"# both link to the same Group, and you invalidate the first Mapping, the ",
"# second will have a dangling link to the now-invalidated Group, and that",
"# needs to not break anything).",
"self",
".",
"invalidate_calls",
"+=",
"1",
"worklist",
"=",
"[",
"key",
"]",
"# Under the hood, \"invalidating\" something from this cache is really",
"# deleting it, so we'll use \"to_delete\" for the set of things we're going",
"# to, y'knom, delete. We find all the resources we're going to work with",
"# before deleting any of them, because I get paranoid about modifying a",
"# data structure while I'm trying to traverse it.",
"to_delete",
":",
"Dict",
"[",
"str",
",",
"CacheEntry",
"]",
"=",
"{",
"}",
"# Keep going until we have nothing else to do.",
"while",
"worklist",
":",
"# Pop off the first thing...",
"key",
"=",
"worklist",
".",
"pop",
"(",
"0",
")",
"# ...and check if it's in the cache.",
"if",
"key",
"in",
"self",
".",
"cache",
":",
"# It is, good. We can append it to our set of things to delete...",
"rsrc",
",",
"on_delete",
"=",
"self",
".",
"cache",
"[",
"key",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: DEL {key}: will delete {rsrc}\"",
")",
"if",
"key",
"not",
"in",
"to_delete",
":",
"# We haven't seen this key, so remember to delete it...",
"to_delete",
"[",
"key",
"]",
"=",
"(",
"rsrc",
",",
"on_delete",
")",
"# ...and then toss all of its linked objects on our list to",
"# consider.",
"if",
"key",
"in",
"self",
".",
"links",
":",
"for",
"owned",
"in",
"sorted",
"(",
"self",
".",
"links",
"[",
"key",
"]",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: DEL {key}: will check owned {owned}\"",
")",
"worklist",
".",
"append",
"(",
"owned",
")",
"# (If we have seen the key already, just ignore it and go to the next",
"# key in the worklist. This is important to not get stuck if we somehow",
"# get a circular link list.)",
"# OK, we have a set of things to delete. Get to it.",
"for",
"key",
",",
"rdh",
"in",
"to_delete",
".",
"items",
"(",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: DEL {key}: smiting!\"",
")",
"self",
".",
"invalidated_objects",
"+=",
"1",
"del",
"(",
"self",
".",
"cache",
"[",
"key",
"]",
")",
"if",
"key",
"in",
"self",
".",
"links",
":",
"del",
"(",
"self",
".",
"links",
"[",
"key",
"]",
")",
"rsrc",
",",
"on_delete",
"=",
"rdh",
"if",
"on_delete",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: DEL {key}: calling {self.fn_name(on_delete)}\"",
")",
"on_delete",
"(",
"rsrc",
")"
] | [
109,
4
] | [
178,
31
] | python | en | ['en', 'error', 'th'] | False |
Cache.__getitem__ | (self, key: str) |
Fetches only the _resource_ for a given key from the cache. If the
key is not present in the cache, returns None.
If you need the deletion callback, you'll have to work with
self.cache manually.
|
Fetches only the _resource_ for a given key from the cache. If the
key is not present in the cache, returns None. | def __getitem__(self, key: str) -> Optional[Cacheable]:
"""
Fetches only the _resource_ for a given key from the cache. If the
key is not present in the cache, returns None.
If you need the deletion callback, you'll have to work with
self.cache manually.
"""
item: Optional[CacheEntry] = self.cache.get(key, None)
if item is not None:
self.logger.debug(f"CACHE: fetch {key}")
self.hits += 1
return item[0]
else:
self.logger.debug(f"CACHE: missing {key}")
self.misses += 1
return None | [
"def",
"__getitem__",
"(",
"self",
",",
"key",
":",
"str",
")",
"->",
"Optional",
"[",
"Cacheable",
"]",
":",
"item",
":",
"Optional",
"[",
"CacheEntry",
"]",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"item",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: fetch {key}\"",
")",
"self",
".",
"hits",
"+=",
"1",
"return",
"item",
"[",
"0",
"]",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"CACHE: missing {key}\"",
")",
"self",
".",
"misses",
"+=",
"1",
"return",
"None"
] | [
180,
4
] | [
198,
23
] | python | en | ['en', 'error', 'th'] | False |
Cache.dump | (self) |
Dump the cache to the logger.
|
Dump the cache to the logger.
| def dump(self) -> None:
"""
Dump the cache to the logger.
"""
for k in sorted(self.cache.keys()):
rsrc, on_delete = self.cache[k]
self.logger.info(f"CACHE: {k}, on_delete {self.fn_name(on_delete)}:")
if k in self.links:
for owned in sorted(self.links[k]):
self.logger.info(f"CACHE: -> {owned}") | [
"def",
"dump",
"(",
"self",
")",
"->",
"None",
":",
"for",
"k",
"in",
"sorted",
"(",
"self",
".",
"cache",
".",
"keys",
"(",
")",
")",
":",
"rsrc",
",",
"on_delete",
"=",
"self",
".",
"cache",
"[",
"k",
"]",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: {k}, on_delete {self.fn_name(on_delete)}:\"",
")",
"if",
"k",
"in",
"self",
".",
"links",
":",
"for",
"owned",
"in",
"sorted",
"(",
"self",
".",
"links",
"[",
"k",
"]",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f\"CACHE: -> {owned}\"",
")"
] | [
200,
4
] | [
212,
60
] | python | en | ['en', 'error', 'th'] | False |
is_instantiation | (decl_string) |
returns True if `decl_string` is template instantiation and False otherwise
:param decl_string: string that should be checked for pattern presence
:type decl_string: str
:rtype: bool
|
returns True if `decl_string` is template instantiation and False otherwise | def is_instantiation(decl_string):
"""
returns True if `decl_string` is template instantiation and False otherwise
:param decl_string: string that should be checked for pattern presence
:type decl_string: str
:rtype: bool
"""
return __THE_PARSER.has_pattern(decl_string) | [
"def",
"is_instantiation",
"(",
"decl_string",
")",
":",
"return",
"__THE_PARSER",
".",
"has_pattern",
"(",
"decl_string",
")"
] | [
24,
0
] | [
33,
48
] | python | en | ['en', 'error', 'th'] | False |
name | (decl_string) |
returns name of instantiated template
:type decl_string: str
:rtype: str
|
returns name of instantiated template | def name(decl_string):
"""
returns name of instantiated template
:type decl_string: str
:rtype: str
"""
return __THE_PARSER.name(decl_string) | [
"def",
"name",
"(",
"decl_string",
")",
":",
"return",
"__THE_PARSER",
".",
"name",
"(",
"decl_string",
")"
] | [
36,
0
] | [
43,
41
] | python | en | ['en', 'error', 'th'] | False |
args | (decl_string) |
returns list of template arguments
:type decl_string: `str`
:rtype: [`str`]
|
returns list of template arguments | def args(decl_string):
"""
returns list of template arguments
:type decl_string: `str`
:rtype: [`str`]
"""
return __THE_PARSER.args(decl_string) | [
"def",
"args",
"(",
"decl_string",
")",
":",
"return",
"__THE_PARSER",
".",
"args",
"(",
"decl_string",
")"
] | [
46,
0
] | [
53,
41
] | python | en | ['en', 'error', 'th'] | False |
split | (decl_string) | returns (name, [arguments] ) | returns (name, [arguments] ) | def split(decl_string):
"""returns (name, [arguments] )"""
return __THE_PARSER.split(decl_string) | [
"def",
"split",
"(",
"decl_string",
")",
":",
"return",
"__THE_PARSER",
".",
"split",
"(",
"decl_string",
")"
] | [
56,
0
] | [
58,
42
] | python | en | ['en', 'da', 'en'] | True |
split_recursive | (decl_string) | returns [(name, [arguments])] | returns [(name, [arguments])] | def split_recursive(decl_string):
"""returns [(name, [arguments])]"""
return __THE_PARSER.split_recursive(decl_string) | [
"def",
"split_recursive",
"(",
"decl_string",
")",
":",
"return",
"__THE_PARSER",
".",
"split_recursive",
"(",
"decl_string",
")"
] | [
61,
0
] | [
63,
52
] | python | en | ['en', 'da', 'en'] | True |
join | (name_, args_) | returns name< argument_1, argument_2, ..., argument_n > | returns name< argument_1, argument_2, ..., argument_n > | def join(name_, args_):
"""returns name< argument_1, argument_2, ..., argument_n >"""
return __THE_PARSER.join(name_, args_) | [
"def",
"join",
"(",
"name_",
",",
"args_",
")",
":",
"return",
"__THE_PARSER",
".",
"join",
"(",
"name_",
",",
"args_",
")"
] | [
66,
0
] | [
68,
42
] | python | en | ['en', 'da', 'en'] | True |
normalize | (decl_string) | returns `decl_string`, which contains "normalized" spaces
this functionality allows to implement comparison of 2 different string
which are actually same: x::y< z > and x::y<z>
| returns `decl_string`, which contains "normalized" spaces | def normalize(decl_string):
"""returns `decl_string`, which contains "normalized" spaces
this functionality allows to implement comparison of 2 different string
which are actually same: x::y< z > and x::y<z>
"""
return __THE_PARSER.normalize(decl_string) | [
"def",
"normalize",
"(",
"decl_string",
")",
":",
"return",
"__THE_PARSER",
".",
"normalize",
"(",
"decl_string",
")"
] | [
71,
0
] | [
77,
46
] | python | en | ['en', 'en', 'en'] | True |
normalize_name | (decl) |
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
Cached variant of normalize | def normalize_name(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_name is None:
decl.cache.normalized_name = normalize(decl.name)
return decl.cache.normalized_name | [
"def",
"normalize_name",
"(",
"decl",
")",
":",
"if",
"decl",
".",
"cache",
".",
"normalized_name",
"is",
"None",
":",
"decl",
".",
"cache",
".",
"normalized_name",
"=",
"normalize",
"(",
"decl",
".",
"name",
")",
"return",
"decl",
".",
"cache",
".",
"normalized_name"
] | [
80,
0
] | [
92,
37
] | python | en | ['en', 'error', 'th'] | False |
normalize_partial_name | (decl) |
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
Cached variant of normalize | def normalize_partial_name(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_partial_name is None:
decl.cache.normalized_partial_name = normalize(decl.partial_name)
return decl.cache.normalized_partial_name | [
"def",
"normalize_partial_name",
"(",
"decl",
")",
":",
"if",
"decl",
".",
"cache",
".",
"normalized_partial_name",
"is",
"None",
":",
"decl",
".",
"cache",
".",
"normalized_partial_name",
"=",
"normalize",
"(",
"decl",
".",
"partial_name",
")",
"return",
"decl",
".",
"cache",
".",
"normalized_partial_name"
] | [
95,
0
] | [
107,
45
] | python | en | ['en', 'error', 'th'] | False |
normalize_full_name_true | (decl) |
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
Cached variant of normalize | def normalize_full_name_true(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_full_name_true is None:
decl.cache.normalized_full_name_true = normalize(
declaration_utils.full_name(decl, with_defaults=True))
return decl.cache.normalized_full_name_true | [
"def",
"normalize_full_name_true",
"(",
"decl",
")",
":",
"if",
"decl",
".",
"cache",
".",
"normalized_full_name_true",
"is",
"None",
":",
"decl",
".",
"cache",
".",
"normalized_full_name_true",
"=",
"normalize",
"(",
"declaration_utils",
".",
"full_name",
"(",
"decl",
",",
"with_defaults",
"=",
"True",
")",
")",
"return",
"decl",
".",
"cache",
".",
"normalized_full_name_true"
] | [
110,
0
] | [
123,
47
] | python | en | ['en', 'error', 'th'] | False |
normalize_full_name_false | (decl) |
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
Cached variant of normalize | def normalize_full_name_false(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_full_name_false is None:
decl.cache.normalized_full_name_false = normalize(
declaration_utils.full_name(decl, with_defaults=False))
return decl.cache.normalized_full_name_false | [
"def",
"normalize_full_name_false",
"(",
"decl",
")",
":",
"if",
"decl",
".",
"cache",
".",
"normalized_full_name_false",
"is",
"None",
":",
"decl",
".",
"cache",
".",
"normalized_full_name_false",
"=",
"normalize",
"(",
"declaration_utils",
".",
"full_name",
"(",
"decl",
",",
"with_defaults",
"=",
"False",
")",
")",
"return",
"decl",
".",
"cache",
".",
"normalized_full_name_false"
] | [
126,
0
] | [
139,
48
] | python | en | ['en', 'error', 'th'] | False |
strip_version | (ver: str) |
strip_version is needed to strip a major/minor version of non-standard symbols. For example, when working with GKE,
`kubectl version` returns a minor version like '14+', which is not semver or any standard version, for that matter.
So we handle exceptions like that here.
:param ver: version string
:return: stripped version
|
strip_version is needed to strip a major/minor version of non-standard symbols. For example, when working with GKE,
`kubectl version` returns a minor version like '14+', which is not semver or any standard version, for that matter.
So we handle exceptions like that here.
:param ver: version string
:return: stripped version
| def strip_version(ver: str):
"""
strip_version is needed to strip a major/minor version of non-standard symbols. For example, when working with GKE,
`kubectl version` returns a minor version like '14+', which is not semver or any standard version, for that matter.
So we handle exceptions like that here.
:param ver: version string
:return: stripped version
"""
try:
return int(ver)
except ValueError as e:
# GKE returns weird versions with '+' in the end
if ver[-1] == '+':
return int(ver[:-1])
# If we still have not taken care of this, raise the error
raise ValueError(e) | [
"def",
"strip_version",
"(",
"ver",
":",
"str",
")",
":",
"try",
":",
"return",
"int",
"(",
"ver",
")",
"except",
"ValueError",
"as",
"e",
":",
"# GKE returns weird versions with '+' in the end",
"if",
"ver",
"[",
"-",
"1",
"]",
"==",
"'+'",
":",
"return",
"int",
"(",
"ver",
"[",
":",
"-",
"1",
"]",
")",
"# If we still have not taken care of this, raise the error",
"raise",
"ValueError",
"(",
"e",
")"
] | [
114,
0
] | [
131,
27
] | python | en | ['en', 'error', 'th'] | False |
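A quick usage sketch for strip_version; the inputs below are illustrative, not from the source:

    strip_version("14")    # -> 14 (plain component parses directly)
    strip_version("14+")   # -> 14 (GKE-style trailing '+' is stripped)
    strip_version("v14")   # raises ValueError (neither an int nor a '+' suffix)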
MyCustomSemanticTypeColumnDomainBuilder._get_domains | (
self,
variables: Optional[ParameterContainer] = None,
) |
Find the semantic column type for each column and return all domains matching the specified type or types.
|
Find the semantic column type for each column and return all domains matching the specified type or types.
| def _get_domains(
self,
variables: Optional[ParameterContainer] = None,
) -> List[Domain]:
"""
Find the semantic column type for each column and return all domains matching the specified type or types.
"""
batch_id: str = self.get_batch_id(variables=variables)
table_column_names: List[str] = self.get_validator(
variables=variables
).get_metric(
metric=MetricConfiguration(
metric_name="table.columns",
metric_domain_kwargs={
"batch_id": batch_id,
},
metric_value_kwargs=None,
metric_dependencies=None,
)
)
# First check the column name ends in "_id".
candidate_column_names: List[str] = list(
filter(
lambda candidate_column_name: candidate_column_name.endswith(
tuple(self._column_name_suffixes)
),
table_column_names,
)
)
column_name: str
domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": column_name,
},
)
for column_name in candidate_column_names
]
return domains | [
"def",
"_get_domains",
"(",
"self",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
")",
"->",
"List",
"[",
"Domain",
"]",
":",
"batch_id",
":",
"str",
"=",
"self",
".",
"get_batch_id",
"(",
"variables",
"=",
"variables",
")",
"table_column_names",
":",
"List",
"[",
"str",
"]",
"=",
"self",
".",
"get_validator",
"(",
"variables",
"=",
"variables",
")",
".",
"get_metric",
"(",
"metric",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"table.columns\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"batch_id\"",
":",
"batch_id",
",",
"}",
",",
"metric_value_kwargs",
"=",
"None",
",",
"metric_dependencies",
"=",
"None",
",",
")",
")",
"# First check the column name ends in \"_id\".",
"candidate_column_names",
":",
"List",
"[",
"str",
"]",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"candidate_column_name",
":",
"candidate_column_name",
".",
"endswith",
"(",
"tuple",
"(",
"self",
".",
"_column_name_suffixes",
")",
")",
",",
"table_column_names",
",",
")",
")",
"column_name",
":",
"str",
"domains",
":",
"List",
"[",
"Domain",
"]",
"=",
"[",
"Domain",
"(",
"domain_type",
"=",
"MetricDomainTypes",
".",
"COLUMN",
",",
"domain_kwargs",
"=",
"{",
"\"column\"",
":",
"column_name",
",",
"}",
",",
")",
"for",
"column_name",
"in",
"candidate_column_names",
"]",
"return",
"domains"
] | [
43,
4
] | [
85,
22
] | python | en | ['en', 'error', 'th'] | False |
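Assuming a table whose columns are ['user_id', 'name', 'event_id'] and the default '_id' suffix, the builder above would return domains shaped roughly like this (a sketch of the output, not verified against a live context):

    [
        Domain(domain_type=MetricDomainTypes.COLUMN, domain_kwargs={"column": "user_id"}),
        Domain(domain_type=MetricDomainTypes.COLUMN, domain_kwargs={"column": "event_id"}),
    ]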
register | (classname: str, cls: "SchemaType") | Add a class to the registry of serializer classes. When a class is
registered, an entry for both its classname and its full, module-qualified
path are added to the registry.
Example: ::
class MyClass:
pass
register('MyClass', MyClass)
# Registry:
# {
# 'MyClass': [path.to.MyClass],
# 'path.to.MyClass': [path.to.MyClass],
# }
| Add a class to the registry of serializer classes. When a class is
registered, an entry for both its classname and its full, module-qualified
path are added to the registry. | def register(classname: str, cls: "SchemaType") -> None:
"""Add a class to the registry of serializer classes. When a class is
registered, an entry for both its classname and its full, module-qualified
path are added to the registry.
Example: ::
class MyClass:
pass
register('MyClass', MyClass)
# Registry:
# {
# 'MyClass': [path.to.MyClass],
# 'path.to.MyClass': [path.to.MyClass],
# }
"""
# Module where the class is located
module = cls.__module__
# Full module path to the class
# e.g. user.schemas.UserSchema
fullpath = ".".join([module, classname])
# If the class is already registered; need to check if the entries are
# in the same module as cls to avoid having multiple instances of the same
# class in the registry
if classname in _registry and not any(
each.__module__ == module for each in _registry[classname]
):
_registry[classname].append(cls)
elif classname not in _registry:
_registry[classname] = [cls]
# Also register the full path
if fullpath not in _registry:
_registry.setdefault(fullpath, []).append(cls)
else:
# If fullpath does exist, replace existing entry
_registry[fullpath] = [cls]
return None | [
"def",
"register",
"(",
"classname",
":",
"str",
",",
"cls",
":",
"\"SchemaType\"",
")",
"->",
"None",
":",
"# Module where the class is located",
"module",
"=",
"cls",
".",
"__module__",
"# Full module path to the class",
"# e.g. user.schemas.UserSchema",
"fullpath",
"=",
"\".\"",
".",
"join",
"(",
"[",
"module",
",",
"classname",
"]",
")",
"# If the class is already registered; need to check if the entries are",
"# in the same module as cls to avoid having multiple instances of the same",
"# class in the registry",
"if",
"classname",
"in",
"_registry",
"and",
"not",
"any",
"(",
"each",
".",
"__module__",
"==",
"module",
"for",
"each",
"in",
"_registry",
"[",
"classname",
"]",
")",
":",
"_registry",
"[",
"classname",
"]",
".",
"append",
"(",
"cls",
")",
"elif",
"classname",
"not",
"in",
"_registry",
":",
"_registry",
"[",
"classname",
"]",
"=",
"[",
"cls",
"]",
"# Also register the full path",
"if",
"fullpath",
"not",
"in",
"_registry",
":",
"_registry",
".",
"setdefault",
"(",
"fullpath",
",",
"[",
"]",
")",
".",
"append",
"(",
"cls",
")",
"else",
":",
"# If fullpath does exist, replace existing entry",
"_registry",
"[",
"fullpath",
"]",
"=",
"[",
"cls",
"]",
"return",
"None"
] | [
25,
0
] | [
64,
15
] | python | en | ['en', 'en', 'en'] | True |
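A minimal sketch of the registry layout after registering the same class name from two hypothetical modules, path.a and path.b:

    register("UserSchema", a_cls)  # a_cls.__module__ == "path.a"
    register("UserSchema", b_cls)  # b_cls.__module__ == "path.b"
    # _registry is now roughly:
    # {
    #     "UserSchema":        [a_cls, b_cls],
    #     "path.a.UserSchema": [a_cls],
    #     "path.b.UserSchema": [b_cls],
    # }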
get_class | (
classname: str, all: bool = False
) | Retrieve a class from the registry.
:raises: marshmallow.exceptions.RegistryError if the class cannot be found
or if there are multiple entries for the given class name.
| Retrieve a class from the registry. | def get_class(
classname: str, all: bool = False
) -> typing.Union[typing.List["SchemaType"], "SchemaType"]:
"""Retrieve a class from the registry.
:raises: marshmallow.exceptions.RegistryError if the class cannot be found
or if there are multiple entries for the given class name.
"""
try:
classes = _registry[classname]
except KeyError as error:
raise RegistryError(
"Class with name {!r} was not found. You may need "
"to import the class.".format(classname)
) from error
if len(classes) > 1:
if all:
return _registry[classname]
raise RegistryError(
"Multiple classes with name {!r} "
"were found. Please use the full, "
"module-qualified path.".format(classname)
)
else:
return _registry[classname][0] | [
"def",
"get_class",
"(",
"classname",
":",
"str",
",",
"all",
":",
"bool",
"=",
"False",
")",
"->",
"typing",
".",
"Union",
"[",
"typing",
".",
"List",
"[",
"\"SchemaType\"",
"]",
",",
"\"SchemaType\"",
"]",
":",
"try",
":",
"classes",
"=",
"_registry",
"[",
"classname",
"]",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"RegistryError",
"(",
"\"Class with name {!r} was not found. You may need \"",
"\"to import the class.\"",
".",
"format",
"(",
"classname",
")",
")",
"from",
"error",
"if",
"len",
"(",
"classes",
")",
">",
"1",
":",
"if",
"all",
":",
"return",
"_registry",
"[",
"classname",
"]",
"raise",
"RegistryError",
"(",
"\"Multiple classes with name {!r} \"",
"\"were found. Please use the full, \"",
"\"module-qualified path.\"",
".",
"format",
"(",
"classname",
")",
")",
"else",
":",
"return",
"_registry",
"[",
"classname",
"]",
"[",
"0",
"]"
] | [
67,
0
] | [
91,
38
] | python | en | ['en', 'en', 'en'] | True |
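Continuing the hypothetical two-module sketch above, lookups behave as follows:

    get_class("UserSchema")            # raises RegistryError (ambiguous short name)
    get_class("UserSchema", all=True)  # -> [a_cls, b_cls]
    get_class("path.a.UserSchema")     # -> a_cls
    get_class("NoSuchSchema")          # raises RegistryError (not found)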
test_opt_out_environment_variable | (
in_memory_data_context_config_usage_stats_enabled, monkeypatch
) | Set the env variable GE_USAGE_STATS value to any of the following: FALSE, False, false, 0 | Set the env variable GE_USAGE_STATS value to any of the following: FALSE, False, false, 0 | def test_opt_out_environment_variable(
in_memory_data_context_config_usage_stats_enabled, monkeypatch
):
"""Set the env variable GE_USAGE_STATS value to any of the following: FALSE, False, false, 0"""
monkeypatch.setenv("GE_USAGE_STATS", "False")
assert (
in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.enabled
is True
)
context = BaseDataContext(in_memory_data_context_config_usage_stats_enabled)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False | [
"def",
"test_opt_out_environment_variable",
"(",
"in_memory_data_context_config_usage_stats_enabled",
",",
"monkeypatch",
")",
":",
"monkeypatch",
".",
"setenv",
"(",
"\"GE_USAGE_STATS\"",
",",
"\"False\"",
")",
"assert",
"(",
"in_memory_data_context_config_usage_stats_enabled",
".",
"anonymous_usage_statistics",
".",
"enabled",
"is",
"True",
")",
"context",
"=",
"BaseDataContext",
"(",
"in_memory_data_context_config_usage_stats_enabled",
")",
"project_config",
"=",
"context",
".",
"_project_config",
"assert",
"project_config",
".",
"anonymous_usage_statistics",
".",
"enabled",
"is",
"False"
] | [
78,
0
] | [
89,
69
] | python | en | ['en', 'en', 'en'] | True |
InferredAssetSqlDataConnector.__init__ | (
self,
name: str,
datasource_name: str,
execution_engine: Optional[ExecutionEngine] = None,
data_asset_name_prefix: Optional[str] = "",
data_asset_name_suffix: Optional[str] = "",
include_schema_name: Optional[bool] = False,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
excluded_tables: Optional[list] = None,
included_tables: Optional[list] = None,
skip_inapplicable_tables: Optional[bool] = True,
introspection_directives: Optional[dict] = None,
batch_spec_passthrough: Optional[dict] = None,
) |
InferredAssetDataConnector for connecting to data on a SQL database
Args:
name (str): The name of this DataConnector
datasource_name (str): The name of the Datasource that contains it
execution_engine (ExecutionEngine): An ExecutionEngine
data_asset_name_prefix (str): An optional prefix to prepend to inferred data_asset_names
data_asset_name_suffix (str): An optional suffix to append to inferred data_asset_names
include_schema_name (bool): Should the data_asset_name include the schema as a prefix?
splitter_method (str): A method to split the target table into multiple Batches
splitter_kwargs (dict): Keyword arguments to pass to splitter_method
sampling_method (str): A method to downsample within a target Batch
sampling_kwargs (dict): Keyword arguments to pass to sampling_method
excluded_tables (List): A list of tables to ignore when inferring data asset_names
included_tables (List): If not None, only include tables in this list when inferring data asset_names
skip_inapplicable_tables (bool):
If True, tables that can't be successfully queried using sampling and splitter methods are excluded from inferred data_asset_names.
If False, the class will throw an error during initialization if any such tables are encountered.
introspection_directives (Dict): Arguments passed to the introspection method to guide introspection
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
|
InferredAssetDataConnector for connecting to data on a SQL database | def __init__(
self,
name: str,
datasource_name: str,
execution_engine: Optional[ExecutionEngine] = None,
data_asset_name_prefix: Optional[str] = "",
data_asset_name_suffix: Optional[str] = "",
include_schema_name: Optional[bool] = False,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
excluded_tables: Optional[list] = None,
included_tables: Optional[list] = None,
skip_inapplicable_tables: Optional[bool] = True,
introspection_directives: Optional[dict] = None,
batch_spec_passthrough: Optional[dict] = None,
):
"""
InferredAssetDataConnector for connecting to data on a SQL database
Args:
name (str): The name of this DataConnector
datasource_name (str): The name of the Datasource that contains it
execution_engine (ExecutionEngine): An ExecutionEngine
data_asset_name_prefix (str): An optional prefix to prepend to inferred data_asset_names
data_asset_name_suffix (str): An optional suffix to append to inferred data_asset_names
include_schema_name (bool): Should the data_asset_name include the schema as a prefix?
splitter_method (str): A method to split the target table into multiple Batches
splitter_kwargs (dict): Keyword arguments to pass to splitter_method
sampling_method (str): A method to downsample within a target Batch
sampling_kwargs (dict): Keyword arguments to pass to sampling_method
excluded_tables (List): A list of tables to ignore when inferring data asset_names
included_tables (List): If not None, only include tables in this list when inferring data asset_names
skip_inapplicable_tables (bool):
If True, tables that can't be successfully queried using sampling and splitter methods are excluded from inferred data_asset_names.
If False, the class will throw an error during initialization if any such tables are encountered.
introspection_directives (Dict): Arguments passed to the introspection method to guide introspection
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
"""
self._data_asset_name_prefix = data_asset_name_prefix
self._data_asset_name_suffix = data_asset_name_suffix
self._include_schema_name = include_schema_name
self._splitter_method = splitter_method
self._splitter_kwargs = splitter_kwargs
self._sampling_method = sampling_method
self._sampling_kwargs = sampling_kwargs
self._excluded_tables = excluded_tables
self._included_tables = included_tables
self._skip_inapplicable_tables = skip_inapplicable_tables
self._introspection_directives = introspection_directives or {}
super().__init__(
name=name,
datasource_name=datasource_name,
execution_engine=execution_engine,
assets=None,
batch_spec_passthrough=batch_spec_passthrough,
)
# This cache will contain a "config" for each data_asset discovered via introspection.
# This approach ensures that ConfiguredAssetSqlDataConnector._assets and _introspected_assets_cache store objects of the same "type"
# Note: We should probably turn them into AssetConfig objects
self._introspected_assets_cache = {}
self._refresh_introspected_assets_cache(
self._data_asset_name_prefix,
self._data_asset_name_suffix,
self._include_schema_name,
self._splitter_method,
self._splitter_kwargs,
self._sampling_method,
self._sampling_kwargs,
self._excluded_tables,
self._included_tables,
self._skip_inapplicable_tables,
) | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"datasource_name",
":",
"str",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"data_asset_name_prefix",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"\"",
",",
"data_asset_name_suffix",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"\"",
",",
"include_schema_name",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
"splitter_method",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"splitter_kwargs",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"sampling_method",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"sampling_kwargs",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"excluded_tables",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"included_tables",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"skip_inapplicable_tables",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
"introspection_directives",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"batch_spec_passthrough",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"self",
".",
"_data_asset_name_prefix",
"=",
"data_asset_name_prefix",
"self",
".",
"_data_asset_name_suffix",
"=",
"data_asset_name_suffix",
"self",
".",
"_include_schema_name",
"=",
"include_schema_name",
"self",
".",
"_splitter_method",
"=",
"splitter_method",
"self",
".",
"_splitter_kwargs",
"=",
"splitter_kwargs",
"self",
".",
"_sampling_method",
"=",
"sampling_method",
"self",
".",
"_sampling_kwargs",
"=",
"sampling_kwargs",
"self",
".",
"_excluded_tables",
"=",
"excluded_tables",
"self",
".",
"_included_tables",
"=",
"included_tables",
"self",
".",
"_skip_inapplicable_tables",
"=",
"skip_inapplicable_tables",
"self",
".",
"_introspection_directives",
"=",
"introspection_directives",
"or",
"{",
"}",
"super",
"(",
")",
".",
"__init__",
"(",
"name",
"=",
"name",
",",
"datasource_name",
"=",
"datasource_name",
",",
"execution_engine",
"=",
"execution_engine",
",",
"assets",
"=",
"None",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
")",
"# This cache will contain a \"config\" for each data_asset discovered via introspection.",
"# This approach ensures that ConfiguredAssetSqlDataConnector._assets and _introspected_assets_cache store objects of the same \"type\"",
"# Note: We should probably turn them into AssetConfig objects",
"self",
".",
"_introspected_assets_cache",
"=",
"{",
"}",
"self",
".",
"_refresh_introspected_assets_cache",
"(",
"self",
".",
"_data_asset_name_prefix",
",",
"self",
".",
"_data_asset_name_suffix",
",",
"self",
".",
"_include_schema_name",
",",
"self",
".",
"_splitter_method",
",",
"self",
".",
"_splitter_kwargs",
",",
"self",
".",
"_sampling_method",
",",
"self",
".",
"_sampling_kwargs",
",",
"self",
".",
"_excluded_tables",
",",
"self",
".",
"_included_tables",
",",
"self",
".",
"_skip_inapplicable_tables",
",",
")"
] | [
18,
4
] | [
94,
9
] | python | en | ['en', 'error', 'th'] | False |
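A hedged instantiation sketch; the argument values are illustrative, and my_sqlalchemy_engine stands in for a real SQL-backed ExecutionEngine:

    connector = InferredAssetSqlDataConnector(
        name="my_inferred_connector",
        datasource_name="my_datasource",
        execution_engine=my_sqlalchemy_engine,  # assumed to exist
        data_asset_name_prefix="prod__",
        include_schema_name=True,
        excluded_tables=["alembic_version"],
    )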
kwargs_to_tuple | (d) | Convert expectation configuration kwargs to a canonical tuple. | Convert expectation configuration kwargs to a canonical tuple. | def kwargs_to_tuple(d):
"""Convert expectation configuration kwargs to a canonical tuple."""
if isinstance(d, list):
return tuple([kwargs_to_tuple(v) for v in sorted(d)])
elif isinstance(d, dict):
return tuple(
[
(k, kwargs_to_tuple(v))
for k, v in sorted(d.items())
if k
not in ["result_format", "include_config", "catch_exceptions", "meta"]
]
)
return d | [
"def",
"kwargs_to_tuple",
"(",
"d",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"list",
")",
":",
"return",
"tuple",
"(",
"[",
"kwargs_to_tuple",
"(",
"v",
")",
"for",
"v",
"in",
"sorted",
"(",
"d",
")",
"]",
")",
"elif",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"return",
"tuple",
"(",
"[",
"(",
"k",
",",
"kwargs_to_tuple",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
")",
"if",
"k",
"not",
"in",
"[",
"\"result_format\"",
",",
"\"include_config\"",
",",
"\"catch_exceptions\"",
",",
"\"meta\"",
"]",
"]",
")",
"return",
"d"
] | [
7,
0
] | [
20,
12
] | python | en | ['en', 'en', 'sw'] | True |
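A short illustration of the canonicalization; key ordering and the dropped meta-keys follow the code above:

    kwargs_to_tuple({"column": "age", "min_value": 0, "result_format": "BASIC"})
    # -> (("column", "age"), ("min_value", 0))   # 'result_format' is dropped, keys sorted
    kwargs_to_tuple({"value_set": [3, 1, 2]})
    # -> (("value_set", (1, 2, 3)),)             # list values are sorted and tupled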
DomainBuilder.__init__ | (
self,
data_context: DataContext,
batch_request: Optional[Union[dict, str]] = None,
) |
Args:
data_context: DataContext
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
|
Args:
data_context: DataContext
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
| def __init__(
self,
data_context: DataContext,
batch_request: Optional[Union[dict, str]] = None,
):
"""
Args:
data_context: DataContext
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
"""
if data_context is None:
raise ge_exceptions.ProfilerExecutionError(
message=f"{self.__class__.__name__} requires a data_context, but none was provided."
)
self._data_context = data_context
self._batch_request = batch_request | [
"def",
"__init__",
"(",
"self",
",",
"data_context",
":",
"DataContext",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"dict",
",",
"str",
"]",
"]",
"=",
"None",
",",
")",
":",
"if",
"data_context",
"is",
"None",
":",
"raise",
"ge_exceptions",
".",
"ProfilerExecutionError",
"(",
"message",
"=",
"f\"{self.__class__.__name__} requires a data_context, but none was provided.\"",
")",
"self",
".",
"_data_context",
"=",
"data_context",
"self",
".",
"_batch_request",
"=",
"batch_request"
] | [
21,
4
] | [
38,
43
] | python | en | ['en', 'error', 'th'] | False |
DomainBuilder.get_domains | (
self,
variables: Optional[ParameterContainer] = None,
) |
Note: Please do not overwrite the public "get_domains()" method. If a child class needs to check parameters,
then please do so in its implementation of the (private) "_get_domains()" method, or in a utility method.
|
Note: Please do not overwrite the public "get_domains()" method. If a child class needs to check parameters,
then please do so in its implementation of the (private) "_get_domains()" method, or in a utility method.
| def get_domains(
self,
variables: Optional[ParameterContainer] = None,
) -> List[Domain]:
"""
Note: Please do not overwrite the public "get_domains()" method. If a child class needs to check parameters,
then please do so in its implementation of the (private) "_get_domains()" method, or in a utility method.
"""
return self._get_domains(variables=variables) | [
"def",
"get_domains",
"(",
"self",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
")",
"->",
"List",
"[",
"Domain",
"]",
":",
"return",
"self",
".",
"_get_domains",
"(",
"variables",
"=",
"variables",
")"
] | [
40,
4
] | [
48,
53
] | python | en | ['en', 'error', 'th'] | False |
DomainBuilder._get_domains | (
self,
variables: Optional[ParameterContainer] = None,
) |
_get_domains is the primary workhorse for the DomainBuilder
|
_get_domains is the primary workhorse for the DomainBuilder
| def _get_domains(
self,
variables: Optional[ParameterContainer] = None,
) -> List[Domain]:
"""
_get_domains is the primary workhorse for the DomainBuilder
"""
pass | [
"def",
"_get_domains",
"(",
"self",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
")",
"->",
"List",
"[",
"Domain",
"]",
":",
"pass"
] | [
51,
4
] | [
59,
12
] | python | en | ['en', 'error', 'th'] | False |
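A minimal subclass sketch following the contract above: override only the private hook and let the public get_domains() delegate to it:

    class MyDomainBuilder(DomainBuilder):
        def _get_domains(self, variables=None):
            # compute and return a List[Domain] here
            return []

    MyDomainBuilder(data_context=context).get_domains()  # `context` assumed to exist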
format_exc | (exception, message) |
Formats an exception message to make the output cleaner.
|
Formats an exception message to make the output cleaner.
| def format_exc(exception, message):
"""
Formats an exception message to make the output cleaner.
"""
if exception == Exception:
exc = Exception
return exc, message
elif exception == ElementNotVisibleException:
exc = ElementNotVisibleException
elif exception == "ElementNotVisibleException":
exc = ElementNotVisibleException
elif exception == NoSuchElementException:
exc = NoSuchElementException
elif exception == "NoSuchElementException":
exc = NoSuchElementException
elif exception == NoAlertPresentException:
exc = NoAlertPresentException
elif exception == "NoAlertPresentException":
exc = NoAlertPresentException
elif exception == NoSuchFrameException:
exc = NoSuchFrameException
elif exception == "NoSuchFrameException":
exc = NoSuchFrameException
elif exception == NoSuchWindowException:
exc = NoSuchWindowException
elif exception == "NoSuchWindowException":
exc = NoSuchWindowException
elif exception == "NoSuchFileException":
exc = NoSuchFileException
elif type(exception) is str:
exc = Exception
message = "%s: %s" % (exception, message)
return exc, message
else:
exc = Exception
return exc, message
message = _format_message(message)
return exc, message | [
"def",
"format_exc",
"(",
"exception",
",",
"message",
")",
":",
"if",
"exception",
"==",
"Exception",
":",
"exc",
"=",
"Exception",
"return",
"exc",
",",
"message",
"elif",
"exception",
"==",
"ElementNotVisibleException",
":",
"exc",
"=",
"ElementNotVisibleException",
"elif",
"exception",
"==",
"\"ElementNotVisibleException\"",
":",
"exc",
"=",
"ElementNotVisibleException",
"elif",
"exception",
"==",
"NoSuchElementException",
":",
"exc",
"=",
"NoSuchElementException",
"elif",
"exception",
"==",
"\"NoSuchElementException\"",
":",
"exc",
"=",
"NoSuchElementException",
"elif",
"exception",
"==",
"NoAlertPresentException",
":",
"exc",
"=",
"NoAlertPresentException",
"elif",
"exception",
"==",
"\"NoAlertPresentException\"",
":",
"exc",
"=",
"NoAlertPresentException",
"elif",
"exception",
"==",
"NoSuchFrameException",
":",
"exc",
"=",
"NoSuchFrameException",
"elif",
"exception",
"==",
"\"NoSuchFrameException\"",
":",
"exc",
"=",
"NoSuchFrameException",
"elif",
"exception",
"==",
"NoSuchWindowException",
":",
"exc",
"=",
"NoSuchWindowException",
"elif",
"exception",
"==",
"\"NoSuchWindowException\"",
":",
"exc",
"=",
"NoSuchWindowException",
"elif",
"exception",
"==",
"\"NoSuchFileException\"",
":",
"exc",
"=",
"NoSuchFileException",
"elif",
"type",
"(",
"exception",
")",
"is",
"str",
":",
"exc",
"=",
"Exception",
"message",
"=",
"\"%s: %s\"",
"%",
"(",
"exception",
",",
"message",
")",
"return",
"exc",
",",
"message",
"else",
":",
"exc",
"=",
"Exception",
"return",
"exc",
",",
"message",
"message",
"=",
"_format_message",
"(",
"message",
")",
"return",
"exc",
",",
"message"
] | [
14,
0
] | [
51,
23
] | python | en | ['en', 'error', 'th'] | False |
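For example (the exception classes resolve to whatever this module imports under those names):

    exc, msg = format_exc("NoSuchElementException", "button not found")
    # -> exc is the NoSuchElementException class; msg went through _format_message()
    exc, msg = format_exc("SomeOtherError", "oops")
    # -> exc is Exception; msg == "SomeOtherError: oops" (returned before formatting)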
BaseRecipeAttrViewSet.get_queryset | (self) | Return objects for the current authenticated user only | Return objects for the current authenticated user only | def get_queryset(self):
"""Return objects for the current authenticated user only"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user
).order_by('-name').distinct() | [
"def",
"get_queryset",
"(",
"self",
")",
":",
"assigned_only",
"=",
"bool",
"(",
"int",
"(",
"self",
".",
"request",
".",
"query_params",
".",
"get",
"(",
"'assigned_only'",
",",
"0",
")",
")",
")",
"queryset",
"=",
"self",
".",
"queryset",
"if",
"assigned_only",
":",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"recipe__isnull",
"=",
"False",
")",
"return",
"queryset",
".",
"filter",
"(",
"user",
"=",
"self",
".",
"request",
".",
"user",
")",
".",
"order_by",
"(",
"'-name'",
")",
".",
"distinct",
"(",
")"
] | [
18,
4
] | [
29,
38
] | python | en | ['en', 'en', 'en'] | True |
BaseRecipeAttrViewSet.perform_create | (self, serializer) | Create a new tag | Create a new tag | def perform_create(self, serializer):
"""Create a new tag"""
serializer.save(user=self.request.user) | [
"def",
"perform_create",
"(",
"self",
",",
"serializer",
")",
":",
"serializer",
".",
"save",
"(",
"user",
"=",
"self",
".",
"request",
".",
"user",
")"
] | [
31,
4
] | [
33,
47
] | python | en | ['en', 'en', 'en'] | True |
RecipeViewSet._params_to_ints | (self, qs) | Convert a list of string IDs to a list of integers | Convert a list of string IDs to a list of integers | def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')] | [
"def",
"_params_to_ints",
"(",
"self",
",",
"qs",
")",
":",
"return",
"[",
"int",
"(",
"str_id",
")",
"for",
"str_id",
"in",
"qs",
".",
"split",
"(",
"','",
")",
"]"
] | [
55,
4
] | [
57,
56
] | python | en | ['en', 'en', 'en'] | True |
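A direct illustration of the helper above:

    self._params_to_ints("1,2,3")  # -> [1, 2, 3]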
RecipeViewSet.get_queryset | (self) | Retrieve the recipes for the authenticated user | Retrieve the recipes for the authenticated user | def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
return queryset.filter(user=self.request.user) | [
"def",
"get_queryset",
"(",
"self",
")",
":",
"tags",
"=",
"self",
".",
"request",
".",
"query_params",
".",
"get",
"(",
"'tags'",
")",
"ingredients",
"=",
"self",
".",
"request",
".",
"query_params",
".",
"get",
"(",
"'ingredients'",
")",
"queryset",
"=",
"self",
".",
"queryset",
"if",
"tags",
":",
"tag_ids",
"=",
"self",
".",
"_params_to_ints",
"(",
"tags",
")",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"tags__id__in",
"=",
"tag_ids",
")",
"if",
"ingredients",
":",
"ingredient_ids",
"=",
"self",
".",
"_params_to_ints",
"(",
"ingredients",
")",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"ingredients__id__in",
"=",
"ingredient_ids",
")",
"return",
"queryset",
".",
"filter",
"(",
"user",
"=",
"self",
".",
"request",
".",
"user",
")"
] | [
59,
4
] | [
71,
54
] | python | en | ['en', 'en', 'en'] | True |
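A sketch of how the filtering is driven from the querystring; the URL prefix is an assumption, since routing is not shown in this row:

    # GET /api/recipe/recipes/?tags=1,2&ingredients=5
    # -> queryset.filter(tags__id__in=[1, 2])
    #            .filter(ingredients__id__in=[5])
    #            .filter(user=request.user)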
RecipeViewSet.get_serializer_class | (self) | Return appropriate serializer class | Return appropriate serializer class | def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class | [
"def",
"get_serializer_class",
"(",
"self",
")",
":",
"if",
"self",
".",
"action",
"==",
"'retrieve'",
":",
"return",
"serializers",
".",
"RecipeDetailSerializer",
"elif",
"self",
".",
"action",
"==",
"'upload_image'",
":",
"return",
"serializers",
".",
"RecipeImageSerializer",
"return",
"self",
".",
"serializer_class"
] | [
73,
4
] | [
80,
36
] | python | en | ['en', 'co', 'en'] | True |
RecipeViewSet.perform_create | (self, serializer) | Create a new recipe | Create a new recipe | def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user) | [
"def",
"perform_create",
"(",
"self",
",",
"serializer",
")",
":",
"serializer",
".",
"save",
"(",
"user",
"=",
"self",
".",
"request",
".",
"user",
")"
] | [
82,
4
] | [
84,
47
] | python | en | ['en', 'co', 'en'] | True |
RecipeViewSet.upload_image | (self, request, pk=None) | Upload an image to a recipe | Upload an image to a recipe | def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
) | [
"def",
"upload_image",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"recipe",
"=",
"self",
".",
"get_object",
"(",
")",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"recipe",
",",
"data",
"=",
"request",
".",
"data",
")",
"if",
"serializer",
".",
"is_valid",
"(",
")",
":",
"serializer",
".",
"save",
"(",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
",",
"status",
"=",
"status",
".",
"HTTP_200_OK",
")",
"return",
"Response",
"(",
"serializer",
".",
"errors",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [
87,
4
] | [
104,
9
] | python | en | ['en', 'en', 'en'] | True |
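Client-side sketch; the exact URL depends on the @action decorator, which is not shown in this row:

    # POST /api/recipe/recipes/3/upload-image/   (multipart form, field name: 'image')
    # -> 200 with serialized recipe data on success, 400 with serializer.errors otherwise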
is_valid_partition_object | (partition_object) | Tests whether a given object is a valid continuous or categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid continuous or categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| def is_valid_partition_object(partition_object):
"""Tests whether a given object is a valid continuous or categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
return is_valid_continuous_partition_object(
partition_object
) or is_valid_categorical_partition_object(partition_object) | [
"def",
"is_valid_partition_object",
"(",
"partition_object",
")",
":",
"return",
"is_valid_continuous_partition_object",
"(",
"partition_object",
")",
"or",
"is_valid_categorical_partition_object",
"(",
"partition_object",
")"
] | [
22,
0
] | [
29,
64
] | python | en | ['en', 'en', 'en'] | True |
is_valid_categorical_partition_object | (partition_object) | Tests whether a given object is a valid categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| def is_valid_categorical_partition_object(partition_object):
"""Tests whether a given object is a valid categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
if (
partition_object is None
or ("weights" not in partition_object)
or ("values" not in partition_object)
):
return False
# Expect the same number of values as weights; weights should sum to one
return len(partition_object["values"]) == len(
partition_object["weights"]
) and np.allclose(np.sum(partition_object["weights"]), 1) | [
"def",
"is_valid_categorical_partition_object",
"(",
"partition_object",
")",
":",
"if",
"(",
"partition_object",
"is",
"None",
"or",
"(",
"\"weights\"",
"not",
"in",
"partition_object",
")",
"or",
"(",
"\"values\"",
"not",
"in",
"partition_object",
")",
")",
":",
"return",
"False",
"# Expect the same number of values as weights; weights should sum to one",
"return",
"len",
"(",
"partition_object",
"[",
"\"values\"",
"]",
")",
"==",
"len",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
"and",
"np",
".",
"allclose",
"(",
"np",
".",
"sum",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
",",
"1",
")"
] | [
32,
0
] | [
46,
61
] | python | en | ['en', 'en', 'en'] | True |
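A minimal valid categorical partition object per the checks above:

    partition = {"values": ["a", "b", "c"], "weights": [0.5, 0.3, 0.2]}
    is_valid_categorical_partition_object(partition)  # -> True (3 values, weights sum to 1)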
is_valid_continuous_partition_object | (partition_object) | Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`. | def is_valid_continuous_partition_object(partition_object):
"""Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
if (
(partition_object is None)
or ("weights" not in partition_object)
or ("bins" not in partition_object)
):
return False
if "tail_weights" in partition_object:
if len(partition_object["tail_weights"]) != 2:
return False
comb_weights = partition_object["tail_weights"] + partition_object["weights"]
else:
comb_weights = partition_object["weights"]
## TODO: Consider adding this check to migrate to the tail_weights structure of partition objects
# if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):
# return False
# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one
return (
(len(partition_object["bins"]) == (len(partition_object["weights"]) + 1))
and np.all(np.diff(partition_object["bins"]) > 0)
and np.allclose(np.sum(comb_weights), 1.0)
) | [
"def",
"is_valid_continuous_partition_object",
"(",
"partition_object",
")",
":",
"if",
"(",
"(",
"partition_object",
"is",
"None",
")",
"or",
"(",
"\"weights\"",
"not",
"in",
"partition_object",
")",
"or",
"(",
"\"bins\"",
"not",
"in",
"partition_object",
")",
")",
":",
"return",
"False",
"if",
"\"tail_weights\"",
"in",
"partition_object",
":",
"if",
"len",
"(",
"partition_object",
"[",
"\"tail_weights\"",
"]",
")",
"!=",
"2",
":",
"return",
"False",
"comb_weights",
"=",
"partition_object",
"[",
"\"tail_weights\"",
"]",
"+",
"partition_object",
"[",
"\"weights\"",
"]",
"else",
":",
"comb_weights",
"=",
"partition_object",
"[",
"\"weights\"",
"]",
"## TODO: Consider adding this check to migrate to the tail_weights structure of partition objects",
"# if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):",
"# return False",
"# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one",
"return",
"(",
"(",
"len",
"(",
"partition_object",
"[",
"\"bins\"",
"]",
")",
"==",
"(",
"len",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
"+",
"1",
")",
")",
"and",
"np",
".",
"all",
"(",
"np",
".",
"diff",
"(",
"partition_object",
"[",
"\"bins\"",
"]",
")",
">",
"0",
")",
"and",
"np",
".",
"allclose",
"(",
"np",
".",
"sum",
"(",
"comb_weights",
")",
",",
"1.0",
")",
")"
] | [
49,
0
] | [
78,
5
] | python | en | ['en', 'en', 'en'] | True |
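And minimal valid continuous partition objects, with one more bin edge than weights:

    is_valid_continuous_partition_object({"bins": [0, 1, 2], "weights": [0.25, 0.75]})
    # -> True
    is_valid_continuous_partition_object(
        {"bins": [0, 1], "weights": [0.8], "tail_weights": [0.1, 0.1]}
    )
    # -> True (tail weights plus bin weights sum to 1)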
categorical_partition_data | (data) | Convenience method for creating weights from categorical data.
Args:
data (list-like): The data from which to construct the estimate.
Returns:
A new partition object::
{
"values": (list) The categorical values present in the data
"weights": (list) The weights of the values in the partition.
}
See :ref:`partition_object`.
| Convenience method for creating weights from categorical data. | def categorical_partition_data(data):
"""Convenience method for creating weights from categorical data.
Args:
data (list-like): The data from which to construct the estimate.
Returns:
A new partition object::
{
"values": (list) The categorical values present in the data
"weights": (list) The weights of the values in the partition.
}
See :ref:`partition_object`.
"""
# Make dropna explicit (even though it defaults to true)
series = pd.Series(data)
value_counts = series.value_counts(dropna=True)
# Compute weights using denominator only of nonnull values
null_indexes = series.isnull()
nonnull_count = (null_indexes == False).sum()
weights = value_counts.values / nonnull_count
return {"values": value_counts.index.tolist(), "weights": weights} | [
"def",
"categorical_partition_data",
"(",
"data",
")",
":",
"# Make dropna explicit (even though it defaults to true)",
"series",
"=",
"pd",
".",
"Series",
"(",
"data",
")",
"value_counts",
"=",
"series",
".",
"value_counts",
"(",
"dropna",
"=",
"True",
")",
"# Compute weights using denominator only of nonnull values",
"null_indexes",
"=",
"series",
".",
"isnull",
"(",
")",
"nonnull_count",
"=",
"(",
"null_indexes",
"==",
"False",
")",
".",
"sum",
"(",
")",
"weights",
"=",
"value_counts",
".",
"values",
"/",
"nonnull_count",
"return",
"{",
"\"values\"",
":",
"value_counts",
".",
"index",
".",
"tolist",
"(",
")",
",",
"\"weights\"",
":",
"weights",
"}"
] | [
81,
0
] | [
107,
70
] | python | en | ['en', 'en', 'en'] | True |
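For instance; note that weights are computed over non-null values only (values rounded here):

    categorical_partition_data(["a", "a", "b", None])
    # -> {"values": ["a", "b"], "weights": array([0.6667, 0.3333])}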
kde_partition_data | (data, estimate_tails=True) | Convenience method for building a partition and weights using a Gaussian Kernel Density Estimate and default bandwidth.
Args:
data (list-like): The data from which to construct the estimate
estimate_tails (bool): Whether to estimate the tails of the distribution to keep the partition object finite
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
| Convenience method for building a partition and weights using a Gaussian Kernel Density Estimate and default bandwidth. | def kde_partition_data(data, estimate_tails=True):
"""Convenience method for building a partition and weights using a Gaussian Kernel Density Estimate and default bandwidth.
Args:
data (list-like): The data from which to construct the estimate
estimate_tails (bool): Whether to estimate the tails of the distribution to keep the partition object finite
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
"""
kde = stats.kde.gaussian_kde(data)
evaluation_bins = np.linspace(
start=np.min(data) - (kde.covariance_factor() / 2),
stop=np.max(data) + (kde.covariance_factor() / 2),
num=np.floor(
((np.max(data) - np.min(data)) / kde.covariance_factor()) + 1
).astype(int),
)
cdf_vals = [kde.integrate_box_1d(-np.inf, x) for x in evaluation_bins]
evaluation_weights = np.diff(cdf_vals)
if estimate_tails:
bins = np.concatenate(
(
[np.min(data) - (1.5 * kde.covariance_factor())],
evaluation_bins,
[np.max(data) + (1.5 * kde.covariance_factor())],
)
)
else:
bins = np.concatenate(([-np.inf], evaluation_bins, [np.inf]))
weights = np.concatenate(([cdf_vals[0]], evaluation_weights, [1 - cdf_vals[-1]]))
return {"bins": bins, "weights": weights} | [
"def",
"kde_partition_data",
"(",
"data",
",",
"estimate_tails",
"=",
"True",
")",
":",
"kde",
"=",
"stats",
".",
"kde",
".",
"gaussian_kde",
"(",
"data",
")",
"evaluation_bins",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"np",
".",
"min",
"(",
"data",
")",
"-",
"(",
"kde",
".",
"covariance_factor",
"(",
")",
"/",
"2",
")",
",",
"stop",
"=",
"np",
".",
"max",
"(",
"data",
")",
"+",
"(",
"kde",
".",
"covariance_factor",
"(",
")",
"/",
"2",
")",
",",
"num",
"=",
"np",
".",
"floor",
"(",
"(",
"(",
"np",
".",
"max",
"(",
"data",
")",
"-",
"np",
".",
"min",
"(",
"data",
")",
")",
"/",
"kde",
".",
"covariance_factor",
"(",
")",
")",
"+",
"1",
")",
".",
"astype",
"(",
"int",
")",
",",
")",
"cdf_vals",
"=",
"[",
"kde",
".",
"integrate_box_1d",
"(",
"-",
"np",
".",
"inf",
",",
"x",
")",
"for",
"x",
"in",
"evaluation_bins",
"]",
"evaluation_weights",
"=",
"np",
".",
"diff",
"(",
"cdf_vals",
")",
"if",
"estimate_tails",
":",
"bins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"np",
".",
"min",
"(",
"data",
")",
"-",
"(",
"1.5",
"*",
"kde",
".",
"covariance_factor",
"(",
")",
")",
"]",
",",
"evaluation_bins",
",",
"[",
"np",
".",
"max",
"(",
"data",
")",
"+",
"(",
"1.5",
"*",
"kde",
".",
"covariance_factor",
"(",
")",
")",
"]",
",",
")",
")",
"else",
":",
"bins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"-",
"np",
".",
"inf",
"]",
",",
"evaluation_bins",
",",
"[",
"np",
".",
"inf",
"]",
")",
")",
"weights",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"cdf_vals",
"[",
"0",
"]",
"]",
",",
"evaluation_weights",
",",
"[",
"1",
"-",
"cdf_vals",
"[",
"-",
"1",
"]",
"]",
")",
")",
"return",
"{",
"\"bins\"",
":",
"bins",
",",
"\"weights\"",
":",
"weights",
"}"
] | [
110,
0
] | [
151,
45
] | python | en | ['en', 'en', 'en'] | True |
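A usage sketch (requires scipy; the input is any 1-D numeric sample):

    import numpy as np
    partition = kde_partition_data(np.random.normal(size=500))
    # With estimate_tails=True both endpoints of partition["bins"] are finite,
    # and partition["weights"] sums to 1 by construction.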
continuous_partition_data | (data, bins="auto", n_bins=10, **kwargs) | Convenience method for building a partition object on continuous data
Args:
data (list-like): The data from which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto'
(for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
kwargs (mapping): Additional keyword arguments to be passed to numpy histogram
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
| Convenience method for building a partition object on continuous data | def continuous_partition_data(data, bins="auto", n_bins=10, **kwargs):
"""Convenience method for building a partition object on continuous data
Args:
data (list-like): The data from which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto'
(for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
kwargs (mapping): Additional keyword arguments to be passed to numpy histogram
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
"""
if bins == "uniform":
bins = np.linspace(start=np.min(data), stop=np.max(data), num=n_bins + 1)
elif bins == "ntile":
bins = np.percentile(data, np.linspace(start=0, stop=100, num=n_bins + 1))
elif bins != "auto":
raise ValueError("Invalid parameter for bins argument")
try:
hist, bin_edges = np.histogram(data, bins, density=False, **kwargs)
except ValueError as e:
raise ValueError(
"Unable to compute histogram. Did you know you can pass additional kwargs to numpy histogram,"
"such as a range? Numpy error was: " + str(e)
)
except TypeError as e:
raise TypeError(
"Unable to compute histogram. numpy histogram raised error: " + str(e)
)
return {"bins": bin_edges, "weights": hist / len(data)} | [
"def",
"continuous_partition_data",
"(",
"data",
",",
"bins",
"=",
"\"auto\"",
",",
"n_bins",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"bins",
"==",
"\"uniform\"",
":",
"bins",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"np",
".",
"min",
"(",
"data",
")",
",",
"stop",
"=",
"np",
".",
"max",
"(",
"data",
")",
",",
"num",
"=",
"n_bins",
"+",
"1",
")",
"elif",
"bins",
"==",
"\"ntile\"",
":",
"bins",
"=",
"np",
".",
"percentile",
"(",
"data",
",",
"np",
".",
"linspace",
"(",
"start",
"=",
"0",
",",
"stop",
"=",
"100",
",",
"num",
"=",
"n_bins",
"+",
"1",
")",
")",
"elif",
"bins",
"!=",
"\"auto\"",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameter for bins argument\"",
")",
"try",
":",
"hist",
",",
"bin_edges",
"=",
"np",
".",
"histogram",
"(",
"data",
",",
"bins",
",",
"density",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"Unable to compute histogram. Did you know you can pass additional kwargs to numpy histogram,\"",
"\"such as a range? Numpy error was: \"",
"+",
"str",
"(",
"e",
")",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"\"Unable to compute histogram. numpy histogram raised error: \"",
"+",
"str",
"(",
"e",
")",
")",
"return",
"{",
"\"bins\"",
":",
"bin_edges",
",",
"\"weights\"",
":",
"hist",
"/",
"len",
"(",
"data",
")",
"}"
] | [
163,
0
] | [
201,
59
] | python | en | ['en', 'en', 'en'] | True |
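A worked example with explicit uniform bins; extra kwargs would be forwarded to numpy.histogram:

    continuous_partition_data([1, 2, 2, 3, 4], bins="uniform", n_bins=3)
    # -> {"bins": array([1., 2., 3., 4.]), "weights": array([0.2, 0.4, 0.4])}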
build_continuous_partition_object | (
dataset, column, bins="auto", n_bins=10, allow_relative_error=False
) | Convenience method for building a partition object on continuous data from a dataset and column
Args:
dataset (GE Dataset): the dataset for which to compute the partition
column (string): The name of the column for which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto'
(for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
| Convenience method for building a partition object on continuous data from a dataset and column | def build_continuous_partition_object(
dataset, column, bins="auto", n_bins=10, allow_relative_error=False
):
"""Convenience method for building a partition object on continuous data from a dataset and column
Args:
dataset (GE Dataset): the dataset for which to compute the partition
column (string): The name of the column for which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto'
(for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
"""
bins = dataset.get_column_partition(column, bins, n_bins, allow_relative_error)
if isinstance(bins, np.ndarray):
bins = bins.tolist()
else:
bins = list(bins)
weights = list(
np.array(dataset.get_column_hist(column, tuple(bins)))
/ dataset.get_column_nonnull_count(column)
)
tail_weights = (1 - sum(weights)) / 2
partition_object = {
"bins": bins,
"weights": weights,
"tail_weights": [tail_weights, tail_weights],
}
return partition_object | [
"def",
"build_continuous_partition_object",
"(",
"dataset",
",",
"column",
",",
"bins",
"=",
"\"auto\"",
",",
"n_bins",
"=",
"10",
",",
"allow_relative_error",
"=",
"False",
")",
":",
"bins",
"=",
"dataset",
".",
"get_column_partition",
"(",
"column",
",",
"bins",
",",
"n_bins",
",",
"allow_relative_error",
")",
"if",
"isinstance",
"(",
"bins",
",",
"np",
".",
"ndarray",
")",
":",
"bins",
"=",
"bins",
".",
"tolist",
"(",
")",
"else",
":",
"bins",
"=",
"list",
"(",
"bins",
")",
"weights",
"=",
"list",
"(",
"np",
".",
"array",
"(",
"dataset",
".",
"get_column_hist",
"(",
"column",
",",
"tuple",
"(",
"bins",
")",
")",
")",
"/",
"dataset",
".",
"get_column_nonnull_count",
"(",
"column",
")",
")",
"tail_weights",
"=",
"(",
"1",
"-",
"sum",
"(",
"weights",
")",
")",
"/",
"2",
"partition_object",
"=",
"{",
"\"bins\"",
":",
"bins",
",",
"\"weights\"",
":",
"weights",
",",
"\"tail_weights\"",
":",
"[",
"tail_weights",
",",
"tail_weights",
"]",
",",
"}",
"return",
"partition_object"
] | [
204,
0
] | [
246,
27
] | python | en | ['en', 'en', 'en'] | True |
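Sketched usage against a dataset object; my_df stands in for a GE Dataset wrapping a table with a numeric 'age' column:

    partition = build_continuous_partition_object(my_df, "age", bins="ntile", n_bins=4)
    sorted(partition)  # -> ['bins', 'tail_weights', 'weights']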
build_categorical_partition_object | (dataset, column, sort="value") | Convenience method for building a partition object on categorical data from a dataset and column
Args:
dataset (GE Dataset): the dataset for which to compute the partition
column (string): The name of the column for which to construct the estimate.
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
Returns:
A new partition_object::
{
"values": (list) the categorical values for which each weight applies,
"weights": (list) The densities of the values implied by the partition.
}
See :ref:`partition_object`.
| Convenience method for building a partition object on categorical data from a dataset and column | def build_categorical_partition_object(dataset, column, sort="value"):
"""Convenience method for building a partition object on categorical data from a dataset and column
Args:
dataset (GE Dataset): the dataset for which to compute the partition
column (string): The name of the column for which to construct the estimate.
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
Returns:
A new partition_object::
{
"values": (list) the categorical values for which each weight applies,
"weights": (list) The densities of the values implied by the partition.
}
See :ref:`partition_object`.
"""
counts = dataset.get_column_value_counts(column, sort)
return {
"values": list(counts.index),
"weights": list(np.array(counts) / dataset.get_column_nonnull_count(column)),
} | [
"def",
"build_categorical_partition_object",
"(",
"dataset",
",",
"column",
",",
"sort",
"=",
"\"value\"",
")",
":",
"counts",
"=",
"dataset",
".",
"get_column_value_counts",
"(",
"column",
",",
"sort",
")",
"return",
"{",
"\"values\"",
":",
"list",
"(",
"counts",
".",
"index",
")",
",",
"\"weights\"",
":",
"list",
"(",
"np",
".",
"array",
"(",
"counts",
")",
"/",
"dataset",
".",
"get_column_nonnull_count",
"(",
"column",
")",
")",
",",
"}"
] | [
249,
0
] | [
273,
5
] | python | en | ['en', 'en', 'en'] | True |
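And the categorical counterpart, using the same hypothetical my_df:

    build_categorical_partition_object(my_df, "status", sort="count")
    # -> {"values": [...most frequent first...], "weights": [...]}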
infer_distribution_parameters | (data, distribution, params=None) | Convenience method for determining the shape parameters of a given distribution
Args:
data (list-like): The data to build shape parameters from.
distribution (string): Scipy distribution, determines which parameters to build.
params (dict or None): The known parameters. Parameters given here will not be altered. \
Keep as None to infer all necessary parameters from the data data.
Returns:
A dictionary of named parameters::
{
"mean": (float),
"std_dev": (float),
"loc": (float),
"scale": (float),
"alpha": (float),
"beta": (float),
"min": (float),
"max": (float),
"df": (float)
}
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html#scipy.stats.kstest
| Convenience method for determining the shape parameters of a given distribution | def infer_distribution_parameters(data, distribution, params=None):
"""Convenience method for determining the shape parameters of a given distribution
Args:
data (list-like): The data to build shape parameters from.
distribution (string): Scipy distribution, determines which parameters to build.
params (dict or None): The known parameters. Parameters given here will not be altered. \
Keep as None to infer all necessary parameters from the data.
Returns:
A dictionary of named parameters::
{
"mean": (float),
"std_dev": (float),
"loc": (float),
"scale": (float),
"alpha": (float),
"beta": (float),
"min": (float),
"max": (float),
"df": (float)
}
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html#scipy.stats.kstest
"""
if params is None:
params = dict()
elif not isinstance(params, dict):
raise TypeError(
"params must be a dictionary object, see great_expectations documentation"
)
if "mean" not in params.keys():
params["mean"] = data.mean()
if "std_dev" not in params.keys():
params["std_dev"] = data.std()
if distribution == "beta":
# scipy cdf(x, a, b, loc=0, scale=1)
if "alpha" not in params.keys():
# from https://stats.stackexchange.com/questions/12232/calculating-the-parameters-of-a-beta-distribution-using-the-mean-and-variance
params["alpha"] = (params["mean"] ** 2) * (
((1 - params["mean"]) / params["std_dev"] ** 2) - (1 / params["mean"])
)
if "beta" not in params.keys():
params["beta"] = params["alpha"] * ((1 / params["mean"]) - 1)
elif distribution == "gamma":
# scipy cdf(x, a, loc=0, scale=1)
if "alpha" not in params.keys():
# Using https://en.wikipedia.org/wiki/Gamma_distribution
params["alpha"] = params["mean"] / params.get("scale", 1)
# elif distribution == 'poisson':
# if 'lambda' not in params.keys():
# params['lambda'] = params['mean']
elif distribution == "uniform":
# scipy cdf(x, loc=0, scale=1)
if "min" not in params.keys():
if "loc" in params.keys():
params["min"] = params["loc"]
else:
params["min"] = min(data)
if "max" not in params.keys():
if "scale" in params.keys():
params["max"] = params["scale"]
else:
params["max"] = max(data) - params["min"]
elif distribution == "chi2":
# scipy cdf(x, df, loc=0, scale=1)
if "df" not in params.keys():
# from https://en.wikipedia.org/wiki/Chi-squared_distribution
params["df"] = params["mean"]
# Expon only uses loc and scale, use default
# elif distribution == 'expon':
# scipy cdf(x, loc=0, scale=1)
# if 'lambda' in params.keys():
# Lambda is optional
# params['scale'] = 1 / params['lambda']
elif distribution != "norm":
raise AttributeError(
"Unsupported distribution type. Please refer to Great Expectations Documentation"
)
params["loc"] = params.get("loc", 0)
params["scale"] = params.get("scale", 1)
return params | [
"def",
"infer_distribution_parameters",
"(",
"data",
",",
"distribution",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"dict",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"params must be a dictionary object, see great_expectations documentation\"",
")",
"if",
"\"mean\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"mean\"",
"]",
"=",
"data",
".",
"mean",
"(",
")",
"if",
"\"std_dev\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"std_dev\"",
"]",
"=",
"data",
".",
"std",
"(",
")",
"if",
"distribution",
"==",
"\"beta\"",
":",
"# scipy cdf(x, a, b, loc=0, scale=1)",
"if",
"\"alpha\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"# from https://stats.stackexchange.com/questions/12232/calculating-the-parameters-of-a-beta-distribution-using-the-mean-and-variance",
"params",
"[",
"\"alpha\"",
"]",
"=",
"(",
"params",
"[",
"\"mean\"",
"]",
"**",
"2",
")",
"*",
"(",
"(",
"(",
"1",
"-",
"params",
"[",
"\"mean\"",
"]",
")",
"/",
"params",
"[",
"\"std_dev\"",
"]",
"**",
"2",
")",
"-",
"(",
"1",
"/",
"params",
"[",
"\"mean\"",
"]",
")",
")",
"if",
"\"beta\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"beta\"",
"]",
"=",
"params",
"[",
"\"alpha\"",
"]",
"*",
"(",
"(",
"1",
"/",
"params",
"[",
"\"mean\"",
"]",
")",
"-",
"1",
")",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"# scipy cdf(x, a, loc=0, scale=1)",
"if",
"\"alpha\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"# Using https://en.wikipedia.org/wiki/Gamma_distribution",
"params",
"[",
"\"alpha\"",
"]",
"=",
"params",
"[",
"\"mean\"",
"]",
"/",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"# elif distribution == 'poisson':",
"# if 'lambda' not in params.keys():",
"# params['lambda'] = params['mean']",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"# scipy cdf(x, loc=0, scale=1)",
"if",
"\"min\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"if",
"\"loc\"",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"min\"",
"]",
"=",
"params",
"[",
"\"loc\"",
"]",
"else",
":",
"params",
"[",
"\"min\"",
"]",
"=",
"min",
"(",
"data",
")",
"if",
"\"max\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"if",
"\"scale\"",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"max\"",
"]",
"=",
"params",
"[",
"\"scale\"",
"]",
"else",
":",
"params",
"[",
"\"max\"",
"]",
"=",
"max",
"(",
"data",
")",
"-",
"params",
"[",
"\"min\"",
"]",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"# scipy cdf(x, df, loc=0, scale=1)",
"if",
"\"df\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"# from https://en.wikipedia.org/wiki/Chi-squared_distribution",
"params",
"[",
"\"df\"",
"]",
"=",
"params",
"[",
"\"mean\"",
"]",
"# Expon only uses loc and scale, use default",
"# elif distribution == 'expon':",
"# scipy cdf(x, loc=0, scale=1)",
"# if 'lambda' in params.keys():",
"# Lambda is optional",
"# params['scale'] = 1 / params['lambda']",
"elif",
"distribution",
"!=",
"\"norm\"",
":",
"raise",
"AttributeError",
"(",
"\"Unsupported distribution type. Please refer to Great Expectations Documentation\"",
")",
"params",
"[",
"\"loc\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"loc\"",
",",
"0",
")",
"params",
"[",
"\"scale\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"return",
"params"
] | [
276,
0
] | [
369,
17
] | python | en | ['en', 'en', 'en'] | True |
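A minimal usage sketch for infer_distribution_parameters, assuming `data` is a pandas Series (the function calls .mean() and .std() on it) and assuming the ge.dataset.util module path referenced in the docstrings above:

```python
import pandas as pd

from great_expectations.dataset.util import infer_distribution_parameters

data = pd.Series([4.8, 5.1, 5.0, 4.9, 5.2])
params = infer_distribution_parameters(data, "norm")
# mean/std_dev are computed from the data; loc/scale fall back to 0/1
print(params["mean"], params["std_dev"], params["loc"], params["scale"])
```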
_scipy_distribution_positional_args_from_dict | (distribution, params) | Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stat.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
| Helper function that returns positional arguments for a scipy distribution using a dict of parameters. | def _scipy_distribution_positional_args_from_dict(distribution, params):
"""Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stat.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
"""
params["loc"] = params.get("loc", 0)
if "scale" not in params:
params["scale"] = 1
if distribution == "norm":
return params["mean"], params["std_dev"]
elif distribution == "beta":
return params["alpha"], params["beta"], params["loc"], params["scale"]
elif distribution == "gamma":
return params["alpha"], params["loc"], params["scale"]
# elif distribution == 'poisson':
# return params['lambda'], params['loc']
elif distribution == "uniform":
return params["min"], params["max"]
elif distribution == "chi2":
return params["df"], params["loc"], params["scale"]
elif distribution == "expon":
return params["loc"], params["scale"] | [
"def",
"_scipy_distribution_positional_args_from_dict",
"(",
"distribution",
",",
"params",
")",
":",
"params",
"[",
"\"loc\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"loc\"",
",",
"0",
")",
"if",
"\"scale\"",
"not",
"in",
"params",
":",
"params",
"[",
"\"scale\"",
"]",
"=",
"1",
"if",
"distribution",
"==",
"\"norm\"",
":",
"return",
"params",
"[",
"\"mean\"",
"]",
",",
"params",
"[",
"\"std_dev\"",
"]",
"elif",
"distribution",
"==",
"\"beta\"",
":",
"return",
"params",
"[",
"\"alpha\"",
"]",
",",
"params",
"[",
"\"beta\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"return",
"params",
"[",
"\"alpha\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"# elif distribution == 'poisson':",
"# return params['lambda'], params['loc']",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"return",
"params",
"[",
"\"min\"",
"]",
",",
"params",
"[",
"\"max\"",
"]",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"return",
"params",
"[",
"\"df\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"elif",
"distribution",
"==",
"\"expon\"",
":",
"return",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]"
] | [
372,
0
] | [
407,
45
] | python | en | ['en', 'en', 'en'] | True |
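A minimal sketch of feeding the helper's positional output to scipy's cdf, under the same module-path assumption:

```python
from scipy import stats

from great_expectations.dataset.util import (
    _scipy_distribution_positional_args_from_dict,
)

params = {"alpha": 2.0, "beta": 5.0}  # loc/scale default to 0 and 1 in the helper
args = _scipy_distribution_positional_args_from_dict("beta", params)
print(stats.beta.cdf(0.3, *args))  # same as stats.beta.cdf(0.3, 2.0, 5.0, 0, 1)
```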
validate_distribution_parameters | (distribution, params) | Ensures that necessary parameters for a distribution are present and that all parameters are sensible.
If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\
with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\
must be positive.
Args:
distribution (string): \
The scipy distribution name, e.g. normal distribution is 'norm'.
params (dict or list): \
The distribution shape parameters in a named dictionary or positional list form following the scipy \
cdf argument scheme.
params={'mean': 40, 'std_dev': 5} or params=[40, 5]
Raises:
ValueError: \
With an informative description, usually when necessary parameters are omitted or are invalid.
| Ensures that necessary parameters for a distribution are present and that all parameters are sensible. | def validate_distribution_parameters(distribution, params):
"""Ensures that necessary parameters for a distribution are present and that all parameters are sensible.
If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\
with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\
must be positive.
Args:
distribution (string): \
The scipy distribution name, e.g. normal distribution is 'norm'.
params (dict or list): \
The distribution shape parameters in a named dictionary or positional list form following the scipy \
cdf argument scheme.
params={'mean': 40, 'std_dev': 5} or params=[40, 5]
Raises:
ValueError: \
With an informative description, usually when necessary parameters are omitted or are invalid.
"""
norm_msg = (
"norm distributions require 0 parameters and optionally 'mean', 'std_dev'."
)
beta_msg = "beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'."
gamma_msg = "gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'."
# poisson_msg = "poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'."
uniform_msg = (
"uniform distributions require 0 parameters and optionally 'loc', 'scale'."
)
chi2_msg = "chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'."
expon_msg = (
"expon distributions require 0 parameters and optionally 'loc', 'scale'."
)
if distribution not in [
"norm",
"beta",
"gamma",
"poisson",
"uniform",
"chi2",
"expon",
]:
raise AttributeError("Unsupported distribution provided: %s" % distribution)
if isinstance(params, dict):
# `params` is a dictionary
if params.get("std_dev", 1) <= 0 or params.get("scale", 1) <= 0:
raise ValueError("std_dev and scale must be positive.")
# alpha and beta are required and positive
if distribution == "beta" and (
params.get("alpha", -1) <= 0 or params.get("beta", -1) <= 0
):
raise ValueError("Invalid parameters: %s" % beta_msg)
# alpha is required and positive
elif distribution == "gamma" and params.get("alpha", -1) <= 0:
raise ValueError("Invalid parameters: %s" % gamma_msg)
# lambda is required and positive
# elif distribution == 'poisson' and params.get('lambda', -1) <= 0:
# raise ValueError("Invalid parameters: %s" %poisson_msg)
# df is required and must be positive
elif distribution == "chi2" and params.get("df", -1) <= 0:
raise ValueError("Invalid parameters: %s:" % chi2_msg)
elif isinstance(params, tuple) or isinstance(params, list):
scale = None
# `params` is a tuple or a list
if distribution == "beta":
if len(params) < 2:
raise ValueError("Missing required parameters: %s" % beta_msg)
if params[0] <= 0 or params[1] <= 0:
raise ValueError("Invalid parameters: %s" % beta_msg)
if len(params) == 4:
scale = params[3]
elif len(params) > 4:
raise ValueError("Too many parameters provided: %s" % beta_msg)
elif distribution == "norm":
if len(params) > 2:
raise ValueError("Too many parameters provided: %s" % norm_msg)
if len(params) == 2:
scale = params[1]
elif distribution == "gamma":
if len(params) < 1:
raise ValueError("Missing required parameters: %s" % gamma_msg)
if len(params) == 3:
scale = params[2]
if len(params) > 3:
raise ValueError("Too many parameters provided: %s" % gamma_msg)
elif params[0] <= 0:
raise ValueError("Invalid parameters: %s" % gamma_msg)
# elif distribution == 'poisson':
# if len(params) < 1:
# raise ValueError("Missing required parameters: %s" %poisson_msg)
# if len(params) > 2:
# raise ValueError("Too many parameters provided: %s" %poisson_msg)
# elif params[0] <= 0:
# raise ValueError("Invalid parameters: %s" %poisson_msg)
elif distribution == "uniform":
if len(params) == 2:
scale = params[1]
if len(params) > 2:
raise ValueError("Too many arguments provided: %s" % uniform_msg)
elif distribution == "chi2":
if len(params) < 1:
raise ValueError("Missing required parameters: %s" % chi2_msg)
elif len(params) == 3:
scale = params[2]
elif len(params) > 3:
raise ValueError("Too many arguments provided: %s" % chi2_msg)
if params[0] <= 0:
raise ValueError("Invalid parameters: %s" % chi2_msg)
elif distribution == "expon":
if len(params) == 2:
scale = params[1]
if len(params) > 2:
raise ValueError("Too many arguments provided: %s" % expon_msg)
if scale is not None and scale <= 0:
raise ValueError("std_dev and scale must be positive.")
else:
raise ValueError(
"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)"
)
return | [
"def",
"validate_distribution_parameters",
"(",
"distribution",
",",
"params",
")",
":",
"norm_msg",
"=",
"(",
"\"norm distributions require 0 parameters and optionally 'mean', 'std_dev'.\"",
")",
"beta_msg",
"=",
"\"beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'.\"",
"gamma_msg",
"=",
"\"gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'.\"",
"# poisson_msg = \"poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'.\"",
"uniform_msg",
"=",
"(",
"\"uniform distributions require 0 parameters and optionally 'loc', 'scale'.\"",
")",
"chi2_msg",
"=",
"\"chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'.\"",
"expon_msg",
"=",
"(",
"\"expon distributions require 0 parameters and optionally 'loc', 'scale'.\"",
")",
"if",
"distribution",
"not",
"in",
"[",
"\"norm\"",
",",
"\"beta\"",
",",
"\"gamma\"",
",",
"\"poisson\"",
",",
"\"uniform\"",
",",
"\"chi2\"",
",",
"\"expon\"",
",",
"]",
":",
"raise",
"AttributeError",
"(",
"\"Unsupported distribution provided: %s\"",
"%",
"distribution",
")",
"if",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"# `params` is a dictionary",
"if",
"params",
".",
"get",
"(",
"\"std_dev\"",
",",
"1",
")",
"<=",
"0",
"or",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"std_dev and scale must be positive.\"",
")",
"# alpha and beta are required and positive",
"if",
"distribution",
"==",
"\"beta\"",
"and",
"(",
"params",
".",
"get",
"(",
"\"alpha\"",
",",
"-",
"1",
")",
"<=",
"0",
"or",
"params",
".",
"get",
"(",
"\"beta\"",
",",
"-",
"1",
")",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"beta_msg",
")",
"# alpha is required and positive",
"elif",
"distribution",
"==",
"\"gamma\"",
"and",
"params",
".",
"get",
"(",
"\"alpha\"",
",",
"-",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"gamma_msg",
")",
"# lambda is a required and positive",
"# elif distribution == 'poisson' and params.get('lambda', -1) <= 0:",
"# raise ValueError(\"Invalid parameters: %s\" %poisson_msg)",
"# df is necessary and required to be positive",
"elif",
"distribution",
"==",
"\"chi2\"",
"and",
"params",
".",
"get",
"(",
"\"df\"",
",",
"-",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s:\"",
"%",
"chi2_msg",
")",
"elif",
"isinstance",
"(",
"params",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"params",
",",
"list",
")",
":",
"scale",
"=",
"None",
"# `params` is a tuple or a list",
"if",
"distribution",
"==",
"\"beta\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"beta_msg",
")",
"if",
"params",
"[",
"0",
"]",
"<=",
"0",
"or",
"params",
"[",
"1",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"beta_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"4",
":",
"scale",
"=",
"params",
"[",
"3",
"]",
"elif",
"len",
"(",
"params",
")",
">",
"4",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"beta_msg",
")",
"elif",
"distribution",
"==",
"\"norm\"",
":",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"norm_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"gamma_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"3",
":",
"scale",
"=",
"params",
"[",
"2",
"]",
"if",
"len",
"(",
"params",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"gamma_msg",
")",
"elif",
"params",
"[",
"0",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"gamma_msg",
")",
"# elif distribution == 'poisson':",
"# if len(params) < 1:",
"# raise ValueError(\"Missing required parameters: %s\" %poisson_msg)",
"# if len(params) > 2:",
"# raise ValueError(\"Too many parameters provided: %s\" %poisson_msg)",
"# elif params[0] <= 0:",
"# raise ValueError(\"Invalid parameters: %s\" %poisson_msg)",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"uniform_msg",
")",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"chi2_msg",
")",
"elif",
"len",
"(",
"params",
")",
"==",
"3",
":",
"scale",
"=",
"params",
"[",
"2",
"]",
"elif",
"len",
"(",
"params",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"chi2_msg",
")",
"if",
"params",
"[",
"0",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"chi2_msg",
")",
"elif",
"distribution",
"==",
"\"expon\"",
":",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"expon_msg",
")",
"if",
"scale",
"is",
"not",
"None",
"and",
"scale",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"std_dev and scale must be positive.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)\"",
")",
"return"
] | [
410,
0
] | [
549,
10
] | python | en | ['en', 'en', 'en'] | True |
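A short sketch of the two accepted parameter forms and the ValueError raised for an invalid shape parameter:

```python
from great_expectations.dataset.util import validate_distribution_parameters

validate_distribution_parameters("norm", {"mean": 40, "std_dev": 5})  # named form
validate_distribution_parameters("beta", [2.0, 5.0])                  # positional form
try:
    validate_distribution_parameters("gamma", {"alpha": -1})
except ValueError as err:
    print(err)  # Invalid parameters: gamma distributions require ...
```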
create_multiple_expectations | (df, columns, expectation_type, *args, **kwargs) | Creates an identical expectation for each of the given columns with the specified arguments, if any.
Args:
df (great_expectations.dataset): A great expectations dataset object.
columns (list): A list of column names represented as strings.
expectation_type (string): The expectation type.
Raises:
KeyError if the provided column does not exist.
AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.
Returns:
A list of expectation results.
| Creates an identical expectation for each of the given columns with the specified arguments, if any. | def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
"""Creates an identical expectation for each of the given columns with the specified arguments, if any.
Args:
df (great_expectations.dataset): A great expectations dataset object.
columns (list): A list of column names represented as strings.
expectation_type (string): The expectation type.
Raises:
KeyError if the provided column does not exist.
AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.
Returns:
A list of expectation results.
"""
expectation = getattr(df, expectation_type)
results = list()
for column in columns:
results.append(expectation(column, *args, **kwargs))
return results | [
"def",
"create_multiple_expectations",
"(",
"df",
",",
"columns",
",",
"expectation_type",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"expectation",
"=",
"getattr",
"(",
"df",
",",
"expectation_type",
")",
"results",
"=",
"list",
"(",
")",
"for",
"column",
"in",
"columns",
":",
"results",
".",
"append",
"(",
"expectation",
"(",
"column",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"results"
] | [
552,
0
] | [
575,
18
] | python | en | ['en', 'en', 'en'] | True |
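A sketch of fanning one expectation type over several columns; PandasDataset is assumed as the concrete great_expectations dataset class:

```python
import great_expectations as ge

from great_expectations.dataset.util import create_multiple_expectations

df = ge.dataset.PandasDataset({"a": [1, 2, 3], "b": [4, 5, 6]})
results = create_multiple_expectations(
    df, ["a", "b"], "expect_column_values_to_be_between", min_value=0, max_value=10
)
print(all(r.success for r in results))
```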
frac_coverage_classify | (dataset_in, clean_mask, no_data=-9999) |
Description:
Performs the fractional coverage algorithm on the given dataset. A boolean clean mask
must be supplied; it is typically derived from the dataset's 'cf_mask' variable.
Assumption:
- The implemented algorithm is defined for Landsat 5/Landsat 7; in order for it to
be used for Landsat 8, the bands will need to be adjusted
References:
- Guerschman, Juan P., et al. "Assessing the effects of site heterogeneity and soil
properties when unmixing photosynthetic vegetation, non-photosynthetic vegetation
and bare soil fractions from Landsat and MODIS data." Remote Sensing of Environment
161 (2015): 12-26.
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube (can be a derived
product, such as a cloudfree mosaic; should contain
coordinates: latitude, longitude
variables: blue, green, red, nir, swir1, swir2
clean_mask (nd numpy array with dtype boolean) - true for values the user considers clean;
typically derived from the dataset's cf_mask variable
Optional Inputs:
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - fractional coverage results with no data = -9999; containing
coordinates: latitude, longitude
variables: bs, pv, npv
where bs -> bare soil, pv -> photosynthetic vegetation, npv -> non-photosynthetic vegetation
|
Description:
Performs the fractional coverage algorithm on the given dataset. A boolean clean mask
must be supplied; it is typically derived from the dataset's 'cf_mask' variable.
Assumption:
- The implemented algorithm is defined for Landsat 5/Landsat 7; in order for it to
be used for Landsat 8, the bands will need to be adjusted
References:
- Guerschman, Juan P., et al. "Assessing the effects of site heterogeneity and soil
properties when unmixing photosynthetic vegetation, non-photosynthetic vegetation
and bare soil fractions from Landsat and MODIS data." Remote Sensing of Environment
161 (2015): 12-26.
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube (can be a derived
product, such as a cloudfree mosaic; should contain
coordinates: latitude, longitude
variables: blue, green, red, nir, swir1, swir2
clean_mask (nd numpy array with dtype boolean) - true for values the user considers clean;
typically derived from the dataset's cf_mask variable
Optional Inputs:
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - fractional coverage results with no data = -9999; containing
coordinates: latitude, longitude
variables: bs, pv, npv
where bs -> bare soil, pv -> photosynthetic vegetation, npv -> non-photosynthetic vegetation
| def frac_coverage_classify(dataset_in, clean_mask, no_data=-9999):
"""
Description:
Performs the fractional coverage algorithm on the given dataset. A boolean clean mask
must be supplied; it is typically derived from the dataset's 'cf_mask' variable.
Assumption:
- The implemented algorithm is defined for Landsat 5/Landsat 7; in order for it to
be used for Landsat 8, the bands will need to be adjusted
References:
- Guerschman, Juan P., et al. "Assessing the effects of site heterogeneity and soil
properties when unmixing photosynthetic vegetation, non-photosynthetic vegetation
and bare soil fractions from Landsat and MODIS data." Remote Sensing of Environment
161 (2015): 12-26.
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube (can be a derived
product, such as a cloudfree mosaic; should contain
coordinates: latitude, longitude
variables: blue, green, red, nir, swir1, swir2
clean_mask (nd numpy array with dtype boolean) - true for values the user considers clean;
typically derived from the dataset's cf_mask variable
Optional Inputs:
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - fractional coverage results with no data = -9999; containing
coordinates: latitude, longitude
variables: bs, pv, npv
where bs -> bare soil, pv -> photosynthetic vegetation, npv -> non-photosynthetic vegetation
"""
band_stack = []
mosaic_clean_mask = clean_mask.flatten()
for band in [
dataset_in.blue.values, dataset_in.green.values, dataset_in.red.values, dataset_in.nir.values,
dataset_in.swir1.values, dataset_in.swir2.values
]:
band = band.astype(np.float32)
band = band * 0.0001
band = band.flatten()
band_clean = np.full(band.shape, np.nan)
band_clean[mosaic_clean_mask] = band[mosaic_clean_mask]
band_stack.append(band_clean)
band_stack = np.array(band_stack).transpose()
for b in range(6):
band_stack = np.hstack((band_stack, np.expand_dims(np.log(band_stack[:, b]), axis=1)))
for b in range(6):
band_stack = np.hstack(
(band_stack, np.expand_dims(np.multiply(band_stack[:, b], band_stack[:, b + 6]), axis=1)))
for b in range(6):
for b2 in range(b + 1, 6):
band_stack = np.hstack(
(band_stack, np.expand_dims(np.multiply(band_stack[:, b], band_stack[:, b2]), axis=1)))
for b in range(6):
for b2 in range(b + 1, 6):
band_stack = np.hstack(
(band_stack, np.expand_dims(np.multiply(band_stack[:, b + 6], band_stack[:, b2 + 6]), axis=1)))
for b in range(6):
for b2 in range(b + 1, 6):
band_stack = np.hstack((band_stack, np.expand_dims(
np.divide(band_stack[:, b2] - band_stack[:, b], band_stack[:, b2] + band_stack[:, b]), axis=1)))
band_stack = np.nan_to_num(band_stack) # Now a n x 63 matrix (assuming one acquisition)
ones = np.ones(band_stack.shape[0])
ones = ones.reshape(ones.shape[0], 1)
band_stack = np.concatenate((band_stack, ones), axis=1) # Now a n x 64 matrix (assuming one acquisition)
end_members = np.loadtxt(csv_file_path, delimiter=',') # Creates a 64 x 3 matrix
SumToOneWeight = 0.02
ones = np.ones(end_members.shape[1]) * SumToOneWeight
ones = ones.reshape(1, end_members.shape[1])
end_members = np.concatenate((end_members, ones), axis=0).astype(np.float32)
result = np.zeros((band_stack.shape[0], end_members.shape[1]), dtype=np.float32) # Creates an n x 3 matrix
for i in range(band_stack.shape[0]):
if mosaic_clean_mask[i]:
result[i, :] = (opt.nnls(end_members, band_stack[i, :])[0].clip(0, 2.54) * 100).astype(np.int16)
else:
result[i, :] = np.ones((end_members.shape[1]), dtype=np.int16) * no_data # Set as no data
latitude = dataset_in.latitude
longitude = dataset_in.longitude
result = result.reshape(latitude.size, longitude.size, 3)
pv_band = result[:, :, 0]
npv_band = result[:, :, 1]
bs_band = result[:, :, 2]
pv_clean = np.full(pv_band.shape, no_data)
npv_clean = np.full(npv_band.shape, no_data)
bs_clean = np.full(bs_band.shape, no_data)
pv_clean[clean_mask] = pv_band[clean_mask]
npv_clean[clean_mask] = npv_band[clean_mask]
bs_clean[clean_mask] = bs_band[clean_mask]
rapp_bands = collections.OrderedDict([('bs', (['latitude', 'longitude'], bs_clean)),
('pv', (['latitude', 'longitude'], pv_clean)),
('npv', (['latitude', 'longitude'], npv_clean))])
rapp_dataset = xr.Dataset(rapp_bands, coords={'latitude': latitude, 'longitude': longitude})
return rapp_dataset | [
"def",
"frac_coverage_classify",
"(",
"dataset_in",
",",
"clean_mask",
",",
"no_data",
"=",
"-",
"9999",
")",
":",
"band_stack",
"=",
"[",
"]",
"mosaic_clean_mask",
"=",
"clean_mask",
".",
"flatten",
"(",
")",
"for",
"band",
"in",
"[",
"dataset_in",
".",
"blue",
".",
"values",
",",
"dataset_in",
".",
"green",
".",
"values",
",",
"dataset_in",
".",
"red",
".",
"values",
",",
"dataset_in",
".",
"nir",
".",
"values",
",",
"dataset_in",
".",
"swir1",
".",
"values",
",",
"dataset_in",
".",
"swir2",
".",
"values",
"]",
":",
"band",
"=",
"band",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"band",
"=",
"band",
"*",
"0.0001",
"band",
"=",
"band",
".",
"flatten",
"(",
")",
"band_clean",
"=",
"np",
".",
"full",
"(",
"band",
".",
"shape",
",",
"np",
".",
"nan",
")",
"band_clean",
"[",
"mosaic_clean_mask",
"]",
"=",
"band",
"[",
"mosaic_clean_mask",
"]",
"band_stack",
".",
"append",
"(",
"band_clean",
")",
"band_stack",
"=",
"np",
".",
"array",
"(",
"band_stack",
")",
".",
"transpose",
"(",
")",
"for",
"b",
"in",
"range",
"(",
"6",
")",
":",
"band_stack",
"=",
"np",
".",
"hstack",
"(",
"(",
"band_stack",
",",
"np",
".",
"expand_dims",
"(",
"np",
".",
"log",
"(",
"band_stack",
"[",
":",
",",
"b",
"]",
")",
",",
"axis",
"=",
"1",
")",
")",
")",
"for",
"b",
"in",
"range",
"(",
"6",
")",
":",
"band_stack",
"=",
"np",
".",
"hstack",
"(",
"(",
"band_stack",
",",
"np",
".",
"expand_dims",
"(",
"np",
".",
"multiply",
"(",
"band_stack",
"[",
":",
",",
"b",
"]",
",",
"band_stack",
"[",
":",
",",
"b",
"+",
"6",
"]",
")",
",",
"axis",
"=",
"1",
")",
")",
")",
"for",
"b",
"in",
"range",
"(",
"6",
")",
":",
"for",
"b2",
"in",
"range",
"(",
"b",
"+",
"1",
",",
"6",
")",
":",
"band_stack",
"=",
"np",
".",
"hstack",
"(",
"(",
"band_stack",
",",
"np",
".",
"expand_dims",
"(",
"np",
".",
"multiply",
"(",
"band_stack",
"[",
":",
",",
"b",
"]",
",",
"band_stack",
"[",
":",
",",
"b2",
"]",
")",
",",
"axis",
"=",
"1",
")",
")",
")",
"for",
"b",
"in",
"range",
"(",
"6",
")",
":",
"for",
"b2",
"in",
"range",
"(",
"b",
"+",
"1",
",",
"6",
")",
":",
"band_stack",
"=",
"np",
".",
"hstack",
"(",
"(",
"band_stack",
",",
"np",
".",
"expand_dims",
"(",
"np",
".",
"multiply",
"(",
"band_stack",
"[",
":",
",",
"b",
"+",
"6",
"]",
",",
"band_stack",
"[",
":",
",",
"b2",
"+",
"6",
"]",
")",
",",
"axis",
"=",
"1",
")",
")",
")",
"for",
"b",
"in",
"range",
"(",
"6",
")",
":",
"for",
"b2",
"in",
"range",
"(",
"b",
"+",
"1",
",",
"6",
")",
":",
"band_stack",
"=",
"np",
".",
"hstack",
"(",
"(",
"band_stack",
",",
"np",
".",
"expand_dims",
"(",
"np",
".",
"divide",
"(",
"band_stack",
"[",
":",
",",
"b2",
"]",
"-",
"band_stack",
"[",
":",
",",
"b",
"]",
",",
"band_stack",
"[",
":",
",",
"b2",
"]",
"+",
"band_stack",
"[",
":",
",",
"b",
"]",
")",
",",
"axis",
"=",
"1",
")",
")",
")",
"band_stack",
"=",
"np",
".",
"nan_to_num",
"(",
"band_stack",
")",
"# Now a n x 63 matrix (assuming one acquisition)",
"ones",
"=",
"np",
".",
"ones",
"(",
"band_stack",
".",
"shape",
"[",
"0",
"]",
")",
"ones",
"=",
"ones",
".",
"reshape",
"(",
"ones",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"band_stack",
"=",
"np",
".",
"concatenate",
"(",
"(",
"band_stack",
",",
"ones",
")",
",",
"axis",
"=",
"1",
")",
"# Now a n x 64 matrix (assuming one acquisition)",
"end_members",
"=",
"np",
".",
"loadtxt",
"(",
"csv_file_path",
",",
"delimiter",
"=",
"','",
")",
"# Creates a 64 x 3 matrix",
"SumToOneWeight",
"=",
"0.02",
"ones",
"=",
"np",
".",
"ones",
"(",
"end_members",
".",
"shape",
"[",
"1",
"]",
")",
"*",
"SumToOneWeight",
"ones",
"=",
"ones",
".",
"reshape",
"(",
"1",
",",
"end_members",
".",
"shape",
"[",
"1",
"]",
")",
"end_members",
"=",
"np",
".",
"concatenate",
"(",
"(",
"end_members",
",",
"ones",
")",
",",
"axis",
"=",
"0",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"result",
"=",
"np",
".",
"zeros",
"(",
"(",
"band_stack",
".",
"shape",
"[",
"0",
"]",
",",
"end_members",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Creates an n x 3 matrix",
"for",
"i",
"in",
"range",
"(",
"band_stack",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"mosaic_clean_mask",
"[",
"i",
"]",
":",
"result",
"[",
"i",
",",
":",
"]",
"=",
"(",
"opt",
".",
"nnls",
"(",
"end_members",
",",
"band_stack",
"[",
"i",
",",
":",
"]",
")",
"[",
"0",
"]",
".",
"clip",
"(",
"0",
",",
"2.54",
")",
"*",
"100",
")",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"else",
":",
"result",
"[",
"i",
",",
":",
"]",
"=",
"np",
".",
"ones",
"(",
"(",
"end_members",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"int16",
")",
"*",
"(",
"-",
"9999",
")",
"# Set as no data",
"latitude",
"=",
"dataset_in",
".",
"latitude",
"longitude",
"=",
"dataset_in",
".",
"longitude",
"result",
"=",
"result",
".",
"reshape",
"(",
"latitude",
".",
"size",
",",
"longitude",
".",
"size",
",",
"3",
")",
"pv_band",
"=",
"result",
"[",
":",
",",
":",
",",
"0",
"]",
"npv_band",
"=",
"result",
"[",
":",
",",
":",
",",
"1",
"]",
"bs_band",
"=",
"result",
"[",
":",
",",
":",
",",
"2",
"]",
"pv_clean",
"=",
"np",
".",
"full",
"(",
"pv_band",
".",
"shape",
",",
"-",
"9999",
")",
"npv_clean",
"=",
"np",
".",
"full",
"(",
"npv_band",
".",
"shape",
",",
"-",
"9999",
")",
"bs_clean",
"=",
"np",
".",
"full",
"(",
"bs_band",
".",
"shape",
",",
"-",
"9999",
")",
"pv_clean",
"[",
"clean_mask",
"]",
"=",
"pv_band",
"[",
"clean_mask",
"]",
"npv_clean",
"[",
"clean_mask",
"]",
"=",
"npv_band",
"[",
"clean_mask",
"]",
"bs_clean",
"[",
"clean_mask",
"]",
"=",
"bs_band",
"[",
"clean_mask",
"]",
"rapp_bands",
"=",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"'bs'",
",",
"(",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"bs_band",
")",
")",
",",
"(",
"'pv'",
",",
"(",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"pv_band",
")",
")",
",",
"(",
"'npv'",
",",
"(",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"npv_band",
")",
")",
"]",
")",
"rapp_dataset",
"=",
"xr",
".",
"Dataset",
"(",
"rapp_bands",
",",
"coords",
"=",
"{",
"'latitude'",
":",
"latitude",
",",
"'longitude'",
":",
"longitude",
"}",
")",
"return",
"rapp_dataset"
] | [
22,
0
] | [
133,
23
] | python | en | ['en', 'error', 'th'] | False |
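A sketch of building the required boolean clean mask before classifying; the product name mirrors the one used in main() below, the bounding box and dates are illustrative, and cf_mask == 0 meaning "clear" is an assumption about the cfmask encoding:

```python
import datacube

dc = datacube.Datacube(app='frac-cov-example')
dataset_in = dc.load(product='ls7_ledaps', lon=(36.1, 36.2), lat=(0.4, 0.5),
                     time=('2015-01-01', '2015-12-31'))
dataset_in = dataset_in.isel(time=0)         # the classifier assumes one acquisition
clean_mask = dataset_in.cf_mask.values == 0  # True where the pixel is clear
frac = frac_coverage_classify(dataset_in, clean_mask)
print(frac.bs.shape, frac.pv.shape, frac.npv.shape)
```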
main | (platform, product_type, min_lon, max_lon, min_lat, max_lat, start_date, end_date, dc_config) |
Description:
Command-line fractional coverage tool - TODO
Assumptions:
The command-line tool assumes there is a measurement called cf_mask
Inputs:
platform (str)
product_type (str)
min_lon (str)
max_lon (str)
min_lat (str)
max_lat (str)
start_date (str)
end_date (str)
dc_config (str)
|
Description:
Command-line fractional coverage tool - TODO
Assumptions:
The command-line tool assumes there is a measurement called cf_mask
Inputs:
platform (str)
product_type (str)
min_lon (str)
max_lon (str)
min_lat (str)
max_lat (str)
start_date (str)
end_date (str)
dc_config (str)
| def main(platform, product_type, min_lon, max_lon, min_lat, max_lat, start_date, end_date, dc_config):
"""
Description:
Command-line fractional coverage tool - TODO
Assumptions:
The command-line tool assumes there is a measurement called cf_mask
Inputs:
platform (str)
product_type (str)
min_lon (str)
max_lon (str)
min_lat (str)
max_lat (str)
start_date (str)
end_date (str)
dc_config (str)
"""
# Initialize data cube object
dc = datacube.Datacube(config=dc_config, app='dc-frac-cov')
products = dc.list_products()
platform_names = set([product[6] for product in products.values])
if platform not in platform_names:
print('ERROR: Invalid platform.')
print('Valid platforms are:')
for name in platform_names:
print(name)
return
product_names = [product[0] for product in products.values]
if product_type not in product_names:
print('ERROR: Invalid product type.')
print('Valid product types are:')
for name in product_names:
print(name)
return
try:
min_lon = float(min_lon)
max_lon = float(max_lon)
min_lat = float(min_lat)
max_lat = float(max_lat)
except (TypeError, ValueError):
print('ERROR: Longitudes/Latitudes must be float values')
return
try:
start_date_str = start_date
end_date_str = end_date
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except (TypeError, ValueError):
print('ERROR: Invalid date format. Date format: YYYY-MM-DD')
return
if not os.path.exists(dc_config):
print('ERROR: Invalid file path for dc_config')
return
# Retrieve data from Data Cube
dataset_in = dc.load(
platform=platform,
product=product_type,
time=(start_date, end_date),
lon=(min_lon, max_lon),
lat=(min_lat, max_lat))
# Get information needed for saving as GeoTIFF
# Spatial ref
crs = dataset_in.crs
spatial_ref = utilities.get_spatial_ref(crs)
# Upper left coordinates
ul_lon = dataset_in.longitude.values[0]
ul_lat = dataset_in.latitude.values[0]
# Resolution
products = dc.list_products()
resolution = products.resolution[products.name == product_type]
lon_dist = resolution.values[0][1]
lat_dist = resolution.values[0][0]
# Rotation
lon_rtn = 0
lat_rtn = 0
geotransform = (ul_lon, lon_dist, lon_rtn, ul_lat, lat_rtn, lat_dist)
clean_mask = dataset_in.cf_mask.values == 0 # assumes cfmask convention: 0 == clear
dataset_out = frac_coverage_classify(dataset_in, clean_mask)
out_file = (str(min_lon) + '_' + str(min_lat) + '_' + start_date_str + '_' + end_date_str + '_frac_coverage.tif')
utilities.save_to_geotiff(out_file, gdal.GDT_Float32, dataset_out, geotransform, spatial_ref) | [
"def",
"main",
"(",
"platform",
",",
"product_type",
",",
"min_lon",
",",
"max_lon",
",",
"min_lat",
",",
"max_lat",
",",
"start_date",
",",
"end_date",
",",
"dc_config",
")",
":",
"# Initialize data cube object",
"dc",
"=",
"datacube",
".",
"Datacube",
"(",
"config",
"=",
"dc_config",
",",
"app",
"=",
"'dc-frac-cov'",
")",
"products",
"=",
"dc",
".",
"list_products",
"(",
")",
"platform_names",
"=",
"set",
"(",
"[",
"product",
"[",
"6",
"]",
"for",
"product",
"in",
"products",
".",
"values",
"]",
")",
"if",
"platform",
"not",
"in",
"platform_names",
":",
"print",
"(",
"'ERROR: Invalid platform.'",
")",
"print",
"(",
"'Valid platforms are:'",
")",
"for",
"name",
"in",
"platform_names",
":",
"print",
"(",
"name",
")",
"return",
"product_names",
"=",
"[",
"product",
"[",
"0",
"]",
"for",
"product",
"in",
"products",
".",
"values",
"]",
"if",
"product_type",
"not",
"in",
"product_names",
":",
"print",
"(",
"'ERROR: Invalid product type.'",
")",
"print",
"(",
"'Valid product types are:'",
")",
"for",
"name",
"in",
"product_names",
":",
"print",
"(",
"name",
")",
"return",
"try",
":",
"min_lon",
"=",
"float",
"(",
"args",
".",
"min_lon",
")",
"max_lon",
"=",
"float",
"(",
"args",
".",
"max_lon",
")",
"min_lat",
"=",
"float",
"(",
"args",
".",
"min_lat",
")",
"max_lat",
"=",
"float",
"(",
"args",
".",
"max_lat",
")",
"except",
":",
"print",
"(",
"'ERROR: Longitudes/Latitudes must be float values'",
")",
"return",
"try",
":",
"start_date_str",
"=",
"start_date",
"end_date_str",
"=",
"end_date",
"start_date",
"=",
"datetime",
".",
"strptime",
"(",
"start_date",
",",
"'%Y-%m-%d'",
")",
"end_date",
"=",
"datetime",
".",
"strptime",
"(",
"end_date",
",",
"'%Y-%m-%d'",
")",
"except",
":",
"print",
"(",
"'ERROR: Invalid date format. Date format: YYYY-MM-DD'",
")",
"return",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dc_config",
")",
":",
"print",
"(",
"'ERROR: Invalid file path for dc_config'",
")",
"return",
"# Retrieve data from Data Cube",
"dataset_in",
"=",
"dc",
".",
"load",
"(",
"platform",
"=",
"platform",
",",
"product",
"=",
"product_type",
",",
"time",
"=",
"(",
"start_date",
",",
"end_date",
")",
",",
"lon",
"=",
"(",
"min_lon",
",",
"max_lon",
")",
",",
"lat",
"=",
"(",
"min_lat",
",",
"max_lat",
")",
")",
"# Get information needed for saving as GeoTIFF",
"# Spatial ref",
"crs",
"=",
"dataset_in",
".",
"crs",
"spatial_ref",
"=",
"utilities",
".",
"get_spatial_ref",
"(",
"crs",
")",
"# Upper left coordinates",
"ul_lon",
"=",
"dataset_in",
".",
"longitude",
".",
"values",
"[",
"0",
"]",
"ul_lat",
"=",
"dataset_in",
".",
"latitude",
".",
"values",
"[",
"0",
"]",
"# Resolution",
"products",
"=",
"dc",
".",
"list_products",
"(",
")",
"resolution",
"=",
"products",
".",
"resolution",
"[",
"products",
".",
"name",
"==",
"'ls7_ledaps'",
"]",
"lon_dist",
"=",
"resolution",
".",
"values",
"[",
"0",
"]",
"[",
"1",
"]",
"lat_dist",
"=",
"resolution",
".",
"values",
"[",
"0",
"]",
"[",
"0",
"]",
"# Rotation",
"lon_rtn",
"=",
"0",
"lat_rtn",
"=",
"0",
"geotransform",
"=",
"(",
"ul_lon",
",",
"lon_dist",
",",
"lon_rtn",
",",
"ul_lat",
",",
"lat_rtn",
",",
"lat_dist",
")",
"dataset_out",
"=",
"frac_coverage_classify",
"(",
"dataset_in",
")",
"out_file",
"=",
"(",
"str",
"(",
"min_lon",
")",
"+",
"'_'",
"+",
"str",
"(",
"min_lat",
")",
"+",
"'_'",
"+",
"start_date_str",
"+",
"'_'",
"+",
"end_date_str",
"+",
"'_frac_coverage.tif'",
")",
"utilities",
".",
"save_to_geotiff",
"(",
"out_file",
",",
"gdal",
".",
"GDT_Float32",
",",
"dataset_out",
",",
"geotransform",
",",
"spatial_ref",
")"
] | [
136,
0
] | [
230,
97
] | python | en | ['en', 'error', 'th'] | False |
TemporalModelBase.receptive_field | (self) |
Return the total receptive field of this model as # of frames.
|
Return the total receptive field of this model as # of frames.
| def receptive_field(self):
"""
Return the total receptive field of this model as # of frames.
"""
frames = 0
for f in self.pad:
frames += f
return 1 + 2 * frames | [
"def",
"receptive_field",
"(",
"self",
")",
":",
"frames",
"=",
"0",
"for",
"f",
"in",
"self",
".",
"pad",
":",
"frames",
"+=",
"f",
"return",
"1",
"+",
"2",
"*",
"frames"
] | [
41,
4
] | [
48,
29
] | python | en | ['en', 'error', 'th'] | False |
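A worked example of the arithmetic, assuming the base constructor seeds pad with filter_widths[0] // 2 and each block below appends (w - 1) * dilation // 2; for filter_widths = [3, 3, 3] that gives pad = [1, 3, 9]:

```python
pad = [1, 3, 9]          # half-extent of each dilated convolution block
print(1 + 2 * sum(pad))  # 27 frames, i.e. 3 ** 3
```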
TemporalModelBase.total_causal_shift | (self) |
Return the asymmetric offset for sequence padding.
The returned value is typically 0 if causal convolutions are disabled,
otherwise it is half the receptive field.
|
Return the asymmetric offset for sequence padding.
The returned value is typically 0 if causal convolutions are disabled,
otherwise it is half the receptive field.
| def total_causal_shift(self):
"""
Return the asymmetric offset for sequence padding.
The returned value is typically 0 if causal convolutions are disabled,
otherwise it is half the receptive field.
"""
frames = self.causal_shift[0]
next_dilation = self.filter_widths[0]
for i in range(1, len(self.filter_widths)):
frames += self.causal_shift[i] * next_dilation
next_dilation *= self.filter_widths[i]
return frames | [
"def",
"total_causal_shift",
"(",
"self",
")",
":",
"frames",
"=",
"self",
".",
"causal_shift",
"[",
"0",
"]",
"next_dilation",
"=",
"self",
".",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"filter_widths",
")",
")",
":",
"frames",
"+=",
"self",
".",
"causal_shift",
"[",
"i",
"]",
"*",
"next_dilation",
"next_dilation",
"*=",
"self",
".",
"filter_widths",
"[",
"i",
"]",
"return",
"frames"
] | [
50,
4
] | [
61,
21
] | python | en | ['en', 'error', 'th'] | False |
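A worked example matching the loop above for the strided variant, whose causal_shift holds per-block half-widths ([1, 1, 1] for filter_widths = [3, 3, 3] with causal=True):

```python
causal_shift, filter_widths = [1, 1, 1], [3, 3, 3]
frames, next_dilation = causal_shift[0], filter_widths[0]
for i in range(1, len(filter_widths)):
    frames += causal_shift[i] * next_dilation
    next_dilation *= filter_widths[i]
print(frames)  # 13, i.e. half of the 27-frame receptive field
```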
TemporalModel.__init__ | (self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024, dense=False) |
Initialize this model.
Arguments:
num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
in_features -- number of input features for each joint (typically 2 for 2D input)
num_joints_out -- number of output joints (can be different than input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
|
Initialize this model. | def __init__(self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024, dense=False):
"""
Initialize this model.
Arguments:
num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
in_features -- number of input features for each joint (typically 2 for 2D input)
num_joints_out -- number of output joints (can be different than input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
"""
super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0], groups=1, bias=False)
layers_conv = []
layers_bn = []
self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
next_dilation = filter_widths[0]
for i in range(1, len(filter_widths)):
self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
layers_conv.append(nn.Conv1d(channels, channels,
filter_widths[i] if not dense else (2 * self.pad[-1] + 1),
dilation=next_dilation if not dense else 1, groups=1,
bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False, groups=1))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
next_dilation *= filter_widths[i]
self.layers_conv = nn.ModuleList(layers_conv)
self.layers_bn = nn.ModuleList(layers_bn) | [
"def",
"__init__",
"(",
"self",
",",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
"=",
"False",
",",
"dropout",
"=",
"0.25",
",",
"channels",
"=",
"1024",
",",
"dense",
"=",
"False",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
",",
"dropout",
",",
"channels",
")",
"self",
".",
"expand_conv",
"=",
"nn",
".",
"Conv1d",
"(",
"num_joints_in",
"*",
"in_features",
",",
"channels",
",",
"filter_widths",
"[",
"0",
"]",
",",
"groups",
"=",
"1",
",",
"bias",
"=",
"False",
")",
"layers_conv",
"=",
"[",
"]",
"layers_bn",
"=",
"[",
"]",
"self",
".",
"causal_shift",
"=",
"[",
"(",
"filter_widths",
"[",
"0",
"]",
")",
"//",
"2",
"if",
"causal",
"else",
"0",
"]",
"next_dilation",
"=",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"self",
".",
"pad",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"-",
"1",
")",
"*",
"next_dilation",
"//",
"2",
")",
"self",
".",
"causal_shift",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"//",
"2",
"*",
"next_dilation",
")",
"if",
"causal",
"else",
"0",
")",
"layers_conv",
".",
"append",
"(",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"channels",
",",
"filter_widths",
"[",
"i",
"]",
"if",
"not",
"dense",
"else",
"(",
"2",
"*",
"self",
".",
"pad",
"[",
"-",
"1",
"]",
"+",
"1",
")",
",",
"dilation",
"=",
"next_dilation",
"if",
"not",
"dense",
"else",
"1",
",",
"groups",
"=",
"1",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"layers_conv",
".",
"append",
"(",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"channels",
",",
"1",
",",
"dilation",
"=",
"1",
",",
"bias",
"=",
"False",
",",
"groups",
"=",
"1",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"next_dilation",
"*=",
"filter_widths",
"[",
"i",
"]",
"self",
".",
"layers_conv",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_conv",
")",
"self",
".",
"layers_bn",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_bn",
")"
] | [
92,
4
] | [
131,
49
] | python | en | ['en', 'error', 'th'] | False |
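A minimal instantiation sketch; the (batch, frames, joints, features) input layout and the single-frame output are assumptions about the base class's forward, which this record does not show:

```python
import torch

model = TemporalModel(num_joints_in=17, in_features=2, num_joints_out=17,
                      filter_widths=[3, 3, 3])
frames = model.receptive_field()   # 27 for filter_widths [3, 3, 3]
x = torch.randn(1, frames, 17, 2)  # (batch, frames, joints, features)
print(model(x).shape)              # expected: torch.Size([1, 1, 17, 3])
```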
TemporalModelOptimized1f.__init__ | (self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024) |
Initialize this model.
Arguments:
num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
in_features -- number of input features for each joint (typically 2 for 2D input)
num_joints_out -- number of output joints (can be different than input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
|
Initialize this model. | def __init__(self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024):
"""
Initialize this model.
Arguments:
num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
in_features -- number of input features for each joint (typically 2 for 2D input)
num_joints_out -- number of output joints (can be different than input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
"""
super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0], stride=filter_widths[0], groups=1, bias=False)
layers_conv = []
layers_bn = []
self.causal_shift = [(filter_widths[0] // 2) if causal else 0]
next_dilation = filter_widths[0]
for i in range(1, len(filter_widths)):
self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
self.causal_shift.append((filter_widths[i] // 2) if causal else 0)
layers_conv.append(nn.Conv1d(channels, channels, filter_widths[i], stride=filter_widths[i], groups=1, bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, groups=1, bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
next_dilation *= filter_widths[i]
self.layers_conv = nn.ModuleList(layers_conv)
self.layers_bn = nn.ModuleList(layers_bn) | [
"def",
"__init__",
"(",
"self",
",",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
"=",
"False",
",",
"dropout",
"=",
"0.25",
",",
"channels",
"=",
"1024",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
",",
"dropout",
",",
"channels",
")",
"self",
".",
"expand_conv",
"=",
"nn",
".",
"Conv1d",
"(",
"num_joints_in",
"*",
"in_features",
",",
"channels",
",",
"filter_widths",
"[",
"0",
"]",
",",
"stride",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"groups",
"=",
"1",
",",
"bias",
"=",
"False",
")",
"layers_conv",
"=",
"[",
"]",
"layers_bn",
"=",
"[",
"]",
"self",
".",
"causal_shift",
"=",
"[",
"(",
"filter_widths",
"[",
"0",
"]",
"//",
"2",
")",
"if",
"causal",
"else",
"0",
"]",
"next_dilation",
"=",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"self",
".",
"pad",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"-",
"1",
")",
"*",
"next_dilation",
"//",
"2",
")",
"self",
".",
"causal_shift",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"//",
"2",
")",
"if",
"causal",
"else",
"0",
")",
"layers_conv",
".",
"append",
"(",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"channels",
",",
"filter_widths",
"[",
"i",
"]",
",",
"stride",
"=",
"filter_widths",
"[",
"i",
"]",
",",
"groups",
"=",
"1",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"layers_conv",
".",
"append",
"(",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"channels",
",",
"1",
",",
"dilation",
"=",
"1",
",",
"groups",
"=",
"1",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"next_dilation",
"*=",
"filter_widths",
"[",
"i",
"]",
"self",
".",
"layers_conv",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_conv",
")",
"self",
".",
"layers_bn",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_bn",
")"
] | [
159,
4
] | [
193,
49
] | python | en | ['en', 'error', 'th'] | False |
Same_Model.__init__ | (self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024) |
Initialize this model.
New Arguments:
FlexGroupLayer: Use this function with different group strategies
self.ref_pad: it is recommended to use nn.ReflectionPad1d so the output keeps the same temporal size as the 2D inputs.
|
Initialize this model. | def __init__(self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024):
"""
Initialize this model.
New Arguments:
FlexGroupLayer: Use this function with different group strategies
self.ref_pad: it is recommended to use nn.ReflectionPad1d so the output keeps the same temporal size as the 2D inputs.
"""
mode = 'replicate' #padding mode: reflect, replicate, zeros
super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, kernel_size=filter_widths[0], bias=False)
layers_conv = []
layers_bn = []
self.ref_pad = []
self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
next_dilation = filter_widths[0]
for i in range(1, len(filter_widths)):
self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
layers_conv.append(nn.Conv1d(channels, channels, kernel_size=filter_widths[0], dilation=next_dilation, bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
layers_conv.append(nn.Conv1d(channels, channels, kernel_size=1, dilation=1, bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
self.ref_pad.append(nn.ReplicationPad1d(next_dilation))
#self.ref_pad.append(nn.ReflectionPad1d(next_dilation))
next_dilation *= filter_widths[i]
#self.reflec = nn.ReflectionPad1d(1)
self.reflec = nn.ReplicationPad1d(1)
self.layers_conv = nn.ModuleList(layers_conv)
self.layers_bn = nn.ModuleList(layers_bn)
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.final_layer = nn.Conv1d(channels, num_joints_out * 3, kernel_size=1, bias=True) | [
"def",
"__init__",
"(",
"self",
",",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
"=",
"False",
",",
"dropout",
"=",
"0.25",
",",
"channels",
"=",
"1024",
")",
":",
"mode",
"=",
"'replicate'",
"#padding mode: reflect, replicate, zeros",
"super",
"(",
")",
".",
"__init__",
"(",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
",",
"dropout",
",",
"channels",
")",
"self",
".",
"expand_conv",
"=",
"nn",
".",
"Conv1d",
"(",
"num_joints_in",
"*",
"in_features",
",",
"channels",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"bias",
"=",
"False",
")",
"layers_conv",
"=",
"[",
"]",
"layers_bn",
"=",
"[",
"]",
"self",
".",
"ref_pad",
"=",
"[",
"]",
"self",
".",
"causal_shift",
"=",
"[",
"(",
"filter_widths",
"[",
"0",
"]",
")",
"//",
"2",
"if",
"causal",
"else",
"0",
"]",
"next_dilation",
"=",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"self",
".",
"pad",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"-",
"1",
")",
"*",
"next_dilation",
"//",
"2",
")",
"self",
".",
"causal_shift",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"//",
"2",
"*",
"next_dilation",
")",
"if",
"causal",
"else",
"0",
")",
"layers_conv",
".",
"append",
"(",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"channels",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"dilation",
"=",
"next_dilation",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"layers_conv",
".",
"append",
"(",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"channels",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"self",
".",
"ref_pad",
".",
"append",
"(",
"nn",
".",
"ReplicationPad1d",
"(",
"next_dilation",
")",
")",
"#self.ref_pad.append(nn.ReflectionPad1d(next_dilation))",
"next_dilation",
"*=",
"filter_widths",
"[",
"i",
"]",
"#self.reflec = nn.ReflectionPad1d(1)",
"self",
".",
"reflec",
"=",
"nn",
".",
"ReplicationPad1d",
"(",
"1",
")",
"self",
".",
"layers_conv",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_conv",
")",
"self",
".",
"layers_bn",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_bn",
")",
"self",
".",
"avg_pool",
"=",
"nn",
".",
"AdaptiveAvgPool1d",
"(",
"1",
")",
"self",
".",
"final_layer",
"=",
"nn",
".",
"Conv1d",
"(",
"channels",
",",
"num_joints_out",
"*",
"3",
",",
"kernel_size",
"=",
"1",
",",
"bias",
"=",
"True",
")"
] | [
217,
4
] | [
253,
92
] | python | en | ['en', 'error', 'th'] | False |
FlockTool.Dispatch | (self, args) | Dispatches a string command to a method. | Dispatches a string command to a method. | def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:]) | [
"def",
"Dispatch",
"(",
"self",
",",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"\"Not enough arguments\"",
")",
"method",
"=",
"\"Exec%s\"",
"%",
"self",
".",
"_CommandifyName",
"(",
"args",
"[",
"0",
"]",
")",
"getattr",
"(",
"self",
",",
"method",
")",
"(",
"*",
"args",
"[",
"1",
":",
"]",
")"
] | [
22,
2
] | [
28,
36
] | python | en | ['en', 'en', 'en'] | True |
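Dispatch converts a hyphenated command string into an Exec*-prefixed method name and invokes it via getattr. A self-contained usage sketch; FakeTool and its command are hypothetical, only the dispatch pattern is taken from the row above:

# Hypothetical tool demonstrating the getattr-based command dispatch.
class FakeTool(object):
    def Dispatch(self, args):
        if len(args) < 1:
            raise Exception("Not enough arguments")
        method = "Exec%s" % args[0].title().replace('-', '')  # same mapping as _CommandifyName
        getattr(self, method)(*args[1:])

    def ExecSayHello(self, name):
        print("hello, %s" % name)

FakeTool().Dispatch(["say-hello", "world"])  # resolves to ExecSayHello("world")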
FlockTool._CommandifyName | (self, name_string) | Transforms a tool name like copy-info-plist to CopyInfoPlist | Transforms a tool name like copy-info-plist to CopyInfoPlist | def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '') | [
"def",
"_CommandifyName",
"(",
"self",
",",
"name_string",
")",
":",
"return",
"name_string",
".",
"title",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")"
] | [
30,
2
] | [
32,
47
] | python | en | ['en', 'pl', 'en'] | True |
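_CommandifyName is the helper that performs the name mapping used by Dispatch; the transformation itself is a one-liner:

# Title-case each hyphen-separated word, then drop the hyphens.
assert "copy-info-plist".title().replace('-', '') == "CopyInfoPlist"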
FlockTool.ExecFlock | (self, lockfile, *cmd_list) | Emulates the most basic behavior of Linux's flock(1). | Emulates the most basic behavior of Linux's flock(1). | def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list) | [
"def",
"ExecFlock",
"(",
"self",
",",
"lockfile",
",",
"*",
"cmd_list",
")",
":",
"# Rely on exception handling to report errors.",
"# Note that the stock python on SunOS has a bug",
"# where fcntl.flock(fd, LOCK_EX) always fails",
"# with EBADF, that's why we use this F_SETLK",
"# hack instead.",
"fd",
"=",
"os",
".",
"open",
"(",
"lockfile",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_NOCTTY",
"|",
"os",
".",
"O_CREAT",
",",
"0666",
")",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'aix'",
")",
":",
"# Python on AIX is compiled with LARGEFILE support, which changes the",
"# struct size.",
"op",
"=",
"struct",
".",
"pack",
"(",
"'hhIllqq'",
",",
"fcntl",
".",
"F_WRLCK",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"else",
":",
"op",
"=",
"struct",
".",
"pack",
"(",
"'hhllhhl'",
",",
"fcntl",
".",
"F_WRLCK",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_SETLK",
",",
"op",
")",
"return",
"subprocess",
".",
"call",
"(",
"cmd_list",
")"
] | [
34,
2
] | [
49,
36
] | python | en | ['en', 'da', 'en'] | True |
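ExecFlock takes an exclusive write lock with fcntl before running the command, building the lock struct by hand to work around a SunOS flock() bug. Note the row above is Python 2 code (the bare 0666 octal literal). A Python 3 sketch of the same idea, assuming a Unix platform (the fcntl module does not exist on Windows); lockf() is a stdlib wrapper around the same fcntl() record-locking calls, so the struct.pack step disappears:

# Python 3 sketch of the flock(1)-style behavior above.
import fcntl
import os
import subprocess
import sys

def run_locked(lockfile, *cmd_list):
    fd = os.open(lockfile, os.O_WRONLY | os.O_NOCTTY | os.O_CREAT, 0o666)
    # The original uses F_SETLK (non-blocking: fails on contention);
    # LOCK_EX blocks instead. Add fcntl.LOCK_NB to mirror the original.
    fcntl.lockf(fd, fcntl.LOCK_EX)
    try:
        return subprocess.call(list(cmd_list))
    finally:
        os.close(fd)  # closing the descriptor releases the lock

if __name__ == "__main__":
    sys.exit(run_locked("/tmp/demo.lock", "echo", "locked section"))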
call_redirector_t.__init__ | (self, name, decls) | creates call_redirector_t instance.
:param name: name of method, to be called on every object in the
`decls` list
:param decls: list of objects
| creates call_redirector_t instance. | def __init__(self, name, decls):
"""creates call_redirector_t instance.
:param name: name of method, to be called on every object in the
`decls` list
:param decls: list of objects
"""
object.__init__(self)
self.name = name
self.decls = decls | [
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"decls",
")",
":",
"object",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"name",
"=",
"name",
"self",
".",
"decls",
"=",
"decls"
] | [
21,
4
] | [
30,
26
] | python | en | ['en', 'en', 'en'] | True |
call_redirector_t.__call__ | (self, *arguments, **keywords) | calls method :attr:`call_redirector_t.name` on every object
within the :attr:`call_redirector_t.decls` list | calls method :attr:`call_redirector_t.name` on every object
within the :attr:`call_redirector_t.decls` list | def __call__(self, *arguments, **keywords):
"""calls method :attr:`call_redirector_t.name` on every object
within the :attr:`call_redirector_t.decls` list"""
for d in self.decls:
callable_ = getattr(d, self.name)
callable_(*arguments, **keywords) | [
"def",
"__call__",
"(",
"self",
",",
"*",
"arguments",
",",
"*",
"*",
"keywords",
")",
":",
"for",
"d",
"in",
"self",
".",
"decls",
":",
"callable_",
"=",
"getattr",
"(",
"d",
",",
"self",
".",
"name",
")",
"callable_",
"(",
"*",
"arguments",
",",
"*",
"*",
"keywords",
")"
] | [
32,
4
] | [
37,
45
] | python | en | ['en', 'en', 'en'] | True |
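Together, __init__ and __call__ make call_redirector_t fan a single method call out to every object in a list. A small usage sketch; the decl class and its exclude method are hypothetical stand-ins for the declaration objects the class is normally used with:

# Hypothetical usage of the call_redirector_t pattern from the rows above.
class call_redirector_t(object):
    def __init__(self, name, decls):
        self.name = name
        self.decls = decls

    def __call__(self, *arguments, **keywords):
        for d in self.decls:
            getattr(d, self.name)(*arguments, **keywords)

class decl(object):
    def __init__(self, name):
        self.name_ = name
    def exclude(self, reason):
        print("excluding %s: %s" % (self.name_, reason))

# One call on the redirector invokes exclude() on every declaration.
call_redirector_t("exclude", [decl("f"), decl("g")])("deprecated")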