code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
async def test_replace_option_prompt_with_invalid_index() -> None:
    """Attempting to replace the prompt of an option index that doesn't exist should raise an exception."""
    async with OptionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(OptionList)
        # Index 23 is well beyond the options the app creates.
        with pytest.raises(OptionDoesNotExist):
            option_list.replace_option_prompt_at_index(23, "new-prompt")
Attempting to replace the prompt of an option index that doesn't exist should raise an exception.
test_replace_option_prompt_with_invalid_index
python
Textualize/textual
tests/option_list/test_option_prompt_replacement.py
https://github.com/Textualize/textual/blob/master/tests/option_list/test_option_prompt_replacement.py
MIT
async def test_replace_option_prompt_with_valid_id() -> None:
    """It should be possible to replace the prompt of an option ID that does exist."""
    async with OptionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(OptionList)
        option_list.replace_option_prompt("0", "new-prompt")
        assert option_list.get_option("0").prompt == "new-prompt"
It should be possible to replace the prompt of an option ID that does exist.
test_replace_option_prompt_with_valid_id
python
Textualize/textual
tests/option_list/test_option_prompt_replacement.py
https://github.com/Textualize/textual/blob/master/tests/option_list/test_option_prompt_replacement.py
MIT
async def test_replace_option_prompt_with_valid_index() -> None:
    """It should be possible to replace the prompt of an option index that does exist."""
    async with OptionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(OptionList)
        # Mutate first, then query. The original chained the mutator call
        # into the assignment of `option_list`, which is inconsistent with
        # the sibling tests and would break if the mutator returned None.
        option_list.replace_option_prompt_at_index(1, "new-prompt")
        assert option_list.get_option_at_index(1).prompt == "new-prompt"
It should be possible to replace the prompt of an option index that does exist.
test_replace_option_prompt_with_valid_index
python
Textualize/textual
tests/option_list/test_option_prompt_replacement.py
https://github.com/Textualize/textual/blob/master/tests/option_list/test_option_prompt_replacement.py
MIT
async def test_replace_single_line_option_prompt_with_multiple() -> None:
    """It should be possible to replace a single line prompt with multiple lines."""
    new_prompt = "new-prompt\nsecond line"
    async with OptionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(OptionList)
        option_list.replace_option_prompt("0", new_prompt)
        assert option_list.get_option("0").prompt == new_prompt
It should be possible to replace single line prompt with multiple lines
test_replace_single_line_option_prompt_with_multiple
python
Textualize/textual
tests/option_list/test_option_prompt_replacement.py
https://github.com/Textualize/textual/blob/master/tests/option_list/test_option_prompt_replacement.py
MIT
async def test_replace_multiple_line_option_prompt_with_single() -> None:
    """It should be possible to replace multiple line prompt with a single line."""
    new_prompt = "new-prompt"
    async with OptionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(OptionList)
        option_list.replace_option_prompt("0", new_prompt)
        assert option_list.get_option("0").prompt == new_prompt
It should be possible to replace multiple line prompt with a single line
test_replace_multiple_line_option_prompt_with_single
python
Textualize/textual
tests/option_list/test_option_prompt_replacement.py
https://github.com/Textualize/textual/blob/master/tests/option_list/test_option_prompt_replacement.py
MIT
async def test_replace_multiple_line_option_prompt_with_multiple() -> None:
    """It should be possible to replace multiple line prompt with multiple lines."""
    new_prompt = "new-prompt\nsecond line"
    async with OptionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(OptionList)
        option_list.replace_option_prompt_at_index(1, new_prompt)
        assert option_list.get_option_at_index(1).prompt == new_prompt
It should be possible to replace multiple line prompt with multiple lines
test_replace_multiple_line_option_prompt_with_multiple
python
Textualize/textual
tests/option_list/test_option_prompt_replacement.py
https://github.com/Textualize/textual/blob/master/tests/option_list/test_option_prompt_replacement.py
MIT
async def test_checkbox_initial_state() -> None:
    """The initial states of the check boxes should be as we specified."""
    async with CheckboxApp().run_test() as pilot:
        # Query once and reuse; the expected pattern is off/off/on.
        checkboxes = list(pilot.app.query(Checkbox))
        assert [box.value for box in checkboxes] == [False, False, True]
        assert [box.has_class("-on") for box in checkboxes] == [False, False, True]
        # No toggles have happened yet, so no messages should have fired.
        assert pilot.app.events_received == []
The initial states of the check boxes should be as we specified.
test_checkbox_initial_state
python
Textualize/textual
tests/toggles/test_checkbox.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_checkbox.py
MIT
async def test_checkbox_toggle() -> None:
    """Test the status of the check boxes after they've been toggled."""
    async with CheckboxApp().run_test() as pilot:
        for checkbox in pilot.app.query(Checkbox):
            checkbox.toggle()
        values = [box.value for box in pilot.app.query(Checkbox)]
        classes = [box.has_class("-on") for box in pilot.app.query(Checkbox)]
        assert values == [True, True, False]
        assert classes == [True, True, False]
        # Let queued messages be processed before checking what was emitted.
        await pilot.pause()
        assert pilot.app.events_received == [
            ("cb1", True, True),
            ("cb2", True, True),
            ("cb3", False, True),
        ]
Test the status of the check boxes after they've been toggled.
test_checkbox_toggle
python
Textualize/textual
tests/toggles/test_checkbox.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_checkbox.py
MIT
async def test_change_labels() -> None:
    """It should be possible to change the labels of toggle buttons."""
    async with LabelChangeApp().run_test() as pilot:

        def toggle_buttons():
            # The three toggle-button flavours under test, queried fresh each time.
            return (
                pilot.app.query_one(Checkbox),
                pilot.app.query_one("Screen > RadioButton", RadioButton),
                pilot.app.query_one("RadioSet > RadioButton", RadioButton),
            )

        for button in toggle_buttons():
            assert button.label == Text("Before")
        for button in toggle_buttons():
            button.label = "After"
        await pilot.pause()
        for button in toggle_buttons():
            assert button.label == Text("After")
It should be possible to change the labels of toggle buttons.
test_change_labels
python
Textualize/textual
tests/toggles/test_labels.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_labels.py
MIT
async def test_radio_sets_initial_state():
    """The initial states of the radio sets should be as we specified."""
    async with RadioSetApp().run_test() as pilot:
        from_buttons = pilot.app.query_one("#from_buttons", RadioSet)
        from_strings = pilot.app.query_one("#from_strings", RadioSet)
        # The button-built set starts with its third button pressed...
        assert from_buttons.pressed_index == 2
        assert from_buttons.pressed_button is not None
        # ...while the string-built set starts with nothing pressed.
        assert from_strings.pressed_index == -1
        assert from_strings.pressed_button is None
        assert pilot.app.events_received == []
The initial states of the radio sets should be as we specified.
test_radio_sets_initial_state
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_click_sets_focus():
    """Clicking within a radio set should set focus."""
    async with RadioSetApp().run_test() as pilot:
        # Start from a known no-focus state.
        pilot.app.set_focus(None)
        assert pilot.app.screen.focused is None
        await pilot.click("#clickme")
        assert pilot.app.screen.focused == pilot.app.query_one("#from_buttons")
Clicking within a radio set should set focus.
test_click_sets_focus
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_radio_sets_toggle():
    """Test the status of the radio sets after they've been toggled."""
    async with RadioSetApp().run_test() as pilot:
        buttons_set = pilot.app.query_one("#from_buttons", RadioSet)
        strings_set = pilot.app.query_one("#from_strings", RadioSet)
        buttons_set._nodes[0].toggle()
        strings_set._nodes[2].toggle()
        await pilot.pause()
        assert buttons_set.pressed_index == 0
        assert buttons_set.pressed_button is not None
        assert strings_set.pressed_index == 2
        assert strings_set.pressed_button is not None
        assert pilot.app.events_received == [
            ("from_buttons", 0, [True, False, False]),
            ("from_strings", 2, [False, False, True]),
        ]
Test the status of the radio sets after they've been toggled.
test_radio_sets_toggle
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_radioset_same_button_mash():
    """Mashing the same button should have no effect."""
    async with RadioSetApp().run_test() as pilot:
        radio_set = pilot.app.query_one("#from_buttons", RadioSet)
        assert radio_set.pressed_index == 2
        # Toggling the already-pressed button must be a no-op.
        radio_set._nodes[2].toggle()
        assert radio_set.pressed_index == 2
        assert pilot.app.events_received == []
Mashing the same button should have no effect.
test_radioset_same_button_mash
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_radioset_inner_navigation():
    """Using the cursor keys should navigate between buttons in a set."""
    async with RadioSetApp().run_test() as pilot:
        # Each key press should land on the expected child index (wrapping).
        for key, landing in (
            ("down", 1),
            ("up", 0),
            ("right", 1),
            ("left", 0),
            ("up", 2),
            ("down", 0),
        ):
            await pilot.press(key, "enter")
            assert (
                pilot.app.query_one("#from_buttons", RadioSet).pressed_button
                == pilot.app.query_one("#from_buttons").children[landing]
            )
    async with RadioSetApp().run_test() as pilot:
        assert pilot.app.screen.focused is pilot.app.screen.query_one("#from_buttons")
        await pilot.press("tab")
        assert pilot.app.screen.focused is pilot.app.screen.query_one("#from_strings")
        assert pilot.app.query_one("#from_strings", RadioSet)._selected == 0
        await pilot.press("down")
        assert pilot.app.query_one("#from_strings", RadioSet)._selected == 1
Using the cursor keys should navigate between buttons in a set.
test_radioset_inner_navigation
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_radioset_breakout_navigation():
    """Shift/Tabbing while in a radioset should move to the previous/next focusable after the set itself."""
    async with RadioSetApp().run_test() as pilot:
        assert pilot.app.screen.focused is pilot.app.query_one("#from_buttons")
        await pilot.press("tab")
        assert pilot.app.screen.focused is pilot.app.query_one("#from_strings")
        await pilot.press("shift+tab")
        assert pilot.app.screen.focused is pilot.app.query_one("#from_buttons")
Shift/Tabbing while in a radioset should move to the previous/next focusable after the set itself.
test_radioset_breakout_navigation
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_there_can_only_be_one():
    """Adding multiple 'on' buttons should result in only one on."""
    async with BadRadioSetApp().run_test() as pilot:
        # Even though several buttons were created "on", the set enforces one.
        assert len(pilot.app.query("RadioButton.-on")) == 1
        assert pilot.app.query_one(RadioSet).pressed_index == 0
Adding multiple 'on' buttons should result in only one on.
test_there_can_only_be_one
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_keyboard_navigation_with_disabled_buttons():
    """Regression test for https://github.com/Textualize/textual/issues/3839."""
    app = RadioSetDisabledButtonsApp()
    async with app.run_test() as pilot:
        await pilot.press("enter")
        # Walk down then back up, selecting at each stop; disabled buttons
        # must be skipped in both directions.
        for _ in range(5):
            await pilot.press("down")
            await pilot.press("enter")
        for _ in range(5):
            await pilot.press("up")
            await pilot.press("enter")
        expected = ["1", "4", "5", "7", "1", "4", "1", "7", "5", "4", "1"]
        assert app.selected == expected
Regression test for https://github.com/Textualize/textual/issues/3839.
test_keyboard_navigation_with_disabled_buttons
python
Textualize/textual
tests/toggles/test_radioset.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radioset.py
MIT
async def test_radio_button_initial_state() -> None:
    """The initial states of the radio buttons should be as we specified."""
    async with RadioButtonApp().run_test() as pilot:
        buttons = list(pilot.app.query(RadioButton))
        assert [button.value for button in buttons] == [False, False, True]
        assert [button.has_class("-on") for button in buttons] == [False, False, True]
        # Nothing has been toggled, so no messages yet.
        assert pilot.app.events_received == []
The initial states of the radio buttons should be as we specified.
test_radio_button_initial_state
python
Textualize/textual
tests/toggles/test_radiobutton.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radiobutton.py
MIT
async def test_radio_button_toggle() -> None:
    """Test the status of the radio buttons after they've been toggled."""
    async with RadioButtonApp().run_test() as pilot:
        for button in pilot.app.query(RadioButton):
            button.toggle()
        values = [button.value for button in pilot.app.query(RadioButton)]
        classes = [button.has_class("-on") for button in pilot.app.query(RadioButton)]
        assert values == [True, True, False]
        assert classes == [True, True, False]
        # Drain the message queue before inspecting the emitted events.
        await pilot.pause()
        assert pilot.app.events_received == [
            ("rb1", True, True),
            ("rb2", True, True),
            ("rb3", False, True),
        ]
Test the status of the radio buttons after they've been toggled.
test_radio_button_toggle
python
Textualize/textual
tests/toggles/test_radiobutton.py
https://github.com/Textualize/textual/blob/master/tests/toggles/test_radiobutton.py
MIT
def replace_link_ids(render: str) -> str:
    """Normalize link IDs in *render* to a fixed value.

    Link IDs have a random ID and system path which is a problem for
    reproducible tests, so they are rewritten to a constant placeholder.
    """
    return re_link_ids.sub("id=0;foo\x1b", render)
Link IDs have a random ID and system path which is a problem for reproducible tests.
replace_link_ids
python
Textualize/textual
tests/utilities/render.py
https://github.com/Textualize/textual/blob/master/tests/utilities/render.py
MIT
def test_insert_range_text_no_newlines():
    """Ensuring we can do a simple replacement of text."""
    document = Document(TEXT)
    # Replace "must" (columns 2-6 of line 0) with "MUST".
    document.replace_range((0, 2), (0, 6), "MUST")
    expected_lines = [
        "I MUST not fear.",
        "Fear is the mind-killer.",
    ]
    assert document.lines == expected_lines
Ensuring we can do a simple replacement of text.
test_insert_range_text_no_newlines
python
Textualize/textual
tests/document/test_document_insert.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_insert.py
MIT
def test_delete_single_newline(document):
    """Testing deleting newline from right to left."""
    # The range is given end-before-start; the document must normalise it.
    result = document.replace_range((1, 0), (0, 16), "")
    assert result == EditResult(end_location=(0, 16), replaced_text="\n")
    assert document.lines == [
        "I must not fear.Fear is the mind-killer.",
        "I forgot the rest of the quote.",
        "Sorry Will.",
    ]
Testing deleting newline from right to left
test_delete_single_newline
python
Textualize/textual
tests/document/test_document_delete.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_delete.py
MIT
def test_delete_near_end_of_document(document):
    """Test deleting a range near the end of a document."""
    result = document.replace_range((1, 0), (3, 11), "")
    removed = (
        "Fear is the mind-killer.\n"
        "I forgot the rest of the quote.\n"
        "Sorry Will."
    )
    assert result == EditResult(end_location=(1, 0), replaced_text=removed)
    assert document.lines == ["I must not fear.", ""]
Test deleting a range near the end of a document.
test_delete_near_end_of_document
python
Textualize/textual
tests/document/test_document_delete.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_delete.py
MIT
def test_delete_multiple_lines_partially_spanned(document):
    """Deleting a selection that partially spans the first and final lines of the selection."""
    result = document.replace_range((0, 2), (2, 2), "")
    assert result == EditResult(
        end_location=(0, 2),
        replaced_text="must not fear.\nFear is the mind-killer.\nI ",
    )
    assert document.lines == ["I forgot the rest of the quote.", "Sorry Will."]
Deleting a selection that partially spans the first and final lines of the selection.
test_delete_multiple_lines_partially_spanned
python
Textualize/textual
tests/document/test_document_delete.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_delete.py
MIT
def test_delete_end_of_line(document):
    """Testing deleting newline from left to right."""
    result = document.replace_range((0, 16), (1, 0), "")
    assert result == EditResult(end_location=(0, 16), replaced_text="\n")
    assert document.lines == [
        "I must not fear.Fear is the mind-killer.",
        "I forgot the rest of the quote.",
        "Sorry Will.",
    ]
Testing deleting newline from left to right
test_delete_end_of_line
python
Textualize/textual
tests/document/test_document_delete.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_delete.py
MIT
def test_delete_single_line_excluding_newline(document):
    """Delete from the start to the end of the line."""
    result = document.replace_range((2, 0), (2, 31), "")
    assert result == EditResult(
        end_location=(2, 0),
        replaced_text="I forgot the rest of the quote.",
    )
    # The line's text is gone but the (now empty) line itself remains.
    assert document.lines == [
        "I must not fear.",
        "Fear is the mind-killer.",
        "",
        "Sorry Will.",
    ]
Delete from the start to the end of the line.
test_delete_single_line_excluding_newline
python
Textualize/textual
tests/document/test_document_delete.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_delete.py
MIT
def test_delete_single_line_including_newline(document):
    """Delete from the start of a line to the start of the line below."""
    result = document.replace_range((2, 0), (3, 0), "")
    assert result == EditResult(
        end_location=(2, 0),
        replaced_text="I forgot the rest of the quote.\n",
    )
    # Including the trailing newline removes the whole line.
    assert document.lines == [
        "I must not fear.",
        "Fear is the mind-killer.",
        "Sorry Will.",
    ]
Delete from the start of a line to the start of the line below.
test_delete_single_line_including_newline
python
Textualize/textual
tests/document/test_document_delete.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document_delete.py
MIT
def test_text(text):
    """The text we put in is the text we get out."""
    assert Document(text).text == text
The text we put in is the text we get out.
test_text
python
Textualize/textual
tests/document/test_document.py
https://github.com/Textualize/textual/blob/master/tests/document/test_document.py
MIT
def test_no_pipeline_requirements_txt(
    self, fake_project_cli, fake_metadata, fake_repo_path
):
    """No pipeline requirements.txt and no project requirements.txt does not
    create project requirements.txt.
    """
    # Remove the project-level requirements.txt before exercising the cycle.
    project_requirements_txt = fake_repo_path / "requirements.txt"
    project_requirements_txt.unlink()
    # Full package/delete/pull round trip for the pipeline.
    self.call_pipeline_create(fake_project_cli, fake_metadata)
    self.call_micropkg_package(fake_project_cli, fake_metadata)
    self.call_pipeline_delete(fake_project_cli, fake_metadata)
    self.call_micropkg_pull(fake_project_cli, fake_metadata, fake_repo_path)
    assert not project_requirements_txt.exists()
No pipeline requirements.txt and no project requirements.txt does not create project requirements.txt.
test_no_pipeline_requirements_txt
python
kedro-org/kedro
tests/framework/cli/micropkg/test_micropkg_requirements.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/cli/micropkg/test_micropkg_requirements.py
Apache-2.0
def test_empty_pipeline_requirements_txt(
    self, fake_project_cli, fake_metadata, fake_package_path, fake_repo_path
):
    """Empty pipeline requirements.txt and no project requirements.txt does
    not create project requirements.txt.
    """
    # Remove the project-level requirements.txt before exercising the cycle.
    project_requirements_txt = fake_repo_path / "requirements.txt"
    project_requirements_txt.unlink()
    self.call_pipeline_create(fake_project_cli, fake_metadata)
    # Create an empty pipeline-level requirements.txt.
    pipeline_requirements_txt = (
        fake_package_path / "pipelines" / PIPELINE_NAME / "requirements.txt"
    )
    pipeline_requirements_txt.touch()
    self.call_micropkg_package(fake_project_cli, fake_metadata)
    self.call_pipeline_delete(fake_project_cli, fake_metadata)
    self.call_micropkg_pull(fake_project_cli, fake_metadata, fake_repo_path)
    assert not project_requirements_txt.exists()
Empty pipeline requirements.txt and no project requirements.txt does not create project requirements.txt.
test_empty_pipeline_requirements_txt
python
kedro-org/kedro
tests/framework/cli/micropkg/test_micropkg_requirements.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/cli/micropkg/test_micropkg_requirements.py
Apache-2.0
def test_complex_requirements(
    self, requirement, fake_project_cli, fake_metadata, fake_package_path
):
    """Options that are valid in requirements.txt but cannot be packaged in pyproject.toml."""
    self.call_pipeline_create(fake_project_cli, fake_metadata)
    pipeline_requirements_txt = (
        fake_package_path / "pipelines" / PIPELINE_NAME / "requirements.txt"
    )
    pipeline_requirements_txt.write_text(requirement)
    result = CliRunner().invoke(
        fake_project_cli,
        ["micropkg", "package", f"pipelines.{PIPELINE_NAME}"],
        obj=fake_metadata,
    )
    assert result.exit_code == 1
    # The exact message differs between packaging library versions.
    expected_errors = (
        "InvalidRequirement: Expected package name at the start of dependency specifier",
        "InvalidRequirement: Expected end or semicolon",
        "InvalidRequirement: Parse error",
    )
    assert any(error in result.output for error in expected_errors)
Options that are valid in requirements.txt but cannot be packaged in pyproject.toml.
test_complex_requirements
python
kedro-org/kedro
tests/framework/cli/micropkg/test_micropkg_requirements.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/cli/micropkg/test_micropkg_requirements.py
Apache-2.0
def test_micropkg_package_same_name_as_package_name(
    self, fake_metadata, fake_project_cli, fake_repo_path
):
    """Create modular pipeline with the same name as the package name, then
    package as is. The command should run and the resulting sdist should
    have all expected contents.
    """
    pipeline_name = fake_metadata.package_name
    result = CliRunner().invoke(
        fake_project_cli, ["pipeline", "create", pipeline_name], obj=fake_metadata
    )
    assert result.exit_code == 0
    result = CliRunner().invoke(
        fake_project_cli,
        ["micropkg", "package", f"pipelines.{pipeline_name}"],
        obj=fake_metadata,
    )
    sdist_location = fake_repo_path / "dist"
    assert result.exit_code == 0
    assert f"Location: {sdist_location}" in result.output
    self.assert_sdist_contents_correct(
        sdist_location=sdist_location, package_name=pipeline_name
    )
Create modular pipeline with the same name as the package name, then package as is. The command should run and the resulting sdist should have all expected contents.
test_micropkg_package_same_name_as_package_name
python
kedro-org/kedro
tests/framework/cli/micropkg/test_micropkg_package.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/cli/micropkg/test_micropkg_package.py
Apache-2.0
def test_micropkg_package_same_name_as_package_name_alias(
    self, fake_metadata, fake_project_cli, fake_repo_path
):
    """Create modular pipeline, then package under alias the same name as
    the package name. The command should run and the resulting sdist should
    have all expected contents.
    """
    alias = fake_metadata.package_name
    result = CliRunner().invoke(
        fake_project_cli, ["pipeline", "create", PIPELINE_NAME], obj=fake_metadata
    )
    assert result.exit_code == 0
    result = CliRunner().invoke(
        fake_project_cli,
        ["micropkg", "package", f"pipelines.{PIPELINE_NAME}", "--alias", alias],
        obj=fake_metadata,
    )
    sdist_location = fake_repo_path / "dist"
    assert result.exit_code == 0
    assert f"Location: {sdist_location}" in result.output
    self.assert_sdist_contents_correct(
        sdist_location=sdist_location, package_name=alias
    )
Create modular pipeline, then package under alias the same name as the package name. The command should run and the resulting sdist should have all expected contents.
test_micropkg_package_same_name_as_package_name_alias
python
kedro-org/kedro
tests/framework/cli/micropkg/test_micropkg_package.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/cli/micropkg/test_micropkg_package.py
Apache-2.0
def test_package_modular_pipeline_with_nested_parameters(
    self, fake_repo_path, fake_project_cli, fake_metadata
):
    """
    The setup for the test is as follows:

    Create two modular pipelines, to verify that only the parameter file
    with matching pipeline name will be packaged.

    Add a directory with a parameter file to verify that if a project has
    parameters structured like below, that the ones inside a directory with
    the pipeline name are packaged as well when calling
    `kedro micropkg package` for a specific pipeline.

    parameters
        └── retail
            └── params1.yml
    """
    # Two pipelines, so we can check only the matching one is packaged.
    CliRunner().invoke(
        fake_project_cli, ["pipeline", "create", "retail"], obj=fake_metadata
    )
    CliRunner().invoke(
        fake_project_cli,
        ["pipeline", "create", "retail_banking"],
        obj=fake_metadata,
    )
    # Nested parameter directory named after the pipeline.
    nested_param_path = Path(
        fake_repo_path / "conf" / "base" / "parameters" / "retail"
    )
    nested_param_path.mkdir(parents=True, exist_ok=True)
    (nested_param_path / "params1.yml").touch()

    result = CliRunner().invoke(
        fake_project_cli,
        ["micropkg", "package", "pipelines.retail"],
        obj=fake_metadata,
    )

    assert result.exit_code == 0
    assert "'dummy_package.pipelines.retail' packaged!" in result.output

    sdist_location = fake_repo_path / "dist"
    assert f"Location: {sdist_location}" in result.output

    sdist_name = _get_sdist_name(name="retail", version="0.1")
    sdist_file = sdist_location / sdist_name
    assert sdist_file.is_file()
    assert len(list(sdist_location.iterdir())) == 1

    with tarfile.open(sdist_file, "r") as tar:
        sdist_contents = set(tar.getnames())
    # The nested parameter file and the matching flat file are included...
    assert (
        "retail-0.1/retail/config/parameters/retail/params1.yml" in sdist_contents
    )
    assert "retail-0.1/retail/config/parameters_retail.yml" in sdist_contents
    # ...but the other pipeline's parameters are not.
    assert (
        "retail-0.1/retail/config/parameters_retail_banking.yml"
        not in sdist_contents
    )
The setup for the test is as follows: Create two modular pipelines, to verify that only the parameter file with matching pipeline name will be packaged. Add a directory with a parameter file to verify that if a project has parameters structured like below, that the ones inside a directory with the pipeline name are packaged as well when calling `kedro micropkg package` for a specific pipeline. parameters └── retail └── params1.yml
test_package_modular_pipeline_with_nested_parameters
python
kedro-org/kedro
tests/framework/cli/micropkg/test_micropkg_package.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/cli/micropkg/test_micropkg_package.py
Apache-2.0
def _assert_hook_call_record_has_expected_parameters(
    call_record: logging.LogRecord, expected_parameters: list[str]
):
    """Assert the given call record has all expected parameters."""
    for parameter in expected_parameters:
        assert hasattr(call_record, parameter)
Assert the given call record has all expected parameters.
_assert_hook_call_record_has_expected_parameters
python
kedro-org/kedro
tests/framework/session/conftest.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/conftest.py
Apache-2.0
def logs_listener():
    """Fixture to start the logs listener before a test and clean up after the
    test finishes.
    """
    listener = LogsListener()
    listener.start()
    yield listener
    # Teardown: detach the handler and stop the listener.
    logger.removeHandler(listener.log_handler)
    listener.stop()
Fixture to start the logs listener before a test and clean up after the test finishes
logs_listener
python
kedro-org/kedro
tests/framework/session/conftest.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/conftest.py
Apache-2.0
def project_hooks():
    """A set of project hook implementations that log to stdout whenever it is invoked."""
    return LoggingHooks()
A set of project hook implementations that log to stdout whenever it is invoked.
project_hooks
python
kedro-org/kedro
tests/framework/session/conftest.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/conftest.py
Apache-2.0
def mock_session_with_before_node_run_hooks(
    mocker, project_hooks, mock_package_name, tmp_path
):
    """Session whose `before_node_run` hook swaps in a replacement dataset."""

    class BeforeNodeRunHook:
        """Should overwrite the `cars` dataset."""

        @hook_impl
        def before_node_run(self, node: Node):
            # Only node1 gets its `cars` input replaced.
            if node.name == "node1":
                return {"cars": MockDatasetReplacement()}
            return None

    class MockSettings(_ProjectSettings):
        _HOOKS = Validator("HOOKS", default=(project_hooks, BeforeNodeRunHook()))

    _mock_imported_settings_paths(mocker, MockSettings())
    return KedroSession.create(tmp_path)
Should overwrite the `cars` dataset
mock_session_with_before_node_run_hooks
python
kedro-org/kedro
tests/framework/session/test_session_extension_hooks.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session_extension_hooks.py
Apache-2.0
def mock_session_with_broken_before_node_run_hooks(
    mocker, project_hooks, mock_package_name, tmp_path
):
    """Session with a deliberately broken `before_node_run` hook (bad return type)."""

    class BeforeNodeRunHook:
        """Should overwrite the `cars` dataset."""

        @hook_impl
        def before_node_run(self):
            # Returns a bare object instead of the expected mapping.
            return MockDatasetReplacement()

    class MockSettings(_ProjectSettings):
        _HOOKS = Validator("HOOKS", default=(project_hooks, BeforeNodeRunHook()))

    _mock_imported_settings_paths(mocker, MockSettings())
    return KedroSession.create(tmp_path)
Should overwrite the `cars` dataset
mock_session_with_broken_before_node_run_hooks
python
kedro-org/kedro
tests/framework/session/test_session_extension_hooks.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session_extension_hooks.py
Apache-2.0
def create_attrs_autospec(spec: type, spec_set: bool = True) -> Any:
    """Creates a mock of an attr class (creates mocks recursively on all attributes).
    https://github.com/python-attrs/attrs/issues/462#issuecomment-1134656377

    :param spec: the spec to mock
    :param spec_set: if True, AttributeError will be raised if an attribute
        that is not in the spec is set.
    """
    if not hasattr(spec, ATTRS_ATTRIBUTE):
        raise TypeError(f"{spec!r} is not an attrs class")
    mock = create_autospec(spec, spec_set=spec_set)
    for attribute in getattr(spec, ATTRS_ATTRIBUTE):
        attribute_type = attribute.type
        if NEW_TYPING:
            # A[T] does not get a copy of __dict__ from A(Generic[T]) anymore,
            # use __origin__ to get it.
            while hasattr(attribute_type, "__origin__"):
                attribute_type = attribute_type.__origin__
        # Recurse into nested attrs classes; plain autospec otherwise.
        if hasattr(attribute_type, ATTRS_ATTRIBUTE):
            attribute_mock = create_attrs_autospec(attribute_type, spec_set)
        else:
            attribute_mock = create_autospec(attribute_type, spec_set=spec_set)
        # object.__setattr__ sidesteps frozen-class protection on the mock.
        object.__setattr__(mock, attribute.name, attribute_mock)
    return mock
Creates a mock of an attr class (creates mocks recursively on all attributes). https://github.com/python-attrs/attrs/issues/462#issuecomment-1134656377 :param spec: the spec to mock :param spec_set: if True, AttributeError will be raised if an attribute that is not in the spec is set.
create_attrs_autospec
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_git_describe(
    self, fake_project, fake_commit_hash, fake_git_status, mocker
):
    """Test that git information is added to the session store."""
    # First call returns the commit SHA, second the `git status` output.
    mocker.patch(
        "subprocess.check_output",
        side_effect=[fake_commit_hash.encode(), fake_git_status.encode()],
    )
    session = KedroSession.create(fake_project)
    expected_git_info = {
        "commit_sha": fake_commit_hash,
        "dirty": bool(fake_git_status),
    }
    assert session.store["git"] == expected_git_info
Test that git information is added to the session store
test_git_describe
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_git_describe_error(self, fake_project, exception, mocker, caplog):
    """Test that git information is not added to the session store
    if call to git fails
    """
    caplog.set_level(logging.DEBUG, logger="kedro")

    # The subprocess failure must be swallowed: session creation succeeds
    # and the store simply has no "git" entry.
    mocker.patch("subprocess.check_output", side_effect=exception)
    session = KedroSession.create(fake_project)
    assert "git" not in session.store

    expected_log_message = f"Unable to git describe {fake_project}"
    # Only DEBUG records emitted by the session logger are relevant.
    actual_log_messages = [
        rec.getMessage()
        for rec in caplog.records
        if rec.name == SESSION_LOGGER_NAME and rec.levelno == logging.DEBUG
    ]
    assert expected_log_message in actual_log_messages
Test that git information is not added to the session store if call to git fails
test_git_describe_error
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_get_username_error(self, fake_project, mocker, caplog):
    """Test that username information is not added to the session store
    if call to getuser() fails
    """
    caplog.set_level(logging.DEBUG, logger="kedro")

    mocker.patch("subprocess.check_output")
    # getuser() failing must not abort session creation.
    mocker.patch("getpass.getuser", side_effect=FakeException("getuser error"))
    session = KedroSession.create(fake_project)
    assert "username" not in session.store

    expected_log_messages = [
        "Unable to get username. Full exception: getuser error"
    ]
    # Only DEBUG records emitted by the session logger are relevant.
    actual_log_messages = [
        rec.getMessage()
        for rec in caplog.records
        if rec.name == SESSION_LOGGER_NAME and rec.levelno == logging.DEBUG
    ]
    assert actual_log_messages == expected_log_messages
Test that username information is not added to the session store if call to getuser() fails
test_get_username_error
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_log_error(self, fake_project):
    """Test logging the error by the session"""
    # The session must propagate the exception (not swallow it) while still
    # recording it in the session store.
    with pytest.raises(FakeException), KedroSession.create(fake_project) as session:
        raise FakeException

    stored = session.store["exception"]
    assert stored["type"] == "tests.framework.session.test_session.FakeException"
    assert not stored["value"]
    assert any("raise FakeException" in line for line in stored["traceback"])
Test logging the error by the session
test_log_error
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_run(
    self,
    fake_project,
    fake_session_id,
    fake_pipeline_name,
    mock_context_class,
    mock_runner,
    mocker,
):
    """Test running the project via the session"""
    # Patch the hook manager so lifecycle hook calls can be asserted on.
    mock_hook = mocker.patch(
        "kedro.framework.session.session._create_hook_manager"
    ).return_value.hook
    mock_pipelines = mocker.patch(
        "kedro.framework.session.session.pipelines",
        return_value={
            _FAKE_PIPELINE_NAME: mocker.Mock(),
            "__default__": mocker.Mock(),
        },
    )
    mock_context = mock_context_class.return_value
    mock_catalog = mock_context._get_catalog.return_value
    mock_runner.__name__ = "SequentialRunner"
    mock_pipeline = mock_pipelines.__getitem__.return_value.filter.return_value

    with KedroSession.create(fake_project) as session:
        session.run(runner=mock_runner, pipeline_name=fake_pipeline_name)

    # run() is expected to assemble this exact metadata record and pass it
    # to the before/after pipeline-run hooks.
    record_data = {
        "session_id": fake_session_id,
        "project_path": fake_project.as_posix(),
        "env": mock_context.env,
        "kedro_version": kedro_version,
        "tags": None,
        "from_nodes": None,
        "to_nodes": None,
        "node_names": None,
        "from_inputs": None,
        "to_outputs": None,
        "load_versions": None,
        "extra_params": {},
        "pipeline_name": fake_pipeline_name,
        "namespace": None,
        "runner": mock_runner.__name__,
    }

    mock_hook.before_pipeline_run.assert_called_once_with(
        run_params=record_data, pipeline=mock_pipeline, catalog=mock_catalog
    )
    mock_runner.run.assert_called_once_with(
        mock_pipeline, mock_catalog, session._hook_manager, fake_session_id
    )
    mock_hook.after_pipeline_run.assert_called_once_with(
        run_params=record_data,
        run_result=mock_runner.run.return_value,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )
Test running the project via the session
test_run
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_run_thread_runner(
    self,
    fake_project,
    fake_session_id,
    fake_pipeline_name,
    mock_context_class,
    mock_thread_runner,
    mocker,
    match_pattern,
):
    """Test running the project via the session"""
    # Patch the hook manager so lifecycle hook calls can be asserted on.
    mock_hook = mocker.patch(
        "kedro.framework.session.session._create_hook_manager"
    ).return_value.hook
    ds_mock = mocker.Mock(**{"datasets.return_value": ["ds_1", "ds_2"]})
    filter_mock = mocker.Mock(**{"filter.return_value": ds_mock})
    pipelines_ret = {
        _FAKE_PIPELINE_NAME: filter_mock,
        "__default__": filter_mock,
    }
    mocker.patch("kedro.framework.session.session.pipelines", pipelines_ret)
    # Parametrised: the catalog may or may not resolve a dataset pattern.
    mocker.patch(
        "kedro.io.data_catalog.CatalogConfigResolver.match_pattern",
        return_value=match_pattern,
    )

    with KedroSession.create(fake_project) as session:
        session.run(runner=mock_thread_runner, pipeline_name=fake_pipeline_name)

    mock_context = mock_context_class.return_value
    # run() is expected to assemble this exact metadata record and pass it
    # to the before/after pipeline-run hooks.
    record_data = {
        "session_id": fake_session_id,
        "project_path": fake_project.as_posix(),
        "env": mock_context.env,
        "kedro_version": kedro_version,
        "tags": None,
        "from_nodes": None,
        "to_nodes": None,
        "node_names": None,
        "from_inputs": None,
        "to_outputs": None,
        "load_versions": None,
        "extra_params": {},
        "pipeline_name": fake_pipeline_name,
        "namespace": None,
        "runner": mock_thread_runner.__name__,
    }
    mock_catalog = mock_context._get_catalog.return_value
    mock_pipeline = filter_mock.filter()
    mock_hook.before_pipeline_run.assert_called_once_with(
        run_params=record_data, pipeline=mock_pipeline, catalog=mock_catalog
    )
    mock_thread_runner.run.assert_called_once_with(
        mock_pipeline, mock_catalog, session._hook_manager, fake_session_id
    )
    mock_hook.after_pipeline_run.assert_called_once_with(
        run_params=record_data,
        run_result=mock_thread_runner.run.return_value,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )
Test running the project via the session
test_run_thread_runner
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_run_multiple_times(
    self,
    fake_project,
    fake_session_id,
    fake_pipeline_name,
    mock_context_class,
    mock_runner,
    mocker,
):
    """Test running the project more than once via the session"""
    mock_hook = mocker.patch(
        "kedro.framework.session.session._create_hook_manager"
    ).return_value.hook
    mock_pipelines = mocker.patch(
        "kedro.framework.session.session.pipelines",
        return_value={
            _FAKE_PIPELINE_NAME: mocker.Mock(),
            "__default__": mocker.Mock(),
        },
    )
    mock_context = mock_context_class.return_value
    mock_catalog = mock_context._get_catalog.return_value
    mock_pipeline = mock_pipelines.__getitem__.return_value.filter.return_value

    # A second successful run in the same session must be rejected with
    # this exact message.
    message = (
        "A run has already been completed as part of the active KedroSession. "
        "KedroSession has a 1-1 mapping with runs, and thus only one run should be"
        " executed per session."
    )
    with pytest.raises(Exception, match=message):
        with KedroSession.create(fake_project) as session:
            session.run(runner=mock_runner, pipeline_name=fake_pipeline_name)
            session.run(runner=mock_runner, pipeline_name=fake_pipeline_name)

    record_data = {
        "session_id": fake_session_id,
        "project_path": fake_project.as_posix(),
        "env": mock_context.env,
        "kedro_version": kedro_version,
        "tags": None,
        "from_nodes": None,
        "to_nodes": None,
        "node_names": None,
        "from_inputs": None,
        "to_outputs": None,
        "load_versions": None,
        "extra_params": {},
        "pipeline_name": fake_pipeline_name,
        "namespace": None,
        "runner": mock_runner.__name__,
    }

    # assert_called_once_with proves the hooks and runner fired for the
    # first run only.
    mock_hook.before_pipeline_run.assert_called_once_with(
        run_params=record_data,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )
    mock_runner.run.assert_called_once_with(
        mock_pipeline, mock_catalog, session._hook_manager, fake_session_id
    )
    mock_hook.after_pipeline_run.assert_called_once_with(
        run_params=record_data,
        run_result=mock_runner.run.return_value,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )
Test running the project more than once via the session
test_run_multiple_times
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_run_exception(
    self,
    fake_project,
    fake_session_id,
    fake_pipeline_name,
    mock_context_class,
    mock_runner,
    mocker,
):
    """Test exception being raised during the run"""
    mock_hook = mocker.patch(
        "kedro.framework.session.session._create_hook_manager"
    ).return_value.hook
    mock_pipelines = mocker.patch(
        "kedro.framework.session.session.pipelines",
        return_value={
            _FAKE_PIPELINE_NAME: mocker.Mock(),
            "__default__": mocker.Mock(),
        },
    )
    mock_context = mock_context_class.return_value
    mock_catalog = mock_context._get_catalog.return_value
    error = FakeException("You shall not pass!")
    mock_runner.run.side_effect = error  # runner.run() raises an error
    mock_pipeline = mock_pipelines.__getitem__.return_value.filter.return_value

    # The error must propagate out of session.run().
    with pytest.raises(FakeException), KedroSession.create(fake_project) as session:
        session.run(runner=mock_runner, pipeline_name=fake_pipeline_name)

    record_data = {
        "session_id": fake_session_id,
        "project_path": fake_project.as_posix(),
        "env": mock_context.env,
        "kedro_version": kedro_version,
        "tags": None,
        "from_nodes": None,
        "to_nodes": None,
        "node_names": None,
        "from_inputs": None,
        "to_outputs": None,
        "load_versions": None,
        "extra_params": {},
        "pipeline_name": fake_pipeline_name,
        "namespace": None,
        "runner": mock_runner.__name__,
    }

    # On failure the error hook fires and the success hook must not.
    mock_hook.on_pipeline_error.assert_called_once_with(
        error=error,
        run_params=record_data,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )

    mock_hook.after_pipeline_run.assert_not_called()

    # The exception is also captured in the session store.
    exception = session.store["exception"]
    assert exception["type"] == "tests.framework.session.test_session.FakeException"
    assert exception["value"] == "You shall not pass!"
    assert exception["traceback"]
Test exception being raised during the run
test_run_exception
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_run_broken_pipeline_multiple_times(
    self,
    fake_project,
    fake_session_id,
    fake_pipeline_name,
    mock_context_class,
    mock_runner,
    mocker,
):
    """Test exception being raised during the first run and
    a second run is allowed to be executed in the same session."""
    mock_hook = mocker.patch(
        "kedro.framework.session.session._create_hook_manager"
    ).return_value.hook
    mock_pipelines = mocker.patch(
        "kedro.framework.session.session.pipelines",
        return_value={
            _FAKE_PIPELINE_NAME: mocker.Mock(),
            "__default__": mocker.Mock(),
        },
    )
    mock_context = mock_context_class.return_value
    mock_catalog = mock_context._get_catalog.return_value
    session = KedroSession.create(fake_project)

    # First attempt uses a runner whose run() always raises.
    broken_runner = mocker.patch(
        "kedro.runner.SequentialRunner",
        autospec=True,
    )
    broken_runner.__name__ = "BrokenRunner"
    error = FakeException("You shall not pass!")
    broken_runner.run.side_effect = error  # runner.run() raises an error
    mock_pipeline = mock_pipelines.__getitem__.return_value.filter.return_value

    with pytest.raises(FakeException):
        # Execute run with broken runner
        session.run(runner=broken_runner, pipeline_name=fake_pipeline_name)

    record_data = {
        "session_id": fake_session_id,
        "project_path": fake_project.as_posix(),
        "env": mock_context.env,
        "kedro_version": kedro_version,
        "tags": None,
        "from_nodes": None,
        "to_nodes": None,
        "node_names": None,
        "from_inputs": None,
        "to_outputs": None,
        "load_versions": None,
        "extra_params": {},
        "pipeline_name": fake_pipeline_name,
        "namespace": None,
        "runner": broken_runner.__name__,
    }

    mock_hook.on_pipeline_error.assert_called_once_with(
        error=error,
        run_params=record_data,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )
    mock_hook.after_pipeline_run.assert_not_called()

    # Execute run another time with fixed runner: a failed run must not
    # block a retry within the same session.
    fixed_runner = mock_runner
    session.run(runner=fixed_runner, pipeline_name=fake_pipeline_name)
    fixed_runner.run.assert_called_once_with(
        mock_pipeline, mock_catalog, session._hook_manager, fake_session_id
    )

    # The retry is recorded with the fixed runner's name.
    record_data["runner"] = "MockRunner"
    mock_hook.after_pipeline_run.assert_called_once_with(
        run_params=record_data,
        run_result=fixed_runner.run.return_value,
        pipeline=mock_pipeline,
        catalog=mock_catalog,
    )
Test exception being raised during the first run and a second run is allowed to be executed in the same session.
test_run_broken_pipeline_multiple_times
python
kedro-org/kedro
tests/framework/session/test_session.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session.py
Apache-2.0
def test_calling_register_hooks_twice(self, project_hooks, mock_session):
    """Calling hook registration multiple times should not raise"""
    manager = mock_session._hook_manager
    assert manager.is_registered(project_hooks)
    # Registering the same hooks repeatedly must be a harmless no-op.
    for _ in range(2):
        _register_hooks(manager, (project_hooks,))
    assert manager.is_registered(project_hooks)
Calling hook registration multiple times should not raise
test_calling_register_hooks_twice
python
kedro-org/kedro
tests/framework/session/test_session_hook_manager.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/session/test_session_hook_manager.py
Apache-2.0
def mock_package_name_with_pipelines_file(tmpdir):
    """Create an importable dummy package with a ``pipeline_registry.py``
    that registers one empty pipeline; yields the package name."""
    pipelines_file_path = tmpdir.mkdir("test_package") / "pipeline_registry.py"
    pipelines_file_path.write(
        textwrap.dedent(
            """
            from kedro.pipeline import Pipeline

            def register_pipelines():
                return {"new_pipeline": Pipeline([])}
            """
        )
    )
    project_path, package_name, _ = str(pipelines_file_path).rpartition("test_package")
    # Make the generated package importable for the duration of the test.
    sys.path.insert(0, project_path)
    yield package_name
    # Teardown: undo the sys.path modification.
    sys.path.pop(0)
from kedro.pipeline import Pipeline def register_pipelines(): return {"new_pipeline": Pipeline([])}
mock_package_name_with_pipelines_file
python
kedro-org/kedro
tests/framework/project/test_pipeline_registry.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_pipeline_registry.py
Apache-2.0
def mock_package_name_with_unimportable_pipelines_file(tmpdir):
    """Create a dummy package whose ``pipeline_registry.py`` fails to import
    (it imports a nonexistent module); yields the package name."""
    pipelines_file_path = tmpdir.mkdir("test_broken_package") / "pipeline_registry.py"
    pipelines_file_path.write(
        textwrap.dedent(
            """
            import this_is_not_a_real_thing

            from kedro.pipeline import Pipeline

            def register_pipelines():
                return {"new_pipeline": Pipeline([])}
            """
        )
    )
    project_path, package_name, _ = str(pipelines_file_path).rpartition(
        "test_broken_package"
    )
    # Make the generated package importable for the duration of the test.
    sys.path.insert(0, project_path)
    yield package_name
    # Teardown: undo the sys.path modification.
    sys.path.pop(0)
import this_is_not_a_real_thing from kedro.pipeline import Pipeline def register_pipelines(): return {"new_pipeline": Pipeline([])}
mock_package_name_with_unimportable_pipelines_file
python
kedro-org/kedro
tests/framework/project/test_pipeline_registry.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_pipeline_registry.py
Apache-2.0
def test_find_pipelines_skips_hidden_modules(
    mock_package_name_with_pipelines, pipeline_names
):
    """Pipelines under hidden directories (e.g. ``.ipynb_checkpoints``)
    must not be picked up by ``find_pipelines``."""
    pipelines_dir = Path(sys.path[0]) / mock_package_name_with_pipelines / "pipelines"
    pipeline_dir = pipelines_dir / ".ipynb_checkpoints"
    pipeline_dir.mkdir()
    (pipeline_dir / "__init__.py").write_text(
        textwrap.dedent(
            """
            from __future__ import annotations

            from kedro.pipeline import Pipeline, node, pipeline

            def create_pipeline(**kwargs) -> Pipeline:
                return pipeline([node(lambda: 1, None, "simple_pipeline")])
            """
        )
    )

    configure_project(mock_package_name_with_pipelines)
    pipelines = find_pipelines()
    assert set(pipelines) == pipeline_names | {"__default__"}
    # The hidden module's "simple_pipeline" output must be absent.
    assert sum(pipelines.values()).outputs() == pipeline_names
from __future__ import annotations from kedro.pipeline import Pipeline, node, pipeline def create_pipeline(**kwargs) -> Pipeline: return pipeline([node(lambda: 1, None, "simple_pipeline")])
test_find_pipelines_skips_hidden_modules
python
kedro-org/kedro
tests/framework/project/test_pipeline_discovery.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_pipeline_discovery.py
Apache-2.0
def test_find_pipelines_skips_modules_with_unexpected_return_value_type(
    mock_package_name_with_pipelines, pipeline_names
):
    """A ``create_pipeline`` that returns something other than a ``Pipeline``
    is skipped with a warning rather than breaking discovery."""
    # Define `create_pipelines` so that it does not return a `Pipeline`.
    pipelines_dir = Path(sys.path[0]) / mock_package_name_with_pipelines / "pipelines"
    pipeline_dir = pipelines_dir / "not_my_pipeline"
    pipeline_dir.mkdir()
    (pipeline_dir / "__init__.py").write_text(
        textwrap.dedent(
            """
            from __future__ import annotations

            from kedro.pipeline import Pipeline, node, pipeline

            def create_pipeline(**kwargs) -> dict[str, Pipeline]:
                return {
                    "pipe1": pipeline([node(lambda: 1, None, "pipe1")]),
                    "pipe2": pipeline([node(lambda: 2, None, "pipe2")]),
                }
            """
        )
    )

    configure_project(mock_package_name_with_pipelines)
    with pytest.warns(
        UserWarning,
        match=(
            r"Expected the 'create_pipeline' function in the '\S+' "
            r"module to return a 'Pipeline' object, got 'dict' instead."
        ),
    ):
        pipelines = find_pipelines()
    # The misbehaving module contributes nothing to the result.
    assert set(pipelines) == pipeline_names | {"__default__"}
    assert sum(pipelines.values()).outputs() == pipeline_names
from __future__ import annotations from kedro.pipeline import Pipeline, node, pipeline def create_pipeline(**kwargs) -> dict[str, Pipeline]: return { "pipe1": pipeline([node(lambda: 1, None, "pipe1")]), "pipe2": pipeline([node(lambda: 2, None, "pipe2")]), }
test_find_pipelines_skips_modules_with_unexpected_return_value_type
python
kedro-org/kedro
tests/framework/project/test_pipeline_discovery.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_pipeline_discovery.py
Apache-2.0
def test_find_pipelines_handles_simplified_project_structure(
    mock_package_name_with_pipelines, pipeline_names
):
    """A top-level ``pipeline.py`` module (simplified layout) is discovered
    alongside the regular ``pipelines/`` subpackages."""
    (Path(sys.path[0]) / mock_package_name_with_pipelines / "pipeline.py").write_text(
        textwrap.dedent(
            """
            from kedro.pipeline import Pipeline, node, pipeline

            def create_pipeline(**kwargs) -> Pipeline:
                return pipeline([node(lambda: 1, None, "simple_pipeline")])
            """
        )
    )

    configure_project(mock_package_name_with_pipelines)
    pipelines = find_pipelines()
    assert set(pipelines) == pipeline_names | {"__default__"}
    # The simplified pipeline's output is folded into the default pipeline.
    assert sum(pipelines.values()).outputs() == pipeline_names | {"simple_pipeline"}
from kedro.pipeline import Pipeline, node, pipeline def create_pipeline(**kwargs) -> Pipeline: return pipeline([node(lambda: 1, None, "simple_pipeline")])
test_find_pipelines_handles_simplified_project_structure
python
kedro-org/kedro
tests/framework/project/test_pipeline_discovery.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_pipeline_discovery.py
Apache-2.0
def test_find_pipelines_handles_project_structure_without_pipelines_dir(
    mock_package_name_with_pipelines, simplified
):
    """Discovery must work when the ``pipelines/`` directory is absent,
    with or without a top-level ``pipeline.py`` (parametrised)."""
    # Delete the `pipelines` directory to simulate a project without it.
    pipelines_dir = Path(sys.path[0]) / mock_package_name_with_pipelines / "pipelines"
    shutil.rmtree(pipelines_dir)
    if simplified:
        (
            Path(sys.path[0]) / mock_package_name_with_pipelines / "pipeline.py"
        ).write_text(
            textwrap.dedent(
                """
                from kedro.pipeline import Pipeline, node, pipeline

                def create_pipeline(**kwargs) -> Pipeline:
                    return pipeline([node(lambda: 1, None, "simple_pipeline")])
                """
            )
        )

    configure_project(mock_package_name_with_pipelines)
    pipelines = find_pipelines()
    assert set(pipelines) == {"__default__"}
    assert sum(pipelines.values()).outputs() == (
        {"simple_pipeline"} if simplified else set()
    )
from kedro.pipeline import Pipeline, node, pipeline def create_pipeline(**kwargs) -> Pipeline: return pipeline([node(lambda: 1, None, "simple_pipeline")])
test_find_pipelines_handles_project_structure_without_pipelines_dir
python
kedro-org/kedro
tests/framework/project/test_pipeline_discovery.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_pipeline_discovery.py
Apache-2.0
def mock_package_name_with_settings_file(tmpdir):
    """This mock settings file tests everything that can be customised in
    settings.py. Where there are suggestions in the project template settings.py
    (e.g. as for CONFIG_LOADER_CLASS), those suggestions should be tested."""
    # Snapshot current settings so they can be restored on teardown.
    old_settings = settings.as_dict()
    settings_file_path = tmpdir.mkdir("test_package").join("settings.py")
    project_path, package_name, _ = str(settings_file_path).rpartition("test_package")
    settings_file_path.write(
        textwrap.dedent(
            f"""
            from {__name__} import ProjectHooks
            HOOKS = (ProjectHooks(),)

            DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)

            from kedro.framework.session.store import BaseSessionStore
            SESSION_STORE_CLASS = BaseSessionStore
            SESSION_STORE_ARGS = {{
                "path": "./sessions"
            }}

            from {__name__} import MyContext
            CONTEXT_CLASS = MyContext
            CONF_SOURCE = "test_conf"

            from kedro.config import OmegaConfigLoader
            CONFIG_LOADER_CLASS = OmegaConfigLoader
            CONFIG_LOADER_ARGS = {{
                "globals_pattern": "*globals.yml",
            }}

            # Class that manages the Data Catalog.
            from {__name__} import MyDataCatalog
            DATA_CATALOG_CLASS = MyDataCatalog
            """
        )
    )
    # Make the generated package importable for the duration of the test.
    sys.path.insert(0, project_path)
    yield package_name
    sys.path.pop(0)
    # reset side-effect of configure_project
    for key, value in old_settings.items():
        settings.set(key, value)
This mock settings file tests everything that can be customised in settings.py. Where there are suggestions in the project template settings.py (e.g. as for CONFIG_LOADER_CLASS), those suggestions should be tested.
mock_package_name_with_settings_file
python
kedro-org/kedro
tests/framework/project/test_settings.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_settings.py
Apache-2.0
def mock_package_name_without_settings_file(tmpdir):
    """Mock a project that has no settings.py file; configuring such a
    project should fall back to sensible default settings."""
    pkg = "test_package_without_settings"
    # rpartition gives the parent directory that must go on sys.path.
    parent_dir, _, _ = str(tmpdir.mkdir(pkg)).rpartition(pkg)
    sys.path.insert(0, parent_dir)
    yield pkg
    sys.path.pop(0)
This mocks a project that doesn't have a settings.py file. When configured, the project should have sensible default settings.
mock_package_name_without_settings_file
python
kedro-org/kedro
tests/framework/project/test_settings.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_settings.py
Apache-2.0
def test_rich_traceback_configuration_extend_suppress(mocker, default_logging_config):
    """Test that the ``suppress`` setting is extended, not overridden."""
    import click

    rich_traceback_install = mocker.patch("rich.traceback.install")
    rich_pretty_install = mocker.patch("rich.pretty.install")
    sys_executable_path = str(Path(sys.executable).parent)
    # These two entries are always suppressed by default.
    traceback_install_defaults = {"suppress": [click, sys_executable_path]}
    fake_path = "dummy"
    rich_handler = {
        "class": "kedro.logging.RichHandler",
        "rich_tracebacks": True,
        "tracebacks_suppress": [fake_path],
    }

    test_logging_config = default_logging_config
    test_logging_config["handlers"]["rich"] = rich_handler
    LOGGING.configure(test_logging_config)

    # The user-supplied suppress path is appended to the defaults.
    expected_install_defaults = traceback_install_defaults
    expected_install_defaults["suppress"].extend([fake_path])
    rich_traceback_install.assert_called_with(**expected_install_defaults)
    rich_pretty_install.assert_called_once()
Test the configuration is not overrided but extend for `suppress`
test_rich_traceback_configuration_extend_suppress
python
kedro-org/kedro
tests/framework/project/test_logging.py
https://github.com/kedro-org/kedro/blob/master/tests/framework/project/test_logging.py
Apache-2.0
def branchless_no_input_pipeline():
    """A linear pipeline that executes in the order A->B->C->D->E."""
    specs = [
        (identity, "D", "E", "node1"),
        (identity, "C", "D", "node2"),
        (identity, "A", "B", "node3"),
        (identity, "B", "C", "node4"),
        (random, None, "A", "node5"),
    ]
    return pipeline(
        [node(func, inp, out, name=name) for func, inp, out, name in specs]
    )
The pipeline runs in the order A->B->C->D->E.
branchless_no_input_pipeline
python
kedro-org/kedro
tests/runner/conftest.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/conftest.py
Apache-2.0
def two_branches_crossed_pipeline_variable_inputs(request):
    """A ``Pipeline`` with an X-shape (two branches with one common node).
    Non-persistent datasets (other than parameters) are prefixed with an underscore.
    """
    # Parametrised fixture: every node additionally receives these inputs.
    extra_inputs = list(request.param)

    return pipeline(
        [
            node(first_arg, ["ds0_A", *extra_inputs], "_ds1_A", name="node1_A"),
            node(first_arg, ["ds0_B", *extra_inputs], "_ds1_B", name="node1_B"),
            # The common crossing node joining both branches.
            node(
                multi_input_list_output,
                ["_ds1_A", "_ds1_B", *extra_inputs],
                ["ds2_A", "ds2_B"],
                name="node2",
            ),
            node(first_arg, ["ds2_A", *extra_inputs], "_ds3_A", name="node3_A"),
            node(first_arg, ["ds2_B", *extra_inputs], "_ds3_B", name="node3_B"),
            node(first_arg, ["_ds3_A", *extra_inputs], "_ds4_A", name="node4_A"),
            node(first_arg, ["_ds3_B", *extra_inputs], "_ds4_B", name="node4_B"),
        ]
    )
A ``Pipeline`` with an X-shape (two branches with one common node). Non-persistent datasets (other than parameters) are prefixed with an underscore.
two_branches_crossed_pipeline_variable_inputs
python
kedro-org/kedro
tests/runner/conftest.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/conftest.py
Apache-2.0
def test_result_saved_not_returned(self, is_async, saving_result_pipeline):
    """The pipeline runs ds->dsX but save does not save the output."""

    def _load():
        return 0

    def _save(arg):
        assert arg == 0

    # Both datasets share the same load/save stubs.
    catalog = DataCatalog(
        {
            "ds": LambdaDataset(load=_load, save=_save),
            "dsX": LambdaDataset(load=_load, save=_save),
        }
    )
    output = SequentialRunner(is_async=is_async).run(
        saving_result_pipeline, catalog
    )

    # Outputs persisted via the catalog are not echoed back by the runner.
    assert output == {}
The pipeline runs ds->dsX but save does not save the output.
test_result_saved_not_returned
python
kedro-org/kedro
tests/runner/test_sequential_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_sequential_runner.py
Apache-2.0
def test_conflict_feed_catalog(
    self,
    is_async,
    memory_catalog,
    unfinished_outputs_pipeline,
    conflicting_feed_dict,
):
    """ds1 and ds3 will be replaced with new inputs."""
    # replace=True lets the feed dict override existing catalog entries.
    memory_catalog.add_feed_dict(conflicting_feed_dict, replace=True)
    outputs = SequentialRunner(is_async=is_async).run(
        unfinished_outputs_pipeline, memory_catalog
    )
    assert isinstance(outputs["ds8"], dict)
    assert outputs["ds8"]["data"] == 0
    assert isinstance(outputs["ds6"], pd.DataFrame)
ds1 and ds3 will be replaced with new inputs.
test_conflict_feed_catalog
python
kedro-org/kedro
tests/runner/test_sequential_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_sequential_runner.py
Apache-2.0
def test_unsatisfied_inputs(self, is_async, unfinished_outputs_pipeline, catalog):
    """ds1, ds2 and ds3 were not specified."""
    runner = SequentialRunner(is_async=is_async)
    # Missing free inputs must abort the run with a descriptive error.
    with pytest.raises(
        ValueError, match=rf"not found in the {catalog.__class__.__name__}"
    ):
        runner.run(unfinished_outputs_pipeline, catalog)
ds1, ds2 and ds3 were not specified.
test_unsatisfied_inputs
python
kedro-org/kedro
tests/runner/test_sequential_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_sequential_runner.py
Apache-2.0
def test_stricter_suggest_resume_scenario(
    self,
    caplog,
    two_branches_crossed_pipeline_variable_inputs,
    persistent_dataset_catalog,
    failing_node_names,
    expected_pattern,
):
    """
    Stricter version of previous test.
    Covers pipelines where inputs are shared across nodes.
    """
    test_pipeline = two_branches_crossed_pipeline_variable_inputs
    nodes = {n.name: n for n in test_pipeline.nodes}
    for name in failing_node_names:
        # Swap each healthy node for one that raises, keeping the same I/O.
        test_pipeline -= modular_pipeline([nodes[name]])
        test_pipeline += modular_pipeline([nodes[name]._copy(func=exception_fn)])

    with pytest.raises(Exception, match="test exception"):
        SequentialRunner().run(
            test_pipeline,
            persistent_dataset_catalog,
            hook_manager=_create_hook_manager(),
        )
    # The resume suggestion must be logged on failure.
    assert re.search(expected_pattern, caplog.text)
Stricter version of previous test. Covers pipelines where inputs are shared across nodes.
test_stricter_suggest_resume_scenario
python
kedro-org/kedro
tests/runner/test_sequential_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_sequential_runner.py
Apache-2.0
def test_thread_run_with_patterns(self, catalog_type):
    """Test warm-up is done and patterns are resolved before running pipeline.

    Without the warm-up "Dataset 'dummy_1' has already been registered"
    error would be raised for this test. We check that the dataset was
    registered at the warm-up, and we successfully passed to loading it.
    """
    # Catch-all pattern: every dataset name resolves to a MemoryDataset.
    catalog_conf = {"{catch_all}": {"type": "MemoryDataset"}}

    catalog = catalog_type.from_config(catalog_conf)

    test_pipeline = pipeline(
        [
            node(identity, inputs="dummy_1", outputs="output_1", name="node_1"),
            node(identity, inputs="dummy_2", outputs="output_2", name="node_2"),
            node(identity, inputs="dummy_1", outputs="output_3", name="node_3"),
        ]
    )

    # Reaching this load error proves registration succeeded during warm-up.
    with pytest.raises(
        Exception, match="Data for MemoryDataset has not been saved yet"
    ):
        ThreadRunner().run(test_pipeline, catalog)
Test warm-up is done and patterns are resolved before running pipeline. Without the warm-up "Dataset 'dummy_1' has already been registered" error would be raised for this test. We check that the dataset was registered at the warm-up, and we successfully passed to loading it.
test_thread_run_with_patterns
python
kedro-org/kedro
tests/runner/test_thread_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_thread_runner.py
Apache-2.0
def test_specified_max_workers(
    self,
    mocker,
    fan_out_fan_in,
    catalog,
    user_specified_number,
    expected_number,
):
    """
    The runner must not spawn more threads than the pipeline needs:
    `fan_out_fan_in` requires 3 threads, so a user-specified ``max_workers``
    is capped accordingly (parametrised via ``expected_number``).
    """
    # Wrap the real executor so the pipeline still runs while the
    # constructor arguments can be asserted on.
    executor_cls_mock = mocker.patch(
        "kedro.runner.thread_runner.ThreadPoolExecutor",
        wraps=ThreadPoolExecutor,
    )

    catalog.add_feed_dict({"A": 42})
    result = ThreadRunner(max_workers=user_specified_number).run(
        fan_out_fan_in, catalog
    )
    assert result == {"Z": (42, 42, 42)}

    executor_cls_mock.assert_called_once_with(max_workers=expected_number)
We initialize the runner with max_workers=4. `fan_out_fan_in` pipeline needs 3 threads. A pool with 3 workers should be used.
test_specified_max_workers
python
kedro-org/kedro
tests/runner/test_thread_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_thread_runner.py
Apache-2.0
def test_stricter_suggest_resume_scenario(
    self,
    caplog,
    two_branches_crossed_pipeline_variable_inputs,
    persistent_dataset_catalog,
    failing_node_names,
    expected_pattern,
):
    """
    Stricter version of previous test.
    Covers pipelines where inputs are shared across nodes.
    """
    test_pipeline = two_branches_crossed_pipeline_variable_inputs
    nodes = {n.name: n for n in test_pipeline.nodes}
    for name in failing_node_names:
        # Swap each healthy node for one that raises, keeping the same I/O.
        test_pipeline -= modular_pipeline([nodes[name]])
        test_pipeline += modular_pipeline([nodes[name]._copy(func=exception_fn)])

    with pytest.raises(Exception, match="test exception"):
        # max_workers=1 keeps node execution order deterministic.
        ThreadRunner(max_workers=1).run(
            test_pipeline,
            persistent_dataset_catalog,
            hook_manager=_create_hook_manager(),
        )
    # The resume suggestion must be logged on failure.
    assert re.search(expected_pattern, caplog.text)
Stricter version of previous test. Covers pipelines where inputs are shared across nodes.
test_stricter_suggest_resume_scenario
python
kedro-org/kedro
tests/runner/test_thread_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_thread_runner.py
Apache-2.0
def test_simple_pipeline(
    self,
    pipeline_name,
    persistent_dataset_catalog,
    remaining_node_names,
    expected_result,
    request,
):
    """
    Test suggestion for simple pipelines with a mix
    of persistent and memory datasets.
    """
    # The pipeline fixture is selected by name via parametrisation.
    test_pipeline = request.getfixturevalue(pipeline_name)

    remaining_nodes = test_pipeline.only_nodes(*remaining_node_names).nodes
    result_node_names = _find_nodes_to_resume_from(
        test_pipeline, remaining_nodes, persistent_dataset_catalog
    )

    assert expected_result == result_node_names
Test suggestion for simple pipelines with a mix of persistent and memory datasets.
test_simple_pipeline
python
kedro-org/kedro
tests/runner/test_resume_logic.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_resume_logic.py
Apache-2.0
def test_all_datasets_persistent(
    self,
    pipeline_name,
    persistent_dataset_catalog,
    remaining_node_names,
    expected_result,
    request,
):
    """
    Test suggestion for pipelines where all datasets are persisted:
    In that case, exactly the set of remaining nodes should be re-run.
    """
    test_pipeline = request.getfixturevalue(pipeline_name)
    # Map every dataset name in the pipeline to a persistent (non-memory)
    # stub dataset; the single shared LambdaDataset instance is fine here
    # because it is stateless.
    catalog = DataCatalog(
        dict.fromkeys(
            test_pipeline.datasets(),
            LambdaDataset(load=lambda: 42, save=lambda data: None),
        )
    )
    remaining_nodes = set(test_pipeline.only_nodes(*remaining_node_names).nodes)

    result_node_names = _find_nodes_to_resume_from(
        test_pipeline,
        remaining_nodes,
        catalog,
    )
    final_pipeline_nodes = set(test_pipeline.from_nodes(*result_node_names).nodes)
    assert final_pipeline_nodes == remaining_nodes
Test suggestion for pipelines where all datasets are persisted: In that case, exactly the set of remaining nodes should be re-run.
test_all_datasets_persistent
python
kedro-org/kedro
tests/runner/test_resume_logic.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_resume_logic.py
Apache-2.0
def test_added_shared_input(
    self,
    pipeline_name,
    persistent_dataset_catalog,
    remaining_node_names,
    expected_result,
    extra_input,
    request,
):
    """
    Test suggestion for pipelines where a single persistent dataset or
    parameter is shared across all nodes. These do not change and
    therefore should not affect resume suggestion.
    """
    test_pipeline = request.getfixturevalue(pipeline_name)
    # Add parameter shared across all nodes
    test_pipeline = modular_pipeline(
        [n._copy(inputs=[*n.inputs, extra_input]) for n in test_pipeline.nodes]
    )
    remaining_nodes = test_pipeline.only_nodes(*remaining_node_names).nodes

    result_node_names = _find_nodes_to_resume_from(
        test_pipeline, remaining_nodes, persistent_dataset_catalog
    )
    # The shared input must not change the suggestion.
    assert expected_result == result_node_names
Test suggestion for pipelines where a single persistent dataset or parameter is shared across all nodes. These do not change and therefore should not affect resume suggestion.
test_added_shared_input
python
kedro-org/kedro
tests/runner/test_resume_logic.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_resume_logic.py
Apache-2.0
def test_suggestion_consistency(
    self,
    pipeline_name,
    persistent_dataset_catalog,
    remaining_node_names,
    expected_result,
    request,
):
    """
    Test that suggestions are internally consistent; pipeline generated
    from resume nodes should exactly contain set of all required nodes.
    """
    test_pipeline = request.getfixturevalue(pipeline_name)
    remaining_nodes = test_pipeline.only_nodes(*remaining_node_names).nodes

    required_nodes = _find_all_nodes_for_resumed_pipeline(
        test_pipeline, remaining_nodes, persistent_dataset_catalog
    )
    resume_node_names = _find_nodes_to_resume_from(
        test_pipeline, remaining_nodes, persistent_dataset_catalog
    )
    # Starting the pipeline from the suggested nodes must cover exactly
    # the required node set — no more, no fewer.
    assert {n.name for n in required_nodes} == {
        n.name for n in test_pipeline.from_nodes(*resume_node_names).nodes
    }
Test that suggestions are internally consistent; pipeline generated from resume nodes should exactly contain set of all required nodes.
test_suggestion_consistency
python
kedro-org/kedro
tests/runner/test_resume_logic.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_resume_logic.py
Apache-2.0
def test_specified_max_workers_bellow_cpu_cores_count(
    self,
    is_async,
    mocker,
    fan_out_fan_in,
    catalog,
    cpu_cores,
    user_specified_number,
    expected_number,
):
    """
    The system has 2 cores, but we initialize the runner with max_workers=4.
    `fan_out_fan_in` pipeline needs 3 processes.
    A pool with 3 workers should be used.
    """
    mocker.patch("os.cpu_count", return_value=cpu_cores)
    # Wrap the real executor so the run still works while the call is recorded.
    executor_cls_mock = mocker.patch(
        "kedro.runner.parallel_runner.ProcessPoolExecutor",
        wraps=ProcessPoolExecutor,
    )

    catalog.add_feed_dict({"A": 42})
    result = ParallelRunner(
        max_workers=user_specified_number, is_async=is_async
    ).run(fan_out_fan_in, catalog)
    assert result == {"Z": (42, 42, 42)}

    executor_cls_mock.assert_called_once_with(max_workers=expected_number)
The system has 2 cores, but we initialize the runner with max_workers=4. `fan_out_fan_in` pipeline needs 3 processes. A pool with 3 workers should be used.
test_specified_max_workers_bellow_cpu_cores_count
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_max_worker_windows(self, mocker):
    """The ProcessPoolExecutor on Python 3.7+
    has a quirk with the max worker number on
    Windows and requires it to be <=61
    """
    # Pretend we are on Windows with far more cores than the cap allows.
    mocker.patch("os.cpu_count", return_value=100)
    mocker.patch("sys.platform", "win32")

    parallel_runner = ParallelRunner()
    assert parallel_runner._max_workers == _MAX_WINDOWS_WORKERS
The ProcessPoolExecutor on Python 3.7+ has a quirk with the max worker number on Windows and requires it to be <=61
test_max_worker_windows
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_task_node_validation(self, is_async, fan_out_fan_in, catalog):
    """ParallelRunner cannot serialise the lambda function."""
    catalog.add_feed_dict({"A": 42})
    # Lambdas are not picklable, so spawning worker processes must fail.
    pipeline = modular_pipeline([fan_out_fan_in, node(lambda x: x, "Z", "X")])
    with pytest.raises(AttributeError):
        ParallelRunner(is_async=is_async).run(pipeline, catalog)
ParallelRunner cannot serialise the lambda function.
test_task_node_validation
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_task_dataset_validation(self, is_async, fan_out_fan_in, catalog):
    """ParallelRunner cannot serialise datasets marked with `_SINGLE_PROCESS`."""
    # SingleProcessDataset declares itself unusable across processes.
    catalog.add("A", SingleProcessDataset())
    with pytest.raises(AttributeError):
        ParallelRunner(is_async=is_async).run(fan_out_fan_in, catalog)
ParallelRunner cannot serialise datasets marked with `_SINGLE_PROCESS`.
test_task_dataset_validation
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_memory_dataset_output(self, is_async, fan_out_fan_in):
    """ParallelRunner does not support output to externally
    created MemoryDatasets.
    """
    pipeline = modular_pipeline([fan_out_fan_in])
    catalog = DataCatalog({"C": MemoryDataset()}, {"A": 42})
    # NOTE(review): `match` is a regex, so "['C']" is a character class
    # matching any of ' [ C ] — it still matches the message, but confirm
    # the literal "['C']" was the intent.
    with pytest.raises(AttributeError, match="['C']"):
        ParallelRunner(is_async=is_async).run(pipeline, catalog)
ParallelRunner does not support output to externally created MemoryDatasets.
test_memory_dataset_output
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_dataset_not_serialisable(self, is_async, fan_out_fan_in):
    """Data set A cannot be serialisable because _load and _save are not
    defined in global scope.
    """

    def _load():
        return 0  # pragma: no cover

    def _save(arg):
        assert arg == 0  # pragma: no cover

    # Data set A cannot be serialised
    catalog = DataCatalog({"A": LambdaDataset(load=_load, save=_save)})

    pipeline = modular_pipeline([fan_out_fan_in])
    # NOTE(review): `match` is a regex, so "['A']" is a character class
    # matching any of ' [ A ] — confirm the literal "['A']" was the intent.
    with pytest.raises(AttributeError, match="['A']"):
        ParallelRunner(is_async=is_async).run(pipeline, catalog)
Data set A cannot be serialisable because _load and _save are not defined in global scope.
test_dataset_not_serialisable
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_memory_dataset_not_serialisable(self, is_async, catalog):
    """Memory dataset cannot be serialisable because of data it stores."""
    # The node's return value itself is unpicklable, so the implicit
    # shared-memory dataset created for "B" must reject it.
    data = return_not_serialisable(None)
    pipeline = modular_pipeline([node(return_not_serialisable, "A", "B")])
    catalog.add_feed_dict(feed_dict={"A": 42})
    pattern = (
        rf"{data.__class__!s} cannot be serialised. ParallelRunner implicit "
        rf"memory datasets can only be used with serialisable data"
    )

    with pytest.raises(DatasetError, match=pattern):
        ParallelRunner(is_async=is_async).run(pipeline, catalog)
Memory dataset cannot be serialisable because of data it stores.
test_memory_dataset_not_serialisable
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_unable_to_schedule_all_nodes(
    self, mocker, is_async, fan_out_fan_in, catalog
):
    """Test the error raised when `futures` variable is empty,
    but `todo_nodes` is not (can barely happen in real life).
    """
    catalog.add_feed_dict({"A": 42})
    runner = ParallelRunner(is_async=is_async)

    real_node_deps = fan_out_fan_in.node_dependencies
    # construct deliberately unresolvable dependencies for all
    # pipeline nodes, so that none can be run
    fake_node_deps = {k: {"you_shall_not_pass"} for k in real_node_deps}
    # property mock requires patching a class, not an instance
    mocker.patch(
        "kedro.pipeline.Pipeline.node_dependencies",
        new_callable=mocker.PropertyMock,
        return_value=fake_node_deps,
    )

    pattern = "Unable to schedule new tasks although some nodes have not been run"
    with pytest.raises(RuntimeError, match=pattern):
        runner.run(fan_out_fan_in, catalog)
Test the error raised when `futures` variable is empty, but `todo_nodes` is not (can barely happen in real life).
test_unable_to_schedule_all_nodes
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def test_stricter_suggest_resume_scenario(
    self,
    caplog,
    two_branches_crossed_pipeline_variable_inputs,
    logging_dataset_catalog,
    failing_node_names,
    expected_pattern,
):
    """
    Stricter version of previous test.
    Covers pipelines where inputs are shared across nodes.
    """
    test_pipeline = two_branches_crossed_pipeline_variable_inputs
    nodes = {n.name: n for n in test_pipeline.nodes}
    for name in failing_node_names:
        # Swap each designated node for a copy whose function raises,
        # so the run fails exactly at those nodes.
        test_pipeline -= modular_pipeline([nodes[name]])
        test_pipeline += modular_pipeline([nodes[name]._copy(func=exception_fn)])

    with pytest.raises(Exception, match="test exception"):
        ParallelRunner().run(
            test_pipeline,
            logging_dataset_catalog,
            hook_manager=_create_hook_manager(),
        )
    # The runner logs a resume suggestion after the failure; verify it.
    assert re.search(expected_pattern, caplog.text)
Stricter version of previous test. Covers pipelines where inputs are shared across nodes.
test_stricter_suggest_resume_scenario
python
kedro-org/kedro
tests/runner/test_parallel_runner.py
https://github.com/kedro-org/kedro/blob/master/tests/runner/test_parallel_runner.py
Apache-2.0
def load_obj(obj_path: str, default_obj_path: str = "") -> Any:
    """Extract an object from a given path.

    Args:
        obj_path: Path to an object to be extracted, including the object name.
        default_obj_path: Default object path.

    Returns:
        Extracted object.

    Raises:
        AttributeError: When the object does not have the given named attribute.
    """
    # Split off the attribute name after the last dot; when there is no dot,
    # fall back to the default module path.
    module_path, dot, attr_name = obj_path.rpartition(".")
    if not dot:
        module_path = default_obj_path
    module_obj = importlib.import_module(module_path)
    return getattr(module_obj, attr_name)
Extract an object from a given path. Args: obj_path: Path to an object to be extracted, including the object name. default_obj_path: Default object path. Returns: Extracted object. Raises: AttributeError: When the object does not have the given named attribute.
load_obj
python
kedro-org/kedro
kedro/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/utils.py
Apache-2.0
def _is_databricks() -> bool: """Evaluate if the current run environment is Databricks or not. Useful to tailor environment-dependent activities like Kedro magic commands or logging features that Databricks doesn't support. Returns: True if run environment is Databricks, otherwise False. """ return "DATABRICKS_RUNTIME_VERSION" in os.environ
Evaluate if the current run environment is Databricks or not. Useful to tailor environment-dependent activities like Kedro magic commands or logging features that Databricks doesn't support. Returns: True if run environment is Databricks, otherwise False.
_is_databricks
python
kedro-org/kedro
kedro/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/utils.py
Apache-2.0
def _is_project(project_path: Union[str, Path]) -> bool:
    """Evaluate if a given path is a root directory of a Kedro project or not.

    Args:
        project_path: Path to be tested for being a root of a Kedro project.

    Returns:
        True if a given path is a root directory of a Kedro project,
        otherwise False.
    """
    metadata_file = Path(project_path).expanduser().resolve() / _PYPROJECT
    if not metadata_file.is_file():
        return False

    # Best-effort read: any I/O or decoding problem means "not a project".
    try:
        content = metadata_file.read_text(encoding="utf-8")
    except Exception:
        return False
    return "[tool.kedro]" in content
Evaluate if a given path is a root directory of a Kedro project or not. Args: project_path: Path to be tested for being a root of a Kedro project. Returns: True if a given path is a root directory of a Kedro project, otherwise False.
_is_project
python
kedro-org/kedro
kedro/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/utils.py
Apache-2.0
def _find_kedro_project(current_dir: Path) -> Any:  # pragma: no cover
    """Given a path, find a Kedro project associated with it.

    Can be:
    - Itself, if a path is a root directory of a Kedro project.
    - One of its parents, if self is not a Kedro project but one of the
      parent path is.
    - None, if neither self nor any parent path is a Kedro project.

    Returns:
        Kedro project associated with a given path,
        or None if no relevant Kedro project is found.
    """
    # Walk from the directory itself up through every ancestor.
    for candidate in (current_dir, *current_dir.parents):
        if _is_project(candidate):
            return candidate
    return None
Given a path, find a Kedro project associated with it. Can be: - Itself, if a path is a root directory of a Kedro project. - One of its parents, if self is not a Kedro project but one of the parent path is. - None, if neither self nor any parent path is a Kedro project. Returns: Kedro project associated with a given path, or None if no relevant Kedro project is found.
_find_kedro_project
python
kedro-org/kedro
kedro/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/utils.py
Apache-2.0
def _has_rich_handler(logger: Optional[logging.Logger] = None) -> bool: """Returns true if the logger has a RichHandler attached.""" if not logger: logger = logging.getLogger() # User root by default try: from rich.logging import RichHandler except ImportError: return False return any(isinstance(handler, RichHandler) for handler in logger.handlers)
Returns true if the logger has a RichHandler attached.
_has_rich_handler
python
kedro-org/kedro
kedro/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/utils.py
Apache-2.0
def _format_rich(value: str, markup: str) -> str: """Format string with rich markup""" return f"[{markup}]{value}[/{markup}]"
Format string with rich markup
_format_rich
python
kedro-org/kedro
kedro/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/utils.py
Apache-2.0
def __init__(  # noqa: PLR0913
    self,
    conf_source: str,
    env: str | None = None,
    runtime_params: dict[str, Any] | None = None,
    *,
    config_patterns: dict[str, list[str]] | None = None,
    base_env: str | None = None,
    default_run_env: str | None = None,
    custom_resolvers: dict[str, Callable] | None = None,
    merge_strategy: dict[str, str] | None = None,
):
    """Instantiates a ``OmegaConfigLoader``.

    Args:
        conf_source: Path to use as root directory for loading configuration.
        env: Environment that will take precedence over base.
        runtime_params: Extra parameters passed to a Kedro run.
        config_patterns: Regex patterns that specify the naming convention for
            configuration files so they can be loaded. Can be customised by
            supplying config_patterns as in `CONFIG_LOADER_ARGS` in
            `settings.py`.
        base_env: Name of the base environment. When the ``OmegaConfigLoader``
            is used directly this defaults to `None`. Otherwise, the value will
            come from the `CONFIG_LOADER_ARGS` in the project settings, where
            base_env defaults to `"base"`. This is used in the `conf_paths`
            property method to construct the configuration paths.
        default_run_env: Name of the default run environment. When the
            ``OmegaConfigLoader`` is used directly this defaults to `None`.
            Otherwise, the value will come from the `CONFIG_LOADER_ARGS` in the
            project settings, where default_run_env defaults to `"local"`.
            Can be overridden by supplying the `env` argument.
        custom_resolvers: A dictionary of custom resolvers to be registered.
            For more information, see here:
            https://omegaconf.readthedocs.io/en/2.3_branch/custom_resolvers.html#custom-resolvers
        merge_strategy: A dictionary that specifies the merging strategy for
            each configuration type. The accepted merging strategies are `soft`
            and `destructive`. Defaults to `destructive`.
    """
    self.base_env = base_env or ""
    self.default_run_env = default_run_env or ""
    self.merge_strategy = merge_strategy or {}
    # Lazily-built OmegaConf views, cached by the resolver callbacks.
    self._globals_oc: DictConfig | None = None
    self._runtime_params_oc: DictConfig | None = None
    self.config_patterns = {
        "catalog": ["catalog*", "catalog*/**", "**/catalog*"],
        "parameters": ["parameters*", "parameters*/**", "**/parameters*"],
        "credentials": ["credentials*", "credentials*/**", "**/credentials*"],
        "globals": ["globals.yml"],
    }
    self.config_patterns.update(config_patterns or {})

    # Deactivate oc.env built-in resolver for OmegaConf
    OmegaConf.clear_resolver("oc.env")

    # Register user provided custom resolvers
    self._custom_resolvers = custom_resolvers
    if custom_resolvers:
        self._register_new_resolvers(custom_resolvers)

    # Register globals resolver
    self._register_globals_resolver()

    # Setup file system and protocol
    self._fs, self._protocol = self._initialise_filesystem_and_protocol(conf_source)

    super().__init__(
        conf_source=conf_source,
        env=env,
        runtime_params=runtime_params,
    )
    # Eagerly load globals so the "globals" resolver can serve them;
    # a missing globals file is not an error.
    try:
        self._globals = self["globals"]
    except MissingConfigException:
        self._globals = {}
Instantiates a ``OmegaConfigLoader``. Args: conf_source: Path to use as root directory for loading configuration. env: Environment that will take precedence over base. runtime_params: Extra parameters passed to a Kedro run. config_patterns: Regex patterns that specify the naming convention for configuration files so they can be loaded. Can be customised by supplying config_patterns as in `CONFIG_LOADER_ARGS` in `settings.py`. base_env: Name of the base environment. When the ``OmegaConfigLoader`` is used directly this defaults to `None`. Otherwise, the value will come from the `CONFIG_LOADER_ARGS` in the project settings, where base_env defaults to `"base"`. This is used in the `conf_paths` property method to construct the configuration paths. default_run_env: Name of the default run environment. When the ``OmegaConfigLoader`` is used directly this defaults to `None`. Otherwise, the value will come from the `CONFIG_LOADER_ARGS` in the project settings, where default_run_env defaults to `"local"`. Can be overridden by supplying the `env` argument. custom_resolvers: A dictionary of custom resolvers to be registered. For more information, see here: https://omegaconf.readthedocs.io/en/2.3_branch/custom_resolvers.html#custom-resolvers merge_strategy: A dictionary that specifies the merging strategy for each configuration type. The accepted merging strategies are `soft` and `destructive`. Defaults to `destructive`.
__init__
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def __getitem__(self, key: str) -> dict[str, Any]:  # noqa: PLR0912
    """Get configuration files by key, load and merge them, and
    return them in the form of a config dictionary.

    Args:
        key: Key of the configuration type to fetch.

    Raises:
        KeyError: If key provided isn't present in the config_patterns of
            this ``OmegaConfigLoader`` instance.
        MissingConfigException: If no configuration files exist matching
            the patterns mapped to the provided key.

    Returns:
        Dict[str, Any]:  A Python dictionary with the combined
            configuration from all configuration files.
    """
    # Allow bypassing of loading config from patterns if a key and value
    # have been set explicitly on the ``OmegaConfigLoader`` instance.

    # Re-register runtime params resolver incase it was previously deactivated
    self._register_runtime_params_resolver()

    if key in self:
        return super().__getitem__(key)  # type: ignore[no-any-return]

    if key not in self.config_patterns:
        raise KeyError(
            f"No config patterns were found for '{key}' in your config loader"
        )
    patterns = [*self.config_patterns[key]]

    if key == "globals":
        # "runtime_params" resolver is not allowed in globals.
        OmegaConf.clear_resolver("runtime_params")

    # Only credentials may read environment variables during loading.
    read_environment_variables = key == "credentials"

    processed_files: set[Path] = set()

    # Load base env config
    if self._protocol == "file":
        base_path = str(Path(self.conf_source) / self.base_env)
    else:
        # Archive (tar/zip) filesystems expose the archive root as the
        # last listing entry.
        base_path = str(Path(self._fs.ls("", detail=False)[-1]) / self.base_env)
    try:
        base_config = self.load_and_merge_dir_config(  # type: ignore[no-untyped-call]
            base_path, patterns, key, processed_files, read_environment_variables
        )
    except UnsupportedInterpolationType as exc:
        if "runtime_params" in str(exc):
            raise UnsupportedInterpolationType(
                "The `runtime_params:` resolver is not supported for globals."
            )
        else:
            raise exc

    config = base_config

    # Load chosen env config
    run_env = self.env or self.default_run_env

    # Return if chosen env config is the same as base config to avoid
    # loading the same config twice
    if run_env == self.base_env:
        return config  # type: ignore[no-any-return]

    if self._protocol == "file":
        env_path = str(Path(self.conf_source) / run_env)
    else:
        env_path = str(Path(self._fs.ls("", detail=False)[-1]) / run_env)
    try:
        env_config = self.load_and_merge_dir_config(  # type: ignore[no-untyped-call]
            env_path, patterns, key, processed_files, read_environment_variables
        )
    except UnsupportedInterpolationType as exc:
        if "runtime_params" in str(exc):
            raise UnsupportedInterpolationType(
                "The `runtime_params:` resolver is not supported for globals."
            )
        else:
            raise exc

    # Run-env config wins over base according to the configured strategy.
    resulting_config = self._merge_configs(config, env_config, key, env_path)

    if not processed_files and key != "globals":
        raise MissingConfigException(
            f"No files of YAML or JSON format found in {base_path} or {env_path} matching"
            f" the glob pattern(s): {[*self.config_patterns[key]]}"
        )
    return resulting_config  # type: ignore[no-any-return]
Get configuration files by key, load and merge them, and return them in the form of a config dictionary. Args: key: Key of the configuration type to fetch. Raises: KeyError: If key provided isn't present in the config_patterns of this ``OmegaConfigLoader`` instance. MissingConfigException: If no configuration files exist matching the patterns mapped to the provided key. Returns: Dict[str, Any]: A Python dictionary with the combined configuration from all configuration files.
__getitem__
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def load_and_merge_dir_config(
    self,
    conf_path: str,
    patterns: Iterable[str],
    key: str,
    processed_files: set,
    read_environment_variables: bool | None = False,
) -> dict[str, Any]:
    """Recursively load and merge all configuration files in a directory
    using OmegaConf, which satisfy a given list of glob patterns from a
    specific path.

    Args:
        conf_path: Path to configuration directory.
        patterns: List of glob patterns to match the filenames against.
        key: Key of the configuration type to fetch.
        processed_files: Set of files read for a given configuration type.
        read_environment_variables: Whether to resolve environment variables.

    Raises:
        MissingConfigException: If configuration path doesn't exist or
            isn't valid.
        ValueError: If two or more configuration files contain the same
            key(s).
        ParserError: If config file contains invalid YAML or JSON syntax.

    Returns:
        Resulting configuration dictionary.
    """
    if not self._fs.isdir(Path(conf_path).as_posix()):
        raise MissingConfigException(
            f"Given configuration path either does not exist "
            f"or is not a valid directory: {conf_path}"
        )

    # Collect every non-hidden file matching any pattern.
    paths = []
    for pattern in patterns:
        for each in self._fs.glob(Path(f"{conf_path!s}/{pattern}").as_posix()):
            if not self._is_hidden(each):
                paths.append(Path(each))

    deduplicated_paths = set(paths)
    config_files_filtered = [
        path for path in deduplicated_paths if self._is_valid_config_path(path)
    ]

    config_per_file = {}
    for config_filepath in config_files_filtered:
        try:
            with self._fs.open(str(config_filepath.as_posix())) as open_config:
                # As fsspec doesn't allow the file to be read as StringIO,
                # this is a workaround to read it as a binary file and
                # decode it back to utf8.
                tmp_fo = io.StringIO(open_config.read().decode("utf8"))
                config = OmegaConf.load(tmp_fo)
                processed_files.add(config_filepath)
            if read_environment_variables:
                self._resolve_environment_variables(config)
            config_per_file[config_filepath] = config
        except (ParserError, ScannerError) as exc:
            line = exc.problem_mark.line
            cursor = exc.problem_mark.column
            raise ParserError(
                f"Invalid YAML or JSON file {Path(config_filepath).as_posix()},"
                f" unable to read line {line}, position {cursor}."
            ) from exc

    aggregate_config = config_per_file.values()
    self._check_duplicates(key, config_per_file)

    if not aggregate_config:
        return {}

    if key == "parameters":
        # Merge with runtime parameters only for "parameters"
        return OmegaConf.to_container(
            OmegaConf.merge(*aggregate_config, self.runtime_params), resolve=True
        )

    merged_config_container = OmegaConf.to_container(
        OmegaConf.merge(*aggregate_config), resolve=True
    )
    # Keys prefixed with "_" are private (e.g. YAML anchors) and excluded
    # from the returned configuration.
    return {
        k: v for k, v in merged_config_container.items() if not k.startswith("_")
    }
Recursively load and merge all configuration files in a directory using OmegaConf, which satisfy a given list of glob patterns from a specific path. Args: conf_path: Path to configuration directory. patterns: List of glob patterns to match the filenames against. key: Key of the configuration type to fetch. processed_files: Set of files read for a given configuration type. read_environment_variables: Whether to resolve environment variables. Raises: MissingConfigException: If configuration path doesn't exist or isn't valid. ValueError: If two or more configuration files contain the same key(s). ParserError: If config file contains invalid YAML or JSON syntax. Returns: Resulting configuration dictionary.
load_and_merge_dir_config
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _initialise_filesystem_and_protocol(
    conf_source: str,
) -> tuple[fsspec.AbstractFileSystem, str]:
    """Set up the file system based on the file type detected in conf_source."""
    # Map the detected mimetype onto an fsspec protocol; anything that is
    # not a recognised archive is treated as a plain directory.
    file_mimetype, _ = mimetypes.guess_type(conf_source)
    if file_mimetype == "application/x-tar":
        protocol = "tar"
    elif file_mimetype in (
        "application/zip",
        "application/x-zip-compressed",
        "application/zip-compressed",
    ):
        protocol = "zip"
    else:
        protocol = "file"
    return fsspec.filesystem(protocol=protocol, fo=conf_source), protocol
Set up the file system based on the file type detected in conf_source.
_initialise_filesystem_and_protocol
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _is_valid_config_path(self, path: Path) -> bool: """Check if given path is a file path and file type is yaml or json.""" posix_path = path.as_posix() return self._fs.isfile(str(posix_path)) and path.suffix in [ ".yml", ".yaml", ".json", ]
Check if given path is a file path and file type is yaml or json.
_is_valid_config_path
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _register_globals_resolver(self) -> None:
    """Register the globals resolver"""
    # replace=True keeps re-instantiation idempotent.
    OmegaConf.register_new_resolver(
        "globals", self._get_globals_value, replace=True
    )
Register the globals resolver
_register_globals_resolver
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _get_globals_value(self, variable: str, default_value: Any = _NO_VALUE) -> Any:
    """Return the globals values to the resolver"""
    # Keys prefixed with "_" are private and may not be interpolated.
    if variable.startswith("_"):
        raise InterpolationResolutionError(
            "Keys starting with '_' are not supported for globals."
        )

    if not self._globals_oc:
        self._globals_oc = OmegaConf.create(self._globals)
    resolved = OmegaConf.select(self._globals_oc, variable, default=default_value)
    if resolved == _NO_VALUE:
        raise InterpolationResolutionError(
            f"Globals key '{variable}' not found and no default value provided."
        )
    return resolved
Return the globals values to the resolver
_get_globals_value
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _get_runtime_value(self, variable: str, default_value: Any = _NO_VALUE) -> Any:
    """Return the runtime params values to the resolver"""
    if not self._runtime_params_oc:
        self._runtime_params_oc = OmegaConf.create(self.runtime_params)
    resolved = OmegaConf.select(
        self._runtime_params_oc, variable, default=default_value
    )
    if resolved == _NO_VALUE:
        raise InterpolationResolutionError(
            f"Runtime parameter '{variable}' not found and no default value provided."
        )
    return resolved
Return the runtime params values to the resolver
_get_runtime_value
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _register_new_resolvers(resolvers: dict[str, Callable]) -> None:
    """Register custom resolvers"""
    for name, resolver in resolvers.items():
        # Never overwrite a resolver that is already registered.
        if OmegaConf.has_resolver(name):
            continue
        msg = f"Registering new custom resolver: {name}"
        _config_logger.debug(msg)
        OmegaConf.register_new_resolver(name=name, resolver=resolver)
Register custom resolvers
_register_new_resolvers
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _resolve_environment_variables(config: DictConfig) -> None:
    """Use the ``oc.env`` resolver to read environment variables and
    replace them in-place, clearing the resolver after the operation is
    complete if it was not registered beforehand.

    Arguments:
        config {Dict[str, Any]} -- The configuration dictionary to resolve.
    """
    # Register oc.env only for the duration of this resolution if it was
    # not already active, restoring the previous state afterwards.
    registered_temporarily = not OmegaConf.has_resolver("oc.env")
    if registered_temporarily:
        OmegaConf.register_new_resolver("oc.env", oc.env)
    OmegaConf.resolve(config)
    if registered_temporarily:
        OmegaConf.clear_resolver("oc.env")
Use the ``oc.env`` resolver to read environment variables and replace them in-place, clearing the resolver after the operation is complete if it was not registered beforehand. Arguments: config {Dict[str, Any]} -- The configuration dictionary to resolve.
_resolve_environment_variables
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _is_hidden(self, path_str: str) -> bool: """Check if path contains any hidden directory or is a hidden file""" path = Path(path_str) conf_path = Path(self.conf_source).resolve().as_posix() if self._protocol == "file": path = path.resolve() posix_path = path.as_posix() if posix_path.startswith(conf_path): posix_path = posix_path.replace(conf_path, "") parts = posix_path.split(self._fs.sep) # filesystem specific separator HIDDEN = "." # Check if any component (folder or file) starts with a dot (.) return any(part.startswith(HIDDEN) for part in parts)
Check if path contains any hidden directory or is a hidden file
_is_hidden
python
kedro-org/kedro
kedro/config/omegaconf_config.py
https://github.com/kedro-org/kedro/blob/master/kedro/config/omegaconf_config.py
Apache-2.0
def _transcode_split(element: str) -> tuple[str, str]:
    """Split the name by the transcoding separator.
    If the transcoding part is missing, empty string will be put in.

    Returns:
        Node input/output name before the transcoding separator, if present.

    Raises:
        ValueError: Raised if more than one transcoding separator
        is present in the name.
    """
    separator_count = element.count(TRANSCODING_SEPARATOR)
    if separator_count > 1:
        raise ValueError(
            f"Expected maximum 1 transcoding separator, found {separator_count} "
            f"instead: '{element}'."
        )
    # partition yields ("", "") for the tail when no separator is present.
    base_name, _, transcoding = element.partition(TRANSCODING_SEPARATOR)
    return base_name, transcoding
Split the name by the transcoding separator. If the transcoding part is missing, empty string will be put in. Returns: Node input/output name before the transcoding separator, if present. Raises: ValueError: Raised if more than one transcoding separator is present in the name.
_transcode_split
python
kedro-org/kedro
kedro/pipeline/transcoding.py
https://github.com/kedro-org/kedro/blob/master/kedro/pipeline/transcoding.py
Apache-2.0
def _strip_transcoding(element: str) -> str:
    """Strip out the transcoding separator and anything that follows.

    Returns:
        Node input/output name before the transcoding separator, if present.

    Raises:
        ValueError: Raised if more than one transcoding separator
        is present in the name.
    """
    base_name, _transcoding = _transcode_split(element)
    return base_name
Strip out the transcoding separator and anything that follows. Returns: Node input/output name before the transcoding separator, if present. Raises: ValueError: Raised if more than one transcoding separator is present in the name.
_strip_transcoding
python
kedro-org/kedro
kedro/pipeline/transcoding.py
https://github.com/kedro-org/kedro/blob/master/kedro/pipeline/transcoding.py
Apache-2.0