Dataset columns:
  code: string, lengths 26 to 870k
  docstring: string, lengths 1 to 65.6k
  func_name: string, lengths 1 to 194
  language: string, 1 class
  repo: string, lengths 8 to 68
  path: string, lengths 5 to 194
  url: string, lengths 46 to 254
  license: string, 4 classes
def _safe_load_entry_point(
    entry_point: Any,
) -> Any:
    """Load entrypoint safely, if fails it will just skip the entrypoint."""
    try:
        return entry_point.load()
    except Exception as exc:
        logger.warning(
            "Failed to load %s commands from %s. Full exception: %s",
            entry_point.module,
            entry_point,
            exc,
        )
        return
Load entrypoint safely, if fails it will just skip the entrypoint.
_safe_load_entry_point
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def load_entry_points(name: str) -> Sequence[click.MultiCommand]:
    """Load package entry point commands.

    Args:
        name: The key value specified in ENTRY_POINT_GROUPS.

    Raises:
        KedroCliError: If loading an entry point failed.

    Returns:
        List of entry point commands.
    """
    entry_point_commands = []
    for entry_point in _get_entry_points(name):
        loaded_entry_point = _safe_load_entry_point(entry_point)
        if loaded_entry_point:
            entry_point_commands.append(loaded_entry_point)
    return entry_point_commands
Load package entry point commands. Args: name: The key value specified in ENTRY_POINT_GROUPS. Raises: KedroCliError: If loading an entry point failed. Returns: List of entry point commands.
load_entry_points
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
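The two helpers above lean on Python's entry-point machinery to discover plugin-provided commands. A minimal sketch of that discovery plus safe-loading step, assuming the "project" key maps to the "kedro.project_commands" entry-point group (an assumption about ENTRY_POINT_GROUPS) and Python 3.10+ for the `group=` keyword:

# Hypothetical stand-in for _get_entry_points("project") followed by safe loading.
from importlib.metadata import entry_points

# "kedro.project_commands" is an assumed group name; check ENTRY_POINT_GROUPS in your version.
for ep in entry_points(group="kedro.project_commands"):
    try:
        command = ep.load()  # a click.MultiCommand exposed by the plugin
        print(f"Loaded commands from {ep.module}")
    except Exception as exc:
        # mirrors _safe_load_entry_point: log and skip rather than fail the whole CLI
        print(f"Skipped {ep.name}: {exc}")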
def find_run_command(package_name: str) -> Callable:
    """Find the run command to be executed.
    This is either the default run command defined in the Kedro framework
    or a run command defined by an installed plugin.

    Args:
        package_name: The name of the package being run.

    Raises:
        KedroCliError: If the run command is not found.

    Returns:
        Run command to be executed.
    """
    try:
        project_cli = importlib.import_module(f"{package_name}.cli")
        # fail gracefully if cli.py does not exist
    except ModuleNotFoundError as exc:
        if f"{package_name}.cli" not in str(exc):
            raise
        plugins = load_entry_points("project")
        run = _find_run_command_in_plugins(plugins) if plugins else None
        if run:
            # use run command from installed plugin if it exists
            return run  # type: ignore[no-any-return]
        # use run command from `kedro.framework.cli.project`
        from kedro.framework.cli.project import run

        return run  # type: ignore[return-value]
    # fail badly if cli.py exists, but has no `cli` in it
    if not hasattr(project_cli, "cli"):
        raise KedroCliError(f"Cannot load commands from {package_name}.cli")
    return project_cli.run  # type: ignore[no-any-return]
Find the run command to be executed. This is either the default run command defined in the Kedro framework or a run command defined by an installed plugin. Args: package_name: The name of the package being run. Raises: KedroCliError: If the run command is not found. Returns: Run command to be executed.
find_run_command
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def _config_file_callback(ctx: click.Context, param: Any, value: Any) -> Any:
    """CLI callback that replaces command line options with values specified
    in a config file. If command line options are passed, they override
    config file values.
    """
    ctx.default_map = ctx.default_map or {}
    section = ctx.info_name

    if value:
        config = OmegaConf.to_container(OmegaConf.load(value))[section]
        for key, value in config.items():  # noqa: PLR1704
            _validate_config_file(key)
        ctx.default_map.update(config)

    return value
CLI callback that replaces command line options with values specified in a config file. If command line options are passed, they override config file values.
_config_file_callback
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def _validate_config_file(key: str) -> None:
    """Validate the keys provided in the config file against the accepted keys."""
    from kedro.framework.cli.project import run

    run_args = [click_arg.name for click_arg in run.params]
    run_args.remove("config")
    if key not in run_args:
        KedroCliError.VERBOSE_EXISTS = False
        message = _suggest_cli_command(key, run_args)  # type: ignore[arg-type]
        raise KedroCliError(
            f"Key `{key}` in provided configuration is not valid. {message}"
        )
Validate the keys provided in the config file against the accepted keys.
_validate_config_file
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def _split_load_versions(ctx: click.Context, param: Any, value: str) -> dict[str, str]:
    """Split and format the string coming from the --load-versions
    flag in kedro run, e.g.:
    "dataset1:time1,dataset2:time2" -> {"dataset1": "time1", "dataset2": "time2"}

    Args:
        value: the string with the contents of the --load-versions flag.

    Returns:
        A dictionary with the formatted load versions data.
    """
    if not value:
        return {}

    lv_tuple = tuple(chain.from_iterable(value.split(",") for value in [value]))

    load_versions_dict = {}
    for load_version in lv_tuple:
        load_version = load_version.strip()  # noqa: PLW2901
        load_version_list = load_version.split(":", 1)
        if len(load_version_list) != 2:  # noqa: PLR2004
            raise KedroCliError(
                f"Expected the form of 'load_versions' to be "
                f"'dataset_name:YYYY-MM-DDThh.mm.ss.sssZ',"
                f"found {load_version} instead"
            )
        load_versions_dict[load_version_list[0]] = load_version_list[1]

    return load_versions_dict
Split and format the string coming from the --load-versions flag in kedro run, e.g.: "dataset1:time1,dataset2:time2" -> {"dataset1": "time1", "dataset2": "time2"} Args: value: the string with the contents of the --load-versions flag. Returns: A dictionary with the formatted load versions data.
_split_load_versions
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
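Since `_split_load_versions` only inspects `value`, its behaviour can be illustrated by calling it directly; the dataset names and timestamps below are made up:

# ctx and param are unused by the callback, so None is passed purely for illustration.
versions = _split_load_versions(
    None, None, "cars:2023-01-01T00.00.00.000Z,boats:2023-02-01T00.00.00.000Z"
)
assert versions == {
    "cars": "2023-01-01T00.00.00.000Z",
    "boats": "2023-02-01T00.00.00.000Z",
}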
def ipython(metadata: ProjectMetadata, /, env: str, args: Any, **kwargs: Any) -> None:
    """Open IPython with project specific variables loaded."""
    _check_module_importable("IPython")

    if env:
        os.environ["KEDRO_ENV"] = env
    call(["ipython", "--ext", "kedro.ipython", *list(args)])
Open IPython with project specific variables loaded.
ipython
python
kedro-org/kedro
kedro/framework/cli/project.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/project.py
Apache-2.0
def package(metadata: ProjectMetadata) -> None:
    """Package the project as a Python wheel."""
    # Even if the user decides for the older setup.py on purpose,
    # pyproject.toml is needed for Kedro metadata
    if (metadata.project_path / "pyproject.toml").is_file():
        metadata_dir = metadata.project_path
        destination_dir = "dist"
    else:
        # Assume it's an old Kedro project, packaging metadata was under src
        # (could be pyproject.toml or setup.py, it's not important)
        metadata_dir = metadata.source_dir
        destination_dir = "../dist"

    call(
        [
            sys.executable,
            "-m",
            "build",
            "--wheel",
            "--outdir",
            destination_dir,
        ],
        cwd=str(metadata_dir),
    )

    directory = (
        str(Path(settings.CONF_SOURCE).parent)
        if settings.CONF_SOURCE != "conf"
        else metadata.project_path
    )
    call(
        [
            "tar",
            "--exclude=local/*.yml",
            "-czf",
            f"dist/conf-{metadata.package_name}.tar.gz",
            f"--directory={directory}",
            str(Path(settings.CONF_SOURCE).stem),
        ]
    )
Package the project as a Python wheel.
package
python
kedro-org/kedro
kedro/framework/cli/project.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/project.py
Apache-2.0
def run(  # noqa: PLR0913
    tags: str,
    env: str,
    runner: str,
    is_async: bool,
    node_names: str,
    to_nodes: str,
    from_nodes: str,
    from_inputs: str,
    to_outputs: str,
    load_versions: dict[str, str] | None,
    pipeline: str,
    config: str,
    conf_source: str,
    params: dict[str, Any],
    namespace: str,
) -> dict[str, Any]:
    """Run the pipeline."""
    runner_obj = load_obj(runner or "SequentialRunner", "kedro.runner")
    tuple_tags = tuple(tags)
    tuple_node_names = tuple(node_names)

    with KedroSession.create(
        env=env, conf_source=conf_source, extra_params=params
    ) as session:
        return session.run(
            tags=tuple_tags,
            runner=runner_obj(is_async=is_async),
            node_names=tuple_node_names,
            from_nodes=from_nodes,
            to_nodes=to_nodes,
            from_inputs=from_inputs,
            to_outputs=to_outputs,
            load_versions=load_versions,
            pipeline_name=pipeline,
            namespace=namespace,
        )
Run the pipeline.
run
python
kedro-org/kedro
kedro/framework/cli/project.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/project.py
Apache-2.0
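The `run` command above is essentially a thin CLI wrapper around `KedroSession.run`. A rough programmatic equivalent, assuming it is executed from a project root; the pipeline and tag names are made up:

from pathlib import Path

from kedro.framework.session import KedroSession
from kedro.framework.startup import bootstrap_project
from kedro.runner import SequentialRunner

bootstrap_project(Path.cwd())  # register the project's settings and pipelines
with KedroSession.create(env="local") as session:
    session.run(
        pipeline_name="data_processing",  # hypothetical pipeline name
        tags=("preprocess",),             # hypothetical tag
        runner=SequentialRunner(),
    )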
def micropkg() -> None:
    """(DEPRECATED) Commands for working with micro-packages.

    DeprecationWarning: micro-packaging is deprecated and will not be
    available from Kedro 0.20.0."""
(DEPRECATED) Commands for working with micro-packages. DeprecationWarning: micro-packaging is deprecated and will not be available from Kedro 0.20.0.
micropkg
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def pull_package( # noqa: PLR0913 metadata: ProjectMetadata, /, package_path: str, env: str, alias: str, destination: str, fs_args: str, all_flag: str, **kwargs: Any, ) -> None: """(DEPRECATED) Pull and unpack a modular pipeline and other micro-packages in your project.""" deprecation_message = ( "DeprecationWarning: Command 'kedro micropkg pull' is deprecated and " "will not be available from Kedro 0.20.0." ) click.secho(deprecation_message, fg="red") if not package_path and not all_flag: click.secho( "Please specify a package path or add '--all' to pull all micro-packages in the " "'pyproject.toml' package manifest section." ) sys.exit(1) if all_flag: _pull_packages_from_manifest(metadata) return _pull_package( package_path, metadata, env=env, alias=alias, destination=destination, fs_args=fs_args, ) as_alias = f" as '{alias}'" if alias else "" message = f"Micro-package {package_path} pulled and unpacked{as_alias}!" click.secho(message, fg="green")
(DEPRECATED) Pull and unpack a modular pipeline and other micro-packages in your project.
pull_package
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def package_micropkg( # noqa: PLR0913 metadata: ProjectMetadata, /, module_path: str, env: str, alias: str, destination: str, all_flag: str, **kwargs: Any, ) -> None: """(DEPRECATED) Package up a modular pipeline or micro-package as a Python source distribution.""" deprecation_message = ( "DeprecationWarning: Command 'kedro micropkg package' is deprecated and " "will not be available from Kedro 0.20.0." ) click.secho(deprecation_message, fg="red") if not module_path and not all_flag: click.secho( "Please specify a micro-package name or add '--all' to package all micro-packages in " "the 'pyproject.toml' package manifest section." ) sys.exit(1) if all_flag: _package_micropkgs_from_manifest(metadata) return result_path = _package_micropkg( module_path, metadata, alias=alias, destination=destination, env=env ) as_alias = f" as '{alias}'" if alias else "" message = ( f"'{metadata.package_name}.{module_path}' packaged{as_alias}! " f"Location: {result_path}" ) click.secho(message, fg="green")
(DEPRECATED) Package up a modular pipeline or micro-package as a Python source distribution.
package_micropkg
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _refactor_code_for_unpacking( # noqa: PLR0913 project: Project, package_path: Path, tests_path: Path, alias: str | None, destination: str | None, project_metadata: ProjectMetadata, ) -> tuple[Path, Path]: """This is the reverse operation of `_refactor_code_for_package`, i.e we go from: <temp_dir> # also the root of the Rope project |__ <micro_package> # or <alias> |__ __init__.py |__ tests # only tests for <micro_package> |__ __init__.py |__ tests.py to: <temp_dir> # also the root of the Rope project |__ <package_name> |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py |__ tests |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py """ def _move_package_with_conflicting_name( target: Path, original_name: str, desired_name: str | None = None ) -> Path: _rename_package(project, original_name, "tmp_name") full_path = _create_nested_package(project, target) _move_package(project, "tmp_name", target.as_posix()) desired_name = desired_name or original_name _rename_package(project, (target / "tmp_name").as_posix(), desired_name) return full_path package_name = package_path.stem package_target = Path(project_metadata.package_name) tests_target = Path("tests") if destination: destination_path = Path(destination) package_target = package_target / destination_path tests_target = tests_target / destination_path if alias and alias != package_name: _rename_package(project, package_name, alias) package_name = alias if package_name == project_metadata.package_name: full_path = _move_package_with_conflicting_name(package_target, package_name) else: full_path = _create_nested_package(project, package_target) _move_package(project, package_name, package_target.as_posix()) refactored_package_path = full_path / package_name if not tests_path.exists(): return refactored_package_path, tests_path # we can't rename the tests package to <package_name> # because it will conflict with existing top-level package; # hence we give it a temp name, create the expected # nested folder structure, move the contents there, # then rename the temp name to <package_name>. full_path = _move_package_with_conflicting_name( tests_target, original_name="tests", desired_name=package_name ) refactored_tests_path = full_path / package_name return refactored_package_path, refactored_tests_path
This is the reverse operation of `_refactor_code_for_package`, i.e we go from: <temp_dir> # also the root of the Rope project |__ <micro_package> # or <alias> |__ __init__.py |__ tests # only tests for <micro_package> |__ __init__.py |__ tests.py to: <temp_dir> # also the root of the Rope project |__ <package_name> |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py |__ tests |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py
_refactor_code_for_unpacking
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _make_install_requires(requirements_txt: Path) -> list[str]:
    """Parses each line of requirements.txt into a version specifier valid to put in
    install_requires.
    Matches pkg_resources.parse_requirements"""
    if not requirements_txt.exists():
        return []
    return [
        str(_EquivalentRequirement(_drop_comment(requirement_line)))
        for requirement_line in requirements_txt.read_text().splitlines()
        if requirement_line and not requirement_line.startswith("#")
    ]
Parses each line of requirements.txt into a version specifier valid to put in install_requires. Matches pkg_resources.parse_requirements
_make_install_requires
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
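`_EquivalentRequirement` and `_drop_comment` are internal helpers, but the same parsing idea can be sketched with the public `packaging` API; the function name below is hypothetical and only approximates the behaviour:

from pathlib import Path

from packaging.requirements import Requirement


def sketch_install_requires(requirements_txt: Path) -> list[str]:
    """Roughly what _make_install_requires does, using packaging.Requirement."""
    if not requirements_txt.exists():
        return []
    return [
        str(Requirement(line.split("#", 1)[0].strip()))  # drop trailing comments
        for line in requirements_txt.read_text().splitlines()
        if line.strip() and not line.lstrip().startswith("#")
    ]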
def _move_package(project: Project, source: str, target: str) -> None:
    """
    Move a Python package, refactoring relevant imports along the way.
    A target of empty string means moving to the root of the `project`.

    Args:
        project: rope.base.Project holding the scope of the refactoring.
        source: Name of the Python package to be moved. Can be a fully
            qualified module path relative to the `project` root, e.g.
            "package.pipelines.pipeline" or "package/pipelines/pipeline".
        target: Destination of the Python package to be moved. Can be a fully
            qualified module path relative to the `project` root, e.g.
            "package.pipelines.pipeline" or "package/pipelines/pipeline".
    """
    src_folder = project.get_module(source).get_resource()
    target_folder = project.get_module(target).get_resource()
    change = MoveModule(project, src_folder).get_changes(dest=target_folder)
    project.do(change)
Move a Python package, refactoring relevant imports along the way. A target of empty string means moving to the root of the `project`. Args: project: rope.base.Project holding the scope of the refactoring. source: Name of the Python package to be moved. Can be a fully qualified module path relative to the `project` root, e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline". target: Destination of the Python package to be moved. Can be a fully qualified module path relative to the `project` root, e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline".
_move_package
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _rename_package(project: Project, old_name: str, new_name: str) -> None:
    """
    Rename a Python package, refactoring relevant imports along the way,
    as well as references in comments.

    Args:
        project: rope.base.Project holding the scope of the refactoring.
        old_name: Old module name. Can be a fully qualified module path,
            e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline",
            relative to the `project` root.
        new_name: New module name. Can't be a fully qualified module path.
    """
    folder = project.get_folder(old_name)
    change = Rename(project, folder).get_changes(new_name, docs=True)
    project.do(change)
Rename a Python package, refactoring relevant imports along the way, as well as references in comments. Args: project: rope.base.Project holding the scope of the refactoring. old_name: Old module name. Can be a fully qualified module path, e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline", relative to the `project` root. new_name: New module name. Can't be a fully qualified module path.
_rename_package
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
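Both `_move_package` and `_rename_package` are thin wrappers around rope's refactoring API. A minimal sketch of the underlying calls, with made-up package names and a scratch directory standing in for the temporary rope project:

from rope.base.project import Project
from rope.refactor.move import MoveModule
from rope.refactor.rename import Rename

project = Project("/tmp/refactor_scratch")  # the rope project root (example path)

# Rename my_pkg -> my_pkg_v2, rewriting imports and doc/comment references.
folder = project.get_folder("my_pkg")
project.do(Rename(project, folder).get_changes("my_pkg_v2", docs=True))

# Move my_pkg_v2 under some.target, rewriting imports along the way.
src = project.get_module("my_pkg_v2").get_resource()
dest = project.get_module("some.target").get_resource()
project.do(MoveModule(project, src).get_changes(dest=dest))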
def _refactor_code_for_package( project: Project, package_path: Path, tests_path: Path, alias: str | None, project_metadata: ProjectMetadata, ) -> None: """In order to refactor the imports properly, we need to recreate the same nested structure as in the project. Therefore, we create: <temp_dir> # also the root of the Rope project |__ <package_name> |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py |__ tests |__ __init__.py |__ path_to_micro_package |__ __init__.py |__ <micro_package> |__ __init__.py We then move <micro_package> outside of package src to top level ("") in temp_dir, and rename folder & imports if alias provided. For tests, we need to extract all the contents of <micro_package> at into top-level `tests` folder. This is not possible in one go with the Rope API, so we have to do it in a bit of a hacky way. We rename <micro_package> to a `tmp_name` and move it at top-level ("") in temp_dir. We remove the old `tests` folder and rename `tmp_name` to `tests`. The final structure should be: <temp_dir> # also the root of the Rope project |__ <micro_package> # or <alias> |__ __init__.py |__ tests # only tests for <micro_package> |__ __init__.py |__ test.py """ def _move_package_with_conflicting_name( target: Path, conflicting_name: str ) -> None: tmp_name = "tmp_name" tmp_module = target.parent / tmp_name _rename_package(project, target.as_posix(), tmp_name) _move_package(project, tmp_module.as_posix(), "") shutil.rmtree(Path(project.address) / conflicting_name) _rename_package(project, tmp_name, conflicting_name) # Copy source in appropriate folder structure package_target = package_path.relative_to(project_metadata.source_dir) full_path = _create_nested_package(project, package_target) # overwrite=True to update the __init__.py files generated by create_package _sync_dirs(package_path, full_path, overwrite=True) # Copy tests in appropriate folder structure if tests_path.exists(): tests_target = tests_path.relative_to(project_metadata.project_path) full_path = _create_nested_package(project, tests_target) # overwrite=True to update the __init__.py files generated by create_package _sync_dirs(tests_path, full_path, overwrite=True) # Refactor imports in src/package_name/.../micro_package # and imports of `micro_package` in tests. micro_package_name = package_target.stem if micro_package_name == project_metadata.package_name: _move_package_with_conflicting_name(package_target, micro_package_name) else: _move_package(project, package_target.as_posix(), "") shutil.rmtree(Path(project.address) / project_metadata.package_name) if alias: _rename_package(project, micro_package_name, alias) if tests_path.exists(): # we can't move the relevant tests folder as is because # it will conflict with the top-level package <micro_package>; # we can't rename it "tests" and move it, because it will conflict # with the existing "tests" folder at top level; # hence we give it a temp name, move it, delete tests/ and # rename the temp name to tests. _move_package_with_conflicting_name(tests_target, "tests")
In order to refactor the imports properly, we need to recreate the same nested structure as in the project. Therefore, we create: <temp_dir> # also the root of the Rope project |__ <package_name> |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py |__ tests |__ __init__.py |__ path_to_micro_package |__ __init__.py |__ <micro_package> |__ __init__.py We then move <micro_package> outside of package src to top level ("") in temp_dir, and rename folder & imports if alias provided. For tests, we need to extract all the contents of <micro_package> at into top-level `tests` folder. This is not possible in one go with the Rope API, so we have to do it in a bit of a hacky way. We rename <micro_package> to a `tmp_name` and move it at top-level ("") in temp_dir. We remove the old `tests` folder and rename `tmp_name` to `tests`. The final structure should be: <temp_dir> # also the root of the Rope project |__ <micro_package> # or <alias> |__ __init__.py |__ tests # only tests for <micro_package> |__ __init__.py |__ test.py
_refactor_code_for_package
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _generate_manifest_file(output_dir: Path) -> None:
    manifest_file = output_dir / "MANIFEST.in"
    manifest_file.write_text(
        """
        global-include README.md
        global-include config/parameters*
        global-include config/**/parameters*
        global-include config/parameters*/**
        global-include config/parameters*/**/*
        """
    )
global-include README.md global-include config/parameters* global-include config/**/parameters* global-include config/parameters*/** global-include config/parameters*/**/*
_generate_manifest_file
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _get_package_artifacts(
    source_path: Path, package_name: str
) -> tuple[Path, Path, Path]:
    """From existing package, returns in order:
    source_path, tests_path, config_path
    """
    artifacts = (
        source_path / package_name,
        source_path / "tests",
        # package_data (non-python files) needs to live inside one of the packages
        source_path / package_name / "config",
    )
    return artifacts
From existing package, returns in order: source_path, tests_path, config_path
_get_package_artifacts
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _append_package_reqs( requirements_txt: Path, package_reqs: list[str], package_name: str ) -> None: """Appends micro-package requirements to project level requirements.txt""" incoming_reqs = _safe_parse_requirements(package_reqs) if requirements_txt.is_file(): existing_reqs = _safe_parse_requirements(requirements_txt.read_text()) reqs_to_add = set(incoming_reqs) - set(existing_reqs) if not reqs_to_add: return sorted_reqs = sorted(str(req) for req in reqs_to_add) sep = "\n" with open(requirements_txt, "a", encoding="utf-8") as file: file.write( f"\n\n# Additional requirements from micro-package `{package_name}`:\n" ) file.write(sep.join(sorted_reqs)) click.secho( f"Added the following requirements from micro-package '{package_name}' to " f"requirements.txt:\n{sep.join(sorted_reqs)}" ) else: click.secho( "No project requirements.txt found. Copying contents from project requirements.txt..." ) sorted_reqs = sorted(str(req) for req in incoming_reqs) sep = "\n" with open(requirements_txt, "a", encoding="utf-8") as file: file.write(sep.join(sorted_reqs)) click.secho( "Use 'pip-compile requirements.txt --output-file requirements.lock' to compile " "and 'pip install -r requirements.lock' to install the updated list of requirements." )
Appends micro-package requirements to project level requirements.txt
_append_package_reqs
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def _get_all_library_reqs(metadata: PackageMetadata) -> list[str]:
    """Get all library requirements from metadata, leaving markers intact."""
    # See https://discuss.python.org/t/\
    # programmatically-getting-non-optional-requirements-of-current-directory/26963/2
    return [
        str(_EquivalentRequirement(dep_str))
        for dep_str in metadata.get_all("Requires-Dist", [])
    ]
Get all library requirements from metadata, leaving markers intact.
_get_all_library_reqs
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
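The `PackageMetadata` argument is presumably the object returned by `importlib.metadata.metadata`, so the raw input to this helper can be inspected directly; the distribution name below is only an example:

from importlib.metadata import metadata

md = metadata("kedro")  # any installed distribution works here
for dep in md.get_all("Requires-Dist") or []:
    print(dep)  # requirement strings, possibly with environment markers attached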
def _safe_parse_requirements(
    requirements: str | Iterable[str],
) -> set[_EquivalentRequirement]:
    """Safely parse a requirement or set of requirements. This avoids blowing up when
    it encounters a requirement it cannot parse (e.g. `-r requirements.txt`). This way
    we can still extract all the parseable requirements out of a set containing some
    unparseable requirements.
    """
    parseable_requirements = set()
    if isinstance(requirements, str):
        requirements = requirements.splitlines()
    # TODO: Properly handle continuation lines,
    # see https://github.com/pypa/setuptools/blob/v67.8.0/setuptools/_reqs.py
    for requirement_line in requirements:
        if (
            requirement_line
            and not requirement_line.startswith("#")
            and not requirement_line.startswith("-e")
        ):
            try:
                parseable_requirements.add(
                    _EquivalentRequirement(_drop_comment(requirement_line))
                )
            except InvalidRequirement:
                continue
    return parseable_requirements
Safely parse a requirement or set of requirements. This avoids blowing up when it encounters a requirement it cannot parse (e.g. `-r requirements.txt`). This way we can still extract all the parseable requirements out of a set containing some unparseable requirements.
_safe_parse_requirements
python
kedro-org/kedro
kedro/framework/cli/micropkg.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/micropkg.py
Apache-2.0
def jupyter() -> None:
    """Open Jupyter Notebook / Lab with project specific variables loaded."""
Open Jupyter Notebook / Lab with project specific variables loaded.
jupyter
python
kedro-org/kedro
kedro/framework/cli/jupyter.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/jupyter.py
Apache-2.0
def setup(metadata: ProjectMetadata, /, args: Any, **kwargs: Any) -> None:
    """Initialise the Jupyter Kernel for a kedro project."""
    _check_module_importable("ipykernel")
    validate_settings()

    kernel_name = f"kedro_{metadata.package_name}"
    kernel_path = _create_kernel(kernel_name, f"Kedro ({metadata.package_name})")
    click.secho(f"\nThe kernel has been created successfully at {kernel_path}")
Initialise the Jupyter Kernel for a kedro project.
setup
python
kedro-org/kedro
kedro/framework/cli/jupyter.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/jupyter.py
Apache-2.0
def jupyter_notebook(
    metadata: ProjectMetadata,
    /,
    env: str,
    args: Any,
    **kwargs: Any,
) -> None:
    """Open Jupyter Notebook with project specific variables loaded."""
    _check_module_importable("notebook")
    validate_settings()

    kernel_name = f"kedro_{metadata.package_name}"
    _create_kernel(kernel_name, f"Kedro ({metadata.package_name})")

    if env:
        os.environ["KEDRO_ENV"] = env

    python_call(
        "jupyter",
        [
            "notebook",
            f"--MultiKernelManager.default_kernel_name={kernel_name}",
            *list(args),
        ],
    )
Open Jupyter Notebook with project specific variables loaded.
jupyter_notebook
python
kedro-org/kedro
kedro/framework/cli/jupyter.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/jupyter.py
Apache-2.0
def jupyter_lab(
    metadata: ProjectMetadata,
    /,
    env: str,
    args: Any,
    **kwargs: Any,
) -> None:
    """Open Jupyter Lab with project specific variables loaded."""
    _check_module_importable("jupyterlab")
    validate_settings()

    kernel_name = f"kedro_{metadata.package_name}"
    _create_kernel(kernel_name, f"Kedro ({metadata.package_name})")

    if env:
        os.environ["KEDRO_ENV"] = env

    python_call(
        "jupyter",
        ["lab", f"--MultiKernelManager.default_kernel_name={kernel_name}", *list(args)],
    )
Open Jupyter Lab with project specific variables loaded.
jupyter_lab
python
kedro-org/kedro
kedro/framework/cli/jupyter.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/jupyter.py
Apache-2.0
def _create_kernel(kernel_name: str, display_name: str) -> str: """Creates an IPython kernel for the kedro project. If one with the same kernel_name exists already it will be replaced. Installs the default IPython kernel (which points towards `sys.executable`) and customises it to make the launch command load the kedro extension. This is equivalent to the method recommended for creating a custom IPython kernel on the CLI: https://ipython.readthedocs.io/en/stable/install/kernel_install.html. On linux this creates a directory ~/.local/share/jupyter/kernels/{kernel_name} containing kernel.json, logo-32x32.png, logo-64x64.png and logo-svg.svg. An example kernel.json looks as follows: { "argv": [ "/Users/antony_milne/miniconda3/envs/spaceflights/bin/python", "-m", "ipykernel_launcher", "-f", "{connection_file}", "--ext", "kedro.ipython" ], "display_name": "Kedro (spaceflights)", "language": "python", "metadata": { "debugger": false } } Args: kernel_name: Name of the kernel to create. display_name: Kernel name as it is displayed in the UI. Returns: String of the path of the created kernel. Raises: KedroCliError: When kernel cannot be setup. """ # These packages are required by jupyter lab and notebook, which we have already # checked are importable, so we don't run _check_module_importable on them. from ipykernel.kernelspec import install try: # Install with user=True rather than system-wide to minimise footprint and # ensure that we have permissions to write there. Under the hood this calls # jupyter_client.KernelSpecManager.install_kernel_spec, which automatically # removes an old kernel spec if it already exists. kernel_path = install( user=True, kernel_name=kernel_name, display_name=display_name, ) kernel_json = Path(kernel_path) / "kernel.json" kernel_spec = json.loads(kernel_json.read_text(encoding="utf-8")) kernel_spec["argv"].extend(["--ext", "kedro.ipython"]) # indent=1 is to match the default ipykernel style (see # ipykernel.write_kernel_spec). kernel_json.write_text(json.dumps(kernel_spec, indent=1), encoding="utf-8") kedro_ipython_dir = Path(__file__).parents[2] / "ipython" shutil.copy(kedro_ipython_dir / "logo-32x32.png", kernel_path) shutil.copy(kedro_ipython_dir / "logo-64x64.png", kernel_path) shutil.copy(kedro_ipython_dir / "logo-svg.svg", kernel_path) except Exception as exc: raise KedroCliError( f"Cannot setup kedro kernel for Jupyter.\nError: {exc}" ) from exc return kernel_path
Creates an IPython kernel for the kedro project. If one with the same kernel_name exists already it will be replaced. Installs the default IPython kernel (which points towards `sys.executable`) and customises it to make the launch command load the kedro extension. This is equivalent to the method recommended for creating a custom IPython kernel on the CLI: https://ipython.readthedocs.io/en/stable/install/kernel_install.html. On linux this creates a directory ~/.local/share/jupyter/kernels/{kernel_name} containing kernel.json, logo-32x32.png, logo-64x64.png and logo-svg.svg. An example kernel.json looks as follows: { "argv": [ "/Users/antony_milne/miniconda3/envs/spaceflights/bin/python", "-m", "ipykernel_launcher", "-f", "{connection_file}", "--ext", "kedro.ipython" ], "display_name": "Kedro (spaceflights)", "language": "python", "metadata": { "debugger": false } } Args: kernel_name: Name of the kernel to create. display_name: Kernel name as it is displayed in the UI. Returns: String of the path of the created kernel. Raises: KedroCliError: When kernel cannot be setup.
_create_kernel
python
kedro-org/kedro
kedro/framework/cli/jupyter.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/jupyter.py
Apache-2.0
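One way to check the result described above is to read the installed spec back through `jupyter_client`; the kernel name is illustrative and assumes a project package called `spaceflights`:

from jupyter_client.kernelspec import KernelSpecManager

spec = KernelSpecManager().get_kernel_spec("kedro_spaceflights")
print(spec.display_name)  # e.g. "Kedro (spaceflights)"
print(spec.argv)          # should end with ["--ext", "kedro.ipython"]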
def _assert_pkg_name_ok(pkg_name: str) -> None:
    """Check that python package name is in line with PEP8 requirements.

    Args:
        pkg_name: Candidate Python package name.

    Raises:
        KedroCliError: If package name violates the requirements.
    """
    base_message = f"'{pkg_name}' is not a valid Python package name."
    if not re.match(r"^[a-zA-Z_]", pkg_name):
        message = base_message + " It must start with a letter or underscore."
        raise KedroCliError(message)
    if len(pkg_name) < 2:  # noqa: PLR2004
        message = base_message + " It must be at least 2 characters long."
        raise KedroCliError(message)
    if not re.match(r"^\w+$", pkg_name[1:]):
        message = (
            base_message + " It must contain only letters, digits, and/or underscores."
        )
        raise KedroCliError(message)
Check that python package name is in line with PEP8 requirements. Args: pkg_name: Candidate Python package name. Raises: KedroCliError: If package name violates the requirements.
_assert_pkg_name_ok
python
kedro-org/kedro
kedro/framework/cli/pipeline.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/pipeline.py
Apache-2.0
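A few concrete cases for the rules enforced above, calling the private helper directly purely for illustration:

from kedro.framework.cli.utils import KedroCliError

_assert_pkg_name_ok("data_processing")  # ok: starts with a letter, word characters only
_assert_pkg_name_ok("_internal")        # ok: leading underscore is allowed

for bad_name in ("1pipeline", "x", "my-pipeline"):
    try:
        _assert_pkg_name_ok(bad_name)
    except KedroCliError as err:
        print(err)  # explains which rule was violated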
def pipeline() -> None:
    """Commands for working with pipelines."""
Commands for working with pipelines.
pipeline
python
kedro-org/kedro
kedro/framework/cli/pipeline.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/pipeline.py
Apache-2.0
def create_pipeline( metadata: ProjectMetadata, /, name: str, template_path: Path, skip_config: bool, env: str, **kwargs: Any, ) -> None: """Create a new modular pipeline by providing a name.""" package_dir = metadata.source_dir / metadata.package_name project_root = metadata.project_path / metadata.project_name conf_source = settings.CONF_SOURCE project_conf_path = metadata.project_path / conf_source base_env = settings.CONFIG_LOADER_ARGS.get("base_env", "base") env = env or base_env if not skip_config and not (project_conf_path / env).exists(): raise KedroCliError( f"Unable to locate environment '{env}'. " f"Make sure it exists in the project configuration." ) # Precedence for template_path is: command line > project templates/pipeline dir > global default # If passed on the CLI, click will verify that the path exists so no need to check again if template_path is None: # No path provided on the CLI, try `PROJECT_PATH/templates/pipeline` template_path = Path(metadata.project_path / "templates" / "pipeline") if not template_path.exists(): # and if that folder doesn't exist fall back to the global default template_path = Path(kedro.__file__).parent / "templates" / "pipeline" click.secho(f"Using pipeline template at: '{template_path}'") # Ensure pipelines directory has __init__.py pipelines_dir = package_dir / "pipelines" _ensure_pipelines_init_file(pipelines_dir) result_path = _create_pipeline(name, template_path, pipelines_dir) _copy_pipeline_tests(name, result_path, project_root) _copy_pipeline_configs(result_path, project_conf_path, skip_config, env=env) click.secho(f"\nPipeline '{name}' was successfully created.\n", fg="green")
Create a new modular pipeline by providing a name.
create_pipeline
python
kedro-org/kedro
kedro/framework/cli/pipeline.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/pipeline.py
Apache-2.0
def delete_pipeline( metadata: ProjectMetadata, /, name: str, env: str, yes: bool, **kwargs: Any ) -> None: """Delete a modular pipeline by providing a name.""" package_dir = metadata.source_dir / metadata.package_name conf_source = settings.CONF_SOURCE project_conf_path = metadata.project_path / conf_source base_env = settings.CONFIG_LOADER_ARGS.get("base_env", "base") env = env or base_env if not (project_conf_path / env).exists(): raise KedroCliError( f"Unable to locate environment '{env}'. " f"Make sure it exists in the project configuration." ) pipeline_artifacts = _get_pipeline_artifacts(metadata, pipeline_name=name, env=env) files_to_delete = [ pipeline_artifacts.pipeline_conf / filepath for confdir in ("parameters", "catalog") # Since we remove nesting in 'parameters' and 'catalog' folders, # we want to also del the old project's structure for backward compatibility for filepath in (Path(f"{confdir}_{name}.yml"), Path(confdir) / f"{name}.yml") if (pipeline_artifacts.pipeline_conf / filepath).is_file() ] dirs_to_delete = [ path for path in (pipeline_artifacts.pipeline_dir, pipeline_artifacts.pipeline_tests) if path.is_dir() ] if not files_to_delete and not dirs_to_delete: raise KedroCliError(f"Pipeline '{name}' not found.") if not yes: _echo_deletion_warning( "The following paths will be removed:", directories=dirs_to_delete, files=files_to_delete, ) click.echo() yes = click.confirm(f"Are you sure you want to delete pipeline '{name}'?") click.echo() if not yes: raise KedroCliError("Deletion aborted!") _delete_artifacts(*files_to_delete, *dirs_to_delete) click.secho(f"\nPipeline '{name}' was successfully deleted.", fg="green") click.secho( f"\nIf you added the pipeline '{name}' to 'register_pipelines()' in" f""" '{package_dir / "pipeline_registry.py"}', you will need to remove it.""", fg="yellow", )
Delete a modular pipeline by providing a name.
delete_pipeline
python
kedro-org/kedro
kedro/framework/cli/pipeline.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/pipeline.py
Apache-2.0
def _sync_dirs( source: Path, target: Path, prefix: str = "", overwrite: bool = False ) -> None: """Recursively copies `source` directory (or file) into `target` directory without overwriting any existing files/directories in the target using the following rules: 1) Skip any files/directories which names match with files in target, unless overwrite=True. 2) Copy all files from source to target. 3) Recursively copy all directories from source to target. Args: source: A local directory to copy from, must exist. target: A local directory to copy to, will be created if doesn't exist yet. prefix: Prefix for CLI message indentation. """ existing = list(target.iterdir()) if target.is_dir() else [] existing_files = {f.name for f in existing if f.is_file()} existing_folders = {f.name for f in existing if f.is_dir()} if source.is_dir(): content = list(source.iterdir()) elif source.is_file(): content = [source] else: # nothing to copy content = [] # pragma: no cover for source_path in content: source_name = source_path.name target_path = target / source_name click.echo(indent(f"Creating '{target_path}': ", prefix), nl=False) if ( # rule #1 not overwrite and source_name in existing_files or source_path.is_file() and source_name in existing_folders ): click.secho("SKIPPED (already exists)", fg="yellow") elif source_path.is_file(): # rule #2 try: target.mkdir(exist_ok=True, parents=True) shutil.copyfile(str(source_path), str(target_path)) except Exception: click.secho("FAILED", fg="red") raise click.secho("OK", fg="green") else: # source_path is a directory, rule #3 click.echo() new_prefix = (prefix or "") + " " * 2 _sync_dirs(source_path, target_path, prefix=new_prefix)
Recursively copies `source` directory (or file) into `target` directory without overwriting any existing files/directories in the target using the following rules: 1) Skip any files/directories which names match with files in target, unless overwrite=True. 2) Copy all files from source to target. 3) Recursively copy all directories from source to target. Args: source: A local directory to copy from, must exist. target: A local directory to copy to, will be created if doesn't exist yet. prefix: Prefix for CLI message indentation.
_sync_dirs
python
kedro-org/kedro
kedro/framework/cli/pipeline.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/pipeline.py
Apache-2.0
def _get_artifacts_to_package(
    project_metadata: ProjectMetadata, module_path: str, env: str
) -> tuple[Path, Path, Path]:
    """From existing project, returns in order:
    source_path, tests_path, config_paths
    """
    package_dir = project_metadata.source_dir / project_metadata.package_name
    project_root = project_metadata.project_path
    project_conf_path = project_metadata.project_path / settings.CONF_SOURCE
    artifacts = (
        Path(package_dir, *module_path.split(".")),
        Path(project_root, "tests", *module_path.split(".")),
        project_conf_path / env,
    )
    return artifacts
From existing project, returns in order: source_path, tests_path, config_paths
_get_artifacts_to_package
python
kedro-org/kedro
kedro/framework/cli/pipeline.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/pipeline.py
Apache-2.0
def starter() -> None:
    """Commands for working with project starters."""
Commands for working with project starters.
starter
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def new( # noqa: PLR0913 config_path: str, starter_alias: str, selected_tools: str, project_name: str, checkout: str, directory: str, example_pipeline: str, telemetry_consent: str, **kwargs: Any, ) -> None: """Create a new kedro project.""" flag_inputs = { "config": config_path, "starter": starter_alias, "tools": selected_tools, "name": project_name, "checkout": checkout, "directory": directory, "example": example_pipeline, "telemetry_consent": telemetry_consent, } _validate_flag_inputs(flag_inputs) starters_dict = _get_starters_dict() if starter_alias in starters_dict: if directory: raise KedroCliError( "Cannot use the --directory flag with a --starter alias." ) spec = starters_dict[starter_alias] template_path = spec.template_path # "directory" is an optional key for starters from plugins, so if the key is # not present we will use "None". directory = spec.directory # type: ignore[assignment] checkout = _select_checkout_branch_for_cookiecutter(checkout) elif starter_alias is not None: template_path = starter_alias else: template_path = str(TEMPLATE_PATH) # Format user input where necessary if selected_tools is not None: selected_tools = selected_tools.lower() # Get prompts.yml to find what information the user needs to supply as config. tmpdir = tempfile.mkdtemp() cookiecutter_dir = _get_cookiecutter_dir(template_path, checkout, directory, tmpdir) prompts_required = _get_prompts_required_and_clear_from_CLI_provided( cookiecutter_dir, selected_tools, project_name, example_pipeline ) # We only need to make cookiecutter_context if interactive prompts are needed. cookiecutter_context = None if not config_path: cookiecutter_context = _make_cookiecutter_context_for_prompts(cookiecutter_dir) # Cleanup the tmpdir after it's no longer required. # Ideally we would want to be able to use tempfile.TemporaryDirectory() context manager # but it causes an issue with readonly files on windows # see: https://bugs.python.org/issue26660. # So on error, we will attempt to clear the readonly bits and re-attempt the cleanup shutil.rmtree(tmpdir, onerror=_remove_readonly) # type: ignore[arg-type] # Obtain config, either from a file or from interactive user prompts. extra_context = _get_extra_context( prompts_required=prompts_required, config_path=config_path, cookiecutter_context=cookiecutter_context, selected_tools=selected_tools, project_name=project_name, example_pipeline=example_pipeline, starter_alias=starter_alias, ) cookiecutter_args, project_template = _make_cookiecutter_args_and_fetch_template( config=extra_context, checkout=checkout, directory=directory, template_path=template_path, ) if telemetry_consent is not None: telemetry_consent = ( "true" if _parse_yes_no_to_bool(telemetry_consent) else "false" ) _create_project(project_template, cookiecutter_args, telemetry_consent) # If not a starter, print tools and example selection if not starter_alias: # If interactive flow used, print hint interactive_flow = prompts_required and not config_path _print_selection_and_prompt_info( extra_context["tools"], extra_context["example_pipeline"], interactive_flow, )
Create a new kedro project.
new
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def list_starters() -> None:
    """List all official project starters available."""
    starters_dict = _get_starters_dict()

    # Group all specs by origin as nested dict and sort it.
    sorted_starters_dict: dict[str, dict[str, KedroStarterSpec]] = {
        origin: dict(sorted(starters_dict_by_origin))
        for origin, starters_dict_by_origin in groupby(
            starters_dict.items(), lambda item: item[1].origin
        )
    }

    # ensure kedro starters are listed first
    sorted_starters_dict = dict(
        sorted(sorted_starters_dict.items(), key=lambda x: x == "kedro")  # type: ignore[comparison-overlap]
    )

    for origin, starters_spec in sorted_starters_dict.items():
        click.secho(f"\nStarters from {origin}\n", fg="yellow")
        click.echo(
            yaml.safe_dump(_starter_spec_to_dict(starters_spec), sort_keys=False)
        )
List all official project starters available.
list_starters
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _get_cookiecutter_dir(
    template_path: str, checkout: str, directory: str, tmpdir: str
) -> Path:
    """Gives a path to the cookiecutter directory. If template_path is a repo then
    clones it to ``tmpdir``; if template_path is a file path then directly uses that
    path without copying anything.
    """
    from cookiecutter.exceptions import RepositoryCloneFailed, RepositoryNotFound
    from cookiecutter.repository import determine_repo_dir  # for performance reasons

    try:
        cookiecutter_dir, _ = determine_repo_dir(
            template=template_path,
            abbreviations={},
            clone_to_dir=Path(tmpdir).resolve(),
            checkout=checkout,
            no_input=True,
            directory=directory,
        )
    except (RepositoryNotFound, RepositoryCloneFailed) as exc:
        error_message = f"Kedro project template not found at {template_path}."

        if checkout:
            error_message += (
                f" Specified tag {checkout}. The following tags are available: "
                + ", ".join(_get_available_tags(template_path))
            )
        official_starters = sorted(_OFFICIAL_STARTER_SPECS_DICT)
        raise KedroCliError(
            f"{error_message}. The aliases for the official Kedro starters are: \n"
            f"{yaml.safe_dump(official_starters, sort_keys=False)}"
        ) from exc

    return Path(cookiecutter_dir)
Gives a path to the cookiecutter directory. If template_path is a repo then clones it to ``tmpdir``; if template_path is a file path then directly uses that path without copying anything.
_get_cookiecutter_dir
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _get_prompts_required_and_clear_from_CLI_provided(
    cookiecutter_dir: Path,
    selected_tools: str,
    project_name: str,
    example_pipeline: str,
) -> Any:
    """Finds the information a user must supply according to prompts.yml,
    and clear it from what has already been provided via the CLI(validate it before)"""
    prompts_yml = cookiecutter_dir / "prompts.yml"
    if not prompts_yml.is_file():
        return {}

    try:
        with prompts_yml.open("r") as prompts_file:
            prompts_required = yaml.safe_load(prompts_file)
    except Exception as exc:
        raise KedroCliError(
            "Failed to generate project: could not load prompts.yml."
        ) from exc

    if selected_tools is not None:
        _validate_selected_tools(selected_tools)
        del prompts_required["tools"]

    if project_name is not None:
        _validate_input_with_regex_pattern("project_name", project_name)
        del prompts_required["project_name"]

    if example_pipeline is not None:
        _validate_input_with_regex_pattern("yes_no", example_pipeline)
        del prompts_required["example_pipeline"]

    return prompts_required
Finds the information a user must supply according to prompts.yml, and clear it from what has already been provided via the CLI(validate it before)
_get_prompts_required_and_clear_from_CLI_provided
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _get_starters_dict() -> dict[str, KedroStarterSpec]: """This function lists all the starter aliases declared in the core repo and in plugins entry points. For example, the output for official kedro starters looks like: {"astro-airflow-iris": KedroStarterSpec( name="astro-airflow-iris", template_path="git+https://github.com/kedro-org/kedro-starters.git", directory="astro-airflow-iris", origin="kedro" ), } """ starter_specs = _OFFICIAL_STARTER_SPECS_DICT for starter_entry_point in _get_entry_points(name="starters"): origin = starter_entry_point.module.split(".")[0] specs: EntryPoints | list = _safe_load_entry_point(starter_entry_point) or [] for spec in specs: if not isinstance(spec, KedroStarterSpec): click.secho( f"The starter configuration loaded from module {origin}" f"should be a 'KedroStarterSpec', got '{type(spec)}' instead", fg="red", ) elif spec.alias in starter_specs: click.secho( f"Starter alias `{spec.alias}` from `{origin}` " f"has been ignored as it is already defined by" f"`{starter_specs[spec.alias].origin}`", fg="red", ) else: spec.origin = origin starter_specs[spec.alias] = spec return starter_specs
This function lists all the starter aliases declared in the core repo and in plugins entry points. For example, the output for official kedro starters looks like: {"astro-airflow-iris": KedroStarterSpec( name="astro-airflow-iris", template_path="git+https://github.com/kedro-org/kedro-starters.git", directory="astro-airflow-iris", origin="kedro" ), }
_get_starters_dict
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _get_extra_context( # noqa: PLR0913 prompts_required: dict, config_path: str, cookiecutter_context: OrderedDict | None, selected_tools: str | None, project_name: str | None, example_pipeline: str | None, starter_alias: str | None, ) -> dict[str, str]: """Generates a config dictionary that will be passed to cookiecutter as `extra_context`, based on CLI flags, user prompts, configuration file or Default values. It is crucial to return a dictionary with string values, otherwise, there will be issues with Cookiecutter. Args: prompts_required: a dictionary of all the prompts that will be shown to the user on project creation. config_path: a string containing the value for the --config flag, or None in case the flag wasn't used. cookiecutter_context: the context for Cookiecutter templates. selected_tools: a string containing the value for the --tools flag, or None in case the flag wasn't used. project_name: a string containing the value for the --name flag, or None in case the flag wasn't used. example_pipeline: a string containing the value for the --example flag, or None in case the flag wasn't used starter_alias: a string containing the value for the --starter flag, or None in case the flag wasn't used Returns: Config dictionary, passed the necessary processing, with default values if needed. """ if config_path: extra_context = _fetch_validate_parse_config_from_file( config_path, prompts_required, starter_alias ) else: extra_context = _fetch_validate_parse_config_from_user_prompts( prompts_required, cookiecutter_context ) # Update extra_context, if CLI inputs are available if selected_tools is not None: tools_numbers = _convert_tool_short_names_to_numbers(selected_tools) extra_context["tools"] = _convert_tool_numbers_to_readable_names(tools_numbers) if project_name is not None: extra_context["project_name"] = project_name if example_pipeline is not None: extra_context["example_pipeline"] = str(_parse_yes_no_to_bool(example_pipeline)) # set defaults for required fields, will be used mostly for starters extra_context.setdefault("kedro_version", version) extra_context.setdefault("tools", str(["None"])) extra_context.setdefault("example_pipeline", "False") return extra_context
Generates a config dictionary that will be passed to cookiecutter as `extra_context`, based on CLI flags, user prompts, configuration file or Default values. It is crucial to return a dictionary with string values, otherwise, there will be issues with Cookiecutter. Args: prompts_required: a dictionary of all the prompts that will be shown to the user on project creation. config_path: a string containing the value for the --config flag, or None in case the flag wasn't used. cookiecutter_context: the context for Cookiecutter templates. selected_tools: a string containing the value for the --tools flag, or None in case the flag wasn't used. project_name: a string containing the value for the --name flag, or None in case the flag wasn't used. example_pipeline: a string containing the value for the --example flag, or None in case the flag wasn't used starter_alias: a string containing the value for the --starter flag, or None in case the flag wasn't used Returns: Config dictionary, passed the necessary processing, with default values if needed.
_get_extra_context
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _convert_tool_short_names_to_numbers(selected_tools: str) -> list:
    """Prepares tools selection from the CLI or config input to the correct format
    to be put in the project configuration, if it exists.
    Replaces tool strings with the corresponding prompt number.

    Args:
        selected_tools: a string containing the value for the --tools flag or
            config file, or None in case none were provided, i.e. lint,docs.

    Returns:
        String with the numbers corresponding to the desired tools, or
        None in case the --tools flag was not used.
    """
    if selected_tools.lower() == "none":
        return []
    if selected_tools.lower() == "all":
        return list(NUMBER_TO_TOOLS_NAME.keys())

    tools = []
    for tool in selected_tools.lower().split(","):
        tool_short_name = tool.strip()
        if tool_short_name in TOOLS_SHORTNAME_TO_NUMBER:
            tools.append(TOOLS_SHORTNAME_TO_NUMBER[tool_short_name])

    # Remove duplicates if any
    tools = sorted(list(set(tools)))

    return tools
Prepares tools selection from the CLI or config input to the correct format to be put in the project configuration, if it exists. Replaces tool strings with the corresponding prompt number. Args: selected_tools: a string containing the value for the --tools flag or config file, or None in case none were provided, i.e. lint,docs. Returns: String with the numbers corresponding to the desired tools, or None in case the --tools flag was not used.
_convert_tool_short_names_to_numbers
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _convert_tool_numbers_to_readable_names(tools_numbers: list) -> str:
    """Transform the list of tool numbers into a list of readable names, using
    'None' for empty lists. Then, convert the result into a string format to
    prevent issues with Cookiecutter.
    """
    tools_names = [NUMBER_TO_TOOLS_NAME[tool] for tool in tools_numbers]
    if tools_names == []:
        tools_names = ["None"]
    return str(tools_names)
Transform the list of tool numbers into a list of readable names, using 'None' for empty lists. Then, convert the result into a string format to prevent issues with Cookiecutter.
_convert_tool_numbers_to_readable_names
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
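A hypothetical round trip through the two converters above; the actual contents of TOOLS_SHORTNAME_TO_NUMBER and NUMBER_TO_TOOLS_NAME live in kedro's starter utilities, so the concrete numbers and names shown in the comments are assumptions:

numbers = _convert_tool_short_names_to_numbers("lint,docs")  # e.g. ["1", "4"] (assumed mapping)
readable = _convert_tool_numbers_to_readable_names(numbers)  # e.g. "['Linting', 'Documentation']"
print(readable)

print(_convert_tool_numbers_to_readable_names([]))  # "['None']"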
def _fetch_validate_parse_config_from_file( config_path: str, prompts_required: dict, starter_alias: str | None ) -> dict[str, str]: """Obtains configuration for a new kedro project non-interactively from a file. Validates that: 1. All keys specified in prompts_required are retrieved from the configuration. 2. The options 'tools' and 'example_pipeline' are not used in the configuration when any starter option is selected. 3. Variables sourced from the configuration file adhere to the expected format. Parse tools from short names to list of numbers Args: config_path: The path of the config.yml which should contain the data required by ``prompts.yml``. Returns: Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults. Raises: KedroCliError: If the file cannot be parsed. """ try: with open(config_path, encoding="utf-8") as config_file: config: dict[str, str] = yaml.safe_load(config_file) if KedroCliError.VERBOSE_ERROR: click.echo(config_path + ":") click.echo(yaml.dump(config, default_flow_style=False)) except Exception as exc: raise KedroCliError( f"Failed to generate project: could not load config at {config_path}." ) from exc if starter_alias and ("tools" in config or "example_pipeline" in config): raise KedroCliError( "The --starter flag can not be used with `example_pipeline` and/or `tools` keys in the config file." ) _validate_config_file_against_prompts(config, prompts_required) _validate_input_with_regex_pattern( "project_name", config.get("project_name", "New Kedro Project") ) example_pipeline = config.get("example_pipeline", "no") _validate_input_with_regex_pattern("yes_no", example_pipeline) config["example_pipeline"] = str(_parse_yes_no_to_bool(example_pipeline)) tools_short_names = config.get("tools", "none").lower() _validate_selected_tools(tools_short_names) tools_numbers = _convert_tool_short_names_to_numbers(tools_short_names) config["tools"] = _convert_tool_numbers_to_readable_names(tools_numbers) return config
Obtains configuration for a new kedro project non-interactively from a file. Validates that: 1. All keys specified in prompts_required are retrieved from the configuration. 2. The options 'tools' and 'example_pipeline' are not used in the configuration when any starter option is selected. 3. Variables sourced from the configuration file adhere to the expected format. Parse tools from short names to list of numbers Args: config_path: The path of the config.yml which should contain the data required by ``prompts.yml``. Returns: Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults. Raises: KedroCliError: If the file cannot be parsed.
_fetch_validate_parse_config_from_file
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
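As a sketch of the non-interactive flow above, the snippet below writes a hypothetical config.yml. The project_name, tools and example_pipeline keys mirror the validation performed here; repo_name, python_package and output_dir are assumptions about what prompts.yml typically requires.

import yaml

# Hypothetical configuration consumed via `kedro new --config config.yml`
config = {
    "project_name": "Spaceflights Analysis",
    "repo_name": "spaceflights-analysis",       # assumed prompts.yml key
    "python_package": "spaceflights_analysis",  # assumed prompts.yml key
    "tools": "lint, docs",
    "example_pipeline": "no",
    "output_dir": ".",
}

with open("config.yml", "w", encoding="utf-8") as config_file:
    yaml.safe_dump(config, config_file)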
def _fetch_validate_parse_config_from_user_prompts( prompts: dict[str, Any], cookiecutter_context: OrderedDict | None, ) -> dict[str, str]: """Interactively obtains information from user prompts. Args: prompts: Prompts from prompts.yml. cookiecutter_context: Cookiecutter context generated from cookiecutter.json. Returns: Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults. """ if not cookiecutter_context: raise Exception("No cookiecutter context available.") config: dict[str, str] = {} for variable_name, prompt_dict in prompts.items(): prompt = _Prompt(**prompt_dict) # render the variable on the command line default_value = cookiecutter_context.get(variable_name) or "" # read the user's input for the variable user_input = click.prompt( str(prompt), default=default_value, show_default=True, type=str, ).strip() if user_input: prompt.validate(user_input) config[variable_name] = user_input if "tools" in config: # convert tools input to list of numbers and validate tools_numbers = _parse_tools_input(config["tools"]) _validate_tool_selection(tools_numbers) config["tools"] = _convert_tool_numbers_to_readable_names(tools_numbers) if "example_pipeline" in config: example_pipeline_bool = _parse_yes_no_to_bool(config["example_pipeline"]) config["example_pipeline"] = str(example_pipeline_bool) return config
Interactively obtains information from user prompts. Args: prompts: Prompts from prompts.yml. cookiecutter_context: Cookiecutter context generated from cookiecutter.json. Returns: Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults.
_fetch_validate_parse_config_from_user_prompts
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _make_cookiecutter_args_and_fetch_template( config: dict[str, str], checkout: str, directory: str, template_path: str, ) -> tuple[dict[str, object], str]: """Creates a dictionary of arguments to pass to cookiecutter and returns project template path. Args: config: Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults. checkout: The tag, branch or commit in the starter repository to checkout. Maps directly to cookiecutter's ``checkout`` argument. Relevant only when using a starter. directory: The directory of a specific starter inside a repository containing multiple starters. Maps directly to cookiecutter's ``directory`` argument. Relevant only when using a starter. https://cookiecutter.readthedocs.io/en/1.7.2/advanced/directories.html template_path: Starter path or kedro template path Returns: Arguments to pass to cookiecutter, project template path """ cookiecutter_args = { "output_dir": config.get("output_dir", str(Path.cwd().resolve())), "no_input": True, "extra_context": config, } if directory: cookiecutter_args["directory"] = directory tools = config["tools"] example_pipeline = config["example_pipeline"] starter_path = "git+https://github.com/kedro-org/kedro-starters.git" cookiecutter_args["checkout"] = checkout if "PySpark" in tools: # Use the spaceflights-pyspark starter if only PySpark is chosen. cookiecutter_args["directory"] = "spaceflights-pyspark" elif example_pipeline == "True": # Use spaceflights-pandas starter if example was selected, but PySpark wasn't cookiecutter_args["directory"] = "spaceflights-pandas" else: # Use the default template path for non PySpark or example options: starter_path = template_path return cookiecutter_args, starter_path
Creates a dictionary of arguments to pass to cookiecutter and returns project template path. Args: config: Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults. checkout: The tag, branch or commit in the starter repository to checkout. Maps directly to cookiecutter's ``checkout`` argument. Relevant only when using a starter. directory: The directory of a specific starter inside a repository containing multiple starters. Maps directly to cookiecutter's ``directory`` argument. Relevant only when using a starter. https://cookiecutter.readthedocs.io/en/1.7.2/advanced/directories.html template_path: Starter path or kedro template path Returns: Arguments to pass to cookiecutter, project template path
_make_cookiecutter_args_and_fetch_template
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
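An illustrative call into the helper above, showing how selecting PySpark redirects cookiecutter to the spaceflights-pyspark starter; the template path and output directory are placeholders, not values from the source.

from kedro.framework.cli.starters import _make_cookiecutter_args_and_fetch_template

config = {
    "tools": "['PySpark']",         # readable-name string produced earlier
    "example_pipeline": "False",
    "output_dir": "/tmp/projects",  # placeholder
}
cookiecutter_args, template = _make_cookiecutter_args_and_fetch_template(
    config=config,
    checkout="main",
    directory="",
    template_path="/path/to/kedro/templates/project",  # placeholder for the bundled template
)
print(cookiecutter_args["directory"])  # spaceflights-pyspark
print(template)                        # git+https://github.com/kedro-org/kedro-starters.git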
def _validate_config_file_against_prompts( config: dict[str, str], prompts: dict[str, Any] ) -> None: """Checks that the configuration file contains all needed variables. Args: config: The config as a dictionary. prompts: Prompts from prompts.yml. Raises: KedroCliError: If the config file is empty or does not contain all the keys required in prompts, or if the output_dir specified does not exist. """ if not config: raise KedroCliError("Config file is empty.") additional_keys = {"tools": "none", "example_pipeline": "no"} missing_keys = set(prompts) - set(config) missing_mandatory_keys = missing_keys - set(additional_keys) if missing_mandatory_keys: click.echo(yaml.dump(config, default_flow_style=False)) raise KedroCliError( f"{', '.join(missing_mandatory_keys)} not found in config file." ) for key, default_value in additional_keys.items(): if key in missing_keys: click.secho( f"The `{key}` key not found in the config file, default value '{default_value}' is being used.", fg="yellow", ) if "output_dir" in config and not Path(config["output_dir"]).exists(): raise KedroCliError( f"'{config['output_dir']}' is not a valid output directory. " "It must be a relative or absolute path to an existing directory." )
Checks that the configuration file contains all needed variables. Args: config: The config as a dictionary. prompts: Prompts from prompts.yml. Raises: KedroCliError: If the config file is empty or does not contain all the keys required in prompts, or if the output_dir specified does not exist.
_validate_config_file_against_prompts
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _parse_tools_input(tools_str: str | None) -> list[str]: """Parse the tools input string. Args: tools_str: Input string from prompts.yml. Returns: list: List of selected tools as strings. """ def _validate_range(start: Any, end: Any) -> None: if int(start) > int(end): message = f"'{start}-{end}' is an invalid range for project tools.\nPlease ensure range values go from smaller to larger." click.secho(message, fg="red", err=True) sys.exit(1) # safeguard to prevent passing of excessively large intervals that could cause freezing: if int(end) > len(NUMBER_TO_TOOLS_NAME): message = f"'{end}' is not a valid selection.\nPlease select from the available tools: 1, 2, 3, 4, 5, 6." # nosec if end == "7": message += "\nKedro Viz is automatically included in the project. Please remove 7 from your tool selection." click.secho(message, fg="red", err=True) sys.exit(1) if not tools_str: return [] # pragma: no cover tools_str = tools_str.lower() if tools_str == "all": return list(NUMBER_TO_TOOLS_NAME) if tools_str == "none": return [] # Split by comma tools_choices = tools_str.replace(" ", "").split(",") selected: list[str] = [] for choice in tools_choices: if "-" in choice: start, end = choice.split("-") _validate_range(start, end) selected.extend(str(i) for i in range(int(start), int(end) + 1)) else: selected.append(choice.strip()) return selected
Parse the tools input string. Args: tools_str: Input string from prompts.yml. Returns: list: List of selected tools as strings.
_parse_tools_input
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
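A quick illustration of how the prompt input is expanded by the parser above, assuming the default set of six tools.

from kedro.framework.cli.starters import _parse_tools_input

print(_parse_tools_input("1,3-5"))  # ["1", "3", "4", "5"]
print(_parse_tools_input("all"))    # every key of NUMBER_TO_TOOLS_NAME
print(_parse_tools_input("none"))   # []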
def _create_project( template_path: str, cookiecutter_args: dict[str, Any], telemetry_consent: str | None ) -> None: """Creates a new kedro project using cookiecutter. Args: template_path: The path to the cookiecutter template to create the project. It could either be a local directory or a remote VCS repository supported by cookiecutter. For more details, please see: https://cookiecutter.readthedocs.io/en/stable/usage.html#generate-your-project cookiecutter_args: Arguments to pass to cookiecutter. Raises: KedroCliError: If it fails to generate a project. """ from cookiecutter.main import cookiecutter # for performance reasons try: result_path = cookiecutter(template=template_path, **cookiecutter_args) if telemetry_consent is not None: with open(result_path + "/.telemetry", "w") as telemetry_file: telemetry_file.write("consent: " + telemetry_consent) except Exception as exc: raise KedroCliError( "Failed to generate project when running cookiecutter." ) from exc _clean_pycache(Path(result_path)) extra_context = cookiecutter_args["extra_context"] project_name = extra_context.get("project_name", "New Kedro Project") # Print success message click.secho( "\nCongratulations!" f"\nYour project '{project_name}' has been created in the directory \n{result_path}\n" )
Creates a new kedro project using cookiecutter. Args: template_path: The path to the cookiecutter template to create the project. It could either be a local directory or a remote VCS repository supported by cookiecutter. For more details, please see: https://cookiecutter.readthedocs.io/en/stable/usage.html#generate-your-project cookiecutter_args: Arguments to pass to cookiecutter. Raises: KedroCliError: If it fails to generate a project.
_create_project
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def validate(self, user_input: str) -> None: """Validate a given prompt value against the regex validator""" if self.regexp and not re.match(self.regexp, user_input): message = f"'{user_input}' is an invalid value for {(self.title).lower()}." click.secho(message, fg="red", err=True) click.secho(self.error_message, fg="red", err=True) sys.exit(1)
Validate a given prompt value against the regex validator
validate
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def _remove_readonly( func: Callable, path: Path, excinfo: tuple ) -> None: # pragma: no cover """Remove readonly files on Windows See: https://docs.python.org/3/library/shutil.html?highlight=shutil#rmtree-example """ os.chmod(path, stat.S_IWRITE) func(path)
Remove readonly files on Windows See: https://docs.python.org/3/library/shutil.html?highlight=shutil#rmtree-example
_remove_readonly
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
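A minimal sketch of how an onerror handler like this is typically wired into shutil.rmtree when cleaning up a checked-out starter on Windows; the directory name is illustrative.

import shutil

from kedro.framework.cli.starters import _remove_readonly

shutil.rmtree("tmp_starter_checkout", onerror=_remove_readonly)  # illustrative path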
def _starter_spec_to_dict(
    starter_specs: dict[str, KedroStarterSpec],
) -> dict[str, dict[str, str]]:
    """Convert a dictionary of starters spec to a nicely formatted dictionary"""
    format_dict: dict[str, dict[str, str]] = {}
    for alias, spec in starter_specs.items():
        format_dict[alias] = {}  # Each dictionary represents one starter
        format_dict[alias]["template_path"] = spec.template_path
        if spec.directory:
            format_dict[alias]["directory"] = spec.directory
    return format_dict
Convert a dictionary of starters spec to a nicely formatted dictionary
_starter_spec_to_dict
python
kedro-org/kedro
kedro/framework/cli/starters.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/starters.py
Apache-2.0
def cli() -> None: # pragma: no cover """Kedro is a CLI for creating and using Kedro projects. For more information, type ``kedro info``. NOTE: If a command from a plugin conflicts with a built-in command from Kedro, the command from the plugin will take precedence. """ pass
Kedro is a CLI for creating and using Kedro projects. For more information, type ``kedro info``. NOTE: If a command from a plugin conflicts with a built-in command from Kedro, the command from the plugin will take precedence.
cli
python
kedro-org/kedro
kedro/framework/cli/cli.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/cli.py
Apache-2.0
def info() -> None: """Get more information about kedro.""" click.secho(LOGO, fg="green") click.echo( "Kedro is a Python framework for\n" "creating reproducible, maintainable\n" "and modular data science code." ) plugin_versions = {} plugin_entry_points = defaultdict(set) for plugin_entry_point in ENTRY_POINT_GROUPS: for entry_point in _get_entry_points(plugin_entry_point): module_name = entry_point.module.split(".")[0] plugin_versions[module_name] = entry_point.dist.version plugin_entry_points[module_name].add(plugin_entry_point) click.echo() if plugin_versions: click.echo("Installed plugins:") for plugin_name, plugin_version in sorted(plugin_versions.items()): entrypoints_str = ",".join(sorted(plugin_entry_points[plugin_name])) click.echo( f"{plugin_name}: {plugin_version} (entry points:{entrypoints_str})" ) else: # pragma: no cover click.echo("No plugins installed")
Get more information about kedro.
info
python
kedro-org/kedro
kedro/framework/cli/cli.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/cli.py
Apache-2.0
def global_groups(self) -> Sequence[click.MultiCommand]: """Property which loads all global command groups from plugins and combines them with the built-in ones (eventually overriding the built-in ones if they are redefined by plugins). """ return [cli, *load_entry_points("global"), global_commands]
Property which loads all global command groups from plugins and combines them with the built-in ones (eventually overriding the built-in ones if they are redefined by plugins).
global_groups
python
kedro-org/kedro
kedro/framework/cli/cli.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/cli.py
Apache-2.0
def project_groups(self) -> Sequence[click.MultiCommand]: """Property which loads all project command groups from the project and the plugins, then combines them with the built-in ones. Built-in commands can be overridden by plugins, which can be overridden by a custom project cli.py. See https://docs.kedro.org/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands on how to add this. """ if not self._metadata: return [] plugins = load_entry_points("project") try: project_cli = importlib.import_module(f"{self._metadata.package_name}.cli") # fail gracefully if cli.py does not exist except ModuleNotFoundError: # return only built-in commands and commands from plugins # (plugins can override built-in commands) return [*plugins, project_commands] # fail badly if cli.py exists, but has no `cli` in it if not hasattr(project_cli, "cli"): raise KedroCliError( f"Cannot load commands from {self._metadata.package_name}.cli" ) user_defined = project_cli.cli # return built-in commands, plugin commands and user defined commands # (overriding happens as follows built-in < plugins < cli.py) return [user_defined, *plugins, project_commands]
Property which loads all project command groups from the project and the plugins, then combines them with the built-in ones. Built-in commands can be overridden by plugins, which can be overridden by a custom project cli.py. See https://docs.kedro.org/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands on how to add this.
project_groups
python
kedro-org/kedro
kedro/framework/cli/cli.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/cli.py
Apache-2.0
def main() -> None: # pragma: no cover """Main entry point. Look for a ``cli.py``, and, if found, add its commands to `kedro`'s before invoking the CLI. """ _init_plugins() cli_collection = KedroCLI( project_path=_find_kedro_project(Path.cwd()) or Path.cwd() ) cli_collection()
Main entry point. Look for a ``cli.py``, and, if found, add its commands to `kedro`'s before invoking the CLI.
main
python
kedro-org/kedro
kedro/framework/cli/cli.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/cli.py
Apache-2.0
def is_parameter(dataset_name: str) -> bool: # TODO: when breaking change move it to kedro/io/core.py """Check if dataset is a parameter.""" return dataset_name.startswith("params:") or dataset_name == "parameters"
Check if dataset is a parameter.
is_parameter
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
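A quick illustration of the naming convention checked by is_parameter.

from kedro.framework.cli.catalog import is_parameter

print(is_parameter("params:model_options"))  # True
print(is_parameter("parameters"))            # True
print(is_parameter("companies"))             # False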
def catalog() -> None: """Commands for working with catalog."""
Commands for working with catalog.
catalog
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
def list_datasets(metadata: ProjectMetadata, pipeline: str, env: str) -> None: """Show datasets per type.""" title = "Datasets in '{}' pipeline" not_mentioned = "Datasets not mentioned in pipeline" mentioned = "Datasets mentioned in pipeline" factories = "Datasets generated from factories" session = _create_session(metadata.package_name, env=env) context = session.load_context() try: data_catalog = context.catalog datasets_meta = data_catalog._datasets catalog_ds = set(data_catalog.list()) except Exception as exc: raise KedroCliError( f"Unable to instantiate Kedro Catalog.\nError: {exc}" ) from exc target_pipelines = pipeline or pipelines.keys() result = {} for pipe in target_pipelines: pl_obj = pipelines.get(pipe) if pl_obj: pipeline_ds = pl_obj.datasets() else: existing_pls = ", ".join(sorted(pipelines.keys())) raise KedroCliError( f"'{pipe}' pipeline not found! Existing pipelines: {existing_pls}" ) unused_ds = catalog_ds - pipeline_ds default_ds = pipeline_ds - catalog_ds used_ds = catalog_ds - unused_ds # resolve any factory datasets in the pipeline factory_ds_by_type = defaultdict(list) for ds_name in default_ds: if data_catalog.config_resolver.match_pattern(ds_name): ds_config = data_catalog.config_resolver.resolve_pattern(ds_name) factory_ds_by_type[ds_config.get("type", "DefaultDataset")].append( ds_name ) default_ds = default_ds - set(chain.from_iterable(factory_ds_by_type.values())) unused_by_type = _map_type_to_datasets(unused_ds, datasets_meta) used_by_type = _map_type_to_datasets(used_ds, datasets_meta) if default_ds: used_by_type["DefaultDataset"].extend(default_ds) data = ( (mentioned, dict(used_by_type)), (factories, dict(factory_ds_by_type)), (not_mentioned, dict(unused_by_type)), ) result[title.format(pipe)] = {key: value for key, value in data if value} secho(yaml.dump(result))
Show datasets per type.
list_datasets
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
def _map_type_to_datasets( datasets: set[str], datasets_meta: dict[str, AbstractDataset] ) -> dict: """Build dictionary with a dataset type as a key and list of datasets of the specific type as a value. """ mapping = defaultdict(list) # type: ignore[var-annotated] for dataset_name in filterfalse(is_parameter, datasets): if isinstance(datasets_meta[dataset_name], _LazyDataset): ds_type = str(datasets_meta[dataset_name]).split(".")[-1] else: ds_type = datasets_meta[dataset_name].__class__.__name__ if dataset_name not in mapping[ds_type]: mapping[ds_type].append(dataset_name) return mapping
Build dictionary with a dataset type as a key and list of datasets of the specific type as a value.
_map_type_to_datasets
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
def create_catalog(metadata: ProjectMetadata, pipeline_name: str, env: str) -> None: """Create Data Catalog YAML configuration with missing datasets. Add ``MemoryDataset`` datasets to Data Catalog YAML configuration file for each dataset in a registered pipeline if it is missing from the ``DataCatalog``. The catalog configuration will be saved to `<conf_source>/<env>/catalog_<pipeline_name>.yml` file. """ env = env or "base" session = _create_session(metadata.package_name, env=env) context = session.load_context() pipeline = pipelines.get(pipeline_name) if not pipeline: existing_pipelines = ", ".join(sorted(pipelines.keys())) raise KedroCliError( f"'{pipeline_name}' pipeline not found! Existing pipelines: {existing_pipelines}" ) pipeline_datasets = set(filterfalse(is_parameter, pipeline.datasets())) catalog_datasets = set(filterfalse(is_parameter, context.catalog.list())) # Datasets that are missing in Data Catalog missing_ds = sorted(pipeline_datasets - catalog_datasets) if missing_ds: catalog_path = ( context.project_path / settings.CONF_SOURCE / env / f"catalog_{pipeline_name}.yml" ) _add_missing_datasets_to_catalog(missing_ds, catalog_path) click.echo(f"Data Catalog YAML configuration was created: {catalog_path}") else: click.echo("All datasets are already configured.")
Create Data Catalog YAML configuration with missing datasets. Add ``MemoryDataset`` datasets to Data Catalog YAML configuration file for each dataset in a registered pipeline if it is missing from the ``DataCatalog``. The catalog configuration will be saved to `<conf_source>/<env>/catalog_<pipeline_name>.yml` file.
create_catalog
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
def rank_catalog_factories(metadata: ProjectMetadata, env: str) -> None: """List all dataset factories in the catalog, ranked by priority by which they are matched.""" session = _create_session(metadata.package_name, env=env) context = session.load_context() catalog_factories = context.catalog.config_resolver.list_patterns() if catalog_factories: click.echo(yaml.dump(catalog_factories)) else: click.echo("There are no dataset factories in the catalog.")
List all dataset factories in the catalog, ranked by priority by which they are matched.
rank_catalog_factories
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
def resolve_patterns(metadata: ProjectMetadata, env: str) -> None: """Resolve catalog factories against pipeline datasets. Note that this command is runner agnostic and thus won't take into account any default dataset creation defined in the runner.""" session = _create_session(metadata.package_name, env=env) context = session.load_context() catalog_config = context.config_loader["catalog"] credentials_config = context._get_config_credentials() data_catalog = DataCatalog.from_config( catalog=catalog_config, credentials=credentials_config ) explicit_datasets = { ds_name: ds_config for ds_name, ds_config in catalog_config.items() if not data_catalog.config_resolver.is_pattern(ds_name) } target_pipelines = pipelines.keys() pipeline_datasets = set() for pipe in target_pipelines: pl_obj = pipelines.get(pipe) if pl_obj: pipeline_datasets.update(pl_obj.datasets()) for ds_name in pipeline_datasets: if ds_name in explicit_datasets or is_parameter(ds_name): continue ds_config = data_catalog.config_resolver.resolve_pattern(ds_name) # Exclude MemoryDatasets not set in the catalog explicitly if ds_config: explicit_datasets[ds_name] = ds_config secho(yaml.dump(explicit_datasets))
Resolve catalog factories against pipeline datasets. Note that this command is runner agnostic and thus won't take into account any default dataset creation defined in the runner.
resolve_patterns
python
kedro-org/kedro
kedro/framework/cli/catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/catalog.py
Apache-2.0
def registry() -> None: """Commands for working with registered pipelines."""
Commands for working with registered pipelines.
registry
python
kedro-org/kedro
kedro/framework/cli/registry.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/registry.py
Apache-2.0
def list_registered_pipelines() -> None: """List all pipelines defined in your pipeline_registry.py file.""" click.echo(yaml.dump(sorted(pipelines)))
List all pipelines defined in your pipeline_registry.py file.
list_registered_pipelines
python
kedro-org/kedro
kedro/framework/cli/registry.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/registry.py
Apache-2.0
def describe_registered_pipeline( metadata: ProjectMetadata, /, name: str, **kwargs: Any ) -> None: """Describe a registered pipeline by providing a pipeline name. Defaults to the `__default__` pipeline. """ pipeline_obj = pipelines.get(name) if not pipeline_obj: all_pipeline_names = pipelines.keys() existing_pipelines = ", ".join(sorted(all_pipeline_names)) raise KedroCliError( f"'{name}' pipeline not found. Existing pipelines: [{existing_pipelines}]" ) nodes = [] for node in pipeline_obj.nodes: namespace = f"{node.namespace}." if node.namespace else "" nodes.append(f"{namespace}{node._name or node._func_name} ({node._func_name})") result = {"Nodes": nodes} click.echo(yaml.dump(result))
Describe a registered pipeline by providing a pipeline name. Defaults to the `__default__` pipeline.
describe_registered_pipeline
python
kedro-org/kedro
kedro/framework/cli/registry.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/registry.py
Apache-2.0
def before_command_run( self, project_metadata: ProjectMetadata, command_args: list[str], ) -> None: """Hooks to be invoked before a CLI command runs. It receives the ``project_metadata`` as well as all command line arguments that were used, including the command and subcommand themselves. Args: project_metadata: The Kedro project's metadata. command_args: The command line arguments that were used. """ pass
Hooks to be invoked before a CLI command runs. It receives the ``project_metadata`` as well as all command line arguments that were used, including the command and subcommand themselves. Args: project_metadata: The Kedro project's metadata. command_args: The command line arguments that were used.
before_command_run
python
kedro-org/kedro
kedro/framework/cli/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/hooks/specs.py
Apache-2.0
def after_command_run( self, project_metadata: ProjectMetadata, command_args: list[str], exit_code: int ) -> None: """Hooks to be invoked after a CLI command runs. It receives the ``project_metadata`` as well as all command line arguments that were used, including the command and subcommand themselves and if the operation was successful or not. Args: project_metadata: The Kedro project's metadata. command_args: The command line arguments that were used. exit_code: Exit code raised by Click application after completion """ pass
Hooks to be invoked after a CLI command runs. It receives the ``project_metadata`` as well as all command line arguments that were used, including the command and subcommand themselves and if the operation was successful or not. Args: project_metadata: The Kedro project's metadata. command_args: The command line arguments that were used. exit_code: Exit code raised by Click application after completion
after_command_run
python
kedro-org/kedro
kedro/framework/cli/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/hooks/specs.py
Apache-2.0
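A minimal sketch of a plugin implementing these CLI hook specs; the class and variable names are hypothetical, and exposing the instance through the "kedro.cli_hooks" entry-point group follows Kedro's plugin conventions.

from kedro.framework.cli.hooks import cli_hook_impl
from kedro.framework.startup import ProjectMetadata


class MyCLIHooks:
    @cli_hook_impl
    def before_command_run(
        self, project_metadata: ProjectMetadata, command_args: list[str]
    ) -> None:
        print(f"About to run: kedro {' '.join(command_args)}")

    @cli_hook_impl
    def after_command_run(
        self, project_metadata: ProjectMetadata, command_args: list[str], exit_code: int
    ) -> None:
        print(f"Command finished with exit code {exit_code}")


cli_hooks = MyCLIHooks()  # registered via the "kedro.cli_hooks" entry point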
def get_cli_hook_manager() -> PluginManager: """Create or return the global _hook_manager singleton instance.""" global _cli_hook_manager # noqa: PLW0603 if _cli_hook_manager is None: _cli_hook_manager = CLIHooksManager() _cli_hook_manager.trace.root.setwriter(logger.debug) _cli_hook_manager.enable_tracing() return _cli_hook_manager
Create or return the global _hook_manager singleton instance.
get_cli_hook_manager
python
kedro-org/kedro
kedro/framework/cli/hooks/manager.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/hooks/manager.py
Apache-2.0
def _register_cli_hooks(self) -> None: """Register CLI hook implementations from plugin CLI entrypoints""" already_registered = self.get_plugins() self.load_setuptools_entrypoints(_CLI_PLUGIN_HOOKS) # Get list of plugin/distinfo tuples for all setuptools registered plugins. plugininfo = self.list_plugin_distinfo() plugin_names = { f"{dist.project_name}-{dist.version}" for plugin, dist in plugininfo if plugin not in already_registered } if plugin_names: logger.debug( "Registered CLI hooks from %d installed plugin(s): %s", len(plugin_names), ", ".join(sorted(plugin_names)), )
Register CLI hook implementations from plugin CLI entrypoints
_register_cli_hooks
python
kedro-org/kedro
kedro/framework/cli/hooks/manager.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/hooks/manager.py
Apache-2.0
def create( cls, project_path: Path | str | None = None, save_on_close: bool = True, env: str | None = None, extra_params: dict[str, Any] | None = None, conf_source: str | None = None, ) -> KedroSession: """Create a new instance of ``KedroSession`` with the session data. Args: project_path: Path to the project root directory. Default is current working directory Path.cwd(). save_on_close: Whether or not to save the session when it's closed. conf_source: Path to a directory containing configuration env: Environment for the KedroContext. extra_params: Optional dictionary containing extra project parameters for underlying KedroContext. If specified, will update (and therefore take precedence over) the parameters retrieved from the project configuration. Returns: A new ``KedroSession`` instance. """ validate_settings() session = cls( project_path=project_path, session_id=generate_timestamp(), save_on_close=save_on_close, conf_source=conf_source, ) # have to explicitly type session_data otherwise mypy will complain # possibly related to this: https://github.com/python/mypy/issues/1430 session_data: dict[str, Any] = { "project_path": session._project_path, "session_id": session.session_id, } ctx = click.get_current_context(silent=True) if ctx: session_data["cli"] = _jsonify_cli_context(ctx) env = env or os.getenv("KEDRO_ENV") if env: session_data["env"] = env if extra_params: session_data["extra_params"] = extra_params try: session_data["username"] = getpass.getuser() except Exception as exc: logging.getLogger(__name__).debug( "Unable to get username. Full exception: %s", exc ) session_data.update(**_describe_git(session._project_path)) session._store.update(session_data) return session
Create a new instance of ``KedroSession`` with the session data. Args: project_path: Path to the project root directory. Default is current working directory Path.cwd(). save_on_close: Whether or not to save the session when it's closed. conf_source: Path to a directory containing configuration env: Environment for the KedroContext. extra_params: Optional dictionary containing extra project parameters for underlying KedroContext. If specified, will update (and therefore take precedence over) the parameters retrieved from the project configuration. Returns: A new ``KedroSession`` instance.
create
python
kedro-org/kedro
kedro/framework/session/session.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/session.py
Apache-2.0
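Typical programmatic use of the factory above; the project path is a placeholder, and bootstrap_project is called first so that the project's settings and pipelines resolve.

from pathlib import Path

from kedro.framework.session import KedroSession
from kedro.framework.startup import bootstrap_project

project_path = Path("/path/to/my-kedro-project")  # placeholder
bootstrap_project(project_path)

with KedroSession.create(project_path=project_path, env="local") as session:
    session.run(pipeline_name="__default__")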
def store(self) -> dict[str, Any]: """Return a copy of internal store.""" return dict(self._store)
Return a copy of internal store.
store
python
kedro-org/kedro
kedro/framework/session/session.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/session.py
Apache-2.0
def load_context(self) -> KedroContext: """An instance of the project context.""" env = self.store.get("env") extra_params = self.store.get("extra_params") config_loader = self._get_config_loader() context_class = settings.CONTEXT_CLASS context = context_class( package_name=self._package_name, project_path=self._project_path, config_loader=config_loader, env=env, extra_params=extra_params, hook_manager=self._hook_manager, ) self._hook_manager.hook.after_context_created(context=context) return context # type: ignore[no-any-return]
An instance of the project context.
load_context
python
kedro-org/kedro
kedro/framework/session/session.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/session.py
Apache-2.0
def _get_config_loader(self) -> AbstractConfigLoader: """An instance of the config loader.""" env = self.store.get("env") extra_params = self.store.get("extra_params") config_loader_class = settings.CONFIG_LOADER_CLASS return config_loader_class( # type: ignore[no-any-return] conf_source=self._conf_source, env=env, runtime_params=extra_params, **settings.CONFIG_LOADER_ARGS, )
An instance of the config loader.
_get_config_loader
python
kedro-org/kedro
kedro/framework/session/session.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/session.py
Apache-2.0
def close(self) -> None: """Close the current session and save its store to disk if `save_on_close` attribute is True. """ if self.save_on_close: self._store.save()
Close the current session and save its store to disk if `save_on_close` attribute is True.
close
python
kedro-org/kedro
kedro/framework/session/session.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/session.py
Apache-2.0
def run(  # noqa: PLR0913
    self,
    pipeline_name: str | None = None,
    tags: Iterable[str] | None = None,
    runner: AbstractRunner | None = None,
    node_names: Iterable[str] | None = None,
    from_nodes: Iterable[str] | None = None,
    to_nodes: Iterable[str] | None = None,
    from_inputs: Iterable[str] | None = None,
    to_outputs: Iterable[str] | None = None,
    load_versions: dict[str, str] | None = None,
    namespace: str | None = None,
) -> dict[str, Any]:
    """Runs the pipeline with a specified runner.

    Args:
        pipeline_name: Name of the pipeline that is being run.
        tags: An optional list of node tags which should be used to
            filter the nodes of the ``Pipeline``. If specified, only the nodes
            containing *any* of these tags will be run.
        runner: An optional parameter specifying the runner that you want to run
            the pipeline with.
        node_names: An optional list of node names which should be used to
            filter the nodes of the ``Pipeline``. If specified, only the nodes
            with these names will be run.
        from_nodes: An optional list of node names which should be used as a
            starting point of the new ``Pipeline``.
        to_nodes: An optional list of node names which should be used as an
            end point of the new ``Pipeline``.
        from_inputs: An optional list of input datasets which should be used as a
            starting point of the new ``Pipeline``.
        to_outputs: An optional list of output datasets which should be used as an
            end point of the new ``Pipeline``.
        load_versions: An optional flag to specify a particular dataset
            version timestamp to load.
        namespace: The namespace of the nodes that is being run.
    Raises:
        ValueError: If the named or `__default__` pipeline is not
            defined by `register_pipelines`.
        Exception: Any uncaught exception during the run will be re-raised
            after being passed to ``on_pipeline_error`` hook.
        KedroSessionError: If more than one run is attempted to be executed during
            a single session.
    Returns:
        Any node outputs that cannot be processed by the ``DataCatalog``.
        These are returned in a dictionary, where the keys are defined
        by the node outputs.
    """
    # Report project name
    self._logger.info("Kedro project %s", self._project_path.name)

    if self._run_called:
        raise KedroSessionError(
            "A run has already been completed as part of the"
            " active KedroSession. KedroSession has a 1-1 mapping with"
            " runs, and thus only one run should be executed per session."
        )

    session_id = self.store["session_id"]
    save_version = session_id
    extra_params = self.store.get("extra_params") or {}
    context = self.load_context()

    name = pipeline_name or "__default__"
    try:
        pipeline = pipelines[name]
    except KeyError as exc:
        raise ValueError(
            f"Failed to find the pipeline named '{name}'. "
            f"It needs to be generated and returned "
            f"by the 'register_pipelines' function."
        ) from exc

    filtered_pipeline = pipeline.filter(
        tags=tags,
        from_nodes=from_nodes,
        to_nodes=to_nodes,
        node_names=node_names,
        from_inputs=from_inputs,
        to_outputs=to_outputs,
        node_namespace=namespace,
    )

    record_data = {
        "session_id": session_id,
        "project_path": self._project_path.as_posix(),
        "env": context.env,
        "kedro_version": kedro_version,
        "tags": tags,
        "from_nodes": from_nodes,
        "to_nodes": to_nodes,
        "node_names": node_names,
        "from_inputs": from_inputs,
        "to_outputs": to_outputs,
        "load_versions": load_versions,
        "extra_params": extra_params,
        "pipeline_name": pipeline_name,
        "namespace": namespace,
        "runner": getattr(runner, "__name__", str(runner)),
    }

    catalog = context._get_catalog(
        save_version=save_version,
        load_versions=load_versions,
    )

    # Run the runner
    hook_manager = self._hook_manager
    runner = runner or SequentialRunner()
    if not isinstance(runner, AbstractRunner):
        raise KedroSessionError(
            "KedroSession expects an instance of Runner instead of a class. "
            "Have you forgotten the `()` at the end of the statement?"
        )
    hook_manager.hook.before_pipeline_run(
        run_params=record_data, pipeline=filtered_pipeline, catalog=catalog
    )

    try:
        run_result = runner.run(
            filtered_pipeline, catalog, hook_manager, session_id
        )
        self._run_called = True
    except Exception as error:
        hook_manager.hook.on_pipeline_error(
            error=error,
            run_params=record_data,
            pipeline=filtered_pipeline,
            catalog=catalog,
        )
        raise

    hook_manager.hook.after_pipeline_run(
        run_params=record_data,
        run_result=run_result,
        pipeline=filtered_pipeline,
        catalog=catalog,
    )
    return run_result
Runs the pipeline with a specified runner. Args: pipeline_name: Name of the pipeline that is being run. tags: An optional list of node tags which should be used to filter the nodes of the ``Pipeline``. If specified, only the nodes containing *any* of these tags will be run. runner: An optional parameter specifying the runner that you want to run the pipeline with. node_names: An optional list of node names which should be used to filter the nodes of the ``Pipeline``. If specified, only the nodes with these names will be run. from_nodes: An optional list of node names which should be used as a starting point of the new ``Pipeline``. to_nodes: An optional list of node names which should be used as an end point of the new ``Pipeline``. from_inputs: An optional list of input datasets which should be used as a starting point of the new ``Pipeline``. to_outputs: An optional list of output datasets which should be used as an end point of the new ``Pipeline``. load_versions: An optional flag to specify a particular dataset version timestamp to load. namespace: The namespace of the nodes that is being run. Raises: ValueError: If the named or `__default__` pipeline is not defined by `register_pipelines`. Exception: Any uncaught exception during the run will be re-raised after being passed to ``on_pipeline_error`` hook. KedroSessionError: If more than one run is attempted to be executed during a single session. Returns: Any node outputs that cannot be processed by the ``DataCatalog``. These are returned in a dictionary, where the keys are defined by the node outputs.
run
python
kedro-org/kedro
kedro/framework/session/session.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/session.py
Apache-2.0
def read(self) -> dict[str, Any]: """Read the data from the session store. Returns: A mapping containing the session store data. """ self._logger.debug( "'read()' not implemented for '%s'. Assuming empty store.", self.__class__.__name__, ) return {}
Read the data from the session store. Returns: A mapping containing the session store data.
read
python
kedro-org/kedro
kedro/framework/session/store.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/store.py
Apache-2.0
def save(self) -> None: """Persist the session store""" self._logger.debug( "'save()' not implemented for '%s'. Skipping the step.", self.__class__.__name__, )
Persist the session store
save
python
kedro-org/kedro
kedro/framework/session/store.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/session/store.py
Apache-2.0
def _is_relative_path(path_string: str) -> bool: """Checks whether a path string is a relative path. Example: :: >>> _is_relative_path("data/01_raw") == True >>> _is_relative_path("info.log") == True >>> _is_relative_path("/tmp/data/01_raw") == False >>> _is_relative_path(r"C:\\info.log") == False >>> _is_relative_path(r"\\'info.log") == False >>> _is_relative_path("c:/info.log") == False >>> _is_relative_path("s3://info.log") == False Args: path_string: The path string to check. Returns: Whether the string is a relative path. """ # os.path.splitdrive does not reliably work on non-Windows systems # breaking the coverage, using PureWindowsPath instead is_full_windows_path_with_drive = bool(PureWindowsPath(path_string).drive) if is_full_windows_path_with_drive: return False is_remote_path = bool(urlparse(path_string).scheme) if is_remote_path: return False is_absolute_path = PurePosixPath(path_string).is_absolute() if is_absolute_path: return False return True
Checks whether a path string is a relative path. Example: :: >>> _is_relative_path("data/01_raw") == True >>> _is_relative_path("info.log") == True >>> _is_relative_path("/tmp/data/01_raw") == False >>> _is_relative_path(r"C:\\info.log") == False >>> _is_relative_path(r"\\'info.log") == False >>> _is_relative_path("c:/info.log") == False >>> _is_relative_path("s3://info.log") == False Args: path_string: The path string to check. Returns: Whether the string is a relative path.
_is_relative_path
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _convert_paths_to_absolute_posix( project_path: Path, conf_dictionary: dict[str, Any] ) -> dict[str, Any]: """Turn all relative paths inside ``conf_dictionary`` into absolute paths by appending them to ``project_path`` and convert absolute Windows paths to POSIX format. This is a hack to make sure that we don't have to change user's working directory for logging and datasets to work. It is important for non-standard workflows such as IPython notebook where users don't go through `kedro run` or `__main__.py` entrypoints. Example: :: >>> conf = _convert_paths_to_absolute_posix( >>> project_path=Path("/path/to/my/project"), >>> conf_dictionary={ >>> "handlers": { >>> "info_file_handler": { >>> "filename": "info.log" >>> } >>> } >>> } >>> ) >>> print(conf['handlers']['info_file_handler']['filename']) "/path/to/my/project/info.log" Args: project_path: The root directory to prepend to relative path to make absolute path. conf_dictionary: The configuration containing paths to expand. Returns: A dictionary containing only absolute paths. Raises: ValueError: If the provided ``project_path`` is not an absolute path. """ if not project_path.is_absolute(): raise ValueError( f"project_path must be an absolute path. Received: {project_path}" ) # only check a few conf keys that are known to specify a path string as value conf_keys_with_filepath = ("filename", "filepath", "path") for conf_key, conf_value in conf_dictionary.items(): # if the conf_value is another dictionary, absolutify its paths first. if isinstance(conf_value, dict): conf_dictionary[conf_key] = _convert_paths_to_absolute_posix( project_path, conf_value ) continue # if the conf_value is not a dictionary nor a string, skip if not isinstance(conf_value, str): continue # if the conf_value is a string but the conf_key isn't one associated with filepath, skip if conf_key not in conf_keys_with_filepath: continue if _is_relative_path(conf_value): # Absolute local path should be in POSIX format conf_value_absolute_path = (project_path / conf_value).as_posix() conf_dictionary[conf_key] = conf_value_absolute_path elif PureWindowsPath(conf_value).drive: # Convert absolute Windows path to POSIX format conf_dictionary[conf_key] = PureWindowsPath(conf_value).as_posix() return conf_dictionary
Turn all relative paths inside ``conf_dictionary`` into absolute paths by appending them to ``project_path`` and convert absolute Windows paths to POSIX format. This is a hack to make sure that we don't have to change user's working directory for logging and datasets to work. It is important for non-standard workflows such as IPython notebook where users don't go through `kedro run` or `__main__.py` entrypoints. Example: :: >>> conf = _convert_paths_to_absolute_posix( >>> project_path=Path("/path/to/my/project"), >>> conf_dictionary={ >>> "handlers": { >>> "info_file_handler": { >>> "filename": "info.log" >>> } >>> } >>> } >>> ) >>> print(conf['handlers']['info_file_handler']['filename']) "/path/to/my/project/info.log" Args: project_path: The root directory to prepend to relative path to make absolute path. conf_dictionary: The configuration containing paths to expand. Returns: A dictionary containing only absolute paths. Raises: ValueError: If the provided ``project_path`` is not an absolute path.
_convert_paths_to_absolute_posix
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _validate_transcoded_datasets(catalog: CatalogProtocol) -> None:
    """Validates that transcoded datasets are correctly named.

    Args:
        catalog: The catalog object containing the datasets to be validated.

    Raises:
        ValueError: If a dataset name does not conform to the expected
            transcoding naming conventions, a ValueError is raised by
            the `_transcode_split` function.
    """
    for dataset_name in catalog._datasets.keys():
        _transcode_split(dataset_name)
Validates that transcoded datasets are correctly named. Args: catalog: The catalog object containing the datasets to be validated. Raises: ValueError: If a dataset name does not conform to the expected transcoding naming conventions, a ValueError is raised by the `_transcode_split` function.
_validate_transcoded_datasets
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def catalog(self) -> CatalogProtocol:
    """Read-only property referring to Kedro's catalog for this context.

    Returns:
        catalog defined in `catalog.yml`.

    Raises:
        KedroContextError: Incorrect catalog registered for the project.
    """
    return self._get_catalog()
Read-only property referring to Kedro's catalog for this context. Returns: catalog defined in `catalog.yml`. Raises: KedroContextError: Incorrect catalog registered for the project.
catalog
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def params(self) -> dict[str, Any]: """Read-only property referring to Kedro's parameters for this context. Returns: Parameters defined in `parameters.yml` with the addition of any extra parameters passed at initialization. """ try: params = self.config_loader["parameters"] except MissingConfigException as exc: warn(f"Parameters not found in your Kedro project config.\n{exc!s}") params = {} if self._extra_params: # Merge nested structures params = OmegaConf.merge(params, self._extra_params) return OmegaConf.to_container(params) if OmegaConf.is_config(params) else params # type: ignore[return-value]
Read-only property referring to Kedro's parameters for this context. Returns: Parameters defined in `parameters.yml` with the addition of any extra parameters passed at initialization.
params
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _get_catalog( self, save_version: str | None = None, load_versions: dict[str, str] | None = None, ) -> CatalogProtocol: """A hook for changing the creation of a catalog instance. Returns: catalog defined in `catalog.yml`. Raises: KedroContextError: Incorrect catalog registered for the project. """ # '**/catalog*' reads modular pipeline configs conf_catalog = self.config_loader["catalog"] # turn relative paths in conf_catalog into absolute paths # before initializing the catalog conf_catalog = _convert_paths_to_absolute_posix( project_path=self.project_path, conf_dictionary=conf_catalog ) conf_creds = self._get_config_credentials() catalog: DataCatalog = settings.DATA_CATALOG_CLASS.from_config( catalog=conf_catalog, credentials=conf_creds, load_versions=load_versions, save_version=save_version, ) feed_dict = self._get_feed_dict() catalog.add_feed_dict(feed_dict) _validate_transcoded_datasets(catalog) self._hook_manager.hook.after_catalog_created( catalog=catalog, conf_catalog=conf_catalog, conf_creds=conf_creds, feed_dict=feed_dict, save_version=save_version, load_versions=load_versions, ) return catalog
A hook for changing the creation of a catalog instance. Returns: catalog defined in `catalog.yml`. Raises: KedroContextError: Incorrect catalog registered for the project.
_get_catalog
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _add_param_to_feed_dict(param_name: str, param_value: Any) -> None: """This recursively adds parameter paths to the `feed_dict`, whenever `param_value` is a dictionary itself, so that users can specify specific nested parameters in their node inputs. Example: >>> param_name = "a" >>> param_value = {"b": 1} >>> _add_param_to_feed_dict(param_name, param_value) >>> assert feed_dict["params:a"] == {"b": 1} >>> assert feed_dict["params:a.b"] == 1 """ key = f"params:{param_name}" feed_dict[key] = param_value if isinstance(param_value, dict): for key, val in param_value.items(): _add_param_to_feed_dict(f"{param_name}.{key}", val)
This recursively adds parameter paths to the `feed_dict`, whenever `param_value` is a dictionary itself, so that users can specify specific nested parameters in their node inputs. Example: >>> param_name = "a" >>> param_value = {"b": 1} >>> _add_param_to_feed_dict(param_name, param_value) >>> assert feed_dict["params:a"] == {"b": 1} >>> assert feed_dict["params:a.b"] == 1
_get_feed_dict._add_param_to_feed_dict
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _get_feed_dict(self) -> dict[str, Any]: """Get parameters and return the feed dictionary.""" params = self.params feed_dict = {"parameters": params} def _add_param_to_feed_dict(param_name: str, param_value: Any) -> None: """This recursively adds parameter paths to the `feed_dict`, whenever `param_value` is a dictionary itself, so that users can specify specific nested parameters in their node inputs. Example: >>> param_name = "a" >>> param_value = {"b": 1} >>> _add_param_to_feed_dict(param_name, param_value) >>> assert feed_dict["params:a"] == {"b": 1} >>> assert feed_dict["params:a.b"] == 1 """ key = f"params:{param_name}" feed_dict[key] = param_value if isinstance(param_value, dict): for key, val in param_value.items(): _add_param_to_feed_dict(f"{param_name}.{key}", val) for param_name, param_value in params.items(): _add_param_to_feed_dict(param_name, param_value) return feed_dict
Get parameters and return the feed dictionary.
_get_feed_dict
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _get_config_credentials(self) -> dict[str, Any]: """Getter for credentials specified in credentials directory.""" try: conf_creds: dict[str, Any] = self.config_loader["credentials"] except MissingConfigException as exc: logging.getLogger(__name__).debug( "Credentials not found in your Kedro project config.\n %s", str(exc) ) conf_creds = {} return conf_creds
Getter for credentials specified in credentials directory.
_get_config_credentials
python
kedro-org/kedro
kedro/framework/context/context.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/context/context.py
Apache-2.0
def _load_data_wrapper(func: Any) -> Any: """Wrap a method in _ProjectPipelines so that data is loaded on first access. Taking inspiration from dynaconf.utils.functional.new_method_proxy """ def inner(self: Any, *args: Any, **kwargs: Any) -> Any: self._load_data() return func(self._content, *args, **kwargs) return inner
Wrap a method in _ProjectPipelines so that data is loaded on first access. Taking inspiration from dynaconf.utils.functional.new_method_proxy
_load_data_wrapper
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
def _load_data(self) -> None: """Lazily read pipelines defined in the pipelines registry module.""" # If the pipelines dictionary has not been configured with a pipelines module # or if data has been loaded if self._pipelines_module is None or self._is_data_loaded: return register_pipelines = self._get_pipelines_registry_callable( self._pipelines_module ) project_pipelines = register_pipelines() self._content = project_pipelines self._is_data_loaded = True
Lazily read pipelines defined in the pipelines registry module.
_load_data
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
def configure(self, pipelines_module: str | None = None) -> None: """Configure the pipelines_module to load the pipelines dictionary. Reset the data loading state so that after every ``configure`` call, data are reloaded. """ self._pipelines_module = pipelines_module self._is_data_loaded = False self._content = {}
Configure the pipelines_module to load the pipelines dictionary. Reset the data loading state so that after every ``configure`` call, data are reloaded.
configure
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
def __init__(self) -> None: """Initialise project logging. The path to logging configuration is given in environment variable KEDRO_LOGGING_CONFIG (defaults to conf/logging.yml).""" logger = logging.getLogger(__name__) user_logging_path = os.environ.get("KEDRO_LOGGING_CONFIG") project_logging_path = Path("conf/logging.yml") default_logging_path = Path( Path(__file__).parent / "rich_logging.yml" if importlib.util.find_spec("rich") else Path(__file__).parent / "default_logging.yml", ) path: str | Path msg = "" if user_logging_path: path = user_logging_path elif project_logging_path.exists(): path = project_logging_path msg = "You can change this by setting the KEDRO_LOGGING_CONFIG environment variable accordingly." else: # Fallback to the framework default loggings path = default_logging_path msg = f"Using '{path!s}' as logging configuration. " + msg # Load and apply the logging configuration logging_config = Path(path).read_text(encoding="utf-8") self.configure(yaml.safe_load(logging_config)) logger.info(msg)
Initialise project logging. The path to logging configuration is given in environment variable KEDRO_LOGGING_CONFIG (defaults to conf/logging.yml).
__init__
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
def configure(self, logging_config: dict[str, Any]) -> None: """Configure project logging using ``logging_config`` (e.g. from project logging.yml). We store this in the UserDict data so that it can be reconfigured in _bootstrap_subprocess. """ logging.config.dictConfig(logging_config) self.data = logging_config
Configure project logging using ``logging_config`` (e.g. from project logging.yml). We store this in the UserDict data so that it can be reconfigured in _bootstrap_subprocess.
configure
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
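Because ``configure`` simply hands the dictionary to ``logging.config.dictConfig`` and keeps a copy in ``self.data``, any standard dictConfig mapping works. A deliberately small, illustrative configuration (not taken from the Kedro repository) might look like this:

import logging

from kedro.framework.project import LOGGING  # module-level _ProjectLogging instance

# Minimal dictConfig mapping; real projects normally load conf/logging.yml instead.
minimal_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {"class": "logging.StreamHandler", "level": "INFO"},
    },
    "loggers": {
        "kedro": {"level": "INFO"},
    },
    "root": {"handlers": ["console"]},
}

LOGGING.configure(minimal_config)
logging.getLogger("kedro").info("Logging reconfigured")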
def set_project_logging(self, package_name: str) -> None:
    """Add the project level logging to the loggers upon provision of a package name.
    Checks if project logger already exists to prevent overwriting, if none exists
    it defaults to setting project logs at INFO level."""
    if package_name not in self.data["loggers"]:
        self.data["loggers"][package_name] = {"level": "INFO"}
        self.configure(self.data)
Add the project level logging to the loggers upon provision of a package name. Checks if project logger already exists to prevent overwriting, if none exists it defaults to setting project logs at INFO level.
set_project_logging
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
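As a sketch, assuming a project package named ``my_project`` (a placeholder) with no logger entry of its own, ``set_project_logging`` adds an INFO-level logger for it and re-applies the stored configuration:

import logging

from kedro.framework.project import LOGGING

# "my_project" is a hypothetical package name used only for illustration.
LOGGING.set_project_logging("my_project")

# The package logger is now present in the stored config at INFO level.
assert LOGGING.data["loggers"]["my_project"] == {"level": "INFO"}
assert logging.getLogger("my_project").getEffectiveLevel() == logging.INFO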
def configure_project(package_name: str) -> None:
    """Configure a Kedro project by populating its settings with values
    defined in user's settings.py and pipeline_registry.py.
    """
    settings_module = f"{package_name}.settings"
    settings.configure(settings_module)

    pipelines_module = f"{package_name}.pipeline_registry"
    pipelines.configure(pipelines_module)

    # Once the project is successfully configured once, store PACKAGE_NAME as a
    # global variable to make it easily accessible. This is used by validate_settings()
    # below, and also by ParallelRunner on Windows, as package_name is required every
    # time a new subprocess is spawned.
    global PACKAGE_NAME  # noqa: PLW0603
    PACKAGE_NAME = package_name

    if PACKAGE_NAME:
        LOGGING.set_project_logging(PACKAGE_NAME)
Configure a Kedro project by populating its settings with values defined in user's settings.py and pipeline_registry.py.
configure_project
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
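In normal use ``configure_project`` is called for you by project bootstrapping and the Kedro CLI, but it can be invoked directly when embedding a packaged project. A hedged sketch, assuming a hypothetical installed package ``my_project`` with its own settings.py and pipeline_registry.py:

from kedro.framework.project import configure_project, pipelines, settings

# "my_project" is a placeholder; the package must be importable for this to work.
configure_project("my_project")

# settings and pipelines are now wired to my_project.settings and
# my_project.pipeline_registry respectively (both still load lazily).
print(settings.CONF_SOURCE)  # e.g. "conf" unless overridden in settings.py
print(list(pipelines))       # pipeline names from register_pipelines()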
def configure_logging(logging_config: dict[str, Any]) -> None:
    """Configure logging according to ``logging_config`` dictionary."""
    LOGGING.configure(logging_config)
Configure logging according to ``logging_config`` dictionary.
configure_logging
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
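``configure_logging`` is the public entry point for the same mechanism; for example, a script could load a project-specific YAML file and apply it. A sketch, with the file path assumed rather than taken from the repository:

from pathlib import Path

import yaml

from kedro.framework.project import configure_logging

# Path is illustrative; any dictConfig-compatible YAML file works.
logging_config = yaml.safe_load(Path("conf/logging.yml").read_text(encoding="utf-8"))
configure_logging(logging_config)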
def validate_settings() -> None:
    """Eagerly validate that the settings module is importable if it exists. This is desirable to
    surface any syntax or import errors early. In particular, without eagerly importing
    the settings module, dynaconf would silence any import error (e.g. missing
    dependency, missing/mislabelled pipeline), and users would instead get a cryptic
    error message ``Expected an instance of `ConfigLoader`, got `NoneType` instead``.
    More info on the dynaconf issue: https://github.com/dynaconf/dynaconf/issues/460
    """
    if PACKAGE_NAME is None:
        raise ValueError(
            "Package name not found. Make sure you have configured the project using "
            "'bootstrap_project'. This should happen automatically if you are using "
            "Kedro command line interface."
        )
    # Check if file exists, if it does, validate it.
    if importlib.util.find_spec(f"{PACKAGE_NAME}.settings") is not None:
        importlib.import_module(f"{PACKAGE_NAME}.settings")
    else:
        logger = logging.getLogger(__name__)
        logger.warning("No 'settings.py' found, defaults will be used.")
Eagerly validate that the settings module is importable if it exists. This is desirable to surface any syntax or import errors early. In particular, without eagerly importing the settings module, dynaconf would silence any import error (e.g. missing dependency, missing/mislabelled pipeline), and users would instead get a cryptic error message ``Expected an instance of `ConfigLoader`, got `NoneType` instead``. More info on the dynaconf issue: https://github.com/dynaconf/dynaconf/issues/460
validate_settings
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
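A typical usage pattern, sketched here with a placeholder package name, is to call ``validate_settings`` immediately after ``configure_project`` so that a broken settings.py fails loudly rather than surfacing later as a cryptic dynaconf error:

from kedro.framework.project import configure_project, validate_settings

configure_project("my_project")  # sets PACKAGE_NAME
validate_settings()              # imports my_project.settings eagerly, or warns if absent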
def find_pipelines(raise_errors: bool = False) -> dict[str, Pipeline]:  # noqa: PLR0912
    """Automatically find modular pipelines having a ``create_pipeline``
    function. By default, projects created using Kedro 0.18.3 and higher
    call this function to autoregister pipelines upon creation/addition.

    Projects that require more fine-grained control can still define the
    pipeline registry without calling this function. Alternatively, they
    can modify the mapping generated by the ``find_pipelines`` function.

    For more information on the pipeline registry and autodiscovery, see
    https://docs.kedro.org/en/stable/nodes_and_pipelines/pipeline_registry.html

    Args:
        raise_errors: If ``True``, raise an error upon failed discovery.

    Returns:
        A generated mapping from pipeline names to ``Pipeline`` objects.

    Raises:
        ImportError: When a module does not expose a ``create_pipeline``
            function, the ``create_pipeline`` function does not return a
            ``Pipeline`` object, or if the module import fails up front.
            If ``raise_errors`` is ``False``, see Warns section instead.

    Warns:
        UserWarning: When a module does not expose a ``create_pipeline``
            function, the ``create_pipeline`` function does not return a
            ``Pipeline`` object, or if the module import fails up front.
            If ``raise_errors`` is ``True``, see Raises section instead.
    """
    pipeline_obj = None

    # Handle the simplified project structure found in several starters.
    pipeline_module_name = f"{PACKAGE_NAME}.pipeline"
    try:
        pipeline_module = importlib.import_module(pipeline_module_name)
    except Exception as exc:
        if str(exc) != f"No module named '{pipeline_module_name}'":
            if raise_errors:
                raise ImportError(
                    f"An error occurred while importing the "
                    f"'{pipeline_module_name}' module."
                ) from exc

            warnings.warn(
                IMPORT_ERROR_MESSAGE.format(
                    module=pipeline_module_name, tb_exc=traceback.format_exc()
                )
            )
    else:
        pipeline_obj = _create_pipeline(pipeline_module)

    pipelines_dict = {"__default__": pipeline_obj or pipeline([])}

    # Handle the case that a project doesn't have a pipelines directory.
    try:
        pipelines_package = importlib_resources.files(f"{PACKAGE_NAME}.pipelines")
    except ModuleNotFoundError as exc:
        if str(exc) == f"No module named '{PACKAGE_NAME}.pipelines'":
            return pipelines_dict

    for pipeline_dir in pipelines_package.iterdir():
        if not pipeline_dir.is_dir():
            continue

        pipeline_name = pipeline_dir.name
        if pipeline_name == "__pycache__":
            continue
        # Prevent imports of hidden directories/files
        if pipeline_name.startswith("."):
            continue

        pipeline_module_name = f"{PACKAGE_NAME}.pipelines.{pipeline_name}"
        try:
            pipeline_module = importlib.import_module(pipeline_module_name)
        except Exception as exc:
            if raise_errors:
                raise ImportError(
                    f"An error occurred while importing the "
                    f"'{pipeline_module_name}' module."
                ) from exc

            warnings.warn(
                IMPORT_ERROR_MESSAGE.format(
                    module=pipeline_module_name, tb_exc=traceback.format_exc()
                )
            )
            continue

        pipeline_obj = _create_pipeline(pipeline_module)
        if pipeline_obj is not None:
            pipelines_dict[pipeline_name] = pipeline_obj
    return pipelines_dict
Automatically find modular pipelines having a ``create_pipeline``
function. By default, projects created using Kedro 0.18.3 and higher
call this function to autoregister pipelines upon creation/addition.

Projects that require more fine-grained control can still define the
pipeline registry without calling this function. Alternatively, they
can modify the mapping generated by the ``find_pipelines`` function.

For more information on the pipeline registry and autodiscovery, see
https://docs.kedro.org/en/stable/nodes_and_pipelines/pipeline_registry.html

Args:
    raise_errors: If ``True``, raise an error upon failed discovery.

Returns:
    A generated mapping from pipeline names to ``Pipeline`` objects.

Raises:
    ImportError: When a module does not expose a ``create_pipeline``
        function, the ``create_pipeline`` function does not return a
        ``Pipeline`` object, or if the module import fails up front.
        If ``raise_errors`` is ``False``, see Warns section instead.

Warns:
    UserWarning: When a module does not expose a ``create_pipeline``
        function, the ``create_pipeline`` function does not return a
        ``Pipeline`` object, or if the module import fails up front.
        If ``raise_errors`` is ``True``, see Raises section instead.
find_pipelines
python
kedro-org/kedro
kedro/framework/project/__init__.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/project/__init__.py
Apache-2.0
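The usual place to call ``find_pipelines`` is the project's pipeline_registry.py, where newer Kedro starters generate roughly the following:

"""Project pipelines."""

from kedro.framework.project import find_pipelines
from kedro.pipeline import Pipeline


def register_pipelines() -> dict[str, Pipeline]:
    """Register the project's pipelines discovered under <package>.pipelines."""
    return find_pipelines()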
def __init__(
    self,
    max_workers: int | None = None,
    is_async: bool = False,
    extra_dataset_patterns: dict[str, dict[str, Any]] | None = None,
):
    """
    Instantiates the runner.

    Args:
        max_workers: Number of worker processes to spawn. If not set,
            calculated automatically based on the pipeline configuration
            and CPU core count.
        is_async: If True, set to False, because `ThreadRunner`
            doesn't support loading and saving the node inputs and
            outputs asynchronously with threads. Defaults to False.
        extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
            during the run. This is used to set the default datasets to MemoryDataset
            for `ThreadRunner`.

    Raises:
        ValueError: bad parameters passed
    """
    if is_async:
        warnings.warn(
            "'ThreadRunner' doesn't support loading and saving the "
            "node inputs and outputs asynchronously with threads. "
            "Setting 'is_async' to False."
        )
    default_dataset_pattern = {"{default}": {"type": "MemoryDataset"}}
    self._extra_dataset_patterns = extra_dataset_patterns or default_dataset_pattern
    super().__init__(
        is_async=False, extra_dataset_patterns=self._extra_dataset_patterns
    )
    self._max_workers = self._validate_max_workers(max_workers)
Instantiates the runner.

Args:
    max_workers: Number of worker processes to spawn. If not set,
        calculated automatically based on the pipeline configuration
        and CPU core count.
    is_async: If True, set to False, because `ThreadRunner`
        doesn't support loading and saving the node inputs and
        outputs asynchronously with threads. Defaults to False.
    extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
        during the run. This is used to set the default datasets to MemoryDataset
        for `ThreadRunner`.

Raises:
    ValueError: bad parameters passed
__init__
python
kedro-org/kedro
kedro/runner/thread_runner.py
https://github.com/kedro-org/kedro/blob/master/kedro/runner/thread_runner.py
Apache-2.0
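As an illustrative sketch (the dataset names and the toy node are made up, and the exact ``DataCatalog`` constructor may differ between Kedro versions), a ``ThreadRunner`` can execute a small in-memory pipeline like this:

from kedro.io import DataCatalog, MemoryDataset
from kedro.pipeline import node, pipeline
from kedro.runner import ThreadRunner


def double(x: int) -> int:
    return x * 2


catalog = DataCatalog({"x": MemoryDataset(21)})
toy_pipeline = pipeline([node(double, inputs="x", outputs="y", name="double_node")])

# is_async is forced to False internally; an unset max_workers is derived from the pipeline.
runner = ThreadRunner(max_workers=2)
runner.run(toy_pipeline, catalog)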
def _get_required_workers_count(self, pipeline: Pipeline) -> int:
    """
    Calculate the max number of processes required for the pipeline
    """
    # Number of nodes is a safe upper-bound estimate.
    # It's also safe to reduce it by the number of layers minus one,
    # because each layer means some nodes depend on other nodes
    # and they can not run in parallel.
    # It might be not a perfect solution, but good enough and simple.
    required_threads = len(pipeline.nodes) - len(pipeline.grouped_nodes) + 1

    return (
        min(required_threads, self._max_workers)
        if self._max_workers
        else required_threads
    )
Calculate the max number of processes required for the pipeline
_get_required_workers_count
python
kedro-org/kedro
kedro/runner/thread_runner.py
https://github.com/kedro-org/kedro/blob/master/kedro/runner/thread_runner.py
Apache-2.0
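To make the arithmetic concrete: for a pipeline with 5 nodes arranged in 3 sequential layers (``grouped_nodes`` has length 3), the estimate is 5 - 3 + 1 = 3 threads, capped by ``max_workers`` when one is given. A small stand-alone restatement of the same formula, outside the class and purely for illustration:

# Re-statement of the formula used by _get_required_workers_count, not Kedro API.
def estimate_required_threads(
    n_nodes: int, n_layers: int, max_workers: int | None = None
) -> int:
    required = n_nodes - n_layers + 1
    return min(required, max_workers) if max_workers else required


assert estimate_required_threads(5, 3) == 3
assert estimate_required_threads(5, 3, max_workers=2) == 2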
def _run(
    self,
    pipeline: Pipeline,
    catalog: CatalogProtocol,
    hook_manager: PluginManager | None = None,
    session_id: str | None = None,
) -> None:
    """The method implementing threaded pipeline running.

    Args:
        pipeline: The ``Pipeline`` to run.
        catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
        hook_manager: The ``PluginManager`` to activate hooks.
        session_id: The id of the session.

    Raises:
        Exception: in case of any downstream node failure.
    """
    super()._run(
        pipeline=pipeline,
        catalog=catalog,
        hook_manager=hook_manager,
        session_id=session_id,
    )
The method implementing threaded pipeline running.

Args:
    pipeline: The ``Pipeline`` to run.
    catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
    hook_manager: The ``PluginManager`` to activate hooks.
    session_id: The id of the session.

Raises:
    Exception: in case of any downstream node failure.
_run
python
kedro-org/kedro
kedro/runner/thread_runner.py
https://github.com/kedro-org/kedro/blob/master/kedro/runner/thread_runner.py
Apache-2.0