code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def __init__(
self,
max_workers: int | None = None,
is_async: bool = False,
extra_dataset_patterns: dict[str, dict[str, Any]] | None = None,
):
"""
Instantiates the runner by creating a Manager.
Args:
max_workers: Number of worker processes to spawn. If not set,
calculated automatically based on the pipeline configuration
and CPU core count. On Windows machines, the max_workers value
cannot be larger than 61 and will be set to min(61, max_workers).
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
during the run. This is used to set the default datasets to SharedMemoryDataset
for `ParallelRunner`.
Raises:
ValueError: If max_workers is set and is not positive.
"""
default_dataset_pattern = {"{default}": {"type": "SharedMemoryDataset"}}
self._extra_dataset_patterns = extra_dataset_patterns or default_dataset_pattern
super().__init__(
is_async=is_async, extra_dataset_patterns=self._extra_dataset_patterns
)
self._manager = ParallelRunnerManager()
self._manager.start()
self._max_workers = self._validate_max_workers(max_workers) | Instantiates the runner by creating a Manager.
Args:
max_workers: Number of worker processes to spawn. If not set,
calculated automatically based on the pipeline configuration
and CPU core count. On Windows machines, the max_workers value
cannot be larger than 61 and will be set to min(61, max_workers).
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
during the run. This is used to set the default datasets to SharedMemoryDataset
for `ParallelRunner`.
Raises:
ValueError: If max_workers is set and is not positive. | __init__ | python | kedro-org/kedro | kedro/runner/parallel_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/parallel_runner.py | Apache-2.0 |
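A minimal usage sketch of the constructor above. ParallelRunner is the real class exported from kedro.runner; the pipeline, catalog and hook_manager objects referenced in the comments are assumed to come from an existing Kedro project and are not defined here.
from kedro.runner import ParallelRunner

# On spawn-based platforms, create the runner under `if __name__ == "__main__":`.
runner = ParallelRunner(max_workers=4, is_async=False)
# outputs = runner.run(pipeline, catalog, hook_manager)  # hypothetical project objects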
def _validate_nodes(cls, nodes: Iterable[Node]) -> None:
"""Ensure all tasks are serialisable."""
unserialisable = []
for node in nodes:
try:
ForkingPickler.dumps(node)
except (AttributeError, PicklingError):
unserialisable.append(node)
if unserialisable:
raise AttributeError(
f"The following nodes cannot be serialised: {sorted(unserialisable)}\n"
f"In order to utilize multiprocessing you need to make sure all nodes "
f"are serialisable, i.e. nodes should not include lambda "
f"functions, nested functions, closures, etc.\nIf you "
f"are using custom decorators ensure they are correctly decorated using "
f"functools.wraps()."
) | Ensure all tasks are serialisable. | _validate_nodes | python | kedro-org/kedro | kedro/runner/parallel_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/parallel_runner.py | Apache-2.0 |
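A short sketch of the failure mode this validation guards against: a node wrapping a lambda cannot be pickled, so ParallelRunner would reject it. The node below is illustrative only.
import pickle
from kedro.pipeline import node

bad_node = node(lambda x: x, inputs="a", outputs="b", name="identity")
try:
    pickle.dumps(bad_node)  # ForkingPickler.dumps fails for the same reason
except (AttributeError, pickle.PicklingError):
    print("not serialisable: ParallelRunner._validate_nodes would raise AttributeError")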
def _validate_catalog(cls, catalog: CatalogProtocol, pipeline: Pipeline) -> None:
"""Ensure that all datasets are serialisable and that we do not have
any non proxied memory datasets being used as outputs as their content
will not be synchronized across threads.
"""
datasets = catalog._datasets
unserialisable = []
for name, dataset in datasets.items():
if getattr(dataset, "_SINGLE_PROCESS", False): # SKIP_IF_NO_SPARK
unserialisable.append(name)
continue
try:
ForkingPickler.dumps(dataset)
except (AttributeError, PicklingError):
unserialisable.append(name)
if unserialisable:
raise AttributeError(
f"The following datasets cannot be used with multiprocessing: "
f"{sorted(unserialisable)}\nIn order to utilize multiprocessing you "
f"need to make sure all datasets are serialisable, i.e. datasets "
f"should not make use of lambda functions, nested functions, closures "
f"etc.\nIf you are using custom decorators ensure they are correctly "
f"decorated using functools.wraps()."
)
memory_datasets = []
for name, dataset in datasets.items():
if (
name in pipeline.all_outputs()
and isinstance(dataset, MemoryDataset)
and not isinstance(dataset, BaseProxy)
):
memory_datasets.append(name)
if memory_datasets:
raise AttributeError(
f"The following datasets are memory datasets: "
f"{sorted(memory_datasets)}\n"
f"ParallelRunner does not support output to externally created "
f"MemoryDatasets"
) | Ensure that all datasets are serialisable and that we do not have
any non-proxied memory datasets being used as outputs, as their content
will not be synchronized across worker processes. | _validate_catalog | python | kedro-org/kedro | kedro/runner/parallel_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/parallel_runner.py | Apache-2.0 |
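A sketch of the second check above: an explicitly registered MemoryDataset that is also a pipeline output is rejected, because its contents would live only in the worker process that produced them. The catalog entry name is illustrative.
from kedro.io import DataCatalog, MemoryDataset

catalog = DataCatalog({"model": MemoryDataset()})
# If "model" appears in pipeline.all_outputs(), _validate_catalog raises
# AttributeError, since ParallelRunner does not support output to externally
# created MemoryDatasets.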
def _get_required_workers_count(self, pipeline: Pipeline) -> int:
"""
Calculate the max number of processes required for the pipeline,
limit to the number of CPU cores.
"""
# Number of nodes is a safe upper-bound estimate.
# It's also safe to reduce it by the number of layers minus one,
# because each layer means some nodes depend on other nodes
# and they can not run in parallel.
# It might not be a perfect solution, but it is good enough and simple.
required_processes = len(pipeline.nodes) - len(pipeline.grouped_nodes) + 1
return min(required_processes, self._max_workers) | Calculate the max number of processes required for the pipeline,
limit to the number of CPU cores. | _get_required_workers_count | python | kedro-org/kedro | kedro/runner/parallel_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/parallel_runner.py | Apache-2.0 |
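A worked example of the estimate above with illustrative numbers: five nodes arranged in three sequential layers need at most 5 - 3 + 1 = 3 worker processes, further capped by max_workers.
n_nodes, n_layers, max_workers = 5, 3, 8
required_processes = n_nodes - n_layers + 1  # same formula as above
print(min(required_processes, max_workers))  # -> 3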
def _run(
self,
pipeline: Pipeline,
catalog: CatalogProtocol,
hook_manager: PluginManager | None = None,
session_id: str | None = None,
) -> None:
"""The method implementing parallel pipeline running.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
Raises:
AttributeError: When the provided pipeline is not suitable for
parallel execution.
RuntimeError: If the runner is unable to schedule the execution of
all pipeline nodes.
Exception: In case of any downstream node failure.
"""
if not self._is_async:
self._logger.info(
"Using synchronous mode for loading and saving data. Use the --async flag "
"for potential performance gains. https://docs.kedro.org/en/stable/nodes_and_pipelines/run_a_pipeline.html#load-and-save-asynchronously"
)
super()._run(
pipeline=pipeline,
catalog=catalog,
session_id=session_id,
) | The method implementing parallel pipeline running.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
Raises:
AttributeError: When the provided pipeline is not suitable for
parallel execution.
RuntimeError: If the runner is unable to schedule the execution of
all pipeline nodes.
Exception: In case of any downstream node failure. | _run | python | kedro-org/kedro | kedro/runner/parallel_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/parallel_runner.py | Apache-2.0 |
def __call__(self) -> Node:
"""Make the class instance callable by ProcessPoolExecutor."""
return self.execute() | Make the class instance callable by ProcessPoolExecutor. | __call__ | python | kedro-org/kedro | kedro/runner/task.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/task.py | Apache-2.0 |
def _run_node_synchronization(
package_name: str | None = None,
logging_config: dict[str, Any] | None = None,
) -> Any:
"""Run a single `Node` with inputs from and outputs to the `catalog`.
A ``PluginManager`` instance is created in each subprocess because the
``PluginManager`` can't be serialised.
Args:
package_name: The name of the project Python package.
logging_config: A dictionary containing logging configuration.
Returns:
The initialised ``PluginManager`` with project and plugin hooks registered.
"""
if multiprocessing.get_start_method() == "spawn" and package_name:
Task._bootstrap_subprocess(package_name, logging_config)
hook_manager = _create_hook_manager()
_register_hooks(hook_manager, settings.HOOKS)
_register_hooks_entry_points(hook_manager, settings.DISABLE_HOOKS_FOR_PLUGINS)
return hook_manager | Prepare a subprocess for running a single `Node`: bootstrap the project if needed and create the hook manager.
A ``PluginManager`` instance is created in each subprocess because the
``PluginManager`` can't be serialised.
Args:
package_name: The name of the project Python package.
logging_config: A dictionary containing logging configuration.
Returns:
The initialised ``PluginManager`` with project and plugin hooks registered. | _run_node_synchronization | python | kedro-org/kedro | kedro/runner/task.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/task.py | Apache-2.0 |
def _synchronous_dataset_load(
dataset_name: str,
node: Node,
catalog: CatalogProtocol,
hook_manager: PluginManager,
) -> Any:
"""Minimal wrapper to ensure Hooks are run synchronously
within an asynchronous dataset load."""
hook_manager.hook.before_dataset_loaded(dataset_name=dataset_name, node=node)
return_ds = catalog.load(dataset_name)
hook_manager.hook.after_dataset_loaded(
dataset_name=dataset_name, data=return_ds, node=node
)
return return_ds | Minimal wrapper to ensure Hooks are run synchronously
within an asynchronous dataset load. | _synchronous_dataset_load | python | kedro-org/kedro | kedro/runner/task.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/task.py | Apache-2.0 |
def __init__(
self,
is_async: bool = False,
extra_dataset_patterns: dict[str, dict[str, Any]] | None = None,
):
"""Instantiates the runner class.
Args:
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
during the run. This is used to set the default datasets to MemoryDataset
for `SequentialRunner`.
"""
default_dataset_pattern = {"{default}": {"type": "MemoryDataset"}}
self._extra_dataset_patterns = extra_dataset_patterns or default_dataset_pattern
super().__init__(
is_async=is_async, extra_dataset_patterns=self._extra_dataset_patterns
) | Instantiates the runner class.
Args:
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
during the run. This is used to set the default datasets to MemoryDataset
for `SequentialRunner`. | __init__ | python | kedro-org/kedro | kedro/runner/sequential_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/sequential_runner.py | Apache-2.0 |
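A small usage sketch of the constructor above. The second call only shows the shape of an overriding pattern; a real override would normally carry dataset-specific configuration (for example a filepath template), which is omitted here.
from kedro.runner import SequentialRunner

# Default behaviour: unregistered datasets fall back to MemoryDataset.
runner = SequentialRunner(is_async=True)

# Shape of a custom default pattern (illustrative).
custom_runner = SequentialRunner(
    extra_dataset_patterns={"{default}": {"type": "MemoryDataset"}}
)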
def _run(
self,
pipeline: Pipeline,
catalog: CatalogProtocol,
hook_manager: PluginManager | None = None,
session_id: str | None = None,
) -> None:
"""The method implementing sequential pipeline running.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
Raises:
Exception: in case of any downstream node failure.
"""
if not self._is_async:
self._logger.info(
"Using synchronous mode for loading and saving data. Use the --async flag "
"for potential performance gains. https://docs.kedro.org/en/stable/nodes_and_pipelines/run_a_pipeline.html#load-and-save-asynchronously"
)
super()._run(
pipeline=pipeline,
catalog=catalog,
hook_manager=hook_manager,
session_id=session_id,
) | The method implementing sequential pipeline running.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
Raises:
Exception: in case of any downstream node failure. | _run | python | kedro-org/kedro | kedro/runner/sequential_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/sequential_runner.py | Apache-2.0 |
def __init__(
self,
is_async: bool = False,
extra_dataset_patterns: dict[str, dict[str, Any]] | None = None,
):
"""Instantiates the runner class.
Args:
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
during the run. This is used to set the default datasets on the Runner instances.
"""
self._is_async = is_async
self._extra_dataset_patterns = extra_dataset_patterns | Instantiates the runner class.
Args:
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
extra_dataset_patterns: Extra dataset factory patterns to be added to the catalog
during the run. This is used to set the default datasets on the Runner instances. | __init__ | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def run(
self,
pipeline: Pipeline,
catalog: CatalogProtocol,
hook_manager: PluginManager | None = None,
session_id: str | None = None,
) -> dict[str, Any]:
"""Run the ``Pipeline`` using the datasets provided by ``catalog``
and save results back to the same objects.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
Raises:
ValueError: Raised when ``Pipeline`` inputs cannot be satisfied.
Returns:
Any node outputs that cannot be processed by the catalog.
These are returned in a dictionary, where the keys are defined
by the node outputs.
"""
# Check which datasets used in the pipeline are in the catalog or match
# a pattern in the catalog, not including extra dataset patterns
# Run a warm-up to materialize all datasets in the catalog before run
warmed_up_ds = []
for ds in pipeline.datasets():
if ds in catalog:
warmed_up_ds.append(ds)
_ = catalog._get_dataset(ds)
# Check if there are any input datasets that aren't in the catalog and
# don't match a pattern in the catalog.
unsatisfied = pipeline.inputs() - set(warmed_up_ds)
if unsatisfied:
raise ValueError(
f"Pipeline input(s) {unsatisfied} not found in the {catalog.__class__.__name__}"
)
# Register the default dataset pattern with the catalog
# TODO: replace with catalog.config_resolver.add_runtime_patterns() when removing old catalog
catalog = catalog.shallow_copy(
extra_dataset_patterns=self._extra_dataset_patterns
)
hook_or_null_manager = hook_manager or _NullPluginManager()
# Check which datasets used in the pipeline are in the catalog or match
# a pattern in the catalog, including added extra_dataset_patterns
registered_ds = [ds for ds in pipeline.datasets() if ds in catalog]
if self._is_async:
self._logger.info(
"Asynchronous mode is enabled for loading and saving data"
)
self._run(pipeline, catalog, hook_or_null_manager, session_id) # type: ignore[arg-type]
self._logger.info("Pipeline execution completed successfully.")
# Identify MemoryDataset in the catalog
memory_datasets = {
ds_name
for ds_name, ds in catalog._datasets.items()
if isinstance(ds, MemoryDataset) or isinstance(ds, SharedMemoryDataset)
}
# Check if there's any output datasets that aren't in the catalog and don't match a pattern
# in the catalog and include MemoryDataset.
free_outputs = pipeline.outputs() - (set(registered_ds) - memory_datasets)
run_output = {ds_name: catalog.load(ds_name) for ds_name in free_outputs}
# Remove runtime patterns after run, so they do not affect further runs
if self._extra_dataset_patterns:
catalog.config_resolver.remove_runtime_patterns(
self._extra_dataset_patterns
)
return run_output | Run the ``Pipeline`` using the datasets provided by ``catalog``
and save results back to the same objects.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
Raises:
ValueError: Raised when ``Pipeline`` inputs cannot be satisfied.
Returns:
Any node outputs that cannot be processed by the catalog.
These are returned in a dictionary, where the keys are defined
by the node outputs. | run | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
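A hedged end-to-end sketch of the run() contract above. pipeline, catalog and hook_manager are hypothetical project objects, so the call is left commented; the returned dictionary holds only the outputs the catalog could not persist.
from kedro.runner import SequentialRunner

runner = SequentialRunner()
# free_outputs = runner.run(pipeline, catalog, hook_manager)
# free_outputs might look like {"model_metrics": {...}} for non-persisted outputs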
def run_only_missing(
self, pipeline: Pipeline, catalog: CatalogProtocol, hook_manager: PluginManager
) -> dict[str, Any]:
"""Run only the missing outputs from the ``Pipeline`` using the
datasets provided by ``catalog``, and save results back to the
same objects.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
Raises:
ValueError: Raised when ``Pipeline`` inputs cannot be
satisfied.
Returns:
Any node outputs that cannot be processed by the
catalog. These are returned in a dictionary, where
the keys are defined by the node outputs.
"""
free_outputs = pipeline.outputs() - set(catalog.list())
missing = {ds for ds in catalog.list() if not catalog.exists(ds)}
to_build = free_outputs | missing
to_rerun = pipeline.only_nodes_with_outputs(*to_build) + pipeline.from_inputs(
*to_build
)
# We also need any missing datasets that are required to run the
# `to_rerun` pipeline, including any chains of missing datasets.
unregistered_ds = pipeline.datasets() - set(catalog.list())
output_to_unregistered = pipeline.only_nodes_with_outputs(*unregistered_ds)
input_from_unregistered = to_rerun.inputs() & unregistered_ds
to_rerun += output_to_unregistered.to_outputs(*input_from_unregistered)
return self.run(to_rerun, catalog, hook_manager) | Run only the missing outputs from the ``Pipeline`` using the
datasets provided by ``catalog``, and save results back to the
same objects.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
Raises:
ValueError: Raised when ``Pipeline`` inputs cannot be
satisfied.
Returns:
Any node outputs that cannot be processed by the
catalog. These are returned in a dictionary, where
the keys are defined by the node outputs. | run_only_missing | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _get_executor(self, max_workers: int) -> Executor | None:
"""Abstract method to provide the correct executor (e.g., ThreadPoolExecutor, ProcessPoolExecutor or None if running sequentially)."""
pass | Abstract method to provide the correct executor (e.g., ThreadPoolExecutor, ProcessPoolExecutor or None if running sequentially). | _get_executor | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _run(
self,
pipeline: Pipeline,
catalog: CatalogProtocol,
hook_manager: PluginManager | None = None,
session_id: str | None = None,
) -> None:
"""The abstract interface for running pipelines, assuming that the
inputs have already been checked and normalized by run().
This contains the common pipeline execution logic using an executor.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session.
"""
nodes = pipeline.nodes
self._validate_catalog(catalog, pipeline)
self._validate_nodes(nodes)
self._set_manager_datasets(catalog, pipeline)
load_counts = Counter(chain.from_iterable(n.inputs for n in pipeline.nodes))
node_dependencies = pipeline.node_dependencies
todo_nodes = set(node_dependencies.keys())
done_nodes: set[Node] = set()
futures = set()
done = None
max_workers = self._get_required_workers_count(pipeline)
pool = self._get_executor(max_workers)
if pool is None:
for exec_index, node in enumerate(nodes):
try:
Task(
node=node,
catalog=catalog,
hook_manager=hook_manager,
is_async=self._is_async,
session_id=session_id,
).execute()
done_nodes.add(node)
except Exception:
self._suggest_resume_scenario(pipeline, done_nodes, catalog)
raise
self._logger.info("Completed node: %s", node.name)
self._logger.info(
"Completed %d out of %d tasks", len(done_nodes), len(nodes)
)
self._release_datasets(node, catalog, load_counts, pipeline)
return # Exit early since everything runs sequentially
with pool as executor:
while True:
ready = {n for n in todo_nodes if node_dependencies[n] <= done_nodes}
todo_nodes -= ready
for node in ready:
task = Task(
node=node,
catalog=catalog,
hook_manager=hook_manager,
is_async=self._is_async,
session_id=session_id,
)
if isinstance(executor, ProcessPoolExecutor):
task.parallel = True
futures.add(executor.submit(task))
if not futures:
if todo_nodes:
self._raise_runtime_error(todo_nodes, done_nodes, ready, done)
break
done, futures = wait(futures, return_when=FIRST_COMPLETED)
for future in done:
try:
node = future.result()
except Exception:
self._suggest_resume_scenario(pipeline, done_nodes, catalog)
raise
done_nodes.add(node)
self._logger.info("Completed node: %s", node.name)
self._logger.info(
"Completed %d out of %d tasks", len(done_nodes), len(nodes)
)
self._release_datasets(node, catalog, load_counts, pipeline) | The abstract interface for running pipelines, assuming that the
inputs have already been checked and normalized by run().
This contains the common pipeline execution logic using an executor.
Args:
pipeline: The ``Pipeline`` to run.
catalog: An implemented instance of ``CatalogProtocol`` from which to fetch data.
hook_manager: The ``PluginManager`` to activate hooks.
session_id: The id of the session. | _run | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
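A minimal, standalone sketch of the readiness test that drives the scheduling loop above, using plain strings in place of Node objects; the pipeline shape is made up.
node_dependencies = {"clean": set(), "train": {"clean"}, "report": {"train"}}
done_nodes = {"clean"}                                 # already executed
todo_nodes = set(node_dependencies) - done_nodes

ready = {n for n in todo_nodes if node_dependencies[n] <= done_nodes}
print(ready)  # -> {'train'}; 'report' waits until 'train' is done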
def _suggest_resume_scenario(
self,
pipeline: Pipeline,
done_nodes: Iterable[Node],
catalog: CatalogProtocol,
) -> None:
"""
Suggest a command to the user to resume a run after it fails.
The run should be started from the point closest to the failure
for which persisted input exists.
Args:
pipeline: the ``Pipeline`` of the run.
done_nodes: the ``Node``s that executed successfully.
catalog: an implemented instance of ``CatalogProtocol`` of the run.
"""
remaining_nodes = set(pipeline.nodes) - set(done_nodes)
postfix = ""
if done_nodes:
start_node_names = _find_nodes_to_resume_from(
pipeline=pipeline,
unfinished_nodes=remaining_nodes,
catalog=catalog,
)
start_nodes_str = ",".join(sorted(start_node_names))
postfix += f' --from-nodes "{start_nodes_str}"'
if not postfix:
self._logger.warning(
"No nodes ran. Repeat the previous command to attempt a new run."
)
else:
self._logger.warning(
f"There are {len(remaining_nodes)} nodes that have not run.\n"
"You can resume the pipeline run from the nearest nodes with "
"persisted inputs by adding the following "
f"argument to your previous command:\n{postfix}"
) | Suggest a command to the user to resume a run after it fails.
The run should be started from the point closest to the failure
for which persisted input exists.
Args:
pipeline: the ``Pipeline`` of the run.
done_nodes: the ``Node``s that executed successfully.
catalog: an implemented instance of ``CatalogProtocol`` of the run. | _suggest_resume_scenario | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
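For illustration, the resume hint assembled above ends up looking like this; the node names are made up.
start_node_names = {"split_data_node", "train_model_node"}
start_nodes_str = ",".join(sorted(start_node_names))
postfix = f' --from-nodes "{start_nodes_str}"'
print(postfix)  # ->  --from-nodes "split_data_node,train_model_node"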
def _release_datasets(
node: Node, catalog: CatalogProtocol, load_counts: dict, pipeline: Pipeline
) -> None:
"""Decrement dataset load counts and release any datasets we've finished with"""
for dataset in node.inputs:
load_counts[dataset] -= 1
if load_counts[dataset] < 1 and dataset not in pipeline.inputs():
catalog.release(dataset)
for dataset in node.outputs:
if load_counts[dataset] < 1 and dataset not in pipeline.outputs():
catalog.release(dataset) | Decrement dataset load counts and release any datasets we've finished with | _release_datasets | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _validate_max_workers(cls, max_workers: int | None) -> int:
"""
Validates and returns the number of workers. Sets to os.cpu_count() or 1 if max_workers is None,
and limits max_workers to 61 on Windows.
Args:
max_workers: Desired number of workers. If None, defaults to os.cpu_count() or 1.
Returns:
A valid number of workers to use.
Raises:
ValueError: If max_workers is set and is not positive.
"""
if max_workers is None:
max_workers = os.cpu_count() or 1
if sys.platform == "win32":
max_workers = min(_MAX_WINDOWS_WORKERS, max_workers)
elif max_workers <= 0:
raise ValueError("max_workers should be positive")
return max_workers | Validates and returns the number of workers. Sets to os.cpu_count() or 1 if max_workers is None,
and limits max_workers to 61 on Windows.
Args:
max_workers: Desired number of workers. If None, defaults to os.cpu_count() or 1.
Returns:
A valid number of workers to use.
Raises:
ValueError: If max_workers is set and is not positive. | _validate_max_workers | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
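A standalone sketch of the same validation logic, not a call into the library; the literal 61 stands in for the _MAX_WINDOWS_WORKERS cap referenced above.
import os
import sys

def pick_max_workers(max_workers=None):
    if max_workers is None:
        max_workers = os.cpu_count() or 1
        if sys.platform == "win32":
            max_workers = min(61, max_workers)  # Windows process-handle limit
    elif max_workers <= 0:
        raise ValueError("max_workers should be positive")
    return max_workers

print(pick_max_workers())   # CPU count (capped at 61 on Windows)
print(pick_max_workers(4))  # -> 4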
def _find_nodes_to_resume_from(
pipeline: Pipeline, unfinished_nodes: Collection[Node], catalog: CatalogProtocol
) -> set[str]:
"""Given a collection of unfinished nodes in a pipeline using
a certain catalog, find the node names to pass to pipeline.from_nodes()
to cover all unfinished nodes, including any additional nodes
that should be re-run if their outputs are not persisted.
Args:
pipeline: the ``Pipeline`` to find starting nodes for.
unfinished_nodes: collection of ``Node``s that have not finished yet
catalog: an implemented instance of ``CatalogProtocol`` of the run.
Returns:
Set of node names to pass to pipeline.from_nodes() to continue
the run.
"""
nodes_to_be_run = _find_all_nodes_for_resumed_pipeline(
pipeline, unfinished_nodes, catalog
)
# Find which of the remaining nodes would need to run first (in topo sort)
persistent_ancestors = _find_initial_node_group(pipeline, nodes_to_be_run)
return {n.name for n in persistent_ancestors} | Given a collection of unfinished nodes in a pipeline using
a certain catalog, find the node names to pass to pipeline.from_nodes()
to cover all unfinished nodes, including any additional nodes
that should be re-run if their outputs are not persisted.
Args:
pipeline: the ``Pipeline`` to find starting nodes for.
unfinished_nodes: collection of ``Node``s that have not finished yet
catalog: an implemented instance of ``CatalogProtocol`` of the run.
Returns:
Set of node names to pass to pipeline.from_nodes() to continue
the run. | _find_nodes_to_resume_from | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _find_all_nodes_for_resumed_pipeline(
pipeline: Pipeline, unfinished_nodes: Iterable[Node], catalog: CatalogProtocol
) -> set[Node]:
"""Breadth-first search approach to finding the complete set of
``Node``s which need to run to cover all unfinished nodes,
including any additional nodes that should be re-run if their outputs
are not persisted.
Args:
pipeline: the ``Pipeline`` to analyze.
unfinished_nodes: the iterable of ``Node``s which have not finished yet.
catalog: an implemented instance of ``CatalogProtocol`` of the run.
Returns:
A set containing all input unfinished ``Node``s and all remaining
``Node``s that need to run in case their outputs are not persisted.
"""
nodes_to_run = set(unfinished_nodes)
initial_nodes = _nodes_with_external_inputs(unfinished_nodes)
queue, visited = deque(initial_nodes), set(initial_nodes)
while queue:
current_node = queue.popleft()
nodes_to_run.add(current_node)
# Look for parent nodes which produce non-persistent inputs (if those exist)
non_persistent_inputs = _enumerate_non_persistent_inputs(current_node, catalog)
for node in _enumerate_nodes_with_outputs(pipeline, non_persistent_inputs):
if node in visited:
continue
visited.add(node)
queue.append(node)
# Make sure no downstream tasks are skipped
nodes_to_run = set(pipeline.from_nodes(*(n.name for n in nodes_to_run)).nodes)
return nodes_to_run | Breadth-first search approach to finding the complete set of
``Node``s which need to run to cover all unfinished nodes,
including any additional nodes that should be re-run if their outputs
are not persisted.
Args:
pipeline: the ``Pipeline`` to analyze.
unfinished_nodes: the iterable of ``Node``s which have not finished yet.
catalog: an implemented instance of ``CatalogProtocol`` of the run.
Returns:
A set containing all input unfinished ``Node``s and all remaining
``Node``s that need to run in case their outputs are not persisted. | _find_all_nodes_for_resumed_pipeline | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _nodes_with_external_inputs(nodes_of_interest: Iterable[Node]) -> set[Node]:
"""For given ``Node``s , find their subset which depends on
external inputs of the ``Pipeline`` they constitute. External inputs
are pipeline inputs not produced by other ``Node``s in the ``Pipeline``.
Args:
nodes_of_interest: the ``Node``s to analyze.
Returns:
A set of ``Node``s that depend on external inputs
of nodes of interest.
"""
p_nodes_of_interest = Pipeline(nodes_of_interest)
p_nodes_with_external_inputs = p_nodes_of_interest.only_nodes_with_inputs(
*p_nodes_of_interest.inputs()
)
return set(p_nodes_with_external_inputs.nodes) | For given ``Node``s, find their subset which depends on
external inputs of the ``Pipeline`` they constitute. External inputs
are pipeline inputs not produced by other ``Node``s in the ``Pipeline``.
Args:
nodes_of_interest: the ``Node``s to analyze.
Returns:
A set of ``Node``s that depend on external inputs
of nodes of interest. | _nodes_with_external_inputs | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _enumerate_non_persistent_inputs(node: Node, catalog: CatalogProtocol) -> set[str]:
"""Enumerate non-persistent input datasets of a ``Node``.
Args:
node: the ``Node`` to check the inputs of.
catalog: an implemented instance of ``CatalogProtocol`` of the run.
Returns:
Set of names of non-persistent inputs of given ``Node``.
"""
# We use _datasets because its keys retain the parameter name format (e.g. "params:...")
catalog_datasets = catalog._datasets
non_persistent_inputs: set[str] = set()
for node_input in node.inputs:
if node_input.startswith("params:"):
continue
if (
node_input not in catalog_datasets
or catalog_datasets[node_input]._EPHEMERAL
):
non_persistent_inputs.add(node_input)
return non_persistent_inputs | Enumerate non-persistent input datasets of a ``Node``.
Args:
node: the ``Node`` to check the inputs of.
catalog: an implemented instance of ``CatalogProtocol`` of the run.
Returns:
Set of names of non-persistent inputs of given ``Node``. | _enumerate_non_persistent_inputs | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
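A sketch of what counts as non-persistent for the check above: MemoryDataset entries are flagged as ephemeral, and inputs missing from the catalog are treated the same way. The catalog and dataset names are illustrative.
from kedro.io import DataCatalog, MemoryDataset

catalog = DataCatalog({"intermediate": MemoryDataset()})
# "intermediate" -> in the catalog but ephemeral, so non-persistent
# "raw_table"    -> not in the catalog at all, so also non-persistent
# "params:alpha" -> skipped entirely by the check above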
def _enumerate_nodes_with_outputs(
pipeline: Pipeline, outputs: Collection[str]
) -> list[Node]:
"""For given outputs, returns a list containing nodes that
generate them in the given ``Pipeline``.
Args:
pipeline: the ``Pipeline`` to search for nodes in.
outputs: the dataset names to find source nodes for.
Returns:
A list of all ``Node``s that are producing ``outputs``.
"""
parent_pipeline = pipeline.only_nodes_with_outputs(*outputs)
return parent_pipeline.nodes | For given outputs, returns a list containing nodes that
generate them in the given ``Pipeline``.
Args:
pipeline: the ``Pipeline`` to search for nodes in.
outputs: the dataset names to find source nodes for.
Returns:
A list of all ``Node``s that are producing ``outputs``. | _enumerate_nodes_with_outputs | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def _find_initial_node_group(pipeline: Pipeline, nodes: Iterable[Node]) -> list[Node]:
"""Given a collection of ``Node``s in a ``Pipeline``,
find the initial group of ``Node``s to be run (in topological order).
This can be used to define a sub-pipeline with the smallest possible
set of nodes to pass to --from-nodes.
Args:
pipeline: the ``Pipeline`` to search for initial ``Node``s in.
nodes: the ``Node``s to find initial group for.
Returns:
A list of initial ``Node``s to run given inputs (in topological order).
"""
node_names = {n.name for n in nodes}
if len(node_names) == 0:
return []
sub_pipeline = pipeline.only_nodes(*node_names)
initial_nodes = sub_pipeline.grouped_nodes[0]
return initial_nodes | Given a collection of ``Node``s in a ``Pipeline``,
find the initial group of ``Node``s to be run (in topological order).
This can be used to define a sub-pipeline with the smallest possible
set of nodes to pass to --from-nodes.
Args:
pipeline: the ``Pipeline`` to search for initial ``Node``s in.
nodes: the ``Node``s to find initial group for.
Returns:
A list of initial ``Node``s to run given inputs (in topological order). | _find_initial_node_group | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
def run_node(
node: Node,
catalog: CatalogProtocol,
hook_manager: PluginManager,
is_async: bool = False,
session_id: str | None = None,
) -> Node:
"""Run a single `Node` with inputs from and outputs to the `catalog`.
Args:
node: The ``Node`` to run.
catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs.
hook_manager: The ``PluginManager`` to activate hooks.
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
session_id: The session id of the pipeline run.
Raises:
ValueError: Raised if is_async is set to True for nodes wrapping
generator functions.
Returns:
The node argument.
"""
warnings.warn(
"`run_node()` has been deprecated and will be removed in Kedro 0.20.0",
KedroDeprecationWarning,
)
if is_async and inspect.isgeneratorfunction(node.func):
raise ValueError(
f"Async data loading and saving does not work with "
f"nodes wrapping generator functions. Please make "
f"sure you don't use `yield` anywhere "
f"in node {node!s}."
)
task = Task(
node=node,
catalog=catalog,
hook_manager=hook_manager,
is_async=is_async,
session_id=session_id,
)
node = task.execute()
return node | Run a single `Node` with inputs from and outputs to the `catalog`.
Args:
node: The ``Node`` to run.
catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs.
hook_manager: The ``PluginManager`` to activate hooks.
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
session_id: The session id of the pipeline run.
Raises:
ValueError: Raised if is_async is set to True for nodes wrapping
generator functions.
Returns:
The node argument. | run_node | python | kedro-org/kedro | kedro/runner/runner.py | https://github.com/kedro-org/kedro/blob/master/kedro/runner/runner.py | Apache-2.0 |
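Because run_node() is deprecated, the equivalent direct use of Task (which the wrapper above delegates to) looks as follows; node, catalog and hook_manager are hypothetical project objects, so the calls are left commented.
from kedro.runner.task import Task

# task = Task(
#     node=node,
#     catalog=catalog,
#     hook_manager=hook_manager,
#     is_async=False,
#     session_id=None,
# )
# node = task.execute()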
def _uninstall_existing_build() -> None:
"""Uninstall an existing build with the same name as the build to install."""
api_client = _get_api_client()
library_api = LibrariesApi(api_client)
libraries = [
{"whl": f"{DBFS_UPLOAD_PATH.absolute_path}/{_get_build_file_path().name}"}
]
library_api.uninstall_libraries(CLUSTER_ID, libraries)
logging.info("Triggered uninstall of Kedro wheel file on %s", CLUSTER_ID) | Uninstall an existing build with the same name as the build to install. | _uninstall_existing_build | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
def _restart_cluster_if_running() -> None:
"""Restart a Databricks cluster if it is currently running, otherwise no-op."""
api_client = _get_api_client()
cluster_api = ClusterApi(api_client)
if cluster_api.get_cluster(CLUSTER_ID)["state"] == "TERMINATED":
logging.info(
"Cluster %s is not currently running. Launch it manually to apply"
"changes",
CLUSTER_ID,
)
return
logging.info("Cluster %s is being restarted to apply changes.", CLUSTER_ID)
cluster_api.restart_cluster(CLUSTER_ID) | Restart a Databricks cluster if it is currently running, otherwise no-op. | _restart_cluster_if_running | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
def _upload_build_to_dbfs() -> None:
"""Upload the wheel file at the given path to DBFS."""
api_client = _get_api_client()
dbfs_api = DbfsApi(api_client)
src_path = str(_get_build_file_path())
dbfs_api.put_file(
src_path,
DbfsPath(f"{DBFS_UPLOAD_PATH.absolute_path}/{_get_build_file_path().name}"),
overwrite=True,
)
logging.info("Uploaded Kedro wheel file to %s") | Upload the wheel file at the given path to DBFS. | _upload_build_to_dbfs | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
def _install_build() -> None:
"""Install Kedro on the target cluster using the uploaded wheel file"""
api_client = _get_api_client()
library_api = LibrariesApi(api_client)
libraries = [
{"whl": f"{DBFS_UPLOAD_PATH.absolute_path}/{_get_build_file_path().name}"}
]
library_api.install_libraries(CLUSTER_ID, libraries)
logging.info("Triggered install of Kedro wheel file on %s", CLUSTER_ID) | Install Kedro on the target cluster using the uploaded wheel file | _install_build | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
def _get_api_client() -> ApiClient:
"""Create an ApiClient object using the config"""
config = get_config()
if config.is_valid_with_token:
return ApiClient(host=config.host, token=config.token)
return ApiClient(user=config.username, password=config.password, host=config.host) | Create an ApiClient object using the config | _get_api_client | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
def _get_build_file_path() -> Path:
"""Get the path of the whl file to install. If multiple whl files are found,
return the file with the highest version number.
"""
dist_path = Path(__file__).resolve().parent.parent / "dist"
whl_files = list(dist_path.glob("*.whl"))
whl_files.sort()
try:
return whl_files[-1]
except IndexError:
raise ValueError("No wheel files found in dist directory.") | Get the path of the whl file to install. If multiple whl files are found,
return the file with the highest version number. | _get_build_file_path | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
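The lexicographic sort() above can mis-order versions such as 0.9 vs 0.10. A version-aware alternative (an assumption, not the tool's code) could parse the version field of the wheel filename:
from pathlib import Path
from packaging.version import Version

def newest_wheel(whl_files: list[Path]) -> Path:
    # Wheel names look like 'kedro-0.19.9-py3-none-any.whl'; the second
    # dash-separated field is the version.
    return max(whl_files, key=lambda p: Version(p.name.split("-")[1]))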
def main() -> None:
"""Main entry point for the script."""
_uninstall_existing_build()
_restart_cluster_if_running()
_upload_build_to_dbfs()
_install_build() | Main entry point for the script. | main | python | kedro-org/kedro | tools/databricks_build.py | https://github.com/kedro-org/kedro/blob/master/tools/databricks_build.py | Apache-2.0 |
def autolink_replacements(what: str) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]]]:
"""
Create a list containing replacement tuples of the form:
(``regex``, ``replacement``, ``obj``) for all classes and methods which are
imported in ``KEDRO_MODULES`` ``__init__.py`` files. The ``replacement``
is a reStructuredText link to their documentation.
For example, if the docstring reads:
This LambdaDataset loads and saves ...
Then the word ``LambdaDataset`` will be replaced by
:class:`~kedro.io.LambdaDataset`
Works for plural as well, e.g.:
These ``LambdaDataset``s load and save
Will convert to:
These :class:`kedro.io.LambdaDataset` load and save
Args:
what: The objects to create replacement tuples for. Possible values
["class", "func"].
Returns:
Two lists of tuples of the form (regex, replacement, obj): the
replacements for back-ticked references and the suggestions for bare
ones, for all "what" objects imported in __init__.py files of
``KEDRO_MODULES``.
"""
replacements = []
suggestions = []
for module in KEDRO_MODULES:
if what == "class":
objects = get_classes(module)
elif what == "func":
objects = get_functions(module)
# Look for recognised class names/function names which are
# surrounded by double back-ticks
if what == "class":
# first do plural only for classes
replacements += [
(
rf"``{obj}``s",
f":{what}:`~{module}.{obj}`\\\\s",
obj,
)
for obj in objects
]
# singular
replacements += [
(rf"``{obj}``", f":{what}:`~{module}.{obj}`", obj) for obj in objects
]
# Look for recognised class names/function names which are NOT
# surrounded by double back-ticks, so that we can log these in the
# terminal
if what == "class":
# first do plural only for classes
suggestions += [
(rf"(?<!\w|`){obj}s(?!\w|`{{2}})", f"``{obj}``s", obj)
for obj in objects
]
# then singular
suggestions += [
(rf"(?<!\w|`){obj}(?!\w|`{{2}})", f"``{obj}``", obj) for obj in objects
]
return replacements, suggestions | Create a list containing replacement tuples of the form:
(``regex``, ``replacement``, ``obj``) for all classes and methods which are
imported in ``KEDRO_MODULES`` ``__init__.py`` files. The ``replacement``
is a reStructuredText link to their documentation.
For example, if the docstring reads:
This LambdaDataset loads and saves ...
Then the word ``LambdaDataset`` will be replaced by
:class:`~kedro.io.LambdaDataset`
Works for plural as well, e.g.:
These ``LambdaDataset``s load and save
Will convert to:
These :class:`kedro.io.LambdaDataset` load and save
Args:
what: The objects to create replacement tuples for. Possible values
["class", "func"].
Returns:
Two lists of tuples of the form (regex, replacement, obj): the
replacements for back-ticked references and the suggestions for bare
ones, for all "what" objects imported in __init__.py files of
``KEDRO_MODULES``. | autolink_replacements | python | kedro-org/kedro | docs/source/conf.py | https://github.com/kedro-org/kedro/blob/master/docs/source/conf.py | Apache-2.0 |
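An illustration of the substitution the replacement tuples drive; the docstring line is made up, while the pattern and replacement mirror the singular class case built above.
import re

pattern = r"``LambdaDataset``"
replacement = r":class:`~kedro.io.LambdaDataset`"
line = "This ``LambdaDataset`` loads and saves ..."
print(re.sub(pattern, replacement, line))
# -> This :class:`~kedro.io.LambdaDataset` loads and saves ...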
def log_suggestions(lines: list[str], name: str):
"""Use the ``suggestions`` list to log in the terminal places where the
developer has forgotten to surround with double back-ticks class
name/function name references.
Args:
lines: The docstring lines.
name: The name of the object whose docstring is contained in lines.
"""
title_printed = False
for i in range(len(lines)):
if ">>>" in lines[i]:
continue
for existing, replacement, obj in suggestions:
new = re.sub(existing, rf"{replacement}", lines[i])
if new == lines[i]:
continue
if ":rtype:" in lines[i] or ":type " in lines[i]:
continue
if not title_printed:
secho("-" * 50 + "\n" + name + ":\n" + "-" * 50, fg="blue")
title_printed = True
print(
"["
+ str(i)
+ "] "
+ re.sub(existing, rf"{style(obj, fg='magenta')}", lines[i])
)
print(
"["
+ str(i)
+ "] "
+ re.sub(existing, rf"``{style(obj, fg='green')}``", lines[i])
)
if title_printed:
print("\n") | Use the ``suggestions`` list to log in the terminal places where the
developer has forgotten to surround with double back-ticks class
name/function name references.
Args:
lines: The docstring lines.
name: The name of the object whose docstring is contained in lines. | log_suggestions | python | kedro-org/kedro | docs/source/conf.py | https://github.com/kedro-org/kedro/blob/master/docs/source/conf.py | Apache-2.0 |
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to a Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
obj = mod
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
obj = inspect.unwrap(obj)
filename = inspect.getsourcefile(obj)
source, lineno = inspect.getsourcelines(obj)
relpath = os.path.relpath(filename, start=os.path.dirname(
kedro.__file__))
return 'https://github.com/kedro-org/kedro/blob/main/kedro/%s#L%d-L%d' % (
relpath, lineno, lineno + len(source) - 1
)
except (KeyError, ImportError, AttributeError, TypeError, OSError, ValueError):
return None | Resolve a GitHub URL corresponding to a Python object. | linkcode_resolve | python | kedro-org/kedro | docs/source/conf.py | https://github.com/kedro-org/kedro/blob/master/docs/source/conf.py | Apache-2.0 |
def before_all(context):
"""Environment preparation before other cli tests are run.
Installs (core) kedro by running pip in the top level directory.
"""
context = _setup_minimal_env(context)
context = _install_project_requirements(context) | Environment preparation before other cli tests are run.
Installs (core) kedro by running pip in the top level directory. | before_all | python | kedro-org/kedro | features/environment.py | https://github.com/kedro-org/kedro/blob/master/features/environment.py | Apache-2.0 |
def _create_new_venv() -> Path:
"""Create a new venv.
Returns:
path to created venv
"""
# Create venv
venv_dir = _create_tmp_dir()
venv.main([str(venv_dir)])
return venv_dir | Create a new venv.
Returns:
path to created venv | _create_new_venv | python | kedro-org/kedro | features/environment.py | https://github.com/kedro-org/kedro/blob/master/features/environment.py | Apache-2.0 |
def _create_tmp_dir() -> Path:
"""Create a temp directory and add it to _PATHS_TO_REMOVE"""
tmp_dir = Path(tempfile.mkdtemp()).resolve()
_PATHS_TO_REMOVE.add(tmp_dir)
return tmp_dir | Create a temp directory and add it to _PATHS_TO_REMOVE | _create_tmp_dir | python | kedro-org/kedro | features/environment.py | https://github.com/kedro-org/kedro/blob/master/features/environment.py | Apache-2.0 |
def _check_service_up(context: behave.runner.Context, url: str, string: str):
"""Check that a service is running and responding appropriately.
Args:
context: Test context.
url: Url that is to be read.
string: The string to be checked.
"""
response = requests.get(url, timeout=1.0)
response.raise_for_status()
data = response.text
assert string in data
assert context.result.poll() is None | Check that a service is running and responding appropriately.
Args:
context: Test context.
url: Url that is to be read.
string: The string to be checked. | _check_service_up | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def create_config_file(context):
"""Behave step to create a temporary config file
(given the existing temp directory) and store it in the context.
"""
context.config_file = context.temp_dir / "config.yml"
context.project_name = "project-dummy"
context.root_project_dir = context.temp_dir / context.project_name
context.package_name = context.project_name.replace("-", "_")
config = {
"project_name": context.project_name,
"repo_name": context.project_name,
"output_dir": str(context.temp_dir),
"python_package": context.package_name,
}
with context.config_file.open("w") as config_file:
yaml.dump(config, config_file, default_flow_style=False) | Behave step to create a temporary config file
(given the existing temp directory) and store it in the context. | create_config_file | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def create_config_file_without_starter(context):
"""Behave step to create a temporary config file
(given the existing temp directory) and store it in the context.
"""
context.config_file = context.temp_dir / "config.yml"
context.project_name = "project-dummy"
context.root_project_dir = context.temp_dir / context.project_name
context.package_name = context.project_name.replace("-", "_")
config = {
"tools": "lint, test, log, docs, data",
"project_name": context.project_name,
"example_pipeline": "no",
"repo_name": context.project_name,
"output_dir": str(context.temp_dir),
"python_package": context.package_name,
}
with context.config_file.open("w") as config_file:
yaml.dump(config, config_file, default_flow_style=False) | Behave step to create a temporary config file
(given the existing temp directory) and store it in the context. | create_config_file_without_starter | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def create_config_file_with_tools(context, tools):
"""Behave step to create a temporary config file
(given the existing temp directory) and store it in the context.
It takes a custom tools list and sets example prompt to `n`.
"""
context.config_file = context.temp_dir / "config.yml"
context.project_name = "project-dummy"
context.root_project_dir = context.temp_dir / context.project_name
context.package_name = context.project_name.replace("-", "_")
config = {
"tools": tools,
"example_pipeline": "n",
"project_name": context.project_name,
"repo_name": context.project_name,
"output_dir": str(context.temp_dir),
"python_package": context.package_name,
}
with context.config_file.open("w") as config_file:
yaml.dump(config, config_file, default_flow_style=False) | Behave step to create a temporary config file
(given the existing temp directory) and store it in the context.
It takes a custom tools list and sets example prompt to `n`. | create_config_file_with_tools | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def pip_install_dependencies(context):
"""Install project dependencies using pip."""
reqs_path = "requirements.txt"
res = run(
[context.pip, "install", "-r", reqs_path],
env=context.env,
cwd=str(context.root_project_dir),
)
if res.returncode != OK_EXIT_CODE:
print(res.stdout)
print(res.stderr)
assert False | Install project dependencies using pip. | pip_install_dependencies | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def install_project_package_via_pip(context):
"""Install a python package using pip."""
dist_dir = context.root_project_dir / "dist"
(whl_file,) = dist_dir.glob("*.whl")
run([context.pip, "install", str(whl_file)], env=context.env) | Install a python package using pip. | install_project_package_via_pip | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def install_test_plugin(context):
"""Install a python package using pip."""
plugin_dir = Path(__file__).parent / "test_plugin"
res = run([context.pip, "install", "-e", str(plugin_dir)], env=context.env)
assert res.returncode == OK_EXIT_CODE, res | Install a python package using pip. | install_test_plugin | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def disable_plugin_hooks(context, plugin):
"""Set `disable_hooks_for_plugins` in `settings.py`."""
settings_path = (
context.root_project_dir / "src" / context.package_name / "settings.py"
)
to_add = f"""\nDISABLE_HOOKS_FOR_PLUGINS = ("{plugin}",)"""
with settings_path.open("a") as settings_file:
settings_file.write(to_add) | Set `disable_hooks_for_plugins` in `settings.py`. | disable_plugin_hooks | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def use_omegaconfigloader(context):
"""Set `config_loader_class` in `settings.py`."""
settings_path = (
context.root_project_dir / "src" / context.package_name / "settings.py"
)
to_add = """\nfrom kedro.config import OmegaConfigLoader
\nCONFIG_LOADER_CLASS = OmegaConfigLoader"""
with settings_path.open("a") as settings_file:
settings_file.write(to_add) | Set `config_loader_class` in `settings.py`. | use_omegaconfigloader | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def init_git_repo(context):
"""Init git repo"""
with util.chdir(context.root_project_dir):
check_run("git init")
check_run("git config user.name 'Tester'")
check_run("git config user.email '[email protected]'") | Init git repo | init_git_repo | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def add_test_jupyter_nb(context):
"""Create a test jupyter notebook using TEST_JUPYTER_ORG."""
with open(
str(context.root_project_dir / "notebooks" / "hello_world.ipynb"),
"w",
encoding="utf-8",
) as test_nb_fh:
test_nb_fh.write(TEST_JUPYTER_ORG) | Create a test jupyter notebook using TEST_JUPYTER_ORG. | add_test_jupyter_nb | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def create_project_with_starter(context, starter):
"""Behave step to run kedro new given the config I previously created."""
if starter == "default":
starter = Path(__file__).parent / "test_starter"
args = [
context.kedro,
"new",
"-c",
str(context.config_file),
"--starter",
str(starter),
]
res = run(
args,
env=context.env,
cwd=context.temp_dir,
)
assert res.returncode == OK_EXIT_CODE, res | Behave step to run kedro new given the config I previously created. | create_project_with_starter | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def create_project_without_starter(context):
"""Behave step to run kedro new given the config I previously created."""
res = run(
[context.kedro, "new", "-c", str(context.config_file)],
env=context.env,
cwd=context.temp_dir,
)
assert res.returncode == OK_EXIT_CODE, res
# prevent telemetry from prompting for input during e2e tests
telemetry_file = context.root_project_dir / ".telemetry"
telemetry_file.write_text("consent: false", encoding="utf-8") | Behave step to run kedro new given the config I previously created. | create_project_without_starter | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def add_proj_dir_to_staging(context):
"""Add root project dir to staging"""
with util.chdir(context.root_project_dir):
check_run("git add .") | Add root project dir to staging | add_proj_dir_to_staging | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def commit_changes_to_git(context):
"""Commit changes to git"""
with util.chdir(context.root_project_dir):
check_run(f"git commit -m 'Change {time()}'") | Commit changes to git | commit_changes_to_git | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def exec_kedro_target(context, command):
"""Execute Kedro target."""
split_command = command.split()
cmd = [context.kedro, *split_command]
context.result = run(cmd, env=context.env, cwd=str(context.root_project_dir)) | Execute Kedro target. | exec_kedro_target | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def exec_project(context):
"""Execute installed Kedro project target."""
cmd = [str(context.bin_dir / context.project_name)]
# N.B.: prior to the introduction of load_package_context, this test was passing
# accidentally because it was executing the installed project package in the
# same directory as the project root, so a lot of things were available on Path.cwd().
# We take care to delete with `delete_unnecessary_assets` to simulate the behaviour
# of an installed package in a fresh environment.
context.result = run(cmd, env=context.env, cwd=str(context.root_project_dir)) | Execute installed Kedro project target. | exec_project | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def get_kedro_version(context):
"""Behave step to run `kedro -V`."""
res = run([context.kedro, "-V"], env=context.env, cwd=context.temp_dir)
context.version_str = res.stdout
assert context.version_str, res # check non-empty | Behave step to run `kedro -V`. | get_kedro_version | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def get_kedro_version_python(context):
"""Behave step to run `python -m kedro -V`."""
cmd = [context.python, "-m", "kedro", "-V"]
context.version_str = run(cmd, env=context.env, cwd=context.temp_dir).stdout
assert context.version_str # check non-empty | Behave step to run `python -m kedro -V`. | get_kedro_version_python | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def exec_notebook(context, command):
"""Execute Kedro Jupyter target."""
split_command = command.split()
cmd = [context.kedro, "jupyter", *split_command]
# Jupyter notebook forks a child process from a parent process, and
# only kills the parent process when it is terminated
context.result = ChildTerminatingPopen(
cmd, env=context.env, cwd=str(context.root_project_dir), universal_newlines=True
) | Execute Kedro Jupyter target. | exec_notebook | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def wait(context, timeout):
"""Wait for child process to terminate."""
context.result.wait(timeout) | Wait for child process to terminate. | wait | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def simulate_nb_execution(context):
"""Change test jupyter notebook to TEST_JUPYTER_AFTER_EXEC
    to simulate that it was executed and output was saved.
"""
with open(
str(context.root_project_dir / "notebooks" / "hello_world.ipynb"),
"w",
encoding="utf-8",
) as test_nb_fh:
test_nb_fh.write(TEST_JUPYTER_AFTER_EXEC) | Change test jupyter notebook to TEST_JUPYTER_AFTER_EXEC
simulate that it was executed and output was saved. | simulate_nb_execution | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def delete_notebooks_dir(context):
"""Delete notebooks directory in project"""
with util.chdir(context.root_project_dir):
shutil.rmtree("notebooks/") | Delete notebooks directory in project | delete_notebooks_dir | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def do_git_reset_hard(context):
"""Perform a hard git reset"""
with util.chdir(context.root_project_dir):
check_run("git reset --hard HEAD") | Perform a hard git reset | do_git_reset_hard | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def move_package(context: behave.runner.Context, new_source_dir):
"""Move the project package to a new directory."""
current_src_path = (context.root_project_dir / "src").resolve()
new_src_path = (context.root_project_dir / new_source_dir).resolve()
new_src_path.mkdir(exist_ok=True)
shutil.move(str(current_src_path / context.package_name), str(new_src_path)) | Move the project package to a new directory. | move_package | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def update_pyproject_toml(context: behave.runner.Context, new_source_dir):
"""Update `source_dir` in pyproject.toml file."""
pyproject_toml_path = context.root_project_dir / "pyproject.toml"
content = toml.load(pyproject_toml_path)
content["tool"]["kedro"]["source_dir"] = new_source_dir
content_str = toml.dumps(content)
pyproject_toml_path.write_text(content_str) | Update `source_dir` in pyproject.toml file. | update_pyproject_toml | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def update_kedro_req(context: behave.runner.Context):
"""Remove kedro as a standalone requirement."""
reqs_path = context.root_project_dir / "requirements.txt"
if reqs_path.is_file():
old_reqs = reqs_path.read_text().splitlines()
new_reqs = []
for req in old_reqs:
if req.startswith("kedro") and Requirement(req).name.lower() == "kedro":
# Do not include kedro as it's preinstalled in the environment
pass
else:
new_reqs.append(req)
new_reqs = "\n".join(new_reqs)
assert old_reqs != new_reqs
reqs_path.write_text(new_reqs) | Remove kedro as a standalone requirement. | update_kedro_req | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
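The `Requirement(req).name` check above matters because a plain `startswith("kedro")` test would also drop related packages. A minimal sketch of that distinction, using invented requirement strings:

from packaging.requirements import Requirement

# Invented requirements for illustration only.
reqs = ["kedro[test]==0.19.9", "kedro-datasets~=2.0", "pandas>=2.0"]
kept = [
    r
    for r in reqs
    if not (r.startswith("kedro") and Requirement(r).name.lower() == "kedro")
]
# Only the bare kedro pin is removed; kedro-datasets and pandas survive.
assert kept == ["kedro-datasets~=2.0", "pandas>=2.0"]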
def check_kedro_version(context):
"""Behave step to check validity of the kedro version."""
CLI_flat_list = context.version_str.split()
CLI_dictionary = {
CLI_flat_list[i]: CLI_flat_list[i + 1]
for i in range(0, len(CLI_flat_list) - 1, 2)
}
version_no = CLI_dictionary.get("version")
assert version_no == kedro.__version__ | Behave step to check validity of the kedro version. | check_kedro_version | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
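The dictionary comprehension above maps each even-indexed token of the CLI banner to the token that follows it, so the version number can be looked up under the key "version". A toy illustration of that pairing, using an invented banner rather than the real `kedro -V` output:

# Invented token stream; the real banner text may differ between releases.
flat = "package kedro version 0.19.9".split()
pairs = {flat[i]: flat[i + 1] for i in range(0, len(flat) - 1, 2)}
# Even-indexed tokens become keys, each pointing at its successor.
assert pairs == {"package": "kedro", "version": "0.19.9"}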
def is_created(name):
"""Check if path exists."""
return (context.root_project_dir / name).exists() | Check if path exists. | check_created_project_structure.is_created | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_created_project_structure(context):
"""Behave step to check the subdirectories created by kedro new."""
def is_created(name):
"""Check if path exists."""
return (context.root_project_dir / name).exists()
for path in ("README.md", "src", "data"):
assert is_created(path) | Behave step to check the subdirectories created by kedro new. | check_created_project_structure | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def is_created(name):
"""Check if path exists."""
return (context.root_project_dir / name).exists() | Check if path exists. | check_created_project_structure_from_tools.is_created | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_created_project_structure_from_tools(context, tools):
"""Behave step to check the subdirectories created by kedro new with tools."""
def is_created(name):
"""Check if path exists."""
return (context.root_project_dir / name).exists()
# Base checks for any project
for path in ["README.md", "src", "pyproject.toml", "requirements.txt"]:
assert is_created(path), f"{path} does not exist"
tools_list = (
tools.split(",")
if tools != "all"
else ["lint", "test", "log", "docs", "data", "pyspark", "viz"]
)
if "lint" in tools_list: # lint tool
pass # No files are added
if "test" in tools_list: # test tool
assert is_created("tests"), "tests directory does not exist"
if "log" in tools_list: # log tool
assert is_created("conf/logging.yml"), "logging configuration does not exist"
if "docs" in tools_list: # docs tool
assert is_created("docs"), "docs directory does not exist"
if "data" in tools_list: # data tool
assert is_created("data"), "data directory does not exist"
if "pyspark" in tools_list: # PySpark tool
assert is_created("conf/base/spark.yml"), "spark.yml does not exist"
if "viz" in tools_list: # viz tool
expected_reporting_path = Path(
f"src/{context.package_name}/pipelines/reporting"
)
assert is_created(
expected_reporting_path
), "reporting pipeline directory does not exist" | Behave step to check the subdirectories created by kedro new with tools. | check_created_project_structure_from_tools | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_message_printed(context, msg):
"""Check that specified message is printed to stdout (can be a segment)."""
if isinstance(context.result, ChildTerminatingPopen):
stdout = context.result.stdout.read().decode()
context.result.terminate()
else:
stdout = context.result.stdout
clean_logs = util.clean_up_log(stdout)
assert msg in clean_logs, (
"Expected the following message segment to be printed on stdout: "
f"{msg},\nbut got {stdout}"
) | Check that specified message is printed to stdout (can be a segment). | check_message_printed | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_message_not_printed(context, msg):
"""Check that specified message is not printed to stdout."""
if isinstance(context.result, ChildTerminatingPopen):
stdout = context.result.stdout.read().decode()
context.result.terminate()
else:
stdout = context.result.stdout
assert msg not in stdout, (
"Expected the following message segment not to be printed on stdout: "
f"{msg},\nbut got {stdout}"
) | Check that specified message is not printed to stdout. | check_message_not_printed | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_error_message_printed(context, msg):
"""Check that specified message is printed to stderr (can be a segment)."""
if isinstance(context.result, ChildTerminatingPopen):
stderr = context.result.stderr.read().decode()
context.result.terminate()
else:
stderr = context.result.stderr
assert msg in stderr, (
"Expected the following message segment to be printed on stderr: "
f"{msg},\nbut got {stderr}"
) | Check that specified message is printed to stderr (can be a segment). | check_error_message_printed | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_additional_cell_added(context):
"""Check that an addiitonal cell has been added compared to notebook
coded by TEST_JUPYTER_ORG.
"""
with open(
str(context.root_project_dir / "notebooks" / "hello_world.ipynb"),
encoding="utf-8",
) as test_nb_fh:
context.nb_data = json.load(test_nb_fh)
    assert len(context.nb_data["cells"]) == 2  # noqa: PLR2004 | Check that an additional cell has been added compared to the notebook
coded by TEST_JUPYTER_ORG. | check_additional_cell_added | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_output_cells_empty(context):
"""Check that all cells contain empty output array."""
for cell in context.nb_data["cells"]:
assert cell["outputs"] == [] | Check that all cells contain empty output array. | check_output_cells_empty | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_jupyter_nb_proc_on_port(context: behave.runner.Context, port: int):
"""Check that jupyter notebook service is running on specified port.
Args:
context: Test context
port: Port to check
"""
url = f"http://localhost:{port}"
try:
_check_service_up(context, url, "Jupyter Server")
finally:
context.result.terminate() | Check that jupyter notebook service is running on specified port.
Args:
context: Test context
port: Port to check | check_jupyter_nb_proc_on_port | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_jupyter_lab_proc_on_port(context: behave.runner.Context, port: int):
"""Check that jupyter lab service is running on specified port.
Args:
context: Test context
port: Port to check
"""
url = f"http://localhost:{port}"
try:
_check_service_up(context, url, '<a href="/lab"')
finally:
context.result.terminate() | Check that jupyter lab service is running on specified port.
Args:
context: Test context
port: Port to check | check_jupyter_lab_proc_on_port | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_docs_generated(context: behave.runner.Context):
"""Check that new project docs are generated."""
index_html = (
context.root_project_dir / "docs" / "build" / "html" / "index.html"
).read_text("utf-8")
project_repo = context.project_name.replace("-", "_")
assert f"Welcome to project {project_repo}'s API docs!" in index_html, index_html | Check that new project docs are generated. | check_docs_generated | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def check_reqs_generated(context: behave.runner.Context):
"""Check that new project requirements are generated."""
reqs_path = context.root_project_dir / "requirements.lock"
assert reqs_path.is_file()
assert "This file is autogenerated by pip-compile" in reqs_path.read_text() | Check that new project requirements are generated. | check_reqs_generated | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def add_micropkg_to_pyproject_toml(context: behave.runner.Context):
pyproject_toml_path = context.root_project_dir / "pyproject.toml"
project_toml_str = textwrap.dedent(
"""
[tool.kedro.micropkg.package]
"pipelines.data_science" = {alias = "ds"}
"""
)
with pyproject_toml_path.open(mode="a") as file:
file.write(project_toml_str) | [tool.kedro.micropkg.package]
"pipelines.data_science" = {alias = "ds"} | add_micropkg_to_pyproject_toml | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def exec_magic_command(context):
"""Execute Kedro target."""
cmd = [context.python, "ipython_script.py"]
context.result = run(
cmd, env=context.env, cwd=str(context.root_project_dir), print_output=True
) | Execute Kedro target. | exec_magic_command | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def change_dir(context, dir):
"""Execute Kedro target."""
util.chdir(dir) | Execute Kedro target. | change_dir | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def pip_install_project_and_dev_dependencies(context):
"""Install project and its development dependencies using pip."""
_ = run(
[context.pip, "install", ".[dev]"],
env=context.env,
cwd=str(context.root_project_dir),
) | Install project and its development dependencies using pip. | pip_install_project_and_dev_dependencies | python | kedro-org/kedro | features/steps/cli_steps.py | https://github.com/kedro-org/kedro/blob/master/features/steps/cli_steps.py | Apache-2.0 |
def run(
cmd: list | str, split: bool = True, print_output: bool = False, **kwargs: Any
) -> subprocess.CompletedProcess:
"""Run a shell command.
Args:
cmd: A command string, or a command followed by program
arguments that will be submitted to Popen to run.
split: Flag that splits command to provide as multiple *args
to Popen. Default is True.
print_output: If True will print previously captured stdout.
Default is False.
**kwargs: Extra options to pass to subprocess.
Example:
::
"ls"
"ls -la"
"chmod 754 local/file"
Returns:
Result with attributes args, returncode, stdout and stderr.
"""
if isinstance(cmd, str) and split:
cmd = shlex.split(cmd)
result = subprocess.run(cmd, input="", capture_output=True, **kwargs) # noqa: PLW1510, S603
result.stdout = result.stdout.decode("utf-8")
result.stderr = result.stderr.decode("utf-8")
if print_output:
print(result.stdout)
return result | Run a shell command.
Args:
cmd: A command string, or a command followed by program
arguments that will be submitted to Popen to run.
split: Flag that splits command to provide as multiple *args
to Popen. Default is True.
print_output: If True will print previously captured stdout.
Default is False.
**kwargs: Extra options to pass to subprocess.
Example:
::
"ls"
"ls -la"
"chmod 754 local/file"
Returns:
Result with attributes args, returncode, stdout and stderr. | run | python | kedro-org/kedro | features/steps/sh_run.py | https://github.com/kedro-org/kedro/blob/master/features/steps/sh_run.py | Apache-2.0 |
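A short usage sketch for the `run` helper above; the commands are arbitrary examples and assume `git` and `python` are available on PATH:

# A command string is shlex-split before being handed to subprocess.
result = run("git --version")
assert result.returncode == 0, result.stderr
print(result.stdout.strip())

# A pre-split argument list is passed through unchanged.
result = run(["python", "-c", "print('hello')"], print_output=True)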
def check_run(cmd: list | str, print_output: bool = False) -> None:
"""
Run cmd using subprocess.check_call (throws error if non-zero value
returned)
Args:
cmd: command to be run
print_output: whether to print output
"""
if isinstance(cmd, str):
split_cmd = shlex.split(cmd)
else:
split_cmd = cmd
if print_output:
subprocess.check_call(split_cmd) # noqa: S603
else:
subprocess.check_call(split_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: S603 | Run cmd using subprocess.check_call (throws error if non-zero value
returned)
Args:
cmd: command to be run
print_output: whether to print output | check_run | python | kedro-org/kedro | features/steps/sh_run.py | https://github.com/kedro-org/kedro/blob/master/features/steps/sh_run.py | Apache-2.0 |
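For comparison, `check_run` raises instead of returning a result object. A sketch, again with arbitrary example commands:

import subprocess

check_run("git --version")          # succeeds silently
try:
    check_run("git not-a-real-subcommand")
except subprocess.CalledProcessError as exc:
    print(f"command failed with exit code {exc.returncode}")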
def __init__(self, cmd: list[str], **kwargs) -> None:
"""
Initializer pipes stderr and stdout.
Args:
cmd: command to be run.
**kwargs: keyword arguments such as env and cwd
"""
super().__init__(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs
) | Initializer pipes stderr and stdout.
Args:
cmd: command to be run.
**kwargs: keyword arguments such as env and cwd | __init__ | python | kedro-org/kedro | features/steps/sh_run.py | https://github.com/kedro-org/kedro/blob/master/features/steps/sh_run.py | Apache-2.0 |
def terminate(self) -> None:
"""Terminate process and children."""
try:
proc = psutil.Process(self.pid)
procs = [proc, *proc.children(recursive=True)]
except psutil.NoSuchProcess:
pass
else:
for proc in reversed(procs):
try:
proc.terminate()
except psutil.NoSuchProcess:
pass
alive = psutil.wait_procs(procs, timeout=3)[1]
for proc in alive:
proc.kill() | Terminate process and children. | terminate | python | kedro-org/kedro | features/steps/sh_run.py | https://github.com/kedro-org/kedro/blob/master/features/steps/sh_run.py | Apache-2.0 |
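A sketch of how the class might be used; the child command and the timeout are illustrative only:

import subprocess

# Start a child process whose own children must also be cleaned up afterwards.
proc = ChildTerminatingPopen(["python", "-c", "import time; time.sleep(60)"])
try:
    proc.wait(timeout=1)            # still running, so this raises
except subprocess.TimeoutExpired:
    pass
finally:
    proc.terminate()                # kills the process and any children it spawned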
def chdir(path: Path) -> Iterator:
"""Context manager to help execute code in a different directory.
Args:
path: directory to change to.
Yields:
None
"""
old_pwd = os.getcwd()
os.chdir(str(path))
try:
yield
finally:
os.chdir(old_pwd) | Context manager to help execute code in a different directory.
Args:
path: directory to change to.
Yields:
None | chdir | python | kedro-org/kedro | features/steps/util.py | https://github.com/kedro-org/kedro/blob/master/features/steps/util.py | Apache-2.0 |
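A minimal usage sketch; the target path is an arbitrary example:

from pathlib import Path

with chdir(Path("/tmp")):           # any existing directory works here
    print(Path.cwd())               # inside /tmp (or its resolved equivalent)
print(Path.cwd())                   # restored to the original working directory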
def wait_for(
func: Callable,
timeout_: int = 10,
print_error: bool = False,
sleep_for: int = 1,
**kwargs,
) -> Any:
"""Run specified function until it returns expected result until timeout.
Args:
func: Specified function.
timeout_: Time out in seconds. Defaults to 10.
print_error: whether any exceptions raised should be printed.
Defaults to False.
sleep_for: Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func.
Raises:
WaitForException: if func doesn't return expected result within the
specified time.
Returns:
Function return.
"""
end = time() + timeout_
while time() <= end:
try:
result = func(**kwargs)
return result
except Exception as err:
if print_error:
print(err)
sleep(sleep_for)
raise WaitForException(
f"func: {func}, didn't return within specified timeout: {timeout_}"
) | Run specified function until it returns expected result until timeout.
Args:
func: Specified function.
timeout_: Time out in seconds. Defaults to 10.
print_error: whether any exceptions raised should be printed.
Defaults to False.
sleep_for: Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func.
Raises:
WaitForException: if func doesn't return expected result within the
specified time.
Returns:
Function return. | wait_for | python | kedro-org/kedro | features/steps/util.py | https://github.com/kedro-org/kedro/blob/master/features/steps/util.py | Apache-2.0 |
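An illustrative call, polling a hypothetical HTTP endpoint until it responds; the URL and the helper function are invented, and `requests` is assumed to be installed:

import requests

def _service_up(url: str) -> bool:
    response = requests.get(url, timeout=1)
    response.raise_for_status()     # any exception keeps wait_for polling
    return True

wait_for(_service_up, timeout_=30, print_error=True, url="http://localhost:8888")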
def parse_csv(text: str) -> list[str]:
"""Parse comma separated **double quoted** strings in behave steps
Args:
text: double quoted comma separated string
Returns:
List of string tokens
"""
return re.findall(r"\"(.+?)\"\s*,?", text) | Parse comma separated **double quoted** strings in behave steps
Args:
text: double quoted comma separated string
Returns:
List of string tokens | parse_csv | python | kedro-org/kedro | features/steps/util.py | https://github.com/kedro-org/kedro/blob/master/features/steps/util.py | Apache-2.0 |
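A quick sketch of the behaviour with arbitrary example tokens: only double-quoted items are captured, and unquoted text yields nothing.

assert parse_csv('"pandas.CSVDataset", "spark.SparkDataset"') == [
    "pandas.CSVDataset",
    "spark.SparkDataset",
]
assert parse_csv("no quotes here") == []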
def clean_up_log(stdout: str) -> str:
"""
    Cleans up log output by removing duplicate lines, extra whitespace,
and log levels (INFO, WARNING, ERROR) along with .py filenames.
Args:
stdout (str): The log output to be cleaned.
Returns:
str: Cleaned log output without unnecessary information.
"""
cleaned_lines = []
already_extracted = set()
for line in stdout.split("\n"):
if any(word in line for word in ["WARNING", "INFO", "ERROR"]):
# Remove log levels and .py filenames
cleaned_line = re.sub(r"\b(INFO|WARNING|ERROR)\b|\s+\w+\.py:\d+", "", line)
cleaned_lines.append(cleaned_line.strip())
already_extracted.add(line)
elif line not in already_extracted:
cleaned_lines.append(line)
cleaned_output = "\n".join(cleaned_lines)
cleaned_output = re.sub(r"\s+", " ", cleaned_output)
return cleaned_output.strip() | Cleans up log output by removing duplicate lines, extra whitespaces,
and log levels (INFO, WARNING, ERROR) along with .py filenames.
Args:
stdout (str): The log output to be cleaned.
Returns:
str: Cleaned log output without unnecessary information. | clean_up_log | python | kedro-org/kedro | features/steps/util.py | https://github.com/kedro-org/kedro/blob/master/features/steps/util.py | Apache-2.0 |
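A small sketch of the cleaning on an invented log fragment; the exact Kedro log layout may differ:

raw = (
    "INFO     Loading data from 'example_iris_data'      data_catalog.py:343\n"
    "WARNING  Parameter 'foo' not found                  context.py:112\n"
    "Pipeline execution completed successfully."
)
print(clean_up_log(raw))
# Log levels and '<file>.py:<line>' suffixes are stripped and whitespace collapsed:
# Loading data from 'example_iris_data' Parameter 'foo' not found Pipeline execution completed successfully.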
def register_pipelines() -> dict[str, Pipeline]:
"""Register the project's pipelines.
Returns:
A mapping from pipeline names to ``Pipeline`` objects.
"""
pipelines = find_pipelines()
pipelines["__default__"] = sum(pipelines.values())
pipelines["data_processing"] = pipeline(
pipelines["data_engineering"], namespace="data_processing"
)
return pipelines | Register the project's pipelines.
Returns:
A mapping from pipeline names to ``Pipeline`` objects. | register_pipelines | python | kedro-org/kedro | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipeline_registry.py | https://github.com/kedro-org/kedro/blob/master/features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipeline_registry.py | Apache-2.0 |
def split_data(data: pd.DataFrame, example_test_data_ratio: float) -> dict[str, Any]:
"""Node for splitting the classical Iris dataset into training and test
sets, each split into features and labels.
The split ratio parameter is taken from conf/project/parameters.yml.
The data and the parameters will be loaded and provided to your function
automatically when the pipeline is executed and it is time to run this node.
"""
print("load_node executed successfully")
data.columns = [
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"target",
]
classes = sorted(data["target"].unique())
# One-hot encoding for the target variable
data = pd.get_dummies(data, columns=["target"], prefix="", prefix_sep="")
# Shuffle all the data
data = data.sample(frac=1).reset_index(drop=True)
# Split to training and testing data
n = data.shape[0]
n_test = int(n * example_test_data_ratio)
training_data = data.iloc[n_test:, :].reset_index(drop=True)
test_data = data.iloc[:n_test, :].reset_index(drop=True)
# Split the data to features and labels
train_data_x = training_data.loc[:, "sepal_length":"petal_width"]
train_data_y = training_data[classes]
test_data_x = test_data.loc[:, "sepal_length":"petal_width"]
test_data_y = test_data[classes]
# When returning many variables, it is a good practice to give them names:
return dict(
train_x=train_data_x,
train_y=train_data_y,
test_x=test_data_x,
test_y=test_data_y,
) | Node for splitting the classical Iris dataset into training and test
sets, each split into features and labels.
The split ratio parameter is taken from conf/project/parameters.yml.
The data and the parameters will be loaded and provided to your function
automatically when the pipeline is executed and it is time to run this node. | split_data | python | kedro-org/kedro | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_engineering/nodes.py | https://github.com/kedro-org/kedro/blob/master/features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_engineering/nodes.py | Apache-2.0 |
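One detail worth noting in the node above: `.loc` label slices include the end label, so `"sepal_length":"petal_width"` selects exactly the four feature columns and leaves the one-hot target columns out. A toy illustration with invented values:

import pandas as pd

df = pd.DataFrame(
    {
        "sepal_length": [5.1],
        "sepal_width": [3.5],
        "petal_length": [1.4],
        "petal_width": [0.2],
        "setosa": [1],
    }
)
features = df.loc[:, "sepal_length":"petal_width"]
# The end label is included; the one-hot 'setosa' column is not selected.
assert list(features.columns) == [
    "sepal_length",
    "sepal_width",
    "petal_length",
    "petal_width",
]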
def train_model(
train_x: pd.DataFrame, train_y: pd.DataFrame, parameters: dict[str, Any]
) -> np.ndarray:
"""Node for training a simple multi-class logistic regression model. The
number of training iterations as well as the learning rate are taken from
conf/project/parameters.yml. All of the data as well as the parameters
will be provided to this function at the time of execution.
"""
num_iter = parameters["example_num_train_iter"]
lr = parameters["example_learning_rate"]
X = train_x.to_numpy()
Y = train_y.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
weights = []
# Train one model for each class in Y
for k in range(Y.shape[1]):
# Initialise weights
theta = np.zeros(X.shape[1])
y = Y[:, k]
for _ in range(num_iter):
z = np.dot(X, theta)
h = _sigmoid(z)
gradient = np.dot(X.T, (h - y)) / y.size
theta -= lr * gradient
# Save the weights for each model
weights.append(theta)
# Return a joint multi-class model with weights for all classes
return np.vstack(weights).transpose() | Node for training a simple multi-class logistic regression model. The
number of training iterations as well as the learning rate are taken from
conf/project/parameters.yml. All of the data as well as the parameters
will be provided to this function at the time of execution. | train_model | python | kedro-org/kedro | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | https://github.com/kedro-org/kedro/blob/master/features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | Apache-2.0 |
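The inner loop above is plain batch gradient descent on the logistic (cross-entropy) loss, fitted one-vs-rest per class. A condensed sketch of a single update on a tiny invented dataset:

import numpy as np

X = np.array([[1.0, 0.5, 1.2], [1.0, 1.5, 0.3]])   # bias column already prepended
y = np.array([1.0, 0.0])                            # labels for one class
theta = np.zeros(X.shape[1])
lr = 0.1

z = X @ theta                       # linear scores
h = 1 / (1 + np.exp(-z))            # sigmoid, as in the _sigmoid helper below
gradient = X.T @ (h - y) / y.size   # gradient of the log-loss w.r.t. theta
theta -= lr * gradient              # one gradient-descent step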
def predict(model: np.ndarray, test_x: pd.DataFrame) -> np.ndarray:
"""Node for making predictions given a pre-trained model and a test set."""
X = test_x.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
# Predict "probabilities" for each class
result = _sigmoid(np.dot(X, model))
# Return the index of the class with max probability for all samples
return np.argmax(result, axis=1) | Node for making predictions given a pre-trained model and a test set. | predict | python | kedro-org/kedro | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | https://github.com/kedro-org/kedro/blob/master/features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | Apache-2.0 |
def report_accuracy(predictions: np.ndarray, test_y: pd.DataFrame) -> None:
"""Node for reporting the accuracy of the predictions performed by the
previous node. Notice that this function has no outputs, except logging.
"""
# Get true class index
target = np.argmax(test_y.to_numpy(), axis=1)
# Calculate accuracy of predictions
accuracy = np.sum(predictions == target) / target.shape[0]
# Log the accuracy of the model
log = logging.getLogger(__name__)
log.info("Model accuracy on test set: %0.2f%%", accuracy * 100) | Node for reporting the accuracy of the predictions performed by the
previous node. Notice that this function has no outputs, except logging. | report_accuracy | python | kedro-org/kedro | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | https://github.com/kedro-org/kedro/blob/master/features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | Apache-2.0 |
def _sigmoid(z):
"""A helper sigmoid function used by the training and the scoring nodes."""
return 1 / (1 + np.exp(-z)) | A helper sigmoid function used by the training and the scoring nodes. | _sigmoid | python | kedro-org/kedro | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | https://github.com/kedro-org/kedro/blob/master/features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | Apache-2.0 |
def time_init(self):
"""Benchmark the time to initialize the catalog"""
KedroDataCatalog.from_config(base_catalog) | Benchmark the time to initialize the catalog | time_init | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_contains(self):
"""Benchmark the time to check if a dataset exists"""
        for i in range(1, 1001):
f"dataset_{i}" in self.catalog | Benchmark the time to check if a dataset exists | time_contains | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_getitem(self):
"""Benchmark the time to get a dataset"""
        for i in range(1, 1001):
self.catalog[f"dataset_{i}"] | Benchmark the time to get a dataset | time_getitem | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |