Column schema:
  code        string, lengths 26 to 870k
  docstring   string, lengths 1 to 65.6k
  func_name   string, lengths 1 to 194
  language    1 class
  repo        string, lengths 8 to 68
  path        string, lengths 5 to 194
  url         string, lengths 46 to 254
  license     4 classes
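The rows that follow pair each code snippet with its docstring and provenance fields (func_name, language, repo, path, url, license). A minimal sketch of consuming one such record is shown below; the file name catalog_functions.jsonl is only a placeholder, since the dump does not name its source file.

import json

# Hypothetical: assumes the dump has been exported as JSON Lines with the
# columns listed above; the path is a placeholder, not part of the dataset.
with open("catalog_functions.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # Every record carries the eight columns from the schema above.
        print(record["func_name"], record["repo"], record["path"])
        print(record["docstring"][:80])
        break  # inspect just the first row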
def __init__(
    self,
    datasets: dict[str, AbstractDataset] | None = None,
    raw_data: dict[str, Any] | None = None,
    config_resolver: CatalogConfigResolver | None = None,
    load_versions: dict[str, str] | None = None,
    save_version: str | None = None,
) -> None:
    """``KedroDataCatalog`` stores instances of ``AbstractDataset`` implementations to
    provide ``load`` and ``save`` capabilities from anywhere in the program. To use a
    ``KedroDataCatalog``, you need to instantiate it with a dictionary of datasets.
    Then it will act as a single point of reference for your calls, relaying load and
    save functions to the underlying datasets.

    Note: ``KedroDataCatalog`` is an experimental feature and is under active development.
    Therefore, it is possible we'll introduce breaking changes to this class, so be
    mindful of that if you decide to use it already.

    Args:
        datasets: A dictionary of dataset names and dataset instances.
        raw_data: A dictionary with data to be added in memory as ``MemoryDataset``
            instances. Keys represent dataset names and the values are raw data.
        config_resolver: An instance of CatalogConfigResolver to resolve dataset
            patterns and configurations.
        load_versions: A mapping between dataset names and versions
            to load. Has no effect on datasets without enabled versioning.
        save_version: Version string to be used for ``save`` operations
            by all datasets with enabled versioning. It must: a) be a
            case-insensitive string that conforms with operating system
            filename limitations, b) always return the latest version when
            sorted in lexicographical order.

    Example:
    ::

        >>> from kedro_datasets.pandas import CSVDataset
        >>>
        >>> cars = CSVDataset(filepath="cars.csv",
        >>>                   load_args=None,
        >>>                   save_args={"index": False})
        >>> catalog = KedroDataCatalog(datasets={"cars": cars})
    """
    self._config_resolver = config_resolver or CatalogConfigResolver()

    # TODO: rename back to _datasets when removing old catalog
    self.__datasets: dict[str, AbstractDataset] = datasets or {}
    self._lazy_datasets: dict[str, _LazyDataset] = {}
    self._load_versions, self._save_version = _validate_versions(
        datasets, load_versions or {}, save_version
    )
    self._use_rich_markup = _has_rich_handler()

    for ds_name, ds_config in self._config_resolver.config.items():
        self._add_from_config(ds_name, ds_config)

    raw_data = raw_data or {}
    for ds_name, data in raw_data.items():
        self[ds_name] = data  # type: ignore[has-type]
``KedroDataCatalog`` stores instances of ``AbstractDataset`` implementations to provide ``load`` and ``save`` capabilities from anywhere in the program. To use a ``KedroDataCatalog``, you need to instantiate it with a dictionary of datasets. Then it will act as a single point of reference for your calls, relaying load and save functions to the underlying datasets. Note: ``KedroDataCatalog`` is an experimental feature and is under active development. Therefore, it is possible we'll introduce breaking changes to this class, so be mindful of that if you decide to use it already. Args: datasets: A dictionary of dataset names and dataset instances. raw_data: A dictionary with data to be added in memory as ``MemoryDataset`` instances. Keys represent dataset names and the values are raw data. config_resolver: An instance of CatalogConfigResolver to resolve dataset patterns and configurations. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = KedroDataCatalog(datasets={"cars": cars})
__init__
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def __contains__(self, dataset_name: str) -> bool:
    """Check if an item is in the catalog as a materialised dataset or pattern."""
    return (
        dataset_name in self.__datasets
        or dataset_name in self._lazy_datasets
        or self._config_resolver.match_pattern(dataset_name) is not None
    )
Check if an item is in the catalog as a materialised dataset or pattern.
__contains__
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
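A small usage sketch for __contains__: membership covers materialised datasets, lazily configured datasets, and dataset factory patterns. The imports follow the examples elsewhere in these records; MemoryDataset is just a convenient stand-in.

from kedro.io import KedroDataCatalog, MemoryDataset

catalog = KedroDataCatalog(datasets={"cars": MemoryDataset(data=[1, 2, 3])})

print("cars" in catalog)    # True: a materialised dataset with this name exists
print("planes" in catalog)  # False: no dataset registered and no pattern matches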
def __eq__(self, other) -> bool:  # type: ignore[no-untyped-def]
    """Compares two catalogs based on materialised datasets and datasets patterns."""
    return (
        self.__datasets,
        self._lazy_datasets,
        self._config_resolver.list_patterns(),
    ) == (
        other.__datasets,
        other._lazy_datasets,
        other.config_resolver.list_patterns(),
    )
Compares two catalogs based on materialised datasets and datasets patterns.
__eq__
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def keys(self) -> List[str]:  # noqa: UP006
    """List all dataset names registered in the catalog."""
    return list(self._lazy_datasets.keys()) + list(self.__datasets.keys())
List all dataset names registered in the catalog.
keys
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def values(self) -> List[AbstractDataset]:  # noqa: UP006
    """List all datasets registered in the catalog."""
    return [self.get(key) for key in self]
List all datasets registered in the catalog.
values
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def items(self) -> List[tuple[str, AbstractDataset]]:  # noqa: UP006
    """List all dataset names and datasets registered in the catalog."""
    return [(key, self.get(key)) for key in self]
List all dataset names and datasets registered in the catalog.
items
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
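The three dict-like views above (keys, values, items) can be exercised together; a short sketch, again using in-memory data as a stand-in:

from kedro.io import KedroDataCatalog

# raw_data values are wrapped in MemoryDataset instances on construction
catalog = KedroDataCatalog(raw_data={"cars": [1, 2], "boats": [3, 4]})

print(sorted(catalog.keys()))           # ['boats', 'cars']
for name, dataset in catalog.items():   # (name, AbstractDataset) pairs
    print(name, type(dataset).__name__)
for dataset in catalog.values():        # datasets only
    print(dataset.load())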
def __getitem__(self, ds_name: str) -> AbstractDataset:
    """Get a dataset by name from an internal collection of datasets.

    If a dataset is not in the collection but matches any pattern
    it is instantiated and added to the collection first, then returned.

    Args:
        ds_name: A dataset name.

    Returns:
        An instance of AbstractDataset.

    Raises:
        DatasetNotFoundError: When a dataset with the given name
            is not in the collection and does not match patterns.
    """
    return self.get_dataset(ds_name)
Get a dataset by name from an internal collection of datasets. If a dataset is not in the collection but matches any pattern it is instantiated and added to the collection first, then returned. Args: ds_name: A dataset name. Returns: An instance of AbstractDataset. Raises: DatasetNotFoundError: When a dataset with the given name is not in the collection and does not match patterns.
__getitem__
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def __setitem__(self, key: str, value: Any) -> None:
    """Add dataset to the ``KedroDataCatalog`` using the given key as a dataset name
    and the provided data as the value.

    The value can either be raw data or a Kedro dataset (i.e., an instance of a class
    inheriting from ``AbstractDataset``). If raw data is provided, it will be
    automatically wrapped in a ``MemoryDataset`` before being added to the catalog.

    Args:
        key: Name of the dataset.
        value: Raw data or an instance of a class inheriting from ``AbstractDataset``.

    Example:
    ::

        >>> from kedro_datasets.pandas import CSVDataset
        >>> import pandas as pd
        >>>
        >>> df = pd.DataFrame({"col1": [1, 2],
        >>>                    "col2": [4, 5],
        >>>                    "col3": [5, 6]})
        >>>
        >>> catalog = KedroDataCatalog()
        >>> catalog["data_df"] = df  # Add raw data as a MemoryDataset
        >>>
        >>> assert catalog.load("data_df").equals(df)
        >>>
        >>> csv_dataset = CSVDataset(filepath="test.csv")
        >>> csv_dataset.save(df)
        >>> catalog["data_csv_dataset"] = csv_dataset  # Add a dataset instance
        >>>
        >>> assert catalog.load("data_csv_dataset").equals(df)
    """
    if key in self.__datasets:
        self._logger.warning("Replacing dataset '%s'", key)
    if isinstance(value, AbstractDataset):
        self._load_versions, self._save_version = _validate_versions(
            {key: value}, self._load_versions, self._save_version
        )
        self.__datasets[key] = value
    elif isinstance(value, _LazyDataset):
        self._lazy_datasets[key] = value
    else:
        self._logger.info(f"Adding input data as a MemoryDataset - {key}")
        self.__datasets[key] = MemoryDataset(data=value)  # type: ignore[abstract]
Add dataset to the ``KedroDataCatalog`` using the given key as a dataset name and the provided data as the value. The value can either be raw data or a Kedro dataset (i.e., an instance of a class inheriting from ``AbstractDataset``). If raw data is provided, it will be automatically wrapped in a ``MemoryDataset`` before being added to the catalog. Args: key: Name of the dataset. value: Raw data or an instance of a class inheriting from ``AbstractDataset``. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> import pandas as pd >>> >>> df = pd.DataFrame({"col1": [1, 2], >>> "col2": [4, 5], >>> "col3": [5, 6]}) >>> >>> catalog = KedroDataCatalog() >>> catalog["data_df"] = df # Add raw data as a MemoryDataset >>> >>> assert catalog.load("data_df").equals(df) >>> >>> csv_dataset = CSVDataset(filepath="test.csv") >>> csv_dataset.save(df) >>> catalog["data_csv_dataset"] = csv_dataset # Add a dataset instance >>> >>> assert catalog.load("data_csv_dataset").equals(df)
__setitem__
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def get(
    self, key: str, default: AbstractDataset | None = None
) -> AbstractDataset | None:
    """Get a dataset by name from an internal collection of datasets.

    If a dataset is not in the collection but matches any pattern
    it is instantiated and added to the collection first, then returned.

    Args:
        key: A dataset name.
        default: Optional argument for a default dataset to return
            if the requested dataset is not in the catalog.

    Returns:
        An instance of AbstractDataset.
    """
    if key not in self.__datasets and key not in self._lazy_datasets:
        ds_config = self._config_resolver.resolve_pattern(key)
        if ds_config:
            self._add_from_config(key, ds_config)

    lazy_dataset = self._lazy_datasets.pop(key, None)
    if lazy_dataset:
        self[key] = lazy_dataset.materialize()

    dataset = self.__datasets.get(key, None)

    return dataset or default
Get a dataset by name from an internal collection of datasets. If a dataset is not in the collection but matches any pattern it is instantiated and added to the collection first, then returned. Args: key: A dataset name. default: Optional argument for a default dataset to return if the requested dataset is not in the catalog. Returns: An instance of AbstractDataset.
get
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def from_config( cls, catalog: dict[str, dict[str, Any]] | None, credentials: dict[str, dict[str, Any]] | None = None, load_versions: dict[str, str] | None = None, save_version: str | None = None, ) -> KedroDataCatalog: """Create a ``KedroDataCatalog`` instance from configuration. This is a factory method used to provide developers with a way to instantiate ``KedroDataCatalog`` with configuration parsed from configuration files. Args: catalog: A dictionary whose keys are the dataset names and the values are dictionaries with the constructor arguments for classes implementing ``AbstractDataset``. The dataset class to be loaded is specified with the key ``type`` and their fully qualified class name. All ``kedro.io`` dataset can be specified by their class name only, i.e. their module name can be omitted. credentials: A dictionary containing credentials for different datasets. Use the ``credentials`` key in a ``AbstractDataset`` to refer to the appropriate credentials as shown in the example below. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. Returns: An instantiated ``KedroDataCatalog`` containing all specified datasets, created and ready to use. Raises: DatasetNotFoundError: When `load_versions` refers to a dataset that doesn't exist in the catalog. Example: :: >>> config = { >>> "cars": { >>> "type": "pandas.CSVDataset", >>> "filepath": "cars.csv", >>> "save_args": { >>> "index": False >>> } >>> }, >>> "boats": { >>> "type": "pandas.CSVDataset", >>> "filepath": "s3://aws-bucket-name/boats.csv", >>> "credentials": "boats_credentials", >>> "save_args": { >>> "index": False >>> } >>> } >>> } >>> >>> credentials = { >>> "boats_credentials": { >>> "client_kwargs": { >>> "aws_access_key_id": "<your key id>", >>> "aws_secret_access_key": "<your secret>" >>> } >>> } >>> } >>> >>> catalog = KedroDataCatalog.from_config(config, credentials) >>> >>> df = catalog.load("cars") >>> catalog.save("boats", df) """ catalog = catalog or {} config_resolver = CatalogConfigResolver(catalog, credentials) save_version = save_version or generate_timestamp() load_versions = load_versions or {} missing_keys = [ ds_name for ds_name in load_versions if not ( ds_name in config_resolver.config or config_resolver.match_pattern(ds_name) ) ] if missing_keys: raise DatasetNotFoundError( f"'load_versions' keys [{', '.join(sorted(missing_keys))}] " f"are not found in the catalog." ) return cls( load_versions=load_versions, save_version=save_version, config_resolver=config_resolver, )
Create a ``KedroDataCatalog`` instance from configuration. This is a factory method used to provide developers with a way to instantiate ``KedroDataCatalog`` with configuration parsed from configuration files. Args: catalog: A dictionary whose keys are the dataset names and the values are dictionaries with the constructor arguments for classes implementing ``AbstractDataset``. The dataset class to be loaded is specified with the key ``type`` and their fully qualified class name. All ``kedro.io`` dataset can be specified by their class name only, i.e. their module name can be omitted. credentials: A dictionary containing credentials for different datasets. Use the ``credentials`` key in a ``AbstractDataset`` to refer to the appropriate credentials as shown in the example below. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. Returns: An instantiated ``KedroDataCatalog`` containing all specified datasets, created and ready to use. Raises: DatasetNotFoundError: When `load_versions` refers to a dataset that doesn't exist in the catalog. Example: :: >>> config = { >>> "cars": { >>> "type": "pandas.CSVDataset", >>> "filepath": "cars.csv", >>> "save_args": { >>> "index": False >>> } >>> }, >>> "boats": { >>> "type": "pandas.CSVDataset", >>> "filepath": "s3://aws-bucket-name/boats.csv", >>> "credentials": "boats_credentials", >>> "save_args": { >>> "index": False >>> } >>> } >>> } >>> >>> credentials = { >>> "boats_credentials": { >>> "client_kwargs": { >>> "aws_access_key_id": "<your key id>", >>> "aws_secret_access_key": "<your secret>" >>> } >>> } >>> } >>> >>> catalog = KedroDataCatalog.from_config(config, credentials) >>> >>> df = catalog.load("cars") >>> catalog.save("boats", df)
from_config
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def _add_from_config(self, ds_name: str, ds_config: dict[str, Any]) -> None:
    """Create a LazyDataset instance and add it to the catalog.

    Args:
        ds_name: A dataset name.
        ds_config: A dataset configuration.

    Raises:
        DatasetError: When a dataset configuration provided is not valid.
    """
    self._validate_dataset_config(ds_name, ds_config)
    ds = _LazyDataset(
        ds_name,
        ds_config,
        self._load_versions.get(ds_name),
        self._save_version,
    )

    self.add(ds_name, ds)
Create a LazyDataset instance and add it to the catalog. Args: ds_name: A dataset name. ds_config: A dataset configuration. Raises: DatasetError: When a dataset configuration provided is not valid.
_add_from_config
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def get_dataset(
    self, ds_name: str, version: Version | None = None, suggest: bool = True
) -> AbstractDataset:  # TODO: remove when removing old catalog
    """Get a dataset by name from an internal collection of datasets.

    If a dataset is not in the collection but matches any pattern
    it is instantiated and added to the collection first, then returned.

    Args:
        ds_name: A dataset name.
        version: Optional argument for concrete dataset version to be loaded.
            Works only with versioned datasets.
        suggest: Optional argument whether to suggest fuzzy-matching datasets' names
            in the DatasetNotFoundError message.

    Returns:
        An instance of AbstractDataset.

    Raises:
        DatasetNotFoundError: When a dataset with the given name
            is not in the collection and does not match patterns.
    """
    dataset = self.get(ds_name)

    if dataset is None:
        error_msg = f"Dataset '{ds_name}' not found in the catalog"
        # Flag to turn on/off fuzzy-matching which can be time consuming and
        # slow down plugins like `kedro-viz`
        if suggest:
            matches = difflib.get_close_matches(ds_name, self.keys())
            if matches:
                suggestions = ", ".join(matches)
                error_msg += f" - did you mean one of these instead: {suggestions}"
        raise DatasetNotFoundError(error_msg)

    if version and isinstance(dataset, AbstractVersionedDataset):
        # we only want to return a similar-looking dataset,
        # not modify the one stored in the current catalog
        dataset = dataset._copy(_version=version)

    return dataset
Get a dataset by name from an internal collection of datasets. If a dataset is not in the collection but matches any pattern it is instantiated and added to the collection first, then returned. Args: ds_name: A dataset name. version: Optional argument for concrete dataset version to be loaded. Works only with versioned datasets. suggest: Optional argument whether to suggest fuzzy-matching datasets' names in the DatasetNotFoundError message. Returns: An instance of AbstractDataset. Raises: DatasetNotFoundError: When a dataset with the given name is not in the collection and does not match patterns.
get_dataset
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
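A sketch of get_dataset and its fuzzy-matching suggestions; the DatasetNotFoundError import path from kedro.io.core is an assumption here.

from kedro.io import KedroDataCatalog, MemoryDataset
from kedro.io.core import DatasetNotFoundError  # assumed import location

catalog = KedroDataCatalog(datasets={"model_input": MemoryDataset(data=42)})

dataset = catalog.get_dataset("model_input")  # returns the registered dataset
print(type(dataset).__name__)                 # MemoryDataset

try:
    catalog.get_dataset("model_inputs")       # close but unregistered name
except DatasetNotFoundError as exc:
    print(exc)  # message suggests: did you mean one of these instead: model_input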
def add(
    self,
    ds_name: str,
    dataset: AbstractDataset | _LazyDataset,
    replace: bool = False,
) -> None:  # TODO: remove when removing old catalog
    """Adds a new ``AbstractDataset`` object to the ``KedroDataCatalog``."""
    if (
        ds_name in self.__datasets or ds_name in self._lazy_datasets
    ) and not replace:
        raise DatasetAlreadyExistsError(
            f"Dataset '{ds_name}' has already been registered"
        )
    self.__setitem__(ds_name, dataset)
Adds a new ``AbstractDataset`` object to the ``KedroDataCatalog``.
add
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
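add refuses to overwrite an existing name unless told otherwise; a brief sketch:

from kedro.io import KedroDataCatalog, MemoryDataset

catalog = KedroDataCatalog()
catalog.add("scores", MemoryDataset(data=[0.9, 0.8]))

# Re-registering the same name raises DatasetAlreadyExistsError unless
# replace=True is passed, in which case the existing entry is overwritten.
catalog.add("scores", MemoryDataset(data=[0.5]), replace=True)
print(catalog.load("scores"))  # [0.5]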
def filter( self, name_regex: re.Pattern[str] | str | None = None, type_regex: re.Pattern[str] | str | None = None, by_type: type | list[type] | None = None, ) -> List[str]: # noqa: UP006 """Filter dataset names registered in the catalog based on name and/or type. This method allows filtering datasets by their names and/or types. Regular expressions should be precompiled before passing them to `name_regex` or `type_regex`, but plain strings are also supported. Args: name_regex: Optional compiled regex pattern or string to filter dataset names. type_regex: Optional compiled regex pattern or string to filter dataset types. The provided regex is matched against the full dataset type path, for example: `kedro_datasets.pandas.parquet_dataset.ParquetDataset`. by_type: Optional dataset type(s) to filter by. This performs an instance type check rather than a regex match. It can be a single dataset type or a list of types. Returns: A list of dataset names that match the filtering criteria. Example: :: >>> import re >>> catalog = KedroDataCatalog() >>> # get datasets where the substring 'raw' is present >>> raw_data = catalog.filter(name_regex='raw') >>> # get datasets where names start with 'model_' (precompiled regex) >>> model_datasets = catalog.filter(name_regex=re.compile('^model_')) >>> # get datasets of a specific type using type_regex >>> csv_datasets = catalog.filter(type_regex='pandas.excel_dataset.ExcelDataset') >>> # get datasets where names contain 'train' and type matches 'CSV' in the path >>> catalog.filter(name_regex="train", type_regex="CSV") >>> # get datasets where names include 'data' and are of a specific type >>> from kedro_datasets.pandas import SQLQueryDataset >>> catalog.filter(name_regex="data", by_type=SQLQueryDataset) >>> # get datasets where names include 'data' and are of multiple specific types >>> from kedro.io import MemoryDataset >>> catalog.filter(name_regex="data", by_type=[MemoryDataset, SQLQueryDataset]) """ filtered = self.keys() # Apply name filter if specified if name_regex: filtered = [ ds_name for ds_name in filtered if re.search(name_regex, ds_name) ] # Apply type filters if specified by_type_set = set() if by_type: if not isinstance(by_type, list): by_type = [by_type] for _type in by_type: by_type_set.add(f"{_type.__module__}.{_type.__qualname__}") if by_type_set or type_regex: filtered_types = [] for ds_name in filtered: # Retrieve the dataset type if ds_name in self._lazy_datasets: str_type = str(self._lazy_datasets[ds_name]) else: class_type = type(self.__datasets[ds_name]) str_type = f"{class_type.__module__}.{class_type.__qualname__}" # Match against type_regex and apply by_type filtering if (not type_regex or re.search(type_regex, str_type)) and ( not by_type_set or str_type in by_type_set ): filtered_types.append(ds_name) return filtered_types return filtered
Filter dataset names registered in the catalog based on name and/or type. This method allows filtering datasets by their names and/or types. Regular expressions should be precompiled before passing them to `name_regex` or `type_regex`, but plain strings are also supported. Args: name_regex: Optional compiled regex pattern or string to filter dataset names. type_regex: Optional compiled regex pattern or string to filter dataset types. The provided regex is matched against the full dataset type path, for example: `kedro_datasets.pandas.parquet_dataset.ParquetDataset`. by_type: Optional dataset type(s) to filter by. This performs an instance type check rather than a regex match. It can be a single dataset type or a list of types. Returns: A list of dataset names that match the filtering criteria. Example: :: >>> import re >>> catalog = KedroDataCatalog() >>> # get datasets where the substring 'raw' is present >>> raw_data = catalog.filter(name_regex='raw') >>> # get datasets where names start with 'model_' (precompiled regex) >>> model_datasets = catalog.filter(name_regex=re.compile('^model_')) >>> # get datasets of a specific type using type_regex >>> csv_datasets = catalog.filter(type_regex='pandas.excel_dataset.ExcelDataset') >>> # get datasets where names contain 'train' and type matches 'CSV' in the path >>> catalog.filter(name_regex="train", type_regex="CSV") >>> # get datasets where names include 'data' and are of a specific type >>> from kedro_datasets.pandas import SQLQueryDataset >>> catalog.filter(name_regex="data", by_type=SQLQueryDataset) >>> # get datasets where names include 'data' and are of multiple specific types >>> from kedro.io import MemoryDataset >>> catalog.filter(name_regex="data", by_type=[MemoryDataset, SQLQueryDataset])
filter
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def list( self, regex_search: str | None = None, regex_flags: int | re.RegexFlag = 0 ) -> List[str]: # noqa: UP006 # TODO: remove when removing old catalog """List all dataset names registered in the catalog, optionally filtered by a regex pattern. If a regex pattern is provided, only dataset names matching the pattern will be returned. This method supports optional regex flags for customization Args: regex_search: Optional regular expression to filter dataset names. regex_flags: Optional regex flags. Returns: A list of dataset names that match the `regex_search` criteria. If no pattern is provided, all dataset names are returned. Raises: SyntaxError: If the provided regex pattern is invalid. Example: :: >>> catalog = KedroDataCatalog() >>> # get datasets where the substring 'raw' is present >>> raw_data = catalog.list(regex_search='raw') >>> # get datasets which start with 'prm' or 'feat' >>> feat_eng_data = catalog.list(regex_search='^(prm|feat)') >>> # get datasets which end with 'time_series' >>> models = catalog.list(regex_search='.+time_series$') """ if regex_search is None: return self.keys() if regex_search == "": self._logger.warning("The empty string will not match any datasets") return [] if not regex_flags: regex_flags = re.IGNORECASE try: pattern = re.compile(regex_search, flags=regex_flags) except re.error as exc: raise SyntaxError( f"Invalid regular expression provided: '{regex_search}'" ) from exc return [ds_name for ds_name in self.__iter__() if pattern.search(ds_name)]
List all dataset names registered in the catalog, optionally filtered by a regex pattern. If a regex pattern is provided, only dataset names matching the pattern will be returned. This method supports optional regex flags for customization Args: regex_search: Optional regular expression to filter dataset names. regex_flags: Optional regex flags. Returns: A list of dataset names that match the `regex_search` criteria. If no pattern is provided, all dataset names are returned. Raises: SyntaxError: If the provided regex pattern is invalid. Example: :: >>> catalog = KedroDataCatalog() >>> # get datasets where the substring 'raw' is present >>> raw_data = catalog.list(regex_search='raw') >>> # get datasets which start with 'prm' or 'feat' >>> feat_eng_data = catalog.list(regex_search='^(prm|feat)') >>> # get datasets which end with 'time_series' >>> models = catalog.list(regex_search='.+time_series$')
list
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def save(self, name: str, data: Any) -> None: # TODO: rename input argument when breaking change: name -> ds_name """Save data to a registered dataset. Args: name: A dataset to be saved to. data: A data object to be saved as configured in the registered dataset. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> import pandas as pd >>> >>> from kedro.io import KedroDataCatalog >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = KedroDataCatalog(datasets={'cars': cars}) >>> >>> df = pd.DataFrame({'col1': [1, 2], >>> 'col2': [4, 5], >>> 'col3': [5, 6]}) >>> catalog.save("cars", df) """ dataset = self.get_dataset(name) self._logger.info( "Saving data to %s (%s)...", _format_rich(name, "dark_orange") if self._use_rich_markup else name, type(dataset).__name__, extra={"markup": True}, ) dataset.save(data)
Save data to a registered dataset. Args: name: A dataset to be saved to. data: A data object to be saved as configured in the registered dataset. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> import pandas as pd >>> >>> from kedro.io import KedroDataCatalog >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = KedroDataCatalog(datasets={'cars': cars}) >>> >>> df = pd.DataFrame({'col1': [1, 2], >>> 'col2': [4, 5], >>> 'col3': [5, 6]}) >>> catalog.save("cars", df)
save
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def load(self, name: str, version: str | None = None) -> Any: # TODO: rename input argument when breaking change: name -> ds_name # TODO: remove version from input arguments when breaking change """Loads a registered dataset. Args: name: A dataset to be loaded. version: Optional argument for concrete data version to be loaded. Works only with versioned datasets. Returns: The loaded data as configured. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> from kedro.io import KedroDataCatalog >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = KedroDataCatalog(datasets={'cars': cars}) >>> >>> df = catalog.load("cars") """ load_version = Version(version, None) if version else None dataset = self.get_dataset(name, version=load_version) self._logger.info( "Loading data from %s (%s)...", _format_rich(name, "dark_orange") if self._use_rich_markup else name, type(dataset).__name__, extra={"markup": True}, ) return dataset.load()
Loads a registered dataset. Args: name: A dataset to be loaded. version: Optional argument for concrete data version to be loaded. Works only with versioned datasets. Returns: The loaded data as configured. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> from kedro.io import KedroDataCatalog >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = KedroDataCatalog(datasets={'cars': cars}) >>> >>> df = catalog.load("cars")
load
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
def release(self, name: str) -> None:
    """Release any cached data associated with a dataset.

    Args:
        name: A dataset to be checked.

    Raises:
        DatasetNotFoundError: When a dataset with the given name
            has not yet been registered.
    """
    dataset = self.get_dataset(name)
    dataset.release()
Release any cached data associated with a dataset. Args: name: A dataset to be checked. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered.
release
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
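What release does depends on the dataset; for an in-memory dataset it drops the held object. A sketch, with that behaviour stated as an assumption rather than a guarantee:

from kedro.io import KedroDataCatalog, MemoryDataset

catalog = KedroDataCatalog(datasets={"features": MemoryDataset(data={"x": 1})})
print(catalog.load("features"))  # {'x': 1}

# Ask the dataset to drop any cached data it holds. For a MemoryDataset this
# is assumed to clear the stored object, so a later load would fail until new
# data is saved; file-backed datasets typically just invalidate their caches.
catalog.release("features")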
def confirm(self, name: str) -> None:
    """Confirm a dataset by its name.

    Args:
        name: Name of the dataset.

    Raises:
        DatasetError: When the dataset does not have `confirm` method.
    """
    self._logger.info("Confirming dataset '%s'", name)
    dataset = self.get_dataset(name)

    if hasattr(dataset, "confirm"):
        dataset.confirm()
    else:
        raise DatasetError(f"Dataset '{name}' does not have 'confirm' method")
Confirm a dataset by its name. Args: name: Name of the dataset. Raises: DatasetError: When the dataset does not have `confirm` method.
confirm
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
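confirm simply delegates to the dataset's own confirm method and raises otherwise, which the following sketch demonstrates with a dataset that has no such method (the DatasetError import path from kedro.io.core is assumed):

from kedro.io import KedroDataCatalog, MemoryDataset
from kedro.io.core import DatasetError  # assumed import location

catalog = KedroDataCatalog(datasets={"raw_events": MemoryDataset(data=[1, 2])})

try:
    catalog.confirm("raw_events")  # MemoryDataset defines no `confirm` method
except DatasetError as exc:
    print(exc)  # Dataset 'raw_events' does not have 'confirm' method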
def shallow_copy(
    self, extra_dataset_patterns: Patterns | None = None
) -> KedroDataCatalog:  # TODO: remove when removing old catalog
    """Returns a shallow copy of the current object.

    Returns:
        Copy of the current object.
    """
    if extra_dataset_patterns:
        self._config_resolver.add_runtime_patterns(extra_dataset_patterns)
    return self
Returns a shallow copy of the current object. Returns: Copy of the current object.
shallow_copy
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
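Note that this KedroDataCatalog version of shallow_copy returns the same object after registering any runtime patterns; the catch-all pattern below mirrors what a runner might pass and is an assumption here.

from kedro.io import KedroDataCatalog

catalog = KedroDataCatalog()

# Register a runtime catch-all pattern (the exact pattern is illustrative).
same_catalog = catalog.shallow_copy(
    extra_dataset_patterns={"{default}": {"type": "MemoryDataset"}}
)
assert same_catalog is catalog  # the same instance is returned, not a copy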
def exists(self, name: str) -> bool:
    """Checks whether registered dataset exists by calling its `exists()` method.

    Raises a warning and returns False if `exists()` is not implemented.

    Args:
        name: A dataset to be checked.

    Returns:
        Whether the dataset output exists.
    """
    try:
        dataset = self._get_dataset(name)
    except DatasetNotFoundError:
        return False

    return dataset.exists()
Checks whether registered dataset exists by calling its `exists()` method. Raises a warning and returns False if `exists()` is not implemented. Args: name: A dataset to be checked. Returns: Whether the dataset output exists.
exists
python
kedro-org/kedro
kedro/io/kedro_data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/kedro_data_catalog.py
Apache-2.0
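exists returns False both when the dataset is missing from the catalog and when the underlying dataset reports no data; a quick sketch:

from kedro.io import KedroDataCatalog, MemoryDataset

catalog = KedroDataCatalog(datasets={"cars": MemoryDataset(data=[1, 2])})

print(catalog.exists("cars"))     # True: the in-memory dataset holds data
print(catalog.exists("unknown"))  # False: not registered and no pattern matches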
def _sub_nonword_chars(dataset_name: str) -> str:
    """Replace non-word characters in dataset names since Kedro 0.16.2.

    Args:
        dataset_name: The dataset name registered in the data catalog.

    Returns:
        The name used in `DataCatalog.datasets`.
    """
    return re.sub(WORDS_REGEX_PATTERN, "__", dataset_name)
Replace non-word characters in dataset names since Kedro 0.16.2. Args: dataset_name: The dataset name registered in the data catalog. Returns: The name used in `DataCatalog.datasets`.
_sub_nonword_chars
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
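The effect of _sub_nonword_chars is easiest to see on names containing separators. The sketch below re-creates it standalone; the \W+ pattern is an assumption about what WORDS_REGEX_PATTERN contains.

import re

WORDS_REGEX_PATTERN = re.compile(r"\W+")  # assumed definition of the module constant

def _sub_nonword_chars(dataset_name: str) -> str:
    """Replace runs of non-word characters with a double underscore."""
    return re.sub(WORDS_REGEX_PATTERN, "__", dataset_name)

print(_sub_nonword_chars("planes@pandas"))         # planes__pandas
print(_sub_nonword_chars("params:learning_rate"))  # params__learning_rate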
def __init__(
    self,
    *datasets_collections: _FrozenDatasets | dict[str, AbstractDataset] | None,
):
    """Return a _FrozenDatasets instance from some datasets collections.

    Each collection could either be another _FrozenDatasets or a dictionary.
    """
    self._original_names: dict[str, str] = {}
    for collection in datasets_collections:
        if collection is None:
            continue
        if isinstance(collection, _FrozenDatasets):
            self.__dict__.update(collection.__dict__)
            self._original_names.update(collection._original_names)
        else:
            # Non-word characters in dataset names are replaced with `__`
            # for easy access to transcoded/prefixed datasets.
            for dataset_name, dataset in collection.items():
                self.__dict__[_sub_nonword_chars(dataset_name)] = dataset
                self._original_names[dataset_name] = ""
Return a _FrozenDatasets instance from some datasets collections. Each collection could either be another _FrozenDatasets or a dictionary.
__init__
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def __init__( # noqa: PLR0913 self, datasets: dict[str, AbstractDataset] | None = None, feed_dict: dict[str, Any] | None = None, dataset_patterns: Patterns | None = None, # Kept for interface compatibility load_versions: dict[str, str] | None = None, save_version: str | None = None, default_pattern: Patterns | None = None, # Kept for interface compatibility config_resolver: CatalogConfigResolver | None = None, ) -> None: """``DataCatalog`` stores instances of ``AbstractDataset`` implementations to provide ``load`` and ``save`` capabilities from anywhere in the program. To use a ``DataCatalog``, you need to instantiate it with a dictionary of datasets. Then it will act as a single point of reference for your calls, relaying load and save functions to the underlying datasets. Args: datasets: A dictionary of dataset names and dataset instances. feed_dict: A feed dict with data to be added in memory. dataset_patterns: A dictionary of dataset factory patterns and corresponding dataset configuration. When fetched from catalog configuration these patterns will be sorted by: 1. Decreasing specificity (number of characters outside the curly brackets) 2. Decreasing number of placeholders (number of curly bracket pairs) 3. Alphabetically A pattern of specificity 0 is a catch-all pattern and will overwrite the default pattern provided through the runners if it comes before "default" in the alphabet. Such an overwriting pattern will emit a warning. The `"{default}"` name will not emit a warning. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. default_pattern: A dictionary of the default catch-all pattern that overrides the default pattern provided through the runners. config_resolver: An instance of CatalogConfigResolver to resolve dataset patterns and configurations. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = DataCatalog(datasets={'cars': cars}) """ warnings.warn( "`DataCatalog` has been deprecated and will be replaced by `KedroDataCatalog`, in Kedro 1.0.0." "Currently some `KedroDataCatalog` APIs have been retained for compatibility with `DataCatalog`, including " "the `datasets` property and the `get_datasets`, `_get_datasets`, `add`,` list`, `add_feed_dict`, " "and `shallow_copy` methods. These will be removed or replaced with updated alternatives in Kedro 1.0.0. " "For more details, refer to the documentation: " "https://docs.kedro.org/en/stable/data/index.html#kedrodatacatalog-experimental-feature", KedroDeprecationWarning, ) self._config_resolver = config_resolver or CatalogConfigResolver() # Kept to avoid breaking changes if not config_resolver: self._config_resolver._dataset_patterns = dataset_patterns or {} self._config_resolver._default_pattern = default_pattern or {} self._load_versions, self._save_version = _validate_versions( datasets, load_versions or {}, save_version ) self._datasets: dict[str, AbstractDataset] = {} self.datasets: _FrozenDatasets | None = None self.add_all(datasets or {}) self._use_rich_markup = _has_rich_handler() if feed_dict: self.add_feed_dict(feed_dict)
``DataCatalog`` stores instances of ``AbstractDataset`` implementations to provide ``load`` and ``save`` capabilities from anywhere in the program. To use a ``DataCatalog``, you need to instantiate it with a dictionary of datasets. Then it will act as a single point of reference for your calls, relaying load and save functions to the underlying datasets. Args: datasets: A dictionary of dataset names and dataset instances. feed_dict: A feed dict with data to be added in memory. dataset_patterns: A dictionary of dataset factory patterns and corresponding dataset configuration. When fetched from catalog configuration these patterns will be sorted by: 1. Decreasing specificity (number of characters outside the curly brackets) 2. Decreasing number of placeholders (number of curly bracket pairs) 3. Alphabetically A pattern of specificity 0 is a catch-all pattern and will overwrite the default pattern provided through the runners if it comes before "default" in the alphabet. Such an overwriting pattern will emit a warning. The `"{default}"` name will not emit a warning. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. default_pattern: A dictionary of the default catch-all pattern that overrides the default pattern provided through the runners. config_resolver: An instance of CatalogConfigResolver to resolve dataset patterns and configurations. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = DataCatalog(datasets={'cars': cars})
__init__
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def __contains__(self, dataset_name: str) -> bool:
    """Check if an item is in the catalog as a materialised dataset or pattern."""
    return (
        dataset_name in self._datasets
        or self._config_resolver.match_pattern(dataset_name) is not None
    )
Check if an item is in the catalog as a materialised dataset or pattern.
__contains__
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def from_config( cls, catalog: dict[str, dict[str, Any]] | None, credentials: dict[str, dict[str, Any]] | None = None, load_versions: dict[str, str] | None = None, save_version: str | None = None, ) -> DataCatalog: """Create a ``DataCatalog`` instance from configuration. This is a factory method used to provide developers with a way to instantiate ``DataCatalog`` with configuration parsed from configuration files. Args: catalog: A dictionary whose keys are the dataset names and the values are dictionaries with the constructor arguments for classes implementing ``AbstractDataset``. The dataset class to be loaded is specified with the key ``type`` and their fully qualified class name. All ``kedro.io`` dataset can be specified by their class name only, i.e. their module name can be omitted. credentials: A dictionary containing credentials for different datasets. Use the ``credentials`` key in a ``AbstractDataset`` to refer to the appropriate credentials as shown in the example below. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. Returns: An instantiated ``DataCatalog`` containing all specified datasets, created and ready to use. Raises: DatasetError: When the method fails to create any of the data sets from their config. DatasetNotFoundError: When `load_versions` refers to a dataset that doesn't exist in the catalog. Example: :: >>> config = { >>> "cars": { >>> "type": "pandas.CSVDataset", >>> "filepath": "cars.csv", >>> "save_args": { >>> "index": False >>> } >>> }, >>> "boats": { >>> "type": "pandas.CSVDataset", >>> "filepath": "s3://aws-bucket-name/boats.csv", >>> "credentials": "boats_credentials", >>> "save_args": { >>> "index": False >>> } >>> } >>> } >>> >>> credentials = { >>> "boats_credentials": { >>> "client_kwargs": { >>> "aws_access_key_id": "<your key id>", >>> "aws_secret_access_key": "<your secret>" >>> } >>> } >>> } >>> >>> catalog = DataCatalog.from_config(config, credentials) >>> >>> df = catalog.load("cars") >>> catalog.save("boats", df) """ catalog = catalog or {} datasets = {} config_resolver = CatalogConfigResolver(catalog, credentials) save_version = save_version or generate_timestamp() load_versions = load_versions or {} for ds_name in catalog: if not config_resolver.is_pattern(ds_name): datasets[ds_name] = AbstractDataset.from_config( ds_name, config_resolver.config.get(ds_name, {}), load_versions.get(ds_name), save_version, ) missing_keys = [ ds_name for ds_name in load_versions if not ( ds_name in config_resolver.config or config_resolver.match_pattern(ds_name) ) ] if missing_keys: raise DatasetNotFoundError( f"'load_versions' keys [{', '.join(sorted(missing_keys))}] " f"are not found in the catalog." ) return cls( datasets=datasets, dataset_patterns=config_resolver._dataset_patterns, load_versions=load_versions, save_version=save_version, default_pattern=config_resolver._default_pattern, config_resolver=config_resolver, )
Create a ``DataCatalog`` instance from configuration. This is a factory method used to provide developers with a way to instantiate ``DataCatalog`` with configuration parsed from configuration files. Args: catalog: A dictionary whose keys are the dataset names and the values are dictionaries with the constructor arguments for classes implementing ``AbstractDataset``. The dataset class to be loaded is specified with the key ``type`` and their fully qualified class name. All ``kedro.io`` dataset can be specified by their class name only, i.e. their module name can be omitted. credentials: A dictionary containing credentials for different datasets. Use the ``credentials`` key in a ``AbstractDataset`` to refer to the appropriate credentials as shown in the example below. load_versions: A mapping between dataset names and versions to load. Has no effect on datasets without enabled versioning. save_version: Version string to be used for ``save`` operations by all datasets with enabled versioning. It must: a) be a case-insensitive string that conforms with operating system filename limitations, b) always return the latest version when sorted in lexicographical order. Returns: An instantiated ``DataCatalog`` containing all specified datasets, created and ready to use. Raises: DatasetError: When the method fails to create any of the data sets from their config. DatasetNotFoundError: When `load_versions` refers to a dataset that doesn't exist in the catalog. Example: :: >>> config = { >>> "cars": { >>> "type": "pandas.CSVDataset", >>> "filepath": "cars.csv", >>> "save_args": { >>> "index": False >>> } >>> }, >>> "boats": { >>> "type": "pandas.CSVDataset", >>> "filepath": "s3://aws-bucket-name/boats.csv", >>> "credentials": "boats_credentials", >>> "save_args": { >>> "index": False >>> } >>> } >>> } >>> >>> credentials = { >>> "boats_credentials": { >>> "client_kwargs": { >>> "aws_access_key_id": "<your key id>", >>> "aws_secret_access_key": "<your secret>" >>> } >>> } >>> } >>> >>> catalog = DataCatalog.from_config(config, credentials) >>> >>> df = catalog.load("cars") >>> catalog.save("boats", df)
from_config
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def load(self, name: str, version: str | None = None) -> Any: """Loads a registered dataset. Args: name: A dataset to be loaded. version: Optional argument for concrete data version to be loaded. Works only with versioned datasets. Returns: The loaded data as configured. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> from kedro.io import DataCatalog >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = DataCatalog(datasets={'cars': cars}) >>> >>> df = catalog.load("cars") """ load_version = Version(version, None) if version else None dataset = self._get_dataset(name, version=load_version) self._logger.info( "Loading data from %s (%s)...", _format_rich(name, "dark_orange") if self._use_rich_markup else name, type(dataset).__name__, extra={"markup": True}, ) result = dataset.load() return result
Loads a registered dataset. Args: name: A dataset to be loaded. version: Optional argument for concrete data version to be loaded. Works only with versioned datasets. Returns: The loaded data as configured. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> from kedro.io import DataCatalog >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = DataCatalog(datasets={'cars': cars}) >>> >>> df = catalog.load("cars")
load
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def save(self, name: str, data: Any) -> None: """Save data to a registered dataset. Args: name: A dataset to be saved to. data: A data object to be saved as configured in the registered dataset. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> import pandas as pd >>> >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = DataCatalog(datasets={'cars': cars}) >>> >>> df = pd.DataFrame({'col1': [1, 2], >>> 'col2': [4, 5], >>> 'col3': [5, 6]}) >>> catalog.save("cars", df) """ dataset = self._get_dataset(name) self._logger.info( "Saving data to %s (%s)...", _format_rich(name, "dark_orange") if self._use_rich_markup else name, type(dataset).__name__, extra={"markup": True}, ) dataset.save(data)
Save data to a registered dataset. Args: name: A dataset to be saved to. data: A data object to be saved as configured in the registered dataset. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered. Example: :: >>> import pandas as pd >>> >>> from kedro_datasets.pandas import CSVDataset >>> >>> cars = CSVDataset(filepath="cars.csv", >>> load_args=None, >>> save_args={"index": False}) >>> catalog = DataCatalog(datasets={'cars': cars}) >>> >>> df = pd.DataFrame({'col1': [1, 2], >>> 'col2': [4, 5], >>> 'col3': [5, 6]}) >>> catalog.save("cars", df)
save
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def exists(self, name: str) -> bool:
    """Checks whether registered dataset exists by calling its `exists()` method.

    Raises a warning and returns False if `exists()` is not implemented.

    Args:
        name: A dataset to be checked.

    Returns:
        Whether the dataset output exists.
    """
    try:
        dataset = self._get_dataset(name)
    except DatasetNotFoundError:
        return False

    return dataset.exists()
Checks whether registered dataset exists by calling its `exists()` method. Raises a warning and returns False if `exists()` is not implemented. Args: name: A dataset to be checked. Returns: Whether the dataset output exists.
exists
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def release(self, name: str) -> None:
    """Release any cached data associated with a dataset.

    Args:
        name: A dataset to be checked.

    Raises:
        DatasetNotFoundError: When a dataset with the given name
            has not yet been registered.
    """
    dataset = self._get_dataset(name)
    dataset.release()
Release any cached data associated with a dataset. Args: name: A dataset to be checked. Raises: DatasetNotFoundError: When a dataset with the given name has not yet been registered.
release
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def add(
    self,
    dataset_name: str,
    dataset: AbstractDataset,
    replace: bool = False,
) -> None:
    """Adds a new ``AbstractDataset`` object to the ``DataCatalog``.

    Args:
        dataset_name: A unique dataset name which has not been registered yet.
        dataset: A dataset object to be associated with the given data set name.
        replace: Specifies whether replacing an existing dataset
            with the same name is allowed.

    Raises:
        DatasetAlreadyExistsError: When a dataset with the same name
            has already been registered.

    Example:
    ::

        >>> from kedro_datasets.pandas import CSVDataset
        >>>
        >>> catalog = DataCatalog(datasets={
        >>>     'cars': CSVDataset(filepath="cars.csv")
        >>> })
        >>>
        >>> catalog.add("boats", CSVDataset(filepath="boats.csv"))
    """
    if dataset_name in self._datasets:
        if replace:
            self._logger.warning("Replacing dataset '%s'", dataset_name)
        else:
            raise DatasetAlreadyExistsError(
                f"Dataset '{dataset_name}' has already been registered"
            )

    self._load_versions, self._save_version = _validate_versions(
        {dataset_name: dataset}, self._load_versions, self._save_version
    )

    self._datasets[dataset_name] = dataset
    self.datasets = _FrozenDatasets(self.datasets, {dataset_name: dataset})
Adds a new ``AbstractDataset`` object to the ``DataCatalog``. Args: dataset_name: A unique dataset name which has not been registered yet. dataset: A dataset object to be associated with the given data set name. replace: Specifies whether replacing an existing dataset with the same name is allowed. Raises: DatasetAlreadyExistsError: When a dataset with the same name has already been registered. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> >>> catalog = DataCatalog(datasets={ >>> 'cars': CSVDataset(filepath="cars.csv") >>> }) >>> >>> catalog.add("boats", CSVDataset(filepath="boats.csv"))
add
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def add_all(
    self,
    datasets: dict[str, AbstractDataset],
    replace: bool = False,
) -> None:
    """Adds a group of new datasets to the ``DataCatalog``.

    Args:
        datasets: A dictionary of dataset names and dataset instances.
        replace: Specifies whether replacing an existing dataset
            with the same name is allowed.

    Raises:
        DatasetAlreadyExistsError: When a dataset with the same name
            has already been registered.

    Example:
    ::

        >>> from kedro_datasets.pandas import CSVDataset, ParquetDataset
        >>>
        >>> catalog = DataCatalog(datasets={
        >>>     "cars": CSVDataset(filepath="cars.csv")
        >>> })
        >>> additional = {
        >>>     "planes": ParquetDataset("planes.parq"),
        >>>     "boats": CSVDataset(filepath="boats.csv")
        >>> }
        >>>
        >>> catalog.add_all(additional)
        >>>
        >>> assert catalog.list() == ["cars", "planes", "boats"]
    """
    for ds_name, ds in datasets.items():
        self.add(ds_name, ds, replace)
Adds a group of new datasets to the ``DataCatalog``. Args: datasets: A dictionary of dataset names and dataset instances. replace: Specifies whether replacing an existing dataset with the same name is allowed. Raises: DatasetAlreadyExistsError: When a dataset with the same name has already been registered. Example: :: >>> from kedro_datasets.pandas import CSVDataset, ParquetDataset >>> >>> catalog = DataCatalog(datasets={ >>> "cars": CSVDataset(filepath="cars.csv") >>> }) >>> additional = { >>> "planes": ParquetDataset("planes.parq"), >>> "boats": CSVDataset(filepath="boats.csv") >>> } >>> >>> catalog.add_all(additional) >>> >>> assert catalog.list() == ["cars", "planes", "boats"]
add_all
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def add_feed_dict(self, feed_dict: dict[str, Any], replace: bool = False) -> None: """Add datasets to the ``DataCatalog`` using the data provided through the `feed_dict`. `feed_dict` is a dictionary where the keys represent dataset names and the values can either be raw data or Kedro datasets - instances of classes that inherit from ``AbstractDataset``. If raw data is provided, it will be automatically wrapped in a ``MemoryDataset`` before being added to the ``DataCatalog``. Args: feed_dict: A dictionary with data to be added to the ``DataCatalog``. Keys are dataset names and values can be raw data or instances of classes that inherit from ``AbstractDataset``. replace: Specifies whether to replace an existing dataset with the same name in the ``DataCatalog``. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> import pandas as pd >>> >>> df = pd.DataFrame({"col1": [1, 2], >>> "col2": [4, 5], >>> "col3": [5, 6]}) >>> >>> catalog = DataCatalog() >>> catalog.add_feed_dict({ >>> "data_df": df >>> }, replace=True) >>> >>> assert catalog.load("data_df").equals(df) >>> >>> csv_dataset = CSVDataset(filepath="test.csv") >>> csv_dataset.save(df) >>> catalog.add_feed_dict({"data_csv_dataset": csv_dataset}) >>> >>> assert catalog.load("data_csv_dataset").equals(df) """ for ds_name, ds_data in feed_dict.items(): dataset = ( ds_data if isinstance(ds_data, AbstractDataset) else MemoryDataset(data=ds_data) # type: ignore[abstract] ) self.add(ds_name, dataset, replace)
Add datasets to the ``DataCatalog`` using the data provided through the `feed_dict`. `feed_dict` is a dictionary where the keys represent dataset names and the values can either be raw data or Kedro datasets - instances of classes that inherit from ``AbstractDataset``. If raw data is provided, it will be automatically wrapped in a ``MemoryDataset`` before being added to the ``DataCatalog``. Args: feed_dict: A dictionary with data to be added to the ``DataCatalog``. Keys are dataset names and values can be raw data or instances of classes that inherit from ``AbstractDataset``. replace: Specifies whether to replace an existing dataset with the same name in the ``DataCatalog``. Example: :: >>> from kedro_datasets.pandas import CSVDataset >>> import pandas as pd >>> >>> df = pd.DataFrame({"col1": [1, 2], >>> "col2": [4, 5], >>> "col3": [5, 6]}) >>> >>> catalog = DataCatalog() >>> catalog.add_feed_dict({ >>> "data_df": df >>> }, replace=True) >>> >>> assert catalog.load("data_df").equals(df) >>> >>> csv_dataset = CSVDataset(filepath="test.csv") >>> csv_dataset.save(df) >>> catalog.add_feed_dict({"data_csv_dataset": csv_dataset}) >>> >>> assert catalog.load("data_csv_dataset").equals(df)
add_feed_dict
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def list(self, regex_search: str | None = None) -> list[str]: """ List of all dataset names registered in the catalog. This can be filtered by providing an optional regular expression which will only return matching keys. Args: regex_search: An optional regular expression which can be provided to limit the datasets returned by a particular pattern. Returns: A list of dataset names available which match the `regex_search` criteria (if provided). All dataset names are returned by default. Raises: SyntaxError: When an invalid regex filter is provided. Example: :: >>> catalog = DataCatalog() >>> # get datasets where the substring 'raw' is present >>> raw_data = catalog.list(regex_search='raw') >>> # get datasets which start with 'prm' or 'feat' >>> feat_eng_data = catalog.list(regex_search='^(prm|feat)') >>> # get datasets which end with 'time_series' >>> models = catalog.list(regex_search='.+time_series$') """ if regex_search is None: return list(self._datasets.keys()) if not regex_search.strip(): self._logger.warning("The empty string will not match any datasets") return [] try: pattern = re.compile(regex_search, flags=re.IGNORECASE) except re.error as exc: raise SyntaxError( f"Invalid regular expression provided: '{regex_search}'" ) from exc return [ds_name for ds_name in self._datasets if pattern.search(ds_name)]
List of all dataset names registered in the catalog. This can be filtered by providing an optional regular expression which will only return matching keys. Args: regex_search: An optional regular expression which can be provided to limit the datasets returned by a particular pattern. Returns: A list of dataset names available which match the `regex_search` criteria (if provided). All dataset names are returned by default. Raises: SyntaxError: When an invalid regex filter is provided. Example: :: >>> catalog = DataCatalog() >>> # get datasets where the substring 'raw' is present >>> raw_data = catalog.list(regex_search='raw') >>> # get datasets which start with 'prm' or 'feat' >>> feat_eng_data = catalog.list(regex_search='^(prm|feat)') >>> # get datasets which end with 'time_series' >>> models = catalog.list(regex_search='.+time_series$')
list
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def shallow_copy( self, extra_dataset_patterns: Patterns | None = None ) -> DataCatalog: """Returns a shallow copy of the current object. Returns: Copy of the current object. """ if extra_dataset_patterns: self._config_resolver.add_runtime_patterns(extra_dataset_patterns) return self.__class__( datasets=self._datasets, dataset_patterns=self._config_resolver._dataset_patterns, default_pattern=self._config_resolver._default_pattern, load_versions=self._load_versions, save_version=self._save_version, config_resolver=self._config_resolver, )
Returns a shallow copy of the current object. Returns: Copy of the current object.
shallow_copy
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
def confirm(self, name: str) -> None: """Confirm a dataset by its name. Args: name: Name of the dataset. Raises: DatasetError: When the dataset does not have `confirm` method. """ self._logger.info("Confirming dataset '%s'", name) dataset = self._get_dataset(name) if hasattr(dataset, "confirm"): dataset.confirm() else: raise DatasetError(f"Dataset '{name}' does not have 'confirm' method")
Confirm a dataset by its name. Args: name: Name of the dataset. Raises: DatasetError: When the dataset does not have `confirm` method.
confirm
python
kedro-org/kedro
kedro/io/data_catalog.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/data_catalog.py
Apache-2.0
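A short usage sketch for ``confirm`` (not taken from the Kedro docs): only datasets that implement a ``confirm`` method, such as ``IncrementalDataset`` from ``kedro-datasets``, can be confirmed, while anything else raises ``DatasetError`` as the implementation above shows. The dataset name below is made up. ::

    from kedro.io import DataCatalog, MemoryDataset
    from kedro.io.core import DatasetError

    catalog = DataCatalog(datasets={"checkpointed": MemoryDataset(data=[1, 2, 3])})

    try:
        catalog.confirm("checkpointed")  # MemoryDataset has no `confirm` method
    except DatasetError as err:
        print(err)  # Dataset 'checkpointed' does not have 'confirm' method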
def __init__( self, dataset: AbstractDataset | dict, version: Version | None = None, copy_mode: str | None = None, metadata: dict[str, Any] | None = None, ): """Creates a new instance of ``CachedDataset`` pointing to the provided Python object. Args: dataset: A Kedro Dataset object or a dictionary to cache. version: If specified, should be an instance of ``kedro.io.core.Version``. If its ``load`` attribute is None, the latest version will be loaded. If its ``save`` attribute is None, save version will be autogenerated. copy_mode: The copy mode used to copy the data. Possible values are: "deepcopy", "copy" and "assign". If not provided, it is inferred based on the data type. metadata: Any arbitrary metadata. This is ignored by Kedro, but may be consumed by users or external plugins. Raises: ValueError: If the provided dataset is not a valid dict/YAML representation of a dataset or an actual dataset. """ self._EPHEMERAL = True if isinstance(dataset, dict): self._dataset = self._from_config(dataset, version) elif isinstance(dataset, AbstractDataset): self._dataset = dataset else: raise ValueError( "The argument type of 'dataset' should be either a dict/YAML " "representation of the dataset, or the actual dataset object." ) self._cache = MemoryDataset(copy_mode=copy_mode) # type: ignore[abstract] self.metadata = metadata
Creates a new instance of ``CachedDataset`` pointing to the provided Python object. Args: dataset: A Kedro Dataset object or a dictionary to cache. version: If specified, should be an instance of ``kedro.io.core.Version``. If its ``load`` attribute is None, the latest version will be loaded. If its ``save`` attribute is None, save version will be autogenerated. copy_mode: The copy mode used to copy the data. Possible values are: "deepcopy", "copy" and "assign". If not provided, it is inferred based on the data type. metadata: Any arbitrary metadata. This is ignored by Kedro, but may be consumed by users or external plugins. Raises: ValueError: If the provided dataset is not a valid dict/YAML representation of a dataset or an actual dataset.
__init__
python
kedro-org/kedro
kedro/io/cached_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/cached_dataset.py
Apache-2.0
def __init__( self, data: Any = _EMPTY, copy_mode: str | None = None, metadata: dict[str, Any] | None = None, ): """Creates a new instance of ``MemoryDataset`` pointing to the provided Python object. Args: data: Python object containing the data. copy_mode: The copy mode used to copy the data. Possible values are: "deepcopy", "copy" and "assign". If not provided, it is inferred based on the data type. metadata: Any arbitrary metadata. This is ignored by Kedro, but may be consumed by users or external plugins. """ self._data = _EMPTY self._copy_mode = copy_mode self.metadata = metadata self._EPHEMERAL = True if data is not _EMPTY: self.save.__wrapped__(self, data) # type: ignore[attr-defined]
Creates a new instance of ``MemoryDataset`` pointing to the provided Python object. Args: data: Python object containing the data. copy_mode: The copy mode used to copy the data. Possible values are: "deepcopy", "copy" and "assign". If not provided, it is inferred based on the data type. metadata: Any arbitrary metadata. This is ignored by Kedro, but may be consumed by users or external plugins.
__init__
python
kedro-org/kedro
kedro/io/memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/memory_dataset.py
Apache-2.0
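A minimal sketch of how ``copy_mode`` changes what ``load`` hands back, assuming the ``MemoryDataset`` constructor shown above; the sample payload is arbitrary. ::

    from kedro.io import MemoryDataset

    data = {"rows": [1, 2, 3]}

    deep = MemoryDataset(data=data, copy_mode="deepcopy")
    alias = MemoryDataset(data=data, copy_mode="assign")

    assert deep.load() is not data   # independent deep copy on every load
    assert alias.load() is data      # the very same object, no copying at all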
def _infer_copy_mode(data: Any) -> str: """Infers the copy mode to use given the data type. Args: data: The data whose type will be used to infer the copy mode. Returns: One of "copy", "assign" or "deepcopy" as the copy mode to use. """ try: import pandas as pd except ImportError: # pragma: no cover pd = None # type: ignore[assignment] # pragma: no cover try: import numpy as np except ImportError: # pragma: no cover np = None # type: ignore[assignment] # pragma: no cover try: import ibis except ImportError: # pragma: no cover ibis = None # type: ignore[assignment] # pragma: no cover if pd and isinstance(data, pd.DataFrame) or np and isinstance(data, np.ndarray): copy_mode = "copy" elif type(data).__name__ == "DataFrame" or ibis and isinstance(data, ibis.Table): copy_mode = "assign" else: copy_mode = "deepcopy" return copy_mode
Infers the copy mode to use given the data type. Args: data: The data whose type will be used to infer the copy mode. Returns: One of "copy", "assign" or "deepcopy" as the copy mode to use.
_infer_copy_mode
python
kedro-org/kedro
kedro/io/memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/memory_dataset.py
Apache-2.0
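A hedged illustration of the inference rule above, calling the private helper directly (an internal API, so subject to change): pandas and NumPy objects fall into "copy", Spark and Ibis tables into "assign", and everything else into the "deepcopy" fallback. ::

    import numpy as np
    import pandas as pd

    from kedro.io.memory_dataset import _infer_copy_mode

    print(_infer_copy_mode(pd.DataFrame({"a": [1]})))  # copy
    print(_infer_copy_mode(np.array([1, 2, 3])))       # copy
    print(_infer_copy_mode({"a": 1}))                  # deepcopy (the fallback)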
def _copy_with_mode(data: Any, copy_mode: str) -> Any: """Returns the copied data using the copy mode specified. If no copy mode is provided, then it is inferred based on the type of the data. Args: data: The data to copy. copy_mode: The copy mode to use, one of "deepcopy", "copy" and "assign". Raises: DatasetError: If copy_mode is specified, but isn't valid (i.e: not one of deepcopy, copy, assign) Returns: The data copied according to the specified copy mode. """ if copy_mode == "deepcopy": copied_data = copy.deepcopy(data) elif copy_mode == "copy": copied_data = data.copy() elif copy_mode == "assign": copied_data = data else: raise DatasetError( f"Invalid copy mode: {copy_mode}. " f"Possible values are: deepcopy, copy, assign." ) return copied_data
Returns the copied data using the copy mode specified. If no copy mode is provided, then it is inferred based on the type of the data. Args: data: The data to copy. copy_mode: The copy mode to use, one of "deepcopy", "copy" and "assign". Raises: DatasetError: If copy_mode is specified, but isn't valid (i.e: not one of deepcopy, copy, assign) Returns: The data copied according to the specified copy mode.
_copy_with_mode
python
kedro-org/kedro
kedro/io/memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/memory_dataset.py
Apache-2.0
def _is_memory_dataset(ds_or_type: AbstractDataset | str) -> bool: """Check if dataset or str type provided is a MemoryDataset.""" if isinstance(ds_or_type, MemoryDataset): return True if isinstance(ds_or_type, str): return ds_or_type in {"MemoryDataset", "kedro.io.memory_dataset.MemoryDataset"} return False
Check if dataset or str type provided is a MemoryDataset.
_is_memory_dataset
python
kedro-org/kedro
kedro/io/memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/memory_dataset.py
Apache-2.0
def is_pattern(pattern: str) -> bool: """Check if a given string is a pattern. Assume that any name with '{' is a pattern.""" return "{" in pattern
Check if a given string is a pattern. Assume that any name with '{' is a pattern.
is_pattern
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def _pattern_specificity(pattern: str) -> int: """Calculate the specificity of a pattern based on characters outside curly brackets.""" # Remove all the placeholders from the pattern and count the number of remaining chars result = re.sub(r"\{.*?\}", "", pattern) return len(result)
Calculate the specificity of a pattern based on characters outside curly brackets.
_pattern_specificity
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def _sort_patterns(cls, dataset_patterns: Patterns) -> Patterns: """Sort a dictionary of dataset patterns according to parsing rules. In order: 1. Decreasing specificity (number of characters outside the curly brackets) 2. Decreasing number of placeholders (number of curly bracket pairs) 3. Alphabetically """ sorted_keys = sorted( dataset_patterns, key=lambda pattern: ( -(cls._pattern_specificity(pattern)), -pattern.count("{"), pattern, ), ) catch_all = [ pattern for pattern in sorted_keys if cls._pattern_specificity(pattern) == 0 ] if len(catch_all) > 1: raise DatasetError( f"Multiple catch-all patterns found in the catalog: {', '.join(catch_all)}. Only one catch-all pattern is allowed, remove the extras." ) return {key: dataset_patterns[key] for key in sorted_keys}
Sort a dictionary of dataset patterns according to parsing rules. In order: 1. Decreasing specificity (number of characters outside the curly brackets) 2. Decreasing number of placeholders (number of curly bracket pairs) 3. Alphabetically
_sort_patterns
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
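A self-contained sketch of the ordering rules above, re-implemented here for illustration since the real logic lives on ``CatalogConfigResolver`` as classmethods; the example patterns are made up. Note how the catch-all ``{default}`` sorts last, which is what lets it be split off as the user default later. ::

    import re

    def specificity(pattern: str) -> int:
        # characters left over once every "{placeholder}" is stripped
        return len(re.sub(r"\{.*?\}", "", pattern))

    patterns = ["{default}", "{namespace}.{name}", "{name}@csv", "preprocessed_{name}"]
    ordered = sorted(patterns, key=lambda p: (-specificity(p), -p.count("{"), p))
    print(ordered)
    # ['preprocessed_{name}', '{name}@csv', '{namespace}.{name}', '{default}']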
def _fetch_credentials(credentials_name: str, credentials: dict[str, Any]) -> Any: """Fetch the specified credentials from the provided credentials dictionary. Args: credentials_name: Credentials name. credentials: A dictionary with all credentials. Returns: The set of requested credentials. Raises: KeyError: When credentials with the given name cannot be found in the provided credentials dictionary. """ try: return credentials[credentials_name] except KeyError as exc: raise KeyError( f"Unable to find credentials '{credentials_name}': check your data " "catalog and credentials configuration. See " "https://kedro.readthedocs.io/en/stable/kedro.io.DataCatalog.html " "for an example." ) from exc
Fetch the specified credentials from the provided credentials dictionary. Args: credentials_name: Credentials name. credentials: A dictionary with all credentials. Returns: The set of requested credentials. Raises: KeyError: When credentials with the given name cannot be found in the provided credentials dictionary.
_fetch_credentials
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def _resolve_credentials( cls, config: dict[str, Any], credentials: dict[str, Any] ) -> dict[str, Any]: """Return the dataset configuration where credentials are resolved using credentials dictionary provided. Args: config: Original dataset config, which may contain unresolved credentials. credentials: A dictionary with all credentials. Returns: The dataset config, where all the credentials are successfully resolved. """ config = copy.deepcopy(config) def _resolve_value(key: str, value: Any) -> Any: if key == CREDENTIALS_KEY and isinstance(value, str): return cls._fetch_credentials(value, credentials) if isinstance(value, dict): return {k: _resolve_value(k, v) for k, v in value.items()} return value return {k: _resolve_value(k, v) for k, v in config.items()}
Return the dataset configuration where credentials are resolved using credentials dictionary provided. Args: config: Original dataset config, which may contain unresolved credentials. credentials: A dictionary with all credentials. Returns: The dataset config, where all the credentials are successfully resolved.
_resolve_credentials
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def _validate_pattern_config(cls, ds_name: str, ds_config: dict[str, Any]) -> None: """Checks whether a dataset factory pattern configuration is valid - all keys used in the configuration must be present in the dataset factory pattern name. Args: ds_name: Dataset factory pattern name. ds_config: Dataset pattern configuration. Raises: DatasetError: when keys used in the configuration are not present in the dataset factory pattern name. """ # Find all occurrences of {} in the string including brackets search_regex = r"\{.*?\}" name_placeholders = set(re.findall(search_regex, ds_name)) config_placeholders = set() def _traverse_config(config: Any) -> None: if isinstance(config, dict): for value in config.values(): _traverse_config(value) elif isinstance(config, (list, tuple)): for value in config: _traverse_config(value) elif isinstance(config, str) and "}" in config: config_placeholders.update(set(re.findall(search_regex, config))) _traverse_config(ds_config) if config_placeholders - name_placeholders: raise DatasetError( f"Incorrect dataset configuration provided. " f"Keys used in the configuration {config_placeholders - name_placeholders} " f"should be present in the dataset factory pattern name {ds_name}." )
Checks whether a dataset factory pattern configuration is valid - all keys used in the configuration must be present in the dataset factory pattern name. Args: ds_name: Dataset factory pattern name. ds_config: Dataset pattern configuration. Raises: DatasetError: when keys used in the configuration are not present in the dataset factory pattern name.
_validate_pattern_config
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def _resolve_dataset_config( cls, ds_name: str, pattern: str, config: Any, ) -> Any: """Resolve dataset configuration based on the provided pattern.""" resolved_vars = parse(pattern, ds_name) # Resolve the factory config for the dataset if isinstance(config, dict): for key, value in config.items(): config[key] = cls._resolve_dataset_config(ds_name, pattern, value) elif isinstance(config, (list, tuple)): config = [ cls._resolve_dataset_config(ds_name, pattern, value) for value in config ] elif isinstance(config, str) and "}" in config: config = config.format_map(resolved_vars.named) return config
Resolve dataset configuration based on the provided pattern.
_resolve_dataset_config
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
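A small sketch of the substitution step above, using the same ``parse`` library the resolver relies on; the dataset name and filepath template are illustrative. ::

    from parse import parse

    pattern = "{namespace}.{name}"
    ds_name = "ingestion.shuttles"

    resolved_vars = parse(pattern, ds_name)
    print(resolved_vars.named)  # {'namespace': 'ingestion', 'name': 'shuttles'}

    template = "data/01_raw/{namespace}/{name}.csv"
    print(template.format_map(resolved_vars.named))  # data/01_raw/ingestion/shuttles.csv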
def list_patterns(self) -> list[str]: """List all patterns available in the catalog.""" return ( list(self._dataset_patterns.keys()) + list(self._default_pattern.keys()) + list(self._runtime_patterns.keys()) )
List all patterns available in the catalog.
list_patterns
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def match_pattern(self, ds_name: str) -> str | None: """Match a dataset name against patterns in a dictionary.""" all_patterns = self.list_patterns() matches = (pattern for pattern in all_patterns if parse(pattern, ds_name)) return next(matches, None)
Match a dataset name against patterns in a dictionary.
match_pattern
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def _extract_patterns( cls, config: dict[str, dict[str, Any]] | None, credentials: dict[str, dict[str, Any]] | None, ) -> tuple[Patterns, Patterns]: """Extract and sort patterns from the configuration.""" config = config or {} credentials = credentials or {} dataset_patterns = {} user_default = {} for ds_name, ds_config in config.items(): if cls.is_pattern(ds_name): cls._validate_pattern_config(ds_name, ds_config) dataset_patterns[ds_name] = cls._resolve_credentials( ds_config, credentials ) sorted_patterns = cls._sort_patterns(dataset_patterns) if sorted_patterns: # If the last pattern is a catch-all pattern, pop it and set it as the default if cls._pattern_specificity(list(sorted_patterns.keys())[-1]) == 0: last_pattern = sorted_patterns.popitem() user_default = {last_pattern[0]: last_pattern[1]} return sorted_patterns, user_default
Extract and sort patterns from the configuration.
_extract_patterns
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def resolve_credentials( cls, config: dict[str, dict[str, Any]] | None, credentials: dict[str, dict[str, Any]] | None, ) -> dict[str, dict[str, Any]]: """Initialize the dataset configuration with resolved credentials.""" config = config or {} credentials = credentials or {} resolved_configs = {} for ds_name, ds_config in config.items(): if not isinstance(ds_config, dict): raise DatasetError( f"Catalog entry '{ds_name}' is not a valid dataset configuration. " "\nHint: If this catalog entry is intended for variable interpolation, " "make sure that the key is preceded by an underscore." ) if not cls.is_pattern(ds_name): resolved_configs[ds_name] = cls._resolve_credentials( ds_config, credentials ) return resolved_configs
Initialize the dataset configuration with resolved credentials.
resolve_credentials
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
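A hedged example of how a ``credentials`` key in a catalog entry is swapped for the matching entry from the credentials dictionary; the dataset type, bucket and key values below are invented for illustration. ::

    from kedro.io.catalog_config_resolver import CatalogConfigResolver

    config = {
        "reviews": {
            "type": "pandas.CSVDataset",
            "filepath": "s3://my-bucket/reviews.csv",
            "credentials": "dev_s3",
        }
    }
    credentials = {"dev_s3": {"key": "dummy-key", "secret": "dummy-secret"}}

    resolved = CatalogConfigResolver.resolve_credentials(config, credentials)
    print(resolved["reviews"]["credentials"])  # {'key': 'dummy-key', 'secret': 'dummy-secret'}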
def unresolve_credentials( cred_name: str, ds_config: dict[str, dict[str, Any]] | None ) -> tuple[dict[str, dict[str, Any]], dict[str, dict[str, Any]]]: """Extracts and replaces credentials in a dataset configuration with references, ensuring separation of credentials from the dataset configuration. Credentials are searched for recursively in the dataset configuration. The first occurrence of the `CREDENTIALS_KEY` is replaced with a generated reference key. Args: cred_name: A unique identifier for the credentials being unresolved. This is used to generate a reference key for the credentials. ds_config: The dataset configuration containing potential credentials under the key `CREDENTIALS_KEY`. Returns: A tuple containing: ds_config_copy : A deep copy of the original dataset configuration with credentials replaced by reference keys. credentials: A dictionary mapping generated reference keys to the original credentials. """ ds_config_copy = copy.deepcopy(ds_config) or {} credentials: dict[str, Any] = {} credentials_ref = f"{cred_name}_{CREDENTIALS_KEY}" def unresolve(config: Any) -> None: # We don't expect credentials key appears more than once within the same dataset config, # So once we found the key first time we unresolve it and stop iterating after for key, val in config.items(): if key == CREDENTIALS_KEY and config[key]: credentials[credentials_ref] = config[key] config[key] = credentials_ref return if isinstance(val, dict): unresolve(val) unresolve(ds_config_copy) return ds_config_copy, credentials
Extracts and replaces credentials in a dataset configuration with references, ensuring separation of credentials from the dataset configuration. Credentials are searched for recursively in the dataset configuration. The first occurrence of the `CREDENTIALS_KEY` is replaced with a generated reference key. Args: cred_name: A unique identifier for the credentials being unresolved. This is used to generate a reference key for the credentials. ds_config: The dataset configuration containing potential credentials under the key `CREDENTIALS_KEY`. Returns: A tuple containing: ds_config_copy : A deep copy of the original dataset configuration with credentials replaced by reference keys. credentials: A dictionary mapping generated reference keys to the original credentials.
unresolve_credentials
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def resolve_pattern(self, ds_name: str) -> dict[str, Any]: """Resolve dataset patterns and return resolved configurations based on the existing patterns.""" matched_pattern = self.match_pattern(ds_name) if matched_pattern and ds_name not in self._resolved_configs: pattern_config = self._get_pattern_config(matched_pattern) ds_config = self._resolve_dataset_config( ds_name, matched_pattern, copy.deepcopy(pattern_config) ) if ( self._pattern_specificity(matched_pattern) == 0 and matched_pattern in self._default_pattern ): self._logger.warning( "Config from the dataset factory pattern '%s' in the catalog will be used to " "override the default dataset creation for '%s'", matched_pattern, ds_name, ) return ds_config # type: ignore[no-any-return] return self._resolved_configs.get(ds_name, {})
Resolve dataset patterns and return resolved configurations based on the existing patterns.
resolve_pattern
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def add_runtime_patterns(self, dataset_patterns: Patterns) -> None: """Add new runtime patterns and re-sort them.""" self._runtime_patterns = {**self._runtime_patterns, **dataset_patterns} self._runtime_patterns = self._sort_patterns(self._runtime_patterns)
Add new runtime patterns and re-sort them.
add_runtime_patterns
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def remove_runtime_patterns(self, dataset_patterns: Patterns) -> None: """Remove runtime patterns and re-sort them.""" for pattern_name in dataset_patterns: if pattern_name in self._runtime_patterns: del self._runtime_patterns[pattern_name] self._runtime_patterns = self._sort_patterns(self._runtime_patterns)
Remove runtime patterns and re-sort them.
remove_runtime_patterns
python
kedro-org/kedro
kedro/io/catalog_config_resolver.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/catalog_config_resolver.py
Apache-2.0
def __init__( self, load: Callable[[], Any] | None, save: Callable[[Any], None] | None, exists: Callable[[], bool] | None = None, release: Callable[[], None] | None = None, metadata: dict[str, Any] | None = None, ): """Creates a new instance of ``LambdaDataset`` with references to the required input/output dataset methods. Args: load: Method to load data from a dataset. save: Method to save data to a dataset. exists: Method to check whether output data already exists. release: Method to release any cached information. metadata: Any arbitrary metadata. This is ignored by Kedro, but may be consumed by users or external plugins. Raises: DatasetError: If a method is specified, but is not a Callable. """ warnings.warn( "`LambdaDataset` has been deprecated and will be removed in Kedro 0.20.0.", KedroDeprecationWarning, ) for name, value in [ ("load", load), ("save", save), ("exists", exists), ("release", release), ]: if value is not None and not callable(value): raise DatasetError( f"'{name}' function for LambdaDataset must be a Callable. " f"Object of type '{value.__class__.__name__}' provided instead." ) self.__load = load self.__save = save self.__exists = exists self.__release = release self.metadata = metadata
Creates a new instance of ``LambdaDataset`` with references to the required input/output dataset methods. Args: load: Method to load data from a dataset. save: Method to save data to a dataset. exists: Method to check whether output data already exists. release: Method to release any cached information. metadata: Any arbitrary metadata. This is ignored by Kedro, but may be consumed by users or external plugins. Raises: DatasetError: If a method is specified, but is not a Callable.
__init__
python
kedro-org/kedro
kedro/io/lambda_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/lambda_dataset.py
Apache-2.0
def __init__(self, manager: SyncManager | None = None): """Creates a new instance of ``SharedMemoryDataset``, and creates shared MemoryDataset attribute. Args: manager: An instance of multiprocessing manager for shared objects. """ self._EPHEMERAL = True if manager: self.shared_memory_dataset = manager.MemoryDataset() # type: ignore[attr-defined] else: self.shared_memory_dataset = None
Creates a new instance of ``SharedMemoryDataset``, and creates shared MemoryDataset attribute. Args: manager: An instance of multiprocessing manager for shared objects.
__init__
python
kedro-org/kedro
kedro/io/shared_memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/shared_memory_dataset.py
Apache-2.0
def save(self, data: Any) -> None: """Calls save method of a shared MemoryDataset in SyncManager.""" try: self.shared_memory_dataset.save(data) except Exception as exc: # Checks if the error is due to serialisation or not try: pickle.dumps(data) except Exception as serialisation_exc: # SKIP_IF_NO_SPARK raise DatasetError( f"{data.__class__!s} cannot be serialised. ParallelRunner " "implicit memory datasets can only be used with serialisable data" ) from serialisation_exc raise exc # pragma: no cover
Calls save method of a shared MemoryDataset in SyncManager.
save
python
kedro-org/kedro
kedro/io/shared_memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/shared_memory_dataset.py
Apache-2.0
def _describe(self) -> dict[str, Any]: """SharedMemoryDataset doesn't have any constructor argument to return.""" return {}
SharedMemoryDataset doesn't have any constructor argument to return.
_describe
python
kedro-org/kedro
kedro/io/shared_memory_dataset.py
https://github.com/kedro-org/kedro/blob/master/kedro/io/shared_memory_dataset.py
Apache-2.0
def _remove_from_file(file_path: Path, content_to_remove: str) -> None: """Remove specified content from the file. Args: file_path (Path): The path of the file from which to remove content. content_to_remove (str): The content to be removed from the file. """ with open(file_path) as file: lines = file.readlines() # Split the content to remove into lines and remove trailing whitespaces/newlines content_to_remove_lines = [line.strip() for line in content_to_remove.split("\n")] # Keep lines that are not in content_to_remove lines = [line for line in lines if line.strip() not in content_to_remove_lines] with open(file_path, "w") as file: file.writelines(lines)
Remove specified content from the file. Args: file_path (Path): The path of the file from which to remove content. content_to_remove (str): The content to be removed from the file.
_remove_from_file
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def _remove_nested_section(data: dict, nested_key: str) -> None: """Remove a nested section from a dictionary representing a TOML file. Args: data (dict): The dictionary from which to remove the section. nested_key (str): The dotted path key representing the nested section to remove. """ keys = nested_key.split(".") current_data = data # Look for Parent section for key in keys[:-1]: # Iterate over all but last element if key in current_data: current_data = current_data[key] else: return # Parent section not found, nothing to remove # Remove the nested section and any empty parent sections current_data.pop(keys[-1], None) # Remove last element otherwise return None for key in reversed(keys[:-1]): parent_section = data for key_part in keys[: keys.index(key)]: parent_section = parent_section[key_part] if not current_data: # If the section is empty, remove it parent_section.pop(key, None) current_data = parent_section else: break # If the section is not empty, stop removing
Remove a nested section from a dictionary representing a TOML file. Args: data (dict): The dictionary from which to remove the section. nested_key (str): The dotted path key representing the nested section to remove.
_remove_nested_section
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
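An illustrative, self-contained re-implementation of the pruning behaviour described above (the helper itself lives in the project template hooks, so it is not importable from the ``kedro`` package); the TOML sections are hypothetical. ::

    def remove_nested_section(data: dict, nested_key: str) -> None:
        # Drop the leaf section, then prune any parents it leaves empty.
        keys = nested_key.split(".")
        parents, current = [], data
        for key in keys[:-1]:
            if key not in current:
                return
            parents.append((current, key))
            current = current[key]
        current.pop(keys[-1], None)
        for parent, key in reversed(parents):
            if parent[key]:
                break
            parent.pop(key)

    data = {"tool": {"pytest": {"ini_options": {"addopts": "--cov"}}, "kedro": {"package_name": "demo"}}}
    remove_nested_section(data, "tool.pytest.ini_options")
    print(data)  # {'tool': {'kedro': {'package_name': 'demo'}}}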
def _remove_from_toml(file_path: Path, sections_to_remove: list) -> None: """Remove specified sections from a TOML file. Args: file_path (Path): The path to the TOML file. sections_to_remove (list): A list of section keys to remove from the TOML file. """ # Load the TOML file with open(file_path) as file: data = toml.load(file) # Remove the specified sections for section in sections_to_remove: _remove_nested_section(data, section) with open(file_path, "w") as file: toml.dump(data, file)
Remove specified sections from a TOML file. Args: file_path (Path): The path to the TOML file. sections_to_remove (list): A list of section keys to remove from the TOML file.
_remove_from_toml
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def _remove_dir(path: Path) -> None: """Remove a directory if it exists. Args: path (Path): The path of the directory to remove. """ if path.exists(): shutil.rmtree(str(path))
Remove a directory if it exists. Args: path (Path): The path of the directory to remove.
_remove_dir
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def _remove_file(path: Path) -> None: """Remove a file if it exists. Args: path (Path): The path of the file to remove. """ if path.exists(): path.unlink()
Remove a file if it exists. Args: path (Path): The path of the file to remove.
_remove_file
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def _remove_pyspark_starter_files(python_package_name: str) -> None: """Clean up the unnecessary files in the starters template. Args: python_package_name (str): The name of the python package. """ # Remove all .csv and .xlsx files from data/01_raw/ raw_data_path = current_dir / "data/01_raw/" for file_path in raw_data_path.glob("*.*"): if file_path.suffix in [".csv", ".xlsx"]: file_path.unlink() # Empty the contents of conf/base/catalog.yml catalog_yml_path = current_dir / "conf/base/catalog.yml" if catalog_yml_path.exists(): catalog_yml_path.write_text("") # Remove parameter files from conf/base conf_base_path = current_dir / "conf/base/" parameter_file_patterns = ["parameters_*.yml", "parameters/*.yml"] for pattern in parameter_file_patterns: for param_file in conf_base_path.glob(pattern): _remove_file(param_file) # Remove the pipelines subdirectories pipelines_to_remove = ["data_science", "data_processing", "reporting"] pipelines_path = current_dir / f"src/{python_package_name}/pipelines/" for pipeline_subdir in pipelines_to_remove: _remove_dir(pipelines_path / pipeline_subdir) # Remove all test files and subdirectories from tests/pipelines/ test_pipeline_path = current_dir / "tests/pipelines/data_science/test_pipeline.py" _remove_file(test_pipeline_path) _remove_dir(current_dir / "tests/pipelines/data_science")
Clean up the unnecessary files in the starters template. Args: python_package_name (str): The name of the python package.
_remove_pyspark_starter_files
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def _remove_extras_from_kedro_datasets(file_path: Path) -> None: """Remove all extras from kedro-datasets in the requirements file, while keeping the version. Args: file_path (Path): The path of the requirements file. """ with open(file_path) as file: lines = file.readlines() for i, line in enumerate(lines): if "kedro-datasets[" in line: # Split the line at '[', and keep the part before it package = line.split("[", 1)[0] # Extract version version = line.split("]")[-1] lines[i] = package + version with open(file_path, "w") as file: file.writelines(lines)
Remove all extras from kedro-datasets in the requirements file, while keeping the version. Args: file_path (Path): The path of the requirements file.
_remove_extras_from_kedro_datasets
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
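A worked example of the extras-stripping rule above; the requirement string is made up and only the split logic mirrors the helper. ::

    line = "kedro-datasets[pandas-parquetdataset,spark-sparkdataset]~=3.0\n"

    package = line.split("[", 1)[0]  # 'kedro-datasets'
    version = line.split("]")[-1]    # '~=3.0\n'
    print(package + version)         # kedro-datasets~=3.0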
def setup_template_tools( selected_tools_list: str, requirements_file_path: Path, pyproject_file_path: Path, python_package_name: str, example_pipeline: str, ) -> None: """Set up the templates according to the choice of tools. Args: selected_tools_list (str): A string containing the selected tools. requirements_file_path (Path): The path of the `requirements.txt` in the template. pyproject_file_path (Path): The path of the `pyproject.toml` in the template. python_package_name (str): The name of the python package. example_pipeline (str): 'True' if example pipeline was selected. """ if "Linting" not in selected_tools_list and "Testing" not in selected_tools_list: _remove_from_toml(pyproject_file_path, dev_pyproject_requirements) if "Linting" not in selected_tools_list: _remove_from_toml(pyproject_file_path, lint_pyproject_requirements) if "Testing" not in selected_tools_list: _remove_from_toml(pyproject_file_path, test_pyproject_requirements) _remove_dir(current_dir / "tests") if "Logging" not in selected_tools_list: _remove_file(current_dir / "conf/logging.yml") if "Documentation" not in selected_tools_list: _remove_from_toml(pyproject_file_path, docs_pyproject_requirements) _remove_dir(current_dir / "docs") if "Data Structure" not in selected_tools_list and example_pipeline != "True": _remove_dir(current_dir / "data") if "PySpark" in selected_tools_list and example_pipeline != "True": _remove_pyspark_starter_files(python_package_name) # Remove requirements used by example pipelines _remove_from_file(requirements_file_path, example_pipeline_requirements) _remove_extras_from_kedro_datasets(requirements_file_path)
Set up the templates according to the choice of tools. Args: selected_tools_list (str): A string containing the selected tools. requirements_file_path (Path): The path of the `requirements.txt` in the template. pyproject_file_path (Path): The path of the `pyproject.toml` in the template. python_package_name (str): The name of the python package. example_pipeline (str): 'True' if example pipeline was selected.
setup_template_tools
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def sort_requirements(requirements_file_path: Path) -> None: """Sort entries in `requirements.txt`, writing back changes, if any. Args: requirements_file_path (Path): The path to the `requirements.txt` file. """ with open(requirements_file_path, "rb+") as file_obj: fix_requirements(file_obj)
Sort entries in `requirements.txt`, writing back changes, if any. Args: requirements_file_path (Path): The path to the `requirements.txt` file.
sort_requirements
python
kedro-org/kedro
kedro/templates/project/hooks/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/hooks/utils.py
Apache-2.0
def register_pipelines() -> dict[str, Pipeline]: """Register the project's pipelines. Returns: A mapping from pipeline names to ``Pipeline`` objects. """ pipelines = find_pipelines() pipelines["__default__"] = sum(pipelines.values()) return pipelines
Register the project's pipelines. Returns: A mapping from pipeline names to ``Pipeline`` objects.
register_pipelines
python
kedro-org/kedro
kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipeline_registry.py
https://github.com/kedro-org/kedro/blob/master/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipeline_registry.py
Apache-2.0
def _get_project_metadata(project_path: Path) -> ProjectMetadata: """Read project metadata from `<project_root>/pyproject.toml` config file, under the `[tool.kedro]` section. Args: project_path: Local path to project root directory to look up `pyproject.toml` in. Raises: RuntimeError: `pyproject.toml` was not found or the `[tool.kedro]` section is missing, or config file cannot be parsed. ValueError: If project version is different from Kedro package version. Note: Project version is the Kedro version the project was generated with. Returns: A named tuple that contains project metadata. """ pyproject_toml = project_path / _PYPROJECT if not pyproject_toml.is_file(): raise RuntimeError( f"Could not find the project configuration file '{_PYPROJECT}' in {project_path}. " f"If you have created your project with Kedro " f"version <0.17.0, make sure to update your project template. " f"See https://github.com/kedro-org/kedro/blob/main/RELEASE.md" f"#migration-guide-from-kedro-016-to-kedro-0170 " f"for how to migrate your Kedro project." ) try: metadata_dict = toml.load(pyproject_toml) except Exception as exc: raise RuntimeError(f"Failed to parse '{_PYPROJECT}' file.") from exc try: metadata_dict = metadata_dict["tool"]["kedro"] except KeyError as exc: raise RuntimeError( f"There's no '[tool.kedro]' section in the '{_PYPROJECT}'. " f"Please add '[tool.kedro]' section to the file with appropriate " f"configuration parameters." ) from exc mandatory_keys = ["package_name", "project_name", "kedro_init_version"] missing_keys = [key for key in mandatory_keys if key not in metadata_dict] if missing_keys: raise RuntimeError(f"Missing required keys {missing_keys} from '{_PYPROJECT}'.") # check the match for major and minor version (skip patch version) if ( metadata_dict["kedro_init_version"].split(".")[:2] != kedro_version.split(".")[:2] ): raise ValueError(_version_mismatch_error(metadata_dict["kedro_init_version"])) # Default settings source_dir = Path(metadata_dict.get("source_dir", "src")).expanduser() source_dir = (project_path / source_dir).resolve() metadata_dict["tools"] = metadata_dict.get("tools") metadata_dict["example_pipeline"] = metadata_dict.get("example_pipeline") metadata_dict["source_dir"] = source_dir metadata_dict["config_file"] = pyproject_toml metadata_dict["project_path"] = project_path metadata_dict.pop("micropkg", {}) # don't include micro-packaging specs try: return ProjectMetadata(**metadata_dict) except TypeError as exc: expected_keys = [*mandatory_keys, "source_dir", "tools", "example_pipeline"] raise RuntimeError( f"Found unexpected keys in '{_PYPROJECT}'. Make sure " f"it only contains the following keys: {expected_keys}." ) from exc
Read project metadata from `<project_root>/pyproject.toml` config file, under the `[tool.kedro]` section. Args: project_path: Local path to project root directory to look up `pyproject.toml` in. Raises: RuntimeError: `pyproject.toml` was not found or the `[tool.kedro]` section is missing, or config file cannot be parsed. ValueError: If project version is different from Kedro package version. Note: Project version is the Kedro version the project was generated with. Returns: A named tuple that contains project metadata.
_get_project_metadata
python
kedro-org/kedro
kedro/framework/startup.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/startup.py
Apache-2.0
def _validate_source_path(source_path: Path, project_path: Path) -> None: """Validate the source path exists and is relative to the project path. Args: source_path: Absolute source path. project_path: Path to the Kedro project. Raises: ValueError: If source_path is not relative to project_path. NotADirectoryError: If source_path does not exist. """ try: source_path.relative_to(project_path) except ValueError as exc: raise ValueError( f"Source path '{source_path}' has to be relative to " f"your project root '{project_path}'." ) from exc if not source_path.exists(): raise NotADirectoryError(f"Source path '{source_path}' cannot be found.")
Validate the source path exists and is relative to the project path. Args: source_path: Absolute source path. project_path: Path to the Kedro project. Raises: ValueError: If source_path is not relative to project_path. NotADirectoryError: If source_path does not exist.
_validate_source_path
python
kedro-org/kedro
kedro/framework/startup.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/startup.py
Apache-2.0
def bootstrap_project(project_path: str | Path) -> ProjectMetadata: """Run setup required at the beginning of the workflow when running in project mode, and return project metadata. """ project_path = Path(project_path).expanduser().resolve() metadata = _get_project_metadata(project_path) _add_src_to_path(metadata.source_dir, project_path) configure_project(metadata.package_name) return metadata
Run setup required at the beginning of the workflow when running in project mode, and return project metadata.
bootstrap_project
python
kedro-org/kedro
kedro/framework/startup.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/startup.py
Apache-2.0
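A minimal sketch of programmatic project setup, assuming the code is executed from a directory containing a Kedro project with ``pyproject.toml`` at its root and that ``KedroSession`` is available from ``kedro.framework.session``. ::

    from pathlib import Path

    from kedro.framework.session import KedroSession
    from kedro.framework.startup import bootstrap_project

    project_path = Path.cwd()
    metadata = bootstrap_project(project_path)  # adds src/ to sys.path, configures the package
    print(metadata.package_name)

    with KedroSession.create(project_path=project_path) as session:
        session.run()  # runs the "__default__" pipeline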
def after_catalog_created( # noqa: PLR0913 self, catalog: CatalogProtocol, conf_catalog: dict[str, Any], conf_creds: dict[str, Any], feed_dict: dict[str, Any], save_version: str, load_versions: dict[str, str], ) -> None: """Hooks to be invoked after a data catalog is created. It receives the ``catalog`` as well as all the arguments for ``KedroContext._create_catalog``. Args: catalog: The catalog that was created. conf_catalog: The config from which the catalog was created. conf_creds: The credentials conf from which the catalog was created. feed_dict: The feed_dict that was added to the catalog after creation. save_version: The save_version used in ``save`` operations for all datasets in the catalog. load_versions: The load_versions used in ``load`` operations for each dataset in the catalog. """ pass
Hooks to be invoked after a data catalog is created. It receives the ``catalog`` as well as all the arguments for ``KedroContext._create_catalog``. Args: catalog: The catalog that was created. conf_catalog: The config from which the catalog was created. conf_creds: The credentials conf from which the catalog was created. feed_dict: The feed_dict that was added to the catalog after creation. save_version: The save_version used in ``save`` operations for all datasets in the catalog. load_versions: The load_versions used in ``load`` operations for each dataset in the catalog.
after_catalog_created
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def before_node_run( self, node: Node, catalog: CatalogProtocol, inputs: dict[str, Any], is_async: bool, session_id: str, ) -> dict[str, Any] | None: """Hook to be invoked before a node runs. The arguments received are the same as those used by ``kedro.runner.run_node`` Args: node: The ``Node`` to run. catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs. inputs: The dictionary of inputs dataset. The keys are dataset names and the values are the actual loaded input data, not the dataset instance. is_async: Whether the node was run in ``async`` mode. session_id: The id of the session. Returns: Either None or a dictionary mapping dataset name(s) to new value(s). If returned, this dictionary will be used to update the node inputs, which allows to overwrite the node inputs. """ pass
Hook to be invoked before a node runs. The arguments received are the same as those used by ``kedro.runner.run_node`` Args: node: The ``Node`` to run. catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs. inputs: The dictionary of inputs dataset. The keys are dataset names and the values are the actual loaded input data, not the dataset instance. is_async: Whether the node was run in ``async`` mode. session_id: The id of the session. Returns: Either None or a dictionary mapping dataset name(s) to new value(s). If returned, this dictionary will be used to update the node inputs, which allows to overwrite the node inputs.
before_node_run
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
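A hedged sketch of a hook implementation against this spec; the dataset name ``model_input_table`` and the ``head(100)`` trimming are purely illustrative, and registering the class (for example via ``HOOKS`` in ``settings.py``) is assumed rather than shown. Pluggy lets an implementation accept only the spec arguments it needs. ::

    from __future__ import annotations

    from typing import Any

    from kedro.framework.hooks import hook_impl


    class SampleInputsHooks:
        @hook_impl
        def before_node_run(self, inputs: dict[str, Any]) -> dict[str, Any] | None:
            # Returning a dict overrides the matching node inputs before the node runs.
            if "model_input_table" in inputs:
                return {"model_input_table": inputs["model_input_table"].head(100)}
            return None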
def after_node_run( # noqa: PLR0913 self, node: Node, catalog: CatalogProtocol, inputs: dict[str, Any], outputs: dict[str, Any], is_async: bool, session_id: str, ) -> None: """Hook to be invoked after a node runs. The arguments received are the same as those used by ``kedro.runner.run_node`` as well as the ``outputs`` of the node run. Args: node: The ``Node`` that ran. catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs. inputs: The dictionary of inputs dataset. The keys are dataset names and the values are the actual loaded input data, not the dataset instance. outputs: The dictionary of outputs dataset. The keys are dataset names and the values are the actual computed output data, not the dataset instance. is_async: Whether the node was run in ``async`` mode. session_id: The id of the session. """ pass
Hook to be invoked after a node runs. The arguments received are the same as those used by ``kedro.runner.run_node`` as well as the ``outputs`` of the node run. Args: node: The ``Node`` that ran. catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs. inputs: The dictionary of inputs dataset. The keys are dataset names and the values are the actual loaded input data, not the dataset instance. outputs: The dictionary of outputs dataset. The keys are dataset names and the values are the actual computed output data, not the dataset instance. is_async: Whether the node was run in ``async`` mode. session_id: The id of the session.
after_node_run
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def on_node_error( # noqa: PLR0913 self, error: Exception, node: Node, catalog: CatalogProtocol, inputs: dict[str, Any], is_async: bool, session_id: str, ) -> None: """Hook to be invoked if a node run throws an uncaught error. The signature of this error hook should match the signature of ``before_node_run`` along with the error that was raised. Args: error: The uncaught exception thrown during the node run. node: The ``Node`` to run. catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs. inputs: The dictionary of inputs dataset. The keys are dataset names and the values are the actual loaded input data, not the dataset instance. is_async: Whether the node was run in ``async`` mode. session_id: The id of the session. """ pass
Hook to be invoked if a node run throws an uncaught error. The signature of this error hook should match the signature of ``before_node_run`` along with the error that was raised. Args: error: The uncaught exception thrown during the node run. node: The ``Node`` to run. catalog: An implemented instance of ``CatalogProtocol`` containing the node's inputs and outputs. inputs: The dictionary of inputs dataset. The keys are dataset names and the values are the actual loaded input data, not the dataset instance. is_async: Whether the node was run in ``async`` mode. session_id: The id of the session.
on_node_error
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def before_pipeline_run( self, run_params: dict[str, Any], pipeline: Pipeline, catalog: CatalogProtocol ) -> None: """Hook to be invoked before a pipeline runs. Args: run_params: The params used to run the pipeline. Should have the following schema:: { "session_id": str "project_path": str, "env": str, "kedro_version": str, "tags": Optional[List[str]], "from_nodes": Optional[List[str]], "to_nodes": Optional[List[str]], "node_names": Optional[List[str]], "from_inputs": Optional[List[str]], "to_outputs": Optional[List[str]], "load_versions": Optional[List[str]], "extra_params": Optional[Dict[str, Any]] "pipeline_name": str, "namespace": Optional[str], "runner": str, } pipeline: The ``Pipeline`` that will be run. catalog: An implemented instance of ``CatalogProtocol`` to be used during the run. """ pass
Hook to be invoked before a pipeline runs. Args: run_params: The params used to run the pipeline. Should have the following schema:: { "session_id": str "project_path": str, "env": str, "kedro_version": str, "tags": Optional[List[str]], "from_nodes": Optional[List[str]], "to_nodes": Optional[List[str]], "node_names": Optional[List[str]], "from_inputs": Optional[List[str]], "to_outputs": Optional[List[str]], "load_versions": Optional[List[str]], "extra_params": Optional[Dict[str, Any]] "pipeline_name": str, "namespace": Optional[str], "runner": str, } pipeline: The ``Pipeline`` that will be run. catalog: An implemented instance of ``CatalogProtocol`` to be used during the run.
before_pipeline_run
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def after_pipeline_run( self, run_params: dict[str, Any], run_result: dict[str, Any], pipeline: Pipeline, catalog: CatalogProtocol, ) -> None: """Hook to be invoked after a pipeline runs. Args: run_params: The params used to run the pipeline. Should have the following schema:: { "session_id": str "project_path": str, "env": str, "kedro_version": str, "tags": Optional[List[str]], "from_nodes": Optional[List[str]], "to_nodes": Optional[List[str]], "node_names": Optional[List[str]], "from_inputs": Optional[List[str]], "to_outputs": Optional[List[str]], "load_versions": Optional[List[str]], "extra_params": Optional[Dict[str, Any]] "pipeline_name": str, "namespace": Optional[str], "runner": str, } run_result: The output of ``Pipeline`` run. pipeline: The ``Pipeline`` that was run. catalog: An implemented instance of ``CatalogProtocol`` used during the run. """ pass
Hook to be invoked after a pipeline runs. Args: run_params: The params used to run the pipeline. Should have the following schema:: { "session_id": str "project_path": str, "env": str, "kedro_version": str, "tags": Optional[List[str]], "from_nodes": Optional[List[str]], "to_nodes": Optional[List[str]], "node_names": Optional[List[str]], "from_inputs": Optional[List[str]], "to_outputs": Optional[List[str]], "load_versions": Optional[List[str]], "extra_params": Optional[Dict[str, Any]] "pipeline_name": str, "namespace": Optional[str], "runner": str, } run_result: The output of ``Pipeline`` run. pipeline: The ``Pipeline`` that was run. catalog: An implemented instance of ``CatalogProtocol`` used during the run.
after_pipeline_run
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def on_pipeline_error( self, error: Exception, run_params: dict[str, Any], pipeline: Pipeline, catalog: CatalogProtocol, ) -> None: """Hook to be invoked if a pipeline run throws an uncaught Exception. The signature of this error hook should match the signature of ``before_pipeline_run`` along with the error that was raised. Args: error: The uncaught exception thrown during the pipeline run. run_params: The params used to run the pipeline. Should have the following schema:: { "session_id": str "project_path": str, "env": str, "kedro_version": str, "tags": Optional[List[str]], "from_nodes": Optional[List[str]], "to_nodes": Optional[List[str]], "node_names": Optional[List[str]], "from_inputs": Optional[List[str]], "to_outputs": Optional[List[str]], "load_versions": Optional[List[str]], "extra_params": Optional[Dict[str, Any]] "pipeline_name": str, "namespace": Optional[str], "runner": str, } pipeline: The ``Pipeline`` that was run. catalog: An implemented instance of ``CatalogProtocol`` used during the run. """ pass
Hook to be invoked if a pipeline run throws an uncaught Exception. The signature of this error hook should match the signature of ``before_pipeline_run`` along with the error that was raised. Args: error: The uncaught exception thrown during the pipeline run. run_params: The params used to run the pipeline. Should have the following schema:: { "session_id": str "project_path": str, "env": str, "kedro_version": str, "tags": Optional[List[str]], "from_nodes": Optional[List[str]], "to_nodes": Optional[List[str]], "node_names": Optional[List[str]], "from_inputs": Optional[List[str]], "to_outputs": Optional[List[str]], "load_versions": Optional[List[str]], "extra_params": Optional[Dict[str, Any]] "pipeline_name": str, "namespace": Optional[str], "runner": str, } pipeline: The ``Pipeline`` that was run. catalog: An implemented instance of ``CatalogProtocol`` used during the run.
on_pipeline_error
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def before_dataset_loaded(self, dataset_name: str, node: Node) -> None: """Hook to be invoked before a dataset is loaded from the catalog. Args: dataset_name: name of the dataset to be loaded from the catalog. node: The ``Node`` to run. """ pass
Hook to be invoked before a dataset is loaded from the catalog. Args: dataset_name: name of the dataset to be loaded from the catalog. node: The ``Node`` to run.
before_dataset_loaded
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def after_dataset_loaded(self, dataset_name: str, data: Any, node: Node) -> None: """Hook to be invoked after a dataset is loaded from the catalog. Args: dataset_name: name of the dataset that was loaded from the catalog. data: the actual data that was loaded from the catalog. node: The ``Node`` to run. """ pass
Hook to be invoked after a dataset is loaded from the catalog. Args: dataset_name: name of the dataset that was loaded from the catalog. data: the actual data that was loaded from the catalog. node: The ``Node`` to run.
after_dataset_loaded
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def before_dataset_saved(self, dataset_name: str, data: Any, node: Node) -> None:
    """Hook to be invoked before a dataset is saved to the catalog.

    Args:
        dataset_name: name of the dataset to be saved to the catalog.
        data: the actual data to be saved to the catalog.
        node: The ``Node`` that ran.
    """
    pass
Hook to be invoked before a dataset is saved to the catalog. Args: dataset_name: name of the dataset to be saved to the catalog. data: the actual data to be saved to the catalog. node: The ``Node`` that ran.
before_dataset_saved
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
def after_dataset_saved(self, dataset_name: str, data: Any, node: Node) -> None:
    """Hook to be invoked after a dataset is saved in the catalog.

    Args:
        dataset_name: name of the dataset that was saved to the catalog.
        data: the actual data that was saved to the catalog.
        node: The ``Node`` that ran.
    """
    pass
Hook to be invoked after a dataset is saved in the catalog. Args: dataset_name: name of the dataset that was saved to the catalog. data: the actual data that was saved to the catalog. node: The ``Node`` that ran.
after_dataset_saved
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
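The four dataset hook specs above are commonly implemented together; a hedged sketch that times catalog I/O follows. The class name, the per-instance timing dict and the print-based reporting are illustrative assumptions, not Kedro requirements.

import time
from typing import Any

from kedro.framework.hooks import hook_impl
from kedro.pipeline.node import Node


class DatasetTimingHooks:  # hypothetical name
    def __init__(self) -> None:
        self._started: dict[str, float] = {}

    @hook_impl
    def before_dataset_loaded(self, dataset_name: str, node: Node) -> None:
        self._started[dataset_name] = time.perf_counter()

    @hook_impl
    def after_dataset_loaded(self, dataset_name: str, data: Any, node: Node) -> None:
        elapsed = time.perf_counter() - self._started.pop(dataset_name, time.perf_counter())
        print(f"Loaded '{dataset_name}' in {elapsed:.3f}s")

    @hook_impl
    def before_dataset_saved(self, dataset_name: str, data: Any, node: Node) -> None:
        self._started[dataset_name] = time.perf_counter()

    @hook_impl
    def after_dataset_saved(self, dataset_name: str, data: Any, node: Node) -> None:
        elapsed = time.perf_counter() - self._started.pop(dataset_name, time.perf_counter())
        print(f"Saved '{dataset_name}' in {elapsed:.3f}s")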
def after_context_created(
    self,
    context: KedroContext,
) -> None:
    """Hook to be invoked after a `KedroContext` is created. This is the earliest
    hook triggered within a Kedro run. The `KedroContext` stores useful information
    such as `credentials`, `config_loader` and `env`.

    Args:
        context: The context that was created.
    """
Hook to be invoked after a `KedroContext` is created. This is the earliest hook triggered within a Kedro run. The `KedroContext` stores useful information such as `credentials`, `config_loader` and `env`. Args: context: The context that was created.
after_context_created
python
kedro-org/kedro
kedro/framework/hooks/specs.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/specs.py
Apache-2.0
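A hedged sketch for `after_context_created`, the earliest hook in a run. Keeping a reference to the context on the hook instance is a common pattern but not mandated by Kedro, and the class name is illustrative.

from kedro.framework.context import KedroContext
from kedro.framework.hooks import hook_impl


class ContextCaptureHooks:  # hypothetical name
    def __init__(self) -> None:
        self.context = None  # populated once the context exists

    @hook_impl
    def after_context_created(self, context: KedroContext) -> None:
        self.context = context
        print(f"Run starting in env '{context.env}' at {context.project_path}")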
def _create_hook_manager() -> PluginManager:
    """Create a new PluginManager instance and register Kedro's hook specs."""
    manager = PluginManager(HOOK_NAMESPACE)
    manager.trace.root.setwriter(logger.debug)
    manager.enable_tracing()
    manager.add_hookspecs(NodeSpecs)
    manager.add_hookspecs(PipelineSpecs)
    manager.add_hookspecs(DataCatalogSpecs)
    manager.add_hookspecs(DatasetSpecs)
    manager.add_hookspecs(KedroContextSpecs)
    return manager
Create a new PluginManager instance and register Kedro's hook specs.
_create_hook_manager
python
kedro-org/kedro
kedro/framework/hooks/manager.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/manager.py
Apache-2.0
def _register_hooks(hook_manager: PluginManager, hooks: Iterable[Any]) -> None:
    """Register all hooks as specified in ``hooks`` with the global ``hook_manager``.

    Args:
        hook_manager: Hook manager instance to register the hooks with.
        hooks: Hooks that need to be registered.
    """
    for hooks_collection in hooks:
        # Sometimes users might call hook registration more than once, in which
        # case hooks have already been registered, so we perform a simple check
        # here to avoid an error being raised and breaking the user's workflow.
        if not hook_manager.is_registered(hooks_collection):
            if isclass(hooks_collection):
                raise TypeError(
                    "KedroSession expects hooks to be registered as instances. "
                    "Have you forgotten the `()` when registering a hook class?"
                )
            hook_manager.register(hooks_collection)
Register all hooks as specified in ``hooks`` with the global ``hook_manager``. Args: hook_manager: Hook manager instance to register the hooks with. hooks: Hooks that need to be registered.
_register_hooks
python
kedro-org/kedro
kedro/framework/hooks/manager.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/manager.py
Apache-2.0
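The two helpers above are private, but the following hedged sketch shows how they compose (for example in tests that need a standalone hook manager); the `MyHooks` class is illustrative and deliberately empty.

from kedro.framework.hooks.manager import _create_hook_manager, _register_hooks


class MyHooks:  # hypothetical, empty hooks collection
    pass


hooks = MyHooks()
hook_manager = _create_hook_manager()    # registers all of Kedro's hook specs
_register_hooks(hook_manager, (hooks,))  # must be instances; passing MyHooks itself raises TypeError
assert hook_manager.is_registered(hooks)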
def _register_hooks_entry_points(
    hook_manager: PluginManager, disabled_plugins: Iterable[str]
) -> None:
    """Register pluggy hooks from python package entrypoints.

    Args:
        hook_manager: Hook manager instance to register the hooks with.
        disabled_plugins: An iterable returning the names of plugins
            whose hooks must not be registered; any already registered
            hooks will be unregistered.
    """
    already_registered = hook_manager.get_plugins()
    # Method name is misleading:
    # entry points are standard and don't require setuptools,
    # see https://packaging.python.org/en/latest/specifications/entry-points/
    hook_manager.load_setuptools_entrypoints(_PLUGIN_HOOKS)
    disabled_plugins = set(disabled_plugins)

    # Get list of plugin/distinfo tuples for all registered plugins.
    plugininfo = hook_manager.list_plugin_distinfo()
    plugin_names = set()
    disabled_plugin_names = set()
    for plugin, dist in plugininfo:
        if dist.project_name in disabled_plugins:
            # `unregister()` is used instead of `set_blocked()` because
            # we want to disable hooks for specific plugin based on project
            # name and not `entry_point` name. Also, we log project names with
            # version for which hooks were registered.
            hook_manager.unregister(plugin=plugin)
            disabled_plugin_names.add(f"{dist.project_name}-{dist.version}")
        elif plugin not in already_registered:
            plugin_names.add(f"{dist.project_name}-{dist.version}")

    if disabled_plugin_names:
        logger.debug(
            "Hooks are disabled for plugin(s): %s",
            ", ".join(sorted(disabled_plugin_names)),
        )

    if plugin_names:
        logger.debug(
            "Registered hooks from %d installed plugin(s): %s",
            len(plugin_names),
            ", ".join(sorted(plugin_names)),
        )
Register pluggy hooks from python package entrypoints. Args: hook_manager: Hook manager instance to register the hooks with. disabled_plugins: An iterable returning the names of plugins whose hooks must not be registered; any already registered hooks will be unregistered.
_register_hooks_entry_points
python
kedro-org/kedro
kedro/framework/hooks/manager.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/hooks/manager.py
Apache-2.0
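On the user side, the plugin names that end up in `disabled_plugins` typically come from the `DISABLE_HOOKS_FOR_PLUGINS` setting in a project's `settings.py`. A hedged example follows; the plugin name is only an illustration of an installed Kedro plugin.

# src/<package_name>/settings.py
DISABLE_HOOKS_FOR_PLUGINS = ("kedro-telemetry",)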
def call(cmd: list[str], **kwargs: Any) -> None:  # pragma: no cover
    """Run a subprocess command and raise if it fails.

    Args:
        cmd: List of command parts.
        **kwargs: Optional keyword arguments passed to `subprocess.run`.

    Raises:
        click.exceptions.Exit: If `subprocess.run` returns non-zero code.
    """
    click.echo(shlex.join(cmd))
    code = subprocess.run(cmd, **kwargs).returncode  # noqa: PLW1510, S603
    if code:
        raise click.exceptions.Exit(code=code)
Run a subprocess command and raise if it fails. Args: cmd: List of command parts. **kwargs: Optional keyword arguments passed to `subprocess.run`. Raises: click.exceptions.Exit: If `subprocess.run` returns non-zero code.
call
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def python_call(
    module: str, arguments: Iterable[str], **kwargs: Any
) -> None:  # pragma: no cover
    """Run a subprocess command that invokes a Python module."""
    call([sys.executable, "-m", module, *list(arguments)], **kwargs)
Run a subprocess command that invokes a Python module.
python_call
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
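A hedged usage sketch for the two subprocess helpers above; the commands shown are arbitrary examples.

from kedro.framework.cli.utils import call, python_call

call(["git", "status"])                         # echoes the command, raises click.exceptions.Exit on failure
python_call("pip", ["install", "-U", "kedro"])  # equivalent to `python -m pip install -U kedro`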
def find_stylesheets() -> Iterable[str]:  # pragma: no cover
    # TODO: Deprecate this function in favour of kedro-sphinx-theme
    """Fetch all stylesheets used in the official Kedro documentation"""
    css_path = Path(__file__).resolve().parents[1] / "html" / "_static" / "css"
    return (str(css_path / "copybutton.css"),)
Fetch all stylesheets used in the official Kedro documentation
find_stylesheets
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def forward_command(
    group: Any, name: str | None = None, forward_help: bool = False
) -> Any:
    """A command that receives the rest of the command line as 'args'."""

    def wrapit(func: Any) -> Any:
        func = click.argument("args", nargs=-1, type=click.UNPROCESSED)(func)
        func = command_with_verbosity(
            group,
            name=name,
            context_settings={
                "ignore_unknown_options": True,
                "help_option_names": [] if forward_help else ["-h", "--help"],
            },
        )(func)
        return func

    return wrapit
A command that receives the rest of the command line as 'args'.
forward_command
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def get_pkg_version(reqs_path: (str | Path), package_name: str) -> str:
    """Get package version from requirements.txt.

    Args:
        reqs_path: Path to requirements.txt file.
        package_name: Package to search for.

    Returns:
        Package and its version as specified in requirements.txt.

    Raises:
        KedroCliError: If the file specified in ``reqs_path`` does not exist
            or ``package_name`` was not found in that file.
    """
    warnings.warn(
        "`get_pkg_version()` has been deprecated and will be removed in Kedro 0.20.0",
        KedroDeprecationWarning,
    )
    reqs_path = Path(reqs_path).absolute()
    if not reqs_path.is_file():
        raise KedroCliError(f"Given path '{reqs_path}' is not a regular file.")

    pattern = re.compile(package_name + r"([^\w]|$)")
    with reqs_path.open("r", encoding="utf-8") as reqs_file:
        for req_line in reqs_file:
            req_line = req_line.strip()  # noqa: PLW2901
            if pattern.search(req_line):
                return req_line

    raise KedroCliError(f"Cannot find '{package_name}' package in '{reqs_path}'.")
Get package version from requirements.txt. Args: reqs_path: Path to requirements.txt file. package_name: Package to search for. Returns: Package and its version as specified in requirements.txt. Raises: KedroCliError: If the file specified in ``reqs_path`` does not exist or ``package_name`` was not found in that file.
get_pkg_version
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
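A hedged usage sketch (note the deprecation warning above: the helper is slated for removal in Kedro 0.20.0); the requirements file content is illustrative.

from kedro.framework.cli.utils import get_pkg_version

# Assuming requirements.txt contains a line such as "pandas~=2.2":
print(get_pkg_version("requirements.txt", "pandas"))  # -> "pandas~=2.2"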
def _click_verbose(func: Any) -> Any:
    """Click option for enabling verbose mode."""
    return click.option(
        "--verbose",
        "-v",
        is_flag=True,
        callback=_update_verbose_flag,
        help="See extensive logging and error stack traces.",
    )(func)
Click option for enabling verbose mode.
_click_verbose
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def command_with_verbosity(group: click.core.Group, *args: Any, **kwargs: Any) -> Any:
    """Custom command decorator with verbose flag added."""

    def decorator(func: Any) -> Any:
        func = _click_verbose(func)
        func = group.command(*args, **kwargs)(func)
        return func

    return decorator
Custom command decorator with verbose flag added.
command_with_verbosity
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
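A hedged sketch of how a plugin CLI might use `command_with_verbosity` and `forward_command` together. The group and command names are illustrative; `**kwargs` simply absorbs the injected `--verbose` flag, and `forward_command` passes leftover argv through untouched.

from typing import Any

import click

from kedro.framework.cli.utils import command_with_verbosity, forward_command, python_call


@click.group(name="demo")
def demo_cli() -> None:
    """Hypothetical command group."""


@command_with_verbosity(demo_cli, name="hello")
def hello(**kwargs: Any) -> None:
    """Hypothetical command that gains the shared --verbose/-v flag."""
    click.echo("hello")


@forward_command(demo_cli, name="pytest", forward_help=True)
def run_pytest(args: tuple[str, ...], **kwargs: Any) -> None:
    """Forward everything after `demo pytest` to pytest unmodified."""
    python_call("pytest", args)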
def _clean_pycache(path: Path) -> None:
    """Recursively clean all __pycache__ folders from `path`.

    Args:
        path: Existing local directory to clean __pycache__ folders from.
    """
    to_delete = [each.resolve() for each in path.rglob("__pycache__")]

    for each in to_delete:
        shutil.rmtree(each, ignore_errors=True)
Recursively clean all __pycache__ folders from `path`. Args: path: Existing local directory to clean __pycache__ folders from.
_clean_pycache
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def split_string(ctx: click.Context, param: Any, value: str) -> list[str]:
    """Split string by comma."""
    return [item.strip() for item in value.split(",") if item.strip()]
Split string by comma.
split_string
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
def split_node_names(ctx: click.Context, param: Any, to_split: str) -> list[str]:
    """Split string by comma, ignoring commas enclosed by square brackets.
    This avoids splitting the string of node names on commas included in
    default node names, which have the pattern
    <function_name>([<input_name>,...]) -> [<output_name>,...]

    Note:
        - `to_split` will have such commas if and only if it includes a
          default node name. User-defined node names cannot include commas
          or square brackets.
        - This function will no longer be necessary from Kedro 0.19.*,
          in which default node names will no longer contain commas.

    Args:
        to_split: the string to split safely

    Returns:
        A list containing the result of safe-splitting the string.
    """
    result = []
    argument, match_state = "", 0
    for char in to_split + ",":
        if char == "[":
            match_state += 1
        elif char == "]":
            match_state -= 1
        if char == "," and match_state == 0 and argument:
            argument = argument.strip()
            result.append(argument)
            argument = ""
        else:
            argument += char
    return result
Split string by comma, ignoring commas enclosed by square brackets.
This avoids splitting the string of node names on commas included in
default node names, which have the pattern
<function_name>([<input_name>,...]) -> [<output_name>,...]

Note:
    - `to_split` will have such commas if and only if it includes a
      default node name. User-defined node names cannot include commas
      or square brackets.
    - This function will no longer be necessary from Kedro 0.19.*,
      in which default node names will no longer contain commas.

Args:
    to_split: the string to split safely

Returns:
    A list containing the result of safe-splitting the string.
split_node_names
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
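A hedged demonstration of what the two splitting callbacks return; `ctx` and `param` are unused by both, so `None` is passed for brevity, and the node names are made up.

from kedro.framework.cli.utils import split_node_names, split_string

print(split_string(None, None, " a, b,, c "))
# -> ['a', 'b', 'c']

print(split_node_names(None, None, "clean,identity([raw,params]) -> [clean]"))
# -> ['clean', 'identity([raw,params]) -> [clean]']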
def env_option(func_: Any | None = None, **kwargs: Any) -> Any:
    """Add `--env` CLI option to a function."""
    default_args = {"type": str, "default": None, "help": ENV_HELP}
    kwargs = {**default_args, **kwargs}
    opt = click.option("--env", "-e", **kwargs)
    return opt(func_) if func_ else opt
Add `--env` CLI option to a function.
env_option
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0
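A hedged sketch of attaching the shared `--env/-e` option to a command; the command itself is illustrative.

import click

from kedro.framework.cli.utils import env_option


@click.command()
@env_option
def show_env(env):
    """Hypothetical command that prints the chosen configuration environment."""
    click.echo(f"Configuration environment: {env or 'local (default)'}")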
def _get_entry_points(name: str) -> Any:
    """Get all kedro related entry points"""
    return importlib_metadata.entry_points().select(  # type: ignore[no-untyped-call]
        group=ENTRY_POINT_GROUPS[name]
    )
Get all kedro related entry points
_get_entry_points
python
kedro-org/kedro
kedro/framework/cli/utils.py
https://github.com/kedro-org/kedro/blob/master/kedro/framework/cli/utils.py
Apache-2.0