index | package | name | docstring | code | signature |
---|---|---|---|---|---|
4,370 | dagster_snowflake.resources | get_connection | Gets a connection to Snowflake as a context manager.
If using the execute_query, execute_queries, or load_table_from_local_parquet methods,
you do not need to create a connection using this context manager.
Args:
raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw
connection. Defaults to True.
Examples:
.. code-block:: python
@op(
required_resource_keys={"snowflake"}
)
def get_query_status(context, query_id):
with context.resources.snowflake.get_connection() as conn:
# conn is a Snowflake Connection object or a SQLAlchemy Connection if
# sqlalchemy is specified as the connector in the Snowflake Resource config
return conn.get_query_status(query_id)
| @compat_model_validator(mode="before")
def validate_authentication(cls, values):
auths_set = 0
auths_set += 1 if values.get("password") is not None else 0
auths_set += 1 if values.get("private_key") is not None else 0
auths_set += 1 if values.get("private_key_path") is not None else 0
# if authenticator is set, there can be 0 or 1 additional auth method;
# otherwise, ensure at least 1 method is provided
check.invariant(
auths_set > 0 or values.get("authenticator") is not None,
"Missing config: Password, private key, or authenticator authentication required"
" for Snowflake resource.",
)
# ensure that only 1 non-authenticator method is provided
check.invariant(
auths_set <= 1,
"Incorrect config: Cannot provide both password and private key authentication to"
" Snowflake Resource.",
)
return values
| (self, raw_conn: bool = True) -> Iterator[Union[Any, snowflake.connector.connection.SnowflakeConnection]] |
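A minimal sketch of calling ``get_connection`` from an asset, assuming the Pythonic ``SnowflakeResource`` shown in this row and placeholder environment-variable names (``SNOWFLAKE_ACCOUNT`` etc. are illustrative):
.. code-block:: python

    from dagster import Definitions, EnvVar, asset
    from dagster_snowflake import SnowflakeResource

    @asset
    def snowflake_version(snowflake: SnowflakeResource) -> str:
        # With no "connector" config set, conn is a snowflake.connector connection,
        # so the usual cursor()/execute() API applies.
        with snowflake.get_connection() as conn:
            cur = conn.cursor()
            cur.execute("SELECT CURRENT_VERSION()")
            return cur.fetchone()[0]

    defs = Definitions(
        assets=[snowflake_version],
        resources={
            "snowflake": SnowflakeResource(
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                password=EnvVar("SNOWFLAKE_PASSWORD"),
            )
        },
    )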
4,371 | dagster_snowflake.resources | load_table_from_local_parquet | Stores the content of a parquet file to a Snowflake table.
Args:
src (str): the name of the file to store in Snowflake
table (str): the name of the table to store the data. If the table does not exist, it will
be created. Otherwise the contents of the table will be replaced with the data in src
Examples:
.. code-block:: python
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
@op
def write_parquet_file(snowflake: SnowflakeResource):
df = pd.DataFrame({"one": [1, 2, 3], "ten": [11, 12, 13]})
table = pa.Table.from_pandas(df)
pq.write_table(table, "example.parquet')
snowflake.load_table_from_local_parquet(
src="example.parquet",
table="MY_TABLE"
)
| @public
def load_table_from_local_parquet(self, src: str, table: str):
"""Stores the content of a parquet file to a Snowflake table.
Args:
src (str): the name of the file to store in Snowflake
table (str): the name of the table to store the data. If the table does not exist, it will
be created. Otherwise the contents of the table will be replaced with the data in src
Examples:
.. code-block:: python
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
@op
def write_parquet_file(snowflake: SnowflakeResource):
df = pd.DataFrame({"one": [1, 2, 3], "ten": [11, 12, 13]})
table = pa.Table.from_pandas(df)
pq.write_table(table, "example.parquet')
snowflake.load_table_from_local_parquet(
src="example.parquet",
table="MY_TABLE"
)
"""
check.str_param(src, "src")
check.str_param(table, "table")
sql_queries = [
f"CREATE OR REPLACE TABLE {table} ( data VARIANT DEFAULT NULL);",
"CREATE OR REPLACE FILE FORMAT parquet_format TYPE = 'parquet';",
f"PUT {src} @%{table};",
f"COPY INTO {table} FROM @%{table} FILE_FORMAT = (FORMAT_NAME = 'parquet_format');",
]
self.execute_queries(sql_queries)
| (self, src: str, table: str) |
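The loader above writes every parquet row into a single ``VARIANT`` column named ``data``, so reading individual fields back uses Snowflake's semi-structured path syntax. A sketch, assuming the ``MY_TABLE`` example above and that the parquet column names ("one", "ten") are preserved as written:
.. code-block:: python

    from dagster import op
    from dagster_snowflake import SnowflakeResource

    @op
    def read_back(snowflake: SnowflakeResource) -> list:
        with snowflake.get_connection() as conn:
            cur = conn.cursor()
            # Each row of MY_TABLE is an OBJECT in the "data" VARIANT column,
            # keyed by the original parquet column names.
            cur.execute('SELECT data:"one"::INT AS one, data:"ten"::INT AS ten FROM MY_TABLE')
            return cur.fetchall()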
4,372 | dagster_snowflake.snowflake_io_manager | SnowflakeIOManager | Base class for an IO manager definition that reads inputs from and writes outputs to Snowflake.
Examples:
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
defs = Definitions(
assets=[my_table],
resources={
"io_manager": MySnowflakeIOManager(database="my_database", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)
}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the Snowflake I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
defs = Definitions(
assets=[my_table],
resources={
"io_manager": MySnowflakeIOManager(database="my_database", schema="my_schema", ...)
}
)
On individual assets, you can also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
| class SnowflakeIOManager(ConfigurableIOManagerFactory):
"""Base class for an IO manager definition that reads inputs from and writes outputs to Snowflake.
Examples:
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
defs = Definitions(
assets=[my_table],
resources={
"io_manager": MySnowflakeIOManager(database="my_database", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)
}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the Snowflake I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
defs = Definitions(
assets=[my_table],
resources={
"io_manager": MySnowflakeIOManager(database="my_database", schema="my_schema", ...)
}
)
On individual assets, you can also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
"""
database: str = Field(description="Name of the database to use.")
account: str = Field(
description=(
"Your Snowflake account name. For more details, see the `Snowflake documentation."
" <https://docs.snowflake.com/developer-guide/python-connector/python-connector-api>`__"
),
)
user: str = Field(description="User login name.")
schema_: Optional[str] = Field(
default=None, alias="schema", description="Name of the schema to use."
) # schema is a reserved word for pydantic
password: Optional[str] = Field(default=None, description="User password.")
warehouse: Optional[str] = Field(default=None, description="Name of the warehouse to use.")
role: Optional[str] = Field(default=None, description="Name of the role to use.")
private_key: Optional[str] = Field(
default=None,
description=(
"Raw private key to use. See the `Snowflake documentation"
" <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details. To"
" avoid issues with newlines in the keys, you can base64 encode the key. You can"
" retrieve the base64 encoded key with this shell command: cat rsa_key.p8 | base64"
),
)
private_key_path: Optional[str] = Field(
default=None,
description=(
"Path to the private key. See the `Snowflake documentation"
" <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."
),
)
private_key_password: Optional[str] = Field(
default=None,
description=(
"The password of the private key. See the `Snowflake documentation"
" <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."
" Required for both private_key and private_key_path if the private key is encrypted."
" For unencrypted keys, this config can be omitted or set to None."
),
)
store_timestamps_as_strings: bool = Field(
default=False,
description=(
"If using Pandas DataFrames, whether to convert time data to strings. If True, time"
" data will be converted to strings when storing the DataFrame and converted back to"
" time data when loading the DataFrame. If False, time data without a timezone will be"
" set to UTC timezone to avoid a Snowflake bug. Defaults to False."
),
)
authenticator: Optional[str] = Field(
default=None,
description="Optional parameter to specify the authentication mechanism to use.",
)
@staticmethod
@abstractmethod
def type_handlers() -> Sequence[DbTypeHandler]:
"""type_handlers should return a list of the TypeHandlers that the I/O manager can use.
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
"""
...
@staticmethod
def default_load_type() -> Optional[Type]:
"""If an asset or op is not annotated with an return type, default_load_type will be used to
determine which TypeHandler to use to store and load the output.
If left unimplemented, default_load_type will return None. In that case, if there is only
one TypeHandler, the I/O manager will default to loading unannotated outputs with that
TypeHandler.
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
import pandas as pd
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@staticmethod
def default_load_type() -> Optional[Type]:
return pd.DataFrame
"""
return None
def create_io_manager(self, context) -> DbIOManager:
return DbIOManager(
db_client=SnowflakeDbClient(),
io_manager_name="SnowflakeIOManager",
database=self.database,
schema=self.schema_,
type_handlers=self.type_handlers(),
default_load_type=self.default_load_type(),
)
| (*, database: str, account: str, user: str, schema: Optional[str] = None, password: Optional[str] = None, warehouse: Optional[str] = None, role: Optional[str] = None, private_key: Optional[str] = None, private_key_path: Optional[str] = None, private_key_password: Optional[str] = None, store_timestamps_as_strings: bool = False, authenticator: Optional[str] = None) -> None |
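The ``private_key``/``private_key_path`` fields above allow key-pair authentication instead of a password. A sketch that reuses ``MySnowflakeIOManager`` and ``my_table`` from the docstring example, with illustrative environment-variable names and an assumed key path:
.. code-block:: python

    from dagster import Definitions, EnvVar

    defs = Definitions(
        assets=[my_table],
        resources={
            "io_manager": MySnowflakeIOManager(
                database="my_database",
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                # Key-pair authentication; the passphrase is only required
                # if the private key file is encrypted.
                private_key_path="/path/to/rsa_key.p8",
                private_key_password=EnvVar("SNOWFLAKE_PK_PASSPHRASE"),
            )
        },
    )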
4,373 | pydantic.main | __copy__ | Returns a shallow copy of the model. | def __copy__(self: Model) -> Model:
"""Returns a shallow copy of the model."""
cls = type(self)
m = cls.__new__(cls)
_object_setattr(m, '__dict__', copy(self.__dict__))
_object_setattr(m, '__pydantic_extra__', copy(self.__pydantic_extra__))
_object_setattr(m, '__pydantic_fields_set__', copy(self.__pydantic_fields_set__))
if not hasattr(self, '__pydantic_private__') or self.__pydantic_private__ is None:
_object_setattr(m, '__pydantic_private__', None)
else:
_object_setattr(
m,
'__pydantic_private__',
{k: v for k, v in self.__pydantic_private__.items() if v is not PydanticUndefined},
)
return m
| (self: ~Model) -> ~Model |
4,374 | pydantic.main | __deepcopy__ | Returns a deep copy of the model. | def __deepcopy__(self: Model, memo: dict[int, Any] | None = None) -> Model:
"""Returns a deep copy of the model."""
cls = type(self)
m = cls.__new__(cls)
_object_setattr(m, '__dict__', deepcopy(self.__dict__, memo=memo))
_object_setattr(m, '__pydantic_extra__', deepcopy(self.__pydantic_extra__, memo=memo))
# This next line doesn't need a deepcopy because __pydantic_fields_set__ is a set[str],
# and attempting a deepcopy would be marginally slower.
_object_setattr(m, '__pydantic_fields_set__', copy(self.__pydantic_fields_set__))
if not hasattr(self, '__pydantic_private__') or self.__pydantic_private__ is None:
_object_setattr(m, '__pydantic_private__', None)
else:
_object_setattr(
m,
'__pydantic_private__',
deepcopy({k: v for k, v in self.__pydantic_private__.items() if v is not PydanticUndefined}, memo=memo),
)
return m
| (self: ~Model, memo: Optional[dict[int, Any]] = None) -> ~Model |
4,375 | pydantic.main | __delattr__ | null | def __delattr__(self, item: str) -> Any:
if item in self.__private_attributes__:
attribute = self.__private_attributes__[item]
if hasattr(attribute, '__delete__'):
attribute.__delete__(self) # type: ignore
return
try:
# Note: self.__pydantic_private__ cannot be None if self.__private_attributes__ has items
del self.__pydantic_private__[item] # type: ignore
return
except KeyError as exc:
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc
self._check_frozen(item, None)
if item in self.model_fields:
object.__delattr__(self, item)
elif self.__pydantic_extra__ is not None and item in self.__pydantic_extra__:
del self.__pydantic_extra__[item]
else:
try:
object.__delattr__(self, item)
except AttributeError:
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')
| (self, item: str) -> Any |
4,376 | pydantic.main | __eq__ | null | def __eq__(self, other: Any) -> bool:
if isinstance(other, BaseModel):
# When comparing instances of generic types for equality, as long as all field values are equal,
# only require their generic origin types to be equal, rather than exact type equality.
# This prevents headaches like MyGeneric(x=1) != MyGeneric[Any](x=1).
self_type = self.__pydantic_generic_metadata__['origin'] or self.__class__
other_type = other.__pydantic_generic_metadata__['origin'] or other.__class__
# Perform common checks first
if not (
self_type == other_type
and getattr(self, '__pydantic_private__', None) == getattr(other, '__pydantic_private__', None)
and self.__pydantic_extra__ == other.__pydantic_extra__
):
return False
# We only want to compare pydantic fields but ignoring fields is costly.
# We'll perform a fast check first, and fallback only when needed
# See GH-7444 and GH-7825 for rationale and a performance benchmark
# First, do the fast (and sometimes faulty) __dict__ comparison
if self.__dict__ == other.__dict__:
# If the check above passes, then pydantic fields are equal, we can return early
return True
# We don't want to trigger unnecessary costly filtering of __dict__ on all unequal objects, so we return
# early if there are no keys to ignore (we would just return False later on anyway)
model_fields = type(self).model_fields.keys()
if self.__dict__.keys() <= model_fields and other.__dict__.keys() <= model_fields:
return False
# If we reach here, there are non-pydantic-fields keys, mapped to unequal values, that we need to ignore
# Resort to costly filtering of the __dict__ objects
# We use operator.itemgetter because it is much faster than dict comprehensions
# NOTE: Contrary to standard python class and instances, when the Model class has a default value for an
# attribute and the model instance doesn't have a corresponding attribute, accessing the missing attribute
# raises an error in BaseModel.__getattr__ instead of returning the class attribute
# So we can use operator.itemgetter() instead of operator.attrgetter()
getter = operator.itemgetter(*model_fields) if model_fields else lambda _: _utils._SENTINEL
try:
return getter(self.__dict__) == getter(other.__dict__)
except KeyError:
# In rare cases (such as when using the deprecated BaseModel.copy() method),
# the __dict__ may not contain all model fields, which is how we can get here.
# getter(self.__dict__) is much faster than any 'safe' method that accounts
# for missing keys, and wrapping it in a `try` doesn't slow things down much
# in the common case.
self_fields_proxy = _utils.SafeGetItemProxy(self.__dict__)
other_fields_proxy = _utils.SafeGetItemProxy(other.__dict__)
return getter(self_fields_proxy) == getter(other_fields_proxy)
# other instance is not a BaseModel
else:
return NotImplemented # delegate to the other item in the comparison
| (self, other: Any) -> bool |
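A small illustration of the generic-origin comparison described in the comments above: a parametrized and an unparametrized instance of the same generic model compare equal when their field values match.
.. code-block:: python

    from typing import Any, Generic, TypeVar
    from pydantic import BaseModel

    T = TypeVar("T")

    class MyGeneric(BaseModel, Generic[T]):
        x: T

    # Only the generic origin types have to match, not the exact parametrization.
    assert MyGeneric(x=1) == MyGeneric[Any](x=1)
    assert MyGeneric(x=1) != MyGeneric(x=2)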
4,377 | dagster._config.pythonic_config.typing_utils | __get__ | null | def __get__(self: Self, obj: Any, owner: Any) -> Self:
# no-op implementation (only used to affect type signature)
return cast(Self, getattr(obj, self._assigned_name))
| (self: typing_extensions.Self, obj: Any, owner: Any) -> typing_extensions.Self |
4,378 | pydantic.main | __getattr__ | null | def __getattr__(self, item: str) -> Any:
private_attributes = object.__getattribute__(self, '__private_attributes__')
if item in private_attributes:
attribute = private_attributes[item]
if hasattr(attribute, '__get__'):
return attribute.__get__(self, type(self)) # type: ignore
try:
# Note: self.__pydantic_private__ cannot be None if self.__private_attributes__ has items
return self.__pydantic_private__[item] # type: ignore
except KeyError as exc:
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc
else:
# `__pydantic_extra__` can fail to be set if the model is not yet fully initialized.
# See `BaseModel.__repr_args__` for more details
try:
pydantic_extra = object.__getattribute__(self, '__pydantic_extra__')
except AttributeError:
pydantic_extra = None
if pydantic_extra:
try:
return pydantic_extra[item]
except KeyError as exc:
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc
else:
if hasattr(self.__class__, item):
return super().__getattribute__(item) # Raises AttributeError if appropriate
else:
# this is the current error
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')
| (self, item: str) -> Any |
4,379 | pydantic.main | __getstate__ | null | def __getstate__(self) -> dict[Any, Any]:
private = self.__pydantic_private__
if private:
private = {k: v for k, v in private.items() if v is not PydanticUndefined}
return {
'__dict__': self.__dict__,
'__pydantic_extra__': self.__pydantic_extra__,
'__pydantic_fields_set__': self.__pydantic_fields_set__,
'__pydantic_private__': private,
}
| (self) -> dict[typing.Any, typing.Any] |
4,380 | pydantic._internal._model_construction | hash_func | null | def make_hash_func(cls: type[BaseModel]) -> Any:
getter = operator.itemgetter(*cls.model_fields.keys()) if cls.model_fields else lambda _: 0
def hash_func(self: Any) -> int:
try:
return hash(getter(self.__dict__))
except KeyError:
# In rare cases (such as when using the deprecated copy method), the __dict__ may not contain
# all model fields, which is how we can get here.
# getter(self.__dict__) is much faster than any 'safe' method that accounts for missing keys,
# and wrapping it in a `try` doesn't slow things down much in the common case.
return hash(getter(SafeGetItemProxy(self.__dict__)))
return hash_func
| (self: Any) -> int |
4,381 | dagster._config.pythonic_config.io_manager | __init__ | null | def __init__(self, **data: Any):
ConfigurableResourceFactory.__init__(self, **data)
| (self, **data: Any) |
4,382 | pydantic.main | __iter__ | So `dict(model)` works. | def __iter__(self) -> TupleGenerator:
"""So `dict(model)` works."""
yield from [(k, v) for (k, v) in self.__dict__.items() if not k.startswith('_')]
extra = self.__pydantic_extra__
if extra:
yield from extra.items()
| (self) -> Generator[Tuple[str, Any], NoneType, NoneType] |
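A quick example of the behaviour: ``dict(model)`` yields the declared fields followed by any extra fields (a sketch assuming ``extra="allow"``).
.. code-block:: python

    from pydantic import BaseModel, ConfigDict

    class User(BaseModel):
        model_config = ConfigDict(extra="allow")
        name: str

    u = User(name="ada", nickname="a")  # "nickname" is stored in __pydantic_extra__
    # Declared fields come first, then extras.
    assert dict(u) == {"name": "ada", "nickname": "a"}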
4,383 | pydantic._internal._repr | __pretty__ | Used by devtools (https://python-devtools.helpmanual.io/) to pretty print objects. | def __pretty__(self, fmt: typing.Callable[[Any], Any], **kwargs: Any) -> typing.Generator[Any, None, None]:
"""Used by devtools (https://python-devtools.helpmanual.io/) to pretty print objects."""
yield self.__repr_name__() + '('
yield 1
for name, value in self.__repr_args__():
if name is not None:
yield name + '='
yield fmt(value)
yield ','
yield 0
yield -1
yield ')'
| (self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, NoneType, NoneType] |
4,384 | pydantic.main | __repr__ | null | def __repr__(self) -> str:
return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
| (self) -> str |
4,385 | pydantic.main | __repr_args__ | null | def __repr_args__(self) -> _repr.ReprArgs:
for k, v in self.__dict__.items():
field = self.model_fields.get(k)
if field and field.repr:
yield k, v
# `__pydantic_extra__` can fail to be set if the model is not yet fully initialized.
# This can happen if a `ValidationError` is raised during initialization and the instance's
# repr is generated as part of the exception handling. Therefore, we use `getattr` here
# with a fallback, even though the type hints indicate the attribute will always be present.
try:
pydantic_extra = object.__getattribute__(self, '__pydantic_extra__')
except AttributeError:
pydantic_extra = None
if pydantic_extra is not None:
yield from ((k, v) for k, v in pydantic_extra.items())
yield from ((k, getattr(self, k)) for k, v in self.model_computed_fields.items() if v.repr)
| (self) -> '_repr.ReprArgs' |
4,386 | pydantic._internal._repr | __repr_name__ | Name of the instance's class, used in __repr__. | def __repr_name__(self) -> str:
"""Name of the instance's class, used in __repr__."""
return self.__class__.__name__
| (self) -> str |
4,387 | pydantic._internal._repr | __repr_str__ | null | def __repr_str__(self, join_str: str) -> str:
return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
| (self, join_str: str) -> str |
4,388 | pydantic._internal._repr | __rich_repr__ | Used by Rich (https://rich.readthedocs.io/en/stable/pretty.html) to pretty print objects. | def __rich_repr__(self) -> RichReprResult:
"""Used by Rich (https://rich.readthedocs.io/en/stable/pretty.html) to pretty print objects."""
for name, field_repr in self.__repr_args__():
if name is None:
yield field_repr
else:
yield name, field_repr
| (self) -> 'RichReprResult' |
4,389 | dagster._config.pythonic_config.typing_utils | __set__ | null | def __set__(self, obj: Optional[object], value: Union[Any, "PartialResource[Any]"]) -> None:
# no-op implementation (only used to affect type signature)
setattr(obj, self._assigned_name, value)
| (self, obj: Optional[object], value: Union[Any, ForwardRef('PartialResource[Any]')]) -> None |
4,390 | dagster._config.pythonic_config.typing_utils | __set_name__ | null | def __set_name__(self, _owner, name):
self._assigned_name = name
| (self, _owner, name) |
4,391 | dagster._config.pythonic_config.config | __setattr__ | null | def __setattr__(self, name: str, value: Any):
from .resource import ConfigurableResourceFactory
# This is a hack to allow us to set attributes on the class that are not part of the
# config schema. Pydantic will normally raise an error if you try to set an attribute
# that is not part of the schema.
if _is_field_internal(name):
object.__setattr__(self, name, value)
return
try:
return super().__setattr__(name, value)
except (TypeError, ValueError) as e:
clsname = self.__class__.__name__
if _is_frozen_pydantic_error(e):
if isinstance(self, ConfigurableResourceFactory):
raise DagsterInvalidInvocationError(
f"'{clsname}' is a Pythonic resource and does not support item assignment,"
" as it inherits from 'pydantic.BaseModel' with frozen=True. If trying to"
" maintain state on this resource, consider building a separate, stateful"
" client class, and provide a method on the resource to construct and"
" return the stateful client."
) from e
else:
raise DagsterInvalidInvocationError(
f"'{clsname}' is a Pythonic config class and does not support item"
" assignment, as it inherits from 'pydantic.BaseModel' with frozen=True."
) from e
elif "object has no field" in str(e):
field_name = check.not_none(
re.search(r"object has no field \"(.*)\"", str(e))
).group(1)
if isinstance(self, ConfigurableResourceFactory):
raise DagsterInvalidInvocationError(
f"'{clsname}' is a Pythonic resource and does not support manipulating"
f" undeclared attribute '{field_name}' as it inherits from"
" 'pydantic.BaseModel' without extra=\"allow\". If trying to maintain"
" state on this resource, consider building a separate, stateful client"
" class, and provide a method on the resource to construct and return the"
" stateful client."
) from e
else:
raise DagsterInvalidInvocationError(
f"'{clsname}' is a Pythonic config class and does not support manipulating"
f" undeclared attribute '{field_name}' as it inherits from"
" 'pydantic.BaseModel' without extra=\"allow\"."
) from e
else:
raise
| (self, name: str, value: Any) |
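A sketch of the behaviour this hook produces for frozen Pythonic resources: plain attribute assignment is intercepted and re-raised as a ``DagsterInvalidInvocationError`` with guidance, rather than a bare pydantic error. The resource class here is illustrative.
.. code-block:: python

    from dagster import ConfigurableResource, DagsterInvalidInvocationError

    class MyClient(ConfigurableResource):
        url: str

    client = MyClient(url="https://example.com")
    try:
        client.url = "https://other.example.com"  # frozen pydantic model underneath
    except DagsterInvalidInvocationError as err:
        print(err)  # suggests keeping mutable state in a separate client class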
4,392 | pydantic.main | __setstate__ | null | def __setstate__(self, state: dict[Any, Any]) -> None:
_object_setattr(self, '__pydantic_fields_set__', state['__pydantic_fields_set__'])
_object_setattr(self, '__pydantic_extra__', state['__pydantic_extra__'])
_object_setattr(self, '__pydantic_private__', state['__pydantic_private__'])
_object_setattr(self, '__dict__', state['__dict__'])
| (self, state: dict[typing.Any, typing.Any]) -> NoneType |
4,393 | pydantic.main | __str__ | null | def __str__(self) -> str:
return self.__repr_str__(' ')
| (self) -> str |
4,394 | pydantic.main | _calculate_keys | null | @typing_extensions.deprecated(
'The private method `_calculate_keys` will be removed and should no longer be used.',
category=None,
)
def _calculate_keys(self, *args: Any, **kwargs: Any) -> Any:
warnings.warn(
'The private method `_calculate_keys` will be removed and should no longer be used.',
category=PydanticDeprecatedSince20,
)
from .deprecated import copy_internals
return copy_internals._calculate_keys(self, *args, **kwargs)
| (self, *args: Any, **kwargs: Any) -> Any |
4,395 | pydantic.main | _check_frozen | null | def _check_frozen(self, name: str, value: Any) -> None:
if self.model_config.get('frozen', None):
typ = 'frozen_instance'
elif getattr(self.model_fields.get(name), 'frozen', False):
typ = 'frozen_field'
else:
return
error: pydantic_core.InitErrorDetails = {
'type': typ,
'loc': (name,),
'input': value,
}
raise pydantic_core.ValidationError.from_exception_data(self.__class__.__name__, [error])
| (self, name: str, value: Any) -> NoneType |
4,396 | dagster._config.pythonic_config.config | _convert_to_config_dictionary | Converts this Config object to a Dagster config dictionary, in the same format as the dictionary
accepted as run config or as YAML in the launchpad.
Inner fields are recursively converted to dictionaries, meaning nested config objects
or EnvVars will be converted to the appropriate dictionary representation.
| def _convert_to_config_dictionary(self) -> Mapping[str, Any]:
"""Converts this Config object to a Dagster config dictionary, in the same format as the dictionary
accepted as run config or as YAML in the launchpad.
Inner fields are recursively converted to dictionaries, meaning nested config objects
or EnvVars will be converted to the appropriate dictionary representation.
"""
public_fields = self._get_non_default_public_field_values()
return {
k: _config_value_to_dict_representation(model_fields(self).get(k), v)
for k, v in public_fields.items()
}
| (self) -> Mapping[str, Any] |
4,397 | pydantic.main | _copy_and_set_values | null | @typing_extensions.deprecated(
'The private method `_copy_and_set_values` will be removed and should no longer be used.',
category=None,
)
def _copy_and_set_values(self, *args: Any, **kwargs: Any) -> Any:
warnings.warn(
'The private method `_copy_and_set_values` will be removed and should no longer be used.',
category=PydanticDeprecatedSince20,
)
from .deprecated import copy_internals
return copy_internals._copy_and_set_values(self, *args, **kwargs)
| (self, *args: Any, **kwargs: Any) -> Any |
4,398 | dagster._config.pythonic_config.resource | _get_initialize_and_run_fn | null | def _get_initialize_and_run_fn(self) -> Callable:
return self._initialize_and_run_cm if self._is_cm_resource else self._initialize_and_run
| (self) -> Callable |
4,399 | dagster._config.pythonic_config.config | _get_non_default_public_field_values | null | def _get_non_default_public_field_values(self) -> Mapping[str, Any]:
return self.__class__._get_non_default_public_field_values_cls(dict(self)) # noqa: SLF001
| (self) -> Mapping[str, Any] |
4,400 | dagster._config.pythonic_config.resource | _initialize_and_run | null | def _initialize_and_run(self, context: InitResourceContext) -> TResValue:
with self._resolve_and_update_nested_resources(context) as has_nested_resource:
updated_resource = has_nested_resource.with_replaced_resource_context( # noqa: SLF001
context
)._with_updated_values(context.resource_config)
updated_resource.setup_for_execution(context)
return updated_resource.create_resource(context)
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> ~TResValue |
4,401 | dagster._config.pythonic_config.resource | _initialize_and_run_cm | null | def _is_dagster_maintained(self) -> bool:
return self._dagster_maintained
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> Generator[~TResValue, NoneType, NoneType] |
4,402 | pydantic.main | _iter | null | @typing_extensions.deprecated(
'The private method `_iter` will be removed and should no longer be used.', category=None
)
def _iter(self, *args: Any, **kwargs: Any) -> Any:
warnings.warn(
'The private method `_iter` will be removed and should no longer be used.',
category=PydanticDeprecatedSince20,
)
from .deprecated import copy_internals
return copy_internals._iter(self, *args, **kwargs)
| (self, *args: Any, **kwargs: Any) -> Any |
4,403 | dagster._config.pythonic_config.resource | _resolve_and_update_nested_resources | Updates any nested resources with the resource values from the context.
In this case, populating partially configured resources or
resources that return plain Python types.
Returns a new instance of the resource.
| def _is_dagster_maintained(self) -> bool:
return self._dagster_maintained
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> Generator[dagster._config.pythonic_config.resource.ConfigurableResourceFactory, NoneType, NoneType] |
4,404 | dagster._config.pythonic_config.resource | _resolve_required_resource_keys | null | def _resolve_required_resource_keys(
self, resource_mapping: Mapping[int, str]
) -> AbstractSet[str]:
from dagster._core.execution.build_resources import wrap_resource_for_execution
# All dependent resources which are not fully configured
# must be specified to the Definitions object so that the
# resource can be configured at runtime by the user
nested_partial_resource_keys = {
attr_name: resource_mapping.get(id(resource_def))
for attr_name, resource_def in self._nested_partial_resources.items()
}
check.invariant(
all(pointer_key is not None for pointer_key in nested_partial_resource_keys.values()),
"Any partially configured, nested resources must be provided to Definitions"
f" object: {nested_partial_resource_keys}",
)
# Recursively get all nested resource keys
nested_resource_required_keys: Set[str] = set()
for v in self._nested_partial_resources.values():
nested_resource_required_keys.update(
_resolve_required_resource_keys_for_resource(v, resource_mapping)
)
resources, _ = separate_resource_params(
cast(Type[BaseModel], self.__class__), self.__dict__
)
for v in resources.values():
nested_resource_required_keys.update(
_resolve_required_resource_keys_for_resource(
wrap_resource_for_execution(v), resource_mapping
)
)
out = set(cast(Set[str], nested_partial_resource_keys.values())).union(
nested_resource_required_keys
)
return out
| (self, resource_mapping: Mapping[int, str]) -> AbstractSet[str] |
4,405 | dagster._config.pythonic_config.resource | _with_updated_values | Returns a new instance of the resource with the given values.
Used when initializing a resource at runtime.
| def _with_updated_values(
self, values: Optional[Mapping[str, Any]]
) -> "ConfigurableResourceFactory[TResValue]":
"""Returns a new instance of the resource with the given values.
Used when initializing a resource at runtime.
"""
values = check.opt_mapping_param(values, "values", key_type=str)
# Since Resource extends BaseModel and is a dataclass, we know that the
# signature of any __init__ method will always consist of the fields
# of this class. We can therefore safely pass in the values as kwargs.
to_populate = self.__class__._get_non_default_public_field_values_cls( # noqa: SLF001
{**self._get_non_default_public_field_values(), **values}
)
out = self.__class__(**to_populate)
out._state__internal__ = out._state__internal__._replace( # noqa: SLF001
resource_context=self._state__internal__.resource_context
)
return out
| (self, values: Optional[Mapping[str, Any]]) -> dagster._config.pythonic_config.resource.ConfigurableResourceFactory |
4,406 | pydantic.main | copy | Returns a copy of the model.
!!! warning "Deprecated"
This method is now deprecated; use `model_copy` instead.
If you need `include` or `exclude`, use:
```py
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
data = {**data, **(update or {})}
copied = self.model_validate(data)
```
Args:
include: Optional set or mapping specifying which fields to include in the copied model.
exclude: Optional set or mapping specifying which fields to exclude in the copied model.
update: Optional dictionary of field-value pairs to override field values in the copied model.
deep: If True, the values of fields that are Pydantic models will be deep-copied.
Returns:
A copy of the model with included, excluded and updated fields as specified.
| @typing_extensions.deprecated(
'The `copy` method is deprecated; use `model_copy` instead. '
'See the docstring of `BaseModel.copy` for details about how to handle `include` and `exclude`.',
category=None,
)
def copy(
self: Model,
*,
include: AbstractSetIntStr | MappingIntStrAny | None = None,
exclude: AbstractSetIntStr | MappingIntStrAny | None = None,
update: typing.Dict[str, Any] | None = None, # noqa UP006
deep: bool = False,
) -> Model: # pragma: no cover
"""Returns a copy of the model.
!!! warning "Deprecated"
This method is now deprecated; use `model_copy` instead.
If you need `include` or `exclude`, use:
```py
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
data = {**data, **(update or {})}
copied = self.model_validate(data)
```
Args:
include: Optional set or mapping specifying which fields to include in the copied model.
exclude: Optional set or mapping specifying which fields to exclude in the copied model.
update: Optional dictionary of field-value pairs to override field values in the copied model.
deep: If True, the values of fields that are Pydantic models will be deep-copied.
Returns:
A copy of the model with included, excluded and updated fields as specified.
"""
warnings.warn(
'The `copy` method is deprecated; use `model_copy` instead. '
'See the docstring of `BaseModel.copy` for details about how to handle `include` and `exclude`.',
category=PydanticDeprecatedSince20,
)
from .deprecated import copy_internals
values = dict(
copy_internals._iter(
self, to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False
),
**(update or {}),
)
if self.__pydantic_private__ is None:
private = None
else:
private = {k: v for k, v in self.__pydantic_private__.items() if v is not PydanticUndefined}
if self.__pydantic_extra__ is None:
extra: dict[str, Any] | None = None
else:
extra = self.__pydantic_extra__.copy()
for k in list(self.__pydantic_extra__):
if k not in values: # k was in the exclude
extra.pop(k)
for k in list(values):
if k in self.__pydantic_extra__: # k must have come from extra
extra[k] = values.pop(k)
# new `__pydantic_fields_set__` can have unset optional fields with a set value in `update` kwarg
if update:
fields_set = self.__pydantic_fields_set__ | update.keys()
else:
fields_set = set(self.__pydantic_fields_set__)
# removing excluded fields from `__pydantic_fields_set__`
if exclude:
fields_set -= set(exclude)
return copy_internals._copy_and_set_values(self, values, fields_set, extra, private, deep=deep)
| (self: 'Model', *, include: 'AbstractSetIntStr | MappingIntStrAny | None' = None, exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None, update: 'typing.Dict[str, Any] | None' = None, deep: 'bool' = False) -> 'Model' |
4,407 | dagster_snowflake.snowflake_io_manager | create_io_manager | null | def create_io_manager(self, context) -> DbIOManager:
return DbIOManager(
db_client=SnowflakeDbClient(),
io_manager_name="SnowflakeIOManager",
database=self.database,
schema=self.schema_,
type_handlers=self.type_handlers(),
default_load_type=self.default_load_type(),
)
| (self, context) -> dagster._core.storage.db_io_manager.DbIOManager |
4,408 | dagster._config.pythonic_config.io_manager | create_resource | null | def create_resource(self, context: InitResourceContext) -> TResValue:
return self.create_io_manager(context)
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> ~TResValue |
4,409 | dagster_snowflake.snowflake_io_manager | default_load_type | If an asset or op is not annotated with a return type, default_load_type will be used to
determine which TypeHandler to use to store and load the output.
If left unimplemented, default_load_type will return None. In that case, if there is only
one TypeHandler, the I/O manager will default to loading unannotated outputs with that
TypeHandler.
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
import pandas as pd
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@staticmethod
def default_load_type() -> Optional[Type]:
return pd.DataFrame
| @staticmethod
def default_load_type() -> Optional[Type]:
"""If an asset or op is not annotated with an return type, default_load_type will be used to
determine which TypeHandler to use to store and load the output.
If left unimplemented, default_load_type will return None. In that case, if there is only
one TypeHandler, the I/O manager will default to loading unannotated outputs with that
TypeHandler.
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
import pandas as pd
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@staticmethod
def default_load_type() -> Optional[Type]:
return pd.DataFrame
"""
return None
| () -> Optional[Type] |
4,410 | pydantic.main | dict | null | @typing_extensions.deprecated('The `dict` method is deprecated; use `model_dump` instead.', category=None)
def dict( # noqa: D102
self,
*,
include: IncEx = None,
exclude: IncEx = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> typing.Dict[str, Any]: # noqa UP006
warnings.warn('The `dict` method is deprecated; use `model_dump` instead.', category=PydanticDeprecatedSince20)
return self.model_dump(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
| (self, *, include: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, exclude: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False) -> Dict[str, Any] |
4,411 | dagster._config.pythonic_config.resource | get_resource_context | Returns the context that this resource was initialized with. | def get_resource_context(self) -> InitResourceContext:
"""Returns the context that this resource was initialized with."""
return check.not_none(
self._state__internal__.resource_context,
additional_message="Attempted to get context before resource was initialized.",
)
| (self) -> dagster._core.execution.context.init.InitResourceContext |
4,412 | dagster._config.pythonic_config.io_manager | get_resource_definition | null | def __init__(
self,
configurable_resource_cls: Type,
resource_fn: ResourceFunction,
config_schema: Any,
description: Optional[str],
resolve_resource_keys: Callable[[Mapping[int, str]], AbstractSet[str]],
nested_resources: Mapping[str, Any],
input_config_schema: Optional[Union[CoercableToConfigSchema, Type[Config]]] = None,
output_config_schema: Optional[Union[CoercableToConfigSchema, Type[Config]]] = None,
dagster_maintained: bool = False,
):
input_config_schema_resolved: CoercableToConfigSchema = (
cast(Type[Config], input_config_schema).to_config_schema()
if safe_is_subclass(input_config_schema, Config)
else cast(CoercableToConfigSchema, input_config_schema)
)
output_config_schema_resolved: CoercableToConfigSchema = (
cast(Type[Config], output_config_schema).to_config_schema()
if safe_is_subclass(output_config_schema, Config)
else cast(CoercableToConfigSchema, output_config_schema)
)
super().__init__(
resource_fn=resource_fn,
config_schema=config_schema,
description=description,
input_config_schema=input_config_schema_resolved,
output_config_schema=output_config_schema_resolved,
)
self._resolve_resource_keys = resolve_resource_keys
self._nested_resources = nested_resources
self._configurable_resource_cls = configurable_resource_cls
self._dagster_maintained = dagster_maintained
| (self) -> dagster._config.pythonic_config.io_manager.ConfigurableIOManagerFactoryResourceDefinition |
4,413 | pydantic.main | json | null | @typing_extensions.deprecated('The `json` method is deprecated; use `model_dump_json` instead.', category=None)
def json( # noqa: D102
self,
*,
include: IncEx = None,
exclude: IncEx = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
encoder: typing.Callable[[Any], Any] | None = PydanticUndefined, # type: ignore[assignment]
models_as_dict: bool = PydanticUndefined, # type: ignore[assignment]
**dumps_kwargs: Any,
) -> str:
warnings.warn(
'The `json` method is deprecated; use `model_dump_json` instead.', category=PydanticDeprecatedSince20
)
if encoder is not PydanticUndefined:
raise TypeError('The `encoder` argument is no longer supported; use field serializers instead.')
if models_as_dict is not PydanticUndefined:
raise TypeError('The `models_as_dict` argument is no longer supported; use a model serializer instead.')
if dumps_kwargs:
raise TypeError('`dumps_kwargs` keyword arguments are no longer supported.')
return self.model_dump_json(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
| (self, *, include: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, exclude: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = PydanticUndefined, models_as_dict: bool = PydanticUndefined, **dumps_kwargs: Any) -> str |
4,414 | pydantic.main | model_copy | Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#model_copy
Returns a copy of the model.
Args:
update: Values to change/add in the new model. Note: the data is not validated
before creating the new model. You should trust this data.
deep: Set to `True` to make a deep copy of the model.
Returns:
New model instance.
| def model_copy(self: Model, *, update: dict[str, Any] | None = None, deep: bool = False) -> Model:
"""Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#model_copy
Returns a copy of the model.
Args:
update: Values to change/add in the new model. Note: the data is not validated
before creating the new model. You should trust this data.
deep: Set to `True` to make a deep copy of the model.
Returns:
New model instance.
"""
copied = self.__deepcopy__() if deep else self.__copy__()
if update:
if self.model_config.get('extra') == 'allow':
for k, v in update.items():
if k in self.model_fields:
copied.__dict__[k] = v
else:
if copied.__pydantic_extra__ is None:
copied.__pydantic_extra__ = {}
copied.__pydantic_extra__[k] = v
else:
copied.__dict__.update(update)
copied.__pydantic_fields_set__.update(update.keys())
return copied
| (self: ~Model, *, update: Optional[dict[str, Any]] = None, deep: bool = False) -> ~Model |
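A short example; note that, as the docstring says, ``update`` values bypass validation, so they should be trusted data.
.. code-block:: python

    from pydantic import BaseModel

    class Point(BaseModel):
        x: int
        y: int = 0

    p = Point(x=1)
    p2 = p.model_copy(update={"y": 5})   # shallow copy with one field overridden
    assert (p2.x, p2.y) == (1, 5)
    assert p.y == 0                      # the original instance is unchanged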
4,415 | pydantic.main | model_dump | Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#modelmodel_dump
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
Args:
mode: The mode in which `to_python` should run.
If mode is 'json', the output will only contain JSON serializable types.
If mode is 'python', the output may contain non-JSON-serializable Python objects.
include: A set of fields to include in the output.
exclude: A set of fields to exclude from the output.
context: Additional context to pass to the serializer.
by_alias: Whether to use the field's alias in the dictionary key if defined.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value.
exclude_none: Whether to exclude fields that have a value of `None`.
round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A dictionary representation of the model.
| def model_dump(
self,
*,
mode: Literal['json', 'python'] | str = 'python',
include: IncEx = None,
exclude: IncEx = None,
context: dict[str, Any] | None = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
round_trip: bool = False,
warnings: bool | Literal['none', 'warn', 'error'] = True,
serialize_as_any: bool = False,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#modelmodel_dump
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
Args:
mode: The mode in which `to_python` should run.
If mode is 'json', the output will only contain JSON serializable types.
If mode is 'python', the output may contain non-JSON-serializable Python objects.
include: A set of fields to include in the output.
exclude: A set of fields to exclude from the output.
context: Additional context to pass to the serializer.
by_alias: Whether to use the field's alias in the dictionary key if defined.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value.
exclude_none: Whether to exclude fields that have a value of `None`.
round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A dictionary representation of the model.
"""
return self.__pydantic_serializer__.to_python(
self,
mode=mode,
by_alias=by_alias,
include=include,
exclude=exclude,
context=context,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
round_trip=round_trip,
warnings=warnings,
serialize_as_any=serialize_as_any,
)
| (self, *, mode: Union[Literal['json', 'python'], str] = 'python', include: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, exclude: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, context: Optional[dict[str, Any]] = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: Union[bool, Literal['none', 'warn', 'error']] = True, serialize_as_any: bool = False) -> dict[str, typing.Any] |
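A short example of the two dump modes and ``exclude_none`` (the model here is illustrative):
.. code-block:: python

    import datetime
    from typing import Optional
    from pydantic import BaseModel

    class Event(BaseModel):
        name: str
        when: datetime.date
        note: Optional[str] = None

    e = Event(name="launch", when=datetime.date(2024, 1, 1))
    # mode="python" keeps native Python objects; mode="json" limits output to JSON-safe types.
    assert e.model_dump()["when"] == datetime.date(2024, 1, 1)
    assert e.model_dump(mode="json", exclude_none=True) == {"name": "launch", "when": "2024-01-01"}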
4,416 | pydantic.main | model_dump_json | Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#modelmodel_dump_json
Generates a JSON representation of the model using Pydantic's `to_json` method.
Args:
indent: Indentation to use in the JSON output. If None is passed, the output will be compact.
include: Field(s) to include in the JSON output.
exclude: Field(s) to exclude from the JSON output.
context: Additional context to pass to the serializer.
by_alias: Whether to serialize using field aliases.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value.
exclude_none: Whether to exclude fields that have a value of `None`.
round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A JSON string representation of the model.
| def model_dump_json(
self,
*,
indent: int | None = None,
include: IncEx = None,
exclude: IncEx = None,
context: dict[str, Any] | None = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
round_trip: bool = False,
warnings: bool | Literal['none', 'warn', 'error'] = True,
serialize_as_any: bool = False,
) -> str:
"""Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#modelmodel_dump_json
Generates a JSON representation of the model using Pydantic's `to_json` method.
Args:
indent: Indentation to use in the JSON output. If None is passed, the output will be compact.
include: Field(s) to include in the JSON output.
exclude: Field(s) to exclude from the JSON output.
context: Additional context to pass to the serializer.
by_alias: Whether to serialize using field aliases.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value.
exclude_none: Whether to exclude fields that have a value of `None`.
round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A JSON string representation of the model.
"""
return self.__pydantic_serializer__.to_json(
self,
indent=indent,
include=include,
exclude=exclude,
context=context,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
round_trip=round_trip,
warnings=warnings,
serialize_as_any=serialize_as_any,
).decode()
| (self, *, indent: Optional[int] = None, include: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, exclude: Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any], NoneType] = None, context: Optional[dict[str, Any]] = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: Union[bool, Literal['none', 'warn', 'error']] = True, serialize_as_any: bool = False) -> str |
4,417 | pydantic.main | model_post_init | Override this method to perform additional initialization after `__init__` and `model_construct`.
This is useful if you want to do some validation that requires the entire model to be initialized.
| def model_post_init(self, __context: Any) -> None:
"""Override this method to perform additional initialization after `__init__` and `model_construct`.
This is useful if you want to do some validation that requires the entire model to be initialized.
"""
pass
| (self, _BaseModel__context: Any) -> NoneType |
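A minimal example of the intended use: validation that needs every field to be set already.
.. code-block:: python

    from typing import Any
    from pydantic import BaseModel

    class Range(BaseModel):
        low: int
        high: int

        def model_post_init(self, __context: Any) -> None:
            # Runs after normal field validation, so cross-field checks fit here.
            if self.low > self.high:
                raise ValueError("low must not exceed high")

    Range(low=1, high=3)       # ok
    # Range(low=5, high=2)     # raises ValueError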
4,418 | dagster._config.pythonic_config.resource | process_config_and_initialize | Initializes this resource, fully processing its config and returning the prepared
resource value.
| def process_config_and_initialize(self) -> TResValue:
"""Initializes this resource, fully processing its config and returning the prepared
resource value.
"""
from dagster._config.post_process import post_process_config
return self.from_resource_context(
build_init_resource_context(
config=post_process_config(
self._config_schema.config_type, self._convert_to_config_dictionary()
).value
)
)
| (self) -> ~TResValue |
4,419 | dagster._config.pythonic_config.resource | setup_for_execution | Optionally override this method to perform any pre-execution steps
needed before the resource is used in execution.
| def setup_for_execution(self, context: InitResourceContext) -> None:
"""Optionally override this method to perform any pre-execution steps
needed before the resource is used in execution.
"""
pass
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> NoneType |
4,420 | dagster._config.pythonic_config.resource | teardown_after_execution | Optionally override this method to perform any post-execution steps
needed after the resource is used in execution.
teardown_after_execution will be called even if any part of the run fails.
It will not be called if setup_for_execution fails.
| def teardown_after_execution(self, context: InitResourceContext) -> None:
"""Optionally override this method to perform any post-execution steps
needed after the resource is used in execution.
teardown_after_execution will be called even if any part of the run fails.
It will not be called if setup_for_execution fails.
"""
pass
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> NoneType |
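A sketch of the setup/teardown lifecycle with a hypothetical resource that opens a file handle before execution and closes it afterwards, using ``PrivateAttr`` to hold the non-config state:
.. code-block:: python

    from typing import Any, Optional
    from pydantic import PrivateAttr
    from dagster import ConfigurableResource, InitResourceContext

    class AuditLogResource(ConfigurableResource):
        path: str
        _handle: Optional[Any] = PrivateAttr(default=None)

        def setup_for_execution(self, context: InitResourceContext) -> None:
            # Acquire state once, before any op or asset uses the resource.
            self._handle = open(self.path, "a")

        def append(self, line: str) -> None:
            self._handle.write(line + "\n")

        def teardown_after_execution(self, context: InitResourceContext) -> None:
            # Runs even if the run fails, but not if setup_for_execution raised.
            if self._handle is not None:
                self._handle.close()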
4,421 | dagster_snowflake.snowflake_io_manager | type_handlers | type_handlers should return a list of the TypeHandlers that the I/O manager can use.
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
| @staticmethod
@abstractmethod
def type_handlers() -> Sequence[DbTypeHandler]:
"""type_handlers should return a list of the TypeHandlers that the I/O manager can use.
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
"""
...
| () -> Sequence[dagster._core.storage.db_io_manager.DbTypeHandler] |
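Continuing the docstring example above, a hedged sketch of wiring the custom I/O manager into a code location; ``my_table`` is a hypothetical asset and ``MySnowflakeIOManager`` is the subclass defined in the docstring.

.. code-block:: python

    from dagster import Definitions, EnvVar

    defs = Definitions(
        assets=[my_table],  # hypothetical asset defined elsewhere
        resources={
            "io_manager": MySnowflakeIOManager(
                database="MY_DATABASE",
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                password=EnvVar("SNOWFLAKE_PASSWORD"),
            )
        },
    )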
4,422 | dagster._config.pythonic_config.resource | with_replaced_resource_context | Returns a new instance of the resource with the given resource init context bound. | def with_replaced_resource_context(
self, resource_context: InitResourceContext
) -> "ConfigurableResourceFactory[TResValue]":
"""Returns a new instance of the resource with the given resource init context bound."""
# This utility is used to create a copy of this resource, without adjusting
# any values in this case
copy = self._with_updated_values({})
copy._state__internal__ = copy._state__internal__._replace( # noqa: SLF001
resource_context=resource_context
)
return copy
| (self, resource_context: dagster._core.execution.context.init.InitResourceContext) -> dagster._config.pythonic_config.resource.ConfigurableResourceFactory |
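A brief sketch of binding a context, assuming a trivial ``ConfigurableResource`` subclass (``MyResource`` is hypothetical); ``build_init_resource_context`` is dagster's utility constructor for init resource contexts.

.. code-block:: python

    from dagster import ConfigurableResource, build_init_resource_context

    class MyResource(ConfigurableResource):
        # Hypothetical resource used only to show the context binding
        value: str

    ctx = build_init_resource_context()
    bound = MyResource(value="x").with_replaced_resource_context(ctx)
    # "bound" is a copy with the init context attached; the original instance is unchanged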
4,423 | dagster._config.pythonic_config.resource | with_resource_context | null | @property
def required_resource_keys(self) -> AbstractSet[str]:
return _resolve_required_resource_keys_for_resource(
self._resource, self._resource_id_to_key_mapping
)
| (self, resource_context: dagster._core.execution.context.init.InitResourceContext) -> dagster._config.pythonic_config.resource.ConfigurableResourceFactory |
4,424 | dagster._config.pythonic_config.resource | yield_for_execution | Optionally override this method to perform any lifecycle steps
before or after the resource is used in execution. By default, calls
setup_for_execution before yielding, and teardown_after_execution after yielding.
Note that if you override this method and want setup_for_execution or
teardown_after_execution to be called, you must invoke them yourself.
| def _is_dagster_maintained(self) -> bool:
return self._dagster_maintained
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> Generator[~TResValue, NoneType, NoneType] |
4,425 | dagster_snowflake.resources | SnowflakeResource | A resource for connecting to the Snowflake data warehouse.
If connector configuration is not set, SnowflakeResource.get_connection() will return a
`snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__
object. If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will
return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__
or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__.
A simple example of loading data into Snowflake and subsequently querying that data is shown below:
Examples:
.. code-block:: python
from dagster import job, op
from dagster_snowflake import SnowflakeResource
@op
def get_one(snowflake_resource: SnowflakeResource):
with snowflake_resource.get_connection() as conn:
# conn is a snowflake.connector.Connection object
conn.cursor().execute("SELECT 1")
@job
def my_snowflake_job():
get_one()
my_snowflake_job.execute_in_process(
resources={
'snowflake_resource': SnowflakeResource(
account=EnvVar("SNOWFLAKE_ACCOUNT"),
user=EnvVar("SNOWFLAKE_USER"),
password=EnvVar("SNOWFLAKE_PASSWORD")
database="MY_DATABASE",
schema="MY_SCHEMA",
warehouse="MY_WAREHOUSE"
)
}
)
| class SnowflakeResource(ConfigurableResource, IAttachDifferentObjectToOpContext):
"""A resource for connecting to the Snowflake data warehouse.
If connector configuration is not set, SnowflakeResource.get_connection() will return a
`snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__
object. If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will
return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__
or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__.
A simple example of loading data into Snowflake and subsequently querying that data is shown below:
Examples:
.. code-block:: python
from dagster import job, op
from dagster_snowflake import SnowflakeResource
@op
def get_one(snowflake_resource: SnowflakeResource):
with snowflake_resource.get_connection() as conn:
# conn is a snowflake.connector.Connection object
conn.cursor().execute("SELECT 1")
@job
def my_snowflake_job():
get_one()
my_snowflake_job.execute_in_process(
resources={
'snowflake_resource': SnowflakeResource(
account=EnvVar("SNOWFLAKE_ACCOUNT"),
user=EnvVar("SNOWFLAKE_USER"),
password=EnvVar("SNOWFLAKE_PASSWORD")
database="MY_DATABASE",
schema="MY_SCHEMA",
warehouse="MY_WAREHOUSE"
)
}
)
"""
account: Optional[str] = Field(
default=None,
description=(
"Your Snowflake account name. For more details, see the `Snowflake documentation."
" <https://docs.snowflake.com/developer-guide/python-connector/python-connector-api>`__"
),
)
user: str = Field(description="User login name.")
password: Optional[str] = Field(default=None, description="User password.")
database: Optional[str] = Field(
default=None,
description=(
"Name of the default database to use. After login, you can use ``USE DATABASE`` "
" to change the database."
),
)
schema_: Optional[str] = Field(
default=None,
description=(
"Name of the default schema to use. After login, you can use ``USE SCHEMA`` to "
"change the schema."
),
alias="schema",
) # schema is a reserved word for pydantic
role: Optional[str] = Field(
default=None,
description=(
"Name of the default role to use. After login, you can use ``USE ROLE`` to change "
" the role."
),
)
warehouse: Optional[str] = Field(
default=None,
description=(
"Name of the default warehouse to use. After login, you can use ``USE WAREHOUSE`` "
"to change the role."
),
)
private_key: Optional[str] = Field(
default=None,
description=(
"Raw private key to use. See the `Snowflake documentation"
" <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."
" Alternately, set private_key_path and private_key_password. To avoid issues with"
" newlines in the keys, you can base64 encode the key. You can retrieve the base64"
" encoded key with this shell command: ``cat rsa_key.p8 | base64``"
),
)
private_key_password: Optional[str] = Field(
default=None,
description=(
"Raw private key password to use. See the `Snowflake documentation"
" <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."
" Required for both ``private_key`` and ``private_key_path`` if the private key is"
" encrypted. For unencrypted keys, this config can be omitted or set to None."
),
)
private_key_path: Optional[str] = Field(
default=None,
description=(
"Raw private key path to use. See the `Snowflake documentation"
" <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."
" Alternately, set the raw private key as ``private_key``."
),
)
autocommit: Optional[bool] = Field(
default=None,
description=(
"None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True "
"or False to enable or disable autocommit mode in the session, respectively."
),
)
client_prefetch_threads: Optional[int] = Field(
default=None,
description=(
"Number of threads used to download the results sets (4 by default). "
"Increasing the value improves fetch performance but requires more memory."
),
)
client_session_keep_alive: Optional[bool] = Field(
default=None,
description=(
"False by default. Set this to True to keep the session active indefinitely, "
"even if there is no activity from the user. Make certain to call the close method to "
"terminate the thread properly or the process may hang."
),
)
login_timeout: Optional[int] = Field(
default=None,
description=(
"Timeout in seconds for login. By default, 60 seconds. The login request gives "
'up after the timeout length if the HTTP response is "success".'
),
)
network_timeout: Optional[int] = Field(
default=None,
description=(
"Timeout in seconds for all other operations. By default, none/infinite. A general"
" request gives up after the timeout length if the HTTP response is not 'success'."
),
)
ocsp_response_cache_filename: Optional[str] = Field(
default=None,
description=(
"URI for the OCSP response cache file. By default, the OCSP response cache "
"file is created in the cache directory."
),
)
validate_default_parameters: Optional[bool] = Field(
default=None,
description=(
"If True, raise an exception if the warehouse, database, or schema doesn't exist."
" Defaults to False."
),
)
paramstyle: Optional[str] = Field(
default=None,
description=(
"pyformat by default for client side binding. Specify qmark or numeric to "
"change bind variable formats for server side binding."
),
)
timezone: Optional[str] = Field(
default=None,
description=(
"None by default, which honors the Snowflake parameter TIMEZONE. Set to a "
"valid time zone (e.g. America/Los_Angeles) to set the session time zone."
),
)
connector: Optional[str] = Field(
default=None,
description=(
"Indicate alternative database connection engine. Permissible option is "
"'sqlalchemy' otherwise defaults to use the Snowflake Connector for Python."
),
is_required=False,
)
cache_column_metadata: Optional[str] = Field(
default=None,
description=(
"Optional parameter when connector is set to sqlalchemy. Snowflake SQLAlchemy takes a"
" flag ``cache_column_metadata=True`` such that all of column metadata for all tables"
' are "cached"'
),
)
numpy: Optional[bool] = Field(
default=None,
description=(
"Optional parameter when connector is set to sqlalchemy. To enable fetching "
"NumPy data types, add numpy=True to the connection parameters."
),
)
authenticator: Optional[str] = Field(
default=None,
description="Optional parameter to specify the authentication mechanism to use.",
)
@validator("paramstyle")
def validate_paramstyle(cls, v: Optional[str]) -> Optional[str]:
valid_config = ["pyformat", "qmark", "numeric"]
if v is not None and v not in valid_config:
raise ValueError(
"Snowflake Resource: 'paramstyle' configuration value must be one of:"
f" {','.join(valid_config)}."
)
return v
@validator("connector")
def validate_connector(cls, v: Optional[str]) -> Optional[str]:
if v is not None and v != "sqlalchemy":
raise ValueError(
"Snowflake Resource: 'connector' configuration value must be None or sqlalchemy."
)
return v
@compat_model_validator(mode="before")
def validate_authentication(cls, values):
auths_set = 0
auths_set += 1 if values.get("password") is not None else 0
auths_set += 1 if values.get("private_key") is not None else 0
auths_set += 1 if values.get("private_key_path") is not None else 0
# if authenticator is set, there can be 0 or 1 additional auth method;
# otherwise, ensure at least 1 method is provided
check.invariant(
auths_set > 0 or values.get("authenticator") is not None,
"Missing config: Password, private key, or authenticator authentication required"
" for Snowflake resource.",
)
# ensure that only 1 non-authenticator method is provided
check.invariant(
auths_set <= 1,
"Incorrect config: Cannot provide both password and private key authentication to"
" Snowflake Resource.",
)
return values
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@property
@cached_method
def _connection_args(self) -> Mapping[str, Any]:
conn_args = {
k: self._resolved_config_dict.get(k)
for k in (
"account",
"user",
"password",
"database",
"schema",
"role",
"warehouse",
"autocommit",
"client_prefetch_threads",
"client_session_keep_alive",
"login_timeout",
"network_timeout",
"ocsp_response_cache_filename",
"validate_default_parameters",
"paramstyle",
"timezone",
"authenticator",
)
if self._resolved_config_dict.get(k) is not None
}
if (
self._resolved_config_dict.get("private_key", None) is not None
or self._resolved_config_dict.get("private_key_path", None) is not None
):
conn_args["private_key"] = self._snowflake_private_key(self._resolved_config_dict)
return conn_args
@property
@cached_method
def _sqlalchemy_connection_args(self) -> Mapping[str, Any]:
conn_args: Dict[str, Any] = {
k: self._resolved_config_dict.get(k)
for k in (
"account",
"user",
"password",
"database",
"schema",
"role",
"warehouse",
"cache_column_metadata",
"numpy",
)
if self._resolved_config_dict.get(k) is not None
}
return conn_args
@property
@cached_method
def _sqlalchemy_engine_args(self) -> Mapping[str, Any]:
config = self._resolved_config_dict
sqlalchemy_engine_args = {}
if (
config.get("private_key", None) is not None
or config.get("private_key_path", None) is not None
):
# sqlalchemy passes private key args separately, so store them in a new dict
sqlalchemy_engine_args["private_key"] = self._snowflake_private_key(config)
if config.get("authenticator", None) is not None:
sqlalchemy_engine_args["authenticator"] = config["authenticator"]
return sqlalchemy_engine_args
def _snowflake_private_key(self, config) -> bytes:
# If the user has defined a path to a private key, we will use that.
if config.get("private_key_path", None) is not None:
# read the file from the path.
with open(config.get("private_key_path"), "rb") as key:
private_key = key.read()
else:
private_key = config.get("private_key", None)
kwargs = {}
if config.get("private_key_password", None) is not None:
kwargs["password"] = config["private_key_password"].encode()
else:
kwargs["password"] = None
try:
p_key = serialization.load_pem_private_key(
private_key, backend=default_backend(), **kwargs
)
except TypeError:
try:
private_key = base64.b64decode(private_key)
p_key = serialization.load_pem_private_key(
private_key, backend=default_backend(), **kwargs
)
except ValueError:
raise ValueError(
"Unable to load private key. You may need to base64 encode your private key."
" You can retrieve the base64 encoded key with this shell command: cat"
" rsa_key.p8 | base64"
)
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
return pkb
@public
@contextmanager
def get_connection(
self, raw_conn: bool = True
) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:
"""Gets a connection to Snowflake as a context manager.
If connector configuration is not set, SnowflakeResource.get_connection() will return a
`snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__
If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will
return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__
or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__
if raw_conn=True.
Args:
raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw
connection. Defaults to True.
Examples:
.. code-block:: python
@op
def get_query_status(snowflake: SnowflakeResource, query_id):
with snowflake.get_connection() as conn:
# conn is a Snowflake Connection object or a SQLAlchemy Connection if
# sqlalchemy is specified as the connector in the Snowflake Resource config
return conn.get_query_status(query_id)
"""
if self.connector == "sqlalchemy":
from snowflake.sqlalchemy import URL
from sqlalchemy import create_engine
engine = create_engine(
URL(**self._sqlalchemy_connection_args), connect_args=self._sqlalchemy_engine_args
)
conn = engine.raw_connection() if raw_conn else engine.connect()
yield conn
conn.close()
engine.dispose()
else:
conn = snowflake.connector.connect(**self._connection_args)
yield conn
if not self.autocommit:
conn.commit()
conn.close()
def get_object_to_set_on_execution_context(self) -> Any:
# Directly create a SnowflakeConnection here for backcompat since the SnowflakeConnection
# has methods this resource does not have
return SnowflakeConnection(
config=self._resolved_config_dict,
log=get_dagster_logger(),
snowflake_connection_resource=self,
)
| (*, account: Optional[str] = None, user: str, password: Optional[str] = None, database: Optional[str] = None, schema: Optional[str] = None, role: Optional[str] = None, warehouse: Optional[str] = None, private_key: Optional[str] = None, private_key_password: Optional[str] = None, private_key_path: Optional[str] = None, autocommit: Optional[bool] = None, client_prefetch_threads: Optional[int] = None, client_session_keep_alive: Optional[bool] = None, login_timeout: Optional[int] = None, network_timeout: Optional[int] = None, ocsp_response_cache_filename: Optional[str] = None, validate_default_parameters: Optional[bool] = None, paramstyle: Optional[str] = None, timezone: Optional[str] = None, connector: Optional[str] = None, cache_column_metadata: Optional[str] = None, numpy: Optional[bool] = None, authenticator: Optional[str] = None) -> None |
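In addition to the password-based example in the docstring, a hedged sketch of configuring the resource with key-pair authentication (one of the auth options validated above); the asset, table, and environment variable names are hypothetical.

.. code-block:: python

    from dagster import Definitions, EnvVar, asset
    from dagster_snowflake import SnowflakeResource

    @asset
    def row_count(snowflake: SnowflakeResource) -> None:
        with snowflake.get_connection() as conn:
            conn.cursor().execute("SELECT COUNT(*) FROM MY_TABLE")

    defs = Definitions(
        assets=[row_count],
        resources={
            "snowflake": SnowflakeResource(
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                # key-pair auth instead of a password
                private_key_path=EnvVar("SNOWFLAKE_PRIVATE_KEY_PATH"),
                private_key_password=EnvVar("SNOWFLAKE_PRIVATE_KEY_PASSWORD"),
                database="MY_DATABASE",
                warehouse="MY_WAREHOUSE",
            )
        },
    )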
4,434 | dagster._config.pythonic_config.resource | __init__ | null | def __init__(self, **data: Any):
resource_pointers, data_without_resources = separate_resource_params(self.__class__, data)
schema = infer_schema_from_config_class(
self.__class__, fields_to_omit=set(resource_pointers.keys())
)
# Populate config values
super().__init__(**data_without_resources, **resource_pointers)
# We pull the values from the Pydantic config object, which may cast values
# to the correct type under the hood - useful in particular for enums
casted_data_without_resources = {
k: v
for k, v in self._convert_to_config_dictionary().items()
if k in data_without_resources
}
resolved_config_dict = config_dictionary_from_values(casted_data_without_resources, schema)
self._state__internal__ = ConfigurableResourceFactoryState(
# We keep track of any resources we depend on which are not fully configured
# so that we can retrieve them at runtime
nested_partial_resources={
k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v))
},
resolved_config_dict=resolved_config_dict,
# These are unfortunately named very similarly
config_schema=_curry_config_schema(schema, resolved_config_dict),
schema=schema,
nested_resources={k: v for k, v in resource_pointers.items()},
resource_context=None,
)
| (self, **data: Any) |
4,458 | dagster_snowflake.resources | _snowflake_private_key | null | def _snowflake_private_key(self, config) -> bytes:
# If the user has defined a path to a private key, we will use that.
if config.get("private_key_path", None) is not None:
# read the file from the path.
with open(config.get("private_key_path"), "rb") as key:
private_key = key.read()
else:
private_key = config.get("private_key", None)
kwargs = {}
if config.get("private_key_password", None) is not None:
kwargs["password"] = config["private_key_password"].encode()
else:
kwargs["password"] = None
try:
p_key = serialization.load_pem_private_key(
private_key, backend=default_backend(), **kwargs
)
except TypeError:
try:
private_key = base64.b64decode(private_key)
p_key = serialization.load_pem_private_key(
private_key, backend=default_backend(), **kwargs
)
except ValueError:
raise ValueError(
"Unable to load private key. You may need to base64 encode your private key."
" You can retrieve the base64 encoded key with this shell command: cat"
" rsa_key.p8 | base64"
)
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
return pkb
| (self, config) -> bytes |
4,461 | dagster._config.pythonic_config.resource | create_resource | Returns the object that this resource hands to user code, accessible by ops or assets
through the context or resource parameters. This works like the function decorated
with @resource when using function-based resources.
For ConfigurableResource, this function will return itself, passing
the actual ConfigurableResource object to user code.
| def create_resource(self, context: InitResourceContext) -> TResValue:
"""Returns the object that this resource hands to user code, accessible by ops or assets
through the context or resource parameters. This works like the function decorated
with @resource when using function-based resources.
For ConfigurableResource, this function will return itself, passing
the actual ConfigurableResource object to user code.
"""
return cast(TResValue, self)
| (self, context: dagster._core.execution.context.init.InitResourceContext) -> ~TResValue |
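A hedged sketch of the factory case described above, assuming ``ConfigurableResourceFactory`` is importable from ``dagster`` as in recent releases; ``StringClient`` is a hypothetical object handed to user code instead of the resource itself.

.. code-block:: python

    from dagster import ConfigurableResourceFactory, InitResourceContext

    class StringClient:
        # Hypothetical client object that user code receives
        def __init__(self, prefix: str):
            self.prefix = prefix

    class StringClientResource(ConfigurableResourceFactory[StringClient]):
        prefix: str

        def create_resource(self, context: InitResourceContext) -> StringClient:
            # A factory returns the object user code should receive,
            # rather than returning the resource instance itself
            return StringClient(self.prefix)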
4,463 | dagster_snowflake.resources | get_connection | Gets a connection to Snowflake as a context manager.
If connector configuration is not set, SnowflakeResource.get_connection() will return a
`snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__
If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will
return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__
or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__
if raw_conn=True.
Args:
raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw
connection. Defaults to True.
Examples:
.. code-block:: python
@op
def get_query_status(snowflake: SnowflakeResource, query_id):
with snowflake.get_connection() as conn:
# conn is a Snowflake Connection object or a SQLAlchemy Connection if
# sqlalchemy is specified as the connector in the Snowflake Resource config
return conn.get_query_status(query_id)
| @compat_model_validator(mode="before")
def validate_authentication(cls, values):
auths_set = 0
auths_set += 1 if values.get("password") is not None else 0
auths_set += 1 if values.get("private_key") is not None else 0
auths_set += 1 if values.get("private_key_path") is not None else 0
# if authenticator is set, there can be 0 or 1 additional auth method;
# otherwise, ensure at least 1 method is provided
check.invariant(
auths_set > 0 or values.get("authenticator") is not None,
"Missing config: Password, private key, or authenticator authentication required"
" for Snowflake resource.",
)
# ensure that only 1 non-authenticator method is provided
check.invariant(
auths_set <= 1,
"Incorrect config: Cannot provide both password and private key authentication to"
" Snowflake Resource.",
)
return values
| (self, raw_conn: bool = True) -> Iterator[Union[Any, snowflake.connector.connection.SnowflakeConnection]] |
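A hedged sketch of the SQLAlchemy path described above, assuming the resource was configured with ``connector="sqlalchemy"``; with ``raw_conn=False`` the yielded object is a SQLAlchemy ``Connection`` rather than a raw DBAPI connection. ``MY_TABLE`` is a hypothetical table name.

.. code-block:: python

    from dagster import op
    from dagster_snowflake import SnowflakeResource
    from sqlalchemy import text

    @op
    def count_rows(snowflake: SnowflakeResource):
        # raw_conn=False yields a SQLAlchemy Connection instead of a raw DBAPI connection
        with snowflake.get_connection(raw_conn=False) as conn:
            return conn.execute(text("SELECT COUNT(*) FROM MY_TABLE")).scalar()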
4,464 | dagster_snowflake.resources | get_object_to_set_on_execution_context | null | def get_object_to_set_on_execution_context(self) -> Any:
# Directly create a SnowflakeConnection here for backcompat since the SnowflakeConnection
# has methods this resource does not have
return SnowflakeConnection(
config=self._resolved_config_dict,
log=get_dagster_logger(),
snowflake_connection_resource=self,
)
| (self) -> Any |
4,466 | dagster._config.pythonic_config.resource | get_resource_definition | null | import contextlib
import inspect
from typing import (
AbstractSet,
Any,
Callable,
Dict,
Generator,
Generic,
List,
Mapping,
NamedTuple,
Optional,
Set,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import TypeAlias, TypeGuard, get_args, get_origin
from dagster import (
Field as DagsterField,
)
from dagster._annotations import deprecated
from dagster._config.field_utils import config_dictionary_from_values
from dagster._config.pythonic_config.typing_utils import (
TypecheckAllowPartialResourceInitParams,
)
from dagster._config.validate import validate_config
from dagster._core.definitions.definition_config_schema import (
ConfiguredDefinitionConfigSchema,
DefinitionConfigSchema,
)
from dagster._core.errors import DagsterInvalidConfigError
from dagster._core.execution.context.init import InitResourceContext, build_init_resource_context
from dagster._model.pydantic_compat_layer import (
model_fields,
)
from dagster._utils.cached_method import cached_method
from .attach_other_object_to_context import (
IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,
)
from .type_check_utils import is_optional
try:
from functools import cached_property # type: ignore # (py37 compat)
except ImportError:
class cached_property:
pass
| (self) -> dagster._config.pythonic_config.resource.ConfigurableResourceFactoryResourceDefinition |
4,478 | dagster_snowflake.snowflake_io_manager | build_snowflake_io_manager | Builds an IO manager definition that reads inputs from and writes outputs to Snowflake.
Args:
type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between
slices of Snowflake tables and an in-memory type - e.g. a Pandas DataFrame. If only
one DbTypeHandler is provided, it will be used as the default_load_type.
default_load_type (Type): When an input has no type annotation, load it as this type.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_snowflake import build_snowflake_io_manager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions
@asset(
key_prefix=["my_prefix"]
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_second_table() -> pd.DataFrame: # the name of the asset will be the table name
...
snowflake_io_manager = build_snowflake_io_manager([SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()])
defs = Definitions(
assets=[my_table, my_second_table],
resources={
"io_manager": snowflake_io_manager.configured({
"database": "my_database",
"account" : {"env": "SNOWFLAKE_ACCOUNT"}
...
})
}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the Snowflake I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
defs = Definitions(
assets=[my_table],
resources={"io_manager" snowflake_io_manager.configured(
{"database": "my_database", "schema": "my_schema", ...} # will be used as the schema
)}
)
On individual assets, you can also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
| def build_snowflake_io_manager(
type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None
) -> IOManagerDefinition:
"""Builds an IO manager definition that reads inputs from and writes outputs to Snowflake.
Args:
type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between
slices of Snowflake tables and an in-memory type - e.g. a Pandas DataFrame. If only
one DbTypeHandler is provided, it will be used as the default_load_type.
default_load_type (Type): When an input has no type annotation, load it as this type.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_snowflake import build_snowflake_io_manager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions
@asset(
key_prefix=["my_prefix"]
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_second_table() -> pd.DataFrame: # the name of the asset will be the table name
...
snowflake_io_manager = build_snowflake_io_manager([SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()])
defs = Definitions(
assets=[my_table, my_second_table],
resources={
"io_manager": snowflake_io_manager.configured({
"database": "my_database",
"account" : {"env": "SNOWFLAKE_ACCOUNT"}
...
})
}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the Snowflake I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
defs = Definitions(
assets=[my_table],
resources={"io_manager" snowflake_io_manager.configured(
{"database": "my_database", "schema": "my_schema", ...} # will be used as the schema
)}
)
On individual assets, you can also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
"""
@dagster_maintained_io_manager
@io_manager(config_schema=SnowflakeIOManager.to_config_schema())
def snowflake_io_manager(init_context):
return DbIOManager(
type_handlers=type_handlers,
db_client=SnowflakeDbClient(),
io_manager_name="SnowflakeIOManager",
database=init_context.resource_config["database"],
schema=init_context.resource_config.get("schema"),
default_load_type=default_load_type,
)
return snowflake_io_manager
| (type_handlers: Sequence[dagster._core.storage.db_io_manager.DbTypeHandler], default_load_type: Optional[Type] = None) -> dagster._core.storage.io_manager.IOManagerDefinition |
4,479 | dagster_snowflake.resources | fetch_last_updated_timestamps | Fetch the last updated times of a list of tables in Snowflake.
If the underlying query to fetch the last updated time returns no results, a ValueError will be raised.
Args:
snowflake_connection (Union[SqlDbConnection, SnowflakeConnection]): A connection to Snowflake.
Accepts either a SnowflakeConnection or a sqlalchemy connection object,
which are the two types of connections emittable from the snowflake resource.
schema (str): The schema of the tables to fetch the last updated time for.
tables (Sequence[str]): A list of table names to fetch the last updated time for.
database (Optional[str]): The database of the table. Only required if the connection
has not been set with a database.
Returns:
Mapping[str, datetime]: A dictionary of table names to their last updated time in UTC.
| def fetch_last_updated_timestamps(
*,
snowflake_connection: Union[SqlDbConnection, snowflake.connector.SnowflakeConnection],
schema: str,
tables: Sequence[str],
database: Optional[str] = None,
) -> Mapping[str, datetime]:
"""Fetch the last updated times of a list of tables in Snowflake.
If the underlying query to fetch the last updated time returns no results, a ValueError will be raised.
Args:
snowflake_connection (Union[SqlDbConnection, SnowflakeConnection]): A connection to Snowflake.
Accepts either a SnowflakeConnection or a sqlalchemy connection object,
which are the two types of connections emittable from the snowflake resource.
schema (str): The schema of the tables to fetch the last updated time for.
tables (Sequence[str]): A list of table names to fetch the last updated time for.
database (Optional[str]): The database of the table. Only required if the connection
has not been set with a database.
Returns:
Mapping[str, datetime]: A dictionary of table names to their last updated time in UTC.
"""
check.invariant(len(tables) > 0, "Must provide at least one table name to query upon.")
# Table names in snowflake's information schema are stored in uppercase
uppercase_tables = [table.upper() for table in tables]
tables_str = ", ".join([f"'{table_name}'" for table_name in uppercase_tables])
fully_qualified_table_name = (
f"{database}.information_schema.tables" if database else "information_schema.tables"
)
query = f"""
SELECT table_name, CONVERT_TIMEZONE('UTC', last_altered) AS last_altered
FROM {fully_qualified_table_name}
WHERE table_schema = '{schema}' AND table_name IN ({tables_str});
"""
result = snowflake_connection.cursor().execute(query)
if not result:
raise ValueError("No results returned from Snowflake update time query.")
result_mapping = {table_name: last_altered for table_name, last_altered in result}
result_correct_case = {}
for table_name in tables:
if table_name.upper() not in result_mapping:
raise ValueError(f"Table {table_name} could not be found.")
last_altered = result_mapping[table_name.upper()]
check.invariant(
isinstance(last_altered, datetime),
"Expected last_altered to be a datetime, but it was not.",
)
result_correct_case[table_name] = last_altered
return result_correct_case
| (*, snowflake_connection: Union[Any, snowflake.connector.connection.SnowflakeConnection], schema: str, tables: Sequence[str], database: Optional[str] = None) -> Mapping[str, datetime.datetime] |
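A hedged sketch of calling this helper from an asset that already receives a configured ``SnowflakeResource``; the schema and table names are hypothetical.

.. code-block:: python

    from dagster import asset
    from dagster_snowflake import SnowflakeResource, fetch_last_updated_timestamps

    @asset
    def table_freshness(snowflake: SnowflakeResource) -> dict:
        with snowflake.get_connection() as conn:
            last_updated = fetch_last_updated_timestamps(
                snowflake_connection=conn,
                schema="MY_SCHEMA",
                tables=["CUSTOMERS", "ORDERS"],
            )
        # Last-altered timestamps are returned in UTC, keyed by the names passed in
        return {table: ts.isoformat() for table, ts in last_updated.items()}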
4,483 | dagster_snowflake.ops | snowflake_op_for_query | This function is an op factory that constructs an op to execute a snowflake query.
Note that you can only use `snowflake_op_for_query` if you know the query you'd like to
execute at graph construction time. If you'd like to execute queries dynamically during
job execution, you should manually execute those queries in your custom op using the
snowflake resource.
Args:
sql (str): The sql query that will execute against the provided snowflake resource.
parameters (dict): The parameters for the sql query.
Returns:
OpDefinition: Returns the constructed op definition.
| def snowflake_op_for_query(sql, parameters=None):
"""This function is an op factory that constructs an op to execute a snowflake query.
Note that you can only use `snowflake_op_for_query` if you know the query you'd like to
execute at graph construction time. If you'd like to execute queries dynamically during
job execution, you should manually execute those queries in your custom op using the
snowflake resource.
Args:
sql (str): The sql query that will execute against the provided snowflake resource.
parameters (dict): The parameters for the sql query.
Returns:
OpDefinition: Returns the constructed op definition.
"""
return _core_create_snowflake_command(op, "op", sql, parameters)
| (sql, parameters=None) |
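A hedged sketch of using the op factory; the generated op requires a resource under the key ``snowflake``, and the query and table name here are hypothetical.

.. code-block:: python

    from dagster import Definitions, EnvVar, job
    from dagster_snowflake import SnowflakeResource, snowflake_op_for_query

    count_customers = snowflake_op_for_query("SELECT COUNT(*) FROM CUSTOMERS")

    @job
    def count_job():
        count_customers()

    defs = Definitions(
        jobs=[count_job],
        resources={
            "snowflake": SnowflakeResource(
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                password=EnvVar("SNOWFLAKE_PASSWORD"),
                database="MY_DATABASE",
            )
        },
    )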
4,487 | ifconf.common | ConfigBuilder | null | class ConfigBuilder(metaclass=abc.ABCMeta):
@abc.abstractmethod
def add_attr(self, name, default=None, required=False, hidden=False, help=''):
pass
@abc.abstractmethod
def add_attr_boolean(self, name, default=False, required=False, hidden=False, help=''):
pass
@abc.abstractmethod
def add_attr_int(self, name, default=0, required=False, hidden=False, help=''):
pass
@abc.abstractmethod
def add_attr_float(self, name, default=0.0, required=False, hidden=False, help=''):
pass
@abc.abstractmethod
def add_attr_dict(self, name, default={}, required=False, hidden=False, help=''):
pass
@abc.abstractmethod
def add_attr_list(self, name, default=[], required=False, hidden=False, help=''):
pass
@abc.abstractmethod
def add_attr_path(self, name, default=None, required=False, hidden=False, help=''):
pass
| () |
4,488 | ifconf.common | add_attr | null | @abc.abstractmethod
def add_attr(self, name, default=None, required=False, hidden=False, help=''):
pass
| (self, name, default=None, required=False, hidden=False, help='') |
4,489 | ifconf.common | add_attr_boolean | null | @abc.abstractmethod
def add_attr_boolean(self, name, default=False, required=False, hidden=False, help=''):
pass
| (self, name, default=False, required=False, hidden=False, help='') |
4,490 | ifconf.common | add_attr_dict | null | @abc.abstractmethod
def add_attr_dict(self, name, default={}, required=False, hidden=False, help=''):
pass
| (self, name, default={}, required=False, hidden=False, help='') |
4,491 | ifconf.common | add_attr_float | null | @abc.abstractmethod
def add_attr_float(self, name, default=0.0, required=False, hidden=False, help=''):
pass
| (self, name, default=0.0, required=False, hidden=False, help='') |
4,492 | ifconf.common | add_attr_int | null | @abc.abstractmethod
def add_attr_int(self, name, default=0, required=False, hidden=False, help=''):
pass
| (self, name, default=0, required=False, hidden=False, help='') |
4,493 | ifconf.common | add_attr_list | null | @abc.abstractmethod
def add_attr_list(self, name, default=[], required=False, hidden=False, help=''):
pass
| (self, name, default=[], required=False, hidden=False, help='') |
4,494 | ifconf.common | add_attr_path | null | @abc.abstractmethod
def add_attr_path(self, name, default=None, required=False, hidden=False, help=''):
pass
| (self, name, default=None, required=False, hidden=False, help='') |
4,497 | ifconf.common | config_callback | null | def config_callback(section = None):
def _decorator(func):
if hasattr(func, '__SECTION__'):
return func
func.__MODULE_NAME__ = get_module_name_for(func)
func.__SECTION__ = section if type(section) == str and section else '{}_{}'.format(func.__MODULE_NAME__, func.__name__)
return func
return _decorator(section) if callable(section) else _decorator
| (section=None) |
4,498 | ifconf.main | configure_main | null | def configure_main(argparser = None
, with_default_args = True
, config_arg = 'config.ini'
, config_path = []
, with_config_logging = True
, callback_methods = []):
global __MAIN_CONFIG__
__MAIN_CONFIG__ = Config(argparser)
callback_methods = callback_methods if hasattr(callback_methods, '__iter__') else [callback_methods]
callback_methods = [config_callback()(m) for m in callback_methods]
if with_default_args:
add_default_argument(__MAIN_CONFIG__.argparser, config_arg)
PrintConfigAction.set_callback_methods(callback_methods)
__MAIN_CONFIG__.parse(config_path)
if with_config_logging:
configure_logging(__MAIN_CONFIG__)
else:
__MAIN_CONFIG__.logger = logging.getLogger()
for m in callback_methods:
loader = ConfigLoader.load(m, __MAIN_CONFIG__)
try:
loader.configure(True)
except Exception as e:
__MAIN_CONFIG__.err.append('Failed to load configuration for the module [{}]. Error: {}'.format(loader.section, e))
for e in __MAIN_CONFIG__.err:
__MAIN_CONFIG__.logger.warning(e)
__MAIN_CONFIG__.logger.info('Completed configuration for file {}'.format(__MAIN_CONFIG__.config_path))
return __MAIN_CONFIG__
| (argparser=None, with_default_args=True, config_arg='config.ini', config_path=[], with_config_logging=True, callback_methods=[]) |
4,499 | ifconf.module | configure_module | null | def configure_module(*callback_methods, override = {}, immutable = True):
config = get_main_config()
loaders = [ConfigLoader.load(config_callback()(callback), config) for callback in callback_methods]
loaders[0].prepend_name_value_dict(override)
return reduce(lambda a,b: a.append_name_values(b), loaders).configure(immutable)
| (*callback_methods, override={}, immutable=True) |
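A loosely hedged sketch of how ``config_callback``, ``configure_main`` and ``configure_module`` might fit together, inferred only from the code shown here: the callback parameter is assumed to implement the ``ConfigBuilder`` interface above, the attribute names are hypothetical, and attribute-style access on the returned object is an assumption not confirmed by this source.

.. code-block:: python

    from ifconf import config_callback, configure_main, configure_module

    @config_callback
    def server_conf(builder):
        # "builder" is assumed to implement the ConfigBuilder interface above
        builder.add_attr('host', default='localhost', help='bind address')
        builder.add_attr_int('port', default=8080, help='listen port')
        builder.add_attr_boolean('debug', default=False, help='enable debug mode')

    configure_main()  # parse CLI args and the config file once, in the entry point
    conf = configure_module(server_conf, override={'port': 9090})
    print(conf.host, conf.port, conf.debug)  # assumed attribute-style access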
4,505 | face_recognition_models | cnn_face_detector_model_location | null | def cnn_face_detector_model_location():
return resource_filename(__name__, "models/mmod_human_face_detector.dat")
| () |
4,506 | face_recognition_models | face_recognition_model_location | null | def face_recognition_model_location():
return resource_filename(__name__, "models/dlib_face_recognition_resnet_model_v1.dat")
| () |
4,507 | face_recognition_models | pose_predictor_five_point_model_location | null | def pose_predictor_five_point_model_location():
return resource_filename(__name__, "models/shape_predictor_5_face_landmarks.dat")
| () |
4,508 | face_recognition_models | pose_predictor_model_location | null | def pose_predictor_model_location():
return resource_filename(__name__, "models/shape_predictor_68_face_landmarks.dat")
| () |
4,509 | optbinning.binning.binning_process | BinningProcess | Binning process to compute optimal binning of variables in a dataset,
given a binary, continuous or multiclass target dtype.
Parameters
----------
variable_names : array-like
List of variable names.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
selection_criteria : dict or None (default=None)
Variable selection criteria. See notes.
.. versionadded:: 0.6.0
fixed_variables : array-like or None
List of variables to be fixed. The binning process will retain these
variables if the selection criteria is not satisfied.
.. versionadded:: 0.12.1
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
categorical_variables : array-like or None, optional (default=None)
List of numerical variables to be considered categorical.
These are nominal variables. Not applicable when target type is
multiclass.
binning_fit_params : dict or None, optional (default=None)
Dictionary with optimal binning fitting options for specific variables.
Example: ``{"variable_1": {"max_n_bins": 4}}``.
binning_transform_params : dict or None, optional (default=None)
Dictionary with optimal binning transform options for specific
variables. Example ``{"variable_1": {"metric": "event_rate"}}``.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
.. versionadded:: 0.7.1
verbose : bool (default=False)
Enable verbose output.
Notes
-----
Parameter ``selection_criteria`` allows you to specify criteria for
variable selection. The input is a dictionary as follows
.. code::
selection_criteria = {
"metric_1":
{
"min": 0, "max": 1, "strategy": "highest", "top": 0.25
},
"metric_2":
{
"min": 0.02
}
}
where several metrics can be combined. For example, the above dictionary
indicates that the top 25% of variables with "metric_1" in [0, 1] and "metric_2"
greater than or equal to 0.02 are selected. Supported key values are:
* keys ``min`` and ``max`` support numerical values.
* key ``strategy`` supports options "highest" and "lowest".
* key ``top`` supports an integer or decimal (percentage).
.. warning::
If the binning process instance is going to be saved, do not pass the
option ``"solver": "mip"`` via the ``binning_fit_params`` parameter.
| class BinningProcess(Base, BaseEstimator, BaseBinningProcess):
"""Binning process to compute optimal binning of variables in a dataset,
given a binary, continuous or multiclass target dtype.
Parameters
----------
variable_names : array-like
List of variable names.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
selection_criteria : dict or None (default=None)
Variable selection criteria. See notes.
.. versionadded:: 0.6.0
fixed_variables : array-like or None
List of variables to be fixed. The binning process will retain these
variables if the selection criteria is not satisfied.
.. versionadded:: 0.12.1
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
categorical_variables : array-like or None, optional (default=None)
List of numerical variables to be considered categorical.
These are nominal variables. Not applicable when target type is
multiclass.
binning_fit_params : dict or None, optional (default=None)
Dictionary with optimal binning fitting options for specific variables.
Example: ``{"variable_1": {"max_n_bins": 4}}``.
binning_transform_params : dict or None, optional (default=None)
Dictionary with optimal binning transform options for specific
variables. Example ``{"variable_1": {"metric": "event_rate"}}``.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
.. versionadded:: 0.7.1
verbose : bool (default=False)
Enable verbose output.
Notes
-----
Parameter ``selection_criteria`` allows you to specify criteria for
variable selection. The input is a dictionary as follows
.. code::
selection_criteria = {
"metric_1":
{
"min": 0, "max": 1, "strategy": "highest", "top": 0.25
},
"metric_2":
{
"min": 0.02
}
}
where several metrics can be combined. For example, the above dictionary
indicates that the top 25% of variables with "metric_1" in [0, 1] and "metric_2"
greater than or equal to 0.02 are selected. Supported key values are:
* keys ``min`` and ``max`` support numerical values.
* key ``strategy`` supports options "highest" and "lowest".
* key ``top`` supports an integer or decimal (percentage).
.. warning::
If the binning process instance is going to be saved, do not pass the
option ``"solver": "mip"`` via the ``binning_fit_params`` parameter.
"""
def __init__(self, variable_names, max_n_prebins=20, min_prebin_size=0.05,
min_n_bins=None, max_n_bins=None, min_bin_size=None,
max_bin_size=None, max_pvalue=None,
max_pvalue_policy="consecutive", selection_criteria=None,
fixed_variables=None, categorical_variables=None,
special_codes=None, split_digits=None,
binning_fit_params=None, binning_transform_params=None,
n_jobs=None, verbose=False):
self.variable_names = variable_names
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.selection_criteria = selection_criteria
self.fixed_variables = fixed_variables
self.binning_fit_params = binning_fit_params
self.binning_transform_params = binning_transform_params
self.special_codes = special_codes
self.split_digits = split_digits
self.categorical_variables = categorical_variables
self.n_jobs = n_jobs
self.verbose = verbose
# auxiliary
self._n_samples = None
self._n_variables = None
self._target_dtype = None
self._n_numerical = None
self._n_categorical = None
self._n_selected = None
self._binned_variables = {}
self._variable_dtypes = {}
self._variable_stats = {}
self._support = None
# timing
self._time_total = None
self._is_updated = False
self._is_fitted = False
def fit(self, X, y, sample_weight=None, check_input=False):
"""Fit the binning process. Fit the optimal binning to all variables
according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
.. versionchanged:: 0.4.0
X supports ``numpy.ndarray`` and ``pandas.DataFrame``.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit(X, y, sample_weight, check_input)
def fit_disk(self, input_path, target, **kwargs):
"""Fit the binning process according to the given training data on
disk.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv or .parquet.
target : str
Target column.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv`` or
``pandas.read_parquet``.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit_disk(input_path, target, **kwargs)
def fit_from_dict(self, dict_optb):
"""Fit the binning process from a dict of OptimalBinning objects
already fitted.
Parameters
----------
dict_optb : dict
Dictionary with OptimalBinning objects for binary, continuous
or multiclass target. All objects must share the same class.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit_from_dict(dict_optb)
def fit_transform(self, X, y, sample_weight=None, metric=None,
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the binning process according to the given training data, then
transform it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array, shape = (n_samples, n_features_new)
Transformed array.
"""
return self.fit(X, y, sample_weight, check_input).transform(
X, metric, metric_special, metric_missing, show_digits,
check_input)
def fit_transform_disk(self, input_path, output_path, target, chunksize,
metric=None, metric_special=0, metric_missing=0,
show_digits=2, **kwargs):
"""Fit the binning process according to the given training data on
disk, then transform it and save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
target : str
Target column.
chunksize : int
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv``.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self.fit_disk(input_path, target, **kwargs).transform_disk(
input_path, output_path, chunksize, metric, metric_special,
metric_missing, show_digits, **kwargs)
def transform(self, X, metric=None, metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to metric using bins from each fitted optimal
binning.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array or pandas.DataFrame, shape = (n_samples,
n_features_new)
Transformed array.
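Examples
--------
An illustrative sketch, not part of the original docstring; it assumes
``binning_process`` was already fitted and that ``df_new`` contains the
same columns used during fitting:
.. code-block:: python

    # Metric transformation (WoE for a binary target).
    X_new_woe = binning_process.transform(df_new, metric="woe")
    # Bin-label transformation with 3 significant digits.
    X_new_bins = binning_process.transform(df_new, metric="bins",
                                           show_digits=3)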
"""
self._check_is_fitted()
return self._transform(X, metric, metric_special, metric_missing,
show_digits, check_input)
def transform_disk(self, input_path, output_path, chunksize, metric=None,
metric_special=0, metric_missing=0, show_digits=2,
**kwargs):
"""Transform given data on disk to metric using bins from each fitted
optimal binning. Save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
chunksize : int
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv``.
Returns
-------
self : BinningProcess
Fitted binning process.
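Examples
--------
An illustrative sketch, not part of the original docstring; the file
paths are assumptions and ``binning_process`` is assumed to be fitted:
.. code-block:: python

    # Transform "test.csv" chunk by chunk and append the result to
    # "test_woe.csv".
    binning_process.transform_disk(
        "test.csv", "test_woe.csv", chunksize=10000, metric="woe")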
"""
self._check_is_fitted()
return self._transform_disk(input_path, output_path, chunksize, metric,
metric_special, metric_missing,
show_digits, **kwargs)
def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
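Examples
--------
An illustrative sketch; ``binning_process`` is assumed to be fitted:
.. code-block:: python

    # Print detailed settings and timing statistics.
    binning_process.information(print_level=2)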
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
n_numerical = list(self._variable_dtypes.values()).count("numerical")
n_categorical = self._n_variables - n_numerical
self._n_selected = np.count_nonzero(self._support)
dict_user_options = self.get_params()
print_binning_process_information(
print_level, self._n_samples, self._n_variables,
self._target_dtype, n_numerical, n_categorical,
self._n_selected, self._time_total, dict_user_options)
def summary(self):
"""Binning process summary with main statistics for all binned
variables.
Returns
-------
df_summary : pandas.DataFrame
Binning process summary.
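Examples
--------
An illustrative sketch; ``binning_process`` is assumed to be fitted and
the available metric columns depend on the target type:
.. code-block:: python

    df_summary = binning_process.summary()
    print(df_summary[["name", "dtype", "n_bins", "selected"]])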
"""
self._check_is_fitted()
if self._is_updated:
self._binning_selection_criteria()
self._is_updated = False
df_summary = pd.DataFrame.from_dict(self._variable_stats).T
df_summary.reset_index(inplace=True)
df_summary.rename(columns={"index": "name"}, inplace=True)
df_summary["selected"] = self._support
columns = ["name", "dtype", "status", "selected", "n_bins"]
columns += _METRICS[self._target_dtype]["metrics"]
return df_summary[columns]
def get_binned_variable(self, name):
"""Return optimal binning object for a given variable name.
Parameters
----------
name : string
The variable name.
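Examples
--------
An illustrative sketch; "var_a" is a hypothetical variable name and
``binning_process`` is assumed to be fitted:
.. code-block:: python

    optb = binning_process.get_binned_variable("var_a")
    # Inspect the underlying optimal binning table.
    optb.binning_table.build()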
"""
self._check_is_fitted()
if not isinstance(name, str):
raise TypeError("name must be a string.")
if name in self.variable_names:
return self._binned_variables[name]
else:
raise ValueError("name {} does not match a binned variable."
.format(name))
def update_binned_variable(self, name, optb):
"""Update optimal binning object for a given variable.
Parameters
----------
name : string
The variable name.
optb : object
The optimal binning object already fitted.
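Examples
--------
An illustrative sketch; "var_a", ``x`` and ``y`` are assumptions and the
``OptimalBinning`` import path is the one exposed by the optbinning
package:
.. code-block:: python

    from optbinning import OptimalBinning

    # Re-bin a single variable with custom settings, then register it.
    optb_new = OptimalBinning(name="var_a", dtype="numerical",
                              max_n_bins=4)
    optb_new.fit(x, y)
    binning_process.update_binned_variable("var_a", optb_new)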
"""
self._check_is_fitted()
if not isinstance(name, str):
raise TypeError("name must be a string.")
if name not in self.variable_names:
raise ValueError("name {} does not match a binned variable."
.format(name))
optb_types = _OPTB_TYPES + _OPTBPW_TYPES
if not isinstance(optb, optb_types):
raise TypeError("Object {} must be of type ({}); got {}"
.format(name, optb_types, type(optb)))
# Check current class
if self._target_dtype == "binary":
optb_binary = (OptimalBinning, OptimalPWBinning)
if not isinstance(optb, optb_binary):
raise TypeError("target is binary and Object {} must be of "
"type {}.".format(optb, optb_binary))
elif self._target_dtype == "continuous":
optb_continuous = (ContinuousOptimalBinning,
ContinuousOptimalPWBinning)
if not isinstance(optb, optb_continuous):
raise TypeError("target is continuous and Object {} must be "
"of type {}.".format(optb, optb_continuous))
elif self._target_dtype == "multiclass":
if not isinstance(optb, MulticlassOptimalBinning):
raise TypeError("target is multiclass and Object {} must be "
"of type {}.".format(
optb, MulticlassOptimalBinning))
optb_old = self._binned_variables[name]
if optb_old.name and optb_old.name != optb.name:
raise ValueError("Update object name must match old object name; "
"{} != {}.".format(optb_old.name, optb.name))
if optb.name and name != optb.name:
raise ValueError("name and object name must coincide.")
self._binned_variables[name] = optb
self._is_updated = True
def get_support(self, indices=False, names=False):
"""Get a mask, or integer index, or names of the variables selected.
Parameters
----------
indices : boolean (default=False)
If True, the return value will be an array of integers, rather
than a boolean mask.
names : boolean (default=False)
If True, the return value will be an array of strings, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector. If `names` is
True, this is a string array of shape [# output features], whose
values are names of the selected features.
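Examples
--------
An illustrative sketch; ``binning_process`` is assumed to be fitted:
.. code-block:: python

    mask = binning_process.get_support()               # boolean mask
    selected_idx = binning_process.get_support(indices=True)
    selected_names = binning_process.get_support(names=True)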
"""
self._check_is_fitted()
if indices and names:
raise ValueError("Only indices or names can be True.")
mask = self._support
if indices:
return np.where(mask)[0]
elif names:
return np.asarray(self.variable_names)[mask]
else:
return mask
def _fit(self, X, y, sample_weight, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Binning process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# check X dtype
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("X must be a pandas.DataFrame or numpy.ndarray.")
# check target dtype
self._target_dtype = type_of_target(y)
if self._target_dtype not in ("binary", "continuous", "multiclass"):
raise ValueError("Target type {} is not supported."
.format(self._target_dtype))
# check sample weight
if sample_weight is not None and self._target_dtype != "binary":
raise ValueError("Target type {} does not support sample weight."
.format(self._target_dtype))
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
# check X and y data
if check_input:
X = check_array(X, ensure_2d=False, dtype=None,
force_all_finite='allow-nan')
y = check_array(y, ensure_2d=False, dtype=None,
force_all_finite=True)
check_consistent_length(X, y)
self._n_samples, self._n_variables = X.shape
if self._n_variables != len(self.variable_names):
raise ValueError("The number of columns must be equal to the"
"length of variable_names.")
if self.verbose:
logger.info("Dataset: number of samples: {}."
.format(self._n_samples))
logger.info("Dataset: number of variables: {}."
.format(self._n_variables))
# Number of jobs
n_jobs = effective_n_jobs(self.n_jobs)
if self.verbose:
logger.info("Options: number of jobs (cores): {}."
.format(n_jobs))
if n_jobs == 1:
for i, name in enumerate(self.variable_names):
if self.verbose:
logger.info("Binning variable ({} / {}): {}."
.format(i, self._n_variables, name))
if isinstance(X, np.ndarray):
dtype, optb = _fit_variable(
X[:, i], y, name, self._target_dtype,
self.categorical_variables, self.binning_fit_params,
self.max_n_prebins, self.min_prebin_size,
self.min_n_bins, self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
else:
dtype, optb = _fit_variable(
X[name], y, name, self._target_dtype,
self.categorical_variables, self.binning_fit_params,
self.max_n_prebins, self.min_prebin_size,
self.min_n_bins, self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
self._variable_dtypes[name] = dtype
self._binned_variables[name] = optb
else:
ids = np.arange(len(self.variable_names))
id_blocks = np.array_split(ids, n_jobs)
names = np.asarray(self.variable_names)
if isinstance(X, np.ndarray):
blocks = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(_fit_block)(
X[:, id_block], y, names[id_block],
self._target_dtype, self.categorical_variables,
self.binning_fit_params, self.max_n_prebins,
self.min_prebin_size, self.min_n_bins,
self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
for id_block in id_blocks)
else:
blocks = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(_fit_block)(
X[names[id_block]], y, names[id_block],
self._target_dtype, self.categorical_variables,
self.binning_fit_params, self.max_n_prebins,
self.min_prebin_size, self.min_n_bins,
self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
for id_block in id_blocks)
for b in blocks:
vt, bv = b
self._variable_dtypes.update(vt)
self._binned_variables.update(bv)
if self.verbose:
logger.info("Binning process variable selection...")
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_disk(self, input_path, target, **kwargs):
time_init = time.perf_counter()
if self.verbose:
logger.info("Binning process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Input file extension
extension = input_path.split(".")[1]
# Check extension
if extension not in ("csv", "parquet"):
raise ValueError("input_path extension must be csv or parquet; "
"got {}.".format(extension))
# Check target
if not isinstance(target, str):
raise TypeError("target must be a string.")
# Retrieve target and check dtype
y = _read_column(input_path, extension, target, **kwargs)
self._target_dtype = type_of_target(y)
if self._target_dtype not in ("binary", "continuous", "multiclass"):
raise ValueError("Target type {} is not supported."
.format(self._target_dtype))
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
if self.fixed_variables is not None:
for fv in self.fixed_variables:
if fv not in self.variable_names:
raise ValueError("Variable {} to be fixed is not a valid "
"variable name.".format(fv))
self._n_samples = len(y)
self._n_variables = len(self.variable_names)
if self.verbose:
logger.info("Dataset: number of samples: {}."
.format(self._n_samples))
logger.info("Dataset: number of variables: {}."
.format(self._n_variables))
for name in self.variable_names:
x = _read_column(input_path, extension, name, **kwargs)
dtype, optb = _fit_variable(
x, y, name, self._target_dtype, self.categorical_variables,
self.binning_fit_params, self.max_n_prebins,
self.min_prebin_size, self.min_n_bins, self.max_n_bins,
self.min_bin_size, self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits)
self._variable_dtypes[name] = dtype
self._binned_variables[name] = optb
if self.verbose:
logger.info("Binning process variable selection...")
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_from_dict(self, dict_optb):
time_init = time.perf_counter()
if self.verbose:
logger.info("Binning process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
if not isinstance(dict_optb, dict):
raise TypeError("dict_optb must be a dict.")
# Check variable names
if set(dict_optb.keys()) != set(self.variable_names):
raise ValueError("dict_optb keys and variable names must "
"coincide.")
# Check objects class
optb_types = _OPTB_TYPES
types = set()
for name, optb in dict_optb.items():
if not isinstance(name, str):
raise TypeError("Object key must be a string.")
if not isinstance(optb, optb_types):
raise TypeError("Object {} must be of type ({}); got {}"
.format(name, optb_types, type(optb)))
types.add(type(optb).__name__)
if len(types) > 1:
raise TypeError("All binning objects must be of the same "
"class.")
# Check if fitted
if not optb._is_fitted:
raise NotFittedError("Object with key={} is not fitted yet. "
"Call 'fit' for this object before "
"passing to a binning process."
.format(name))
# Check if name was provided and matches dict_optb key.
if optb.name and optb.name != name:
raise ValueError("Object with key={} has attribute name={}. "
"If object has a name those must coincide."
.format(name, optb.name))
obj_class = types.pop()
if obj_class == "OptimalBinning":
self._target_dtype = "binary"
elif obj_class == "ContinuousOptimalBinning":
self._target_dtype = "continuous"
elif obj_class == "MulticlassOptimalBinning":
self._target_dtype = "multiclass"
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
self._n_samples = 0
self._n_variables = len(self.variable_names)
for name, optb in dict_optb.items():
self._variable_dtypes[name] = optb.dtype
self._binned_variables[name] = optb
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _transform(self, X, metric, metric_special, metric_missing,
show_digits, check_input):
# Check X dtype
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("X must be a pandas.DataFrame or numpy.ndarray.")
n_samples, n_variables = X.shape
mask = self.get_support()
if not mask.any():
warn("No variables were selected: either the data is"
" too noisy or the selection_criteria too strict.",
UserWarning)
return np.empty(0).reshape((n_samples, 0))
if isinstance(X, np.ndarray) and len(mask) != n_variables:
raise ValueError("X has a different shape that during fitting.")
if isinstance(X, pd.DataFrame):
selected_variables = self.get_support(names=True)
for name in selected_variables:
if name not in X.columns:
raise ValueError("Selected variable {} must be a column "
"in the input dataframe.".format(name))
# Check metric
if metric in ("indices", "bins"):
if any(isinstance(optb, _OPTBPW_TYPES)
for optb in self._binned_variables.values()):
raise TypeError("metric {} not supported for piecewise "
"optimal binning objects.".format(metric))
indices_selected_variables = self.get_support(indices=True)
n_selected_variables = len(indices_selected_variables)
# Check if specific binning transform metrics were supplied, and
# whether these are compatible. Default base metric is the binning
# process transform metric.
base_metric = metric
if self.binning_transform_params is not None:
metrics = set()
if metric is not None:
metrics.add(metric)
for idx in indices_selected_variables:
name = self.variable_names[idx]
params = self.binning_transform_params.get(name, {})
metrics.add(params.get("metric", metric))
if len(metrics) > 1:
# indices and default transform metrics are numeric. If the "bins"
# metric is present, the dtypes are incompatible.
if "bins" in metrics:
raise ValueError(
"metric 'bins' cannot be mixed with numeric metrics.")
else:
base_metric = metrics.pop()
if base_metric == "indices":
X_transform = np.full(
(n_samples, n_selected_variables), -1, dtype=int)
elif base_metric == "bins":
X_transform = np.full(
(n_samples, n_selected_variables), "", dtype=object)
else:
X_transform = np.zeros((n_samples, n_selected_variables))
for i, idx in enumerate(indices_selected_variables):
name = self.variable_names[idx]
optb = self._binned_variables[name]
if isinstance(X, np.ndarray):
x = X[:, idx]
else:
x = X[name]
params = {}
if self.binning_transform_params is not None:
params = self.binning_transform_params.get(name, {})
metric = params.get("metric", metric)
metric_missing = params.get("metric_missing", metric_missing)
metric_special = params.get("metric_special", metric_special)
tparams = {
"x": x,
"metric": metric,
"metric_special": metric_special,
"metric_missing": metric_missing,
"check_input": check_input,
"show_digits": show_digits
}
if isinstance(optb, _OPTBPW_TYPES):
tparams.pop("show_digits")
if metric is None:
tparams.pop("metric")
X_transform[:, i] = optb.transform(**tparams)
if isinstance(X, pd.DataFrame):
return pd.DataFrame(
X_transform, columns=selected_variables, index=X.index)
return X_transform
def _transform_disk(self, input_path, output_path, chunksize, metric,
metric_special, metric_missing, show_digits, **kwargs):
# check input_path and output_path extensions
input_extension = input_path.split(".")[1]
output_extension = output_path.split(".")[1]
if input_extension != "csv" or output_extension != "csv":
raise ValueError("input_path and output_path must be csv files.")
# check chunksize
if not isinstance(chunksize, numbers.Integral) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer; got {}."
.format(chunksize))
# Check metric
if metric in ("indices", "bins"):
if any(isinstance(optb, _OPTBPW_TYPES)
for optb in self._binned_variables.values()):
raise TypeError("metric {} not supported for piecewise "
"optimal binning objects.".format(metric))
selected_variables = self.get_support(names=True)
n_selected_variables = len(selected_variables)
# Check if specific binning transform metrics were supplied, and
# whether these are compatible. Default base metric is the binning
# process transform metric.
base_metric = metric
if self.binning_transform_params is not None:
metrics = set()
if metric is not None:
metrics.add(metric)
for name in selected_variables:
params = self.binning_transform_params.get(name, {})
metrics.add(params.get("metric", metric))
if len(metrics) > 1:
# indices and default transform metrics are numeric. If the "bins"
# metric is present, the dtypes are incompatible.
if "bins" in metrics:
raise ValueError(
"metric 'bins' cannot be mixed with numeric metrics.")
else:
base_metric = metrics.pop()
chunks = pd.read_csv(input_path, engine='c', chunksize=chunksize,
usecols=selected_variables, **kwargs)
for k, chunk in enumerate(chunks):
n_samples, n_variables = chunk.shape
if base_metric == "indices":
X_transform = np.full(
(n_samples, n_selected_variables), -1, dtype=int)
elif base_metric == "bins":
X_transform = np.full(
(n_samples, n_selected_variables), "", dtype=object)
else:
X_transform = np.zeros((n_samples, n_selected_variables))
for i, name in enumerate(selected_variables):
optb = self._binned_variables[name]
params = {}
if self.binning_transform_params is not None:
params = self.binning_transform_params.get(name, {})
metric = params.get("metric", metric)
metric_missing = params.get("metric_missing", metric_missing)
metric_special = params.get("metric_special", metric_special)
tparams = {
"x": chunk[name],
"metric": metric,
"metric_special": metric_special,
"metric_missing": metric_missing,
"show_digits": show_digits
}
if isinstance(optb, _OPTBPW_TYPES):
tparams.pop("show_digits")
if metric is None:
tparams.pop("metric")
X_transform[:, i] = optb.transform(**tparams)
df = pd.DataFrame(X_transform, columns=selected_variables)
df.to_csv(output_path, mode='a', index=False, header=(k == 0))
return self
| (variable_names, max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, max_pvalue=None, max_pvalue_policy='consecutive', selection_criteria=None, fixed_variables=None, categorical_variables=None, special_codes=None, split_digits=None, binning_fit_params=None, binning_transform_params=None, n_jobs=None, verbose=False) |
4,510 | sklearn.base | __getstate__ | null | def __getstate__(self):
if getattr(self, "__slots__", None):
raise TypeError(
"You cannot use `__slots__` in objects inheriting from "
"`sklearn.base.BaseEstimator`."
)
try:
state = super().__getstate__()
if state is None:
# For Python 3.11+, empty instance (no `__slots__`,
# and `__dict__`) will return a state equal to `None`.
state = self.__dict__.copy()
except AttributeError:
# Python < 3.11
state = self.__dict__.copy()
if type(self).__module__.startswith("sklearn."):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
| (self) |
4,511 | optbinning.binning.binning_process | __init__ | null | def __init__(self, variable_names, max_n_prebins=20, min_prebin_size=0.05,
min_n_bins=None, max_n_bins=None, min_bin_size=None,
max_bin_size=None, max_pvalue=None,
max_pvalue_policy="consecutive", selection_criteria=None,
fixed_variables=None, categorical_variables=None,
special_codes=None, split_digits=None,
binning_fit_params=None, binning_transform_params=None,
n_jobs=None, verbose=False):
self.variable_names = variable_names
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.selection_criteria = selection_criteria
self.fixed_variables = fixed_variables
self.binning_fit_params = binning_fit_params
self.binning_transform_params = binning_transform_params
self.special_codes = special_codes
self.split_digits = split_digits
self.categorical_variables = categorical_variables
self.n_jobs = n_jobs
self.verbose = verbose
# auxiliary
self._n_samples = None
self._n_variables = None
self._target_dtype = None
self._n_numerical = None
self._n_categorical = None
self._n_selected = None
self._binned_variables = {}
self._variable_dtypes = {}
self._variable_stats = {}
self._support = None
# timing
self._time_total = None
self._is_updated = False
self._is_fitted = False
| (self, variable_names, max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, max_pvalue=None, max_pvalue_policy='consecutive', selection_criteria=None, fixed_variables=None, categorical_variables=None, special_codes=None, split_digits=None, binning_fit_params=None, binning_transform_params=None, n_jobs=None, verbose=False) |
4,512 | sklearn.base | __repr__ | null | def __repr__(self, N_CHAR_MAX=700):
# N_CHAR_MAX is the (approximate) maximum number of non-blank
# characters to render. We pass it as an optional parameter to ease
# the tests.
from .utils._pprint import _EstimatorPrettyPrinter
N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences
# use ellipsis for sequences with a lot of elements
pp = _EstimatorPrettyPrinter(
compact=True,
indent=1,
indent_at_name=True,
n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,
)
repr_ = pp.pformat(self)
# Use bruteforce ellipsis when there are a lot of non-blank characters
n_nonblank = len("".join(repr_.split()))
if n_nonblank > N_CHAR_MAX:
lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends
regex = r"^(\s*\S){%d}" % lim
# The regex '^(\s*\S){%d}' % n
# matches from the start of the string until the nth non-blank
# character:
# - ^ matches the start of string
# - (pattern){n} matches n repetitions of pattern
# - \s*\S matches a non-blank char following zero or more blanks
left_lim = re.match(regex, repr_).end()
right_lim = re.match(regex, repr_[::-1]).end()
if "\n" in repr_[left_lim:-right_lim]:
# The left side and right side aren't on the same line.
# To avoid weird cuts, e.g.:
# categoric...ore',
# we need to start the right side with an appropriate newline
# character so that it renders properly as:
# categoric...
# handle_unknown='ignore',
# so we add [^\n]*\n which matches until the next \n
regex += r"[^\n]*\n"
right_lim = re.match(regex, repr_[::-1]).end()
ellipsis = "..."
if left_lim + len(ellipsis) < len(repr_) - right_lim:
# Only add ellipsis if it results in a shorter repr
repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:]
return repr_
| (self, N_CHAR_MAX=700) |
4,513 | sklearn.base | __setstate__ | null | def __setstate__(self, state):
if type(self).__module__.startswith("sklearn."):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
InconsistentVersionWarning(
estimator_name=self.__class__.__name__,
current_sklearn_version=__version__,
original_sklearn_version=pickle_version,
),
)
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
| (self, state) |
4,514 | sklearn.base | __sklearn_clone__ | null | def __sklearn_clone__(self):
return _clone_parametrized(self)
| (self) |
4,515 | optbinning.binning.binning_process | _binning_selection_criteria | null | def _binning_selection_criteria(self):
for i, name in enumerate(self.variable_names):
optb = self._binned_variables[name]
optb.binning_table.build()
n_bins = len(optb.splits)
if isinstance(optb, OptimalPWBinning) or optb.dtype == "numerical":
n_bins += 1
if isinstance(optb, OptimalPWBinning):
dtype = "numerical"
else:
dtype = optb.dtype
info = {"dtype": dtype,
"status": optb.status,
"n_bins": n_bins}
optb.binning_table.analysis(print_output=False)
if self._target_dtype == "binary":
metrics = {
"iv": optb.binning_table.iv,
"gini": optb.binning_table.gini,
"js": optb.binning_table.js,
"quality_score": optb.binning_table.quality_score}
elif self._target_dtype == "multiclass":
metrics = {
"js": optb.binning_table.js,
"quality_score": optb.binning_table.quality_score}
elif self._target_dtype == "continuous":
metrics = {
"woe": optb.binning_table.woe,
"quality_score": optb.binning_table.quality_score}
info = {**info, **metrics}
self._variable_stats[name] = info
self._support_selection_criteria()
| (self) |
4,516 | sklearn.base | _check_feature_names | Set or check the `feature_names_in_` attribute.
.. versionadded:: 1.0
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
The input samples.
reset : bool
Whether to reset the `feature_names_in_` attribute.
If False, the input will be checked for consistency with
feature names of data provided when reset was last True.
.. note::
It is recommended to call `reset=True` in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
| def _check_feature_names(self, X, *, reset):
"""Set or check the `feature_names_in_` attribute.
.. versionadded:: 1.0
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
The input samples.
reset : bool
Whether to reset the `feature_names_in_` attribute.
If False, the input will be checked for consistency with
feature names of data provided when reset was last True.
.. note::
It is recommended to call `reset=True` in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
if reset:
feature_names_in = _get_feature_names(X)
if feature_names_in is not None:
self.feature_names_in_ = feature_names_in
elif hasattr(self, "feature_names_in_"):
# Delete the attribute when the estimator is fitted on a new dataset
# that has no feature names.
delattr(self, "feature_names_in_")
return
fitted_feature_names = getattr(self, "feature_names_in_", None)
X_feature_names = _get_feature_names(X)
if fitted_feature_names is None and X_feature_names is None:
# no feature names seen in fit and in X
return
if X_feature_names is not None and fitted_feature_names is None:
warnings.warn(
f"X has feature names, but {self.__class__.__name__} was fitted without"
" feature names"
)
return
if X_feature_names is None and fitted_feature_names is not None:
warnings.warn(
"X does not have valid feature names, but"
f" {self.__class__.__name__} was fitted with feature names"
)
return
# validate the feature names against the `feature_names_in_` attribute
if len(fitted_feature_names) != len(X_feature_names) or np.any(
fitted_feature_names != X_feature_names
):
message = (
"The feature names should match those that were passed during fit.\n"
)
fitted_feature_names_set = set(fitted_feature_names)
X_feature_names_set = set(X_feature_names)
unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)
missing_names = sorted(fitted_feature_names_set - X_feature_names_set)
def add_names(names):
output = ""
max_n_names = 5
for i, name in enumerate(names):
if i >= max_n_names:
output += "- ...\n"
break
output += f"- {name}\n"
return output
if unexpected_names:
message += "Feature names unseen at fit time:\n"
message += add_names(unexpected_names)
if missing_names:
message += "Feature names seen at fit time, yet now missing:\n"
message += add_names(missing_names)
if not missing_names and not unexpected_names:
message += (
"Feature names must be in the same order as they were in fit.\n"
)
raise ValueError(message)
| (self, X, *, reset) |
4,517 | optbinning.binning.base | _check_is_fitted | null | def _check_is_fitted(self):
if not self._is_fitted:
raise NotFittedError("This {} instance is not fitted yet. Call "
"'fit' with appropriate arguments."
.format(self.__class__.__name__))
| (self) |
4,518 | sklearn.base | _check_n_features | Set the `n_features_in_` attribute, or check against it.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
reset : bool
If True, the `n_features_in_` attribute is set to `X.shape[1]`.
If False and the attribute exists, then check that it is equal to
`X.shape[1]`. If False and the attribute does *not* exist, then
the check is skipped.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
| def _check_n_features(self, X, reset):
"""Set the `n_features_in_` attribute, or check against it.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
reset : bool
If True, the `n_features_in_` attribute is set to `X.shape[1]`.
If False and the attribute exists, then check that it is equal to
`X.shape[1]`. If False and the attribute does *not* exist, then
the check is skipped.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
try:
n_features = _num_features(X)
except TypeError as e:
if not reset and hasattr(self, "n_features_in_"):
raise ValueError(
"X does not contain any features, but "
f"{self.__class__.__name__} is expecting "
f"{self.n_features_in_} features"
) from e
# If the number of features is not defined and reset=True,
# then we skip this check
return
if reset:
self.n_features_in_ = n_features
return
if not hasattr(self, "n_features_in_"):
# Skip this check if the expected number of expected input features
# was not recorded by calling fit first. This is typically the case
# for stateless transformers.
return
if n_features != self.n_features_in_:
raise ValueError(
f"X has {n_features} features, but {self.__class__.__name__} "
f"is expecting {self.n_features_in_} features as input."
)
| (self, X, reset) |
4,519 | optbinning.binning.binning_process | _fit | null | def _fit(self, X, y, sample_weight, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Binning process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# check X dtype
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("X must be a pandas.DataFrame or numpy.ndarray.")
# check target dtype
self._target_dtype = type_of_target(y)
if self._target_dtype not in ("binary", "continuous", "multiclass"):
raise ValueError("Target type {} is not supported."
.format(self._target_dtype))
# check sample weight
if sample_weight is not None and self._target_dtype != "binary":
raise ValueError("Target type {} does not support sample weight."
.format(self._target_dtype))
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
# check X and y data
if check_input:
X = check_array(X, ensure_2d=False, dtype=None,
force_all_finite='allow-nan')
y = check_array(y, ensure_2d=False, dtype=None,
force_all_finite=True)
check_consistent_length(X, y)
self._n_samples, self._n_variables = X.shape
if self._n_variables != len(self.variable_names):
raise ValueError("The number of columns must be equal to the"
"length of variable_names.")
if self.verbose:
logger.info("Dataset: number of samples: {}."
.format(self._n_samples))
logger.info("Dataset: number of variables: {}."
.format(self._n_variables))
# Number of jobs
n_jobs = effective_n_jobs(self.n_jobs)
if self.verbose:
logger.info("Options: number of jobs (cores): {}."
.format(n_jobs))
if n_jobs == 1:
for i, name in enumerate(self.variable_names):
if self.verbose:
logger.info("Binning variable ({} / {}): {}."
.format(i, self._n_variables, name))
if isinstance(X, np.ndarray):
dtype, optb = _fit_variable(
X[:, i], y, name, self._target_dtype,
self.categorical_variables, self.binning_fit_params,
self.max_n_prebins, self.min_prebin_size,
self.min_n_bins, self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
else:
dtype, optb = _fit_variable(
X[name], y, name, self._target_dtype,
self.categorical_variables, self.binning_fit_params,
self.max_n_prebins, self.min_prebin_size,
self.min_n_bins, self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
self._variable_dtypes[name] = dtype
self._binned_variables[name] = optb
else:
ids = np.arange(len(self.variable_names))
id_blocks = np.array_split(ids, n_jobs)
names = np.asarray(self.variable_names)
if isinstance(X, np.ndarray):
blocks = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(_fit_block)(
X[:, id_block], y, names[id_block],
self._target_dtype, self.categorical_variables,
self.binning_fit_params, self.max_n_prebins,
self.min_prebin_size, self.min_n_bins,
self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
for id_block in id_blocks)
else:
blocks = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(_fit_block)(
X[names[id_block]], y, names[id_block],
self._target_dtype, self.categorical_variables,
self.binning_fit_params, self.max_n_prebins,
self.min_prebin_size, self.min_n_bins,
self.max_n_bins, self.min_bin_size,
self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits, sample_weight)
for id_block in id_blocks)
for b in blocks:
vt, bv = b
self._variable_dtypes.update(vt)
self._binned_variables.update(bv)
if self.verbose:
logger.info("Binning process variable selection...")
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, X, y, sample_weight, check_input) |
4,520 | optbinning.binning.binning_process | _fit_disk | null | def _fit_disk(self, input_path, target, **kwargs):
time_init = time.perf_counter()
if self.verbose:
logger.info("Binning process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Input file extension
extension = input_path.split(".")[1]
# Check extension
if extension not in ("csv", "parquet"):
raise ValueError("input_path extension must be csv or parquet; "
"got {}.".format(extension))
# Check target
if not isinstance(target, str):
raise TypeError("target must be a string.")
# Retrieve target and check dtype
y = _read_column(input_path, extension, target, **kwargs)
self._target_dtype = type_of_target(y)
if self._target_dtype not in ("binary", "continuous", "multiclass"):
raise ValueError("Target type {} is not supported."
.format(self._target_dtype))
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
if self.fixed_variables is not None:
for fv in self.fixed_variables:
if fv not in self.variable_names:
raise ValueError("Variable {} to be fixed is not a valid "
"variable name.".format(fv))
self._n_samples = len(y)
self._n_variables = len(self.variable_names)
if self.verbose:
logger.info("Dataset: number of samples: {}."
.format(self._n_samples))
logger.info("Dataset: number of variables: {}."
.format(self._n_variables))
for name in self.variable_names:
x = _read_column(input_path, extension, name, **kwargs)
dtype, optb = _fit_variable(
x, y, name, self._target_dtype, self.categorical_variables,
self.binning_fit_params, self.max_n_prebins,
self.min_prebin_size, self.min_n_bins, self.max_n_bins,
self.min_bin_size, self.max_pvalue, self.max_pvalue_policy,
self.special_codes, self.split_digits)
self._variable_dtypes[name] = dtype
self._binned_variables[name] = optb
if self.verbose:
logger.info("Binning process variable selection...")
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, input_path, target, **kwargs) |
4,521 | optbinning.binning.binning_process | _fit_from_dict | null | def _fit_from_dict(self, dict_optb):
time_init = time.perf_counter()
if self.verbose:
logger.info("Binning process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
if not isinstance(dict_optb, dict):
raise TypeError("dict_optb must be a dict.")
# Check variable names
if set(dict_optb.keys()) != set(self.variable_names):
raise ValueError("dict_optb keys and variable names must "
"coincide.")
# Check objects class
optb_types = _OPTB_TYPES
types = set()
for name, optb in dict_optb.items():
if not isinstance(name, str):
raise TypeError("Object key must be a string.")
if not isinstance(optb, optb_types):
raise TypeError("Object {} must be of type ({}); got {}"
.format(name, optb_types, type(optb)))
types.add(type(optb).__name__)
if len(types) > 1:
raise TypeError("All binning objects must be of the same "
"class.")
# Check if fitted
if not optb._is_fitted:
raise NotFittedError("Object with key={} is not fitted yet. "
"Call 'fit' for this object before "
"passing to a binning process."
.format(name))
# Check if name was provided and matches dict_optb key.
if optb.name and optb.name != name:
raise ValueError("Object with key={} has attribute name={}. "
"If object has a name those must coincide."
.format(name, optb.name))
obj_class = types.pop()
if obj_class == "OptimalBinning":
self._target_dtype = "binary"
elif obj_class == "ContinuousOptimalBinning":
self._target_dtype = "continuous"
elif obj_class == "MulticlassOptimalBinning":
self._target_dtype = "multiclass"
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
self._n_samples = 0
self._n_variables = len(self.variable_names)
for name, optb in dict_optb.items():
self._variable_dtypes[name] = optb.dtype
self._binned_variables[name] = optb
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, dict_optb) |
4,522 | sklearn.utils._estimator_html_repr | _get_doc_link | Generates a link to the API documentation for a given estimator.
This method generates the link to the estimator's documentation page
by using the template defined by the attribute `_doc_link_template`.
Returns
-------
url : str
The URL to the API documentation for this estimator. If the estimator does
not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
returned.
| def _get_doc_link(self):
"""Generates a link to the API documentation for a given estimator.
This method generates the link to the estimator's documentation page
by using the template defined by the attribute `_doc_link_template`.
Returns
-------
url : str
The URL to the API documentation for this estimator. If the estimator does
not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
returned.
"""
if self.__class__.__module__.split(".")[0] != self._doc_link_module:
return ""
if self._doc_link_url_param_generator is None:
estimator_name = self.__class__.__name__
# Construct the estimator's module name, up to the first private submodule.
# This works because in scikit-learn all public estimators are exposed at
# that level, even if they actually live in a private sub-module.
estimator_module = ".".join(
itertools.takewhile(
lambda part: not part.startswith("_"),
self.__class__.__module__.split("."),
)
)
return self._doc_link_template.format(
estimator_module=estimator_module, estimator_name=estimator_name
)
return self._doc_link_template.format(
**self._doc_link_url_param_generator(self)
)
| (self) |
4,523 | sklearn.utils._metadata_requests | _get_metadata_request | Get requested data properties.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
request : MetadataRequest
A :class:`~sklearn.utils.metadata_routing.MetadataRequest` instance.
| def _get_metadata_request(self):
"""Get requested data properties.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
request : MetadataRequest
A :class:`~sklearn.utils.metadata_routing.MetadataRequest` instance.
"""
if hasattr(self, "_metadata_request"):
requests = get_routing_for_object(self._metadata_request)
else:
requests = self._get_default_requests()
return requests
| (self) |
4,524 | sklearn.base | _get_tags | null | def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, "_more_tags"):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
| (self) |
4,525 | sklearn.base | _more_tags | null | def _more_tags(self):
return _DEFAULT_TAGS
| (self) |
4,526 | sklearn.base | _repr_html_inner | This function is returned by the @property `_repr_html_` to make
`hasattr(estimator, "_repr_html_") return `True` or `False` depending
on `get_config()["display"]`.
| def _repr_html_inner(self):
"""This function is returned by the @property `_repr_html_` to make
`hasattr(estimator, "_repr_html_") return `True` or `False` depending
on `get_config()["display"]`.
"""
return estimator_html_repr(self)
| (self) |
4,527 | sklearn.base | _repr_mimebundle_ | Mime bundle used by jupyter kernels to display estimator | def _repr_mimebundle_(self, **kwargs):
"""Mime bundle used by jupyter kernels to display estimator"""
output = {"text/plain": repr(self)}
if get_config()["display"] == "diagram":
output["text/html"] = estimator_html_repr(self)
return output
| (self, **kwargs) |
4,528 | optbinning.binning.binning_process | _support_selection_criteria | null | def _support_selection_criteria(self):
self._support = np.full(self._n_variables, True, dtype=bool)
if self.selection_criteria is None:
return
default_metrics_info = _METRICS[self._target_dtype]
criteria_metrics = self.selection_criteria.keys()
binning_metrics = pd.DataFrame.from_dict(self._variable_stats).T
for metric in default_metrics_info["metrics"]:
if metric in criteria_metrics:
metric_info = self.selection_criteria[metric]
metric_values = binning_metrics[metric].values
if "min" in metric_info:
self._support &= metric_values >= metric_info["min"]
if "max" in metric_info:
self._support &= metric_values <= metric_info["max"]
if all(m in metric_info for m in ("strategy", "top")):
indices_valid = np.where(self._support)[0]
metric_values = metric_values[indices_valid]
n_valid = len(metric_values)
# Auxiliary support
support = np.full(self._n_variables, False, dtype=bool)
top = metric_info["top"]
if not isinstance(top, numbers.Integral):
top = int(np.ceil(n_valid * top))
n_selected = min(n_valid, top)
if metric_info["strategy"] == "highest":
mask = np.argsort(-metric_values)[:n_selected]
elif metric_info["strategy"] == "lowest":
mask = np.argsort(metric_values)[:n_selected]
support[indices_valid[mask]] = True
self._support &= support
# Fixed variables
if self.fixed_variables is not None:
for fv in self.fixed_variables:
idfv = list(self.variable_names).index(fv)
self._support[idfv] = True
| (self) |