Merge two filters.
This makes a copy so we do not alter the original data. | def merge_include_exclude_filters(
base_filter: dict[str, Any], add_filter: dict[str, Any]
) -> dict[str, Any]:
"""Merge two filters.
This makes a copy so we do not alter the original data.
"""
return {
filter_type: {
matcher: base_filter[filter_type][matcher]
| add_filter[filter_type][matcher]
for matcher in FITLER_MATCHERS
}
for filter_type in FILTER_TYPES
} |
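A minimal usage sketch of the merge above, assuming the filter dicts are keyed by include/exclude and hold sets per matcher; the real FILTER_TYPES and matcher constants are defined elsewhere in the recorder filters module, so the values below are illustrative only:

FILTER_TYPES = ("include", "exclude")          # assumed values for illustration
MATCHERS = ("entities", "domains", "entity_globs")

base = {
    "include": {"entities": {"light.kitchen"}, "domains": {"light"}, "entity_globs": set()},
    "exclude": {"entities": set(), "domains": {"updater"}, "entity_globs": set()},
}
add = {
    "include": {"entities": {"switch.porch"}, "domains": set(), "entity_globs": set()},
    "exclude": {"entities": set(), "domains": set(), "entity_globs": {"sensor.debug_*"}},
}

merged = {
    filter_type: {
        matcher: base[filter_type][matcher] | add[filter_type][matcher]
        for matcher in MATCHERS
    }
    for filter_type in FILTER_TYPES
}
assert merged["include"]["entities"] == {"light.kitchen", "switch.porch"}
assert base["include"]["entities"] == {"light.kitchen"}  # originals are untouched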
Build a sql filter from config. | def sqlalchemy_filter_from_include_exclude_conf(conf: ConfigType) -> Filters | None:
"""Build a sql filter from config."""
exclude = conf.get(CONF_EXCLUDE, {})
include = conf.get(CONF_INCLUDE, {})
filters = Filters(
excluded_entities=exclude.get(CONF_ENTITIES, []),
excluded_domains=exclude.get(CONF_DOMAINS, []),
excluded_entity_globs=exclude.get(CONF_ENTITY_GLOBS, []),
included_entities=include.get(CONF_ENTITIES, []),
included_domains=include.get(CONF_DOMAINS, []),
included_entity_globs=include.get(CONF_ENTITY_GLOBS, []),
)
return filters if filters.has_config else None |
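For context, a sketch of the kind of config dict this expects, assuming the CONF_* constants resolve to the usual recorder YAML keys ("include", "exclude", "entities", "domains", "entity_globs"):

conf = {
    "include": {"domains": ["light"], "entity_globs": ["sensor.weather_*"]},
    "exclude": {"entities": ["light.porch"]},
}
# sqlalchemy_filter_from_include_exclude_conf(conf) would return a Filters
# object populated from those lists, or None when no include/exclude is set.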
Translate glob to sql. | def _globs_to_like(
glob_strs: Iterable[str], columns: Iterable[Column], encoder: Callable[[Any], Any]
) -> ColumnElement:
"""Translate glob to sql."""
matchers = [
(
column.is_not(None)
& cast(column, Text()).like(
encoder(glob_str).translate(GLOB_TO_SQL_CHARS), escape="\\"
)
)
for glob_str in glob_strs
for column in columns
]
return or_(*matchers) if matchers else or_(False) |
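To make the translation step concrete, here is a small sketch of what GLOB_TO_SQL_CHARS is assumed to do: map glob wildcards to SQL LIKE wildcards and escape literal "%", "_" and backslash. The real mapping lives elsewhere in the recorder sources, so the table below is an assumption for illustration:

GLOB_TO_SQL_CHARS = {          # assumed mapping for illustration
    ord("*"): "%",
    ord("?"): "_",
    ord("%"): "\\%",
    ord("_"): "\\_",
    ord("\\"): "\\\\",
}

pattern = "sensor.outdoor_*".translate(GLOB_TO_SQL_CHARS)
print(pattern)  # sensor.outdoor\_%  -- used as the column LIKE pattern with ESCAPE '\'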
Convert a list of domains to sql LIKE matchers. | def like_domain_matchers(domains: Iterable[str]) -> list[str]:
"""Convert a list of domains to sql LIKE matchers."""
return [f"{domain}.%" for domain in domains] |
Raise if the exception and cause do not contain the match substrs. | def raise_if_exception_missing_str(ex: Exception, match_substrs: Iterable[str]) -> None:
"""Raise if the exception and cause do not contain the match substrs."""
lower_ex_strs = [str(ex).lower(), str(ex.__cause__).lower()]
for str_sub in match_substrs:
for exc_str in lower_ex_strs:
if exc_str and str_sub in exc_str:
return
raise ex |
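A usage sketch of the helper above, showing the pattern used by _create_index and _add_columns below: call it inside an except block and only fall through when the error text looks benign. The failing function here is a hypothetical stand-in, not a real call:

from sqlalchemy.exc import OperationalError

def _fake_create() -> None:
    # Hypothetical stand-in for index.create(connection) hitting an existing object.
    raise OperationalError(
        "CREATE INDEX ix_demo ON demo (id)", None, Exception("index already exists")
    )

try:
    _fake_create()
except OperationalError as err:
    raise_if_exception_missing_str(err, ["already exists", "duplicate"])
    # Reaching this point means the error mentioned one of the substrings,
    # so the caller can log a warning and continue instead of failing.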
Get the schema version. | def _get_schema_version(session: Session) -> int | None:
"""Get the schema version."""
res = (
session.query(SchemaChanges.schema_version)
.order_by(SchemaChanges.change_id.desc())
.first()
)
return getattr(res, "schema_version", None) |
Get the schema version. | def get_schema_version(session_maker: Callable[[], Session]) -> int | None:
"""Get the schema version."""
try:
with session_scope(session=session_maker(), read_only=True) as session:
return _get_schema_version(session)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error when determining DB schema version")
return None |
Check if the schema is current. | def _schema_is_current(current_version: int) -> bool:
"""Check if the schema is current."""
return current_version == SCHEMA_VERSION |
Check if the schema is valid.
This checks that the schema is the current version as well as for some common schema
errors caused by manual migration between database engines, for example importing an
SQLite database to MariaDB. | def validate_db_schema(
hass: HomeAssistant, instance: Recorder, session_maker: Callable[[], Session]
) -> SchemaValidationStatus | None:
"""Check if the schema is valid.
This checks that the schema is the current version as well as for some common schema
errors caused by manual migration between database engines, for example importing an
SQLite database to MariaDB.
"""
schema_errors: set[str] = set()
current_version = get_schema_version(session_maker)
if current_version is None:
return None
if is_current := _schema_is_current(current_version):
# We can only check for further errors if the schema is current, because
# columns may otherwise not exist etc.
schema_errors = _find_schema_errors(hass, instance, session_maker)
valid = is_current and not schema_errors
return SchemaValidationStatus(current_version, schema_errors, valid) |
Find schema errors. | def _find_schema_errors(
hass: HomeAssistant, instance: Recorder, session_maker: Callable[[], Session]
) -> set[str]:
"""Find schema errors."""
schema_errors: set[str] = set()
schema_errors |= statistics_validate_db_schema(instance)
schema_errors |= states_validate_db_schema(instance)
schema_errors |= events_validate_db_schema(instance)
return schema_errors |
Check if live migration is possible. | def live_migration(schema_status: SchemaValidationStatus) -> bool:
"""Check if live migration is possible."""
return schema_status.current_version >= LIVE_MIGRATION_MIN_SCHEMA_VERSION |
Check if the schema needs to be upgraded. | def migrate_schema(
instance: Recorder,
hass: HomeAssistant,
engine: Engine,
session_maker: Callable[[], Session],
schema_status: SchemaValidationStatus,
) -> None:
"""Check if the schema needs to be upgraded."""
current_version = schema_status.current_version
if current_version != SCHEMA_VERSION:
_LOGGER.warning(
"Database is about to upgrade from schema version: %s to: %s",
current_version,
SCHEMA_VERSION,
)
db_ready = False
for version in range(current_version, SCHEMA_VERSION):
if (
live_migration(dataclass_replace(schema_status, current_version=version))
and not db_ready
):
db_ready = True
instance.migration_is_live = True
hass.add_job(instance.async_set_db_ready)
new_version = version + 1
_LOGGER.info("Upgrading recorder db schema to version %s", new_version)
_apply_update(
instance, hass, engine, session_maker, new_version, current_version
)
with session_scope(session=session_maker()) as session:
session.add(SchemaChanges(schema_version=new_version))
# Log at the same level as the long schema changes
# so it's clear that the upgrade is done
_LOGGER.warning("Upgrade to version %s done", new_version)
if schema_errors := schema_status.schema_errors:
_LOGGER.warning(
"Database is about to correct DB schema errors: %s",
", ".join(sorted(schema_errors)),
)
statistics_correct_db_schema(instance, schema_errors)
states_correct_db_schema(instance, schema_errors)
events_correct_db_schema(instance, schema_errors)
if current_version != SCHEMA_VERSION:
instance.queue_task(PostSchemaMigrationTask(current_version, SCHEMA_VERSION))
# Make sure the post schema migration task is committed in case
# the next task does not have commit_before = True
instance.queue_task(CommitTask()) |
Create an index for the specified table.
The index name should match the name given for the index
within the table definition described in the models | def _create_index(
session_maker: Callable[[], Session], table_name: str, index_name: str
) -> None:
"""Create an index for the specified table.
The index name should match the name given for the index
within the table definition described in the models
"""
table = Table(table_name, Base.metadata)
_LOGGER.debug("Looking up index %s for table %s", index_name, table_name)
# Look up the index object by name from the table in the models
index_list = [idx for idx in table.indexes if idx.name == index_name]
if not index_list:
_LOGGER.debug("The index %s no longer exists", index_name)
return
index = index_list[0]
_LOGGER.debug("Creating %s index", index_name)
_LOGGER.warning(
(
"Adding index `%s` to table `%s`. Note: this can take several "
"minutes on large databases and slow computers. Please "
"be patient!"
),
index_name,
table_name,
)
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
index.create(connection)
except (InternalError, OperationalError, ProgrammingError) as err:
raise_if_exception_missing_str(err, ["already exists", "duplicate"])
_LOGGER.warning(
"Index %s already exists on %s, continuing", index_name, table_name
)
_LOGGER.debug("Finished creating %s", index_name) |
Execute a query or collect an error. | def _execute_or_collect_error(
session_maker: Callable[[], Session], query: str, errors: list[str]
) -> bool:
"""Execute a query or collect an error."""
with session_scope(session=session_maker()) as session:
try:
session.connection().execute(text(query))
except SQLAlchemyError as err:
errors.append(str(err))
return False
return True |
Drop an index from a specified table.
There is no universal way to do something like `DROP INDEX IF EXISTS`
so we will simply execute the DROP command and ignore any exceptions
WARNING: Due to some engines (MySQL at least) being unable to use bind
parameters in a DROP INDEX statement (at least via SQLAlchemy), the query
string here is generated from the method parameters without sanitizing.
DO NOT USE THIS FUNCTION IN ANY OPERATION THAT TAKES USER INPUT. | def _drop_index(
session_maker: Callable[[], Session],
table_name: str,
index_name: str,
quiet: bool | None = None,
) -> None:
"""Drop an index from a specified table.
There is no universal way to do something like `DROP INDEX IF EXISTS`
so we will simply execute the DROP command and ignore any exceptions
WARNING: Due to some engines (MySQL at least) being unable to use bind
parameters in a DROP INDEX statement (at least via SQLAlchemy), the query
string here is generated from the method parameters without sanitizing.
DO NOT USE THIS FUNCTION IN ANY OPERATION THAT TAKES USER INPUT.
"""
_LOGGER.warning(
(
"Dropping index `%s` from table `%s`. Note: this can take several "
"minutes on large databases and slow computers. Please "
"be patient!"
),
index_name,
table_name,
)
index_to_drop: str | None = None
with session_scope(session=session_maker()) as session:
index_to_drop = get_index_by_name(session, table_name, index_name)
if index_to_drop is None:
_LOGGER.debug(
"The index %s on table %s no longer exists", index_name, table_name
)
return
errors: list[str] = []
for query in (
# Engines like DB2/Oracle
f"DROP INDEX {index_name}",
# Engines like SQLite, SQL Server
f"DROP INDEX {table_name}.{index_name}",
# Engines like MySQL, MS Access
f"DROP INDEX {index_name} ON {table_name}",
# Engines like postgresql may have a prefix
# ex idx_16532_ix_events_event_type_time_fired
f"DROP INDEX {index_to_drop}",
):
if _execute_or_collect_error(session_maker, query, errors):
_LOGGER.debug(
"Finished dropping index %s from table %s", index_name, table_name
)
return
if not quiet:
_LOGGER.warning(
(
"Failed to drop index `%s` from table `%s`. Schema "
"Migration will continue; this is not a "
"critical operation: %s"
),
index_name,
table_name,
errors,
) |
Add columns to a table. | def _add_columns(
session_maker: Callable[[], Session], table_name: str, columns_def: list[str]
) -> None:
"""Add columns to a table."""
_LOGGER.warning(
(
"Adding columns %s to table %s. Note: this can take several "
"minutes on large databases and slow computers. Please "
"be patient!"
),
", ".join(column.split(" ")[0] for column in columns_def),
table_name,
)
columns_def = [f"ADD {col_def}" for col_def in columns_def]
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
connection.execute(
text(
"ALTER TABLE {table} {columns_def}".format(
table=table_name, columns_def=", ".join(columns_def)
)
)
)
except (InternalError, OperationalError, ProgrammingError):
# Some engines support adding all columns at once,
# this error is when they don't
_LOGGER.info("Unable to use quick column add. Adding 1 by 1")
else:
return
for column_def in columns_def:
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
connection.execute(text(f"ALTER TABLE {table_name} {column_def}"))
except (InternalError, OperationalError, ProgrammingError) as err:
raise_if_exception_missing_str(err, ["already exists", "duplicate"])
_LOGGER.warning(
"Column %s already exists on %s, continuing",
column_def.split(" ")[1],
table_name,
) |
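For illustration, the batched statement built above renders like this (the table name and column definitions are just example values):

table_name = "events"
columns_def = [f"ADD {col}" for col in ("context_id CHARACTER(36)", "context_user_id CHARACTER(36)")]
print("ALTER TABLE {table} {columns_def}".format(table=table_name, columns_def=", ".join(columns_def)))
# ALTER TABLE events ADD context_id CHARACTER(36), ADD context_user_id CHARACTER(36)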
Modify columns in a table. | def _modify_columns(
session_maker: Callable[[], Session],
engine: Engine,
table_name: str,
columns_def: list[str],
) -> None:
"""Modify columns in a table."""
if engine.dialect.name == SupportedDialect.SQLITE:
_LOGGER.debug(
(
"Skipping to modify columns %s in table %s; "
"Modifying column length in SQLite is unnecessary, "
"it does not impose any length restrictions"
),
", ".join(column.split(" ")[0] for column in columns_def),
table_name,
)
return
_LOGGER.warning(
(
"Modifying columns %s in table %s. Note: this can take several "
"minutes on large databases and slow computers. Please "
"be patient!"
),
", ".join(column.split(" ")[0] for column in columns_def),
table_name,
)
if engine.dialect.name == SupportedDialect.POSTGRESQL:
columns_def = [
"ALTER {column} TYPE {type}".format(
**dict(zip(["column", "type"], col_def.split(" ", 1), strict=False))
)
for col_def in columns_def
]
elif engine.dialect.name == "mssql":
columns_def = [f"ALTER COLUMN {col_def}" for col_def in columns_def]
else:
columns_def = [f"MODIFY {col_def}" for col_def in columns_def]
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
connection.execute(
text(
"ALTER TABLE {table} {columns_def}".format(
table=table_name, columns_def=", ".join(columns_def)
)
)
)
except (InternalError, OperationalError):
_LOGGER.info("Unable to use quick column modify. Modifying 1 by 1")
else:
return
for column_def in columns_def:
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
connection.execute(text(f"ALTER TABLE {table_name} {column_def}"))
except (InternalError, OperationalError):
_LOGGER.exception(
"Could not modify column %s in table %s", column_def, table_name
) |
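A quick sketch of how a single column definition such as "event_type VARCHAR(64)" is rewritten per dialect before being joined into one ALTER TABLE statement (dialect names taken from the branches above):

col_def = "event_type VARCHAR(64)"

# PostgreSQL branch
print("ALTER {column} TYPE {type}".format(
    **dict(zip(["column", "type"], col_def.split(" ", 1), strict=False))
))  # ALTER event_type TYPE VARCHAR(64)

# MSSQL branch
print(f"ALTER COLUMN {col_def}")  # ALTER COLUMN event_type VARCHAR(64)

# MySQL/MariaDB and other dialects
print(f"MODIFY {col_def}")  # MODIFY event_type VARCHAR(64)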
Add the options to foreign key constraints. | def _update_states_table_with_foreign_key_options(
session_maker: Callable[[], Session], engine: Engine
) -> None:
"""Add the options to foreign key constraints."""
inspector = sqlalchemy.inspect(engine)
alters = [
{
"old_fk": ForeignKeyConstraint((), (), name=foreign_key["name"]),
"columns": foreign_key["constrained_columns"],
}
for foreign_key in inspector.get_foreign_keys(TABLE_STATES)
if foreign_key["name"]
and (
# MySQL/MariaDB will have empty options
not foreign_key.get("options")
# Postgres will have ondelete set to None
or foreign_key.get("options", {}).get("ondelete") is None
)
]
if not alters:
return
states_key_constraints = Base.metadata.tables[TABLE_STATES].foreign_key_constraints
old_states_table = Table( # noqa: F841
TABLE_STATES,
MetaData(),
*(alter["old_fk"] for alter in alters), # type: ignore[arg-type]
)
for alter in alters:
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
connection.execute(DropConstraint(alter["old_fk"])) # type: ignore[no-untyped-call]
for fkc in states_key_constraints:
if fkc.column_keys == alter["columns"]:
connection.execute(AddConstraint(fkc)) # type: ignore[no-untyped-call]
except (InternalError, OperationalError):
_LOGGER.exception(
"Could not update foreign options in %s table", TABLE_STATES
) |
Drop foreign key constraints for a table on specific columns. | def _drop_foreign_key_constraints(
session_maker: Callable[[], Session], engine: Engine, table: str, columns: list[str]
) -> None:
"""Drop foreign key constraints for a table on specific columns."""
inspector = sqlalchemy.inspect(engine)
drops = [
ForeignKeyConstraint((), (), name=foreign_key["name"])
for foreign_key in inspector.get_foreign_keys(table)
if foreign_key["name"] and foreign_key["constrained_columns"] == columns
]
# Bind the ForeignKeyConstraints to the table
old_table = Table(table, MetaData(), *drops) # noqa: F841
for drop in drops:
with session_scope(session=session_maker()) as session:
try:
connection = session.connection()
connection.execute(DropConstraint(drop)) # type: ignore[no-untyped-call]
except (InternalError, OperationalError):
_LOGGER.exception(
"Could not drop foreign constraints in %s table on %s",
TABLE_STATES,
columns,
) |
Perform operations to bring schema up to date. | def _apply_update( # noqa: C901
instance: Recorder,
hass: HomeAssistant,
engine: Engine,
session_maker: Callable[[], Session],
new_version: int,
old_version: int,
) -> None:
"""Perform operations to bring schema up to date."""
assert engine.dialect.name is not None, "Dialect name must be set"
dialect = try_parse_enum(SupportedDialect, engine.dialect.name)
_column_types = _COLUMN_TYPES_FOR_DIALECT.get(dialect, _SQLITE_COLUMN_TYPES)
if new_version == 1:
# This used to create ix_events_time_fired, but it was removed in version 32
pass
elif new_version == 2:
# Create compound start/end index for recorder_runs
_create_index(session_maker, "recorder_runs", "ix_recorder_runs_start_end")
# This used to create ix_states_last_updated but it was removed in version 32
elif new_version == 3:
# There used to be a new index here, but it was removed in version 4.
pass
elif new_version == 4:
# Queries were rewritten in this schema release. Most indexes from
# earlier versions of the schema are no longer needed.
if old_version == 3:
# Remove index that was added in version 3
_drop_index(session_maker, "states", "ix_states_created_domain")
if old_version == 2:
# Remove index that was added in version 2
_drop_index(session_maker, "states", "ix_states_entity_id_created")
# Remove indexes that were added in version 0
_drop_index(session_maker, "states", "states__state_changes")
_drop_index(session_maker, "states", "states__significant_changes")
_drop_index(session_maker, "states", "ix_states_entity_id_created")
# This used to create ix_states_entity_id_last_updated,
# but it was removed in version 32
elif new_version == 5:
# Create supporting index for States.event_id foreign key
_create_index(session_maker, "states", LEGACY_STATES_EVENT_ID_INDEX)
elif new_version == 6:
_add_columns(
session_maker,
"events",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(session_maker, "events", "ix_events_context_id")
# This used to create ix_events_context_user_id,
# but it was removed in version 28
_add_columns(
session_maker,
"states",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(session_maker, "states", "ix_states_context_id")
# This used to create ix_states_context_user_id,
# but it was removed in version 28
elif new_version == 7:
# There used to be a ix_states_entity_id index here,
# but it was removed in later schema
pass
elif new_version == 8:
_add_columns(session_maker, "events", ["context_parent_id CHARACTER(36)"])
_add_columns(session_maker, "states", ["old_state_id INTEGER"])
# This used to create ix_events_context_parent_id,
# but it was removed in version 28
elif new_version == 9:
# We now get the context from events with a join
# since it's always there on state_changed events
#
# Ideally we would drop the columns from the states
# table as well but sqlite doesn't support that
# and we would have to move to something like
# sqlalchemy alembic to make that work
#
# no longer dropping ix_states_context_id since it's recreated in 28
_drop_index(session_maker, "states", "ix_states_context_user_id")
# This index won't be there if they were not running
# nightly but we don't treat that as a critical issue
_drop_index(session_maker, "states", "ix_states_context_parent_id")
# Redundant keys on composite index:
# We already have ix_states_entity_id_last_updated
_drop_index(session_maker, "states", "ix_states_entity_id")
# This used to create ix_events_event_type_time_fired,
# but it was removed in version 32
_drop_index(session_maker, "events", "ix_events_event_type")
elif new_version == 10:
# Now done in step 11
pass
elif new_version == 11:
_create_index(session_maker, "states", "ix_states_old_state_id")
_update_states_table_with_foreign_key_options(session_maker, engine)
elif new_version == 12:
if engine.dialect.name == SupportedDialect.MYSQL:
_modify_columns(session_maker, engine, "events", ["event_data LONGTEXT"])
_modify_columns(session_maker, engine, "states", ["attributes LONGTEXT"])
elif new_version == 13:
if engine.dialect.name == SupportedDialect.MYSQL:
_modify_columns(
session_maker,
engine,
"events",
["time_fired DATETIME(6)", "created DATETIME(6)"],
)
_modify_columns(
session_maker,
engine,
"states",
[
"last_changed DATETIME(6)",
"last_updated DATETIME(6)",
"created DATETIME(6)",
],
)
elif new_version == 14:
_modify_columns(session_maker, engine, "events", ["event_type VARCHAR(64)"])
elif new_version == 15:
# This dropped the statistics table, done again in version 18.
pass
elif new_version == 16:
_drop_foreign_key_constraints(
session_maker, engine, TABLE_STATES, ["old_state_id"]
)
elif new_version == 17:
# This dropped the statistics table, done again in version 18.
pass
elif new_version == 18:
# Recreate the statistics and statistics meta tables.
#
# Order matters! Statistics and StatisticsShortTerm have a relation with
# StatisticsMeta, so statistics need to be deleted before meta (or in pair
# depending on the SQL backend); and meta needs to be created before statistics.
# We need to cast __table__ to Table, explanation in
# https://github.com/sqlalchemy/sqlalchemy/issues/9130
Base.metadata.drop_all(
bind=engine,
tables=[
cast(Table, StatisticsShortTerm.__table__),
cast(Table, Statistics.__table__),
cast(Table, StatisticsMeta.__table__),
],
)
cast(Table, StatisticsMeta.__table__).create(engine)
cast(Table, StatisticsShortTerm.__table__).create(engine)
cast(Table, Statistics.__table__).create(engine)
elif new_version == 19:
# This adds the statistic runs table, insert a fake run to prevent duplicating
# statistics.
with session_scope(session=session_maker()) as session:
session.add(StatisticsRuns(start=get_start_time()))
elif new_version == 20:
# This changed the precision of statistics from float to double
if engine.dialect.name in [SupportedDialect.MYSQL, SupportedDialect.POSTGRESQL]:
_modify_columns(
session_maker,
engine,
"statistics",
[
f"{column} {DOUBLE_PRECISION_TYPE_SQL}"
for column in ("max", "mean", "min", "state", "sum")
],
)
elif new_version == 21:
# Try to change the character set of the statistic_meta table
if engine.dialect.name == SupportedDialect.MYSQL:
for table in ("events", "states", "statistics_meta"):
_correct_table_character_set_and_collation(table, session_maker)
elif new_version == 22:
# Recreate the all statistics tables for Oracle DB with Identity columns
#
# Order matters! Statistics has a relation with StatisticsMeta,
# so statistics need to be deleted before meta (or in pair depending
# on the SQL backend); and meta needs to be created before statistics.
if engine.dialect.name == "oracle":
# We need to cast __table__ to Table, explanation in
# https://github.com/sqlalchemy/sqlalchemy/issues/9130
Base.metadata.drop_all(
bind=engine,
tables=[
cast(Table, StatisticsShortTerm.__table__),
cast(Table, Statistics.__table__),
cast(Table, StatisticsMeta.__table__),
cast(Table, StatisticsRuns.__table__),
],
)
cast(Table, StatisticsRuns.__table__).create(engine)
cast(Table, StatisticsMeta.__table__).create(engine)
cast(Table, StatisticsShortTerm.__table__).create(engine)
cast(Table, Statistics.__table__).create(engine)
# Block 5-minute statistics for one hour from the last run, or it will overlap
# with existing hourly statistics. Don't block on a database with no existing
# statistics.
with session_scope(session=session_maker()) as session:
if session.query(Statistics.id).count() and (
last_run_string := session.query(
func.max(StatisticsRuns.start)
).scalar()
):
last_run_start_time = process_timestamp(last_run_string)
if last_run_start_time:
fake_start_time = last_run_start_time + timedelta(minutes=5)
while fake_start_time < last_run_start_time + timedelta(hours=1):
session.add(StatisticsRuns(start=fake_start_time))
fake_start_time += timedelta(minutes=5)
# When querying the database, be careful to only explicitly query for columns
# which were present in schema version 22. If querying the table, SQLAlchemy
# will refer to future columns.
with session_scope(session=session_maker()) as session:
for sum_statistic in session.query(StatisticsMeta.id).filter_by(
has_sum=true()
):
last_statistic = (
session.query(
Statistics.start,
Statistics.last_reset,
Statistics.state,
Statistics.sum,
)
.filter_by(metadata_id=sum_statistic.id)
.order_by(Statistics.start.desc())
.first()
)
if last_statistic:
session.add(
StatisticsShortTerm(
metadata_id=sum_statistic.id,
start=last_statistic.start,
last_reset=last_statistic.last_reset,
state=last_statistic.state,
sum=last_statistic.sum,
)
)
elif new_version == 23:
# Add name column to StatisticsMeta
_add_columns(session_maker, "statistics_meta", ["name VARCHAR(255)"])
elif new_version == 24:
# This used to create the unique indices for start and statistic_id
# but we changed the format in schema 34 which will now take care
# of removing any duplicate if they still exist.
pass
elif new_version == 25:
_add_columns(
session_maker, "states", [f"attributes_id {_column_types.big_int_type}"]
)
_create_index(session_maker, "states", "ix_states_attributes_id")
elif new_version == 26:
_create_index(session_maker, "statistics_runs", "ix_statistics_runs_start")
elif new_version == 27:
_add_columns(session_maker, "events", [f"data_id {_column_types.big_int_type}"])
_create_index(session_maker, "events", "ix_events_data_id")
elif new_version == 28:
_add_columns(session_maker, "events", ["origin_idx INTEGER"])
# We never use the user_id or parent_id index
_drop_index(session_maker, "events", "ix_events_context_user_id")
_drop_index(session_maker, "events", "ix_events_context_parent_id")
_add_columns(
session_maker,
"states",
[
"origin_idx INTEGER",
"context_id VARCHAR(36)",
"context_user_id VARCHAR(36)",
"context_parent_id VARCHAR(36)",
],
)
_create_index(session_maker, "states", "ix_states_context_id")
# Once there are no longer any state_changed events
# in the events table we can drop the index on states.event_id
elif new_version == 29:
# Recreate statistics_meta index to block duplicated statistic_id
_drop_index(session_maker, "statistics_meta", "ix_statistics_meta_statistic_id")
if engine.dialect.name == SupportedDialect.MYSQL:
# Ensure the row format is dynamic or the index
# unique will be too large
with (
contextlib.suppress(SQLAlchemyError),
session_scope(session=session_maker()) as session,
):
connection = session.connection()
# This is safe to run multiple times and fast
# since the table is small.
connection.execute(
text("ALTER TABLE statistics_meta ROW_FORMAT=DYNAMIC")
)
try:
_create_index(
session_maker, "statistics_meta", "ix_statistics_meta_statistic_id"
)
except DatabaseError:
# There may be duplicated statistics_meta entries, delete duplicates
# and try again
with session_scope(session=session_maker()) as session:
delete_statistics_meta_duplicates(instance, session)
_create_index(
session_maker, "statistics_meta", "ix_statistics_meta_statistic_id"
)
elif new_version == 30:
# This added a column to the statistics_meta table, removed again before
# release of HA Core 2022.10.0
# SQLite 3.31.0 does not support dropping columns.
# Once we require SQLite >= 3.35.5, we should drop the column:
# ALTER TABLE statistics_meta DROP COLUMN state_unit_of_measurement
pass
elif new_version == 31:
# Once we require SQLite >= 3.35.5, we should drop the column:
# ALTER TABLE events DROP COLUMN time_fired
# ALTER TABLE states DROP COLUMN last_updated
# ALTER TABLE states DROP COLUMN last_changed
_add_columns(
session_maker, "events", [f"time_fired_ts {_column_types.timestamp_type}"]
)
_add_columns(
session_maker,
"states",
[
f"last_updated_ts {_column_types.timestamp_type}",
f"last_changed_ts {_column_types.timestamp_type}",
],
)
_create_index(session_maker, "events", "ix_events_time_fired_ts")
_create_index(session_maker, "events", "ix_events_event_type_time_fired_ts")
_create_index(session_maker, "states", "ix_states_entity_id_last_updated_ts")
_create_index(session_maker, "states", "ix_states_last_updated_ts")
_migrate_columns_to_timestamp(instance, session_maker, engine)
elif new_version == 32:
# Migration is done in two steps to ensure we can start using
# the new columns before we wipe the old ones.
_drop_index(session_maker, "states", "ix_states_entity_id_last_updated")
_drop_index(session_maker, "events", "ix_events_event_type_time_fired")
_drop_index(session_maker, "states", "ix_states_last_updated")
_drop_index(session_maker, "events", "ix_events_time_fired")
elif new_version == 33:
# This index is no longer used and can cause MySQL to use the wrong index
# when querying the states table.
# https://github.com/home-assistant/core/issues/83787
# There was an index cleanup here but it's now done in schema 39
pass
elif new_version == 34:
# Once we require SQLite >= 3.35.5, we should drop the columns:
# ALTER TABLE statistics DROP COLUMN created
# ALTER TABLE statistics DROP COLUMN start
# ALTER TABLE statistics DROP COLUMN last_reset
# ALTER TABLE statistics_short_term DROP COLUMN created
# ALTER TABLE statistics_short_term DROP COLUMN start
# ALTER TABLE statistics_short_term DROP COLUMN last_reset
_add_columns(
session_maker,
"statistics",
[
f"created_ts {_column_types.timestamp_type}",
f"start_ts {_column_types.timestamp_type}",
f"last_reset_ts {_column_types.timestamp_type}",
],
)
_add_columns(
session_maker,
"statistics_short_term",
[
f"created_ts {_column_types.timestamp_type}",
f"start_ts {_column_types.timestamp_type}",
f"last_reset_ts {_column_types.timestamp_type}",
],
)
_create_index(session_maker, "statistics", "ix_statistics_start_ts")
_create_index(
session_maker, "statistics", "ix_statistics_statistic_id_start_ts"
)
_create_index(
session_maker, "statistics_short_term", "ix_statistics_short_term_start_ts"
)
_create_index(
session_maker,
"statistics_short_term",
"ix_statistics_short_term_statistic_id_start_ts",
)
_migrate_statistics_columns_to_timestamp_removing_duplicates(
hass, instance, session_maker, engine
)
elif new_version == 35:
# Migration is done in two steps to ensure we can start using
# the new columns before we wipe the old ones.
_drop_index(
session_maker, "statistics", "ix_statistics_statistic_id_start", quiet=True
)
_drop_index(
session_maker,
"statistics_short_term",
"ix_statistics_short_term_statistic_id_start",
quiet=True,
)
# ix_statistics_start and ix_statistics_statistic_id_start are still used
# for the post migration cleanup and can be removed in a future version.
elif new_version == 36:
for table in ("states", "events"):
_add_columns(
session_maker,
table,
[
f"context_id_bin {_column_types.context_bin_type}",
f"context_user_id_bin {_column_types.context_bin_type}",
f"context_parent_id_bin {_column_types.context_bin_type}",
],
)
_create_index(session_maker, "events", "ix_events_context_id_bin")
_create_index(session_maker, "states", "ix_states_context_id_bin")
elif new_version == 37:
_add_columns(
session_maker, "events", [f"event_type_id {_column_types.big_int_type}"]
)
_create_index(session_maker, "events", "ix_events_event_type_id")
_drop_index(session_maker, "events", "ix_events_event_type_time_fired_ts")
_create_index(session_maker, "events", "ix_events_event_type_id_time_fired_ts")
elif new_version == 38:
_add_columns(
session_maker, "states", [f"metadata_id {_column_types.big_int_type}"]
)
_create_index(session_maker, "states", "ix_states_metadata_id")
_create_index(session_maker, "states", "ix_states_metadata_id_last_updated_ts")
elif new_version == 39:
# Dropping indexes with PostgreSQL never worked correctly if there was a prefix
# so we need to cleanup leftover indexes.
_drop_index(
session_maker, "events", "ix_events_event_type_time_fired_ts", quiet=True
)
_drop_index(session_maker, "events", "ix_events_event_type", quiet=True)
_drop_index(
session_maker, "events", "ix_events_event_type_time_fired", quiet=True
)
_drop_index(session_maker, "events", "ix_events_time_fired", quiet=True)
_drop_index(session_maker, "events", "ix_events_context_user_id", quiet=True)
_drop_index(session_maker, "events", "ix_events_context_parent_id", quiet=True)
_drop_index(
session_maker, "states", "ix_states_entity_id_last_updated", quiet=True
)
_drop_index(session_maker, "states", "ix_states_last_updated", quiet=True)
_drop_index(session_maker, "states", "ix_states_entity_id", quiet=True)
_drop_index(session_maker, "states", "ix_states_context_user_id", quiet=True)
_drop_index(session_maker, "states", "ix_states_context_parent_id", quiet=True)
_drop_index(session_maker, "states", "ix_states_created_domain", quiet=True)
_drop_index(session_maker, "states", "ix_states_entity_id_created", quiet=True)
_drop_index(session_maker, "states", "states__state_changes", quiet=True)
_drop_index(session_maker, "states", "states__significant_changes", quiet=True)
_drop_index(session_maker, "states", "ix_states_entity_id_created", quiet=True)
_drop_index(
session_maker, "statistics", "ix_statistics_statistic_id_start", quiet=True
)
_drop_index(
session_maker,
"statistics_short_term",
"ix_statistics_short_term_statistic_id_start",
quiet=True,
)
elif new_version == 40:
# ix_events_event_type_id is a left-prefix of ix_events_event_type_id_time_fired_ts
_drop_index(session_maker, "events", "ix_events_event_type_id")
# ix_states_metadata_id is a left-prefix of ix_states_metadata_id_last_updated_ts
_drop_index(session_maker, "states", "ix_states_metadata_id")
# ix_statistics_metadata_id is a left-prefix of ix_statistics_statistic_id_start_ts
_drop_index(session_maker, "statistics", "ix_statistics_metadata_id")
# ix_statistics_short_term_metadata_id is a left-prefix of ix_statistics_short_term_statistic_id_start_ts
_drop_index(
session_maker,
"statistics_short_term",
"ix_statistics_short_term_metadata_id",
)
elif new_version == 41:
_create_index(session_maker, "event_types", "ix_event_types_event_type")
_create_index(session_maker, "states_meta", "ix_states_meta_entity_id")
elif new_version == 42:
# If the user had a previously failed migration, or they
# downgraded from 2023.3.x to an older version we will have
# unmigrated statistics columns so we want to clean this up
# one last time since compiling the statistics will be slow
# or fail if we have unmigrated statistics.
_migrate_statistics_columns_to_timestamp_removing_duplicates(
hass, instance, session_maker, engine
)
elif new_version == 43:
_add_columns(
session_maker,
"states",
[f"last_reported_ts {_column_types.timestamp_type}"],
)
else:
raise ValueError(f"No schema migration defined for version {new_version}") |
Migrate statistics columns to timestamp or cleanup duplicates. | def _migrate_statistics_columns_to_timestamp_removing_duplicates(
hass: HomeAssistant,
instance: Recorder,
session_maker: Callable[[], Session],
engine: Engine,
) -> None:
"""Migrate statistics columns to timestamp or cleanup duplicates."""
try:
_migrate_statistics_columns_to_timestamp(instance, session_maker, engine)
except IntegrityError as ex:
_LOGGER.error(
"Statistics table contains duplicate entries: %s; "
"Cleaning up duplicates and trying again; "
"This will take a while; "
"Please be patient!",
ex,
)
# There may be duplicated statistics entries, delete duplicates
# and try again
with session_scope(session=session_maker()) as session:
delete_statistics_duplicates(instance, hass, session)
try:
_migrate_statistics_columns_to_timestamp(instance, session_maker, engine)
except IntegrityError:
_LOGGER.warning(
"Statistics table still contains duplicate entries after cleanup; "
"Falling back to a one by one migration"
)
_migrate_statistics_columns_to_timestamp_one_by_one(instance, session_maker)
# Log at error level to ensure the user sees this message in the log
# since we logged the error above.
_LOGGER.error(
"Statistics migration successfully recovered after statistics table duplicate cleanup"
) |
Correct issues detected by validate_db_schema. | def _correct_table_character_set_and_collation(
table: str,
session_maker: Callable[[], Session],
) -> None:
"""Correct issues detected by validate_db_schema."""
# Attempt to convert the table to utf8mb4
_LOGGER.warning(
"Updating character set and collation of table %s to utf8mb4. "
"Note: this can take several minutes on large databases and slow "
"computers. Please be patient!",
table,
)
with (
contextlib.suppress(SQLAlchemyError),
session_scope(session=session_maker()) as session,
):
connection = session.connection()
connection.execute(
# Using LOCK=EXCLUSIVE to prevent the database from corrupting
# https://github.com/home-assistant/core/issues/56104
text(
f"ALTER TABLE {table} CONVERT TO CHARACTER SET "
f"{MYSQL_DEFAULT_CHARSET} "
f"COLLATE {MYSQL_COLLATE}, LOCK=EXCLUSIVE"
)
) |
Post schema migration.
Run any housekeeping tasks after the schema migration has completed.
Post schema migration is run after the schema migration has completed
and the queue has been processed. This reduces memory pressure, since
events are held in memory until the queue is processed, and the queue
cannot be processed until the schema migration is complete. | def post_schema_migration(
instance: Recorder,
old_version: int,
new_version: int,
) -> None:
"""Post schema migration.
Run any housekeeping tasks after the schema migration has completed.
Post schema migration is run after the schema migration has completed
and the queue has been processed. This reduces memory pressure, since
events are held in memory until the queue is processed, and the queue
cannot be processed until the schema migration is complete.
"""
if old_version < 32 <= new_version:
# In version 31 we migrated all the time_fired, last_updated, and last_changed
# columns to be timestamps. In version 32 we need to wipe the old columns
# since they are no longer used and take up a significant amount of space.
assert instance.event_session is not None
assert instance.engine is not None
_wipe_old_string_time_columns(instance, instance.engine, instance.event_session)
if old_version < 35 <= new_version:
# In version 34 we migrated all the created, start, and last_reset
# columns to be timestamps. In version 35 we need to wipe the old columns
# since they are no longer used and take up a significant amount of space.
_wipe_old_string_statistics_columns(instance) |
Wipe old string statistics columns to save space. | def _wipe_old_string_statistics_columns(instance: Recorder) -> None:
"""Wipe old string statistics columns to save space."""
instance.queue_task(StatisticsTimestampMigrationCleanupTask()) |
Wipe old string time columns to save space. | def _wipe_old_string_time_columns(
instance: Recorder, engine: Engine, session: Session
) -> None:
"""Wipe old string time columns to save space."""
# Wipe Events.time_fired since it's been replaced by Events.time_fired_ts
# Wipe States.last_updated since it's been replaced by States.last_updated_ts
# Wipe States.last_changed since it's been replaced by States.last_changed_ts
#
if engine.dialect.name == SupportedDialect.SQLITE:
session.execute(text("UPDATE events set time_fired=NULL;"))
session.commit()
session.execute(text("UPDATE states set last_updated=NULL, last_changed=NULL;"))
session.commit()
elif engine.dialect.name == SupportedDialect.MYSQL:
#
# Since this is only to save space we limit the number of rows we update
# to 100,000 per table since we do not want to block the database for too long
# or run out of innodb_buffer_pool_size on MySQL. The old data will eventually
# be cleaned up by the recorder purge if we do not do it now.
#
session.execute(text("UPDATE events set time_fired=NULL LIMIT 100000;"))
session.commit()
session.execute(
text(
"UPDATE states set last_updated=NULL, last_changed=NULL "
" LIMIT 100000;"
)
)
session.commit()
elif engine.dialect.name == SupportedDialect.POSTGRESQL:
#
# Since this is only to save space we limit the number of rows we update
# to 100,000 per table since we do not want to block the database for too long
# or run out of RAM with PostgreSQL. The old data will eventually
# be cleaned up by the recorder purge if we do not do it now.
#
session.execute(
text(
"UPDATE events set time_fired=NULL "
"where event_id in "
"(select event_id from events where time_fired_ts is NOT NULL LIMIT 100000);"
)
)
session.commit()
session.execute(
text(
"UPDATE states set last_updated=NULL, last_changed=NULL "
"where state_id in "
"(select state_id from states where last_updated_ts is NOT NULL LIMIT 100000);"
)
)
session.commit() |
Migrate columns to use timestamp. | def _migrate_columns_to_timestamp(
instance: Recorder, session_maker: Callable[[], Session], engine: Engine
) -> None:
"""Migrate columns to use timestamp."""
# Migrate all data in Events.time_fired to Events.time_fired_ts
# Migrate all data in States.last_updated to States.last_updated_ts
# Migrate all data in States.last_changed to States.last_changed_ts
result: CursorResult | None = None
if engine.dialect.name == SupportedDialect.SQLITE:
# With SQLite we do this in one go since it is faster
with session_scope(session=session_maker()) as session:
connection = session.connection()
connection.execute(
text(
'UPDATE events set time_fired_ts=strftime("%s",time_fired) + '
"cast(substr(time_fired,-7) AS FLOAT);"
)
)
connection.execute(
text(
'UPDATE states set last_updated_ts=strftime("%s",last_updated) + '
"cast(substr(last_updated,-7) AS FLOAT), "
'last_changed_ts=strftime("%s",last_changed) + '
"cast(substr(last_changed,-7) AS FLOAT);"
)
)
elif engine.dialect.name == SupportedDialect.MYSQL:
# With MySQL we do this in chunks to avoid hitting the `innodb_buffer_pool_size` limit
# We also need to do this in a loop since we can't be sure that we have
# updated all rows in the table until the rowcount is 0
while result is None or result.rowcount > 0:
with session_scope(session=session_maker()) as session:
result = session.connection().execute(
text(
"UPDATE events set time_fired_ts="
"IF(time_fired is NULL or UNIX_TIMESTAMP(time_fired) is NULL,0,"
"UNIX_TIMESTAMP(time_fired)"
") "
"where time_fired_ts is NULL "
"LIMIT 100000;"
)
)
result = None
while result is None or result.rowcount > 0: # type: ignore[unreachable]
with session_scope(session=session_maker()) as session:
result = session.connection().execute(
text(
"UPDATE states set last_updated_ts="
"IF(last_updated is NULL or UNIX_TIMESTAMP(last_updated) is NULL,0,"
"UNIX_TIMESTAMP(last_updated) "
"), "
"last_changed_ts="
"UNIX_TIMESTAMP(last_changed) "
"where last_updated_ts is NULL "
"LIMIT 100000;"
)
)
elif engine.dialect.name == SupportedDialect.POSTGRESQL:
# With Postgresql we do this in chunks to avoid using too much memory
# We also need to do this in a loop since we can't be sure that we have
# updated all rows in the table until the rowcount is 0
while result is None or result.rowcount > 0:
with session_scope(session=session_maker()) as session:
result = session.connection().execute(
text(
"UPDATE events SET "
"time_fired_ts= "
"(case when time_fired is NULL then 0 else EXTRACT(EPOCH FROM time_fired::timestamptz) end) "
"WHERE event_id IN ( "
"SELECT event_id FROM events where time_fired_ts is NULL LIMIT 100000 "
" );"
)
)
result = None
while result is None or result.rowcount > 0: # type: ignore[unreachable]
with session_scope(session=session_maker()) as session:
result = session.connection().execute(
text(
"UPDATE states set last_updated_ts="
"(case when last_updated is NULL then 0 else EXTRACT(EPOCH FROM last_updated::timestamptz) end), "
"last_changed_ts=EXTRACT(EPOCH FROM last_changed::timestamptz) "
"where state_id IN ( "
"SELECT state_id FROM states where last_updated_ts is NULL LIMIT 100000 "
" );"
)
) |
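As a pure-Python illustration of the SQLite arithmetic above (strftime('%s', t) yields whole seconds and substr(t, -7) recovers the microsecond fraction), assuming the old columns hold ISO-like strings such as the example value below:

from datetime import datetime, timezone

time_fired = "2023-03-12 08:15:30.123456"   # example of an old string column value
whole_seconds = int(
    datetime.strptime(time_fired[:19], "%Y-%m-%d %H:%M:%S")
    .replace(tzinfo=timezone.utc)
    .timestamp()
)
fraction = float(time_fired[-7:])            # ".123456" -> 0.123456
print(whole_seconds + fraction)              # epoch timestamp with microsecond precision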
Migrate statistics columns to use timestamp one by one.
If something manually inserted data into the statistics table
in the past it may have inserted duplicate rows.
Before we had the unique index on (statistic_id, start),
the data could have been inserted without any errors and we
could end up with duplicate rows that go undetected (even by
our current duplicate cleanup code) until we try to migrate the
data to use timestamps.
This will migrate the data one by one to ensure we do not hit any
duplicate rows, and remove the duplicate rows as they are found. | def _migrate_statistics_columns_to_timestamp_one_by_one(
instance: Recorder, session_maker: Callable[[], Session]
) -> None:
"""Migrate statistics columns to use timestamp on by one.
If something manually inserted data into the statistics table
in the past it may have inserted duplicate rows.
Before we had the unique index on (statistic_id, start),
the data could have been inserted without any errors and we
could end up with duplicate rows that go undetected (even by
our current duplicate cleanup code) until we try to migrate the
data to use timestamps.
This will migrate the data one by one to ensure we do not hit any
duplicate rows, and remove the duplicate rows as they are found.
"""
for find_func, migrate_func, delete_func in (
(
find_unmigrated_statistics_rows,
migrate_single_statistics_row_to_timestamp,
delete_duplicate_statistics_row,
),
(
find_unmigrated_short_term_statistics_rows,
migrate_single_short_term_statistics_row_to_timestamp,
delete_duplicate_short_term_statistics_row,
),
):
with session_scope(session=session_maker()) as session:
while stats := session.execute(find_func(instance.max_bind_vars)).all():
for statistic_id, start, created, last_reset in stats:
start_ts = datetime_to_timestamp_or_none(process_timestamp(start))
created_ts = datetime_to_timestamp_or_none(
process_timestamp(created)
)
last_reset_ts = datetime_to_timestamp_or_none(
process_timestamp(last_reset)
)
try:
session.execute(
migrate_func(
statistic_id, start_ts, created_ts, last_reset_ts
)
)
except IntegrityError:
# This can happen if we have duplicate rows
# in the statistics table.
session.execute(delete_func(statistic_id))
session.commit() |
Migrate statistics columns to use timestamp. | def _migrate_statistics_columns_to_timestamp(
instance: Recorder, session_maker: Callable[[], Session], engine: Engine
) -> None:
"""Migrate statistics columns to use timestamp."""
# Migrate all data in statistics.start to statistics.start_ts
# Migrate all data in statistics.created to statistics.created_ts
# Migrate all data in statistics.last_reset to statistics.last_reset_ts
# Migrate all data in statistics_short_term.start to statistics_short_term.start_ts
# Migrate all data in statistics_short_term.created to statistics_short_term.created_ts
# Migrate all data in statistics_short_term.last_reset to statistics_short_term.last_reset_ts
result: CursorResult | None = None
if engine.dialect.name == SupportedDialect.SQLITE:
# With SQLite we do this in one go since it is faster
for table in STATISTICS_TABLES:
with session_scope(session=session_maker()) as session:
session.connection().execute(
text(
f"UPDATE {table} set start_ts=strftime('%s',start) + " # noqa: S608
"cast(substr(start,-7) AS FLOAT), "
f"created_ts=strftime('%s',created) + "
"cast(substr(created,-7) AS FLOAT), "
f"last_reset_ts=strftime('%s',last_reset) + "
"cast(substr(last_reset,-7) AS FLOAT) where start_ts is NULL;"
)
)
elif engine.dialect.name == SupportedDialect.MYSQL:
# With MySQL we do this in chunks to avoid hitting the `innodb_buffer_pool_size` limit
# We also need to do this in a loop since we can't be sure that we have
# updated all rows in the table until the rowcount is 0
for table in STATISTICS_TABLES:
result = None
while result is None or result.rowcount > 0: # type: ignore[unreachable]
with session_scope(session=session_maker()) as session:
result = session.connection().execute(
text(
f"UPDATE {table} set start_ts=" # noqa: S608
"IF(start is NULL or UNIX_TIMESTAMP(start) is NULL,0,"
"UNIX_TIMESTAMP(start) "
"), "
"created_ts="
"UNIX_TIMESTAMP(created), "
"last_reset_ts="
"UNIX_TIMESTAMP(last_reset) "
"where start_ts is NULL "
"LIMIT 100000;"
)
)
elif engine.dialect.name == SupportedDialect.POSTGRESQL:
# With Postgresql we do this in chunks to avoid using too much memory
# We also need to do this in a loop since we can't be sure that we have
# updated all rows in the table until the rowcount is 0
for table in STATISTICS_TABLES:
result = None
while result is None or result.rowcount > 0: # type: ignore[unreachable]
with session_scope(session=session_maker()) as session:
result = session.connection().execute(
text(
f"UPDATE {table} set start_ts=" # noqa: S608
"(case when start is NULL then 0 else EXTRACT(EPOCH FROM start::timestamptz) end), "
"created_ts=EXTRACT(EPOCH FROM created::timestamptz), "
"last_reset_ts=EXTRACT(EPOCH FROM last_reset::timestamptz) "
"where id IN ("
f"SELECT id FROM {table} where start_ts is NULL LIMIT 100000"
");"
)
) |
Convert a context_id to bytes. | def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
with contextlib.suppress(ValueError):
# There may be garbage in the context_id column
# from custom integrations that are not UUIDs or
# ULIDs that filled the column to the max length
# so we need to catch the ValueError and return
# None if it happens
if len(context_id) == 26:
return ulid_to_bytes(context_id)
return UUID(context_id).bytes
return None |
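Both accepted formats end up as 16 bytes. A small sketch using a made-up UUID and ULID string; ulid_to_bytes is the helper already used above, so it is only referenced in a comment here:

from uuid import UUID

uuid_ctx = "019106f2-9d71-4a9e-9d0b-1f3c5d6e7f80"   # 36 characters -> UUID branch
print(len(UUID(uuid_ctx).bytes))                     # 16

ulid_ctx = "01H5J6K7M8N9P0Q1R2S3T4V5W6"              # 26 characters -> ULID branch
print(len(ulid_ctx))                                 # 26; ulid_to_bytes(ulid_ctx) also yields 16 bytes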
Generate a ulid with a specific timestamp. | def _generate_ulid_bytes_at_time(timestamp: float | None) -> bytes:
"""Generate a ulid with a specific timestamp."""
return ulid_to_bytes(ulid_at_time(timestamp or time())) |
Migrate states context_ids to use binary format. | def migrate_states_context_ids(instance: Recorder) -> bool:
"""Migrate states context_ids to use binary format."""
_to_bytes = _context_id_to_bytes
session_maker = instance.get_session
_LOGGER.debug("Migrating states context_ids to binary format")
with session_scope(session=session_maker()) as session:
if states := session.execute(
find_states_context_ids_to_migrate(instance.max_bind_vars)
).all():
session.execute(
update(States),
[
{
"state_id": state_id,
"context_id": None,
"context_id_bin": _to_bytes(context_id)
or _generate_ulid_bytes_at_time(last_updated_ts),
"context_user_id": None,
"context_user_id_bin": _to_bytes(context_user_id),
"context_parent_id": None,
"context_parent_id_bin": _to_bytes(context_parent_id),
}
for state_id, last_updated_ts, context_id, context_user_id, context_parent_id in states
],
)
# If there is more work to do return False
# so that we can be called again
if is_done := not states:
_mark_migration_done(session, StatesContextIDMigration)
if is_done:
_drop_index(session_maker, "states", "ix_states_context_id")
_LOGGER.debug("Migrating states context_ids to binary format: done=%s", is_done)
return is_done |
Migrate events context_ids to use binary format. | def migrate_events_context_ids(instance: Recorder) -> bool:
"""Migrate events context_ids to use binary format."""
_to_bytes = _context_id_to_bytes
session_maker = instance.get_session
_LOGGER.debug("Migrating context_ids to binary format")
with session_scope(session=session_maker()) as session:
if events := session.execute(
find_events_context_ids_to_migrate(instance.max_bind_vars)
).all():
session.execute(
update(Events),
[
{
"event_id": event_id,
"context_id": None,
"context_id_bin": _to_bytes(context_id)
or _generate_ulid_bytes_at_time(time_fired_ts),
"context_user_id": None,
"context_user_id_bin": _to_bytes(context_user_id),
"context_parent_id": None,
"context_parent_id_bin": _to_bytes(context_parent_id),
}
for event_id, time_fired_ts, context_id, context_user_id, context_parent_id in events
],
)
# If there is more work to do return False
# so that we can be called again
if is_done := not events:
_mark_migration_done(session, EventsContextIDMigration)
if is_done:
_drop_index(session_maker, "events", "ix_events_context_id")
_LOGGER.debug("Migrating events context_ids to binary format: done=%s", is_done)
return is_done |
Migrate event_type to event_type_ids. | def migrate_event_type_ids(instance: Recorder) -> bool:
"""Migrate event_type to event_type_ids."""
session_maker = instance.get_session
_LOGGER.debug("Migrating event_types")
event_type_manager = instance.event_type_manager
with session_scope(session=session_maker()) as session:
if events := session.execute(
find_event_type_to_migrate(instance.max_bind_vars)
).all():
event_types = {event_type for _, event_type in events}
if None in event_types:
# event_type should never be None but we need to be defensive
# so we don't fail the migration because of a bad state
event_types.remove(None)
event_types.add(_EMPTY_EVENT_TYPE)
event_type_to_id = event_type_manager.get_many(event_types, session)
if missing_event_types := {
event_type
for event_type, event_id in event_type_to_id.items()
if event_id is None
}:
missing_db_event_types = [
EventTypes(event_type=event_type)
for event_type in missing_event_types
]
session.add_all(missing_db_event_types)
session.flush() # Assign ids
for db_event_type in missing_db_event_types:
# We cannot add the assigned ids to the event_type_manager
# because the commit could get rolled back
assert (
db_event_type.event_type is not None
), "event_type should never be None"
event_type_to_id[db_event_type.event_type] = (
db_event_type.event_type_id
)
event_type_manager.clear_non_existent(db_event_type.event_type)
session.execute(
update(Events),
[
{
"event_id": event_id,
"event_type": None,
"event_type_id": event_type_to_id[
_EMPTY_EVENT_TYPE if event_type is None else event_type
],
}
for event_id, event_type in events
],
)
# If there is more work to do return False
# so that we can be called again
if is_done := not events:
_mark_migration_done(session, EventTypeIDMigration)
if is_done:
instance.event_type_manager.active = True
_LOGGER.debug("Migrating event_types done=%s", is_done)
return is_done |
Migrate entity_ids to states_meta.
We do this in two steps because we need the history queries to work
while we are migrating.
1. Link the states to the states_meta table
2. Remove the entity_id column from the states table (in post_migrate_entity_ids) | def migrate_entity_ids(instance: Recorder) -> bool:
"""Migrate entity_ids to states_meta.
We do this in two steps because we need the history queries to work
while we are migrating.
1. Link the states to the states_meta table
2. Remove the entity_id column from the states table (in post_migrate_entity_ids)
"""
_LOGGER.debug("Migrating entity_ids")
states_meta_manager = instance.states_meta_manager
with session_scope(session=instance.get_session()) as session:
if states := session.execute(
find_entity_ids_to_migrate(instance.max_bind_vars)
).all():
entity_ids = {entity_id for _, entity_id in states}
if None in entity_ids:
# entity_id should never be None but we need to be defensive
# so we don't fail the migration because of a bad state
entity_ids.remove(None)
entity_ids.add(_EMPTY_ENTITY_ID)
entity_id_to_metadata_id = states_meta_manager.get_many(
entity_ids, session, True
)
if missing_entity_ids := {
entity_id
for entity_id, metadata_id in entity_id_to_metadata_id.items()
if metadata_id is None
}:
missing_states_metadata = [
StatesMeta(entity_id=entity_id) for entity_id in missing_entity_ids
]
session.add_all(missing_states_metadata)
session.flush() # Assign ids
for db_states_metadata in missing_states_metadata:
# We cannot add the assigned ids to the states_meta_manager
# because the commit could get rolled back
assert (
db_states_metadata.entity_id is not None
), "entity_id should never be None"
entity_id_to_metadata_id[db_states_metadata.entity_id] = (
db_states_metadata.metadata_id
)
session.execute(
update(States),
[
{
"state_id": state_id,
# We cannot set "entity_id": None yet since
# the history queries still need to work while the
# migration is in progress and we will do this in
# post_migrate_entity_ids
"metadata_id": entity_id_to_metadata_id[
_EMPTY_ENTITY_ID if entity_id is None else entity_id
],
}
for state_id, entity_id in states
],
)
# If there is more work to do return False
# so that we can be called again
if is_done := not states:
_mark_migration_done(session, EntityIDMigration)
_LOGGER.debug("Migrating entity_ids done=%s", is_done)
return is_done |
Remove old entity_id strings from states.
We cannot do this in migrate_entity_ids since the history queries
still need to work while the migration is in progress. | def post_migrate_entity_ids(instance: Recorder) -> bool:
"""Remove old entity_id strings from states.
We cannot do this in migrate_entity_ids since the history queries
still need to work while the migration is in progress.
"""
session_maker = instance.get_session
_LOGGER.debug("Cleanup legacy entity_ids")
with session_scope(session=session_maker()) as session:
cursor_result = session.connection().execute(batch_cleanup_entity_ids())
is_done = not cursor_result or cursor_result.rowcount == 0
# If there is more work to do return False
# so that we can be called again
if is_done:
# Drop the old indexes since they are no longer needed
_drop_index(session_maker, "states", LEGACY_STATES_ENTITY_ID_LAST_UPDATED_INDEX)
_LOGGER.debug("Cleanup legacy entity_ids done=%s", is_done)
return is_done |
Remove old event_id index from states.
We used to link states to events using the event_id column but we no
longer store state changed events in the events table.
If all old states have been purged and existing states are in the new
format we can drop the index since it can take up ~10MB per 1M rows. | def cleanup_legacy_states_event_ids(instance: Recorder) -> bool:
"""Remove old event_id index from states.
We used to link states to events using the event_id column but we no
longer store state changed events in the events table.
If all old states have been purged and existing states are in the new
format we can drop the index since it can take up ~10MB per 1M rows.
"""
session_maker = instance.get_session
_LOGGER.debug("Cleanup legacy entity_ids")
with session_scope(session=session_maker()) as session:
result = session.execute(has_used_states_event_ids()).scalar()
# In the future we may migrate existing states to the new format
# but in practice very few of these still exist in production and
# removing the index is likely all that needs to happen.
all_gone = not result
if all_gone:
# Only drop the index if there are no more event_ids in the states table
# ex all NULL
assert instance.engine is not None, "engine should never be None"
if instance.dialect_name != SupportedDialect.SQLITE:
# SQLite does not support dropping foreign key constraints
# so we can't drop the index at this time but we can avoid
# looking for legacy rows during purge
_drop_foreign_key_constraints(
session_maker, instance.engine, TABLE_STATES, ["event_id"]
)
_drop_index(session_maker, "states", LEGACY_STATES_EVENT_ID_INDEX)
instance.use_legacy_events_index = False
return True |
Initialize a new database.
The function determines the schema version by inspecting the db structure.
When the schema version is not present in the db, either db was just
created with the correct schema, or this is a db created before schema
versions were tracked. For now, we'll test if the changes for schema
version 1 are present to make the determination. Eventually this logic
can be removed and we can assume a new db is being created. | def _initialize_database(session: Session) -> bool:
"""Initialize a new database.
The function determines the schema version by inspecting the db structure.
When the schema version is not present in the db, either db was just
created with the correct schema, or this is a db created before schema
versions were tracked. For now, we'll test if the changes for schema
version 1 are present to make the determination. Eventually this logic
can be removed and we can assume a new db is being created.
"""
inspector = sqlalchemy.inspect(session.connection())
indexes = inspector.get_indexes("events")
for index in indexes:
if index["column_names"] in (["time_fired"], ["time_fired_ts"]):
# Schema addition from version 1 detected. New DB.
session.add(StatisticsRuns(start=get_start_time()))
session.add(SchemaChanges(schema_version=SCHEMA_VERSION))
return True
# Version 1 schema changes not found, this db needs to be migrated.
current_version = SchemaChanges(schema_version=0)
session.add(current_version)
return True |
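A small, self-contained sketch of the same detection technique against a throwaway SQLite database rather than the recorder schema: sqlalchemy.inspect() on a connection exposes get_indexes(), whose entries carry the column_names checked above.

import sqlalchemy
from sqlalchemy import Column, Float, Integer, MetaData, Table, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()
Table(
    "events",
    metadata,
    Column("event_id", Integer, primary_key=True),
    Column("time_fired_ts", Float, index=True),
)
metadata.create_all(engine)

with engine.connect() as conn:
    inspector = sqlalchemy.inspect(conn)
    for index in inspector.get_indexes("events"):
        # Each entry is a dict with "name", "column_names", "unique", ...
        print(index["name"], index["column_names"])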
Initialize a new database. | def initialize_database(session_maker: Callable[[], Session]) -> bool:
"""Initialize a new database."""
try:
with session_scope(session=session_maker(), read_only=True) as session:
if _get_schema_version(session) is not None:
return True
with session_scope(session=session_maker()) as session:
return _initialize_database(session)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error when initialise database")
return False |
Mark a migration as done in the database. | def _mark_migration_done(
session: Session, migration: type[BaseRunTimeMigration]
) -> None:
"""Mark a migration as done in the database."""
session.merge(
MigrationChanges(
migration_id=migration.migration_id, version=migration.migration_version
)
) |
Purge events and states older than purge_before.
Cleans up a timeframe of an hour, based on the oldest record. | def purge_old_data(
instance: Recorder,
purge_before: datetime,
repack: bool,
apply_filter: bool = False,
events_batch_size: int = DEFAULT_EVENTS_BATCHES_PER_PURGE,
states_batch_size: int = DEFAULT_STATES_BATCHES_PER_PURGE,
) -> bool:
"""Purge events and states older than purge_before.
Cleans up a timeframe of an hour, based on the oldest record.
"""
_LOGGER.debug(
"Purging states and events before target %s",
purge_before.isoformat(sep=" ", timespec="seconds"),
)
with session_scope(session=instance.get_session()) as session:
# Purge a max of max_bind_vars, based on the oldest states or events record
has_more_to_purge = False
if instance.use_legacy_events_index and _purging_legacy_format(session):
_LOGGER.debug(
"Purge running in legacy format as there are states with event_id"
" remaining"
)
has_more_to_purge |= _purge_legacy_format(instance, session, purge_before)
else:
_LOGGER.debug(
"Purge running in new format as there are NO states with event_id"
" remaining"
)
# Once we are done purging legacy rows, we use the new method
has_more_to_purge |= _purge_states_and_attributes_ids(
instance, session, states_batch_size, purge_before
)
has_more_to_purge |= _purge_events_and_data_ids(
instance, session, events_batch_size, purge_before
)
statistics_runs = _select_statistics_runs_to_purge(
session, purge_before, instance.max_bind_vars
)
short_term_statistics = _select_short_term_statistics_to_purge(
session, purge_before, instance.max_bind_vars
)
if statistics_runs:
_purge_statistics_runs(session, statistics_runs)
if short_term_statistics:
_purge_short_term_statistics(session, short_term_statistics)
if has_more_to_purge or statistics_runs or short_term_statistics:
# Return false, as we might not be done yet.
_LOGGER.debug("Purging hasn't fully completed yet")
return False
if apply_filter and _purge_filtered_data(instance, session) is False:
_LOGGER.debug("Cleanup filtered data hasn't fully completed yet")
return False
# This purge cycle is finished, clean up old event types and
# recorder runs
if instance.event_type_manager.active:
_purge_old_event_types(instance, session)
if instance.states_meta_manager.active:
_purge_old_entity_ids(instance, session)
_purge_old_recorder_runs(instance, session, purge_before)
if repack:
repack_database(instance)
return True |
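purge_old_data removes only a bounded number of rows per call and returns False while work remains, so callers re-invoke it until it reports True. A hedged sketch of that driving loop follows; run_purge_batch and purge_until_done are stand-ins for illustration, not the recorder's real task API.

from datetime import datetime, timedelta, timezone


def run_purge_batch(purge_before: datetime) -> bool:
    """Stand-in for purge_old_data(instance, purge_before, repack=False)."""
    return True  # pretend a single batch finished the purge cycle


def purge_until_done(keep_days: int = 10, max_batches: int = 1000) -> None:
    purge_before = datetime.now(timezone.utc) - timedelta(days=keep_days)
    for _ in range(max_batches):  # guard against an endless loop
        if run_purge_batch(purge_before):
            break


purge_until_done()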
Check if there are any legacy event_id linked states rows remaining. | def _purging_legacy_format(session: Session) -> bool:
"""Check if there are any legacy event_id linked states rows remaining."""
return bool(session.execute(find_legacy_row()).scalar()) |
Purge rows that are still linked by the event_ids. | def _purge_legacy_format(
instance: Recorder, session: Session, purge_before: datetime
) -> bool:
"""Purge rows that are still linked by the event_ids."""
(
event_ids,
state_ids,
attributes_ids,
data_ids,
) = _select_legacy_event_state_and_attributes_and_data_ids_to_purge(
session, purge_before, instance.max_bind_vars
)
_purge_state_ids(instance, session, state_ids)
_purge_unused_attributes_ids(instance, session, attributes_ids)
_purge_event_ids(session, event_ids)
_purge_unused_data_ids(instance, session, data_ids)
# The database may still have some rows that have an event_id but are not
# linked to any event. These rows are not linked to any event because the
# event was deleted. We need to purge these rows as well or we will never
# switch to the new format which will prevent us from purging any events
# that happened after the detached states.
(
detached_state_ids,
detached_attributes_ids,
) = _select_legacy_detached_state_and_attributes_and_data_ids_to_purge(
session, purge_before, instance.max_bind_vars
)
_purge_state_ids(instance, session, detached_state_ids)
_purge_unused_attributes_ids(instance, session, detached_attributes_ids)
return bool(
event_ids
or state_ids
or attributes_ids
or data_ids
or detached_state_ids
or detached_attributes_ids
) |
Purge states and linked attributes id in a batch.
Returns true if there are more states to purge. | def _purge_states_and_attributes_ids(
instance: Recorder,
session: Session,
states_batch_size: int,
purge_before: datetime,
) -> bool:
"""Purge states and linked attributes id in a batch.
Returns true if there are more states to purge.
"""
database_engine = instance.database_engine
assert database_engine is not None
has_remaining_state_ids_to_purge = True
# There are more states relative to attributes_ids so
# we purge enough state_ids to try to generate a full
# size batch of attributes_ids that will be around the size of
# max_bind_vars
attributes_ids_batch: set[int] = set()
max_bind_vars = instance.max_bind_vars
for _ in range(states_batch_size):
state_ids, attributes_ids = _select_state_attributes_ids_to_purge(
session, purge_before, max_bind_vars
)
if not state_ids:
has_remaining_state_ids_to_purge = False
break
_purge_state_ids(instance, session, state_ids)
attributes_ids_batch = attributes_ids_batch | attributes_ids
_purge_unused_attributes_ids(instance, session, attributes_ids_batch)
_LOGGER.debug(
"After purging states and attributes_ids remaining=%s",
has_remaining_state_ids_to_purge,
)
return has_remaining_state_ids_to_purge |
Purge events and linked data ids in a batch.
Returns true if there are more events to purge. | def _purge_events_and_data_ids(
instance: Recorder,
session: Session,
events_batch_size: int,
purge_before: datetime,
) -> bool:
"""Purge states and linked attributes id in a batch.
Returns true if there are more states to purge.
"""
has_remaining_event_ids_to_purge = True
# There are more events relative to data_ids so
# we purge enough event_ids to try to generate a full
# size batch of data_ids that will be around the size of
# max_bind_vars
data_ids_batch: set[int] = set()
max_bind_vars = instance.max_bind_vars
for _ in range(events_batch_size):
event_ids, data_ids = _select_event_data_ids_to_purge(
session, purge_before, max_bind_vars
)
if not event_ids:
has_remaining_event_ids_to_purge = False
break
_purge_event_ids(session, event_ids)
data_ids_batch = data_ids_batch | data_ids
_purge_unused_data_ids(instance, session, data_ids_batch)
_LOGGER.debug(
"After purging event and data_ids remaining=%s",
has_remaining_event_ids_to_purge,
)
return has_remaining_event_ids_to_purge |
Return sets of state and attribute ids to purge. | def _select_state_attributes_ids_to_purge(
session: Session, purge_before: datetime, max_bind_vars: int
) -> tuple[set[int], set[int]]:
"""Return sets of state and attribute ids to purge."""
state_ids = set()
attributes_ids = set()
for state_id, attributes_id in session.execute(
find_states_to_purge(purge_before.timestamp(), max_bind_vars)
).all():
state_ids.add(state_id)
if attributes_id:
attributes_ids.add(attributes_id)
_LOGGER.debug(
"Selected %s state ids and %s attributes_ids to remove",
len(state_ids),
len(attributes_ids),
)
return state_ids, attributes_ids |
Return sets of event and data ids to purge. | def _select_event_data_ids_to_purge(
session: Session, purge_before: datetime, max_bind_vars: int
) -> tuple[set[int], set[int]]:
"""Return sets of event and data ids to purge."""
event_ids = set()
data_ids = set()
for event_id, data_id in session.execute(
find_events_to_purge(purge_before.timestamp(), max_bind_vars)
).all():
event_ids.add(event_id)
if data_id:
data_ids.add(data_id)
_LOGGER.debug(
"Selected %s event ids and %s data_ids to remove", len(event_ids), len(data_ids)
)
return event_ids, data_ids |
Return a set of attributes ids that are not used by any states in the db. | def _select_unused_attributes_ids(
instance: Recorder,
session: Session,
attributes_ids: set[int],
database_engine: DatabaseEngine,
) -> set[int]:
"""Return a set of attributes ids that are not used by any states in the db."""
if not attributes_ids:
return set()
seen_ids: set[int] = set()
if not database_engine.optimizer.slow_range_in_select:
#
# SQLite has a superior query optimizer for the distinct query below as it uses
# the covering index without having to examine the rows directly for both of the
# queries below.
#
# We use the distinct query for SQLite since the query in the other branch can
# generate more than 500 unions which SQLite does not support.
#
# How MariaDB's query optimizer handles this query:
# > explain select distinct attributes_id from states where attributes_id in
# (136723);
# ...Using index
#
for attributes_ids_chunk in chunked_or_all(
attributes_ids, instance.max_bind_vars
):
seen_ids.update(
state[0]
for state in session.execute(
attributes_ids_exist_in_states_with_fast_in_distinct(
attributes_ids_chunk
)
).all()
)
else:
#
# This branch is for DBMS that cannot optimize the distinct query well and has
# to examine all the rows that match.
#
# This branch uses a union of simple queries, as each query is optimized away
# as the answer to the query can be found in the index.
#
# The below query works for SQLite as long as there are no more than 500
# attributes_id to be selected. We currently do not have MySQL or PostgreSQL
# servers running in the test suite; we test this path using SQLite when there
# are less than 500 attributes_id.
#
# How MariaDB's query optimizer handles this query:
# > explain select min(attributes_id) from states where attributes_id = 136723;
# ...Select tables optimized away
#
# We used to generate a query based on how many attribute_ids to find but
# that meant sqlalchemy Transparent SQL Compilation Caching was working against
# us by caching up to max_bind_vars different statements, which could be
# up to 500MB for a large database due to the complexity of the ORM objects.
#
# We now break the query into groups of 100 and use a lambda_stmt to ensure
# that the query is only cached once.
#
groups = [iter(attributes_ids)] * 100
for attr_ids in zip_longest(*groups, fillvalue=None):
seen_ids |= {
attrs_id[0]
for attrs_id in session.execute(
attributes_ids_exist_in_states(*attr_ids) # type: ignore[arg-type]
).all()
if attrs_id[0] is not None
}
to_remove = attributes_ids - seen_ids
_LOGGER.debug(
"Selected %s shared attributes to remove",
len(to_remove),
)
return to_remove |
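The grouping idiom above is pure standard library: repeating the same iterator n times and handing it to zip_longest yields fixed-size groups padded with the fillvalue, which keeps the generated SQL shape (and therefore the statement cache key) constant. A tiny demonstration:

from itertools import zip_longest

attributes_ids = [101, 102, 103, 104, 105, 106, 107]
groups = [iter(attributes_ids)] * 3  # three references to one iterator
for chunk in zip_longest(*groups, fillvalue=None):
    print(chunk)
# (101, 102, 103)
# (104, 105, 106)
# (107, None, None)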
Purge unused attributes ids. | def _purge_unused_attributes_ids(
instance: Recorder,
session: Session,
attributes_ids_batch: set[int],
) -> None:
"""Purge unused attributes ids."""
database_engine = instance.database_engine
assert database_engine is not None
if unused_attribute_ids_set := _select_unused_attributes_ids(
instance, session, attributes_ids_batch, database_engine
):
_purge_batch_attributes_ids(instance, session, unused_attribute_ids_set) |
Return a set of event data ids that are not used by any events in the db. | def _select_unused_event_data_ids(
instance: Recorder,
session: Session,
data_ids: set[int],
database_engine: DatabaseEngine,
) -> set[int]:
"""Return a set of event data ids that are not used by any events in the db."""
if not data_ids:
return set()
seen_ids: set[int] = set()
# See _select_unused_attributes_ids for why this function
# branches for non-sqlite databases.
if not database_engine.optimizer.slow_range_in_select:
for data_ids_chunk in chunked_or_all(data_ids, instance.max_bind_vars):
seen_ids.update(
state[0]
for state in session.execute(
data_ids_exist_in_events_with_fast_in_distinct(data_ids_chunk)
).all()
)
else:
groups = [iter(data_ids)] * 100
for data_ids_group in zip_longest(*groups, fillvalue=None):
seen_ids |= {
data_id[0]
for data_id in session.execute(
data_ids_exist_in_events(*data_ids_group) # type: ignore[arg-type]
).all()
if data_id[0] is not None
}
to_remove = data_ids - seen_ids
_LOGGER.debug("Selected %s shared event data to remove", len(to_remove))
return to_remove |
Return a list of statistic runs to purge.
Takes care to keep the newest run. | def _select_statistics_runs_to_purge(
session: Session, purge_before: datetime, max_bind_vars: int
) -> list[int]:
"""Return a list of statistic runs to purge.
Takes care to keep the newest run.
"""
statistic_runs = session.execute(
find_statistics_runs_to_purge(purge_before, max_bind_vars)
).all()
statistic_runs_list = [run_id for (run_id,) in statistic_runs]
# Exclude the newest statistics run
if (
last_run := session.execute(find_latest_statistics_runs_run_id()).scalar()
) and last_run in statistic_runs_list:
statistic_runs_list.remove(last_run)
_LOGGER.debug("Selected %s statistic runs to remove", len(statistic_runs))
return statistic_runs_list |
Return a list of short term statistics to purge. | def _select_short_term_statistics_to_purge(
session: Session, purge_before: datetime, max_bind_vars: int
) -> list[int]:
"""Return a list of short term statistics to purge."""
statistics = session.execute(
find_short_term_statistics_to_purge(purge_before, max_bind_vars)
).all()
_LOGGER.debug("Selected %s short term statistics to remove", len(statistics))
return [statistic_id for (statistic_id,) in statistics] |
Return a list of state, and attribute ids to purge.
We do not link these anymore since state_change events
do not exist in the events table anymore, however we
still need to be able to purge them. | def _select_legacy_detached_state_and_attributes_and_data_ids_to_purge(
session: Session, purge_before: datetime, max_bind_vars: int
) -> tuple[set[int], set[int]]:
"""Return a list of state, and attribute ids to purge.
We do not link these anymore since state_change events
do not exist in the events table anymore, however we
still need to be able to purge them.
"""
states = session.execute(
find_legacy_detached_states_and_attributes_to_purge(
purge_before.timestamp(), max_bind_vars
)
).all()
_LOGGER.debug("Selected %s state ids to remove", len(states))
state_ids = set()
attributes_ids = set()
for state_id, attributes_id in states:
if state_id:
state_ids.add(state_id)
if attributes_id:
attributes_ids.add(attributes_id)
return state_ids, attributes_ids |
Return a list of event, state, and attribute ids to purge linked by the event_id.
We do not link these anymore since state_change events
do not exist in the events table anymore, however we
still need to be able to purge them. | def _select_legacy_event_state_and_attributes_and_data_ids_to_purge(
session: Session, purge_before: datetime, max_bind_vars: int
) -> tuple[set[int], set[int], set[int], set[int]]:
"""Return a list of event, state, and attribute ids to purge linked by the event_id.
We do not link these anymore since state_change events
do not exist in the events table anymore, however we
still need to be able to purge them.
"""
events = session.execute(
find_legacy_event_state_and_attributes_and_data_ids_to_purge(
purge_before.timestamp(), max_bind_vars
)
).all()
_LOGGER.debug("Selected %s event ids to remove", len(events))
event_ids = set()
state_ids = set()
attributes_ids = set()
data_ids = set()
for event_id, data_id, state_id, attributes_id in events:
event_ids.add(event_id)
if state_id:
state_ids.add(state_id)
if attributes_id:
attributes_ids.add(attributes_id)
if data_id:
data_ids.add(data_id)
return event_ids, state_ids, attributes_ids, data_ids |
Disconnect states and delete by state id. | def _purge_state_ids(instance: Recorder, session: Session, state_ids: set[int]) -> None:
"""Disconnect states and delete by state id."""
if not state_ids:
return
# Update old_state_id to NULL before deleting to ensure
# the delete does not fail due to a foreign key constraint
# since some databases (MSSQL) cannot do the ON DELETE SET NULL
# for us.
disconnected_rows = session.execute(disconnect_states_rows(state_ids))
_LOGGER.debug("Updated %s states to remove old_state_id", disconnected_rows)
deleted_rows = session.execute(delete_states_rows(state_ids))
_LOGGER.debug("Deleted %s states", deleted_rows)
# Evict any entries in the old_states cache referring to a purged state
instance.states_manager.evict_purged_state_ids(state_ids) |
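Below is a self-contained sketch (toy schema, in-memory SQLite with foreign keys enabled; not the recorder models) of why the UPDATE has to run before the DELETE: a row that still references a purged state through old_state_id would otherwise violate the self-referencing foreign key.

from sqlalchemy import ForeignKey, create_engine, delete, event, update
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class ToyState(Base):
    __tablename__ = "toy_states"
    state_id: Mapped[int] = mapped_column(primary_key=True)
    old_state_id: Mapped[int | None] = mapped_column(
        ForeignKey("toy_states.state_id"), default=None
    )


engine = create_engine("sqlite://")


@event.listens_for(engine, "connect")
def _enable_foreign_keys(dbapi_connection, connection_record):
    dbapi_connection.execute("PRAGMA foreign_keys=ON")


Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([ToyState(state_id=1), ToyState(state_id=2, old_state_id=1)])
    session.commit()
    purge_ids = {1}
    # Without this UPDATE, deleting row 1 would fail because row 2 points at it.
    session.execute(
        update(ToyState)
        .where(ToyState.old_state_id.in_(purge_ids))
        .values(old_state_id=None)
    )
    session.execute(delete(ToyState).where(ToyState.state_id.in_(purge_ids)))
    session.commit()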
Delete old attributes ids in batches of max_bind_vars. | def _purge_batch_attributes_ids(
instance: Recorder, session: Session, attributes_ids: set[int]
) -> None:
"""Delete old attributes ids in batches of max_bind_vars."""
for attributes_ids_chunk in chunked_or_all(attributes_ids, instance.max_bind_vars):
deleted_rows = session.execute(
delete_states_attributes_rows(attributes_ids_chunk)
)
_LOGGER.debug("Deleted %s attribute states", deleted_rows)
# Evict any entries in the state_attributes_ids cache referring to purged attributes
instance.state_attributes_manager.evict_purged(attributes_ids) |
Delete old event data ids in batches of max_bind_vars. | def _purge_batch_data_ids(
instance: Recorder, session: Session, data_ids: set[int]
) -> None:
"""Delete old event data ids in batches of max_bind_vars."""
for data_ids_chunk in chunked_or_all(data_ids, instance.max_bind_vars):
deleted_rows = session.execute(delete_event_data_rows(data_ids_chunk))
_LOGGER.debug("Deleted %s data events", deleted_rows)
# Evict any entries in the event_data_ids cache referring to purged event data
instance.event_data_manager.evict_purged(data_ids) |
Delete by run_id. | def _purge_statistics_runs(session: Session, statistics_runs: list[int]) -> None:
"""Delete by run_id."""
deleted_rows = session.execute(delete_statistics_runs_rows(statistics_runs))
_LOGGER.debug("Deleted %s statistic runs", deleted_rows) |
Delete by id. | def _purge_short_term_statistics(
session: Session, short_term_statistics: list[int]
) -> None:
"""Delete by id."""
deleted_rows = session.execute(
delete_statistics_short_term_rows(short_term_statistics)
)
_LOGGER.debug("Deleted %s short term statistics", deleted_rows) |
Delete by event id. | def _purge_event_ids(session: Session, event_ids: set[int]) -> None:
"""Delete by event id."""
if not event_ids:
return
deleted_rows = session.execute(delete_event_rows(event_ids))
_LOGGER.debug("Deleted %s events", deleted_rows) |
Purge all old recorder runs. | def _purge_old_recorder_runs(
instance: Recorder, session: Session, purge_before: datetime
) -> None:
"""Purge all old recorder runs."""
# Recorder runs is small, no need to batch run it
deleted_rows = session.execute(
delete_recorder_runs_rows(
purge_before, instance.recorder_runs_manager.current.run_id
)
)
_LOGGER.debug("Deleted %s recorder_runs", deleted_rows) |
Purge all old event types. | def _purge_old_event_types(instance: Recorder, session: Session) -> None:
"""Purge all old event types."""
# Event types is small, no need to batch run it
purge_event_types = set()
event_type_ids = set()
for event_type_id, event_type in session.execute(find_event_types_to_purge()):
purge_event_types.add(event_type)
event_type_ids.add(event_type_id)
if not event_type_ids:
return
deleted_rows = session.execute(delete_event_types_rows(event_type_ids))
_LOGGER.debug("Deleted %s event types", deleted_rows)
# Evict any entries in the event_type cache referring to a purged event type
instance.event_type_manager.evict_purged(purge_event_types) |
Purge all old entity_ids. | def _purge_old_entity_ids(instance: Recorder, session: Session) -> None:
"""Purge all old entity_ids."""
# entity_ids are small, no need to batch run it
purge_entity_ids = set()
states_metadata_ids = set()
for metadata_id, entity_id in session.execute(find_entity_ids_to_purge()):
purge_entity_ids.add(entity_id)
states_metadata_ids.add(metadata_id)
if not states_metadata_ids:
return
deleted_rows = session.execute(delete_states_meta_rows(states_metadata_ids))
_LOGGER.debug("Deleted %s states meta", deleted_rows)
# Evict any entries in the states_meta cache referring to a purged entity_id
instance.states_meta_manager.evict_purged(purge_entity_ids)
instance.states_manager.evict_purged_entity_ids(purge_entity_ids) |
Remove filtered states and events that shouldn't be in the database. | def _purge_filtered_data(instance: Recorder, session: Session) -> bool:
"""Remove filtered states and events that shouldn't be in the database."""
_LOGGER.debug("Cleanup filtered data")
database_engine = instance.database_engine
assert database_engine is not None
now_timestamp = time.time()
# Check if excluded entity_ids are in database
entity_filter = instance.entity_filter
has_more_states_to_purge = False
excluded_metadata_ids: list[str] = [
metadata_id
for (metadata_id, entity_id) in session.query(
StatesMeta.metadata_id, StatesMeta.entity_id
).all()
if not entity_filter(entity_id)
]
if excluded_metadata_ids:
has_more_states_to_purge = _purge_filtered_states(
instance, session, excluded_metadata_ids, database_engine, now_timestamp
)
# Check if excluded event_types are in database
has_more_events_to_purge = False
if (
event_type_to_event_type_ids := instance.event_type_manager.get_many(
instance.exclude_event_types, session
)
) and (
excluded_event_type_ids := [
event_type_id
for event_type_id in event_type_to_event_type_ids.values()
if event_type_id is not None
]
):
has_more_events_to_purge = _purge_filtered_events(
instance, session, excluded_event_type_ids, now_timestamp
)
# Purge has completed if there are not more state or events to purge
return not (has_more_states_to_purge or has_more_events_to_purge) |
Remove filtered states and linked events.
Return true if all states are purged | def _purge_filtered_states(
instance: Recorder,
session: Session,
metadata_ids_to_purge: list[str],
database_engine: DatabaseEngine,
purge_before_timestamp: float,
) -> bool:
"""Remove filtered states and linked events.
Return true if all states are purged
"""
state_ids: tuple[int, ...]
attributes_ids: tuple[int, ...]
event_ids: tuple[int, ...]
to_purge = list(
session.query(States.state_id, States.attributes_id, States.event_id)
.filter(States.metadata_id.in_(metadata_ids_to_purge))
.filter(States.last_updated_ts < purge_before_timestamp)
.limit(instance.max_bind_vars)
.all()
)
if not to_purge:
return True
state_ids, attributes_ids, event_ids = zip(*to_purge, strict=False)
filtered_event_ids = {id_ for id_ in event_ids if id_ is not None}
_LOGGER.debug(
"Selected %s state_ids to remove that should be filtered", len(state_ids)
)
_purge_state_ids(instance, session, set(state_ids))
# These are legacy events linked to a state. They are no longer created,
# but since we did not remove them when we stopped adding new ones we
# need to purge them here.
_purge_event_ids(session, filtered_event_ids)
unused_attribute_ids_set = _select_unused_attributes_ids(
instance,
session,
{id_ for id_ in attributes_ids if id_ is not None},
database_engine,
)
_purge_batch_attributes_ids(instance, session, unused_attribute_ids_set)
return False |
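The zip(*to_purge) call above is a plain transpose: it turns the list of selected rows into one tuple per column so each kind of id can be handled separately. A tiny illustration with made-up ids:

rows = [(1, 11, None), (2, None, 201), (3, 12, 202)]
state_ids, attributes_ids, event_ids = zip(*rows, strict=False)
print(state_ids)       # (1, 2, 3)
print(attributes_ids)  # (11, None, 12)
print(event_ids)       # (None, 201, 202)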
Remove filtered events and linked states.
Return true if all events are purged. | def _purge_filtered_events(
instance: Recorder,
session: Session,
excluded_event_type_ids: list[int],
purge_before_timestamp: float,
) -> bool:
"""Remove filtered events and linked states.
Return true if all events are purged.
"""
database_engine = instance.database_engine
assert database_engine is not None
to_purge = list(
session.query(Events.event_id, Events.data_id)
.filter(Events.event_type_id.in_(excluded_event_type_ids))
.filter(Events.time_fired_ts < purge_before_timestamp)
.limit(instance.max_bind_vars)
.all()
)
if not to_purge:
return True
event_ids, data_ids = zip(*to_purge, strict=False)
event_ids_set = set(event_ids)
_LOGGER.debug(
"Selected %s event_ids to remove that should be filtered", len(event_ids_set)
)
if (
instance.use_legacy_events_index
and (
states := session.query(States.state_id)
.filter(States.event_id.in_(event_ids_set))
.all()
)
and (state_ids := {state_id for (state_id,) in states})
):
# These are legacy states linked to an event. They are no longer created,
# but since we did not remove them when we stopped adding new ones we
# need to purge them here.
_purge_state_ids(instance, session, state_ids)
_purge_event_ids(session, event_ids_set)
if unused_data_ids_set := _select_unused_event_data_ids(
instance, session, set(data_ids), database_engine
):
_purge_batch_data_ids(instance, session, unused_data_ids_set)
return False |
Purge states and events of specified entities. | def purge_entity_data(
instance: Recorder, entity_filter: Callable[[str], bool], purge_before: datetime
) -> bool:
"""Purge states and events of specified entities."""
database_engine = instance.database_engine
assert database_engine is not None
purge_before_timestamp = purge_before.timestamp()
with session_scope(session=instance.get_session()) as session:
selected_metadata_ids: list[str] = [
metadata_id
for (metadata_id, entity_id) in session.query(
StatesMeta.metadata_id, StatesMeta.entity_id
).all()
if entity_filter(entity_id)
]
_LOGGER.debug("Purging entity data for %s", selected_metadata_ids)
if not selected_metadata_ids:
return True
# Purge a max of max_bind_vars, based on the oldest states
# or events record.
if not _purge_filtered_states(
instance,
session,
selected_metadata_ids,
database_engine,
purge_before_timestamp,
):
_LOGGER.debug("Purging entity data hasn't fully completed yet")
return False
_purge_old_entity_ids(instance, session)
return True |
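The entity_filter argument is just a callable from entity_id to bool; metadata rows whose entity_id matches are the ones selected for purging, mirroring the list comprehension above. A tiny stand-alone illustration with hypothetical entity ids (no Recorder instance required):

rows = [(1, "sensor.kitchen_temperature"), (2, "light.hallway"), (3, "sensor.porch")]


def entity_filter(entity_id: str) -> bool:
    return entity_id.startswith("sensor.")


selected_metadata_ids = [
    metadata_id for metadata_id, entity_id in rows if entity_filter(entity_id)
]
print(selected_metadata_ids)  # [1, 3]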
Generate a select for event type ids.
This query is intentionally not a lambda statement as it is used inside
other lambda statements. | def select_event_type_ids(event_types: tuple[str, ...]) -> Select:
"""Generate a select for event type ids.
This query is intentionally not a lambda statement as it is used inside
other lambda statements.
"""
return select(EventTypes.event_type_id).where(
EventTypes.event_type.in_(event_types)
) |
Load shared attributes from the database. | def get_shared_attributes(hashes: list[int]) -> StatementLambdaElement:
"""Load shared attributes from the database."""
return lambda_stmt(
lambda: select(
StateAttributes.attributes_id, StateAttributes.shared_attrs
).where(StateAttributes.hash.in_(hashes))
) |
Load shared event data from the database. | def get_shared_event_datas(hashes: list[int]) -> StatementLambdaElement:
"""Load shared event data from the database."""
return lambda_stmt(
lambda: select(EventData.data_id, EventData.shared_data).where(
EventData.hash.in_(hashes)
)
) |
Find an event_type id by event_type. | def find_event_type_ids(event_types: Iterable[str]) -> StatementLambdaElement:
"""Find an event_type id by event_type."""
return lambda_stmt(
lambda: select(EventTypes.event_type_id, EventTypes.event_type).filter(
EventTypes.event_type.in_(event_types)
)
) |
Find all metadata_ids and entity_ids. | def find_all_states_metadata_ids() -> StatementLambdaElement:
"""Find all metadata_ids and entity_ids."""
return lambda_stmt(lambda: select(StatesMeta.metadata_id, StatesMeta.entity_id)) |
Find metadata_ids by entity_ids. | def find_states_metadata_ids(entity_ids: Iterable[str]) -> StatementLambdaElement:
"""Find metadata_ids by entity_ids."""
return lambda_stmt(
lambda: select(StatesMeta.metadata_id, StatesMeta.entity_id).filter(
StatesMeta.entity_id.in_(entity_ids)
)
) |
Check if a state attributes id exists in the states table. | def _state_attrs_exist(attr: int | None) -> Select:
"""Check if a state attributes id exists in the states table."""
return select(func.min(States.attributes_id)).where(States.attributes_id == attr) |
Find attributes ids that exist in the states table. | def attributes_ids_exist_in_states_with_fast_in_distinct(
attributes_ids: Iterable[int],
) -> StatementLambdaElement:
"""Find attributes ids that exist in the states table."""
return lambda_stmt(
lambda: select(distinct(States.attributes_id)).filter(
States.attributes_id.in_(attributes_ids)
)
) |
Generate the find attributes select only once.
https://docs.sqlalchemy.org/en/14/core/connections.html#quick-guidelines-for-lambdas | def attributes_ids_exist_in_states(
attr1: int,
attr2: int | None,
attr3: int | None,
attr4: int | None,
attr5: int | None,
attr6: int | None,
attr7: int | None,
attr8: int | None,
attr9: int | None,
attr10: int | None,
attr11: int | None,
attr12: int | None,
attr13: int | None,
attr14: int | None,
attr15: int | None,
attr16: int | None,
attr17: int | None,
attr18: int | None,
attr19: int | None,
attr20: int | None,
attr21: int | None,
attr22: int | None,
attr23: int | None,
attr24: int | None,
attr25: int | None,
attr26: int | None,
attr27: int | None,
attr28: int | None,
attr29: int | None,
attr30: int | None,
attr31: int | None,
attr32: int | None,
attr33: int | None,
attr34: int | None,
attr35: int | None,
attr36: int | None,
attr37: int | None,
attr38: int | None,
attr39: int | None,
attr40: int | None,
attr41: int | None,
attr42: int | None,
attr43: int | None,
attr44: int | None,
attr45: int | None,
attr46: int | None,
attr47: int | None,
attr48: int | None,
attr49: int | None,
attr50: int | None,
attr51: int | None,
attr52: int | None,
attr53: int | None,
attr54: int | None,
attr55: int | None,
attr56: int | None,
attr57: int | None,
attr58: int | None,
attr59: int | None,
attr60: int | None,
attr61: int | None,
attr62: int | None,
attr63: int | None,
attr64: int | None,
attr65: int | None,
attr66: int | None,
attr67: int | None,
attr68: int | None,
attr69: int | None,
attr70: int | None,
attr71: int | None,
attr72: int | None,
attr73: int | None,
attr74: int | None,
attr75: int | None,
attr76: int | None,
attr77: int | None,
attr78: int | None,
attr79: int | None,
attr80: int | None,
attr81: int | None,
attr82: int | None,
attr83: int | None,
attr84: int | None,
attr85: int | None,
attr86: int | None,
attr87: int | None,
attr88: int | None,
attr89: int | None,
attr90: int | None,
attr91: int | None,
attr92: int | None,
attr93: int | None,
attr94: int | None,
attr95: int | None,
attr96: int | None,
attr97: int | None,
attr98: int | None,
attr99: int | None,
attr100: int | None,
) -> StatementLambdaElement:
"""Generate the find attributes select only once.
https://docs.sqlalchemy.org/en/14/core/connections.html#quick-guidelines-for-lambdas
"""
return lambda_stmt(
lambda: union_all(
_state_attrs_exist(attr1),
_state_attrs_exist(attr2),
_state_attrs_exist(attr3),
_state_attrs_exist(attr4),
_state_attrs_exist(attr5),
_state_attrs_exist(attr6),
_state_attrs_exist(attr7),
_state_attrs_exist(attr8),
_state_attrs_exist(attr9),
_state_attrs_exist(attr10),
_state_attrs_exist(attr11),
_state_attrs_exist(attr12),
_state_attrs_exist(attr13),
_state_attrs_exist(attr14),
_state_attrs_exist(attr15),
_state_attrs_exist(attr16),
_state_attrs_exist(attr17),
_state_attrs_exist(attr18),
_state_attrs_exist(attr19),
_state_attrs_exist(attr20),
_state_attrs_exist(attr21),
_state_attrs_exist(attr22),
_state_attrs_exist(attr23),
_state_attrs_exist(attr24),
_state_attrs_exist(attr25),
_state_attrs_exist(attr26),
_state_attrs_exist(attr27),
_state_attrs_exist(attr28),
_state_attrs_exist(attr29),
_state_attrs_exist(attr30),
_state_attrs_exist(attr31),
_state_attrs_exist(attr32),
_state_attrs_exist(attr33),
_state_attrs_exist(attr34),
_state_attrs_exist(attr35),
_state_attrs_exist(attr36),
_state_attrs_exist(attr37),
_state_attrs_exist(attr38),
_state_attrs_exist(attr39),
_state_attrs_exist(attr40),
_state_attrs_exist(attr41),
_state_attrs_exist(attr42),
_state_attrs_exist(attr43),
_state_attrs_exist(attr44),
_state_attrs_exist(attr45),
_state_attrs_exist(attr46),
_state_attrs_exist(attr47),
_state_attrs_exist(attr48),
_state_attrs_exist(attr49),
_state_attrs_exist(attr50),
_state_attrs_exist(attr51),
_state_attrs_exist(attr52),
_state_attrs_exist(attr53),
_state_attrs_exist(attr54),
_state_attrs_exist(attr55),
_state_attrs_exist(attr56),
_state_attrs_exist(attr57),
_state_attrs_exist(attr58),
_state_attrs_exist(attr59),
_state_attrs_exist(attr60),
_state_attrs_exist(attr61),
_state_attrs_exist(attr62),
_state_attrs_exist(attr63),
_state_attrs_exist(attr64),
_state_attrs_exist(attr65),
_state_attrs_exist(attr66),
_state_attrs_exist(attr67),
_state_attrs_exist(attr68),
_state_attrs_exist(attr69),
_state_attrs_exist(attr70),
_state_attrs_exist(attr71),
_state_attrs_exist(attr72),
_state_attrs_exist(attr73),
_state_attrs_exist(attr74),
_state_attrs_exist(attr75),
_state_attrs_exist(attr76),
_state_attrs_exist(attr77),
_state_attrs_exist(attr78),
_state_attrs_exist(attr79),
_state_attrs_exist(attr80),
_state_attrs_exist(attr81),
_state_attrs_exist(attr82),
_state_attrs_exist(attr83),
_state_attrs_exist(attr84),
_state_attrs_exist(attr85),
_state_attrs_exist(attr86),
_state_attrs_exist(attr87),
_state_attrs_exist(attr88),
_state_attrs_exist(attr89),
_state_attrs_exist(attr90),
_state_attrs_exist(attr91),
_state_attrs_exist(attr92),
_state_attrs_exist(attr93),
_state_attrs_exist(attr94),
_state_attrs_exist(attr95),
_state_attrs_exist(attr96),
_state_attrs_exist(attr97),
_state_attrs_exist(attr98),
_state_attrs_exist(attr99),
_state_attrs_exist(attr100),
)
) |
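A reduced, self-contained sketch of the union-of-point-lookups shape built above, using a toy table rather than the recorder schema: each SELECT MIN(...) WHERE col = <value> can be answered from the index alone, and the union yields one row (NULL when the id is unused) per candidate id.

from sqlalchemy import (
    Column,
    Integer,
    MetaData,
    Table,
    create_engine,
    func,
    select,
    union_all,
)

metadata = MetaData()
toy_states = Table(
    "toy_states",
    metadata,
    Column("state_id", Integer, primary_key=True),
    Column("attributes_id", Integer, index=True),
)
engine = create_engine("sqlite://")
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(toy_states.insert(), [{"state_id": 1, "attributes_id": 7}])
    candidate_ids = [7, 8, 9]
    stmt = union_all(
        *(
            select(func.min(toy_states.c.attributes_id)).where(
                toy_states.c.attributes_id == attributes_id
            )
            for attributes_id in candidate_ids
        )
    )
    print(conn.execute(stmt).all())  # e.g. [(7,), (None,), (None,)]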
Find data ids that exist in the events table. | def data_ids_exist_in_events_with_fast_in_distinct(
data_ids: Iterable[int],
) -> StatementLambdaElement:
"""Find data ids that exist in the events table."""
return lambda_stmt(
lambda: select(distinct(Events.data_id)).filter(Events.data_id.in_(data_ids))
) |
Check if an event data id exists in the events table. | def _event_data_id_exist(data_id: int | None) -> Select:
"""Check if an event data id exists in the events table."""
return select(func.min(Events.data_id)).where(Events.data_id == data_id) |
Generate the find event data select only once.
https://docs.sqlalchemy.org/en/14/core/connections.html#quick-guidelines-for-lambdas | def data_ids_exist_in_events(
id1: int,
id2: int | None,
id3: int | None,
id4: int | None,
id5: int | None,
id6: int | None,
id7: int | None,
id8: int | None,
id9: int | None,
id10: int | None,
id11: int | None,
id12: int | None,
id13: int | None,
id14: int | None,
id15: int | None,
id16: int | None,
id17: int | None,
id18: int | None,
id19: int | None,
id20: int | None,
id21: int | None,
id22: int | None,
id23: int | None,
id24: int | None,
id25: int | None,
id26: int | None,
id27: int | None,
id28: int | None,
id29: int | None,
id30: int | None,
id31: int | None,
id32: int | None,
id33: int | None,
id34: int | None,
id35: int | None,
id36: int | None,
id37: int | None,
id38: int | None,
id39: int | None,
id40: int | None,
id41: int | None,
id42: int | None,
id43: int | None,
id44: int | None,
id45: int | None,
id46: int | None,
id47: int | None,
id48: int | None,
id49: int | None,
id50: int | None,
id51: int | None,
id52: int | None,
id53: int | None,
id54: int | None,
id55: int | None,
id56: int | None,
id57: int | None,
id58: int | None,
id59: int | None,
id60: int | None,
id61: int | None,
id62: int | None,
id63: int | None,
id64: int | None,
id65: int | None,
id66: int | None,
id67: int | None,
id68: int | None,
id69: int | None,
id70: int | None,
id71: int | None,
id72: int | None,
id73: int | None,
id74: int | None,
id75: int | None,
id76: int | None,
id77: int | None,
id78: int | None,
id79: int | None,
id80: int | None,
id81: int | None,
id82: int | None,
id83: int | None,
id84: int | None,
id85: int | None,
id86: int | None,
id87: int | None,
id88: int | None,
id89: int | None,
id90: int | None,
id91: int | None,
id92: int | None,
id93: int | None,
id94: int | None,
id95: int | None,
id96: int | None,
id97: int | None,
id98: int | None,
id99: int | None,
id100: int | None,
) -> StatementLambdaElement:
"""Generate the find event data select only once.
https://docs.sqlalchemy.org/en/14/core/connections.html#quick-guidelines-for-lambdas
"""
return lambda_stmt(
lambda: union_all(
_event_data_id_exist(id1),
_event_data_id_exist(id2),
_event_data_id_exist(id3),
_event_data_id_exist(id4),
_event_data_id_exist(id5),
_event_data_id_exist(id6),
_event_data_id_exist(id7),
_event_data_id_exist(id8),
_event_data_id_exist(id9),
_event_data_id_exist(id10),
_event_data_id_exist(id11),
_event_data_id_exist(id12),
_event_data_id_exist(id13),
_event_data_id_exist(id14),
_event_data_id_exist(id15),
_event_data_id_exist(id16),
_event_data_id_exist(id17),
_event_data_id_exist(id18),
_event_data_id_exist(id19),
_event_data_id_exist(id20),
_event_data_id_exist(id21),
_event_data_id_exist(id22),
_event_data_id_exist(id23),
_event_data_id_exist(id24),
_event_data_id_exist(id25),
_event_data_id_exist(id26),
_event_data_id_exist(id27),
_event_data_id_exist(id28),
_event_data_id_exist(id29),
_event_data_id_exist(id30),
_event_data_id_exist(id31),
_event_data_id_exist(id32),
_event_data_id_exist(id33),
_event_data_id_exist(id34),
_event_data_id_exist(id35),
_event_data_id_exist(id36),
_event_data_id_exist(id37),
_event_data_id_exist(id38),
_event_data_id_exist(id39),
_event_data_id_exist(id40),
_event_data_id_exist(id41),
_event_data_id_exist(id42),
_event_data_id_exist(id43),
_event_data_id_exist(id44),
_event_data_id_exist(id45),
_event_data_id_exist(id46),
_event_data_id_exist(id47),
_event_data_id_exist(id48),
_event_data_id_exist(id49),
_event_data_id_exist(id50),
_event_data_id_exist(id51),
_event_data_id_exist(id52),
_event_data_id_exist(id53),
_event_data_id_exist(id54),
_event_data_id_exist(id55),
_event_data_id_exist(id56),
_event_data_id_exist(id57),
_event_data_id_exist(id58),
_event_data_id_exist(id59),
_event_data_id_exist(id60),
_event_data_id_exist(id61),
_event_data_id_exist(id62),
_event_data_id_exist(id63),
_event_data_id_exist(id64),
_event_data_id_exist(id65),
_event_data_id_exist(id66),
_event_data_id_exist(id67),
_event_data_id_exist(id68),
_event_data_id_exist(id69),
_event_data_id_exist(id70),
_event_data_id_exist(id71),
_event_data_id_exist(id72),
_event_data_id_exist(id73),
_event_data_id_exist(id74),
_event_data_id_exist(id75),
_event_data_id_exist(id76),
_event_data_id_exist(id77),
_event_data_id_exist(id78),
_event_data_id_exist(id79),
_event_data_id_exist(id80),
_event_data_id_exist(id81),
_event_data_id_exist(id82),
_event_data_id_exist(id83),
_event_data_id_exist(id84),
_event_data_id_exist(id85),
_event_data_id_exist(id86),
_event_data_id_exist(id87),
_event_data_id_exist(id88),
_event_data_id_exist(id89),
_event_data_id_exist(id90),
_event_data_id_exist(id91),
_event_data_id_exist(id92),
_event_data_id_exist(id93),
_event_data_id_exist(id94),
_event_data_id_exist(id95),
_event_data_id_exist(id96),
_event_data_id_exist(id97),
_event_data_id_exist(id98),
_event_data_id_exist(id99),
_event_data_id_exist(id100),
)
) |
Disconnect states rows. | def disconnect_states_rows(state_ids: Iterable[int]) -> StatementLambdaElement:
"""Disconnect states rows."""
return lambda_stmt(
lambda: update(States)
.where(States.old_state_id.in_(state_ids))
.values(old_state_id=None)
.execution_options(synchronize_session=False)
) |
Delete states rows. | def delete_states_rows(state_ids: Iterable[int]) -> StatementLambdaElement:
"""Delete states rows."""
return lambda_stmt(
lambda: delete(States)
.where(States.state_id.in_(state_ids))
.execution_options(synchronize_session=False)
) |
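A reduced, self-contained sketch of how one of these lambda statements is executed (toy table, in-memory SQLite; not the recorder models): the rowcount of the returned result is what the purge helpers above log as the number of deleted rows.

from sqlalchemy import (
    Column,
    Integer,
    MetaData,
    Table,
    create_engine,
    delete,
    lambda_stmt,
)
from sqlalchemy.orm import Session

metadata = MetaData()
toy_states = Table(
    "toy_states", metadata, Column("state_id", Integer, primary_key=True)
)
engine = create_engine("sqlite://")
metadata.create_all(engine)

with Session(engine) as session:
    session.execute(toy_states.insert(), [{"state_id": i} for i in range(1, 6)])
    purge_ids = {1, 2, 3}
    stmt = lambda_stmt(
        lambda: delete(toy_states).where(toy_states.c.state_id.in_(purge_ids))
    )
    deleted_rows = session.execute(stmt).rowcount
    session.commit()
    print(deleted_rows)  # 3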
Delete event_data rows. | def delete_event_data_rows(data_ids: Iterable[int]) -> StatementLambdaElement:
"""Delete event_data rows."""
return lambda_stmt(
lambda: delete(EventData)
.where(EventData.data_id.in_(data_ids))
.execution_options(synchronize_session=False)
) |
Delete states_attributes rows. | def delete_states_attributes_rows(
attributes_ids: Iterable[int],
) -> StatementLambdaElement:
"""Delete states_attributes rows."""
return lambda_stmt(
lambda: delete(StateAttributes)
.where(StateAttributes.attributes_id.in_(attributes_ids))
.execution_options(synchronize_session=False)
) |
Delete statistics_runs rows. | def delete_statistics_runs_rows(
statistics_runs: Iterable[int],
) -> StatementLambdaElement:
"""Delete statistics_runs rows."""
return lambda_stmt(
lambda: delete(StatisticsRuns)
.where(StatisticsRuns.run_id.in_(statistics_runs))
.execution_options(synchronize_session=False)
) |
Delete statistics_short_term rows. | def delete_statistics_short_term_rows(
short_term_statistics: Iterable[int],
) -> StatementLambdaElement:
"""Delete statistics_short_term rows."""
return lambda_stmt(
lambda: delete(StatisticsShortTerm)
.where(StatisticsShortTerm.id.in_(short_term_statistics))
.execution_options(synchronize_session=False)
) |
Delete events rows. | def delete_event_rows(
event_ids: Iterable[int],
) -> StatementLambdaElement:
"""Delete statistics_short_term rows."""
return lambda_stmt(
lambda: delete(Events)
.where(Events.event_id.in_(event_ids))
.execution_options(synchronize_session=False)
) |
Delete recorder_runs rows. | def delete_recorder_runs_rows(
purge_before: datetime, current_run_id: int
) -> StatementLambdaElement:
"""Delete recorder_runs rows."""
return lambda_stmt(
lambda: delete(RecorderRuns)
.filter(RecorderRuns.start < purge_before)
.filter(RecorderRuns.run_id != current_run_id)
.execution_options(synchronize_session=False)
) |
Find events to purge. | def find_events_to_purge(
purge_before: float, max_bind_vars: int
) -> StatementLambdaElement:
"""Find events to purge."""
return lambda_stmt(
lambda: select(Events.event_id, Events.data_id)
.filter(Events.time_fired_ts < purge_before)
.limit(max_bind_vars)
) |
Find states to purge. | def find_states_to_purge(
purge_before: float, max_bind_vars: int
) -> StatementLambdaElement:
"""Find states to purge."""
return lambda_stmt(
lambda: select(States.state_id, States.attributes_id)
.filter(States.last_updated_ts < purge_before)
.limit(max_bind_vars)
) |
Find short term statistics to purge. | def find_short_term_statistics_to_purge(
purge_before: datetime, max_bind_vars: int
) -> StatementLambdaElement:
"""Find short term statistics to purge."""
purge_before_ts = purge_before.timestamp()
return lambda_stmt(
lambda: select(StatisticsShortTerm.id)
.filter(StatisticsShortTerm.start_ts < purge_before_ts)
.limit(max_bind_vars)
) |
Find statistics_runs to purge. | def find_statistics_runs_to_purge(
purge_before: datetime, max_bind_vars: int
) -> StatementLambdaElement:
"""Find statistics_runs to purge."""
return lambda_stmt(
lambda: select(StatisticsRuns.run_id)
.filter(StatisticsRuns.start < purge_before)
.limit(max_bind_vars)
) |
Find the latest statistics_runs run_id. | def find_latest_statistics_runs_run_id() -> StatementLambdaElement:
"""Find the latest statistics_runs run_id."""
return lambda_stmt(lambda: select(func.max(StatisticsRuns.run_id))) |
Find the latest row in the legacy format to purge. | def find_legacy_event_state_and_attributes_and_data_ids_to_purge(
purge_before: float, max_bind_vars: int
) -> StatementLambdaElement:
"""Find the latest row in the legacy format to purge."""
return lambda_stmt(
lambda: select(
Events.event_id, Events.data_id, States.state_id, States.attributes_id
)
.outerjoin(States, Events.event_id == States.event_id)
.filter(Events.time_fired_ts < purge_before)
.limit(max_bind_vars)
) |
Find states rows with event_id set but not linked event_id in Events. | def find_legacy_detached_states_and_attributes_to_purge(
purge_before: float, max_bind_vars: int
) -> StatementLambdaElement:
"""Find states rows with event_id set but not linked event_id in Events."""
return lambda_stmt(
lambda: select(States.state_id, States.attributes_id)
.outerjoin(Events, States.event_id == Events.event_id)
.filter(States.event_id.isnot(None))
.filter(
(States.last_updated_ts < purge_before) | States.last_updated_ts.is_(None)
)
.filter(Events.event_id.is_(None))
.limit(max_bind_vars)
) |
Check if there are still states in the table with an event_id. | def find_legacy_row() -> StatementLambdaElement:
"""Check if there are still states in the table with an event_id."""
return lambda_stmt(lambda: select(func.max(States.event_id))) |
Find events context_ids to migrate. | def find_events_context_ids_to_migrate(max_bind_vars: int) -> StatementLambdaElement:
"""Find events context_ids to migrate."""
return lambda_stmt(
lambda: select(
Events.event_id,
Events.time_fired_ts,
Events.context_id,
Events.context_user_id,
Events.context_parent_id,
)
.filter(Events.context_id_bin.is_(None))
.limit(max_bind_vars)
) |
Find events event_type to migrate. | def find_event_type_to_migrate(max_bind_vars: int) -> StatementLambdaElement:
"""Find events event_type to migrate."""
return lambda_stmt(
lambda: select(
Events.event_id,
Events.event_type,
)
.filter(Events.event_type_id.is_(None))
.limit(max_bind_vars)
) |
Find entity_id to migrate. | def find_entity_ids_to_migrate(max_bind_vars: int) -> StatementLambdaElement:
"""Find entity_id to migrate."""
return lambda_stmt(
lambda: select(
States.state_id,
States.entity_id,
)
.filter(States.metadata_id.is_(None))
.limit(max_bind_vars)
) |
Find entity_id to cleanup. | def batch_cleanup_entity_ids() -> StatementLambdaElement:
"""Find entity_id to cleanup."""
# Self join because this version of MariaDB doesn't yet support 'LIMIT & IN/ALL/ANY/SOME subquery'
return lambda_stmt(
lambda: update(States)
.where(
States.state_id.in_(
select(States.state_id)
.join(
states_with_entity_ids := select(
States.state_id.label("state_id_with_entity_id")
)
.filter(States.entity_id.is_not(None))
.limit(5000)
.subquery(),
States.state_id == states_with_entity_ids.c.state_id_with_entity_id,
)
.alias("states_with_entity_ids")
.select()
)
)
.values(entity_id=None)
) |
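A reduced, self-contained sketch of the same workaround on a toy table (toy_states is illustrative, not the real schema), compiled for the MySQL/MariaDB dialect so the derived-table shape is visible; the direct UPDATE ... WHERE id IN (SELECT ... LIMIT n) form is the one MariaDB rejects.

from sqlalchemy import Column, Integer, MetaData, String, Table, select, update
from sqlalchemy.dialects import mysql

metadata = MetaData()
toy_states = Table(
    "toy_states",
    metadata,
    Column("state_id", Integer, primary_key=True),
    Column("entity_id", String(255)),
)

limited = (
    select(toy_states.c.state_id.label("state_id_with_entity_id"))
    .where(toy_states.c.entity_id.is_not(None))
    .limit(5000)
    .subquery()
)
stmt = (
    update(toy_states)
    .where(
        toy_states.c.state_id.in_(
            select(toy_states.c.state_id)
            .join(
                limited,
                toy_states.c.state_id == limited.c.state_id_with_entity_id,
            )
            .alias("states_with_entity_ids")
            .select()
        )
    )
    .values(entity_id=None)
)
print(stmt.compile(dialect=mysql.dialect()))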
Check if there are used event_ids in the states table. | def has_used_states_event_ids() -> StatementLambdaElement:
"""Check if there are used event_ids in the states table."""
return lambda_stmt(
lambda: select(States.state_id).filter(States.event_id.isnot(None)).limit(1)
) |
Check if there are events context ids to migrate. | def has_events_context_ids_to_migrate() -> StatementLambdaElement:
"""Check if there are events context ids to migrate."""
return lambda_stmt(
lambda: select(Events.event_id).filter(Events.context_id_bin.is_(None)).limit(1)
) |