response | instruction
---|---|
Run checks that will generate a sqlite3 exception if there is corruption. | def run_checks_on_open_db(dbpath: str, cursor: SQLiteCursor) -> None:
"""Run checks that will generate a sqlite3 exception if there is corruption."""
sanity_check_passed = basic_sanity_check(cursor)
last_run_was_clean = last_run_was_recently_clean(cursor)
if sanity_check_passed and last_run_was_clean:
_LOGGER.debug(
"The system was restarted cleanly and passed the basic sanity check"
)
return
if not sanity_check_passed:
_LOGGER.warning(
"The database sanity check failed to validate the sqlite3 database at %s",
dbpath,
)
if not last_run_was_clean:
_LOGGER.warning(
(
"The system could not validate that the sqlite3 database at %s was"
" shutdown cleanly"
),
dbpath,
) |
Move away a broken sqlite3 database. | def move_away_broken_database(dbfile: str) -> None:
"""Move away a broken sqlite3 database."""
isotime = dt_util.utcnow().isoformat()
corrupt_postfix = f".corrupt.{isotime}"
_LOGGER.error(
(
"The system will rename the corrupt database file %s to %s in order to"
" allow startup to proceed"
),
dbfile,
f"{dbfile}{corrupt_postfix}",
)
for postfix in SQLITE3_POSTFIXES:
path = f"{dbfile}{postfix}"
if not os.path.exists(path):
continue
os.rename(path, f"{path}{corrupt_postfix}") |
Execute a single statement with a dbapi connection. | def execute_on_connection(dbapi_connection: DBAPIConnection, statement: str) -> None:
"""Execute a single statement with a dbapi connection."""
cursor = dbapi_connection.cursor()
cursor.execute(statement)
cursor.close() |
Execute a single statement with a dbapi connection and return the result. | def query_on_connection(dbapi_connection: DBAPIConnection, statement: str) -> Any:
"""Execute a single statement with a dbapi connection and return the result."""
cursor = dbapi_connection.cursor()
cursor.execute(statement)
result = cursor.fetchall()
cursor.close()
return result |
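These two helpers wrap the plain DB-API cursor protocol, so they can be tried against a stdlib sqlite3 connection; a minimal sketch, assuming the two functions above are in scope (sqlite3 is only a stand-in for whatever DB-API driver the recorder is using):

import sqlite3

conn = sqlite3.connect(":memory:")  # any DB-API connection behaves the same way
execute_on_connection(conn, "PRAGMA cache_size = -16384")
print(query_on_connection(conn, "SELECT sqlite_version()"))  # e.g. [('3.45.1',)]
conn.close()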
Warn about an unsupported database dialect. | def _fail_unsupported_dialect(dialect_name: str) -> NoReturn:
"""Warn about an unsupported database dialect."""
_LOGGER.error(
(
"Database %s is not supported; Home Assistant supports %s. "
"Starting with Home Assistant 2022.6 this prevents the recorder from "
"starting. Please migrate your database to a supported software"
),
dialect_name,
"MariaDB ≥ 10.3, MySQL ≥ 8.0, PostgreSQL ≥ 12, SQLite ≥ 3.31.0",
)
raise UnsupportedDialect |
Warn about unsupported database version. | def _fail_unsupported_version(
server_version: str, dialect_name: str, minimum_version: str
) -> NoReturn:
"""Warn about unsupported database version."""
_LOGGER.error(
(
"Version %s of %s is not supported; minimum supported version is %s. "
"Starting with Home Assistant 2022.6 this prevents the recorder from "
"starting. Please upgrade your database software"
),
server_version,
dialect_name,
minimum_version,
)
raise UnsupportedDialect |
Attempt to extract version from server response. | def _extract_version_from_server_response(
server_response: str,
) -> AwesomeVersion | None:
"""Attempt to extract version from server response."""
try:
return AwesomeVersion(
server_response,
ensure_strategy=AwesomeVersionStrategy.SIMPLEVER,
find_first_match=True,
)
except AwesomeVersionException:
return None |
Fast version of mysqldb DateTime_or_None.
https://github.com/PyMySQL/mysqlclient/blob/v2.1.0/MySQLdb/times.py#L66 | def _datetime_or_none(value: str) -> datetime | None:
"""Fast version of mysqldb DateTime_or_None.
https://github.com/PyMySQL/mysqlclient/blob/v2.1.0/MySQLdb/times.py#L66
"""
try:
return ciso8601.parse_datetime(value)
except ValueError:
return None |
Build a MySQLdb conv dict that uses ciso8601 to parse datetimes. | def build_mysqldb_conv() -> dict:
"""Build a MySQLdb conv dict that uses ciso8601 to parse datetimes."""
# Late imports since we only call this if they are using mysqldb
# pylint: disable=import-outside-toplevel
from MySQLdb.constants import FIELD_TYPE
from MySQLdb.converters import conversions
return {**conversions, FIELD_TYPE.DATETIME: _datetime_or_none} |
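The speed-up comes from ciso8601's C parser, and the returned mapping would typically be handed to MySQLdb.connect(conv=...). The parsing half can be checked on its own; a sketch requiring only the ciso8601 package (not the recorder's exact wiring):

import ciso8601

def _datetime_or_none(value):
    """Parse a MySQL DATETIME string quickly, returning None on bad input."""
    try:
        return ciso8601.parse_datetime(value)
    except ValueError:
        return None

print(_datetime_or_none("2024-05-01 13:37:00"))  # 2024-05-01 13:37:00
print(_datetime_or_none("0000-00-00 00:00:00"))  # None (MySQL zero date is not a valid datetime)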
Create an issue for the index range regression in older MariaDB.
The range scan issue was fixed in MariaDB 10.5.17, 10.6.9, 10.7.5, 10.8.4 and later. | def _async_create_mariadb_range_index_regression_issue(
hass: HomeAssistant, version: AwesomeVersion
) -> None:
"""Create an issue for the index range regression in older MariaDB.
The range scan issue was fixed in MariaDB 10.5.17, 10.6.9, 10.7.5, 10.8.4 and later.
"""
if version >= MARIA_DB_108:
min_version = RECOMMENDED_MIN_VERSION_MARIA_DB_108
elif version >= MARIA_DB_107:
min_version = RECOMMENDED_MIN_VERSION_MARIA_DB_107
elif version >= MARIA_DB_106:
min_version = RECOMMENDED_MIN_VERSION_MARIA_DB_106
else:
min_version = RECOMMENDED_MIN_VERSION_MARIA_DB
ir.async_create_issue(
hass,
DOMAIN,
"maria_db_range_index_regression",
is_fixable=False,
severity=ir.IssueSeverity.CRITICAL,
learn_more_url="https://jira.mariadb.org/browse/MDEV-25020",
translation_key="maria_db_range_index_regression",
translation_placeholders={"min_version": str(min_version)},
) |
Create an issue when the backup fails because we run out of resources. | def async_create_backup_failure_issue(
hass: HomeAssistant,
local_start_time: datetime,
) -> None:
"""Create an issue when the backup fails because we run out of resources."""
ir.async_create_issue(
hass,
DOMAIN,
"backup_failed_out_of_resources",
is_fixable=False,
severity=ir.IssueSeverity.CRITICAL,
learn_more_url="https://www.home-assistant.io/integrations/recorder",
translation_key="backup_failed_out_of_resources",
translation_placeholders={"start_time": local_start_time.strftime("%H:%M:%S")},
) |
Execute statements needed for dialect connection. | def setup_connection_for_dialect(
instance: Recorder,
dialect_name: str,
dbapi_connection: DBAPIConnection,
first_connection: bool,
) -> DatabaseEngine | None:
"""Execute statements needed for dialect connection."""
version: AwesomeVersion | None = None
slow_range_in_select = False
if dialect_name == SupportedDialect.SQLITE:
max_bind_vars = SQLITE_MAX_BIND_VARS
if first_connection:
old_isolation = dbapi_connection.isolation_level # type: ignore[attr-defined]
dbapi_connection.isolation_level = None # type: ignore[attr-defined]
execute_on_connection(dbapi_connection, "PRAGMA journal_mode=WAL")
dbapi_connection.isolation_level = old_isolation # type: ignore[attr-defined]
# WAL mode only needs to be set up once
# instead of every time we open the sqlite connection
# as it's persistent and isn't free to call every time.
result = query_on_connection(dbapi_connection, "SELECT sqlite_version()")
version_string = result[0][0]
version = _extract_version_from_server_response(version_string)
if not version or version < MIN_VERSION_SQLITE:
_fail_unsupported_version(
version or version_string, "SQLite", MIN_VERSION_SQLITE
)
if version and version > MIN_VERSION_SQLITE_MODERN_BIND_VARS:
max_bind_vars = SQLITE_MODERN_MAX_BIND_VARS
# The upper bound on the cache size is approximately 16MiB of memory
execute_on_connection(dbapi_connection, "PRAGMA cache_size = -16384")
#
# Enable FULL synchronous if they have a commit interval of 0
# or NORMAL if they do not.
#
# https://sqlite.org/pragma.html#pragma_synchronous
# The synchronous=NORMAL setting is a good choice for most applications
# running in WAL mode.
#
synchronous = "NORMAL" if instance.commit_interval else "FULL"
execute_on_connection(dbapi_connection, f"PRAGMA synchronous={synchronous}")
# enable support for foreign keys
execute_on_connection(dbapi_connection, "PRAGMA foreign_keys=ON")
elif dialect_name == SupportedDialect.MYSQL:
max_bind_vars = DEFAULT_MAX_BIND_VARS
execute_on_connection(dbapi_connection, "SET session wait_timeout=28800")
if first_connection:
result = query_on_connection(dbapi_connection, "SELECT VERSION()")
version_string = result[0][0]
version = _extract_version_from_server_response(version_string)
is_maria_db = "mariadb" in version_string.lower()
if is_maria_db:
if not version or version < MIN_VERSION_MARIA_DB:
_fail_unsupported_version(
version or version_string, "MariaDB", MIN_VERSION_MARIA_DB
)
if version and (
(version < RECOMMENDED_MIN_VERSION_MARIA_DB)
or (MARIA_DB_106 <= version < RECOMMENDED_MIN_VERSION_MARIA_DB_106)
or (MARIA_DB_107 <= version < RECOMMENDED_MIN_VERSION_MARIA_DB_107)
or (MARIA_DB_108 <= version < RECOMMENDED_MIN_VERSION_MARIA_DB_108)
):
instance.hass.add_job(
_async_create_mariadb_range_index_regression_issue,
instance.hass,
version,
)
elif not version or version < MIN_VERSION_MYSQL:
_fail_unsupported_version(
version or version_string, "MySQL", MIN_VERSION_MYSQL
)
slow_range_in_select = bool(
not version
or version < MARIADB_WITH_FIXED_IN_QUERIES_105
or MARIA_DB_106 <= version < MARIADB_WITH_FIXED_IN_QUERIES_106
or MARIA_DB_107 <= version < MARIADB_WITH_FIXED_IN_QUERIES_107
or MARIA_DB_108 <= version < MARIADB_WITH_FIXED_IN_QUERIES_108
)
# Ensure all times are using UTC to avoid issues with daylight savings
execute_on_connection(dbapi_connection, "SET time_zone = '+00:00'")
elif dialect_name == SupportedDialect.POSTGRESQL:
max_bind_vars = DEFAULT_MAX_BIND_VARS
if first_connection:
# server_version_num was added in 2006
result = query_on_connection(dbapi_connection, "SHOW server_version")
version_string = result[0][0]
version = _extract_version_from_server_response(version_string)
if not version or version < MIN_VERSION_PGSQL:
_fail_unsupported_version(
version or version_string, "PostgreSQL", MIN_VERSION_PGSQL
)
else:
_fail_unsupported_dialect(dialect_name)
if not first_connection:
return None
return DatabaseEngine(
dialect=SupportedDialect(dialect_name),
version=version,
optimizer=DatabaseOptimizer(slow_range_in_select=slow_range_in_select),
max_bind_vars=max_bind_vars,
) |
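For the SQLite branch, the same PRAGMA sequence can be reproduced on a bare sqlite3 connection; a standalone sketch, assuming a file-backed database at a hypothetical path (WAL has no effect on :memory: databases):

import sqlite3

conn = sqlite3.connect("recorder_example.db")  # hypothetical path for illustration
old_isolation = conn.isolation_level
conn.isolation_level = None                 # autocommit, so the PRAGMA runs outside a transaction
conn.execute("PRAGMA journal_mode=WAL")     # persistent, so only needed on the first connection
conn.isolation_level = old_isolation
conn.execute("PRAGMA cache_size = -16384")  # negative value is KiB, i.e. ~16 MiB page cache
conn.execute("PRAGMA synchronous=NORMAL")   # FULL when every commit must reach disk first
conn.execute("PRAGMA foreign_keys=ON")
print(conn.execute("SELECT sqlite_version()").fetchone()[0])
conn.close()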
End any incomplete recorder runs. | def end_incomplete_runs(session: Session, start_time: datetime) -> None:
"""End any incomplete recorder runs."""
for run in session.query(RecorderRuns).filter_by(end=None):
run.closed_incorrect = True
run.end = start_time
_LOGGER.warning(
"Ended unfinished session (id=%s from %s)", run.run_id, run.start
)
session.add(run) |
Return True if the error is retryable. | def _is_retryable_error(instance: Recorder, err: OperationalError) -> bool:
"""Return True if the error is retryable."""
assert instance.engine is not None
return bool(
instance.engine.dialect.name == SupportedDialect.MYSQL
and isinstance(err.orig, BaseException)
and err.orig.args
and err.orig.args[0] in RETRYABLE_MYSQL_ERRORS
) |
Try to execute a database job.
The job should return True if it finished, and False if it needs to be rescheduled. | def retryable_database_job(
description: str,
) -> Callable[[_FuncType[_RecorderT, _P]], _FuncType[_RecorderT, _P]]:
"""Try to execute a database job.
The job should return True if it finished, and False if it needs to be rescheduled.
"""
def decorator(job: _FuncType[_RecorderT, _P]) -> _FuncType[_RecorderT, _P]:
@functools.wraps(job)
def wrapper(instance: _RecorderT, *args: _P.args, **kwargs: _P.kwargs) -> bool:
try:
return job(instance, *args, **kwargs)
except OperationalError as err:
if _is_retryable_error(instance, err):
assert isinstance(err.orig, BaseException) # noqa: PT017
_LOGGER.info(
"%s; %s not completed, retrying", err.orig.args[1], description
)
time.sleep(instance.db_retry_wait)
# Failed with retryable error
return False
_LOGGER.warning("Error executing %s: %s", description, err)
# Failed with permanent error
return True
return wrapper
return decorator |
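The True/False contract is easiest to see with a simplified, recorder-free version of the same pattern; the names, the retry predicate, and the wait time below are invented for illustration, not the recorder's implementation:

import functools
import time

def retryable_job(description, is_retryable, wait_seconds=0.1):
    """Simplified stand-in: return False after a retryable error so the caller reschedules."""
    def decorator(job):
        @functools.wraps(job)
        def wrapper(*args, **kwargs):
            try:
                return job(*args, **kwargs)
            except Exception as err:
                if is_retryable(err):
                    time.sleep(wait_seconds)
                    return False   # retryable: the caller should run the job again later
                print(f"Error executing {description}: {err}")
                return True        # permanent failure: treat as finished, do not reschedule
        return wrapper
    return decorator

@retryable_job("purge batch", is_retryable=lambda err: isinstance(err, TimeoutError))
def purge_batch():
    return True  # pretend the batch finished

while not purge_batch():   # reschedule loop: keep calling until the job reports completion
    pass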
Try to execute a database job multiple times.
This wrapper handles InnoDB deadlocks and lock timeouts.
This is different from retryable_database_job in that it will retry the job
attempts number of times instead of returning False if the job fails. | def database_job_retry_wrapper(
description: str, attempts: int = 5
) -> Callable[[_WrappedFuncType[_RecorderT, _P]], _WrappedFuncType[_RecorderT, _P]]:
"""Try to execute a database job multiple times.
This wrapper handles InnoDB deadlocks and lock timeouts.
This is different from retryable_database_job in that it will retry the job
attempts number of times instead of returning False if the job fails.
"""
def decorator(
job: _WrappedFuncType[_RecorderT, _P],
) -> _WrappedFuncType[_RecorderT, _P]:
@functools.wraps(job)
def wrapper(instance: _RecorderT, *args: _P.args, **kwargs: _P.kwargs) -> None:
for attempt in range(attempts):
try:
job(instance, *args, **kwargs)
except OperationalError as err:
if attempt == attempts - 1 or not _is_retryable_error(
instance, err
):
raise
assert isinstance(err.orig, BaseException) # noqa: PT017
_LOGGER.info(
"%s; %s failed, retrying", err.orig.args[1], description
)
time.sleep(instance.db_retry_wait)
# Failed with retryable error
else:
return
return wrapper
return decorator |
Run any database cleanups that need to happen periodically.
These cleanups will happen nightly or after any purge. | def periodic_db_cleanups(instance: Recorder) -> None:
"""Run any database cleanups that need to happen periodically.
These cleanups will happen nightly or after any purge.
"""
assert instance.engine is not None
if instance.engine.dialect.name == SupportedDialect.SQLITE:
# Execute sqlite to create a wal checkpoint and free up disk space
_LOGGER.debug("WAL checkpoint")
with instance.engine.connect() as connection:
connection.execute(text("PRAGMA wal_checkpoint(TRUNCATE);"))
connection.execute(text("PRAGMA OPTIMIZE;")) |
Lock database for writes. | def write_lock_db_sqlite(instance: Recorder) -> Generator[None, None, None]:
"""Lock database for writes."""
assert instance.engine is not None
with instance.engine.connect() as connection:
# Execute sqlite to create a wal checkpoint
# This is optional but makes sure the backup is going to be minimal
connection.execute(text("PRAGMA wal_checkpoint(TRUNCATE)"))
# Create write lock
_LOGGER.debug("Lock database")
connection.execute(text("BEGIN IMMEDIATE;"))
try:
yield
finally:
_LOGGER.debug("Unlock database")
connection.execute(text("END;")) |
Determine if a migration is in progress.
This is a thin wrapper that allows us to change
out the implementation later. | def async_migration_in_progress(hass: HomeAssistant) -> bool:
"""Determine if a migration is in progress.
This is a thin wrapper that allows us to change
out the implementation later.
"""
if DATA_INSTANCE not in hass.data:
return False
instance = get_instance(hass)
return instance.migration_in_progress |
Determine if a migration is live.
This is a thin wrapper that allows us to change
out the implementation later. | def async_migration_is_live(hass: HomeAssistant) -> bool:
"""Determine if a migration is live.
This is a thin wrapper that allows us to change
out the implementation later.
"""
if DATA_INSTANCE not in hass.data:
return False
instance: Recorder = hass.data[DATA_INSTANCE]
return instance.migration_is_live |
Return the datetime.date for the second sunday of a month. | def second_sunday(year: int, month: int) -> date:
"""Return the datetime.date for the second sunday of a month."""
second = date(year, month, FIRST_POSSIBLE_SUNDAY)
day_of_week = second.weekday()
if day_of_week == SUNDAY_WEEKDAY:
return second
return second.replace(
day=(FIRST_POSSIBLE_SUNDAY + (SUNDAY_WEEKDAY - day_of_week) % DAYS_IN_WEEK)
) |
Check if a time is the second sunday of the month. | def is_second_sunday(date_time: datetime) -> bool:
"""Check if a time is the second sunday of the month."""
return bool(second_sunday(date_time.year, date_time.month).day == date_time.day) |
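The helper relies on three module constants not shown in this excerpt; a self-contained sketch with assumed values (the second Sunday can only fall on days 8-14, Monday is weekday 0 so Sunday is 6, and a week has 7 days):

from datetime import date

FIRST_POSSIBLE_SUNDAY = 8   # assumed: earliest day the second Sunday can fall on
SUNDAY_WEEKDAY = 6          # assumed: Monday == 0, so Sunday == 6
DAYS_IN_WEEK = 7            # assumed

def second_sunday(year, month):
    second = date(year, month, FIRST_POSSIBLE_SUNDAY)
    day_of_week = second.weekday()
    if day_of_week == SUNDAY_WEEKDAY:
        return second
    return second.replace(
        day=FIRST_POSSIBLE_SUNDAY + (SUNDAY_WEEKDAY - day_of_week) % DAYS_IN_WEEK
    )

print(second_sunday(2024, 1))   # 2024-01-14
print(second_sunday(2024, 9))   # 2024-09-08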
Get the recorder instance. | def get_instance(hass: HomeAssistant) -> Recorder:
"""Get the recorder instance."""
instance: Recorder = hass.data[DATA_INSTANCE]
return instance |
Return start and end datetimes for a statistic period definition. | def resolve_period(
period_def: StatisticPeriod,
) -> tuple[datetime | None, datetime | None]:
"""Return start and end datetimes for a statistic period definition."""
start_time = None
end_time = None
if "calendar" in period_def:
calendar_period = period_def["calendar"]["period"]
start_of_day = dt_util.start_of_local_day()
cal_offset = period_def["calendar"].get("offset", 0)
if calendar_period == "hour":
start_time = dt_util.now().replace(minute=0, second=0, microsecond=0)
start_time += timedelta(hours=cal_offset)
end_time = start_time + timedelta(hours=1)
elif calendar_period == "day":
start_time = start_of_day
start_time += timedelta(days=cal_offset)
end_time = start_time + timedelta(days=1)
elif calendar_period == "week":
start_time = start_of_day - timedelta(days=start_of_day.weekday())
start_time += timedelta(days=cal_offset * 7)
end_time = start_time + timedelta(weeks=1)
elif calendar_period == "month":
start_time = start_of_day.replace(day=28)
# This works for up to 48 months of offset
start_time = (start_time + timedelta(days=cal_offset * 31)).replace(day=1)
end_time = (start_time + timedelta(days=31)).replace(day=1)
else: # calendar_period = "year"
start_time = start_of_day.replace(month=12, day=31)
# This works for 100+ years of offset
start_time = (start_time + timedelta(days=cal_offset * 366)).replace(
month=1, day=1
)
end_time = (start_time + timedelta(days=365)).replace(day=1)
start_time = dt_util.as_utc(start_time)
end_time = dt_util.as_utc(end_time)
elif "fixed_period" in period_def:
start_time = period_def["fixed_period"].get("start_time")
end_time = period_def["fixed_period"].get("end_time")
elif "rolling_window" in period_def:
duration = period_def["rolling_window"]["duration"]
now = dt_util.utcnow()
start_time = now - duration
end_time = start_time + duration
if offset := period_def["rolling_window"].get("offset"):
start_time += offset
end_time += offset
return (start_time, end_time) |
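The calendar branches depend on dt_util for local-time handling, but the rolling_window branch can be traced with plain datetimes; a sketch with invented duration and offset values:

from datetime import datetime, timedelta, timezone

period_def = {
    "rolling_window": {
        "duration": timedelta(hours=24),
        "offset": timedelta(hours=-1),   # shift the whole window one hour back
    }
}

duration = period_def["rolling_window"]["duration"]
now = datetime.now(timezone.utc)
start_time = now - duration
end_time = start_time + duration
if offset := period_def["rolling_window"].get("offset"):
    start_time += offset
    end_time += offset
print(start_time, end_time)   # the 24-hour window that ended one hour ago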
Return first n items of the iterable as a list.
From itertools recipes | def take(take_num: int, iterable: Iterable) -> list[Any]:
"""Return first n items of the iterable as a list.
From itertools recipes
"""
return list(islice(iterable, take_num)) |
Break *iterable* into lists of length *n*.
From more-itertools | def chunked(iterable: Iterable, chunked_num: int) -> Iterable[Any]:
"""Break *iterable* into lists of length *n*.
From more-itertools
"""
return iter(partial(take, chunked_num, iter(iterable)), []) |
Break *collection* into iterables of length *n*.
Returns the collection if its length is less than *n*.
Unlike chunked, this function requires a collection so it can
determine the length of the collection and return the collection
if it is less than *n*. | def chunked_or_all(iterable: Collection[Any], chunked_num: int) -> Iterable[Any]:
"""Break *collection* into iterables of length *n*.
Returns the collection if its length is less than *n*.
Unlike chunked, this function requires a collection so it can
determine the length of the collection and return the collection
if it is less than *n*.
"""
if len(iterable) <= chunked_num:
return (iterable,)
return chunked(iterable, chunked_num) |
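These three helpers are self-contained itertools patterns; copied into a standalone sketch they behave like this:

from functools import partial
from itertools import islice

def take(take_num, iterable):
    """Return the first take_num items of iterable as a list."""
    return list(islice(iterable, take_num))

def chunked(iterable, chunked_num):
    """Yield lists of up to chunked_num items; iter() stops at the empty-list sentinel."""
    return iter(partial(take, chunked_num, iter(iterable)), [])

def chunked_or_all(collection, chunked_num):
    """Short-circuit: hand back the whole collection when it already fits in one chunk."""
    if len(collection) <= chunked_num:
        return (collection,)
    return chunked(collection, chunked_num)

print(list(chunked(range(7), 3)))                 # [[0, 1, 2], [3, 4, 5], [6]]
print(list(chunked_or_all([1, 2], 3)))            # [[1, 2]]
print(list(chunked_or_all(list(range(5)), 2)))    # [[0, 1], [2, 3], [4]]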
Get an index by name. | def get_index_by_name(session: Session, table_name: str, index_name: str) -> str | None:
"""Get an index by name."""
connection = session.connection()
inspector = inspect(connection)
indexes = inspector.get_indexes(table_name)
return next(
(
possible_index["name"]
for possible_index in indexes
if possible_index["name"]
and (
possible_index["name"] == index_name
or possible_index["name"].endswith(f"_{index_name}")
)
),
None,
) |
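A usage sketch against an in-memory SQLite engine, assuming SQLAlchemy 2.x and that get_index_by_name above is in scope; the toy table and index names are invented for illustration:

from sqlalchemy import Column, Index, Integer, String, create_engine
from sqlalchemy.orm import DeclarativeBase, Session

class Base(DeclarativeBase):
    pass

class Event(Base):
    __tablename__ = "events"
    event_id = Column(Integer, primary_key=True)
    event_type = Column(String(32))
    __table_args__ = (Index("ix_events_event_type", "event_type"),)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    # Matched via the endswith("_event_type") branch, returning the real index name.
    print(get_index_by_name(session, "events", "event_type"))  # ix_events_event_type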
Create a filter for unique constraint integrity errors. | def filter_unique_constraint_integrity_error(
instance: Recorder, row_type: str
) -> Callable[[Exception], bool]:
"""Create a filter for unique constraint integrity errors."""
def _filter_unique_constraint_integrity_error(err: Exception) -> bool:
"""Handle unique constraint integrity errors."""
if not isinstance(err, StatementError):
return False
assert instance.engine is not None
dialect_name = instance.engine.dialect.name
ignore = False
if (
dialect_name == SupportedDialect.SQLITE
and "UNIQUE constraint failed" in str(err)
):
ignore = True
if (
dialect_name == SupportedDialect.POSTGRESQL
and err.orig
and hasattr(err.orig, "pgcode")
and err.orig.pgcode == "23505"
):
ignore = True
if (
dialect_name == SupportedDialect.MYSQL
and err.orig
and hasattr(err.orig, "args")
):
with contextlib.suppress(TypeError):
if err.orig.args[0] == 1062:
ignore = True
if ignore:
_LOGGER.warning(
(
"Blocked attempt to insert duplicated %s rows, please report"
" at %s"
),
row_type,
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue+label%3A%22integration%3A+recorder%22",
exc_info=err,
)
return ignore
return _filter_unique_constraint_integrity_error |
Set up the recorder websocket API. | def async_setup(hass: HomeAssistant) -> None:
"""Set up the recorder websocket API."""
websocket_api.async_register_command(hass, ws_adjust_sum_statistics)
websocket_api.async_register_command(hass, ws_change_statistics_unit)
websocket_api.async_register_command(hass, ws_clear_statistics)
websocket_api.async_register_command(hass, ws_get_statistic_during_period)
websocket_api.async_register_command(hass, ws_get_statistics_during_period)
websocket_api.async_register_command(hass, ws_get_statistics_metadata)
websocket_api.async_register_command(hass, ws_list_statistic_ids)
websocket_api.async_register_command(hass, ws_import_statistics)
websocket_api.async_register_command(hass, ws_info)
websocket_api.async_register_command(hass, ws_update_statistics_metadata)
websocket_api.async_register_command(hass, ws_validate_statistics) |
Fetch statistics and convert them to json in the executor. | def _ws_get_statistic_during_period(
hass: HomeAssistant,
msg_id: int,
start_time: dt | None,
end_time: dt | None,
statistic_id: str,
types: set[Literal["max", "mean", "min", "change"]] | None,
units: dict[str, str],
) -> bytes:
"""Fetch statistics and convert them to json in the executor."""
return json_bytes(
messages.result_message(
msg_id,
statistic_during_period(
hass, start_time, end_time, statistic_id, types, units=units
),
)
) |
Fetch statistics and convert them to json in the executor. | def _ws_get_statistics_during_period(
hass: HomeAssistant,
msg_id: int,
start_time: dt,
end_time: dt | None,
statistic_ids: set[str] | None,
period: Literal["5minute", "day", "hour", "week", "month"],
units: dict[str, str],
types: set[Literal["change", "last_reset", "max", "mean", "min", "state", "sum"]],
) -> bytes:
"""Fetch statistics and convert them to json in the executor."""
result = statistics_during_period(
hass,
start_time,
end_time,
statistic_ids,
period,
units,
types,
)
for statistic_id in result:
for item in result[statistic_id]:
if (start := item.get("start")) is not None:
item["start"] = int(start * 1000)
if (end := item.get("end")) is not None:
item["end"] = int(end * 1000)
if (last_reset := item.get("last_reset")) is not None:
item["last_reset"] = int(last_reset * 1000)
return json_bytes(messages.result_message(msg_id, result)) |
Fetch a list of available statistic_id and convert them to JSON.
Runs in the executor. | def _ws_get_list_statistic_ids(
hass: HomeAssistant,
msg_id: int,
statistic_type: Literal["mean", "sum"] | None = None,
) -> bytes:
"""Fetch a list of available statistic_id and convert them to JSON.
Runs in the executor.
"""
return json_bytes(
messages.result_message(msg_id, list_statistic_ids(hass, None, statistic_type))
) |
Clear statistics for a list of statistic_ids.
Note: The WS call posts a job to the recorder's queue and then returns, it doesn't
wait until the job is completed. | def ws_clear_statistics(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Clear statistics for a list of statistic_ids.
Note: The WS call posts a job to the recorder's queue and then returns, it doesn't
wait until the job is completed.
"""
get_instance(hass).async_clear_statistics(msg["statistic_ids"])
connection.send_result(msg["id"]) |
Update statistics metadata for a statistic_id.
Only the normalized unit of measurement can be updated. | def ws_update_statistics_metadata(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Update statistics metadata for a statistic_id.
Only the normalized unit of measurement can be updated.
"""
get_instance(hass).async_update_statistics_metadata(
msg["statistic_id"], new_unit_of_measurement=msg["unit_of_measurement"]
)
connection.send_result(msg["id"]) |
Change the unit_of_measurement for a statistic_id.
All existing statistics will be converted to the new unit. | def ws_change_statistics_unit(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Change the unit_of_measurement for a statistic_id.
All existing statistics will be converted to the new unit.
"""
async_change_statistics_unit(
hass,
msg["statistic_id"],
new_unit_of_measurement=msg["new_unit_of_measurement"],
old_unit_of_measurement=msg["old_unit_of_measurement"],
)
connection.send_result(msg["id"]) |
Import statistics. | def ws_import_statistics(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Import statistics."""
metadata = msg["metadata"]
stats = msg["stats"]
if valid_entity_id(metadata["statistic_id"]):
async_import_statistics(hass, metadata, stats)
else:
async_add_external_statistics(hass, metadata, stats)
connection.send_result(msg["id"]) |
Return status of the recorder. | def ws_info(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Return status of the recorder."""
if instance := get_instance(hass):
backlog = instance.backlog
migration_in_progress = instance.migration_in_progress
migration_is_live = instance.migration_is_live
recording = instance.recording
# We avoid calling is_alive() as it can block waiting
# for the thread state lock which will block the event loop.
is_running = instance.is_running
max_backlog = instance.max_backlog
else:
backlog = None
migration_in_progress = False
migration_is_live = False
recording = False
is_running = False
max_backlog = None
recorder_info = {
"backlog": backlog,
"max_backlog": max_backlog,
"migration_in_progress": migration_in_progress,
"migration_is_live": migration_is_live,
"recording": recording,
"thread_running": is_running,
}
connection.send_result(msg["id"], recorder_info) |
Validate database URL. | def validate_db_url(db_url: str) -> Any:
"""Validate database URL."""
# Don't allow on-memory sqlite databases
if (
db_url == SQLITE_URL_PREFIX
or (db_url.startswith(SQLITE_URL_PREFIX) and ":memory:" in db_url)
) and not ALLOW_IN_MEMORY_DB:
raise vol.Invalid("In-memory SQLite database is not supported")
return db_url |
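With the two module constants written out (assumed here: SQLITE_URL_PREFIX is "sqlite://" and in-memory databases are disallowed), the validator can be exercised on its own as a sketch:

import voluptuous as vol

SQLITE_URL_PREFIX = "sqlite://"   # assumed value
ALLOW_IN_MEMORY_DB = False        # assumed value

def validate_db_url(db_url):
    """Reject in-memory SQLite URLs, pass everything else through."""
    if (
        db_url == SQLITE_URL_PREFIX
        or (db_url.startswith(SQLITE_URL_PREFIX) and ":memory:" in db_url)
    ) and not ALLOW_IN_MEMORY_DB:
        raise vol.Invalid("In-memory SQLite database is not supported")
    return db_url

print(validate_db_url("sqlite:////config/home-assistant_v2.db"))  # accepted unchanged
try:
    validate_db_url("sqlite://:memory:")
except vol.Invalid as err:
    print(err)  # In-memory SQLite database is not supported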
Check if an entity is being recorded.
Async friendly. | def is_entity_recorded(hass: HomeAssistant, entity_id: str) -> bool:
"""Check if an entity is being recorded.
Async friendly.
"""
if DATA_INSTANCE not in hass.data:
return False
instance = get_instance(hass)
return instance.entity_filter(entity_id) |
Get the column names for the columns that need to be checked for precision. | def _get_precision_column_types(
table_object: type[DeclarativeBase],
) -> list[str]:
"""Get the column names for the columns that need to be checked for precision."""
return [
column.key
for column in table_object.__table__.columns
if column.type is DOUBLE_TYPE
] |
Do some basic checks for common schema errors caused by manual migration. | def validate_table_schema_supports_utf8(
instance: Recorder,
table_object: type[DeclarativeBase],
columns: tuple[InstrumentedAttribute, ...],
) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors: set[str] = set()
# Lack of full utf8 support is only an issue for MySQL / MariaDB
if instance.dialect_name != SupportedDialect.MYSQL:
return schema_errors
try:
schema_errors = _validate_table_schema_supports_utf8(
instance, table_object, columns
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error when validating DB schema")
_log_schema_errors(table_object, schema_errors)
return schema_errors |
Verify the table has the correct collation. | def validate_table_schema_has_correct_collation(
instance: Recorder,
table_object: type[DeclarativeBase],
) -> set[str]:
"""Verify the table has the correct collation."""
schema_errors: set[str] = set()
# Lack of full utf8 support is only an issue for MySQL / MariaDB
if instance.dialect_name != SupportedDialect.MYSQL:
return schema_errors
try:
schema_errors = _validate_table_schema_has_correct_collation(
instance, table_object
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error when validating DB schema")
_log_schema_errors(table_object, schema_errors)
return schema_errors |
Ensure the table has the correct collation to avoid union errors with mixed collations. | def _validate_table_schema_has_correct_collation(
instance: Recorder,
table_object: type[DeclarativeBase],
) -> set[str]:
"""Ensure the table has the correct collation to avoid union errors with mixed collations."""
schema_errors: set[str] = set()
# Mark the session as read_only to ensure that the test data is not committed
# to the database and we always rollback when the scope is exited
with session_scope(session=instance.get_session(), read_only=True) as session:
table = table_object.__tablename__
metadata_obj = MetaData()
reflected_table = Table(table, metadata_obj, autoload_with=instance.engine)
connection = session.connection()
dialect_kwargs = reflected_table.dialect_kwargs
# Check if the table has a collation set; if it's not set then it's
# using the server default collation for the database
collate = (
dialect_kwargs.get("mysql_collate")
or dialect_kwargs.get("mariadb_collate")
# pylint: disable-next=protected-access
or connection.dialect._fetch_setting(connection, "collation_server") # type: ignore[attr-defined]
)
if collate and collate != "utf8mb4_unicode_ci":
_LOGGER.debug(
"Database %s collation is not utf8mb4_unicode_ci",
table,
)
schema_errors.add(f"{table}.utf8mb4_unicode_ci")
return schema_errors |
Do some basic checks for common schema errors caused by manual migration. | def _validate_table_schema_supports_utf8(
instance: Recorder,
table_object: type[DeclarativeBase],
columns: tuple[InstrumentedAttribute, ...],
) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors: set[str] = set()
# Mark the session as read_only to ensure that the test data is not committed
# to the database and we always rollback when the scope is exited
with session_scope(session=instance.get_session(), read_only=True) as session:
db_object = table_object(**{column.key: UTF8_NAME for column in columns})
table = table_object.__tablename__
# Try inserting some data which needs utf8mb4 support
session.add(db_object)
try:
session.flush()
except OperationalError as err:
if err.orig and err.orig.args[0] == MYSQL_ERR_INCORRECT_STRING_VALUE:
_LOGGER.debug(
"Database %s statistics_meta does not support 4-byte UTF-8",
table,
)
schema_errors.add(f"{table}.4-byte UTF-8")
return schema_errors
raise
finally:
session.rollback()
return schema_errors |
Do some basic checks for common schema errors caused by manual migration. | def validate_db_schema_precision(
instance: Recorder,
table_object: type[DeclarativeBase],
) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors: set[str] = set()
# Wrong precision is only an issue for MySQL / MariaDB / PostgreSQL
if instance.dialect_name not in (
SupportedDialect.MYSQL,
SupportedDialect.POSTGRESQL,
):
return schema_errors
try:
schema_errors = _validate_db_schema_precision(instance, table_object)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error when validating DB schema")
_log_schema_errors(table_object, schema_errors)
return schema_errors |
Do some basic checks for common schema errors caused by manual migration. | def _validate_db_schema_precision(
instance: Recorder,
table_object: type[DeclarativeBase],
) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors: set[str] = set()
columns = _get_precision_column_types(table_object)
# Mark the session as read_only to ensure that the test data is not committed
# to the database and we always rollback when the scope is exited
with session_scope(session=instance.get_session(), read_only=True) as session:
db_object = table_object(**{column: PRECISE_NUMBER for column in columns})
table = table_object.__tablename__
try:
session.add(db_object)
session.flush()
session.refresh(db_object)
_check_columns(
schema_errors=schema_errors,
stored={column: getattr(db_object, column) for column in columns},
expected={column: PRECISE_NUMBER for column in columns},
columns=columns,
table_name=table,
supports="double precision",
)
finally:
session.rollback()
return schema_errors |
Log schema errors. | def _log_schema_errors(
table_object: type[DeclarativeBase], schema_errors: set[str]
) -> None:
"""Log schema errors."""
if not schema_errors:
return
_LOGGER.debug(
"Detected %s schema errors: %s",
table_object.__tablename__,
", ".join(sorted(schema_errors)),
) |
Check that the columns in the table support the given feature.
Errors are logged and added to the schema_errors set. | def _check_columns(
schema_errors: set[str],
stored: Mapping,
expected: Mapping,
columns: Iterable[str],
table_name: str,
supports: str,
) -> None:
"""Check that the columns in the table support the given feature.
Errors are logged and added to the schema_errors set.
"""
for column in columns:
if stored[column] == expected[column]:
continue
schema_errors.add(f"{table_name}.{supports}")
_LOGGER.error(
"Column %s in database table %s does not support %s (stored=%s != expected=%s)",
column,
table_name,
supports,
stored[column],
expected[column],
) |
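A usage sketch showing how the schema_errors keys are built, assuming _check_columns above is in scope; the stored/expected values are invented, and the resulting "table.feature" key is what correct_db_schema_precision later looks for:

errors: set[str] = set()
_check_columns(
    schema_errors=errors,
    stored={"mean": 1.000001, "min": 1.000000000000001},    # values read back from the database
    expected={"mean": 1.000001, "min": 1.000000000000002},  # values that were written
    columns=("mean", "min"),
    table_name="statistics",
    supports="double precision",
)
print(errors)  # {'statistics.double precision'} -- only "min" lost precision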
Correct utf8 issues detected by validate_db_schema. | def correct_db_schema_utf8(
instance: Recorder, table_object: type[DeclarativeBase], schema_errors: set[str]
) -> None:
"""Correct utf8 issues detected by validate_db_schema."""
table_name = table_object.__tablename__
if (
f"{table_name}.4-byte UTF-8" in schema_errors
or f"{table_name}.utf8mb4_unicode_ci" in schema_errors
):
from ..migration import ( # pylint: disable=import-outside-toplevel
_correct_table_character_set_and_collation,
)
_correct_table_character_set_and_collation(table_name, instance.get_session) |
Correct precision issues detected by validate_db_schema. | def correct_db_schema_precision(
instance: Recorder,
table_object: type[DeclarativeBase],
schema_errors: set[str],
) -> None:
"""Correct precision issues detected by validate_db_schema."""
table_name = table_object.__tablename__
if f"{table_name}.double precision" in schema_errors:
from ..migration import ( # pylint: disable=import-outside-toplevel
_modify_columns,
)
precision_columns = _get_precision_column_types(table_object)
# Attempt to convert the affected columns to double precision
session_maker = instance.get_session
engine = instance.engine
assert engine is not None, "Engine should be set"
_modify_columns(
session_maker,
engine,
table_name,
[f"{column} {DOUBLE_PRECISION_TYPE_SQL}" for column in precision_columns],
) |
Do some basic checks for common schema errors caused by manual migration. | def validate_db_schema(instance: Recorder) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors = validate_table_schema_supports_utf8(
instance, EventData, (EventData.shared_data,)
) | validate_db_schema_precision(instance, Events)
for table in (Events, EventData):
schema_errors |= validate_table_schema_has_correct_collation(instance, table)
return schema_errors |
Correct issues detected by validate_db_schema. | def correct_db_schema(
instance: Recorder,
schema_errors: set[str],
) -> None:
"""Correct issues detected by validate_db_schema."""
for table in (Events, EventData):
correct_db_schema_utf8(instance, table, schema_errors)
correct_db_schema_precision(instance, Events, schema_errors) |
Do some basic checks for common schema errors caused by manual migration. | def validate_db_schema(instance: Recorder) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors: set[str] = set()
for table, columns in TABLE_UTF8_COLUMNS.items():
schema_errors |= validate_table_schema_supports_utf8(instance, table, columns)
schema_errors |= validate_db_schema_precision(instance, States)
for table in (States, StateAttributes):
schema_errors |= validate_table_schema_has_correct_collation(instance, table)
return schema_errors |
Correct issues detected by validate_db_schema. | def correct_db_schema(
instance: Recorder,
schema_errors: set[str],
) -> None:
"""Correct issues detected by validate_db_schema."""
for table in (States, StateAttributes):
correct_db_schema_utf8(instance, table, schema_errors)
correct_db_schema_precision(instance, States, schema_errors) |
Find duplicated statistics. | def _find_duplicates(
session: Session, table: type[StatisticsBase]
) -> tuple[list[int], list[dict]]:
"""Find duplicated statistics."""
subquery = (
session.query(
table.start,
table.metadata_id,
literal_column("1").label("is_duplicate"),
)
.group_by(table.metadata_id, table.start)
.having(func.count() > 1)
.subquery()
)
query = (
session.query(
table.id,
table.metadata_id,
table.created,
table.start,
table.mean,
table.min,
table.max,
table.last_reset,
table.state,
table.sum,
)
.outerjoin(
subquery,
(subquery.c.metadata_id == table.metadata_id)
& (subquery.c.start == table.start),
)
.filter(subquery.c.is_duplicate == 1)
.order_by(table.metadata_id, table.start, table.id.desc())
.limit(1000 * SQLITE_MAX_BIND_VARS)
)
duplicates = execute(query)
original_as_dict = {}
start = None
metadata_id = None
duplicate_ids: list[int] = []
non_identical_duplicates_as_dict: list[dict] = []
if not duplicates:
return (duplicate_ids, non_identical_duplicates_as_dict)
def columns_to_dict(duplicate: Row) -> dict:
"""Convert a SQLAlchemy row to dict."""
dict_ = {}
for key in (
"id",
"metadata_id",
"start",
"created",
"mean",
"min",
"max",
"last_reset",
"state",
"sum",
):
dict_[key] = getattr(duplicate, key)
return dict_
def compare_statistic_rows(row1: dict, row2: dict) -> bool:
"""Compare two statistics rows, ignoring id and created."""
ignore_keys = {"id", "created"}
keys1 = set(row1).difference(ignore_keys)
keys2 = set(row2).difference(ignore_keys)
return keys1 == keys2 and all(row1[k] == row2[k] for k in keys1)
for duplicate in duplicates:
if start != duplicate.start or metadata_id != duplicate.metadata_id:
original_as_dict = columns_to_dict(duplicate)
start = duplicate.start
metadata_id = duplicate.metadata_id
continue
duplicate_as_dict = columns_to_dict(duplicate)
duplicate_ids.append(duplicate.id)
if not compare_statistic_rows(original_as_dict, duplicate_as_dict):
non_identical_duplicates_as_dict.append(
{"duplicate": duplicate_as_dict, "original": original_as_dict}
)
return (duplicate_ids, non_identical_duplicates_as_dict) |
Identify and delete duplicated statistics from a specified table. | def _delete_duplicates_from_table(
session: Session, table: type[StatisticsBase]
) -> tuple[int, list[dict]]:
"""Identify and delete duplicated statistics from a specified table."""
all_non_identical_duplicates: list[dict] = []
total_deleted_rows = 0
while True:
duplicate_ids, non_identical_duplicates = _find_duplicates(session, table)
if not duplicate_ids:
break
all_non_identical_duplicates.extend(non_identical_duplicates)
for i in range(0, len(duplicate_ids), SQLITE_MAX_BIND_VARS):
deleted_rows = (
session.query(table)
.filter(table.id.in_(duplicate_ids[i : i + SQLITE_MAX_BIND_VARS]))
.delete(synchronize_session=False)
)
total_deleted_rows += deleted_rows
return (total_deleted_rows, all_non_identical_duplicates) |
Identify and delete duplicated statistics.
A backup will be made of duplicated statistics before they are deleted. | def delete_statistics_duplicates(
instance: Recorder, hass: HomeAssistant, session: Session
) -> None:
"""Identify and delete duplicated statistics.
A backup will be made of duplicated statistics before they are deleted.
"""
deleted_statistics_rows, non_identical_duplicates = _delete_duplicates_from_table(
session, Statistics
)
if deleted_statistics_rows:
_LOGGER.info("Deleted %s duplicated statistics rows", deleted_statistics_rows)
if non_identical_duplicates:
isotime = dt_util.utcnow().isoformat()
backup_file_name = f"deleted_statistics.{isotime}.json"
backup_path = hass.config.path(STORAGE_DIR, backup_file_name)
os.makedirs(os.path.dirname(backup_path), exist_ok=True)
with open(backup_path, "w", encoding="utf8") as backup_file:
json.dump(
non_identical_duplicates,
backup_file,
indent=4,
sort_keys=True,
cls=JSONEncoder,
)
_LOGGER.warning(
(
"Deleted %s non identical duplicated %s rows, a backup of the deleted"
" rows has been saved to %s"
),
len(non_identical_duplicates),
Statistics.__tablename__,
backup_path,
)
deleted_short_term_statistics_rows, _ = _delete_duplicates_from_table(
session, StatisticsShortTerm
)
if deleted_short_term_statistics_rows:
_LOGGER.warning(
"Deleted duplicated short term statistic rows, please report at %s",
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue+label%3A%22integration%3A+recorder%22",
) |
Find duplicated statistics_meta. | def _find_statistics_meta_duplicates(session: Session) -> list[int]:
"""Find duplicated statistics_meta."""
# When querying the database, be careful to only explicitly query for columns
# which were present in schema version 29. If querying the table, SQLAlchemy
# will refer to future columns.
subquery = (
session.query(
StatisticsMeta.statistic_id,
literal_column("1").label("is_duplicate"),
)
.group_by(StatisticsMeta.statistic_id)
.having(func.count() > 1)
.subquery()
)
query = (
session.query(StatisticsMeta.statistic_id, StatisticsMeta.id)
.outerjoin(
subquery,
(subquery.c.statistic_id == StatisticsMeta.statistic_id),
)
.filter(subquery.c.is_duplicate == 1)
.order_by(StatisticsMeta.statistic_id, StatisticsMeta.id.desc())
.limit(1000 * SQLITE_MAX_BIND_VARS)
)
duplicates = execute(query)
statistic_id = None
duplicate_ids: list[int] = []
if not duplicates:
return duplicate_ids
for duplicate in duplicates:
if statistic_id != duplicate.statistic_id:
statistic_id = duplicate.statistic_id
continue
duplicate_ids.append(duplicate.id)
return duplicate_ids |
Identify and delete duplicated rows from the statistics_meta table. | def _delete_statistics_meta_duplicates(session: Session) -> int:
"""Identify and delete duplicated rows from the statistics_meta table."""
total_deleted_rows = 0
while True:
duplicate_ids = _find_statistics_meta_duplicates(session)
if not duplicate_ids:
break
for i in range(0, len(duplicate_ids), SQLITE_MAX_BIND_VARS):
deleted_rows = (
session.query(StatisticsMeta)
.filter(
StatisticsMeta.id.in_(duplicate_ids[i : i + SQLITE_MAX_BIND_VARS])
)
.delete(synchronize_session=False)
)
total_deleted_rows += deleted_rows
return total_deleted_rows |
Identify and delete duplicated statistics_meta.
This is used when migrating from schema version 28 to schema version 29. | def delete_statistics_meta_duplicates(instance: Recorder, session: Session) -> None:
"""Identify and delete duplicated statistics_meta.
This is used when migrating from schema version 28 to schema version 29.
"""
deleted_statistics_rows = _delete_statistics_meta_duplicates(session)
if deleted_statistics_rows:
statistics_meta_manager = instance.statistics_meta_manager
statistics_meta_manager.reset()
statistics_meta_manager.load(session)
_LOGGER.info(
"Deleted %s duplicated statistics_meta rows", deleted_statistics_rows
) |
Do some basic checks for common schema errors caused by manual migration. | def validate_db_schema(instance: Recorder) -> set[str]:
"""Do some basic checks for common schema errors caused by manual migration."""
schema_errors: set[str] = set()
schema_errors |= validate_table_schema_supports_utf8(
instance, StatisticsMeta, (StatisticsMeta.statistic_id,)
)
for table in (Statistics, StatisticsShortTerm):
schema_errors |= validate_db_schema_precision(instance, table)
schema_errors |= validate_table_schema_has_correct_collation(instance, table)
if schema_errors:
_LOGGER.debug(
"Detected statistics schema errors: %s", ", ".join(sorted(schema_errors))
)
return schema_errors |
Correct issues detected by validate_db_schema. | def correct_db_schema(
instance: Recorder,
schema_errors: set[str],
) -> None:
"""Correct issues detected by validate_db_schema."""
correct_db_schema_utf8(instance, StatisticsMeta, schema_errors)
for table in (Statistics, StatisticsShortTerm):
correct_db_schema_precision(instance, table, schema_errors)
correct_db_schema_utf8(instance, table, schema_errors) |
Return the lambda_stmt and if StateAttributes should be joined.
Because these are lambda_stmt the values inside the lambdas need
to be explicitly written out to avoid caching the wrong values. | def _lambda_stmt_and_join_attributes(
schema_version: int, no_attributes: bool, include_last_changed: bool = True
) -> tuple[StatementLambdaElement, bool]:
"""Return the lambda_stmt and if StateAttributes should be joined.
Because these are lambda_stmt the values inside the lambdas need
to be explicitly written out to avoid caching the wrong values.
"""
# If no_attributes was requested we do the query
# without the attributes fields and do not join the
# state_attributes table
if no_attributes:
if schema_version >= 31:
if include_last_changed:
return (
lambda_stmt(lambda: select(*_QUERY_STATE_NO_ATTR)),
False,
)
return (
lambda_stmt(lambda: select(*_QUERY_STATE_NO_ATTR_NO_LAST_CHANGED)),
False,
)
if include_last_changed:
return (
lambda_stmt(lambda: select(*_QUERY_STATE_NO_ATTR_PRE_SCHEMA_31)),
False,
)
return (
lambda_stmt(
lambda: select(*_QUERY_STATE_NO_ATTR_NO_LAST_CHANGED_PRE_SCHEMA_31)
),
False,
)
# If we are in the process of migrating the schema we do
# not want to join the state_attributes table as we
# do not know if it will be there yet
if schema_version < 25:
if include_last_changed:
return (
lambda_stmt(lambda: select(*_QUERY_STATES_PRE_SCHEMA_25)),
False,
)
return (
lambda_stmt(lambda: select(*_QUERY_STATES_PRE_SCHEMA_25_NO_LAST_CHANGED)),
False,
)
if schema_version >= 31:
if include_last_changed:
return lambda_stmt(lambda: select(*_QUERY_STATES)), True
return lambda_stmt(lambda: select(*_QUERY_STATES_NO_LAST_CHANGED)), True
# Finally if no migration is in progress and no_attributes
# was not requested, we query both attributes columns and
# join state_attributes
if include_last_changed:
return lambda_stmt(lambda: select(*_QUERY_STATES_PRE_SCHEMA_31)), True
return (
lambda_stmt(lambda: select(*_QUERY_STATES_NO_LAST_CHANGED_PRE_SCHEMA_31)),
True,
) |
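The incremental stmt += lambda q: ... pattern used throughout these history queries can be tried in isolation; a sketch with a toy States table, assuming SQLAlchemy 2.x (scalar filters are used here to keep the lambda closure rules simple):

from sqlalchemy import Column, Float, Integer, String, create_engine, lambda_stmt, select
from sqlalchemy.orm import DeclarativeBase, Session

class Base(DeclarativeBase):
    pass

class States(Base):
    __tablename__ = "states"
    state_id = Column(Integer, primary_key=True)
    entity_id = Column(String(255))
    last_updated_ts = Column(Float)

def states_after(entity_id: str, start_time_ts: float):
    """Build the statement incrementally; each lambda body is cached, closure values become bind params."""
    stmt = lambda_stmt(lambda: select(States.state_id, States.entity_id))
    stmt += lambda q: q.where(States.entity_id == entity_id)
    stmt += lambda q: q.where(States.last_updated_ts > start_time_ts)
    stmt += lambda q: q.order_by(States.last_updated_ts)
    return stmt

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    print(session.execute(states_after("sensor.outdoor_temp", 0.0)).all())  # [] on an empty database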
Wrap get_significant_states_with_session with an sql session. | def get_significant_states(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Wrap get_significant_states_with_session with an sql session."""
with session_scope(hass=hass, read_only=True) as session:
return get_significant_states_with_session(
hass,
session,
start_time,
end_time,
entity_ids,
filters,
include_start_time_state,
significant_changes_only,
minimal_response,
no_attributes,
compressed_state_format,
) |
Query the database for significant state changes. | def _significant_states_stmt(
schema_version: int,
start_time: datetime,
end_time: datetime | None,
entity_ids: list[str],
significant_changes_only: bool,
no_attributes: bool,
) -> StatementLambdaElement:
"""Query the database for significant state changes."""
stmt, join_attributes = _lambda_stmt_and_join_attributes(
schema_version, no_attributes, include_last_changed=not significant_changes_only
)
if (
len(entity_ids) == 1
and significant_changes_only
and split_entity_id(entity_ids[0])[0] not in SIGNIFICANT_DOMAINS
):
if schema_version >= 31:
stmt += lambda q: q.filter(
(States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
)
else:
stmt += lambda q: q.filter(
(States.last_changed == States.last_updated)
| States.last_changed.is_(None)
)
elif significant_changes_only:
if schema_version >= 31:
stmt += lambda q: q.filter(
or_(
*[
States.entity_id.like(entity_domain)
for entity_domain in SIGNIFICANT_DOMAINS_ENTITY_ID_LIKE
],
(
(States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
),
)
)
else:
stmt += lambda q: q.filter(
or_(
*[
States.entity_id.like(entity_domain)
for entity_domain in SIGNIFICANT_DOMAINS_ENTITY_ID_LIKE
],
(
(States.last_changed == States.last_updated)
| States.last_changed.is_(None)
),
)
)
stmt += lambda q: q.filter(States.entity_id.in_(entity_ids))
if schema_version >= 31:
start_time_ts = start_time.timestamp()
stmt += lambda q: q.filter(States.last_updated_ts > start_time_ts)
if end_time:
end_time_ts = end_time.timestamp()
stmt += lambda q: q.filter(States.last_updated_ts < end_time_ts)
else:
stmt += lambda q: q.filter(States.last_updated > start_time)
if end_time:
stmt += lambda q: q.filter(States.last_updated < end_time)
if join_attributes:
stmt += lambda q: q.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
if schema_version >= 31:
stmt += lambda q: q.order_by(States.entity_id, States.last_updated_ts)
else:
stmt += lambda q: q.order_by(States.entity_id, States.last_updated)
return stmt |
Return states changes during UTC period start_time - end_time.
entity_ids is an optional iterable of entities to include in the results.
filters is an optional SQLAlchemy filter which will be applied to the database
queries unless entity_ids is given, in which case it's ignored.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs). | def get_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Return states changes during UTC period start_time - end_time.
entity_ids is an optional iterable of entities to include in the results.
filters is an optional SQLAlchemy filter which will be applied to the database
queries unless entity_ids is given, in which case it's ignored.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
if filters is not None:
raise NotImplementedError("Filters are no longer supported")
if not entity_ids:
raise ValueError("entity_ids must be provided")
stmt = _significant_states_stmt(
_schema_version(hass),
start_time,
end_time,
entity_ids,
significant_changes_only,
no_attributes,
)
states = execute_stmt_lambda_element(session, stmt, None, end_time)
return _sorted_states_to_dict(
hass,
session,
states,
start_time,
entity_ids,
include_start_time_state,
minimal_response,
no_attributes,
compressed_state_format,
) |
Variant of get_significant_states_with_session.
Difference with get_significant_states_with_session is that it does not
return minimal responses. | def get_full_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
no_attributes: bool = False,
) -> dict[str, list[State]]:
"""Variant of get_significant_states_with_session.
Difference with get_significant_states_with_session is that it does not
return minimal responses.
"""
return cast(
dict[str, list[State]],
get_significant_states_with_session(
hass=hass,
session=session,
start_time=start_time,
end_time=end_time,
entity_ids=entity_ids,
filters=filters,
include_start_time_state=include_start_time_state,
significant_changes_only=significant_changes_only,
minimal_response=False,
no_attributes=no_attributes,
),
) |
Return states changes during UTC period start_time - end_time. | def state_changes_during_period(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_id: str | None = None,
no_attributes: bool = False,
descending: bool = False,
limit: int | None = None,
include_start_time_state: bool = True,
) -> dict[str, list[State]]:
"""Return states changes during UTC period start_time - end_time."""
if not entity_id:
raise ValueError("entity_id must be provided")
entity_ids = [entity_id.lower()]
with session_scope(hass=hass, read_only=True) as session:
stmt = _state_changed_during_period_stmt(
_schema_version(hass),
start_time,
end_time,
entity_id,
no_attributes,
descending,
limit,
)
states = execute_stmt_lambda_element(session, stmt, None, end_time)
return cast(
dict[str, list[State]],
_sorted_states_to_dict(
hass,
session,
states,
start_time,
entity_ids,
include_start_time_state=include_start_time_state,
),
) |
Return the last number_of_states. | def get_last_state_changes(
hass: HomeAssistant, number_of_states: int, entity_id: str
) -> dict[str, list[State]]:
"""Return the last number_of_states."""
entity_id_lower = entity_id.lower()
entity_ids = [entity_id_lower]
with session_scope(hass=hass, read_only=True) as session:
stmt = _get_last_state_changes_stmt(
_schema_version(hass), number_of_states, entity_id_lower
)
states = list(execute_stmt_lambda_element(session, stmt))
return cast(
dict[str, list[State]],
_sorted_states_to_dict(
hass,
session,
reversed(states),
dt_util.utcnow(),
entity_ids,
include_start_time_state=False,
),
) |
Baked query to get states for specific entities. | def _get_states_for_entities_stmt(
schema_version: int,
run_start: datetime,
utc_point_in_time: datetime,
entity_ids: list[str],
no_attributes: bool,
) -> StatementLambdaElement:
"""Baked query to get states for specific entities."""
stmt, join_attributes = _lambda_stmt_and_join_attributes(
schema_version, no_attributes, include_last_changed=True
)
# We got an include-list of entities, accelerate the query by filtering already
# in the inner query.
if schema_version >= 31:
run_start_ts = process_timestamp(run_start).timestamp()
utc_point_in_time_ts = dt_util.utc_to_timestamp(utc_point_in_time)
stmt += lambda q: q.join(
(
most_recent_states_for_entities_by_date := (
select(
States.entity_id.label("max_entity_id"),
func.max(States.last_updated_ts).label("max_last_updated"),
)
.filter(
(States.last_updated_ts >= run_start_ts)
& (States.last_updated_ts < utc_point_in_time_ts)
)
.filter(States.entity_id.in_(entity_ids))
.group_by(States.entity_id)
.subquery()
)
),
and_(
States.entity_id
== most_recent_states_for_entities_by_date.c.max_entity_id,
States.last_updated_ts
== most_recent_states_for_entities_by_date.c.max_last_updated,
),
)
else:
stmt += lambda q: q.join(
(
most_recent_states_for_entities_by_date := select(
States.entity_id.label("max_entity_id"),
func.max(States.last_updated).label("max_last_updated"),
)
.filter(
(States.last_updated >= run_start)
& (States.last_updated < utc_point_in_time)
)
.filter(States.entity_id.in_(entity_ids))
.group_by(States.entity_id)
.subquery()
),
and_(
States.entity_id
== most_recent_states_for_entities_by_date.c.max_entity_id,
States.last_updated
== most_recent_states_for_entities_by_date.c.max_last_updated,
),
)
if join_attributes:
stmt += lambda q: q.outerjoin(
StateAttributes, (States.attributes_id == StateAttributes.attributes_id)
)
return stmt |
Return the states at a specific point in time. | def _get_rows_with_session(
hass: HomeAssistant,
session: Session,
utc_point_in_time: datetime,
entity_ids: list[str],
run: RecorderRuns | None = None,
no_attributes: bool = False,
) -> Iterable[Row]:
"""Return the states at a specific point in time."""
schema_version = _schema_version(hass)
if len(entity_ids) == 1:
return execute_stmt_lambda_element(
session,
_get_single_entity_states_stmt(
schema_version, utc_point_in_time, entity_ids[0], no_attributes
),
)
if run is None:
run = recorder.get_instance(hass).recorder_runs_manager.get(utc_point_in_time)
if run is None or process_timestamp(run.start) > utc_point_in_time:
# History did not run before utc_point_in_time
return []
# We have more than one entity to look at so we need to do a query on states
# since the last recorder run started.
stmt = _get_states_for_entities_stmt(
schema_version, run.start, utc_point_in_time, entity_ids, no_attributes
)
return execute_stmt_lambda_element(session, stmt) |
Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
States must be sorted by entity_id and last_updated
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly. | def _sorted_states_to_dict(
hass: HomeAssistant,
session: Session,
states: Iterable[Row],
start_time: datetime,
entity_ids: list[str],
include_start_time_state: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
States must be sorted by entity_id and last_updated
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
schema_version = _schema_version(hass)
_process_timestamp: Callable[[datetime], float | str]
field_map = _FIELD_MAP if schema_version >= 31 else _FIELD_MAP_PRE_SCHEMA_31
state_class: Callable[
[Row, dict[str, dict[str, Any]], datetime | None], State | dict[str, Any]
]
if compressed_state_format:
if schema_version >= 31:
state_class = legacy_row_to_compressed_state
else:
state_class = legacy_row_to_compressed_state_pre_schema_31
_process_timestamp = process_datetime_to_timestamp
attr_time = COMPRESSED_STATE_LAST_UPDATED
attr_state = COMPRESSED_STATE_STATE
else:
if schema_version >= 31:
state_class = LegacyLazyState
else:
state_class = LegacyLazyStatePreSchema31
_process_timestamp = process_timestamp_to_utc_isoformat
attr_time = LAST_CHANGED_KEY
attr_state = STATE_KEY
result: dict[str, list[State | dict[str, Any]]] = defaultdict(list)
# Set all entity IDs to empty lists in result set to maintain the order
for ent_id in entity_ids:
result[ent_id] = []
# Get the states at the start time
initial_states: dict[str, Row] = {}
if include_start_time_state:
initial_states = {
row.entity_id: row
for row in _get_rows_with_session(
hass,
session,
start_time,
entity_ids,
no_attributes=no_attributes,
)
}
if len(entity_ids) == 1:
states_iter: Iterable[tuple[str, Iterator[Row]]] = (
(entity_ids[0], iter(states)),
)
else:
key_func = attrgetter("entity_id")
states_iter = groupby(states, key_func)
# Append all changes to it
for ent_id, group in states_iter:
attr_cache: dict[str, dict[str, Any]] = {}
prev_state: Column | str
ent_results = result[ent_id]
if row := initial_states.pop(ent_id, None):
prev_state = row.state
ent_results.append(state_class(row, attr_cache, start_time))
if not minimal_response or split_entity_id(ent_id)[0] in NEED_ATTRIBUTE_DOMAINS:
ent_results.extend(
state_class(db_state, attr_cache, None) for db_state in group
)
continue
# With minimal response we only provide a native
# State for the first and last response. All the states
# in-between only provide the "state" and the
# "last_changed".
if not ent_results:
if (first_state := next(group, None)) is None:
continue
prev_state = first_state.state
ent_results.append(state_class(first_state, attr_cache, None))
state_idx = field_map["state"]
#
        # minimal_response only makes sense when last_updated == last_changed
        #
        # We use last_updated for last_changed since it's the same
#
# With minimal response we do not care about attribute
# changes so we can filter out duplicate states
if schema_version < 31:
last_updated_idx = field_map["last_updated"]
for row in group:
if (state := row[state_idx]) != prev_state:
ent_results.append(
{
attr_state: state,
attr_time: _process_timestamp(row[last_updated_idx]),
}
)
prev_state = state
continue
last_updated_ts_idx = field_map["last_updated_ts"]
if compressed_state_format:
for row in group:
if (state := row[state_idx]) != prev_state:
ent_results.append(
{
attr_state: state,
attr_time: row[last_updated_ts_idx],
}
)
prev_state = state
continue
for row in group:
if (state := row[state_idx]) != prev_state:
ent_results.append(
{
attr_state: state,
attr_time: process_timestamp_to_utc_isoformat(
dt_util.utc_from_timestamp(row[last_updated_ts_idx])
),
}
)
prev_state = state
# If there are no states beyond the initial state,
    # the state was never popped from initial_states
for ent_id, row in initial_states.items():
result[ent_id].append(state_class(row, {}, start_time))
# Filter out the empty lists if some states had 0 results.
return {key: val for key, val in result.items() if val} |
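The function above leans on itertools.groupby, which only merges adjacent rows with equal keys, so the query must return rows already ordered by entity_id (and last_updated) for the per-entity grouping to come out right. A standalone sketch of that step with invented rows:

# Standalone sketch of the grouping step: groupby only merges adjacent equal
# keys, so the rows must already be sorted by entity_id. Data is invented.
from itertools import groupby
from operator import itemgetter

rows = [
    ("light.kitchen", "on", 1.0),
    ("light.kitchen", "off", 2.0),
    ("sensor.temp", "20.5", 1.5),
    ("sensor.temp", "21.0", 2.5),
]

grouped = {
    entity_id: list(group) for entity_id, group in groupby(rows, key=itemgetter(0))
}

assert list(grouped) == ["light.kitchen", "sensor.temp"]
assert [state for _, state, _ in grouped["sensor.temp"]] == ["20.5", "21.0"]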
Return the statement and if StateAttributes should be joined. | def _stmt_and_join_attributes(
no_attributes: bool,
include_last_changed: bool,
include_last_reported: bool,
) -> Select:
"""Return the statement and if StateAttributes should be joined."""
_select = select(States.metadata_id, States.state, States.last_updated_ts)
if include_last_changed:
_select = _select.add_columns(States.last_changed_ts)
if include_last_reported:
_select = _select.add_columns(States.last_reported_ts)
if not no_attributes:
_select = _select.add_columns(SHARED_ATTR_OR_LEGACY_ATTRIBUTES)
return _select |
Return the statement and if StateAttributes should be joined. | def _stmt_and_join_attributes_for_start_state(
no_attributes: bool,
include_last_changed: bool,
include_last_reported: bool,
) -> Select:
"""Return the statement and if StateAttributes should be joined."""
_select = select(States.metadata_id, States.state)
_select = _select.add_columns(literal(value=0).label("last_updated_ts"))
if include_last_changed:
_select = _select.add_columns(literal(value=0).label("last_changed_ts"))
if include_last_reported:
_select = _select.add_columns(literal(value=0).label("last_reported_ts"))
if not no_attributes:
_select = _select.add_columns(SHARED_ATTR_OR_LEGACY_ATTRIBUTES)
return _select |
Return the statement to select from the union. | def _select_from_subquery(
subquery: Subquery | CompoundSelect,
no_attributes: bool,
include_last_changed: bool,
include_last_reported: bool,
) -> Select:
"""Return the statement to select from the union."""
base_select = select(
subquery.c.metadata_id,
subquery.c.state,
subquery.c.last_updated_ts,
)
if include_last_changed:
base_select = base_select.add_columns(subquery.c.last_changed_ts)
if include_last_reported:
base_select = base_select.add_columns(subquery.c.last_reported_ts)
if no_attributes:
return base_select
return base_select.add_columns(subquery.c.attributes) |
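The three builders above share one idea: start from a narrow select() and widen it with add_columns() only when the caller needs the extra fields. A tiny sketch of that pattern with an invented table; the statement is printed rather than executed.

# Sketch of conditionally widening a SELECT with add_columns(); the table is
# invented and the statement is only printed, never executed.
from sqlalchemy import Column, Float, Integer, MetaData, String, Table, select

metadata_obj = MetaData()
demo_states = Table(
    "demo_states",
    metadata_obj,
    Column("metadata_id", Integer),
    Column("state", String),
    Column("last_updated_ts", Float),
    Column("last_changed_ts", Float),
)


def build_select(include_last_changed: bool):
    stmt = select(
        demo_states.c.metadata_id, demo_states.c.state, demo_states.c.last_updated_ts
    )
    if include_last_changed:
        stmt = stmt.add_columns(demo_states.c.last_changed_ts)
    return stmt


print(build_select(False))
print(build_select(True))  # the column clause now also lists last_changed_ts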
Wrap get_significant_states_with_session with an sql session. | def get_significant_states(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Wrap get_significant_states_with_session with an sql session."""
with session_scope(hass=hass, read_only=True) as session:
return get_significant_states_with_session(
hass,
session,
start_time,
end_time,
entity_ids,
filters,
include_start_time_state,
significant_changes_only,
minimal_response,
no_attributes,
compressed_state_format,
) |
Query the database for significant state changes. | def _significant_states_stmt(
start_time_ts: float,
end_time_ts: float | None,
single_metadata_id: int | None,
metadata_ids: list[int],
metadata_ids_in_significant_domains: list[int],
significant_changes_only: bool,
no_attributes: bool,
include_start_time_state: bool,
run_start_ts: float | None,
) -> Select | CompoundSelect:
"""Query the database for significant state changes."""
include_last_changed = not significant_changes_only
stmt = _stmt_and_join_attributes(no_attributes, include_last_changed, False)
if significant_changes_only:
# Since we are filtering on entity_id (metadata_id) we can avoid
# the join of the states_meta table since we already know which
# metadata_ids are in the significant domains.
if metadata_ids_in_significant_domains:
stmt = stmt.filter(
States.metadata_id.in_(metadata_ids_in_significant_domains)
| (States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
)
else:
stmt = stmt.filter(
(States.last_changed_ts == States.last_updated_ts)
| States.last_changed_ts.is_(None)
)
stmt = stmt.filter(States.metadata_id.in_(metadata_ids)).filter(
States.last_updated_ts > start_time_ts
)
if end_time_ts:
stmt = stmt.filter(States.last_updated_ts < end_time_ts)
if not no_attributes:
stmt = stmt.outerjoin(
StateAttributes, States.attributes_id == StateAttributes.attributes_id
)
if not include_start_time_state or not run_start_ts:
return stmt.order_by(States.metadata_id, States.last_updated_ts)
unioned_subquery = union_all(
_select_from_subquery(
_get_start_time_state_stmt(
run_start_ts,
start_time_ts,
single_metadata_id,
metadata_ids,
no_attributes,
include_last_changed,
).subquery(),
no_attributes,
include_last_changed,
False,
),
_select_from_subquery(
stmt.subquery(), no_attributes, include_last_changed, False
),
).subquery()
return _select_from_subquery(
unioned_subquery,
no_attributes,
include_last_changed,
False,
).order_by(unioned_subquery.c.metadata_id, unioned_subquery.c.last_updated_ts) |
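When the start-time state is requested, the period query above is glued to the start-state query with union_all, and the union is ordered by metadata_id and last_updated_ts so the downstream groupby sees each entity's rows contiguously and in time order. A compact sketch of that shape with an invented table; it omits the latest-row-only filtering the real start-state query applies, and the statement is printed rather than executed.

# Sketch of the union shape used above, with an invented table; the real
# start-state branch additionally keeps only the latest row per entity.
from sqlalchemy import Column, Float, Integer, MetaData, String, Table, select, union_all

metadata_obj = MetaData()
demo_states = Table(
    "demo_states",
    metadata_obj,
    Column("metadata_id", Integer),
    Column("state", String),
    Column("last_updated_ts", Float),
)

start_ts, end_ts = 1_700_000_000.0, 1_700_003_600.0

start_state = select(demo_states).where(demo_states.c.last_updated_ts < start_ts)
in_period = select(demo_states).where(
    demo_states.c.last_updated_ts > start_ts, demo_states.c.last_updated_ts < end_ts
)

unioned = union_all(start_state, in_period).subquery()
stmt = select(unioned).order_by(unioned.c.metadata_id, unioned.c.last_updated_ts)
print(stmt)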
Return states changes during UTC period start_time - end_time.
entity_ids is an optional iterable of entities to include in the results.
filters is an optional SQLAlchemy filter which will be applied to the database
queries unless entity_ids is given, in which case it's ignored.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs). | def get_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Return states changes during UTC period start_time - end_time.
entity_ids is an optional iterable of entities to include in the results.
filters is an optional SQLAlchemy filter which will be applied to the database
queries unless entity_ids is given, in which case it's ignored.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
if filters is not None:
raise NotImplementedError("Filters are no longer supported")
if not entity_ids:
raise ValueError("entity_ids must be provided")
entity_id_to_metadata_id: dict[str, int | None] | None = None
metadata_ids_in_significant_domains: list[int] = []
instance = recorder.get_instance(hass)
if not (
entity_id_to_metadata_id := instance.states_meta_manager.get_many(
entity_ids, session, False
)
) or not (possible_metadata_ids := extract_metadata_ids(entity_id_to_metadata_id)):
return {}
metadata_ids = possible_metadata_ids
if significant_changes_only:
metadata_ids_in_significant_domains = [
metadata_id
for entity_id, metadata_id in entity_id_to_metadata_id.items()
if metadata_id is not None
and split_entity_id(entity_id)[0] in SIGNIFICANT_DOMAINS
]
run_start_ts: float | None = None
if include_start_time_state and not (
run_start_ts := _get_run_start_ts_for_utc_point_in_time(hass, start_time)
):
include_start_time_state = False
start_time_ts = dt_util.utc_to_timestamp(start_time)
end_time_ts = datetime_to_timestamp_or_none(end_time)
single_metadata_id = metadata_ids[0] if len(metadata_ids) == 1 else None
stmt = lambda_stmt(
lambda: _significant_states_stmt(
start_time_ts,
end_time_ts,
single_metadata_id,
metadata_ids,
metadata_ids_in_significant_domains,
significant_changes_only,
no_attributes,
include_start_time_state,
run_start_ts,
),
track_on=[
bool(single_metadata_id),
bool(metadata_ids_in_significant_domains),
bool(end_time_ts),
significant_changes_only,
no_attributes,
include_start_time_state,
],
)
return _sorted_states_to_dict(
execute_stmt_lambda_element(session, stmt, None, end_time, orm_rows=False),
start_time_ts if include_start_time_state else None,
entity_ids,
entity_id_to_metadata_id,
minimal_response,
compressed_state_format,
no_attributes=no_attributes,
) |
Variant of get_significant_states_with_session.
Difference with get_significant_states_with_session is that it does not
return minimal responses. | def get_full_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
no_attributes: bool = False,
) -> dict[str, list[State]]:
"""Variant of get_significant_states_with_session.
Difference with get_significant_states_with_session is that it does not
return minimal responses.
"""
return cast(
dict[str, list[State]],
get_significant_states_with_session(
hass=hass,
session=session,
start_time=start_time,
end_time=end_time,
entity_ids=entity_ids,
filters=filters,
include_start_time_state=include_start_time_state,
significant_changes_only=significant_changes_only,
minimal_response=False,
no_attributes=no_attributes,
),
) |
Return states changes during UTC period start_time - end_time. | def state_changes_during_period(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_id: str | None = None,
no_attributes: bool = False,
descending: bool = False,
limit: int | None = None,
include_start_time_state: bool = True,
) -> dict[str, list[State]]:
"""Return states changes during UTC period start_time - end_time."""
has_last_reported = (
recorder.get_instance(hass).schema_version >= LAST_REPORTED_SCHEMA_VERSION
)
if not entity_id:
raise ValueError("entity_id must be provided")
entity_ids = [entity_id.lower()]
with session_scope(hass=hass, read_only=True) as session:
instance = recorder.get_instance(hass)
if not (
possible_metadata_id := instance.states_meta_manager.get(
entity_id, session, False
)
):
return {}
single_metadata_id = possible_metadata_id
entity_id_to_metadata_id: dict[str, int | None] = {
entity_id: single_metadata_id
}
run_start_ts: float | None = None
if include_start_time_state and not (
run_start_ts := _get_run_start_ts_for_utc_point_in_time(hass, start_time)
):
include_start_time_state = False
start_time_ts = dt_util.utc_to_timestamp(start_time)
end_time_ts = datetime_to_timestamp_or_none(end_time)
stmt = lambda_stmt(
lambda: _state_changed_during_period_stmt(
start_time_ts,
end_time_ts,
single_metadata_id,
no_attributes,
limit,
include_start_time_state,
run_start_ts,
has_last_reported,
),
track_on=[
bool(end_time_ts),
no_attributes,
bool(limit),
include_start_time_state,
has_last_reported,
],
)
return cast(
dict[str, list[State]],
_sorted_states_to_dict(
execute_stmt_lambda_element(
session, stmt, None, end_time, orm_rows=False
),
start_time_ts if include_start_time_state else None,
entity_ids,
entity_id_to_metadata_id,
descending=descending,
no_attributes=no_attributes,
),
) |
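A usage sketch for the function above, not part of the module: it assumes a running Home Assistant instance with the recorder configured, and the entity id and time window are invented.

# Usage sketch (not part of the module): the past hour of changes for one
# entity. Assumes a running `hass` with the recorder set up; the entity id
# is a hypothetical example.
from datetime import timedelta

from homeassistant.components.recorder import get_instance, history
from homeassistant.core import HomeAssistant
from homeassistant.util import dt as dt_util


async def async_changes_last_hour(hass: HomeAssistant) -> None:
    """Collect one hour of state changes for a hypothetical light."""
    end = dt_util.utcnow()
    start = end - timedelta(hours=1)
    result = await get_instance(hass).async_add_executor_job(
        lambda: history.state_changes_during_period(
            hass, start, end, "light.kitchen", no_attributes=True
        )
    )
    for state in result.get("light.kitchen", []):
        print(state.last_updated, state.state)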
Return the last number_of_states. | def get_last_state_changes(
hass: HomeAssistant, number_of_states: int, entity_id: str
) -> dict[str, list[State]]:
"""Return the last number_of_states."""
has_last_reported = (
recorder.get_instance(hass).schema_version >= LAST_REPORTED_SCHEMA_VERSION
)
entity_id_lower = entity_id.lower()
entity_ids = [entity_id_lower]
# Calling this function with number_of_states > 1 can cause instability
    # because it has to scan the table to find the last number_of_states states,
    # since the metadata_id_last_updated_ts index is in ascending order.
with session_scope(hass=hass, read_only=True) as session:
instance = recorder.get_instance(hass)
if not (
possible_metadata_id := instance.states_meta_manager.get(
entity_id, session, False
)
):
return {}
metadata_id = possible_metadata_id
entity_id_to_metadata_id: dict[str, int | None] = {entity_id_lower: metadata_id}
if number_of_states == 1:
stmt = lambda_stmt(
lambda: _get_last_state_changes_single_stmt(metadata_id),
)
else:
stmt = lambda_stmt(
lambda: _get_last_state_changes_multiple_stmt(
number_of_states, metadata_id, has_last_reported
),
track_on=[has_last_reported],
)
states = list(execute_stmt_lambda_element(session, stmt, orm_rows=False))
return cast(
dict[str, list[State]],
_sorted_states_to_dict(
reversed(states),
None,
entity_ids,
entity_id_to_metadata_id,
no_attributes=False,
),
) |
Baked query to get states for specific entities. | def _get_start_time_state_for_entities_stmt(
run_start_ts: float,
epoch_time: float,
metadata_ids: list[int],
no_attributes: bool,
include_last_changed: bool,
) -> Select:
"""Baked query to get states for specific entities."""
# We got an include-list of entities, accelerate the query by filtering already
# in the inner and the outer query.
stmt = (
_stmt_and_join_attributes_for_start_state(
no_attributes, include_last_changed, False
)
.join(
(
most_recent_states_for_entities_by_date := (
select(
States.metadata_id.label("max_metadata_id"),
func.max(States.last_updated_ts).label("max_last_updated"),
)
.filter(
(States.last_updated_ts >= run_start_ts)
& (States.last_updated_ts < epoch_time)
& States.metadata_id.in_(metadata_ids)
)
.group_by(States.metadata_id)
.subquery()
)
),
and_(
States.metadata_id
== most_recent_states_for_entities_by_date.c.max_metadata_id,
States.last_updated_ts
== most_recent_states_for_entities_by_date.c.max_last_updated,
),
)
.filter(
(States.last_updated_ts >= run_start_ts)
& (States.last_updated_ts < epoch_time)
& States.metadata_id.in_(metadata_ids)
)
)
if no_attributes:
return stmt
return stmt.outerjoin(
StateAttributes, (States.attributes_id == StateAttributes.attributes_id)
) |
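The inner subquery above is the classic greatest-row-per-group pattern: group by metadata_id, take max(last_updated_ts), then join back to the table to recover the full row. A standalone sketch with an invented table; the statement is printed rather than executed.

# Sketch of the "latest row per group" join used above, with an invented
# table; the statement is printed rather than executed.
from sqlalchemy import Column, Float, Integer, MetaData, String, Table, and_, func, select

metadata_obj = MetaData()
demo_states = Table(
    "demo_states",
    metadata_obj,
    Column("metadata_id", Integer),
    Column("state", String),
    Column("last_updated_ts", Float),
)

cutoff_ts = 1_700_000_000.0

latest = (
    select(
        demo_states.c.metadata_id.label("max_metadata_id"),
        func.max(demo_states.c.last_updated_ts).label("max_last_updated"),
    )
    .where(demo_states.c.last_updated_ts < cutoff_ts)
    .group_by(demo_states.c.metadata_id)
    .subquery()
)

stmt = select(demo_states).join(
    latest,
    and_(
        demo_states.c.metadata_id == latest.c.max_metadata_id,
        demo_states.c.last_updated_ts == latest.c.max_last_updated,
    ),
)
print(stmt)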
Return the start time of a run. | def _get_run_start_ts_for_utc_point_in_time(
hass: HomeAssistant, utc_point_in_time: datetime
) -> float | None:
"""Return the start time of a run."""
run = recorder.get_instance(hass).recorder_runs_manager.get(utc_point_in_time)
if (
run is not None
and (run_start := process_timestamp(run.start)) < utc_point_in_time
):
return run_start.timestamp()
    # History did not run before utc_point_in_time, so there is no run start to return
return None |
Return the states at a specific point in time. | def _get_start_time_state_stmt(
run_start_ts: float,
epoch_time: float,
single_metadata_id: int | None,
metadata_ids: list[int],
no_attributes: bool,
include_last_changed: bool,
) -> Select:
"""Return the states at a specific point in time."""
if single_metadata_id:
# Use an entirely different (and extremely fast) query if we only
# have a single entity id
return _get_single_entity_start_time_stmt(
epoch_time,
single_metadata_id,
no_attributes,
include_last_changed,
False,
)
# We have more than one entity to look at so we need to do a query on states
# since the last recorder run started.
return _get_start_time_state_for_entities_stmt(
run_start_ts,
epoch_time,
metadata_ids,
no_attributes,
include_last_changed,
) |
Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
States must be sorted by entity_id and last_updated
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly. | def _sorted_states_to_dict(
states: Iterable[Row],
start_time_ts: float | None,
entity_ids: list[str],
entity_id_to_metadata_id: dict[str, int | None],
minimal_response: bool = False,
compressed_state_format: bool = False,
descending: bool = False,
no_attributes: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
States must be sorted by entity_id and last_updated
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
field_map = _FIELD_MAP
state_class: Callable[
[Row, dict[str, dict[str, Any]], float | None, str, str, float | None, bool],
State | dict[str, Any],
]
if compressed_state_format:
state_class = row_to_compressed_state
attr_time = COMPRESSED_STATE_LAST_UPDATED
attr_state = COMPRESSED_STATE_STATE
else:
state_class = LazyState
attr_time = LAST_CHANGED_KEY
attr_state = STATE_KEY
# Set all entity IDs to empty lists in result set to maintain the order
result: dict[str, list[State | dict[str, Any]]] = {
entity_id: [] for entity_id in entity_ids
}
    metadata_id_to_entity_id: dict[int, str] = {
        v: k for k, v in entity_id_to_metadata_id.items() if v is not None
    }
# Get the states at the start time
if len(entity_ids) == 1:
metadata_id = entity_id_to_metadata_id[entity_ids[0]]
assert metadata_id is not None # should not be possible if we got here
states_iter: Iterable[tuple[int, Iterator[Row]]] = (
(metadata_id, iter(states)),
)
else:
key_func = itemgetter(field_map["metadata_id"])
states_iter = groupby(states, key_func)
state_idx = field_map["state"]
last_updated_ts_idx = field_map["last_updated_ts"]
# Append all changes to it
for metadata_id, group in states_iter:
entity_id = metadata_id_to_entity_id[metadata_id]
attr_cache: dict[str, dict[str, Any]] = {}
ent_results = result[entity_id]
if (
not minimal_response
or split_entity_id(entity_id)[0] in NEED_ATTRIBUTE_DOMAINS
):
ent_results.extend(
state_class(
db_state,
attr_cache,
start_time_ts,
entity_id,
db_state[state_idx],
db_state[last_updated_ts_idx],
False,
)
for db_state in group
)
continue
prev_state: str | None = None
# With minimal response we only provide a native
# State for the first and last response. All the states
# in-between only provide the "state" and the
# "last_changed".
if not ent_results:
if (first_state := next(group, None)) is None:
continue
prev_state = first_state[state_idx]
ent_results.append(
state_class(
first_state,
attr_cache,
start_time_ts,
entity_id,
prev_state, # type: ignore[arg-type]
first_state[last_updated_ts_idx],
no_attributes,
)
)
#
        # minimal_response only makes sense when last_updated == last_changed
        #
        # We use last_updated for last_changed since it's the same
#
# With minimal response we do not care about attribute
# changes so we can filter out duplicate states
if compressed_state_format:
# Compressed state format uses the timestamp directly
ent_results.extend(
{
attr_state: (prev_state := state),
attr_time: row[last_updated_ts_idx],
}
for row in group
if (state := row[state_idx]) != prev_state
)
continue
# Non-compressed state format returns an ISO formatted string
_utc_from_timestamp = dt_util.utc_from_timestamp
ent_results.extend(
{
attr_state: (prev_state := state),
attr_time: _utc_from_timestamp(row[last_updated_ts_idx]).isoformat(),
}
for row in group
if (state := row[state_idx]) != prev_state
)
if descending:
for ent_results in result.values():
ent_results.reverse()
# Filter out the empty lists if some states had 0 results.
return {key: val for key, val in result.items() if val} |
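The minimal-response branches above drop consecutive duplicate states with assignment expressions inside the comprehension: the `if` clause reads the previous value, and the element expression updates it via `(prev_state := state)` only when a change is emitted. A standalone sketch with invented rows and shortened dict keys:

# Standalone sketch of the consecutive-duplicate filter; the rows and the
# "s"/"lu" dict keys are invented stand-ins for the real fields and constants.
rows = [("on", 1.0), ("on", 2.0), ("off", 3.0), ("off", 4.0), ("on", 5.0)]

prev_state: str | None = None
changes = [
    {"s": (prev_state := state), "lu": last_updated_ts}
    for state, last_updated_ts in rows
    if state != prev_state
]

assert [change["s"] for change in changes] == ["on", "off", "on"]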
Return a dict of significant states during a time period. | def get_full_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
no_attributes: bool = False,
) -> dict[str, list[State]]:
"""Return a dict of significant states during a time period."""
if not recorder.get_instance(hass).states_meta_manager.active:
from .legacy import ( # pylint: disable=import-outside-toplevel
get_full_significant_states_with_session as _legacy_get_full_significant_states_with_session,
)
_target = _legacy_get_full_significant_states_with_session
else:
_target = _modern_get_full_significant_states_with_session
return _target(
hass,
session,
start_time,
end_time,
entity_ids,
filters,
include_start_time_state,
significant_changes_only,
no_attributes,
) |
Return the last number_of_states. | def get_last_state_changes(
hass: HomeAssistant, number_of_states: int, entity_id: str
) -> dict[str, list[State]]:
"""Return the last number_of_states."""
if not recorder.get_instance(hass).states_meta_manager.active:
from .legacy import ( # pylint: disable=import-outside-toplevel
get_last_state_changes as _legacy_get_last_state_changes,
)
_target = _legacy_get_last_state_changes
else:
_target = _modern_get_last_state_changes
return _target(hass, number_of_states, entity_id) |
Return a dict of significant states during a time period. | def get_significant_states(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Return a dict of significant states during a time period."""
if not recorder.get_instance(hass).states_meta_manager.active:
from .legacy import ( # pylint: disable=import-outside-toplevel
get_significant_states as _legacy_get_significant_states,
)
_target = _legacy_get_significant_states
else:
_target = _modern_get_significant_states
return _target(
hass,
start_time,
end_time,
entity_ids,
filters,
include_start_time_state,
significant_changes_only,
minimal_response,
no_attributes,
compressed_state_format,
) |
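A usage sketch for the public entry point above, not part of the module: it assumes a running Home Assistant instance with the recorder configured; the entity ids and the one-day window are invented.

# Usage sketch (not part of the module): one day of significant states for two
# hypothetical entities, using the compact minimal-response format.
from datetime import timedelta

from homeassistant.components.recorder import get_instance, history
from homeassistant.core import HomeAssistant
from homeassistant.util import dt as dt_util


async def async_day_of_history(hass: HomeAssistant) -> None:
    """Fetch a day of history for two hypothetical entities."""
    end = dt_util.utcnow()
    start = end - timedelta(days=1)
    result = await get_instance(hass).async_add_executor_job(
        lambda: history.get_significant_states(
            hass,
            start,
            end,
            ["sensor.outdoor_temperature", "climate.living_room"],
            minimal_response=True,
        )
    )
    # Entities in NEED_ATTRIBUTE_DOMAINS always come back as full State
    # objects; other entities may be returned as small state/last_changed dicts.
    for entity_id, states in result.items():
        print(entity_id, len(states))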
Return a dict of significant states during a time period. | def get_significant_states_with_session(
hass: HomeAssistant,
session: Session,
start_time: datetime,
end_time: datetime | None = None,
entity_ids: list[str] | None = None,
filters: Filters | None = None,
include_start_time_state: bool = True,
significant_changes_only: bool = True,
minimal_response: bool = False,
no_attributes: bool = False,
compressed_state_format: bool = False,
) -> dict[str, list[State | dict[str, Any]]]:
"""Return a dict of significant states during a time period."""
if not recorder.get_instance(hass).states_meta_manager.active:
from .legacy import ( # pylint: disable=import-outside-toplevel
get_significant_states_with_session as _legacy_get_significant_states_with_session,
)
_target = _legacy_get_significant_states_with_session
else:
_target = _modern_get_significant_states_with_session
return _target(
hass,
session,
start_time,
end_time,
entity_ids,
filters,
include_start_time_state,
significant_changes_only,
minimal_response,
no_attributes,
compressed_state_format,
) |
Return a list of states that changed during a time period. | def state_changes_during_period(
hass: HomeAssistant,
start_time: datetime,
end_time: datetime | None = None,
entity_id: str | None = None,
no_attributes: bool = False,
descending: bool = False,
limit: int | None = None,
include_start_time_state: bool = True,
) -> dict[str, list[State]]:
"""Return a list of states that changed during a time period."""
if not recorder.get_instance(hass).states_meta_manager.active:
from .legacy import ( # pylint: disable=import-outside-toplevel
state_changes_during_period as _legacy_state_changes_during_period,
)
_target = _legacy_state_changes_during_period
else:
_target = _modern_state_changes_during_period
return _target(
hass,
start_time,
end_time,
entity_id,
no_attributes,
descending,
limit,
include_start_time_state,
) |
Convert an ulid to bytes. | def ulid_to_bytes_or_none(ulid: str | None) -> bytes | None:
"""Convert an ulid to bytes."""
if ulid is None:
return None
try:
return ulid_to_bytes(ulid)
except ValueError:
_LOGGER.exception("Error converting ulid %s to bytes", ulid)
return None |
Convert bytes to a ulid. | def bytes_to_ulid_or_none(_bytes: bytes | None) -> str | None:
"""Convert bytes to a ulid."""
if _bytes is None:
return None
try:
return bytes_to_ulid(_bytes)
except ValueError:
_LOGGER.exception("Error converting bytes %s to ulid", _bytes)
return None |
Convert a uuid hex to bytes. | def uuid_hex_to_bytes_or_none(uuid_hex: str | None) -> bytes | None:
"""Convert a uuid hex to bytes."""
if uuid_hex is None:
return None
with suppress(ValueError):
return UUID(hex=uuid_hex).bytes
return None |
Convert bytes to a uuid hex. | def bytes_to_uuid_hex_or_none(_bytes: bytes | None) -> str | None:
"""Convert bytes to a uuid hex."""
if _bytes is None:
return None
with suppress(ValueError):
return UUID(bytes=_bytes).hex
return None |
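The two helpers above are thin wrappers around the standard-library UUID round trip between the 32-character hex form and the 16-byte form stored in the database. A tiny standalone sketch:

# Standalone sketch of the round trip the two helpers above wrap; the UUID is
# just a freshly generated example value.
from uuid import UUID, uuid4

uuid_hex = uuid4().hex                 # 32 hex characters, no dashes
as_bytes = UUID(hex=uuid_hex).bytes    # 16-byte form as stored in the database
assert len(as_bytes) == 16
assert UUID(bytes=as_bytes).hex == uuid_hex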
Extract event_type ids from event_type_to_event_type_id. | def extract_event_type_ids(
event_type_to_event_type_id: dict[EventType[Any] | str, int | None],
) -> list[int]:
"""Extract event_type ids from event_type_to_event_type_id."""
return [
event_type_id
for event_type_id in event_type_to_event_type_id.values()
if event_type_id is not None
] |
Convert a database row to a compressed state before schema 31. | def legacy_row_to_compressed_state_pre_schema_31(
row: Row,
attr_cache: dict[str, dict[str, Any]],
start_time: datetime | None,
) -> dict[str, Any]:
"""Convert a database row to a compressed state before schema 31."""
comp_state = {
COMPRESSED_STATE_STATE: row.state,
COMPRESSED_STATE_ATTRIBUTES: decode_attributes_from_row_legacy(row, attr_cache),
}
if start_time:
comp_state[COMPRESSED_STATE_LAST_UPDATED] = start_time.timestamp()
else:
row_last_updated: datetime = row.last_updated
comp_state[COMPRESSED_STATE_LAST_UPDATED] = process_datetime_to_timestamp(
row_last_updated
)
if (
row_changed_changed := row.last_changed
) and row_last_updated != row_changed_changed:
comp_state[COMPRESSED_STATE_LAST_CHANGED] = process_datetime_to_timestamp(
row_changed_changed
)
return comp_state |
Convert a database row to a compressed state schema 31 and later. | def legacy_row_to_compressed_state(
row: Row,
attr_cache: dict[str, dict[str, Any]],
start_time: datetime | None,
entity_id: str | None = None,
) -> dict[str, Any]:
"""Convert a database row to a compressed state schema 31 and later."""
comp_state = {
COMPRESSED_STATE_STATE: row.state,
COMPRESSED_STATE_ATTRIBUTES: decode_attributes_from_row_legacy(row, attr_cache),
}
if start_time:
comp_state[COMPRESSED_STATE_LAST_UPDATED] = dt_util.utc_to_timestamp(start_time)
else:
row_last_updated_ts: float = row.last_updated_ts
comp_state[COMPRESSED_STATE_LAST_UPDATED] = row_last_updated_ts
if (
row_last_changed_ts := row.last_changed_ts
) and row_last_updated_ts != row_last_changed_ts:
comp_state[COMPRESSED_STATE_LAST_CHANGED] = row_last_changed_ts
return comp_state |
Decode attributes from a database row. | def decode_attributes_from_row_legacy(
row: Row, attr_cache: dict[str, dict[str, Any]]
) -> dict[str, Any]:
"""Decode attributes from a database row."""
return decode_attributes_from_source(
getattr(row, "shared_attrs", None) or getattr(row, "attributes", None),
attr_cache,
) |
Extract metadata ids from entity_id_to_metadata_id. | def extract_metadata_ids(
entity_id_to_metadata_id: dict[str, int | None],
) -> list[int]:
"""Extract metadata ids from entity_id_to_metadata_id."""
return [
metadata_id
for metadata_id in entity_id_to_metadata_id.values()
if metadata_id is not None
] |