Dataset columns: response (string, lengths 1 to 33.1k) and instruction (string, lengths 22 to 582k).
Ensure STATISTIC_UNIT_TO_UNIT_CONVERTER is aligned with UNIT_CONVERTERS.
def test_converters_align_with_sensor() -> None:
    """Ensure STATISTIC_UNIT_TO_UNIT_CONVERTER is aligned with UNIT_CONVERTERS."""
    for converter in UNIT_CONVERTERS.values():
        assert converter in STATISTIC_UNIT_TO_UNIT_CONVERTER.values()

    for converter in STATISTIC_UNIT_TO_UNIT_CONVERTER.values():
        assert converter in UNIT_CONVERTERS.values()
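A possible variant of the same bidirectional check, sketched so a failure reports exactly which converters drifted out of sync; it uses only the two registries already imported by the test above, and the function name is illustrative, not part of the module:

def test_converters_align_with_sensor_verbose() -> None:
    # Sketch only: set differences give a readable diff when the registries diverge.
    sensor_converters = set(UNIT_CONVERTERS.values())
    statistic_converters = set(STATISTIC_UNIT_TO_UNIT_CONVERTER.values())
    assert not sensor_converters - statistic_converters, (
        f"Missing from STATISTIC_UNIT_TO_UNIT_CONVERTER: {sensor_converters - statistic_converters}"
    )
    assert not statistic_converters - sensor_converters, (
        f"Missing from UNIT_CONVERTERS: {statistic_converters - sensor_converters}"
    )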
Test compiling hourly statistics.
def test_compile_hourly_statistics(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test compiling hourly statistics.""" hass = hass_recorder() instance = recorder.get_instance(hass) setup_component(hass, "sensor", {}) zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four, list(states)) assert_dict_of_states_equal_without_context_and_last_changed(states, hist) # Should not fail if there is nothing there yet with session_scope(hass=hass, read_only=True) as session: stats = get_latest_short_term_statistics_with_session( hass, session, {"sensor.test1"}, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}): stats = statistics_during_period(hass, zero, period="5minute", **kwargs) assert stats == {} stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} do_adhoc_statistics(hass, start=zero) do_adhoc_statistics(hass, start=four) wait_recording_done(hass) metadata = get_metadata(hass, statistic_ids={"sensor.test1", "sensor.test2"}) assert metadata["sensor.test1"][1]["has_mean"] is True assert metadata["sensor.test1"][1]["has_sum"] is False assert metadata["sensor.test2"][1]["has_mean"] is True assert metadata["sensor.test2"][1]["has_sum"] is False expected_1 = { "start": process_timestamp(zero).timestamp(), "end": process_timestamp(zero + timedelta(minutes=5)).timestamp(), "mean": pytest.approx(14.915254237288135), "min": pytest.approx(10.0), "max": pytest.approx(20.0), "last_reset": None, } expected_2 = { "start": process_timestamp(four).timestamp(), "end": process_timestamp(four + timedelta(minutes=5)).timestamp(), "mean": pytest.approx(20.0), "min": pytest.approx(20.0), "max": pytest.approx(20.0), "last_reset": None, } expected_stats1 = [expected_1, expected_2] expected_stats2 = [expected_1, expected_2] # Test statistics_during_period stats = statistics_during_period( hass, zero, period="5minute", statistic_ids={"sensor.test1", "sensor.test2"} ) assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} # Test statistics_during_period with a far future start and end date future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period( hass, future, end_time=future, period="5minute", statistic_ids={"sensor.test1", "sensor.test2"}, ) assert stats == {} # Test statistics_during_period with a far future end date stats = statistics_during_period( hass, zero, end_time=future, period="5minute", statistic_ids={"sensor.test1", "sensor.test2"}, ) assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} stats = statistics_during_period( hass, zero, statistic_ids={"sensor.test2"}, period="5minute" ) assert stats == {"sensor.test2": expected_stats2} stats = statistics_during_period( hass, zero, statistic_ids={"sensor.test3"}, period="5minute" ) assert stats == {} # Test get_last_short_term_statistics and get_latest_short_term_statistics stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} stats = get_last_short_term_statistics( hass, 1, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": [expected_2]} with session_scope(hass=hass, read_only=True) as session: stats = get_latest_short_term_statistics_with_session( hass, session, {"sensor.test1"}, 
{"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": [expected_2]} # Now wipe the latest_short_term_statistics_ids table and test again # to make sure we can rebuild the missing data run_cache = get_short_term_statistics_run_cache(instance.hass) run_cache._latest_id_by_metadata_id = {} with session_scope(hass=hass, read_only=True) as session: stats = get_latest_short_term_statistics_with_session( hass, session, {"sensor.test1"}, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": [expected_2]} metadata = get_metadata(hass, statistic_ids={"sensor.test1"}) with session_scope(hass=hass, read_only=True) as session: stats = get_latest_short_term_statistics_with_session( hass, session, {"sensor.test1"}, {"last_reset", "max", "mean", "min", "state", "sum"}, metadata=metadata, ) assert stats == {"sensor.test1": [expected_2]} stats = get_last_short_term_statistics( hass, 2, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": expected_stats1[::-1]} stats = get_last_short_term_statistics( hass, 3, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": expected_stats1[::-1]} stats = get_last_short_term_statistics( hass, 1, "sensor.test3", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} instance.get_session().query(StatisticsShortTerm).delete() # Should not fail there is nothing in the table with session_scope(hass=hass, read_only=True) as session: stats = get_latest_short_term_statistics_with_session( hass, session, {"sensor.test1"}, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} # Delete again, and manually wipe the cache since we deleted all the data instance.get_session().query(StatisticsShortTerm).delete() run_cache = get_short_term_statistics_run_cache(instance.hass) run_cache._latest_id_by_metadata_id = {} # And test again to make sure there is no data with session_scope(hass=hass, read_only=True) as session: stats = get_latest_short_term_statistics_with_session( hass, session, {"sensor.test1"}, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {}
Generate some fake statistics.
def mock_sensor_statistics():
    """Generate some fake statistics."""

    def sensor_stats(entity_id, start):
        """Generate fake statistics."""
        return {
            "meta": {
                "has_mean": True,
                "has_sum": False,
                "name": None,
                "statistic_id": entity_id,
                "unit_of_measurement": "dogs",
            },
            "stat": {"start": start},
        }

    def get_fake_stats(_hass, session, start, _end):
        return statistics.PlatformCompiledStatistics(
            [
                sensor_stats("sensor.test1", start),
                sensor_stats("sensor.test2", start),
                sensor_stats("sensor.test3", start),
            ],
            get_metadata(
                _hass, statistic_ids={"sensor.test1", "sensor.test2", "sensor.test3"}
            ),
        )

    with patch(
        "homeassistant.components.sensor.recorder.compile_statistics",
        side_effect=get_fake_stats,
    ):
        yield
Mock out Statistics.from_stats.
def mock_from_stats():
    """Mock out Statistics.from_stats."""
    counter = 0
    real_from_stats = StatisticsShortTerm.from_stats

    def from_stats(metadata_id, stats):
        nonlocal counter
        if counter == 0 and metadata_id == 2:
            counter += 1
            return None
        return real_from_stats(metadata_id, stats)

    with patch(
        "homeassistant.components.recorder.statistics.StatisticsShortTerm.from_stats",
        side_effect=from_stats,
        autospec=True,
    ):
        yield
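Both generator functions above are consumed as test arguments by test_compile_periodic_statistics_exception below, which is the usual pytest fixture pattern (in the original module they would carry a @pytest.fixture decorator, which this dump omits). A minimal, self-contained sketch of that wiring with hypothetical names:

import pytest
from unittest.mock import patch


class Collaborator:
    """Hypothetical stand-in for StatisticsShortTerm in the fixtures above."""

    @staticmethod
    def from_stats(metadata_id, stats):
        return (metadata_id, stats)


@pytest.fixture
def failing_from_stats():
    # Patch the collaborator for the duration of one test; the patch is undone
    # when the generator resumes after the yield.
    with patch.object(Collaborator, "from_stats", return_value=None):
        yield


def test_uses_fixture(failing_from_stats):
    # The patch installed by the fixture is active inside the test body.
    assert Collaborator.from_stats(1, {}) is None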
Test exception handling when compiling periodic statistics.
def test_compile_periodic_statistics_exception( hass_recorder: Callable[..., HomeAssistant], mock_sensor_statistics, mock_from_stats ) -> None: """Test exception handling when compiling periodic statistics.""" hass = hass_recorder() setup_component(hass, "sensor", {}) now = dt_util.utcnow() do_adhoc_statistics(hass, start=now) do_adhoc_statistics(hass, start=now + timedelta(minutes=5)) wait_recording_done(hass) expected_1 = { "start": process_timestamp(now).timestamp(), "end": process_timestamp(now + timedelta(minutes=5)).timestamp(), "mean": None, "min": None, "max": None, "last_reset": None, "state": None, "sum": None, } expected_2 = { "start": process_timestamp(now + timedelta(minutes=5)).timestamp(), "end": process_timestamp(now + timedelta(minutes=10)).timestamp(), "mean": None, "min": None, "max": None, "last_reset": None, "state": None, "sum": None, } expected_stats1 = [expected_1, expected_2] expected_stats2 = [expected_2] expected_stats3 = [expected_1, expected_2] stats = statistics_during_period(hass, now, period="5minute") assert stats == { "sensor.test1": expected_stats1, "sensor.test2": expected_stats2, "sensor.test3": expected_stats3, }
Test statistics are migrated when entity_id is changed.
def test_rename_entity(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test statistics is migrated when entity_id is changed.""" hass = hass_recorder() setup_component(hass, "sensor", {}) entity_reg = mock_registry(hass) @callback def add_entry(): reg_entry = entity_reg.async_get_or_create( "sensor", "test", "unique_0000", suggested_object_id="test1", ) assert reg_entry.entity_id == "sensor.test1" hass.add_job(add_entry) hass.block_till_done() zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four, list(states)) assert_dict_of_states_equal_without_context_and_last_changed(states, hist) for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}): stats = statistics_during_period(hass, zero, period="5minute", **kwargs) assert stats == {} stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} do_adhoc_statistics(hass, start=zero) wait_recording_done(hass) expected_1 = { "start": process_timestamp(zero).timestamp(), "end": process_timestamp(zero + timedelta(minutes=5)).timestamp(), "mean": pytest.approx(14.915254237288135), "min": pytest.approx(10.0), "max": pytest.approx(20.0), "last_reset": None, "state": None, "sum": None, } expected_stats1 = [expected_1] expected_stats2 = [expected_1] expected_stats99 = [expected_1] stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} @callback def rename_entry(): entity_reg.async_update_entity("sensor.test1", new_entity_id="sensor.test99") hass.add_job(rename_entry) wait_recording_done(hass) stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test99": expected_stats99, "sensor.test2": expected_stats2}
Test statistics_during_period can handle a list instead of a set.
def test_statistics_during_period_set_back_compat(
    hass_recorder: Callable[..., HomeAssistant],
) -> None:
    """Test statistics_during_period can handle a list instead of a set."""
    hass = hass_recorder()
    setup_component(hass, "sensor", {})
    # This should not throw an exception when passed a list instead of a set
    assert (
        statistics.statistics_during_period(
            hass,
            dt_util.utcnow(),
            None,
            statistic_ids=["sensor.test1"],
            period="5minute",
            units=None,
            types=set(),
        )
        == {}
    )
Test statistics are migrated when entity_id is changed. This test relies on the safeguard in the statistics_meta_manager and should not hit the filter_unique_constraint_integrity_error safeguard.
def test_rename_entity_collision( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test statistics is migrated when entity_id is changed. This test relies on the safeguard in the statistics_meta_manager and should not hit the filter_unique_constraint_integrity_error safeguard. """ hass = hass_recorder() setup_component(hass, "sensor", {}) entity_reg = mock_registry(hass) @callback def add_entry(): reg_entry = entity_reg.async_get_or_create( "sensor", "test", "unique_0000", suggested_object_id="test1", ) assert reg_entry.entity_id == "sensor.test1" hass.add_job(add_entry) hass.block_till_done() zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four, list(states)) assert_dict_of_states_equal_without_context_and_last_changed(states, hist) for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}): stats = statistics_during_period(hass, zero, period="5minute", **kwargs) assert stats == {} stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} do_adhoc_statistics(hass, start=zero) wait_recording_done(hass) expected_1 = { "start": process_timestamp(zero).timestamp(), "end": process_timestamp(zero + timedelta(minutes=5)).timestamp(), "mean": pytest.approx(14.915254237288135), "min": pytest.approx(10.0), "max": pytest.approx(20.0), "last_reset": None, "state": None, "sum": None, } expected_stats1 = [expected_1] expected_stats2 = [expected_1] stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} # Insert metadata for sensor.test99 metadata_1 = { "has_mean": True, "has_sum": False, "name": "Total imported energy", "source": "test", "statistic_id": "sensor.test99", "unit_of_measurement": "kWh", } with session_scope(hass=hass) as session: session.add(recorder.db_schema.StatisticsMeta.from_meta(metadata_1)) # Rename entity sensor.test1 to sensor.test99 @callback def rename_entry(): entity_reg.async_update_entity("sensor.test1", new_entity_id="sensor.test99") hass.add_job(rename_entry) wait_recording_done(hass) # Statistics failed to migrate due to the collision stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} # Verify the safeguard in the states meta manager was hit assert ( "Cannot rename statistic_id `sensor.test1` to `sensor.test99` " "because the new statistic_id is already in use" ) in caplog.text # Verify the filter_unique_constraint_integrity_error safeguard was not hit assert "Blocked attempt to insert duplicated statistic rows" not in caplog.text
Test statistics are migrated when entity_id is changed. This test disables the safeguard in the statistics_meta_manager and relies on the filter_unique_constraint_integrity_error safeguard.
def test_rename_entity_collision_states_meta_check_disabled( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test statistics is migrated when entity_id is changed. This test disables the safeguard in the statistics_meta_manager and relies on the filter_unique_constraint_integrity_error safeguard. """ hass = hass_recorder() setup_component(hass, "sensor", {}) entity_reg = mock_registry(hass) @callback def add_entry(): reg_entry = entity_reg.async_get_or_create( "sensor", "test", "unique_0000", suggested_object_id="test1", ) assert reg_entry.entity_id == "sensor.test1" hass.add_job(add_entry) hass.block_till_done() zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four, list(states)) assert_dict_of_states_equal_without_context_and_last_changed(states, hist) for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}): stats = statistics_during_period(hass, zero, period="5minute", **kwargs) assert stats == {} stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} do_adhoc_statistics(hass, start=zero) wait_recording_done(hass) expected_1 = { "start": process_timestamp(zero).timestamp(), "end": process_timestamp(zero + timedelta(minutes=5)).timestamp(), "mean": pytest.approx(14.915254237288135), "min": pytest.approx(10.0), "max": pytest.approx(20.0), "last_reset": None, "state": None, "sum": None, } expected_stats1 = [expected_1] expected_stats2 = [expected_1] stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} # Insert metadata for sensor.test99 metadata_1 = { "has_mean": True, "has_sum": False, "name": "Total imported energy", "source": "test", "statistic_id": "sensor.test99", "unit_of_measurement": "kWh", } with session_scope(hass=hass) as session: session.add(recorder.db_schema.StatisticsMeta.from_meta(metadata_1)) instance = recorder.get_instance(hass) # Patch out the safeguard in the states meta manager # so that we hit the filter_unique_constraint_integrity_error safeguard in the statistics with patch.object(instance.statistics_meta_manager, "get", return_value=None): # Rename entity sensor.test1 to sensor.test99 @callback def rename_entry(): entity_reg.async_update_entity( "sensor.test1", new_entity_id="sensor.test99" ) hass.add_job(rename_entry) wait_recording_done(hass) # Statistics failed to migrate due to the collision stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} # Verify the filter_unique_constraint_integrity_error safeguard was hit assert "Blocked attempt to insert duplicated statistic rows" in caplog.text # Verify the safeguard in the states meta manager was not hit assert ( "Cannot rename statistic_id `sensor.test1` to `sensor.test99` " "because the new statistic_id is already in use" ) not in caplog.text
Test statistics with the same start time are not compiled.
def test_statistics_duplicated(
    hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture
) -> None:
    """Test statistics with the same start time are not compiled."""
    hass = hass_recorder()
    setup_component(hass, "sensor", {})
    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four, list(states))
    assert_dict_of_states_equal_without_context_and_last_changed(states, hist)

    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text

    with patch(
        "homeassistant.components.sensor.recorder.compile_statistics",
        return_value=statistics.PlatformCompiledStatistics([], {}),
    ) as compile_statistics:
        do_adhoc_statistics(hass, start=zero)
        wait_recording_done(hass)
        assert compile_statistics.called
        compile_statistics.reset_mock()
        assert "Compiling statistics for" in caplog.text
        assert "Statistics already compiled" not in caplog.text
        caplog.clear()

        do_adhoc_statistics(hass, start=zero)
        wait_recording_done(hass)
        assert not compile_statistics.called
        compile_statistics.reset_mock()
        assert "Compiling statistics for" not in caplog.text
        assert "Statistics already compiled" in caplog.text
        caplog.clear()
Test validation of external statistics.
def test_external_statistics_errors( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test validation of external statistics.""" hass = hass_recorder() wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() last_reset = zero.replace(minute=0, second=0, microsecond=0) - timedelta(days=1) period1 = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1) _external_statistics = { "start": period1, "last_reset": last_reset, "state": 0, "sum": 2, } _external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import", "unit_of_measurement": "kWh", } # Attempt to insert statistics for an entity external_metadata = { **_external_metadata, "statistic_id": "sensor.total_energy_import", } external_statistics = {**_external_statistics} with pytest.raises(HomeAssistantError): async_add_external_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"sensor.total_energy_import"}) == {} # Attempt to insert statistics for the wrong domain external_metadata = {**_external_metadata, "source": "other"} external_statistics = {**_external_statistics} with pytest.raises(HomeAssistantError): async_add_external_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"test:total_energy_import"}) == {} # Attempt to insert statistics for a naive starting time external_metadata = {**_external_metadata} external_statistics = { **_external_statistics, "start": period1.replace(tzinfo=None), } with pytest.raises(HomeAssistantError): async_add_external_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"test:total_energy_import"}) == {} # Attempt to insert statistics for an invalid starting time external_metadata = {**_external_metadata} external_statistics = {**_external_statistics, "start": period1.replace(minute=1)} with pytest.raises(HomeAssistantError): async_add_external_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"test:total_energy_import"}) == {} # Attempt to insert statistics with a naive last_reset external_metadata = {**_external_metadata} external_statistics = { **_external_statistics, "last_reset": last_reset.replace(tzinfo=None), } with pytest.raises(HomeAssistantError): async_add_external_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"test:total_energy_import"}) == {}
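For contrast with the failure cases above, a well-formed external import pairs a source that matches the statistic_id prefix with timezone-aware, hour-aligned timestamps. This is only a sketch of the happy path as it would appear inside one of these recorder tests, using hass, dt_util, async_add_external_statistics and wait_recording_done as already imported by this module:

    period = dt_util.utcnow().replace(minute=0, second=0, microsecond=0)  # tz-aware, hour-aligned
    valid_metadata = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",  # must match the "test:" prefix of the statistic_id
        "statistic_id": "test:total_energy_import",
        "unit_of_measurement": "kWh",
    }
    valid_row = {"start": period, "last_reset": None, "state": 0, "sum": 2}
    async_add_external_statistics(hass, valid_metadata, (valid_row,))
    wait_recording_done(hass)  # no HomeAssistantError is raised for well-formed input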
Test validation of imported statistics.
def test_import_statistics_errors( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test validation of imported statistics.""" hass = hass_recorder() wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() last_reset = zero.replace(minute=0, second=0, microsecond=0) - timedelta(days=1) period1 = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1) _external_statistics = { "start": period1, "last_reset": last_reset, "state": 0, "sum": 2, } _external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "recorder", "statistic_id": "sensor.total_energy_import", "unit_of_measurement": "kWh", } # Attempt to insert statistics for an external source external_metadata = { **_external_metadata, "statistic_id": "test:total_energy_import", } external_statistics = {**_external_statistics} with pytest.raises(HomeAssistantError): async_import_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"test:total_energy_import"}) == {} # Attempt to insert statistics for the wrong domain external_metadata = {**_external_metadata, "source": "sensor"} external_statistics = {**_external_statistics} with pytest.raises(HomeAssistantError): async_import_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"sensor.total_energy_import"}) == {} # Attempt to insert statistics for a naive starting time external_metadata = {**_external_metadata} external_statistics = { **_external_statistics, "start": period1.replace(tzinfo=None), } with pytest.raises(HomeAssistantError): async_import_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"sensor.total_energy_import"}) == {} # Attempt to insert statistics for an invalid starting time external_metadata = {**_external_metadata} external_statistics = {**_external_statistics, "start": period1.replace(minute=1)} with pytest.raises(HomeAssistantError): async_import_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"sensor.total_energy_import"}) == {} # Attempt to insert statistics with a naive last_reset external_metadata = {**_external_metadata} external_statistics = { **_external_statistics, "last_reset": last_reset.replace(tzinfo=None), } with pytest.raises(HomeAssistantError): async_import_statistics(hass, external_metadata, (external_statistics,)) wait_recording_done(hass) assert statistics_during_period(hass, zero, period="hour") == {} assert list_statistic_ids(hass) == [] assert get_metadata(hass, statistic_ids={"sensor.total_energy_import"}) == {}
Test daily statistics.
def test_daily_statistics_sum( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, timezone, ) -> None: """Test daily statistics.""" hass = hass_recorder(timezone=timezone) wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() period1 = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2022-10-04 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2022-10-04 23:00:00")) period5 = dt_util.as_utc(dt_util.parse_datetime("2022-10-05 00:00:00")) period6 = dt_util.as_utc(dt_util.parse_datetime("2022-10-05 23:00:00")) external_statistics = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, { "start": period5, "last_reset": None, "state": 4, "sum": 6, }, { "start": period6, "last_reset": None, "state": 5, "sum": 7, }, ) external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import", "unit_of_measurement": "kWh", } async_add_external_statistics(hass, external_metadata, external_statistics) wait_recording_done(hass) stats = statistics_during_period( hass, zero, period="day", statistic_ids={"test:total_energy_import"} ) day1_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 00:00:00")) day1_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-04 00:00:00")) day2_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-04 00:00:00")) day2_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-05 00:00:00")) day3_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-05 00:00:00")) day3_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-06 00:00:00")) expected_stats = { "test:total_energy_import": [ { "start": day1_start.timestamp(), "end": day1_end.timestamp(), "last_reset": None, "state": 1.0, "sum": 3.0, }, { "start": day2_start.timestamp(), "end": day2_end.timestamp(), "last_reset": None, "state": 3.0, "sum": 5.0, }, { "start": day3_start.timestamp(), "end": day3_end.timestamp(), "last_reset": None, "state": 5.0, "sum": 7.0, }, ] } assert stats == expected_stats # Get change stats = statistics_during_period( hass, start_time=period1, statistic_ids={"test:total_energy_import"}, period="day", types={"change"}, ) assert stats == { "test:total_energy_import": [ { "start": day1_start.timestamp(), "end": day1_end.timestamp(), "change": 3.0, }, { "start": day2_start.timestamp(), "end": day2_end.timestamp(), "change": 2.0, }, { "start": day3_start.timestamp(), "end": day3_end.timestamp(), "change": 2.0, }, ] } # Get data with start during the first period stats = statistics_during_period( hass, start_time=period1 + timedelta(hours=1), statistic_ids={"test:total_energy_import"}, period="day", ) assert stats == expected_stats # Get data with end during the third period stats = statistics_during_period( hass, start_time=zero, end_time=period6 - timedelta(hours=1), statistic_ids={"test:total_energy_import"}, period="day", ) assert stats == expected_stats # Try to get data for entities which do not exist stats = statistics_during_period( hass, start_time=zero, statistic_ids={"not", "the", "same", "test:total_energy_import"}, period="day", ) 
    assert stats == expected_stats

    # Use 5minute to ensure table switch works
    stats = statistics_during_period(
        hass,
        start_time=zero,
        statistic_ids=["test:total_energy_import", "with_other"],
        period="5minute",
    )
    assert stats == {}

    # Ensure a future date has no data
    future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00"))
    stats = statistics_during_period(
        hass, start_time=future, end_time=future, period="day"
    )
    assert stats == {}
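Each day bucket asserted above carries the last state and last sum seen inside that day, and the asserted change is the difference between consecutive buckets' sums. A small pure-Python sketch that reproduces those expected numbers from the six hourly rows inserted above:

hourly_sums = [2, 3, 4, 5, 6, 7]  # the "sum" values inserted above, two per day
daily_last_sum = [hourly_sums[i + 1] for i in range(0, len(hourly_sums), 2)]
assert daily_last_sum == [3, 5, 7]  # matches the expected day buckets

previous = 0  # nothing before the first bucket
changes = []
for total in daily_last_sum:
    changes.append(total - previous)
    previous = total
assert changes == [3, 2, 2]  # matches the "change" assertions above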
Test weekly statistics.
def test_weekly_statistics_mean( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, timezone, ) -> None: """Test weekly statistics.""" hass = hass_recorder(timezone=timezone) wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() period1 = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2022-10-05 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2022-10-10 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2022-10-16 23:00:00")) external_statistics = ( { "start": period1, "last_reset": None, "max": 0, "mean": 10, "min": -100, }, { "start": period2, "last_reset": None, "max": 10, "mean": 20, "min": -90, }, { "start": period3, "last_reset": None, "max": 20, "mean": 30, "min": -80, }, { "start": period4, "last_reset": None, "max": 30, "mean": 40, "min": -70, }, ) external_metadata = { "has_mean": True, "has_sum": False, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import", "unit_of_measurement": "kWh", } async_add_external_statistics(hass, external_metadata, external_statistics) wait_recording_done(hass) # Get all data stats = statistics_during_period( hass, zero, period="week", statistic_ids={"test:total_energy_import"} ) week1_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 00:00:00")) week1_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-10 00:00:00")) week2_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-10 00:00:00")) week2_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-17 00:00:00")) expected_stats = { "test:total_energy_import": [ { "start": week1_start.timestamp(), "end": week1_end.timestamp(), "last_reset": None, "max": 10, "mean": 15, "min": -100, }, { "start": week2_start.timestamp(), "end": week2_end.timestamp(), "last_reset": None, "max": 30, "mean": 35, "min": -80, }, ] } assert stats == expected_stats # Get data starting with start of the first period stats = statistics_during_period( hass, start_time=period1, statistic_ids={"test:total_energy_import"}, period="week", ) assert stats == expected_stats # Get data with start during the first period stats = statistics_during_period( hass, start_time=period1 + timedelta(days=1), statistic_ids={"test:total_energy_import"}, period="week", ) assert stats == expected_stats # Try to get data for entities which do not exist stats = statistics_during_period( hass, start_time=zero, statistic_ids={"not", "the", "same", "test:total_energy_import"}, period="week", ) assert stats == expected_stats # Use 5minute to ensure table switch works stats = statistics_during_period( hass, start_time=zero, statistic_ids=["test:total_energy_import", "with_other"], period="5minute", ) assert stats == {} # Ensure future date has not data future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period( hass, start_time=future, end_time=future, period="week" ) assert stats == {}
Test weekly statistics.
def test_weekly_statistics_sum( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, timezone, ) -> None: """Test weekly statistics.""" hass = hass_recorder(timezone=timezone) wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() period1 = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2022-10-09 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2022-10-10 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2022-10-16 23:00:00")) period5 = dt_util.as_utc(dt_util.parse_datetime("2022-10-17 00:00:00")) period6 = dt_util.as_utc(dt_util.parse_datetime("2022-10-23 23:00:00")) external_statistics = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, { "start": period5, "last_reset": None, "state": 4, "sum": 6, }, { "start": period6, "last_reset": None, "state": 5, "sum": 7, }, ) external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import", "unit_of_measurement": "kWh", } async_add_external_statistics(hass, external_metadata, external_statistics) wait_recording_done(hass) stats = statistics_during_period( hass, zero, period="week", statistic_ids={"test:total_energy_import"} ) week1_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-03 00:00:00")) week1_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-10 00:00:00")) week2_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-10 00:00:00")) week2_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-17 00:00:00")) week3_start = dt_util.as_utc(dt_util.parse_datetime("2022-10-17 00:00:00")) week3_end = dt_util.as_utc(dt_util.parse_datetime("2022-10-24 00:00:00")) expected_stats = { "test:total_energy_import": [ { "start": week1_start.timestamp(), "end": week1_end.timestamp(), "last_reset": None, "state": 1.0, "sum": 3.0, }, { "start": week2_start.timestamp(), "end": week2_end.timestamp(), "last_reset": None, "state": 3.0, "sum": 5.0, }, { "start": week3_start.timestamp(), "end": week3_end.timestamp(), "last_reset": None, "state": 5.0, "sum": 7.0, }, ] } assert stats == expected_stats # Get change stats = statistics_during_period( hass, start_time=period1, statistic_ids={"test:total_energy_import"}, period="week", types={"change"}, ) assert stats == { "test:total_energy_import": [ { "start": week1_start.timestamp(), "end": week1_end.timestamp(), "change": 3.0, }, { "start": week2_start.timestamp(), "end": week2_end.timestamp(), "change": 2.0, }, { "start": week3_start.timestamp(), "end": week3_end.timestamp(), "change": 2.0, }, ] } # Get data with start during the first period stats = statistics_during_period( hass, start_time=period1 + timedelta(days=1), statistic_ids={"test:total_energy_import"}, period="week", ) assert stats == expected_stats # Get data with end during the third period stats = statistics_during_period( hass, start_time=zero, end_time=period6 - timedelta(days=1), statistic_ids={"test:total_energy_import"}, period="week", ) assert stats == expected_stats # Try to get data for entities which do not exist stats = statistics_during_period( hass, start_time=zero, statistic_ids={"not", "the", "same", 
"test:total_energy_import"}, period="week", ) assert stats == expected_stats # Use 5minute to ensure table switch works stats = statistics_during_period( hass, start_time=zero, statistic_ids=["test:total_energy_import", "with_other"], period="5minute", ) assert stats == {} # Ensure future date has not data future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period( hass, start_time=future, end_time=future, period="week" ) assert stats == {}
Test monthly statistics.
def test_monthly_statistics_sum( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, timezone, ) -> None: """Test monthly statistics.""" hass = hass_recorder(timezone=timezone) wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00")) period5 = dt_util.as_utc(dt_util.parse_datetime("2021-11-01 00:00:00")) period6 = dt_util.as_utc(dt_util.parse_datetime("2021-11-30 23:00:00")) external_statistics = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, { "start": period5, "last_reset": None, "state": 4, "sum": 6, }, { "start": period6, "last_reset": None, "state": 5, "sum": 7, }, ) external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import", "unit_of_measurement": "kWh", } async_add_external_statistics(hass, external_metadata, external_statistics) wait_recording_done(hass) stats = statistics_during_period( hass, zero, period="month", statistic_ids={"test:total_energy_import"} ) sep_start = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) sep_end = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) oct_start = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) oct_end = dt_util.as_utc(dt_util.parse_datetime("2021-11-01 00:00:00")) nov_start = dt_util.as_utc(dt_util.parse_datetime("2021-11-01 00:00:00")) nov_end = dt_util.as_utc(dt_util.parse_datetime("2021-12-01 00:00:00")) expected_stats = { "test:total_energy_import": [ { "start": sep_start.timestamp(), "end": sep_end.timestamp(), "last_reset": None, "state": pytest.approx(1.0), "sum": pytest.approx(3.0), }, { "start": oct_start.timestamp(), "end": oct_end.timestamp(), "last_reset": None, "state": pytest.approx(3.0), "sum": pytest.approx(5.0), }, { "start": nov_start.timestamp(), "end": nov_end.timestamp(), "last_reset": None, "state": 5.0, "sum": 7.0, }, ] } assert stats == expected_stats # Get change stats = statistics_during_period( hass, start_time=period1, statistic_ids={"test:total_energy_import"}, period="month", types={"change"}, ) assert stats == { "test:total_energy_import": [ { "start": sep_start.timestamp(), "end": sep_end.timestamp(), "change": 3.0, }, { "start": oct_start.timestamp(), "end": oct_end.timestamp(), "change": 2.0, }, { "start": nov_start.timestamp(), "end": nov_end.timestamp(), "change": 2.0, }, ] } # Get data with start during the first period stats = statistics_during_period( hass, start_time=period1 + timedelta(days=1), statistic_ids={"test:total_energy_import"}, period="month", ) assert stats == expected_stats # Get data with end during the third period stats = statistics_during_period( hass, start_time=zero, end_time=period6 - timedelta(days=1), statistic_ids={"test:total_energy_import"}, period="month", ) assert stats == expected_stats # Try to get data for entities which do not exist stats = statistics_during_period( hass, start_time=zero, statistic_ids={"not", "the", 
"same", "test:total_energy_import"}, period="month", ) assert stats == expected_stats # Get only sum stats = statistics_during_period( hass, start_time=zero, statistic_ids={"not", "the", "same", "test:total_energy_import"}, period="month", types={"sum"}, ) assert stats == { "test:total_energy_import": [ { "start": sep_start.timestamp(), "end": sep_end.timestamp(), "sum": pytest.approx(3.0), }, { "start": oct_start.timestamp(), "end": oct_end.timestamp(), "sum": pytest.approx(5.0), }, { "start": nov_start.timestamp(), "end": nov_end.timestamp(), "sum": pytest.approx(7.0), }, ] } # Get only sum + convert units stats = statistics_during_period( hass, start_time=zero, statistic_ids={"not", "the", "same", "test:total_energy_import"}, period="month", types={"sum"}, units={"energy": "Wh"}, ) assert stats == { "test:total_energy_import": [ { "start": sep_start.timestamp(), "end": sep_end.timestamp(), "sum": pytest.approx(3000.0), }, { "start": oct_start.timestamp(), "end": oct_end.timestamp(), "sum": pytest.approx(5000.0), }, { "start": nov_start.timestamp(), "end": nov_end.timestamp(), "sum": pytest.approx(7000.0), }, ] } # Use 5minute to ensure table switch works stats = statistics_during_period( hass, start_time=zero, statistic_ids=["test:total_energy_import", "with_other"], period="5minute", ) assert stats == {} # Ensure future date has not data future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period( hass, start_time=future, end_time=future, period="month" ) assert stats == {}
Test cache key for _generate_statistics_during_period_stmt.
def test_cache_key_for_generate_statistics_during_period_stmt() -> None:
    """Test cache key for _generate_statistics_during_period_stmt."""
    stmt = _generate_statistics_during_period_stmt(
        dt_util.utcnow(), dt_util.utcnow(), [0], StatisticsShortTerm, set()
    )
    cache_key_1 = stmt._generate_cache_key()
    stmt2 = _generate_statistics_during_period_stmt(
        dt_util.utcnow(), dt_util.utcnow(), [0], StatisticsShortTerm, set()
    )
    cache_key_2 = stmt2._generate_cache_key()
    assert cache_key_1 == cache_key_2
    stmt3 = _generate_statistics_during_period_stmt(
        dt_util.utcnow(),
        dt_util.utcnow(),
        [0],
        StatisticsShortTerm,
        {"sum", "mean"},
    )
    cache_key_3 = stmt3._generate_cache_key()
    assert cache_key_1 != cache_key_3
Test cache key for _generate_get_metadata_stmt.
def test_cache_key_for_generate_get_metadata_stmt() -> None:
    """Test cache key for _generate_get_metadata_stmt."""
    stmt_mean = _generate_get_metadata_stmt([0], "mean")
    stmt_mean2 = _generate_get_metadata_stmt([1], "mean")
    stmt_sum = _generate_get_metadata_stmt([0], "sum")
    stmt_none = _generate_get_metadata_stmt()
    assert stmt_mean._generate_cache_key() == stmt_mean2._generate_cache_key()
    assert stmt_mean._generate_cache_key() != stmt_sum._generate_cache_key()
    assert stmt_mean._generate_cache_key() != stmt_none._generate_cache_key()
Test cache key for _generate_max_mean_min_statistic_in_sub_period_stmt.
def test_cache_key_for_generate_max_mean_min_statistic_in_sub_period_stmt() -> None:
    """Test cache key for _generate_max_mean_min_statistic_in_sub_period_stmt."""
    columns = select(StatisticsShortTerm.metadata_id, StatisticsShortTerm.start_ts)
    stmt = _generate_max_mean_min_statistic_in_sub_period_stmt(
        columns,
        dt_util.utcnow(),
        dt_util.utcnow(),
        StatisticsShortTerm,
        [0],
    )
    cache_key_1 = stmt._generate_cache_key()
    stmt2 = _generate_max_mean_min_statistic_in_sub_period_stmt(
        columns,
        dt_util.utcnow(),
        dt_util.utcnow(),
        StatisticsShortTerm,
        [0],
    )
    cache_key_2 = stmt2._generate_cache_key()
    assert cache_key_1 == cache_key_2
    columns2 = select(
        StatisticsShortTerm.metadata_id,
        StatisticsShortTerm.start_ts,
        StatisticsShortTerm.sum,
        StatisticsShortTerm.mean,
    )
    stmt3 = _generate_max_mean_min_statistic_in_sub_period_stmt(
        columns2,
        dt_util.utcnow(),
        dt_util.utcnow(),
        StatisticsShortTerm,
        [0],
    )
    cache_key_3 = stmt3._generate_cache_key()
    assert cache_key_1 != cache_key_3
Test cache key for _generate_statistics_at_time_stmt.
def test_cache_key_for_generate_statistics_at_time_stmt() -> None:
    """Test cache key for _generate_statistics_at_time_stmt."""
    stmt = _generate_statistics_at_time_stmt(StatisticsShortTerm, {0}, 0.0, set())
    cache_key_1 = stmt._generate_cache_key()
    stmt2 = _generate_statistics_at_time_stmt(StatisticsShortTerm, {0}, 0.0, set())
    cache_key_2 = stmt2._generate_cache_key()
    assert cache_key_1 == cache_key_2
    stmt3 = _generate_statistics_at_time_stmt(
        StatisticsShortTerm, {0}, 0.0, {"sum", "mean"}
    )
    cache_key_3 = stmt3._generate_cache_key()
    assert cache_key_1 != cache_key_3
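The four tests above all lean on SQLAlchemy's statement cache keys: _generate_cache_key() is an internal SQLAlchemy API that yields equal keys for structurally identical statements and a different key once the selected columns change. A standalone sketch of that property, independent of the recorder helpers (the table here is purely illustrative):

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata_obj = MetaData()
example = Table("example", metadata_obj, Column("id", Integer), Column("value", Integer))

stmt1 = select(example.c.id)
stmt2 = select(example.c.id)
stmt3 = select(example.c.id, example.c.value)

# Same structure -> same cache key; a different column list -> a different key.
assert stmt1._generate_cache_key() == stmt2._generate_cache_key()
assert stmt1._generate_cache_key() != stmt3._generate_cache_key()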
Test deriving change from sum statistic.
def test_change( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, timezone, ) -> None: """Test deriving change from sum statistic.""" hass = hass_recorder(timezone=timezone) wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() period1 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 01:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 02:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 03:00:00")) external_statistics = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 5, }, { "start": period4, "last_reset": None, "state": 3, "sum": 8, }, ) external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "recorder", "statistic_id": "sensor.total_energy_import", "unit_of_measurement": "kWh", } async_import_statistics(hass, external_metadata, external_statistics) wait_recording_done(hass) # Get change from far in the past stats = statistics_during_period( hass, zero, period="hour", statistic_ids={"sensor.total_energy_import"}, types={"change"}, ) hour1_start = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 00:00:00")) hour1_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 01:00:00")) hour2_start = hour1_end hour2_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 02:00:00")) hour3_start = hour2_end hour3_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 03:00:00")) hour4_start = hour3_end hour4_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 04:00:00")) expected_stats = { "sensor.total_energy_import": [ { "start": hour1_start.timestamp(), "end": hour1_end.timestamp(), "change": 2.0, }, { "start": hour2_start.timestamp(), "end": hour2_end.timestamp(), "change": 1.0, }, { "start": hour3_start.timestamp(), "end": hour3_end.timestamp(), "change": 2.0, }, { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 3.0, }, ] } assert stats == expected_stats # Get change + sum from far in the past stats = statistics_during_period( hass, zero, period="hour", statistic_ids={"sensor.total_energy_import"}, types={"change", "sum"}, ) hour1_start = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 00:00:00")) hour1_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 01:00:00")) hour2_start = hour1_end hour2_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 02:00:00")) hour3_start = hour2_end hour3_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 03:00:00")) hour4_start = hour3_end hour4_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 04:00:00")) expected_stats_change_sum = { "sensor.total_energy_import": [ { "start": hour1_start.timestamp(), "end": hour1_end.timestamp(), "change": 2.0, "sum": 2.0, }, { "start": hour2_start.timestamp(), "end": hour2_end.timestamp(), "change": 1.0, "sum": 3.0, }, { "start": hour3_start.timestamp(), "end": hour3_end.timestamp(), "change": 2.0, "sum": 5.0, }, { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 3.0, "sum": 8.0, }, ] } assert stats == expected_stats_change_sum # Get change from far in the past with unit conversion stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"sensor.total_energy_import"}, period="hour", 
types={"change"}, units={"energy": "Wh"}, ) expected_stats_wh = { "sensor.total_energy_import": [ { "start": hour1_start.timestamp(), "end": hour1_end.timestamp(), "change": 2.0 * 1000, }, { "start": hour2_start.timestamp(), "end": hour2_end.timestamp(), "change": 1.0 * 1000, }, { "start": hour3_start.timestamp(), "end": hour3_end.timestamp(), "change": 2.0 * 1000, }, { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 3.0 * 1000, }, ] } assert stats == expected_stats_wh # Get change from far in the past with implicit unit conversion hass.states.async_set( "sensor.total_energy_import", "unknown", {"unit_of_measurement": "MWh"} ) stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) expected_stats_mwh = { "sensor.total_energy_import": [ { "start": hour1_start.timestamp(), "end": hour1_end.timestamp(), "change": 2.0 / 1000, }, { "start": hour2_start.timestamp(), "end": hour2_end.timestamp(), "change": 1.0 / 1000, }, { "start": hour3_start.timestamp(), "end": hour3_end.timestamp(), "change": 2.0 / 1000, }, { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 3.0 / 1000, }, ] } assert stats == expected_stats_mwh hass.states.async_remove("sensor.total_energy_import") # Get change from the first recorded hour stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == expected_stats # Get change from the first recorded hour with unit conversion stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, units={"energy": "Wh"}, ) assert stats == expected_stats_wh # Get change from the first recorded hour with implicit unit conversion hass.states.async_set( "sensor.total_energy_import", "unknown", {"unit_of_measurement": "MWh"} ) stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == expected_stats_mwh hass.states.async_remove("sensor.total_energy_import") # Get change from the second recorded hour stats = statistics_during_period( hass, start_time=hour2_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == { "sensor.total_energy_import": expected_stats["sensor.total_energy_import"][1:4] } # Get change from the second recorded hour with unit conversion stats = statistics_during_period( hass, start_time=hour2_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, units={"energy": "Wh"}, ) assert stats == { "sensor.total_energy_import": expected_stats_wh["sensor.total_energy_import"][ 1:4 ] } # Get change from the second recorded hour with implicit unit conversion hass.states.async_set( "sensor.total_energy_import", "unknown", {"unit_of_measurement": "MWh"} ) stats = statistics_during_period( hass, start_time=hour2_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == { "sensor.total_energy_import": expected_stats_mwh["sensor.total_energy_import"][ 1:4 ] } hass.states.async_remove("sensor.total_energy_import") # Get change from the second until the third recorded hour stats = statistics_during_period( hass, start_time=hour2_start, end_time=hour4_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == { 
"sensor.total_energy_import": expected_stats["sensor.total_energy_import"][1:3] } # Get change from the fourth recorded hour stats = statistics_during_period( hass, start_time=hour4_start, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == { "sensor.total_energy_import": expected_stats["sensor.total_energy_import"][3:4] } # Test change with a far future start date future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period( hass, start_time=future, statistic_ids={"sensor.total_energy_import"}, period="hour", types={"change"}, ) assert stats == {}
Test deriving change from sum statistic. This tests the behavior when a record has a None sum. The calculated change is not expected to be correct, but we should not raise on this error.
def test_change_with_none( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, timezone, ) -> None: """Test deriving change from sum statistic. This tests the behavior when some record has None sum. The calculated change is not expected to be correct, but we should not raise on this error. """ hass = hass_recorder(timezone=timezone) wait_recording_done(hass) assert "Compiling statistics for" not in caplog.text assert "Statistics already compiled" not in caplog.text zero = dt_util.utcnow() period1 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 01:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 02:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 03:00:00")) external_statistics = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": None, }, { "start": period4, "last_reset": None, "state": 3, "sum": 8, }, ) external_metadata = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import", "unit_of_measurement": "kWh", } async_add_external_statistics(hass, external_metadata, external_statistics) wait_recording_done(hass) # Get change from far in the past stats = statistics_during_period( hass, zero, period="hour", statistic_ids={"test:total_energy_import"}, types={"change"}, ) hour1_start = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 00:00:00")) hour1_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 01:00:00")) hour2_start = hour1_end hour2_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 02:00:00")) hour3_start = hour2_end hour3_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 03:00:00")) hour4_start = hour3_end hour4_end = dt_util.as_utc(dt_util.parse_datetime("2023-05-08 04:00:00")) expected_stats = { "test:total_energy_import": [ { "start": hour1_start.timestamp(), "end": hour1_end.timestamp(), "change": 2.0, }, { "start": hour2_start.timestamp(), "end": hour2_end.timestamp(), "change": 1.0, }, { "start": hour3_start.timestamp(), "end": hour3_end.timestamp(), "change": None, }, { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 5.0, }, ] } assert stats == expected_stats # Get change from far in the past with unit conversion stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, units={"energy": "Wh"}, ) expected_stats_wh = { "test:total_energy_import": [ { "start": hour1_start.timestamp(), "end": hour1_end.timestamp(), "change": 2.0 * 1000, }, { "start": hour2_start.timestamp(), "end": hour2_end.timestamp(), "change": 1.0 * 1000, }, { "start": hour3_start.timestamp(), "end": hour3_end.timestamp(), "change": None, }, { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 5.0 * 1000, }, ] } assert stats == expected_stats_wh # Get change from the first recorded hour stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, ) assert stats == expected_stats # Get change from the first recorded hour with unit conversion stats = statistics_during_period( hass, start_time=hour1_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, units={"energy": "Wh"}, ) assert stats == 
expected_stats_wh # Get change from the second recorded hour stats = statistics_during_period( hass, start_time=hour2_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, ) assert stats == { "test:total_energy_import": expected_stats["test:total_energy_import"][1:4] } # Get change from the second recorded hour with unit conversion stats = statistics_during_period( hass, start_time=hour2_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, units={"energy": "Wh"}, ) assert stats == { "test:total_energy_import": expected_stats_wh["test:total_energy_import"][1:4] } # Get change from the second until the third recorded hour stats = statistics_during_period( hass, start_time=hour2_start, end_time=hour4_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, ) assert stats == { "test:total_energy_import": expected_stats["test:total_energy_import"][1:3] } # Get change from the fourth recorded hour stats = statistics_during_period( hass, start_time=hour4_start, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, ) assert stats == { "test:total_energy_import": [ { "start": hour4_start.timestamp(), "end": hour4_end.timestamp(), "change": 8.0, # Assumed to be 8 because the previous hour has no data }, ] } # Test change with a far future start date future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period( hass, start_time=future, statistic_ids={"test:total_energy_import"}, period="hour", types={"change"}, ) assert stats == {}
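When one imported row has a None sum, the asserted change for that hour is None and the next hour's change is taken against the last non-None sum (8 - 3 = 5 above). A sketch of that None-tolerant delta, reproducing the asserted values rather than claiming to mirror the recorder's implementation:

hourly_sums = [2.0, 3.0, None, 8.0]  # the imported "sum" values above
changes = []
last_known = 0.0  # baseline before the first recorded row
for total in hourly_sums:
    if total is None:
        changes.append(None)  # no change can be derived for this hour
        continue
    changes.append(total - last_known)
    last_known = total
assert changes == [2.0, 1.0, None, 5.0]  # matches expected_stats above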
Test removal of duplicated statistics.
def test_delete_duplicates(caplog: pytest.LogCaptureFixture, tmp_path: Path) -> None: """Test removal of duplicated statistics.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00")) external_energy_statistics_1 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, ) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_2 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 20, }, { "start": period2, "last_reset": None, "state": 1, "sum": 30, }, { "start": period3, "last_reset": None, "state": 2, "sum": 40, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50, }, ) external_energy_metadata_2 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_2", "unit_of_measurement": "kWh", } external_co2_statistics = ( { "start": period1, "last_reset": None, "mean": 10, }, { "start": period2, "last_reset": None, "mean": 30, }, { "start": period3, "last_reset": None, "mean": 60, }, { "start": period4, "last_reset": None, "mean": 90, }, ) external_co2_metadata = { "has_mean": True, "has_sum": False, "name": "Fossil percentage", "source": "test", "statistic_id": "test:fossil_percentage", "unit_of_measurement": "%", } # Create some duplicated statistics with schema version 23 with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch( CREATE_ENGINE_TARGET, new=partial( create_engine_test_for_schema_version_postfix, schema_version_postfix=SCHEMA_VERSION_POSTFIX, ), ), get_test_home_assistant() as hass, ): recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata) ) with session_scope(hass=hass) as session: for stat in external_energy_statistics_1: session.add(recorder.db_schema.Statistics.from_stats(1, stat)) for stat in external_energy_statistics_2: session.add(recorder.db_schema.Statistics.from_stats(2, stat)) for stat in external_co2_statistics: session.add(recorder.db_schema.Statistics.from_stats(3, stat)) hass.stop() # Test that the duplicates are removed during migration from schema 23 with get_test_home_assistant() as hass: 
recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) hass.stop() assert "Deleted 2 duplicated statistics rows" in caplog.text assert "Found non identical" not in caplog.text assert "Found duplicated" not in caplog.text
Test removal of duplicated statistics with many duplicate rows.
def test_delete_duplicates_many( caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: """Test removal of duplicated statistics.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00")) external_energy_statistics_1 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, ) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_2 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 20, }, { "start": period2, "last_reset": None, "state": 1, "sum": 30, }, { "start": period3, "last_reset": None, "state": 2, "sum": 40, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50, }, ) external_energy_metadata_2 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_2", "unit_of_measurement": "kWh", } external_co2_statistics = ( { "start": period1, "last_reset": None, "mean": 10, }, { "start": period2, "last_reset": None, "mean": 30, }, { "start": period3, "last_reset": None, "mean": 60, }, { "start": period4, "last_reset": None, "mean": 90, }, ) external_co2_metadata = { "has_mean": True, "has_sum": False, "name": "Fossil percentage", "source": "test", "statistic_id": "test:fossil_percentage", "unit_of_measurement": "%", } # Create some duplicated statistics with schema version 23 with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch( CREATE_ENGINE_TARGET, new=partial( create_engine_test_for_schema_version_postfix, schema_version_postfix=SCHEMA_VERSION_POSTFIX, ), ), get_test_home_assistant() as hass, ): recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata) ) with session_scope(hass=hass) as session: for stat in external_energy_statistics_1: session.add(recorder.db_schema.Statistics.from_stats(1, stat)) for _ in range(3000): session.add( recorder.db_schema.Statistics.from_stats( 1, external_energy_statistics_1[-1] ) ) for stat in external_energy_statistics_2: session.add(recorder.db_schema.Statistics.from_stats(2, stat)) for stat in external_co2_statistics: session.add(recorder.db_schema.Statistics.from_stats(3, stat)) hass.stop() 
# Test that the duplicates are removed during migration from schema 23 with get_test_home_assistant() as hass: recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) hass.stop() assert "Deleted 3002 duplicated statistics rows" in caplog.text assert "Found non identical" not in caplog.text assert "Found duplicated" not in caplog.text
Test removal of duplicated statistics when duplicates are not identical.
def test_delete_duplicates_non_identical( caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: """Test removal of duplicated statistics.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00")) external_energy_statistics_1 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, { "start": period4, "last_reset": None, "state": 3, "sum": 6, }, ) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_2 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 20, }, { "start": period2, "last_reset": None, "state": 1, "sum": 30, }, { "start": period3, "last_reset": None, "state": 2, "sum": 40, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50, }, ) external_energy_metadata_2 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_2", "unit_of_measurement": "kWh", } # Create some duplicated statistics with schema version 23 with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch( CREATE_ENGINE_TARGET, new=partial( create_engine_test_for_schema_version_postfix, schema_version_postfix=SCHEMA_VERSION_POSTFIX, ), ), get_test_home_assistant() as hass, ): recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2) ) with session_scope(hass=hass) as session: for stat in external_energy_statistics_1: session.add(recorder.db_schema.Statistics.from_stats(1, stat)) for stat in external_energy_statistics_2: session.add(recorder.db_schema.Statistics.from_stats(2, stat)) hass.stop() # Test that the duplicates are removed during migration from schema 23 with get_test_home_assistant() as hass: hass.config.config_dir = tmp_path recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) hass.stop() assert "Deleted 2 duplicated statistics rows" in caplog.text assert "Deleted 1 non identical" in caplog.text assert "Found duplicated" not in caplog.text isotime = dt_util.utcnow().isoformat() backup_file_name = f".storage/deleted_statistics.{isotime}.json" with open(hass.config.path(backup_file_name)) as backup_file: backup = json.load(backup_file) assert backup == [ { 
"duplicate": { "created": "2021-08-01T00:00:00", "id": 4, "last_reset": None, "max": None, "mean": None, "metadata_id": 1, "min": None, "start": "2021-10-31T23:00:00", "state": 3.0, "sum": 5.0, }, "original": { "created": "2021-08-01T00:00:00", "id": 5, "last_reset": None, "max": None, "mean": None, "metadata_id": 1, "min": None, "start": "2021-10-31T23:00:00", "state": 3.0, "sum": 6.0, }, } ]
Test removal of duplicated short term statistics.
def test_delete_duplicates_short_term( caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: """Test removal of duplicated statistics.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00")) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } statistic_row = { "start": period4, "last_reset": None, "state": 3, "sum": 5, } # Create some duplicated statistics with schema version 23 with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch( CREATE_ENGINE_TARGET, new=partial( create_engine_test_for_schema_version_postfix, schema_version_postfix=SCHEMA_VERSION_POSTFIX, ), ), get_test_home_assistant() as hass, ): recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1) ) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsShortTerm.from_stats(1, statistic_row) ) session.add( recorder.db_schema.StatisticsShortTerm.from_stats(1, statistic_row) ) hass.stop() # Test that the duplicates are removed during migration from schema 23 with get_test_home_assistant() as hass: hass.config.config_dir = tmp_path recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) hass.stop() assert "duplicated statistics rows" not in caplog.text assert "Found non identical" not in caplog.text assert "Deleted duplicated short term statistic" in caplog.text
Try to create a session scope when not setup.
def test_session_scope_not_setup(hass_recorder: Callable[..., HomeAssistant]) -> None: """Try to create a session scope when not setup.""" hass = hass_recorder() with ( patch.object(util.get_instance(hass), "get_session", return_value=None), pytest.raises(RuntimeError), util.session_scope(hass=hass), ): pass
Bad execute, retry 3 times.
def test_recorder_bad_execute(hass_recorder: Callable[..., HomeAssistant]) -> None: """Bad execute, retry 3 times.""" from sqlalchemy.exc import SQLAlchemyError hass_recorder() def to_native(validate_entity_id=True): """Raise exception.""" raise SQLAlchemyError mck1 = MagicMock() mck1.to_native = to_native with ( pytest.raises(SQLAlchemyError), patch("homeassistant.components.recorder.core.time.sleep") as e_mock, ): util.execute((mck1,), to_native=True) assert e_mock.call_count == 2
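A generic sketch of the retry pattern this test exercises, assuming three attempts with a sleep between failures so that sleep runs twice before the final error propagates; retry_call is a hypothetical helper, not recorder.util.execute.

import time


def retry_call(func, attempts: int = 3, wait: float = 0.0):
    """Call func up to `attempts` times, sleeping between failed attempts (sketch)."""
    for attempt in range(attempts):
        try:
            return func()
        except Exception:  # broad on purpose, illustrative only
            if attempt == attempts - 1:
                raise
            time.sleep(wait)


attempts_made = []
try:
    retry_call(lambda: attempts_made.append(1) or 1 / 0)
except ZeroDivisionError:
    pass
assert len(attempts_made) == 3  # three attempts -> two sleeps in between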
Ensure a malformed sqlite database is moved away.
def test_validate_or_move_away_sqlite_database( hass: HomeAssistant, tmp_path: Path, caplog: pytest.LogCaptureFixture ) -> None: """Ensure a malformed sqlite database is moved away.""" test_dir = tmp_path.joinpath("test_validate_or_move_away_sqlite_database") test_dir.mkdir() test_db_file = f"{test_dir}/broken.db" dburl = f"{SQLITE_URL_PREFIX}{test_db_file}" assert util.validate_sqlite_database(test_db_file) is False assert os.path.exists(test_db_file) is True assert util.validate_or_move_away_sqlite_database(dburl) is False corrupt_db_file(test_db_file) assert util.validate_sqlite_database(dburl) is False assert util.validate_or_move_away_sqlite_database(dburl) is False assert "corrupt or malformed" in caplog.text assert util.validate_sqlite_database(dburl) is False assert util.validate_or_move_away_sqlite_database(dburl) is True
Test setting up the connection for a mysql dialect.
def test_setup_connection_for_dialect_mysql(mysql_version) -> None: """Test setting up the connection for a mysql dialect.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT VERSION()": return [[mysql_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) util.setup_connection_for_dialect(instance_mock, "mysql", dbapi_connection, True) assert len(execute_args) == 3 assert execute_args[0] == "SET session wait_timeout=28800" assert execute_args[1] == "SELECT VERSION()" assert execute_args[2] == "SET time_zone = '+00:00'"
Test setting up the connection for a sqlite dialect.
def test_setup_connection_for_dialect_sqlite(sqlite_version) -> None: """Test setting up the connection for a sqlite dialect.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT sqlite_version()": return [[sqlite_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) assert ( util.setup_connection_for_dialect( instance_mock, "sqlite", dbapi_connection, True ) is not None ) assert len(execute_args) == 5 assert execute_args[0] == "PRAGMA journal_mode=WAL" assert execute_args[1] == "SELECT sqlite_version()" assert execute_args[2] == "PRAGMA cache_size = -16384" assert execute_args[3] == "PRAGMA synchronous=NORMAL" assert execute_args[4] == "PRAGMA foreign_keys=ON" execute_args = [] assert ( util.setup_connection_for_dialect( instance_mock, "sqlite", dbapi_connection, False ) is None ) assert len(execute_args) == 3 assert execute_args[0] == "PRAGMA cache_size = -16384" assert execute_args[1] == "PRAGMA synchronous=NORMAL" assert execute_args[2] == "PRAGMA foreign_keys=ON"
Test setting up the connection for a sqlite dialect with a zero commit interval.
def test_setup_connection_for_dialect_sqlite_zero_commit_interval( sqlite_version, ) -> None: """Test setting up the connection for a sqlite dialect with a zero commit interval.""" instance_mock = MagicMock(commit_interval=0) execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT sqlite_version()": return [[sqlite_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) assert ( util.setup_connection_for_dialect( instance_mock, "sqlite", dbapi_connection, True ) is not None ) assert len(execute_args) == 5 assert execute_args[0] == "PRAGMA journal_mode=WAL" assert execute_args[1] == "SELECT sqlite_version()" assert execute_args[2] == "PRAGMA cache_size = -16384" assert execute_args[3] == "PRAGMA synchronous=FULL" assert execute_args[4] == "PRAGMA foreign_keys=ON" execute_args = [] assert ( util.setup_connection_for_dialect( instance_mock, "sqlite", dbapi_connection, False ) is None ) assert len(execute_args) == 3 assert execute_args[0] == "PRAGMA cache_size = -16384" assert execute_args[1] == "PRAGMA synchronous=FULL" assert execute_args[2] == "PRAGMA foreign_keys=ON"
Test setting up the connection for an outdated mysql version.
def test_fail_outdated_mysql( caplog: pytest.LogCaptureFixture, mysql_version, message ) -> None: """Test setting up the connection for an outdated mysql version.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT VERSION()": return [[mysql_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) with pytest.raises(UnsupportedDialect): util.setup_connection_for_dialect( instance_mock, "mysql", dbapi_connection, True ) assert message in caplog.text
Test setting up the connection for a supported mysql version.
def test_supported_mysql(caplog: pytest.LogCaptureFixture, mysql_version) -> None: """Test setting up the connection for a supported mysql version.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT VERSION()": return [[mysql_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) util.setup_connection_for_dialect(instance_mock, "mysql", dbapi_connection, True) assert "minimum supported version" not in caplog.text
Test setting up the connection for an outdated PostgreSQL version.
def test_fail_outdated_pgsql( caplog: pytest.LogCaptureFixture, pgsql_version, message ) -> None: """Test setting up the connection for an outdated PostgreSQL version.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SHOW server_version": return [[pgsql_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) with pytest.raises(UnsupportedDialect): util.setup_connection_for_dialect( instance_mock, "postgresql", dbapi_connection, True ) assert message in caplog.text
Test setting up the connection for a supported PostgreSQL version.
def test_supported_pgsql(caplog: pytest.LogCaptureFixture, pgsql_version) -> None: """Test setting up the connection for a supported PostgreSQL version.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SHOW server_version": return [[pgsql_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) database_engine = util.setup_connection_for_dialect( instance_mock, "postgresql", dbapi_connection, True ) assert "minimum supported version" not in caplog.text assert database_engine is not None assert database_engine.optimizer.slow_range_in_select is False
Test setting up the connection for an outdated sqlite version.
def test_fail_outdated_sqlite( caplog: pytest.LogCaptureFixture, sqlite_version, message ) -> None: """Test setting up the connection for an outdated sqlite version.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT sqlite_version()": return [[sqlite_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) with pytest.raises(UnsupportedDialect): util.setup_connection_for_dialect( instance_mock, "sqlite", dbapi_connection, True ) assert message in caplog.text
Test setting up the connection for a supported sqlite version.
def test_supported_sqlite(caplog: pytest.LogCaptureFixture, sqlite_version) -> None: """Test setting up the connection for a supported sqlite version.""" instance_mock = MagicMock() execute_args = [] close_mock = MagicMock() def execute_mock(statement): nonlocal execute_args execute_args.append(statement) def fetchall_mock(): nonlocal execute_args if execute_args[-1] == "SELECT sqlite_version()": return [[sqlite_version]] return None def _make_cursor_mock(*_): return MagicMock(execute=execute_mock, close=close_mock, fetchall=fetchall_mock) dbapi_connection = MagicMock(cursor=_make_cursor_mock) database_engine = util.setup_connection_for_dialect( instance_mock, "sqlite", dbapi_connection, True ) assert "minimum supported version" not in caplog.text assert database_engine is not None assert database_engine.optimizer.slow_range_in_select is False
Test setting up the connection for an unsupported dialect.
def test_warn_unsupported_dialect( caplog: pytest.LogCaptureFixture, dialect, message ) -> None: """Test setting up the connection for an unsupported dialect.""" instance_mock = MagicMock() dbapi_connection = MagicMock() with pytest.raises(UnsupportedDialect): util.setup_connection_for_dialect( instance_mock, dialect, dbapi_connection, True ) assert message in caplog.text
Test the basic sanity checks with a missing table.
def test_basic_sanity_check( hass_recorder: Callable[..., HomeAssistant], recorder_db_url ) -> None: """Test the basic sanity checks with a missing table.""" if recorder_db_url.startswith(("mysql://", "postgresql://")): # This test is specific for SQLite return hass = hass_recorder() cursor = util.get_instance(hass).engine.raw_connection().cursor() assert util.basic_sanity_check(cursor) is True cursor.execute("DROP TABLE states;") with pytest.raises(sqlite3.DatabaseError): util.basic_sanity_check(cursor)
Run checks on the open database.
def test_combined_checks( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, recorder_db_url, ) -> None: """Run Checks on the open database.""" if recorder_db_url.startswith(("mysql://", "postgresql://")): # This test is specific for SQLite return hass = hass_recorder() instance = util.get_instance(hass) instance.db_retry_wait = 0 cursor = instance.engine.raw_connection().cursor() assert util.run_checks_on_open_db("fake_db_path", cursor) is None assert "could not validate that the sqlite3 database" in caplog.text caplog.clear() # We are patching recorder.util here in order # to avoid creating the full database on disk with patch( "homeassistant.components.recorder.util.basic_sanity_check", return_value=False ): caplog.clear() assert util.run_checks_on_open_db("fake_db_path", cursor) is None assert "could not validate that the sqlite3 database" in caplog.text # We are patching recorder.util here in order # to avoid creating the full database on disk with patch("homeassistant.components.recorder.util.last_run_was_recently_clean"): caplog.clear() assert util.run_checks_on_open_db("fake_db_path", cursor) is None assert "restarted cleanly and passed the basic sanity check" in caplog.text caplog.clear() with ( patch( "homeassistant.components.recorder.util.last_run_was_recently_clean", side_effect=sqlite3.DatabaseError, ), pytest.raises(sqlite3.DatabaseError), ): util.run_checks_on_open_db("fake_db_path", cursor) caplog.clear() with ( patch( "homeassistant.components.recorder.util.last_run_was_recently_clean", side_effect=sqlite3.DatabaseError, ), pytest.raises(sqlite3.DatabaseError), ): util.run_checks_on_open_db("fake_db_path", cursor) cursor.execute("DROP TABLE events;") caplog.clear() with pytest.raises(sqlite3.DatabaseError): util.run_checks_on_open_db("fake_db_path", cursor) caplog.clear() with pytest.raises(sqlite3.DatabaseError): util.run_checks_on_open_db("fake_db_path", cursor)
Ensure we can end incomplete runs.
def test_end_incomplete_runs( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Ensure we can end incomplete runs.""" hass = hass_recorder() with session_scope(hass=hass) as session: run_info = run_information_with_session(session) assert isinstance(run_info, RecorderRuns) assert run_info.closed_incorrect is False now = dt_util.utcnow() end_incomplete_runs(session, now) run_info = run_information_with_session(session) assert run_info.closed_incorrect is True assert process_timestamp(run_info.end) == now session.flush() later = dt_util.utcnow() end_incomplete_runs(session, later) run_info = run_information_with_session(session) assert process_timestamp(run_info.end) == now assert "Ended unfinished session" in caplog.text
Test periodic db cleanups.
def test_periodic_db_cleanups( hass_recorder: Callable[..., HomeAssistant], recorder_db_url ) -> None: """Test periodic db cleanups.""" if recorder_db_url.startswith(("mysql://", "postgresql://")): # This test is specific for SQLite return hass = hass_recorder() with patch.object(util.get_instance(hass).engine, "connect") as connect_mock: util.periodic_db_cleanups(util.get_instance(hass)) text_obj = connect_mock.return_value.__enter__.return_value.execute.mock_calls[0][ 1 ][0] assert isinstance(text_obj, TextClause) assert str(text_obj) == "PRAGMA wal_checkpoint(TRUNCATE);"
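For reference, a standalone sqlite3 snippet (stdlib only, not recorder code) issuing the same WAL checkpoint statement the test expects periodic_db_cleanups to execute:

import sqlite3
import tempfile

# Plain sqlite3 illustration of PRAGMA wal_checkpoint(TRUNCATE), which
# checkpoints and truncates the write-ahead log of a WAL-mode database.
with tempfile.TemporaryDirectory() as tmp_dir:
    conn = sqlite3.connect(f"{tmp_dir}/example.db")
    conn.execute("PRAGMA journal_mode=WAL")
    conn.execute("CREATE TABLE states (state TEXT)")
    conn.execute("PRAGMA wal_checkpoint(TRUNCATE);")
    conn.close()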
Test we can find the second sunday of the month.
def test_is_second_sunday() -> None: """Test we can find the second sunday of the month.""" assert is_second_sunday(datetime(2022, 1, 9, 0, 0, 0, tzinfo=dt_util.UTC)) is True assert is_second_sunday(datetime(2022, 2, 13, 0, 0, 0, tzinfo=dt_util.UTC)) is True assert is_second_sunday(datetime(2022, 3, 13, 0, 0, 0, tzinfo=dt_util.UTC)) is True assert is_second_sunday(datetime(2022, 4, 10, 0, 0, 0, tzinfo=dt_util.UTC)) is True assert is_second_sunday(datetime(2022, 5, 8, 0, 0, 0, tzinfo=dt_util.UTC)) is True assert is_second_sunday(datetime(2022, 1, 10, 0, 0, 0, tzinfo=dt_util.UTC)) is False
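The property exercised above can be stated directly: a date is the second Sunday of its month exactly when it falls on a Sunday with a day of month between 8 and 14. A standalone sketch of that rule (not the recorder's is_second_sunday helper):

from datetime import datetime, timezone


def second_sunday_check(value: datetime) -> bool:
    """Return True if value is the second Sunday of its month (sketch)."""
    return value.weekday() == 6 and 8 <= value.day <= 14


assert second_sunday_check(datetime(2022, 1, 9, tzinfo=timezone.utc)) is True
assert second_sunday_check(datetime(2022, 1, 10, tzinfo=timezone.utc)) is False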
Test building the MySQLdb connect conv param.
def test_build_mysqldb_conv() -> None: """Test building the MySQLdb connect conv param.""" mock_converters = Mock(conversions={"original": "preserved"}) mock_constants = Mock(FIELD_TYPE=Mock(DATETIME="DATETIME")) with patch.dict( "sys.modules", **{"MySQLdb.constants": mock_constants, "MySQLdb.converters": mock_converters}, ): conv = util.build_mysqldb_conv() assert conv["original"] == "preserved" assert conv["DATETIME"]("INVALID") is None assert conv["DATETIME"]("2022-05-13T22:33:12.741") == datetime( 2022, 5, 13, 22, 33, 12, 741000, tzinfo=None )
Test executing with execute_stmt_lambda_element.
def test_execute_stmt_lambda_element( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test executing with execute_stmt_lambda_element.""" hass = hass_recorder() instance = recorder.get_instance(hass) hass.states.set("sensor.on", "on") new_state = hass.states.get("sensor.on") wait_recording_done(hass) now = dt_util.utcnow() tomorrow = now + timedelta(days=1) one_week_from_now = now + timedelta(days=7) all_calls = 0 class MockExecutor: def __init__(self, stmt): assert isinstance(stmt, StatementLambdaElement) def all(self): nonlocal all_calls all_calls += 1 if all_calls == 2: return ["mock_row"] raise SQLAlchemyError with session_scope(hass=hass) as session: # No time window, we always get a list metadata_id = instance.states_meta_manager.get("sensor.on", session, True) start_time_ts = dt_util.utcnow().timestamp() stmt = lambda_stmt( lambda: _get_single_entity_start_time_stmt( start_time_ts, metadata_id, False, False, False ) ) rows = util.execute_stmt_lambda_element(session, stmt) assert isinstance(rows, list) assert rows[0].state == new_state.state assert rows[0].metadata_id == metadata_id # Time window >= 2 days, we get a ChunkedIteratorResult rows = util.execute_stmt_lambda_element(session, stmt, now, one_week_from_now) assert isinstance(rows, ChunkedIteratorResult) row = next(rows) assert row.state == new_state.state assert row.metadata_id == metadata_id # Time window >= 2 days, we should not get a ChunkedIteratorResult # because orm_rows=False rows = util.execute_stmt_lambda_element( session, stmt, now, one_week_from_now, orm_rows=False ) assert not isinstance(rows, ChunkedIteratorResult) row = next(rows) assert row.state == new_state.state assert row.metadata_id == metadata_id # Time window < 2 days, we get a list rows = util.execute_stmt_lambda_element(session, stmt, now, tomorrow) assert isinstance(rows, list) assert rows[0].state == new_state.state assert rows[0].metadata_id == metadata_id with patch.object(session, "execute", MockExecutor): rows = util.execute_stmt_lambda_element(session, stmt, now, tomorrow) assert rows == ["mock_row"]
Test chunked_or_all can iterate chunk sizes larger than the passed in collection.
def test_chunked_or_all(): """Test chunked_or_all can iterate chunk sizes larger than the passed in collection.""" all_items = [] incoming = (1, 2, 3, 4) for chunk in chunked_or_all(incoming, 2): assert len(chunk) == 2 all_items.extend(chunk) assert all_items == [1, 2, 3, 4] all_items = [] incoming = (1, 2, 3, 4) for chunk in chunked_or_all(incoming, 5): assert len(chunk) == 4 # Verify the chunk is the same object as the incoming # collection since we want to avoid copying the collection # if we don't need to assert chunk is incoming all_items.extend(chunk) assert all_items == [1, 2, 3, 4]
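A sketch of a chunked_or_all-style helper matching the behaviour verified above: if the whole collection fits in one chunk it is yielded as-is (avoiding a copy), otherwise successive chunks are yielded. This is an illustrative stand-in under that assumption, not the actual homeassistant.util.chunked_or_all.

from collections.abc import Collection, Iterable
from itertools import islice


def chunked_or_all_sketch(collection: Collection, chunk_size: int) -> Iterable:
    """Yield the collection itself if it fits in one chunk, else chunks (sketch)."""
    if len(collection) <= chunk_size:
        yield collection
        return
    iterator = iter(collection)
    while chunk := list(islice(iterator, chunk_size)):
        yield chunk


incoming = (1, 2, 3, 4)
assert [list(c) for c in chunked_or_all_sketch(incoming, 2)] == [[1, 2], [3, 4]]
assert next(iter(chunked_or_all_sketch(incoming, 5))) is incoming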
Test version of create_engine that initializes with old schema. This simulates an existing db with the old schema.
def _create_engine_test(*args, **kwargs): """Test version of create_engine that initializes with old schema. This simulates an existing db with the old schema. """ importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] engine = create_engine(*args, **kwargs) old_db_schema.Base.metadata.create_all(engine) with Session(engine) as session: session.add( recorder.db_schema.StatisticsRuns(start=statistics.get_start_time()) ) session.add( recorder.db_schema.SchemaChanges( schema_version=old_db_schema.SCHEMA_VERSION ) ) session.commit() return engine
Ensure UNIT_SCHEMA is aligned with sensor UNIT_CONVERTERS.
def test_converters_align_with_sensor() -> None: """Ensure UNIT_SCHEMA is aligned with sensor UNIT_CONVERTERS.""" for converter in UNIT_CONVERTERS.values(): assert converter.UNIT_CLASS in UNIT_SCHEMA.schema for unit_class in UNIT_SCHEMA.schema: assert any(c for c in UNIT_CONVERTERS.values() if unit_class == c.UNIT_CLASS)
Test removal of duplicated statistics when there are no duplicates.
def test_delete_duplicates_no_duplicates( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test removal of duplicated statistics.""" hass = hass_recorder() wait_recording_done(hass) instance = recorder.get_instance(hass) with session_scope(hass=hass) as session: delete_statistics_duplicates(instance, hass, session) assert "duplicated statistics rows" not in caplog.text assert "Found non identical" not in caplog.text assert "Found duplicated" not in caplog.text
Test the recorder does not blow up if a statistics row is duplicated.
def test_duplicate_statistics_handle_integrity_error( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test the recorder does not blow up if statistics is duplicated.""" hass = hass_recorder() wait_recording_done(hass) period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_1 = [ { "start": period1, "last_reset": None, "state": 3, "sum": 5, }, ] external_energy_statistics_2 = [ { "start": period2, "last_reset": None, "state": 3, "sum": 6, } ] with ( patch.object(statistics, "_statistics_exists", return_value=False), patch.object( statistics, "_insert_statistics", wraps=statistics._insert_statistics ) as insert_statistics_mock, ): async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_2 ) wait_recording_done(hass) assert insert_statistics_mock.call_count == 3 with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.Statistics).all() assert len(tmp) == 2 assert "Blocked attempt to insert duplicated statistic rows" in caplog.text
Test version of create_engine that initializes with old schema. This simulates an existing db with the old schema.
def _create_engine_28(*args, **kwargs): """Test version of create_engine that initializes with old schema. This simulates an existing db with the old schema. """ module = "tests.components.recorder.db_schema_28" importlib.import_module(module) old_db_schema = sys.modules[module] engine = create_engine(*args, **kwargs) old_db_schema.Base.metadata.create_all(engine) with Session(engine) as session: session.add( recorder.db_schema.StatisticsRuns(start=statistics.get_start_time()) ) session.add( recorder.db_schema.SchemaChanges( schema_version=old_db_schema.SCHEMA_VERSION ) ) session.commit() return engine
Test removal of duplicated statistics metadata.
def test_delete_metadata_duplicates( caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: """Test removal of duplicated statistics.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" module = "tests.components.recorder.db_schema_28" importlib.import_module(module) old_db_schema = sys.modules[module] external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_metadata_2 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_co2_metadata = { "has_mean": True, "has_sum": False, "name": "Fossil percentage", "source": "test", "statistic_id": "test:fossil_percentage", "unit_of_measurement": "%", } # Create some duplicated statistics_meta with schema version 28 with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch( "homeassistant.components.recorder.core.create_engine", new=_create_engine_28, ), get_test_home_assistant() as hass, ): recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata) ) with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.StatisticsMeta).all() assert len(tmp) == 3 assert tmp[0].id == 1 assert tmp[0].statistic_id == "test:total_energy_import_tariff_1" assert tmp[1].id == 2 assert tmp[1].statistic_id == "test:total_energy_import_tariff_1" assert tmp[2].id == 3 assert tmp[2].statistic_id == "test:fossil_percentage" hass.stop() # Test that the duplicates are removed during migration from schema 28 with get_test_home_assistant() as hass: recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) assert "Deleted 1 duplicated statistics_meta rows" in caplog.text with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.StatisticsMeta).all() assert len(tmp) == 2 assert tmp[0].id == 2 assert tmp[0].statistic_id == "test:total_energy_import_tariff_1" assert tmp[1].id == 3 assert tmp[1].statistic_id == "test:fossil_percentage" hass.stop()
Test removal of duplicated statistics metadata with many duplicate rows.
def test_delete_metadata_duplicates_many( caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: """Test removal of duplicated statistics.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" module = "tests.components.recorder.db_schema_28" importlib.import_module(module) old_db_schema = sys.modules[module] external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_metadata_2 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_2", "unit_of_measurement": "kWh", } external_co2_metadata = { "has_mean": True, "has_sum": False, "name": "Fossil percentage", "source": "test", "statistic_id": "test:fossil_percentage", "unit_of_measurement": "%", } # Create some duplicated statistics with schema version 28 with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch( "homeassistant.components.recorder.core.create_engine", new=_create_engine_28, ), get_test_home_assistant() as hass, ): recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass) as session: session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1) ) for _ in range(1100): session.add( recorder.db_schema.StatisticsMeta.from_meta( external_energy_metadata_1 ) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata) ) session.add( recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata) ) hass.stop() # Test that the duplicates are removed during migration from schema 28 with get_test_home_assistant() as hass: recorder_helper.async_initialize_recorder(hass) setup_component(hass, "recorder", {"recorder": {"db_url": dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) assert "Deleted 1102 duplicated statistics_meta rows" in caplog.text with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.StatisticsMeta).all() assert len(tmp) == 3 assert tmp[0].id == 1101 assert tmp[0].statistic_id == "test:total_energy_import_tariff_1" assert tmp[1].id == 1103 assert tmp[1].statistic_id == "test:total_energy_import_tariff_2" assert tmp[2].id == 1105 assert tmp[2].statistic_id == "test:fossil_percentage" hass.stop()
Test removal of duplicated statistics metadata when there are no duplicates.
def test_delete_metadata_duplicates_no_duplicates( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test removal of duplicated statistics.""" hass = hass_recorder() wait_recording_done(hass) with session_scope(hass=hass) as session: instance = recorder.get_instance(hass) delete_statistics_meta_duplicates(instance, session) assert "duplicated statistics_meta rows" not in caplog.text
Override async_setup_entry.
def mock_setup_entry() -> Generator[AsyncMock, None, None]: """Override async_setup_entry.""" with patch( "homeassistant.components.refoss.async_setup_entry", return_value=True ) as mock_setup_entry: yield mock_setup_entry
Build mock device object.
def build_device_mock(name="r10", ip="1.1.1.1", mac="aabbcc112233"): """Build mock device object.""" return Mock( uuid="abc", dev_name=name, device_type="r10", fmware_version="1.1.1", hdware_version="1.1.2", inner_ip=ip, port="80", mac=mac, sub_type="eu", channels=[0], )
Build mock base device object.
def build_base_device_mock(name="r10", ip="1.1.1.1", mac="aabbcc112233"): """Build mock base device object.""" mock = Mock( device_info=build_device_mock(name=name, ip=ip, mac=mac), uuid="abc", dev_name=name, device_type="r10", fmware_version="1.1.1", hdware_version="1.1.2", inner_ip=ip, port="80", mac=mac, sub_type="eu", channels=[0], async_handle_update=AsyncMock(), async_turn_on=AsyncMock(), async_turn_off=AsyncMock(), async_toggle=AsyncMock(), ) mock.status = {0: True} return mock
Test creating a new config file.
def test_create_new(hass: HomeAssistant) -> None: """Test creating a new config file.""" with ( patch("builtins.open", mock_open()), patch("os.path.isfile", Mock(return_value=False)), patch.object(rtm.RememberTheMilkConfiguration, "save_config"), ): config = rtm.RememberTheMilkConfiguration(hass) config.set_token(PROFILE, TOKEN) assert config.get_token(PROFILE) == TOKEN
Test loading an existing token from the file.
def test_load_config(hass: HomeAssistant) -> None: """Test loading an existing token from the file.""" with ( patch("builtins.open", mock_open(read_data=JSON_STRING)), patch("os.path.isfile", Mock(return_value=True)), ): config = rtm.RememberTheMilkConfiguration(hass) assert config.get_token(PROFILE) == TOKEN
Test starts with invalid data and should not raise an exception.
def test_invalid_data(hass: HomeAssistant) -> None: """Test starts with invalid data and should not raise an exception.""" with ( patch("builtins.open", mock_open(read_data="random characters")), patch("os.path.isfile", Mock(return_value=True)), ): config = rtm.RememberTheMilkConfiguration(hass) assert config is not None
Test the hass to rtm task id mapping.
def test_id_map(hass: HomeAssistant) -> None: """Test the hass to rtm task id mapping.""" hass_id = "hass-id-1234" list_id = "mylist" timeseries_id = "my_timeseries" rtm_id = "rtm-id-4567" with ( patch("builtins.open", mock_open()), patch("os.path.isfile", Mock(return_value=False)), patch.object(rtm.RememberTheMilkConfiguration, "save_config"), ): config = rtm.RememberTheMilkConfiguration(hass) assert config.get_rtm_id(PROFILE, hass_id) is None config.set_rtm_id(PROFILE, hass_id, list_id, timeseries_id, rtm_id) assert (list_id, timeseries_id, rtm_id) == config.get_rtm_id(PROFILE, hass_id) config.delete_rtm_id(PROFILE, hass_id) assert config.get_rtm_id(PROFILE, hass_id) is None
Test loading an existing key map from the file.
def test_load_key_map(hass: HomeAssistant) -> None: """Test loading an existing key map from the file.""" with ( patch("builtins.open", mock_open(read_data=JSON_STRING)), patch("os.path.isfile", Mock(return_value=True)), ): config = rtm.RememberTheMilkConfiguration(hass) assert config.get_rtm_id(PROFILE, "1234") == ("0", "1", "2")
Stub copying the blueprints to the config folder.
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None: """Stub copying the blueprints to the config folder."""
Track calls to a mock service.
def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation")
Stub copying the blueprints to the config folder.
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None: """Stub copying the blueprints to the config folder."""
Track calls to a mock service.
def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation")
Stub copying the blueprints to the config folder.
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None: """Stub copying the blueprints to the config folder."""
Track calls to a mock service.
def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation")
Test module.__all__ is correctly set.
def test_all() -> None: """Test module.__all__ is correctly set.""" help_test_all(remote)
Test deprecated constants.
def test_deprecated_constants( caplog: pytest.LogCaptureFixture, enum: remote.RemoteEntityFeature, ) -> None: """Test deprecated constants.""" import_and_test_deprecated_constant_enum(caplog, remote, enum, "SUPPORT_", "2025.1")
Test deprecated supported features ints.
def test_deprecated_supported_features_ints(caplog: pytest.LogCaptureFixture) -> None: """Test deprecated supported features ints.""" class MockRemote(remote.RemoteEntity): @property def supported_features(self) -> int: """Return supported features.""" return 1 entity = MockRemote() assert entity.supported_features_compat is remote.RemoteEntityFeature(1) assert "MockRemote" in caplog.text assert "is using deprecated supported features values" in caplog.text assert "Instead it should use" in caplog.text assert "RemoteEntityFeature.LEARN_COMMAND" in caplog.text caplog.clear() assert entity.supported_features_compat is remote.RemoteEntityFeature(1) assert "is using deprecated supported features values" not in caplog.text
Override async_setup_entry.
def mock_setup_entry() -> Generator[AsyncMock, None, None]: """Override async_setup_entry.""" with patch( "homeassistant.components.renault.async_setup_entry", return_value=True ) as mock_setup_entry: yield mock_setup_entry
Parametrize vehicle type.
def get_vehicle_type(request: pytest.FixtureRequest) -> str: """Parametrize vehicle type.""" return request.param
Create and register mock config entry.
def get_config_entry(hass: HomeAssistant) -> ConfigEntry: """Create and register mock config entry.""" config_entry = MockConfigEntry( domain=DOMAIN, source=SOURCE_USER, data=MOCK_CONFIG, unique_id=MOCK_ACCOUNT_ID, options={}, entry_id="123456", ) config_entry.add_to_hass(hass) return config_entry
Mock fixtures.
def patch_get_vehicles(vehicle_type: str): """Mock fixtures.""" with patch( "renault_api.renault_account.RenaultAccount.get_vehicles", return_value=( schemas.KamereonVehiclesResponseSchema.loads( load_fixture(f"renault/vehicle_{vehicle_type}.json") ) ), ): yield
Create a vehicle proxy for testing.
def _get_fixtures(vehicle_type: str) -> MappingProxyType: """Create a vehicle proxy for testing.""" mock_vehicle = MOCK_VEHICLES.get(vehicle_type, {"endpoints": {}}) return { "battery_status": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['battery_status']}") if "battery_status" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleBatteryStatusDataSchema), "charge_mode": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['charge_mode']}") if "charge_mode" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleChargeModeDataSchema), "cockpit": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['cockpit']}") if "cockpit" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleCockpitDataSchema), "hvac_status": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['hvac_status']}") if "hvac_status" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleHvacStatusDataSchema), "location": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['location']}") if "location" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleLocationDataSchema), "lock_status": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['lock_status']}") if "lock_status" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleLockStatusDataSchema), "res_state": schemas.KamereonVehicleDataResponseSchema.loads( load_fixture(f"renault/{mock_vehicle['endpoints']['res_state']}") if "res_state" in mock_vehicle["endpoints"] else load_fixture("renault/no_data.json") ).get_attributes(schemas.KamereonVehicleResStateDataSchema), }
Mock fixtures.
def patch_fixtures_with_data(vehicle_type: str): """Mock fixtures.""" mock_fixtures = _get_fixtures(vehicle_type) with ( patch( "renault_api.renault_vehicle.RenaultVehicle.get_battery_status", return_value=mock_fixtures["battery_status"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_charge_mode", return_value=mock_fixtures["charge_mode"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_cockpit", return_value=mock_fixtures["cockpit"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_hvac_status", return_value=mock_fixtures["hvac_status"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_location", return_value=mock_fixtures["location"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_lock_status", return_value=mock_fixtures["lock_status"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_res_state", return_value=mock_fixtures["res_state"], ), ): yield
Mock fixtures.
def patch_fixtures_with_no_data(): """Mock fixtures.""" mock_fixtures = _get_fixtures("") with ( patch( "renault_api.renault_vehicle.RenaultVehicle.get_battery_status", return_value=mock_fixtures["battery_status"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_charge_mode", return_value=mock_fixtures["charge_mode"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_cockpit", return_value=mock_fixtures["cockpit"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_hvac_status", return_value=mock_fixtures["hvac_status"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_location", return_value=mock_fixtures["location"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_lock_status", return_value=mock_fixtures["lock_status"], ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_res_state", return_value=mock_fixtures["res_state"], ), ): yield
Mock fixtures.
def _patch_fixtures_with_side_effect(side_effect: Any): """Mock fixtures.""" with ( patch( "renault_api.renault_vehicle.RenaultVehicle.get_battery_status", side_effect=side_effect, ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_charge_mode", side_effect=side_effect, ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_cockpit", side_effect=side_effect, ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_hvac_status", side_effect=side_effect, ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_location", side_effect=side_effect, ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_lock_status", side_effect=side_effect, ), patch( "renault_api.renault_vehicle.RenaultVehicle.get_res_state", side_effect=side_effect, ), ): yield
Mock fixtures.
def patch_fixtures_with_access_denied_exception(): """Mock fixtures.""" access_denied_exception = exceptions.AccessDeniedException( "err.func.403", "Access is denied for this resource", ) with _patch_fixtures_with_side_effect(access_denied_exception): yield
Mock fixtures.
def patch_fixtures_with_invalid_upstream_exception(): """Mock fixtures.""" invalid_upstream_exception = exceptions.InvalidUpstreamException( "err.tech.500", "Invalid response from the upstream server (The request sent to the GDC is erroneous) ; 502 Bad Gateway", ) with _patch_fixtures_with_side_effect(invalid_upstream_exception): yield
Mock fixtures.
def patch_fixtures_with_not_supported_exception(): """Mock fixtures.""" not_supported_exception = exceptions.NotSupportedException( "err.tech.501", "This feature is not technically supported by this gateway", ) with _patch_fixtures_with_side_effect(not_supported_exception): yield
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", [Platform.BINARY_SENSOR]): yield
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", [Platform.BUTTON]): yield
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", [Platform.DEVICE_TRACKER]): yield
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", []): yield
Parametrize vehicle type.
def override_vehicle_type(request) -> str: """Parametrize vehicle type.""" return request.param
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", [Platform.SELECT]): yield
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", [Platform.SENSOR]): yield
Override PLATFORMS.
def override_platforms() -> Generator[None, None, None]: """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", []): yield
Parametrize vehicle type.
def override_vehicle_type(request) -> str:
    """Parametrize vehicle type."""
    return request.param
Get device_id.
def get_device_id(hass: HomeAssistant) -> str:
    """Get device_id."""
    device_registry = dr.async_get(hass)
    identifiers = {(DOMAIN, "VF1AAAAA555777999")}
    device = device_registry.async_get_device(identifiers=identifiers)
    return device.id
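get_device_id resolves the registry entry created for the mocked vehicle VIN, which is useful for targeting services or device automations at that device in tests. A hedged sketch of one such use; the service domain, service name, and data key are hypothetical and not taken from the integration.

# Hypothetical service call: service name and schema key are assumptions.
from homeassistant.core import HomeAssistant


async def call_example_vehicle_service(hass: HomeAssistant) -> None:
    device_id = get_device_id(hass)
    await hass.services.async_call(
        "renault",
        "ac_start",              # hypothetical service name
        {"vehicle": device_id},  # hypothetical schema key
        blocking=True,
    )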
Check icon attribute for inactive sensors.
def get_no_data_icon(expected_entity: MappingProxyType):
    """Check icon attribute for inactive sensors."""
    entity_id = expected_entity[ATTR_ENTITY_ID]
    return ICON_FOR_EMPTY_VALUES.get(entity_id, expected_entity.get(ATTR_ICON))
Ensure that the expected_device is correctly registered.
def check_device_registry(
    device_registry: DeviceRegistry, expected_device: MappingProxyType
) -> None:
    """Ensure that the expected_device is correctly registered."""
    assert len(device_registry.devices) == 1
    registry_entry = device_registry.async_get_device(
        identifiers=expected_device[ATTR_IDENTIFIERS]
    )
    assert registry_entry is not None
    assert registry_entry.identifiers == expected_device[ATTR_IDENTIFIERS]
    assert registry_entry.manufacturer == expected_device[ATTR_MANUFACTURER]
    assert registry_entry.name == expected_device[ATTR_NAME]
    assert registry_entry.model == expected_device[ATTR_MODEL]
    assert registry_entry.sw_version == expected_device[ATTR_SW_VERSION]
Ensure that the expected_entities are correct.
def check_entities(
    hass: HomeAssistant,
    entity_registry: EntityRegistry,
    expected_entities: MappingProxyType,
) -> None:
    """Ensure that the expected_entities are correct."""
    for expected_entity in expected_entities:
        entity_id = expected_entity[ATTR_ENTITY_ID]
        registry_entry = entity_registry.entities.get(entity_id)
        assert registry_entry is not None
        assert registry_entry.unique_id == expected_entity[ATTR_UNIQUE_ID]
        state = hass.states.get(entity_id)
        assert state.state == expected_entity[ATTR_STATE]
        for attr in FIXED_ATTRIBUTES + DYNAMIC_ATTRIBUTES:
            assert state.attributes.get(attr) == expected_entity.get(attr)
Ensure that the expected_entities are correct.
def check_entities_no_data(
    hass: HomeAssistant,
    entity_registry: EntityRegistry,
    expected_entities: MappingProxyType,
    expected_state: str,
) -> None:
    """Ensure that the expected_entities are correct."""
    for expected_entity in expected_entities:
        entity_id = expected_entity[ATTR_ENTITY_ID]
        registry_entry = entity_registry.entities.get(entity_id)
        assert registry_entry is not None
        assert registry_entry.unique_id == expected_entity[ATTR_UNIQUE_ID]
        state = hass.states.get(entity_id)
        assert state.state == expected_state
        for attr in FIXED_ATTRIBUTES:
            assert state.attributes.get(attr) == expected_entity.get(attr)
Ensure that the expected_entities are correct.
def check_entities_unavailable(
    hass: HomeAssistant,
    entity_registry: EntityRegistry,
    expected_entities: MappingProxyType,
) -> None:
    """Ensure that the expected_entities are correct."""
    for expected_entity in expected_entities:
        entity_id = expected_entity[ATTR_ENTITY_ID]
        registry_entry = entity_registry.entities.get(entity_id)
        assert registry_entry is not None, f"{entity_id} not found in registry"
        assert registry_entry.unique_id == expected_entity[ATTR_UNIQUE_ID]
        state = hass.states.get(entity_id)
        assert state.state == STATE_UNAVAILABLE
        for attr in FIXED_ATTRIBUTES:
            assert state.attributes.get(attr) == expected_entity.get(attr)
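The three check_* helpers above are meant to be called after an entry has been set up, against per-vehicle expectation tables. A sketch of the typical call order, assuming a MOCK_VEHICLES expectation table plus config_entry, vehicle_type, device_registry, and entity_registry fixtures; those names are assumptions, not taken from this listing.

# Sketch: MOCK_VEHICLES, config_entry and vehicle_type are assumed to exist elsewhere.
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant


async def test_sensors(
    hass: HomeAssistant, config_entry, device_registry, entity_registry, vehicle_type: str
) -> None:
    await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()

    expectations = MOCK_VEHICLES[vehicle_type]  # hypothetical expectation table
    check_device_registry(device_registry, expectations["expected_device"])
    check_entities(hass, entity_registry, expectations[Platform.SENSOR])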
Override async_setup_entry.
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
    """Override async_setup_entry."""
    with patch(
        "homeassistant.components.reolink.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        yield mock_setup_entry
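mock_setup_entry is the usual config-flow guard: it keeps the real integration from being set up while the flow is exercised, and lets the test assert that setup was triggered. A minimal sketch assuming the function above is registered as a fixture; the flow steps are elided rather than guessed at.

from homeassistant import config_entries
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType


async def test_flow_starts(hass: HomeAssistant, mock_setup_entry) -> None:
    result = await hass.config_entries.flow.async_init(
        "reolink", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == FlowResultType.FORM
    # ... submit user input and finish the flow here (omitted) ...
    # A finished flow is then typically checked with:
    # assert len(mock_setup_entry.mock_calls) == 1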
Mock reolink connection and return both the host_mock and host_mock_class.
def reolink_connect_class(
    mock_get_source_ip: None,
) -> Generator[MagicMock, None, None]:
    """Mock reolink connection and return both the host_mock and host_mock_class."""
    with (
        patch(
            "homeassistant.components.reolink.host.webhook.async_register",
            return_value=True,
        ),
        patch(
            "homeassistant.components.reolink.host.Host", autospec=True
        ) as host_mock_class,
    ):
        host_mock = host_mock_class.return_value
        host_mock.get_host_data.return_value = None
        host_mock.get_states.return_value = None
        host_mock.check_new_firmware.return_value = False
        host_mock.unsubscribe.return_value = True
        host_mock.logout.return_value = True
        host_mock.mac_address = TEST_MAC
        host_mock.uid = TEST_UID
        host_mock.onvif_enabled = True
        host_mock.rtmp_enabled = True
        host_mock.rtsp_enabled = True
        host_mock.nvr_name = TEST_NVR_NAME
        host_mock.port = TEST_PORT
        host_mock.use_https = TEST_USE_HTTPS
        host_mock.is_admin = True
        host_mock.user_level = "admin"
        host_mock.protocol = "rtsp"
        host_mock.channels = [0]
        host_mock.stream_channels = [0]
        host_mock.sw_version_update_required = False
        host_mock.hardware_version = "IPC_00000"
        host_mock.sw_version = "v1.0.0.0.0.0000"
        host_mock.manufacturer = "Reolink"
        host_mock.model = TEST_HOST_MODEL
        host_mock.camera_model.return_value = TEST_CAM_MODEL
        host_mock.camera_name.return_value = TEST_NVR_NAME
        host_mock.camera_sw_version.return_value = "v1.1.0.0.0.0000"
        host_mock.session_active = True
        host_mock.timeout = 60
        host_mock.renewtimer.return_value = 600
        host_mock.wifi_connection = False
        host_mock.wifi_signal = None
        host_mock.whiteled_mode_list.return_value = []
        host_mock.zoom_range.return_value = {
            "zoom": {"pos": {"min": 0, "max": 100}},
            "focus": {"pos": {"min": 0, "max": 100}},
        }
        host_mock.capabilities = {"Host": ["RTSP"], "0": ["motion_detection"]}
        host_mock.checked_api_versions = {"GetEvents": 1}
        host_mock.abilities = {"abilityChn": [{"aiTrack": {"permit": 0, "ver": 0}}]}
        yield host_mock_class
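A common companion to a class-level mock like this is a second fixture that hands tests the mocked Host instance directly, so individual tests can tweak its return values. A sketch under that assumption; the fixture name is illustrative and not taken from this listing.

import pytest
from unittest.mock import MagicMock


@pytest.fixture(name="reolink_connect")  # illustrative fixture name
def reolink_connect_fixture(reolink_connect_class: MagicMock) -> MagicMock:
    """Return the mocked Host instance that the integration will instantiate."""
    return reolink_connect_class.return_value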