Test getting states at a specific point in time for entities that never have been recorded.
def test_get_full_significant_states_with_session_entity_no_matches( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test getting states at a specific point in time for entities that never have been recorded.""" hass = hass_recorder() now = dt_util.utcnow() time_before_recorder_ran = now - timedelta(days=1000) with session_scope(hass=hass, read_only=True) as session: assert ( history.get_full_significant_states_with_session( hass, session, time_before_recorder_ran, now, entity_ids=["demo.id"] ) == {} ) assert ( history.get_full_significant_states_with_session( hass, session, time_before_recorder_ran, now, entity_ids=["demo.id", "demo.id2"], ) == {} )
Test getting states at a specific point in time for entities that never have been recorded.
def test_significant_states_with_session_entity_minimal_response_no_matches( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test getting states at a specific point in time for entities that never have been recorded.""" hass = hass_recorder() now = dt_util.utcnow() time_before_recorder_ran = now - timedelta(days=1000) with session_scope(hass=hass, read_only=True) as session: assert ( history.get_significant_states_with_session( hass, session, time_before_recorder_ran, now, entity_ids=["demo.id"], minimal_response=True, ) == {} ) assert ( history.get_significant_states_with_session( hass, session, time_before_recorder_ran, now, entity_ids=["demo.id", "demo.id2"], minimal_response=True, ) == {} )
Test get_significant_states_with_session with a single entity.
def test_significant_states_with_session_single_entity( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test get_significant_states_with_session with a single entity.""" hass = hass_recorder() hass.states.set("demo.id", "any", {"attr": True}) hass.states.set("demo.id", "any2", {"attr": True}) wait_recording_done(hass) now = dt_util.utcnow() with session_scope(hass=hass, read_only=True) as session: states = history.get_significant_states_with_session( hass, session, now - timedelta(days=1), now, entity_ids=["demo.id"], minimal_response=False, ) assert len(states["demo.id"]) == 2
Test state change during period.
def test_state_changes_during_period( hass_recorder: Callable[..., HomeAssistant], attributes, no_attributes, limit ) -> None: """Test state change during period.""" hass = hass_recorder() entity_id = "media_player.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state, attributes) wait_recording_done(hass) return hass.states.get(entity_id) start = dt_util.utcnow() point = start + timedelta(seconds=1) end = point + timedelta(seconds=1) with freeze_time(start) as freezer: set_state("idle") set_state("YouTube") freezer.move_to(point) states = [ set_state("idle"), set_state("Netflix"), set_state("Plex"), set_state("YouTube"), ] freezer.move_to(end) set_state("Netflix") set_state("Plex") hist = history.state_changes_during_period( hass, start, end, entity_id, no_attributes, limit=limit ) assert_multiple_states_equal_without_context(states[:limit], hist[entity_id])
Test state change during period.
def test_state_changes_during_period_last_reported( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test state change during period.""" hass = hass_recorder() entity_id = "media_player.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state) wait_recording_done(hass) return ha.State.from_dict(hass.states.get(entity_id).as_dict()) start = dt_util.utcnow() point1 = start + timedelta(seconds=1) point2 = point1 + timedelta(seconds=1) end = point2 + timedelta(seconds=1) with freeze_time(start) as freezer: set_state("idle") freezer.move_to(point1) states = [set_state("YouTube")] freezer.move_to(point2) set_state("YouTube") freezer.move_to(end) set_state("Netflix") hist = history.state_changes_during_period(hass, start, end, entity_id) assert_multiple_states_equal_without_context(states, hist[entity_id])
Test state change during period descending.
def test_state_changes_during_period_descending( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test state change during period descending.""" hass = hass_recorder() entity_id = "media_player.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state, {"any": 1}) wait_recording_done(hass) return hass.states.get(entity_id) start = dt_util.utcnow().replace(microsecond=0) point = start + timedelta(seconds=1) point2 = start + timedelta(seconds=1, microseconds=100) point3 = start + timedelta(seconds=1, microseconds=200) point4 = start + timedelta(seconds=1, microseconds=300) end = point + timedelta(seconds=1, microseconds=400) with freeze_time(start) as freezer: set_state("idle") set_state("YouTube") freezer.move_to(point) states = [set_state("idle")] freezer.move_to(point2) states.append(set_state("Netflix")) freezer.move_to(point3) states.append(set_state("Plex")) freezer.move_to(point4) states.append(set_state("YouTube")) freezer.move_to(end) set_state("Netflix") set_state("Plex") hist = history.state_changes_during_period( hass, start, end, entity_id, no_attributes=False, descending=False ) assert_multiple_states_equal_without_context(states, hist[entity_id]) hist = history.state_changes_during_period( hass, start, end, entity_id, no_attributes=False, descending=True ) assert_multiple_states_equal_without_context( states, list(reversed(list(hist[entity_id]))) ) start_time = point2 + timedelta(microseconds=10) hist = history.state_changes_during_period( hass, start_time, # Pick a point where we will generate a start time state end, entity_id, no_attributes=False, descending=True, include_start_time_state=True, ) hist_states = list(hist[entity_id]) assert hist_states[-1].last_updated == start_time assert hist_states[-1].last_changed == start_time assert len(hist_states) == 3 # Make sure they are in descending order assert ( hist_states[0].last_updated > hist_states[1].last_updated > hist_states[2].last_updated ) assert ( hist_states[0].last_changed > hist_states[1].last_changed > hist_states[2].last_changed ) hist = history.state_changes_during_period( hass, start_time, # Pick a point where we will generate a start time state end, entity_id, no_attributes=False, descending=False, include_start_time_state=True, ) hist_states = list(hist[entity_id]) assert hist_states[0].last_updated == start_time assert hist_states[0].last_changed == start_time assert len(hist_states) == 3 # Make sure they are in ascending order assert ( hist_states[0].last_updated < hist_states[1].last_updated < hist_states[2].last_updated ) assert ( hist_states[0].last_changed < hist_states[1].last_changed < hist_states[2].last_changed )
Test number of state changes.
def test_get_last_state_changes(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test number of state changes.""" hass = hass_recorder() entity_id = "sensor.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state) wait_recording_done(hass) return hass.states.get(entity_id) start = dt_util.utcnow() - timedelta(minutes=2) point = start + timedelta(minutes=1) point2 = point + timedelta(minutes=1, seconds=1) states = [] with freeze_time(start) as freezer: set_state("1") freezer.move_to(point) states.append(set_state("2")) freezer.move_to(point2) states.append(set_state("3")) hist = history.get_last_state_changes(hass, 2, entity_id) assert_multiple_states_equal_without_context(states, hist[entity_id])
Test number of state changes.
def test_get_last_state_changes_last_reported( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test number of state changes.""" hass = hass_recorder() entity_id = "sensor.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state) wait_recording_done(hass) return ha.State.from_dict(hass.states.get(entity_id).as_dict()) start = dt_util.utcnow() - timedelta(minutes=2) point = start + timedelta(minutes=1) point2 = point + timedelta(minutes=1, seconds=1) states = [] with freeze_time(start) as freezer: states.append(set_state("1")) freezer.move_to(point) set_state("1") freezer.move_to(point2) states.append(set_state("2")) hist = history.get_last_state_changes(hass, 2, entity_id) assert_multiple_states_equal_without_context(states, hist[entity_id])
Test getting the last state change for an entity.
def test_get_last_state_change(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test getting the last state change for an entity.""" hass = hass_recorder() entity_id = "sensor.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state) wait_recording_done(hass) return hass.states.get(entity_id) start = dt_util.utcnow() - timedelta(minutes=2) point = start + timedelta(minutes=1) point2 = point + timedelta(minutes=1, seconds=1) states = [] with freeze_time(start) as freezer: set_state("1") freezer.move_to(point) set_state("2") freezer.move_to(point2) states.append(set_state("3")) hist = history.get_last_state_changes(hass, 1, entity_id) assert_multiple_states_equal_without_context(states, hist[entity_id])
Ensure a state can pass through copy(). The filter integration uses copy() on states from history.
def test_ensure_state_can_be_copied( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Ensure a state can pass through copy(). The filter integration uses copy() on states from history. """ hass = hass_recorder() entity_id = "sensor.test" def set_state(state): """Set the state.""" hass.states.set(entity_id, state) wait_recording_done(hass) return hass.states.get(entity_id) start = dt_util.utcnow() - timedelta(minutes=2) point = start + timedelta(minutes=1) with freeze_time(start) as freezer: set_state("1") freezer.move_to(point) set_state("2") hist = history.get_last_state_changes(hass, 2, entity_id) assert_states_equal_without_context(copy(hist[entity_id][0]), hist[entity_id][0]) assert_states_equal_without_context(copy(hist[entity_id][1]), hist[entity_id][1])
Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
def test_get_significant_states(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned). """ hass = hass_recorder() zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four, entity_ids=list(states)) assert_dict_of_states_equal_without_context_and_last_changed(states, hist)
Test that only significant states are returned. When minimal_response is set, only the first and last states return a complete state. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
def test_get_significant_states_minimal_response( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that only significant states are returned. When minimal_response is set, only the first and last states return a complete state. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned). """ hass = hass_recorder() zero, four, states = record_states(hass) hist = history.get_significant_states( hass, zero, four, minimal_response=True, entity_ids=list(states) ) entities_with_reducible_states = [ "media_player.test", "media_player.test3", ] # All states for media_player.test are reduced down to last_changed and # state when minimal_response is set, except for the first state. We use # JSONEncoder to make sure that our pre-encoded last_changed is always the # same as what will happen when encoding a native state. for entity_id in entities_with_reducible_states: entity_states = states[entity_id] for state_idx in range(1, len(entity_states)): input_state = entity_states[state_idx] orig_last_changed = json.dumps( process_timestamp(input_state.last_changed), cls=JSONEncoder, ).replace('"', "") orig_state = input_state.state entity_states[state_idx] = { "last_changed": orig_last_changed, "state": orig_state, } assert len(hist) == len(states) assert_states_equal_without_context( states["media_player.test"][0], hist["media_player.test"][0] ) assert states["media_player.test"][1] == hist["media_player.test"][1] assert states["media_player.test"][2] == hist["media_player.test"][2] assert_multiple_states_equal_without_context( states["media_player.test2"], hist["media_player.test2"] ) assert_states_equal_without_context( states["media_player.test3"][0], hist["media_player.test3"][0] ) assert states["media_player.test3"][1] == hist["media_player.test3"][1] assert_multiple_states_equal_without_context( states["script.can_cancel_this_one"], hist["script.can_cancel_this_one"] ) assert_multiple_states_equal_without_context_and_last_changed( states["thermostat.test"], hist["thermostat.test"] ) assert_multiple_states_equal_without_context_and_last_changed( states["thermostat.test2"], hist["thermostat.test2"] )
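For orientation, a minimal sketch of the reduced shape the loop above builds: with minimal_response set, every state after the first is collapsed to a plain dict of last_changed and state. The entity values and timestamp below are illustrative, not taken from the test.
# Illustrative only: the reduced form of an intermediate state when
# minimal_response=True. Timestamp and value here are made up.
reduced_state = {
    "last_changed": "2024-01-01T00:00:01.000000+00:00",
    "state": "YouTube",
}
# The first state stays a full State object, which is why the test compares
# hist[...][0] with assert_states_equal_without_context but compares the
# later entries with plain ==.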
Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
def test_get_significant_states_with_initial( time_zone, hass_recorder: Callable[..., HomeAssistant] ) -> None: """Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned). """ hass = hass_recorder() hass.config.set_time_zone(time_zone) zero, four, states = record_states(hass) one_and_half = zero + timedelta(seconds=1.5) for entity_id in states: if entity_id == "media_player.test": states[entity_id] = states[entity_id][1:] for state in states[entity_id]: # If the state is recorded before the start time, it will have its last_updated and last_changed set to the start time. if state.last_updated < one_and_half: state.last_updated = one_and_half state.last_changed = one_and_half hist = history.get_significant_states( hass, one_and_half, four, include_start_time_state=True, entity_ids=list(states) ) assert_dict_of_states_equal_without_context_and_last_changed(states, hist)
Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
def test_get_significant_states_without_initial( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned). """ hass = hass_recorder() zero, four, states = record_states(hass) one = zero + timedelta(seconds=1) one_with_microsecond = zero + timedelta(seconds=1, microseconds=1) one_and_half = zero + timedelta(seconds=1.5) for entity_id in states: states[entity_id] = [ s for s in states[entity_id] if s.last_changed not in (one, one_with_microsecond) ] del states["media_player.test2"] del states["thermostat.test3"] hist = history.get_significant_states( hass, one_and_half, four, include_start_time_state=False, entity_ids=list(states), ) assert_dict_of_states_equal_without_context_and_last_changed(states, hist)
Test that only significant states are returned for one entity.
def test_get_significant_states_entity_id( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that only significant states are returned for one entity.""" hass = hass_recorder() zero, four, states = record_states(hass) del states["media_player.test2"] del states["media_player.test3"] del states["thermostat.test"] del states["thermostat.test2"] del states["thermostat.test3"] del states["script.can_cancel_this_one"] hist = history.get_significant_states(hass, zero, four, ["media_player.test"]) assert_dict_of_states_equal_without_context_and_last_changed(states, hist)
Test that only significant states are returned for multiple entities.
def test_get_significant_states_multiple_entity_ids( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that only significant states are returned for multiple entities.""" hass = hass_recorder() zero, four, states = record_states(hass) hist = history.get_significant_states( hass, zero, four, ["media_player.test", "thermostat.test"], ) assert_multiple_states_equal_without_context_and_last_changed( states["media_player.test"], hist["media_player.test"] ) assert_multiple_states_equal_without_context_and_last_changed( states["thermostat.test"], hist["thermostat.test"] )
Test order of results from get_significant_states. When entity ids are given, the results should be returned with the data in the same order.
def test_get_significant_states_are_ordered( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test order of results from get_significant_states. When entity ids are given, the results should be returned with the data in the same order. """ hass = hass_recorder() zero, four, _states = record_states(hass) entity_ids = ["media_player.test", "media_player.test2"] hist = history.get_significant_states(hass, zero, four, entity_ids) assert list(hist.keys()) == entity_ids entity_ids = ["media_player.test2", "media_player.test"] hist = history.get_significant_states(hass, zero, four, entity_ids) assert list(hist.keys()) == entity_ids
Test significant states when significant_states_only is set.
def test_get_significant_states_only( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test significant states when significant_states_only is set.""" hass = hass_recorder() entity_id = "sensor.test" def set_state(state, **kwargs): """Set the state.""" hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) start = dt_util.utcnow() - timedelta(minutes=4) points = [start + timedelta(minutes=i) for i in range(1, 4)] states = [] with freeze_time(start) as freezer: set_state("123", attributes={"attribute": 10.64}) freezer.move_to(points[0]) # Attributes are different, state not states.append(set_state("123", attributes={"attribute": 21.42})) freezer.move_to(points[1]) # state is different, attributes not states.append(set_state("32", attributes={"attribute": 21.42})) freezer.move_to(points[2]) # everything is different states.append(set_state("412", attributes={"attribute": 54.23})) hist = history.get_significant_states( hass, start, significant_changes_only=True, entity_ids=list({state.entity_id for state in states}), ) assert len(hist[entity_id]) == 2 assert not any( state.last_updated == states[0].last_updated for state in hist[entity_id] ) assert any( state.last_updated == states[1].last_updated for state in hist[entity_id] ) assert any( state.last_updated == states[2].last_updated for state in hist[entity_id] ) hist = history.get_significant_states( hass, start, significant_changes_only=False, entity_ids=list({state.entity_id for state in states}), ) assert len(hist[entity_id]) == 3 assert_multiple_states_equal_without_context_and_last_changed( states, hist[entity_id] )
Record some test states. We inject a bunch of state updates from media player, zone and thermostat.
def record_states(hass) -> tuple[datetime, datetime, dict[str, list[State]]]: """Record some test states. We inject a bunch of state updates from media player, zone and thermostat. """ mp = "media_player.test" mp2 = "media_player.test2" mp3 = "media_player.test3" therm = "thermostat.test" therm2 = "thermostat.test2" therm3 = "thermostat.test3" zone = "zone.home" script_c = "script.can_cancel_this_one" def set_state(entity_id, state, **kwargs): """Set the state.""" hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) zero = dt_util.utcnow() one = zero + timedelta(seconds=1) two = one + timedelta(seconds=1) three = two + timedelta(seconds=1) four = three + timedelta(seconds=1) states = {therm: [], therm2: [], therm3: [], mp: [], mp2: [], mp3: [], script_c: []} with freeze_time(one) as freezer: states[mp].append( set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)}) ) states[mp2].append( set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)}) ) states[mp3].append( set_state(mp3, "idle", attributes={"media_title": str(sentinel.mt1)}) ) states[therm].append( set_state(therm, 20, attributes={"current_temperature": 19.5}) ) # This state will be updated set_state(therm3, 20, attributes={"current_temperature": 19.5}) freezer.move_to(one + timedelta(microseconds=1)) states[mp].append( set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)}) ) freezer.move_to(two) # This state will be skipped only different in time set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt3)}) # This state will be skipped because domain is excluded set_state(zone, "zoning") states[script_c].append( set_state(script_c, "off", attributes={"can_cancel": True}) ) states[therm].append( set_state(therm, 21, attributes={"current_temperature": 19.8}) ) states[therm2].append( set_state(therm2, 20, attributes={"current_temperature": 19}) ) # This state will be updated set_state(therm3, 20, attributes={"current_temperature": 19.5}) freezer.move_to(three) states[mp].append( set_state(mp, "Netflix", attributes={"media_title": str(sentinel.mt4)}) ) states[mp3].append( set_state(mp3, "Netflix", attributes={"media_title": str(sentinel.mt3)}) ) # Attributes changed even though state is the same states[therm].append( set_state(therm, 21, attributes={"current_temperature": 20}) ) states[therm3].append( set_state(therm3, 20, attributes={"current_temperature": 19.5}) ) return zero, four, states
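As a reading aid for the tests that consume this helper, here is the timeline it records, summarized from the code above (offsets are relative to zero):
# record_states timeline (offsets from `zero`), summarized from the helper:
#   +1s       mp: idle      mp2: YouTube   mp3: idle    therm: 20   therm3: 20 (later overwritten)
#   +1s+1us   mp: YouTube
#   +2s       mp: YouTube (dropped: only the time differs)   zone: zoning (excluded domain)
#             script_c: off   therm: 21    therm2: 20   therm3: 20 (later overwritten)
#   +3s       mp: Netflix   mp3: Netflix   therm: 21 (attributes changed)   therm3: 20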
Test state change during period with multiple entities in the same test. This test ensures the sqlalchemy query cache does not generate incorrect results.
def test_state_changes_during_period_multiple_entities_single_test( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test state change during period with multiple entities in the same test. This test ensures the sqlalchemy query cache does not generate incorrect results. """ hass = hass_recorder() start = dt_util.utcnow() test_entities = {f"sensor.{i}": str(i) for i in range(30)} for entity_id, value in test_entities.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() for entity_id, value in test_entities.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 assert hist[entity_id][0].state == value
Test at least one entity id is required for get_significant_states.
def test_get_significant_states_without_entity_ids_raises( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test at least one entity id is required for get_significant_states.""" hass = hass_recorder() now = dt_util.utcnow() with pytest.raises(ValueError, match="entity_ids must be provided"): history.get_significant_states(hass, now, None)
Test at least one entity id is required for state_changes_during_period.
def test_state_changes_during_period_without_entity_ids_raises( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test at least one entity id is required for state_changes_during_period.""" hass = hass_recorder() now = dt_util.utcnow() with pytest.raises(ValueError, match="entity_id must be provided"): history.state_changes_during_period(hass, now, None)
Test passing filters is no longer supported.
def test_get_significant_states_with_filters_raises( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test passing filters is no longer supported.""" hass = hass_recorder() now = dt_util.utcnow() with pytest.raises(NotImplementedError, match="Filters are no longer supported"): history.get_significant_states( hass, now, None, ["media_player.test"], Filters() )
Test get_significant_states returns an empty dict when the entities are not in the db.
def test_get_significant_states_with_non_existent_entity_ids_returns_empty( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test get_significant_states returns an empty dict when the entities are not in the db.""" hass = hass_recorder() now = dt_util.utcnow() assert history.get_significant_states(hass, now, None, ["nonexistent.entity"]) == {}
Test state_changes_during_period returns an empty dict when the entities are not in the db.
def test_state_changes_during_period_with_non_existent_entity_ids_returns_empty( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test state_changes_during_period returns an empty dict when the entities are not in the db.""" hass = hass_recorder() now = dt_util.utcnow() assert ( history.state_changes_during_period(hass, now, None, "nonexistent.entity") == {} )
Test get_last_state_changes returns an empty dict when the entities are not in the db.
def test_get_last_state_changes_with_non_existent_entity_ids_returns_empty( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test get_last_state_changes returns an empty dict when the entities are not in the db.""" hass = hass_recorder() assert history.get_last_state_changes(hass, 1, "nonexistent.entity") == {}
Patch the default cache size to 8.
def small_cache_size() -> None: """Patch the default cache size to 8.""" with ( patch.object(state_attributes_table_manager, "CACHE_SIZE", 8), patch.object(states_meta_table_manager, "CACHE_SIZE", 8), ): yield
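A hedged usage sketch: this generator is presumably registered as a pytest fixture (the @pytest.fixture decorator, like the parametrize decorators elsewhere in these excerpts, is not shown), so a test opts in by naming it as a parameter. The test name below is hypothetical.
# Hypothetical consumer, assuming small_cache_size carries @pytest.fixture:
# listing it as a parameter keeps both table caches patched to 8 entries
# until the test finishes and the generator resumes past its yield.
def test_with_tiny_caches(small_cache_size, hass_recorder) -> None:
    hass = hass_recorder()
    # Recording more than 8 distinct attribute sets here would exercise
    # cache eviction in the state_attributes and states_meta managers.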
Return a recorder with reasonable defaults.
def _default_recorder(hass): """Return a recorder with reasonable defaults.""" return Recorder( hass, auto_purge=True, auto_repack=True, keep_days=7, commit_interval=1, uri="sqlite://", db_max_retries=10, db_retry_wait=3, entity_filter=CONFIG_SCHEMA({DOMAIN: {}}), exclude_event_types=set(), )
Test that state saving recovers after the database raises an OperationalError.
def test_saving_state_with_exception( hass_recorder: Callable[..., HomeAssistant], hass: HomeAssistant, caplog: pytest.LogCaptureFixture, ) -> None: """Test that state saving recovers after the database raises an OperationalError.""" hass = hass_recorder() entity_id = "test.recorder" state = "restoring_from_db" attributes = {"test_attr": 5, "test_attr_10": "nice"} def _throw_if_state_in_session(*args, **kwargs): for obj in get_instance(hass).event_session: if isinstance(obj, States): raise OperationalError( "insert the state", "fake params", "forced to fail" ) with ( patch("time.sleep"), patch.object( get_instance(hass).event_session, "flush", side_effect=_throw_if_state_in_session, ), ): hass.states.set(entity_id, "fail", attributes) wait_recording_done(hass) assert "Error executing query" in caplog.text assert "Error saving events" not in caplog.text caplog.clear() hass.states.set(entity_id, state, attributes) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: db_states = list(session.query(States)) assert len(db_states) >= 1 assert "Error executing query" not in caplog.text assert "Error saving events" not in caplog.text
Test saving state when there is an SQLAlchemyError.
def test_saving_state_with_sqlalchemy_exception( hass_recorder: Callable[..., HomeAssistant], hass: HomeAssistant, caplog: pytest.LogCaptureFixture, ) -> None: """Test saving state when there is an SQLAlchemyError.""" hass = hass_recorder() entity_id = "test.recorder" state = "restoring_from_db" attributes = {"test_attr": 5, "test_attr_10": "nice"} def _throw_if_state_in_session(*args, **kwargs): for obj in get_instance(hass).event_session: if isinstance(obj, States): raise SQLAlchemyError( "insert the state", "fake params", "forced to fail" ) with ( patch("time.sleep"), patch.object( get_instance(hass).event_session, "flush", side_effect=_throw_if_state_in_session, ), ): hass.states.set(entity_id, "fail", attributes) wait_recording_done(hass) assert "SQLAlchemyError error processing task" in caplog.text caplog.clear() hass.states.set(entity_id, state, attributes) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: db_states = list(session.query(States)) assert len(db_states) >= 1 assert "Error executing query" not in caplog.text assert "Error saving events" not in caplog.text assert "SQLAlchemyError error processing task" not in caplog.text
Test saving and restoring an event.
def test_saving_event(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test saving and restoring an event.""" hass = hass_recorder() event_type = "EVENT_TEST" event_data = {"test_attr": 5, "test_attr_10": "nice"} events = [] @callback def event_listener(event): """Record events from eventbus.""" if event.event_type == event_type: events.append(event) hass.bus.listen(MATCH_ALL, event_listener) hass.bus.fire(event_type, event_data) wait_recording_done(hass) assert len(events) == 1 event: Event = events[0] get_instance(hass).block_till_done() events: list[Event] = [] with session_scope(hass=hass, read_only=True) as session: for select_event, event_data, event_types in ( session.query(Events, EventData, EventTypes) .filter(Events.event_type_id.in_(select_event_type_ids((event_type,)))) .outerjoin(EventTypes, (Events.event_type_id == EventTypes.event_type_id)) .outerjoin(EventData, Events.data_id == EventData.data_id) ): select_event = cast(Events, select_event) event_data = cast(EventData, event_data) event_types = cast(EventTypes, event_types) native_event = select_event.to_native() native_event.data = event_data.to_native() native_event.event_type = event_types.event_type events.append(native_event) db_event = events[0] assert event.event_type == db_event.event_type assert event.data == db_event.data assert event.origin == db_event.origin # Recorder uses SQLite and stores datetimes as integer unix timestamps assert event.time_fired.replace(microsecond=0) == db_event.time_fired.replace( microsecond=0 )
Test saving a state with a commit interval of zero.
def test_saving_state_with_commit_interval_zero( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test saving a state with a commit interval of zero.""" hass = hass_recorder(config={"commit_interval": 0}) assert get_instance(hass).commit_interval == 0 entity_id = "test.recorder" state = "restoring_from_db" attributes = {"test_attr": 5, "test_attr_10": "nice"} hass.states.set(entity_id, state, attributes) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: db_states = list(session.query(States)) assert len(db_states) == 1 assert db_states[0].event_id is None
Add entities.
def _add_entities(hass, entity_ids): """Add entities.""" attributes = {"test_attr": 5, "test_attr_10": "nice"} for idx, entity_id in enumerate(entity_ids): hass.states.set(entity_id, f"state{idx}", attributes) wait_recording_done(hass) with session_scope(hass=hass) as session: states = [] for db_state, db_state_attributes, states_meta in ( session.query(States, StateAttributes, StatesMeta) .outerjoin( StateAttributes, States.attributes_id == StateAttributes.attributes_id ) .outerjoin(StatesMeta, States.metadata_id == StatesMeta.metadata_id) ): db_state.entity_id = states_meta.entity_id native_state = db_state.to_native() native_state.attributes = db_state_attributes.to_native() states.append(native_state) convert_pending_states_to_meta(get_instance(hass), session) return states
Verify the schema version without a migration.
def test_setup_without_migration(hass_recorder: Callable[..., HomeAssistant]) -> None: """Verify the schema version without a migration.""" hass = hass_recorder() assert recorder.get_instance(hass).schema_version == SCHEMA_VERSION
Test that only states from included domains are recorded.
def test_saving_state_include_domains( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that only states from included domains are recorded.""" hass = hass_recorder(config={"include": {"domains": "test2"}}) states = _add_entities(hass, ["test.recorder", "test2.recorder"]) assert len(states) == 1 assert _state_with_context(hass, "test2.recorder").as_dict() == states[0].as_dict()
Test that states from included domains and globs are recorded.
def test_saving_state_include_domains_globs( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that states from included domains and globs are recorded.""" hass = hass_recorder( config={"include": {"domains": "test2", "entity_globs": "*.included_*"}} ) states = _add_entities( hass, ["test.recorder", "test2.recorder", "test3.included_entity"] ) assert len(states) == 2 state_map = {state.entity_id: state for state in states} assert ( _state_with_context(hass, "test2.recorder").as_dict() == state_map["test2.recorder"].as_dict() ) assert ( _state_with_context(hass, "test3.included_entity").as_dict() == state_map["test3.included_entity"].as_dict() )
Test that only states from included entities are recorded.
def test_saving_state_incl_entities( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that only states from included entities are recorded.""" hass = hass_recorder(config={"include": {"entities": "test2.recorder"}}) states = _add_entities(hass, ["test.recorder", "test2.recorder"]) assert len(states) == 1 assert _state_with_context(hass, "test2.recorder").as_dict() == states[0].as_dict()
Test that states from excluded domains are not recorded.
def test_saving_state_exclude_domains( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that states from excluded domains are not recorded.""" hass = hass_recorder(config={"exclude": {"domains": "test"}}) states = _add_entities(hass, ["test.recorder", "test2.recorder"]) assert len(states) == 1 assert _state_with_context(hass, "test2.recorder").as_dict() == states[0].as_dict()
Test that states from excluded domains and globs are not recorded.
def test_saving_state_exclude_domains_globs( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that states from excluded domains and globs are not recorded.""" hass = hass_recorder( config={"exclude": {"domains": "test", "entity_globs": "*.excluded_*"}} ) states = _add_entities( hass, ["test.recorder", "test2.recorder", "test2.excluded_entity"] ) assert len(states) == 1 assert _state_with_context(hass, "test2.recorder").as_dict() == states[0].as_dict()
Test that states from excluded entities are not recorded.
def test_saving_state_exclude_entities( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that states from excluded entities are not recorded.""" hass = hass_recorder(config={"exclude": {"entities": "test.recorder"}}) states = _add_entities(hass, ["test.recorder", "test2.recorder"]) assert len(states) == 1 assert _state_with_context(hass, "test2.recorder").as_dict() == states[0].as_dict()
Test that an included entity overrides an excluded domain.
def test_saving_state_exclude_domain_include_entity( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that an included entity overrides an excluded domain.""" hass = hass_recorder( config={ "include": {"entities": "test.recorder"}, "exclude": {"domains": "test"}, } ) states = _add_entities(hass, ["test.recorder", "test2.recorder"]) assert len(states) == 2
Test that included entities override excluded domains and globs.
def test_saving_state_exclude_domain_glob_include_entity( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that included entities override excluded domains and globs.""" hass = hass_recorder( config={ "include": {"entities": ["test.recorder", "test.excluded_entity"]}, "exclude": {"domains": "test", "entity_globs": "*._excluded_*"}, } ) states = _add_entities( hass, ["test.recorder", "test2.recorder", "test.excluded_entity"] ) assert len(states) == 3
Test that an excluded entity overrides an included domain.
def test_saving_state_include_domain_exclude_entity( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that an excluded entity overrides an included domain.""" hass = hass_recorder( config={ "exclude": {"entities": "test.recorder"}, "include": {"domains": "test"}, } ) states = _add_entities(hass, ["test.recorder", "test2.recorder", "test.ok"]) assert len(states) == 1 assert _state_with_context(hass, "test.ok").as_dict() == states[0].as_dict() assert _state_with_context(hass, "test.ok").state == "state2"
Test that excluded entities override included domains and globs.
def test_saving_state_include_domain_glob_exclude_entity( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that excluded entities override included domains and globs.""" hass = hass_recorder( config={ "exclude": {"entities": ["test.recorder", "test2.included_entity"]}, "include": {"domains": "test", "entity_globs": "*._included_*"}, } ) states = _add_entities( hass, ["test.recorder", "test2.recorder", "test.ok", "test2.included_entity"] ) assert len(states) == 1 assert _state_with_context(hass, "test.ok").as_dict() == states[0].as_dict() assert _state_with_context(hass, "test.ok").state == "state2"
Test saving the state of a removed entity.
def test_saving_state_and_removing_entity( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test saving the state of a removed entity.""" hass = hass_recorder() entity_id = "lock.mine" hass.states.set(entity_id, STATE_LOCKED) hass.states.set(entity_id, STATE_UNLOCKED) hass.states.remove(entity_id) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: states = list( session.query(StatesMeta.entity_id, States.state) .outerjoin(StatesMeta, States.metadata_id == StatesMeta.metadata_id) .order_by(States.last_updated_ts) ) assert len(states) == 3 assert states[0].entity_id == entity_id assert states[0].state == STATE_LOCKED assert states[1].entity_id == entity_id assert states[1].state == STATE_UNLOCKED assert states[2].entity_id == entity_id assert states[2].state is None
Test saving states is limited to 16KiB of JSON encoded attributes.
def test_saving_state_with_oversized_attributes( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test saving states is limited to 16KiB of JSON encoded attributes.""" hass = hass_recorder() massive_dict = {"a": "b" * 16384} attributes = {"test_attr": 5, "test_attr_10": "nice"} hass.states.set("switch.sane", "on", attributes) hass.states.set("switch.too_big", "on", massive_dict) wait_recording_done(hass) states = [] with session_scope(hass=hass, read_only=True) as session: for db_state, db_state_attributes, states_meta in ( session.query(States, StateAttributes, StatesMeta) .outerjoin( StateAttributes, States.attributes_id == StateAttributes.attributes_id ) .outerjoin(StatesMeta, States.metadata_id == StatesMeta.metadata_id) ): db_state.entity_id = states_meta.entity_id native_state = db_state.to_native() native_state.attributes = db_state_attributes.to_native() states.append(native_state) assert "switch.too_big" in caplog.text assert len(states) == 2 assert _state_with_context(hass, "switch.sane").as_dict() == states[0].as_dict() assert states[1].state == "on" assert states[1].entity_id == "switch.too_big" assert states[1].attributes == {}
Test saving events is limited to 32KiB of JSON encoded data.
def test_saving_event_with_oversized_data( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test saving events is limited to 32KiB of JSON encoded data.""" hass = hass_recorder() massive_dict = {"a": "b" * 32768} event_data = {"test_attr": 5, "test_attr_10": "nice"} hass.bus.fire("test_event", event_data) hass.bus.fire("test_event_too_big", massive_dict) wait_recording_done(hass) events = {} with session_scope(hass=hass, read_only=True) as session: for _, data, event_type in ( session.query(Events.event_id, EventData.shared_data, EventTypes.event_type) .outerjoin(EventData, Events.data_id == EventData.data_id) .outerjoin(EventTypes, Events.event_type_id == EventTypes.event_type_id) .where(EventTypes.event_type.in_(["test_event", "test_event_too_big"])) ): events[event_type] = data assert "test_event_too_big" in caplog.text assert len(events) == 2 assert json_loads(events["test_event"]) == event_data assert json_loads(events["test_event_too_big"]) == {}
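A quick sanity check on the sizes involved in the two tests above: the payloads only just exceed the limits, since JSON encoding adds the key, quotes, and braces on top of the raw string (limits as stated in the docstrings).
import json

# "b" * 16384 is exactly 16 KiB of characters; the JSON wrapper pushes the
# encoded attributes over the 16384-byte limit. The same applies to the
# 32 KiB event-data limit.
assert len(json.dumps({"a": "b" * 16384})) > 16384
assert len(json.dumps({"a": "b" * 32768})) > 32768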
Test we handle invalid manually injected context ids.
def test_saving_event_invalid_context_ulid( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test we handle invalid manually injected context ids.""" hass = hass_recorder() event_data = {"test_attr": 5, "test_attr_10": "nice"} hass.bus.fire("test_event", event_data, context=Context(id="invalid")) wait_recording_done(hass) events = {} with session_scope(hass=hass, read_only=True) as session: for _, data, event_type in ( session.query(Events.event_id, EventData.shared_data, EventTypes.event_type) .outerjoin(EventData, Events.data_id == EventData.data_id) .outerjoin(EventTypes, Events.event_type_id == EventTypes.event_type_id) .where(EventTypes.event_type.in_(["test_event"])) ): events[event_type] = data assert "invalid" in caplog.text assert len(events) == 1 assert json_loads(events["test_event"]) == event_data
Test that a connection setup failure during recorder startup is handled.
def test_recorder_setup_failure(hass: HomeAssistant) -> None: """Test that a connection setup failure during recorder startup is handled.""" recorder_helper.async_initialize_recorder(hass) with ( patch.object(Recorder, "_setup_connection") as setup, patch("homeassistant.components.recorder.core.time.sleep"), ): setup.side_effect = ImportError("driver not found") rec = _default_recorder(hass) rec.async_initialize() rec.start() rec.join() hass.stop()
Test that a schema validation failure during recorder startup is handled.
def test_recorder_validate_schema_failure(hass: HomeAssistant) -> None: """Test that a schema validation failure during recorder startup is handled.""" recorder_helper.async_initialize_recorder(hass) with ( patch( "homeassistant.components.recorder.migration._get_schema_version" ) as inspect_schema_version, patch("homeassistant.components.recorder.core.time.sleep"), ): inspect_schema_version.side_effect = ImportError("driver not found") rec = _default_recorder(hass) rec.async_initialize() rec.start() rec.join() hass.stop()
Test recorder setup failure when the event listener is not setup.
def test_recorder_setup_failure_without_event_listener(hass: HomeAssistant) -> None: """Test recorder setup failure when the event listener is not setup.""" recorder_helper.async_initialize_recorder(hass) with ( patch.object(Recorder, "_setup_connection") as setup, patch("homeassistant.components.recorder.core.time.sleep"), ): setup.side_effect = ImportError("driver not found") rec = _default_recorder(hass) rec.start() rec.join() hass.stop()
Advance the clock and wait for any callbacks to finish.
def run_tasks_at_time(hass: HomeAssistant, test_time: datetime) -> None: """Advance the clock and wait for any callbacks to finish.""" fire_time_changed(hass, test_time) hass.block_till_done(wait_background_tasks=True) get_instance(hass).block_till_done() hass.block_till_done(wait_background_tasks=True)
Test periodic purge scheduling.
def test_auto_purge(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test periodic purge scheduling.""" timezone = "Europe/Copenhagen" hass = hass_recorder(timezone=timezone) tz = dt_util.get_time_zone(timezone) # Purging is scheduled to happen at 4:12am every day. Exercise this behavior by # firing time changed events and advancing the clock around this time. Pick an # arbitrary year in the future to avoid boundary conditions relative to the current # date. # # The clock is started at 4:15am then advanced forward below now = dt_util.utcnow() test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz) run_tasks_at_time(hass, test_time) with ( patch( "homeassistant.components.recorder.purge.purge_old_data", return_value=True ) as purge_old_data, patch( "homeassistant.components.recorder.tasks.periodic_db_cleanups" ) as periodic_db_cleanups, ): # Advance one day, and the purge task should run test_time = test_time + timedelta(days=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 1 assert len(periodic_db_cleanups.mock_calls) == 1 purge_old_data.reset_mock() periodic_db_cleanups.reset_mock() # Advance one day, and the purge task should run again test_time = test_time + timedelta(days=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 1 assert len(periodic_db_cleanups.mock_calls) == 1 purge_old_data.reset_mock() periodic_db_cleanups.reset_mock() # Advance less than one full day. The alarm should not yet fire. test_time = test_time + timedelta(hours=23) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 0 assert len(periodic_db_cleanups.mock_calls) == 0 # Advance to the next day and fire the alarm again test_time = test_time + timedelta(hours=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 1 assert len(periodic_db_cleanups.mock_calls) == 1
Test periodic purge scheduling does a repack on the second Sunday.
def test_auto_purge_auto_repack_on_second_sunday( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test periodic purge scheduling does a repack on the 2nd sunday.""" timezone = "Europe/Copenhagen" hass = hass_recorder(timezone=timezone) tz = dt_util.get_time_zone(timezone) # Purging is scheduled to happen at 4:12am every day. Exercise this behavior by # firing time changed events and advancing the clock around this time. Pick an # arbitrary year in the future to avoid boundary conditions relative to the current # date. # # The clock is started at 4:15am then advanced forward below now = dt_util.utcnow() test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz) run_tasks_at_time(hass, test_time) with ( patch( "homeassistant.components.recorder.core.is_second_sunday", return_value=True ), patch( "homeassistant.components.recorder.purge.purge_old_data", return_value=True ) as purge_old_data, patch( "homeassistant.components.recorder.tasks.periodic_db_cleanups" ) as periodic_db_cleanups, ): # Advance one day, and the purge task should run test_time = test_time + timedelta(days=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 1 args, _ = purge_old_data.call_args_list[0] assert args[2] is True # repack assert len(periodic_db_cleanups.mock_calls) == 1
Test periodic purge scheduling does not auto repack on the second Sunday if disabled.
def test_auto_purge_auto_repack_disabled_on_second_sunday( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test periodic purge scheduling does not auto repack on the 2nd sunday if disabled.""" timezone = "Europe/Copenhagen" hass = hass_recorder(config={CONF_AUTO_REPACK: False}, timezone=timezone) tz = dt_util.get_time_zone(timezone) # Purging is scheduled to happen at 4:12am every day. Exercise this behavior by # firing time changed events and advancing the clock around this time. Pick an # arbitrary year in the future to avoid boundary conditions relative to the current # date. # # The clock is started at 4:15am then advanced forward below now = dt_util.utcnow() test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz) run_tasks_at_time(hass, test_time) with ( patch( "homeassistant.components.recorder.core.is_second_sunday", return_value=True ), patch( "homeassistant.components.recorder.purge.purge_old_data", return_value=True ) as purge_old_data, patch( "homeassistant.components.recorder.tasks.periodic_db_cleanups" ) as periodic_db_cleanups, ): # Advance one day, and the purge task should run test_time = test_time + timedelta(days=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 1 args, _ = purge_old_data.call_args_list[0] assert args[2] is False # repack assert len(periodic_db_cleanups.mock_calls) == 1
Test periodic purge scheduling does not do a repack unless it is the second Sunday.
def test_auto_purge_no_auto_repack_on_not_second_sunday( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test periodic purge scheduling does not do a repack unless its the 2nd sunday.""" timezone = "Europe/Copenhagen" hass = hass_recorder(timezone=timezone) tz = dt_util.get_time_zone(timezone) # Purging is scheduled to happen at 4:12am every day. Exercise this behavior by # firing time changed events and advancing the clock around this time. Pick an # arbitrary year in the future to avoid boundary conditions relative to the current # date. # # The clock is started at 4:15am then advanced forward below now = dt_util.utcnow() test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz) run_tasks_at_time(hass, test_time) with ( patch( "homeassistant.components.recorder.core.is_second_sunday", return_value=False, ), patch( "homeassistant.components.recorder.purge.purge_old_data", return_value=True ) as purge_old_data, patch( "homeassistant.components.recorder.tasks.periodic_db_cleanups" ) as periodic_db_cleanups, ): # Advance one day, and the purge task should run test_time = test_time + timedelta(days=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 1 args, _ = purge_old_data.call_args_list[0] assert args[2] is False # repack assert len(periodic_db_cleanups.mock_calls) == 1
Test periodic db cleanups still run when auto purge is disabled.
def test_auto_purge_disabled(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test periodic db cleanups still run when auto purge is disabled.""" timezone = "Europe/Copenhagen" hass = hass_recorder(config={CONF_AUTO_PURGE: False}, timezone=timezone) tz = dt_util.get_time_zone(timezone) # Purging is scheduled to happen at 4:12am every day. We want # to verify that when auto purge is disabled periodic db cleanups # are still scheduled # # The clock is started at 4:15am then advanced forward below now = dt_util.utcnow() test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz) run_tasks_at_time(hass, test_time) with ( patch( "homeassistant.components.recorder.purge.purge_old_data", return_value=True ) as purge_old_data, patch( "homeassistant.components.recorder.tasks.periodic_db_cleanups" ) as periodic_db_cleanups, ): # Advance one day; the purge task should not run, but the periodic db cleanup should test_time = test_time + timedelta(days=1) run_tasks_at_time(hass, test_time) assert len(purge_old_data.mock_calls) == 0 assert len(periodic_db_cleanups.mock_calls) == 1 purge_old_data.reset_mock() periodic_db_cleanups.reset_mock()
Test periodic statistics scheduling.
def test_auto_statistics(hass_recorder: Callable[..., HomeAssistant], freezer) -> None: """Test periodic statistics scheduling.""" timezone = "Europe/Copenhagen" hass = hass_recorder(timezone=timezone) tz = dt_util.get_time_zone(timezone) stats_5min = [] stats_hourly = [] @callback def async_5min_stats_updated_listener(event: Event) -> None: """Handle recorder 5 min stat updated.""" stats_5min.append(event) def async_hourly_stats_updated_listener(event: Event) -> None: """Handle recorder hourly stat updated.""" stats_hourly.append(event) # Statistics are scheduled to run every 5 minutes. Exercise this behavior by # firing time changed events and advancing the clock around this time. Pick an # arbitrary year in the future to avoid boundary conditions relative to the current # date. # # The clock is started at 4:51am then advanced forward below now = dt_util.utcnow() test_time = datetime(now.year + 2, 1, 1, 4, 51, 0, tzinfo=tz) freezer.move_to(test_time.isoformat()) run_tasks_at_time(hass, test_time) hass.bus.listen( EVENT_RECORDER_5MIN_STATISTICS_GENERATED, async_5min_stats_updated_listener ) hass.bus.listen( EVENT_RECORDER_HOURLY_STATISTICS_GENERATED, async_hourly_stats_updated_listener ) real_compile_statistics = statistics.compile_statistics with patch( "homeassistant.components.recorder.statistics.compile_statistics", side_effect=real_compile_statistics, autospec=True, ) as compile_statistics: # Advance 5 minutes, and the statistics task should run test_time = test_time + timedelta(minutes=5) freezer.move_to(test_time.isoformat()) run_tasks_at_time(hass, test_time) assert len(compile_statistics.mock_calls) == 1 assert len(stats_5min) == 1 assert len(stats_hourly) == 0 compile_statistics.reset_mock() # Advance 5 minutes, and the statistics task should run again test_time = test_time + timedelta(minutes=5) freezer.move_to(test_time.isoformat()) run_tasks_at_time(hass, test_time) assert len(compile_statistics.mock_calls) == 1 assert len(stats_5min) == 2 assert len(stats_hourly) == 1 compile_statistics.reset_mock() # Advance less than 5 minutes. The task should not run. test_time = test_time + timedelta(minutes=3) freezer.move_to(test_time.isoformat()) run_tasks_at_time(hass, test_time) assert len(compile_statistics.mock_calls) == 0 assert len(stats_5min) == 2 assert len(stats_hourly) == 1 # Advance 5 minutes, and the statistics task should run again test_time = test_time + timedelta(minutes=5) freezer.move_to(test_time.isoformat()) run_tasks_at_time(hass, test_time) assert len(compile_statistics.mock_calls) == 1 assert len(stats_5min) == 3 assert len(stats_hourly) == 1
Test statistics_runs is initiated when DB is created.
def test_statistics_runs_initiated(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test statistics_runs is initiated when DB is created.""" now = dt_util.utcnow() with patch( "homeassistant.components.recorder.core.dt_util.utcnow", return_value=now ): hass = hass_recorder() wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: statistics_runs = list(session.query(StatisticsRuns)) assert len(statistics_runs) == 1 last_run = process_timestamp(statistics_runs[0].start) assert process_timestamp(last_run) == now.replace( minute=now.minute - now.minute % 5, second=0, microsecond=0 ) - timedelta(minutes=5)
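The expected value in the assertion above is the start of the previous five-minute statistics window. A worked example of the rounding, with a hypothetical clock time:
from datetime import datetime, timedelta

# Round `now` down to the nearest 5-minute boundary, then step back one full
# period: that is the start of the statistics run created with the database.
now = datetime(2024, 1, 1, 4, 17, 42, 123456)  # hypothetical
floored = now.replace(minute=now.minute - now.minute % 5, second=0, microsecond=0)
assert floored == datetime(2024, 1, 1, 4, 15)
assert floored - timedelta(minutes=5) == datetime(2024, 1, 1, 4, 10)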
Test missing statistics are compiled on startup.
def test_compile_missing_statistics( tmp_path: Path, freezer: FrozenDateTimeFactory ) -> None: """Test missing statistics are compiled on startup.""" now = dt_util.utcnow().replace(minute=0, second=0, microsecond=0) test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" with get_test_home_assistant() as hass: recorder_helper.async_initialize_recorder(hass) setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: statistics_runs = list(session.query(StatisticsRuns)) assert len(statistics_runs) == 1 last_run = process_timestamp(statistics_runs[0].start) assert last_run == now - timedelta(minutes=5) wait_recording_done(hass) wait_recording_done(hass) hass.stop() # Start Home Assistant one hour later stats_5min = [] stats_hourly = [] @callback def async_5min_stats_updated_listener(event: Event) -> None: """Handle recorder 5 min stat updated.""" stats_5min.append(event) def async_hourly_stats_updated_listener(event: Event) -> None: """Handle recorder hourly stat updated.""" stats_hourly.append(event) freezer.tick(timedelta(hours=1)) with get_test_home_assistant() as hass: hass.bus.listen( EVENT_RECORDER_5MIN_STATISTICS_GENERATED, async_5min_stats_updated_listener ) hass.bus.listen( EVENT_RECORDER_HOURLY_STATISTICS_GENERATED, async_hourly_stats_updated_listener, ) recorder_helper.async_initialize_recorder(hass) setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}}) hass.start() wait_recording_done(hass) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: statistics_runs = list(session.query(StatisticsRuns)) assert len(statistics_runs) == 13 # one run from the first start plus 12 back-compiled 5-minute runs last_run = process_timestamp(statistics_runs[1].start) assert last_run == now assert len(stats_5min) == 1 assert len(stats_hourly) == 1 wait_recording_done(hass) wait_recording_done(hass) hass.stop()
Test saving sets old state.
def test_saving_sets_old_state(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test saving sets old state.""" hass = hass_recorder() hass.states.set("test.one", "s1", {}) hass.states.set("test.two", "s2", {}) wait_recording_done(hass) hass.states.set("test.one", "s3", {}) hass.states.set("test.two", "s4", {}) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: states = list( session.query( StatesMeta.entity_id, States.state_id, States.old_state_id, States.state ).outerjoin(StatesMeta, States.metadata_id == StatesMeta.metadata_id) ) assert len(states) == 4 states_by_state = {state.state: state for state in states} assert states_by_state["s1"].entity_id == "test.one" assert states_by_state["s2"].entity_id == "test.two" assert states_by_state["s3"].entity_id == "test.one" assert states_by_state["s4"].entity_id == "test.two" assert states_by_state["s1"].old_state_id is None assert states_by_state["s2"].old_state_id is None assert states_by_state["s3"].old_state_id == states_by_state["s1"].state_id assert states_by_state["s4"].old_state_id == states_by_state["s2"].state_id
Test saving data that cannot be serialized does not crash.
def test_saving_state_with_serializable_data( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test saving data that cannot be serialized does not crash.""" hass = hass_recorder() hass.bus.fire("bad_event", {"fail": CannotSerializeMe()}) hass.states.set("test.one", "s1", {"fail": CannotSerializeMe()}) wait_recording_done(hass) hass.states.set("test.two", "s2", {}) wait_recording_done(hass) hass.states.set("test.two", "s3", {}) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: states = list( session.query( StatesMeta.entity_id, States.state_id, States.old_state_id, States.state ).outerjoin(StatesMeta, States.metadata_id == StatesMeta.metadata_id) ) assert len(states) == 2 states_by_state = {state.state: state for state in states} assert states_by_state["s2"].entity_id == "test.two" assert states_by_state["s3"].entity_id == "test.two" assert states_by_state["s2"].old_state_id is None assert states_by_state["s3"].old_state_id == states_by_state["s2"].state_id assert "State is not JSON serializable" in caplog.text
Test the services exist.
def test_has_services(hass_recorder: Callable[..., HomeAssistant]) -> None: """Test the services exist.""" hass = hass_recorder() assert hass.services.has_service(DOMAIN, SERVICE_DISABLE) assert hass.services.has_service(DOMAIN, SERVICE_ENABLE) assert hass.services.has_service(DOMAIN, SERVICE_PURGE) assert hass.services.has_service(DOMAIN, SERVICE_PURGE_ENTITIES)
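The purge services verified above can also be invoked directly; a minimal sketch, assuming the documented recorder.purge options (keep_days, repack) — the values are illustrative, not taken from this test suite.

    hass.services.call(
        DOMAIN,
        SERVICE_PURGE,
        # keep_days/repack are documented recorder.purge fields; 7/False
        # are illustrative values only.
        {"keep_days": 7, "repack": False},
        blocking=True,
    )
    wait_recording_done(hass)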
Test that events are not recorded when recorder is disabled using service.
def test_service_disable_events_not_recording( hass_recorder: Callable[..., HomeAssistant], ) -> None: """Test that events are not recorded when recorder is disabled using service.""" hass = hass_recorder() hass.services.call( DOMAIN, SERVICE_DISABLE, {}, blocking=True, ) event_type = "EVENT_TEST" events = [] @callback def event_listener(event): """Record events from eventbus.""" if event.event_type == event_type: events.append(event) hass.bus.listen(MATCH_ALL, event_listener) event_data1 = {"test_attr": 5, "test_attr_10": "nice"} hass.bus.fire(event_type, event_data1) wait_recording_done(hass) assert len(events) == 1 event = events[0] with session_scope(hass=hass, read_only=True) as session: db_events = list( session.query(Events) .filter(Events.event_type_id.in_(select_event_type_ids((event_type,)))) .outerjoin(EventTypes, (Events.event_type_id == EventTypes.event_type_id)) ) assert len(db_events) == 0 hass.services.call( DOMAIN, SERVICE_ENABLE, {}, blocking=True, ) event_data2 = {"attr_one": 5, "attr_two": "nice"} hass.bus.fire(event_type, event_data2) wait_recording_done(hass) assert len(events) == 2 assert events[0] != events[1] assert events[0].data != events[1].data db_events = [] with session_scope(hass=hass, read_only=True) as session: for select_event, event_data, event_types in ( session.query(Events, EventData, EventTypes) .filter(Events.event_type_id.in_(select_event_type_ids((event_type,)))) .outerjoin(EventTypes, (Events.event_type_id == EventTypes.event_type_id)) .outerjoin(EventData, Events.data_id == EventData.data_id) ): select_event = cast(Events, select_event) event_data = cast(EventData, event_data) event_types = cast(EventTypes, event_types) native_event = select_event.to_native() native_event.data = event_data.to_native() native_event.event_type = event_types.event_type db_events.append(native_event) assert len(db_events) == 1 db_event = db_events[0] event = events[1] assert event.event_type == db_event.event_type assert event.data == db_event.data assert event.origin == db_event.origin assert event.time_fired.replace(microsecond=0) == db_event.time_fired.replace( microsecond=0 )
Test that state changes are not recorded when recorder is disabled using service.
def test_service_disable_states_not_recording(
    hass_recorder: Callable[..., HomeAssistant],
) -> None:
    """Test that state changes are not recorded when recorder is disabled using service."""
    hass = hass_recorder()

    hass.services.call(
        DOMAIN,
        SERVICE_DISABLE,
        {},
        blocking=True,
    )

    hass.states.set("test.one", "on", {})
    wait_recording_done(hass)

    with session_scope(hass=hass, read_only=True) as session:
        assert len(list(session.query(States))) == 0

    hass.services.call(
        DOMAIN,
        SERVICE_ENABLE,
        {},
        blocking=True,
    )

    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)

    with session_scope(hass=hass, read_only=True) as session:
        db_states = list(session.query(States))
        assert len(db_states) == 1
        assert db_states[0].event_id is None
        # entity_id lives in StatesMeta rather than on the States row,
        # so set it manually before converting to a native state.
        db_states[0].entity_id = "test.two"
        assert (
            db_states[0].to_native().as_dict()
            == _state_with_context(hass, "test.two").as_dict()
        )
Test that runs are still recorded when recorder is disabled.
def test_service_disable_run_information_recorded(tmp_path: Path) -> None: """Test that runs are still recorded when recorder is disabled.""" test_dir = tmp_path.joinpath("sqlite") test_dir.mkdir() test_db_file = test_dir.joinpath("test_run_info.db") dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}" with get_test_home_assistant() as hass: recorder_helper.async_initialize_recorder(hass) setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}}) hass.start() wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: db_run_info = list(session.query(RecorderRuns)) assert len(db_run_info) == 1 assert db_run_info[0].start is not None assert db_run_info[0].end is None hass.services.call( DOMAIN, SERVICE_DISABLE, {}, blocking=True, ) wait_recording_done(hass) hass.stop() with get_test_home_assistant() as hass: recorder_helper.async_initialize_recorder(hass) setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}}) hass.start() wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: db_run_info = list(session.query(RecorderRuns)) assert len(db_run_info) == 2 assert db_run_info[0].start is not None assert db_run_info[0].end is not None assert db_run_info[1].start is not None assert db_run_info[1].end is None hass.stop()
Test that entity ID filtering filters string and list.
def test_entity_id_filter(hass_recorder: Callable[..., HomeAssistant]) -> None:
    """Test that entity ID filtering filters string and list."""
    hass = hass_recorder(
        config={
            "include": {"domains": "hello"},
            "exclude": {"domains": "hidden_domain"},
        }
    )
    event_types = ("hello",)

    for idx, data in enumerate(
        (
            {},
            {"entity_id": "hello.world"},
            {"entity_id": ["hello.world"]},
            {"entity_id": ["hello.world", "hidden_domain.person"]},
            {"entity_id": {"unexpected": "data"}},
        )
    ):
        hass.bus.fire("hello", data)
        wait_recording_done(hass)

        with session_scope(hass=hass, read_only=True) as session:
            db_events = list(
                session.query(Events).filter(
                    Events.event_type_id.in_(select_event_type_ids(event_types))
                )
            )
            assert len(db_events) == idx + 1, data

    for data in (
        {"entity_id": "hidden_domain.person"},
        {"entity_id": ["hidden_domain.person"]},
    ):
        hass.bus.fire("hello", data)
        wait_recording_done(hass)

        with session_scope(hass=hass, read_only=True) as session:
            db_events = list(
                session.query(Events).filter(
                    Events.event_type_id.in_(select_event_type_ids(event_types))
                )
            )
            # Still idx + 1 events: excluded entities are not recorded
            assert len(db_events) == idx + 1, data
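The include/exclude schema also accepts lists of domains; a hedged sketch of the list form of the same filter config (the recorder filter schema coerces single strings to lists).

    hass = hass_recorder(
        config={
            # Equivalent to the string form used in the test above.
            "include": {"domains": ["hello"]},
            "exclude": {"domains": ["hidden_domain"]},
        }
    )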
Test deduplication of event data inside the commit interval.
def test_deduplication_event_data_inside_commit_interval( hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture ) -> None: """Test deduplication of event data inside the commit interval.""" hass = hass_recorder() for _ in range(10): hass.bus.fire("this_event", {"de": "dupe"}) wait_recording_done(hass) for _ in range(10): hass.bus.fire("this_event", {"de": "dupe"}) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: event_types = ("this_event",) events = list( session.query(Events) .filter(Events.event_type_id.in_(select_event_type_ids(event_types))) .outerjoin(EventTypes, (Events.event_type_id == EventTypes.event_type_id)) .outerjoin(EventData, (Events.data_id == EventData.data_id)) ) assert len(events) == 20 first_data_id = events[0].data_id assert all(event.data_id == first_data_id for event in events)
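A changed payload should be stored as its own EventData row rather than reusing the deduplicated one; a minimal follow-on sketch under that assumption (not part of the original test).

    hass.bus.fire("this_event", {"de": "other"})
    wait_recording_done(hass)

    with session_scope(hass=hass, read_only=True) as session:
        events = list(
            session.query(Events)
            .filter(Events.event_type_id.in_(select_event_type_ids(("this_event",))))
            .order_by(Events.event_id)
        )
        # The new payload is expected to get a fresh data_id.
        assert events[-1].data_id != events[0].data_id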
Test deduplication of state attributes inside the commit interval.
def test_deduplication_state_attributes_inside_commit_interval( small_cache_size: None, hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture, ) -> None: """Test deduplication of state attributes inside the commit interval.""" hass = hass_recorder() entity_id = "test.recorder" attributes = {"test_attr": 5, "test_attr_10": "nice"} hass.states.set(entity_id, "on", attributes) hass.states.set(entity_id, "off", attributes) # Now exhaust the cache to ensure we go back to the db for attr_id in range(5): hass.states.set(entity_id, "on", {"test_attr": attr_id}) hass.states.set(entity_id, "off", {"test_attr": attr_id}) wait_recording_done(hass) for _ in range(5): hass.states.set(entity_id, "on", attributes) hass.states.set(entity_id, "off", attributes) wait_recording_done(hass) with session_scope(hass=hass, read_only=True) as session: states = list( session.query(States).outerjoin( StateAttributes, (States.attributes_id == StateAttributes.attributes_id) ) ) assert len(states) == 22 first_attributes_id = states[0].attributes_id last_attributes_id = states[-1].attributes_id assert first_attributes_id == last_attributes_id
Test that all tables use the default table args.
def test_all_tables_use_default_table_args(hass: HomeAssistant) -> None: """Test that all tables use the default table args.""" for table in db_schema.Base.metadata.tables.values(): assert table.kwargs.items() >= db_schema._DEFAULT_TABLE_ARGS.items()
Test that an invalid new version raises an exception.
def test_invalid_update(hass: HomeAssistant) -> None: """Test that an invalid new version raises an exception.""" with pytest.raises(ValueError): migration._apply_update(Mock(), hass, Mock(), Mock(), -1, 0)
Test that modify column generates the expected query.
def test_modify_column(engine_type, substr) -> None: """Test that modify column generates the expected query.""" connection = Mock() session = Mock() session.connection = Mock(return_value=connection) instance = Mock() instance.get_session = Mock(return_value=session) engine = Mock() engine.dialect.name = engine_type migration._modify_columns( instance.get_session, engine, "events", ["event_type VARCHAR(64)"] ) if substr: assert substr in connection.execute.call_args[0][0].text else: assert not connection.execute.called
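The engine_type and substr arguments presumably come from a parametrize decorator not included in this excerpt; a hypothetical reconstruction — the dialect names are real SupportedDialect values, but the expected SQL fragments are assumptions for illustration only.

    # Hypothetical parametrization; the SQL substrings are guesses, not
    # verified against the recorder source.
    @pytest.mark.parametrize(
        ("engine_type", "substr"),
        [
            ("mysql", "MODIFY event_type VARCHAR(64)"),
            ("postgresql", "ALTER event_type TYPE VARCHAR(64)"),
            ("sqlite", None),  # no in-place column modification on SQLite
        ],
    )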
Test that add column will continue if column exists.
def test_forgiving_add_column(recorder_db_url: str) -> None: """Test that add column will continue if column exists.""" engine = create_engine(recorder_db_url, poolclass=StaticPool) with Session(engine) as session: session.execute(text("CREATE TABLE hello (id int)")) instance = Mock() instance.get_session = Mock(return_value=session) migration._add_columns( instance.get_session, "hello", ["context_id CHARACTER(36)"] ) migration._add_columns( instance.get_session, "hello", ["context_id CHARACTER(36)"] ) engine.dispose()
Test that add index will continue if index exists.
def test_forgiving_add_index(recorder_db_url: str) -> None: """Test that add index will continue if index exists.""" engine = create_engine(recorder_db_url, poolclass=StaticPool) db_schema.Base.metadata.create_all(engine) with Session(engine) as session: instance = Mock() instance.get_session = Mock(return_value=session) migration._create_index( instance.get_session, "states", "ix_states_context_id_bin" ) engine.dispose()
Test that drop index will continue if index drop fails.
def test_forgiving_drop_index( recorder_db_url: str, caplog: pytest.LogCaptureFixture ) -> None: """Test that drop index will continue if index drop fails.""" engine = create_engine(recorder_db_url, poolclass=StaticPool) db_schema.Base.metadata.create_all(engine) with Session(engine) as session: instance = Mock() instance.get_session = Mock(return_value=session) migration._drop_index( instance.get_session, "states", "ix_states_context_id_bin" ) migration._drop_index( instance.get_session, "states", "ix_states_context_id_bin" ) with ( patch( "homeassistant.components.recorder.migration.get_index_by_name", return_value="ix_states_context_id_bin", ), patch.object( session, "connection", side_effect=SQLAlchemyError("connection failure") ), ): migration._drop_index( instance.get_session, "states", "ix_states_context_id_bin" ) assert "Failed to drop index" in caplog.text assert "connection failure" in caplog.text caplog.clear() with ( patch( "homeassistant.components.recorder.migration.get_index_by_name", return_value="ix_states_context_id_bin", ), patch.object( session, "connection", side_effect=SQLAlchemyError("connection failure") ), ): migration._drop_index( instance.get_session, "states", "ix_states_context_id_bin", quiet=True ) assert "Failed to drop index" not in caplog.text assert "connection failure" not in caplog.text engine.dispose()
Test that add index will continue if index exists on mysql and postgres.
def test_forgiving_add_index_with_other_db_types( caplog: pytest.LogCaptureFixture, exception_type ) -> None: """Test that add index will continue if index exists on mysql and postgres.""" mocked_index = Mock() type(mocked_index).name = "ix_states_context_id" mocked_index.create = Mock( side_effect=exception_type( "CREATE INDEX ix_states_old_state_id ON states (old_state_id);", [], 'relation "ix_states_old_state_id" already exists', ) ) mocked_table = Mock() type(mocked_table).indexes = PropertyMock(return_value=[mocked_index]) with patch( "homeassistant.components.recorder.migration.Table", return_value=mocked_table ): migration._create_index(Mock(), "states", "ix_states_context_id") assert "already exists on states" in caplog.text assert "continuing" in caplog.text
Test we raise an exception if strings are not present.
def test_raise_if_exception_missing_str() -> None: """Test we raise an exception if strings are not present.""" programming_exc = ProgrammingError("select * from;", Mock(), Mock()) programming_exc.__cause__ = MockPyODBCProgrammingError( "[42S11] [FreeTDS][SQL Server]The operation failed because an index or statistics with name 'ix_states_old_state_id' already exists on table 'states'. (1913) (SQLExecDirectW)" ) migration.raise_if_exception_missing_str( programming_exc, ["already exists", "duplicate"] ) with pytest.raises(ProgrammingError): migration.raise_if_exception_missing_str(programming_exc, ["not present"])
Test we raise an exception if strings are not present with an empty cause.
def test_raise_if_exception_missing_empty_cause_str() -> None: """Test we raise an exception if strings are not present with an empty cause.""" programming_exc = ProgrammingError("select * from;", Mock(), Mock()) programming_exc.__cause__ = MockPyODBCProgrammingError() with pytest.raises(ProgrammingError): migration.raise_if_exception_missing_str( programming_exc, ["already exists", "duplicate"] ) with pytest.raises(ProgrammingError): migration.raise_if_exception_missing_str(programming_exc, ["not present"])
Test version of create_engine that initializes with old schema. This simulates an existing db with the old schema.
def _create_engine_test(*args, **kwargs): """Test version of create_engine that initializes with old schema. This simulates an existing db with the old schema. """ importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] engine = create_engine(*args, **kwargs) old_db_schema.Base.metadata.create_all(engine) with Session(engine) as session: session.add( recorder.db_schema.StatisticsRuns(start=statistics.get_start_time()) ) session.add( recorder.db_schema.SchemaChanges( schema_version=old_db_schema.SCHEMA_VERSION ) ) session.commit() return engine
Fixture to initialize the db with the old schema.
def db_schema_32(): """Fixture to initialize the db with the old schema.""" importlib.import_module(SCHEMA_MODULE) old_db_schema = sys.modules[SCHEMA_MODULE] with ( patch.object(recorder, "db_schema", old_db_schema), patch.object( recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION ), patch.object(core, "StatesMeta", old_db_schema.StatesMeta), patch.object(core, "EventTypes", old_db_schema.EventTypes), patch.object(core, "EventData", old_db_schema.EventData), patch.object(core, "States", old_db_schema.States), patch.object(core, "Events", old_db_schema.Events), patch.object(core, "StateAttributes", old_db_schema.StateAttributes), patch.object(migration.EntityIDMigration, "task", core.RecorderTask), patch(CREATE_ENGINE_TARGET, new=_create_engine_test), ): yield
Test converting event to db event.
def test_from_event_to_db_event() -> None: """Test converting event to db event.""" event = ha.Event( "test_event", {"some_data": 15}, context=ha.Context( id="01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1", parent_id="01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1", user_id="12345678901234567890123456789012", ), ) db_event = Events.from_event(event) dialect = SupportedDialect.MYSQL db_event.event_data = EventData.shared_data_bytes_from_event(event, dialect) db_event.event_type = event.event_type assert event.as_dict() == db_event.to_native().as_dict()
Test converting event to db state.
def test_from_event_to_db_state() -> None: """Test converting event to db state.""" state = ha.State( "sensor.temperature", "18", context=ha.Context( id="01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1", parent_id="01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1", user_id="12345678901234567890123456789012", ), ) event = ha.Event( EVENT_STATE_CHANGED, {"entity_id": "sensor.temperature", "old_state": None, "new_state": state}, context=state.context, ) assert state.as_dict() == States.from_event(event).to_native().as_dict()
Test converting event to db state attributes.
def test_from_event_to_db_state_attributes() -> None: """Test converting event to db state attributes.""" attrs = {"this_attr": True} state = ha.State("sensor.temperature", "18", attrs) event = ha.Event( EVENT_STATE_CHANGED, {"entity_id": "sensor.temperature", "old_state": None, "new_state": state}, context=state.context, ) db_attrs = StateAttributes() dialect = SupportedDialect.MYSQL db_attrs.shared_attrs = StateAttributes.shared_attrs_bytes_from_event( event, dialect ) assert db_attrs.to_native() == attrs
Test repr for db States and Events created from an event.
def test_repr() -> None:
    """Test repr for db States and Events created from an event."""
    attrs = {"this_attr": True}
    fixed_time = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC, microsecond=432432)
    state = ha.State(
        "sensor.temperature",
        "18",
        attrs,
        last_changed=fixed_time,
        last_updated=fixed_time,
    )
    event = ha.Event(
        EVENT_STATE_CHANGED,
        {"entity_id": "sensor.temperature", "old_state": None, "new_state": state},
        context=state.context,
        time_fired_timestamp=fixed_time.timestamp(),
    )
    assert "2016-07-09 11:00:00+00:00" in repr(States.from_event(event))
    assert "2016-07-09 11:00:00+00:00" in repr(Events.from_event(event))
Test repr for a state without last_updated_ts.
def test_states_repr_without_timestamp() -> None: """Test repr for a state without last_updated_ts.""" fixed_time = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC, microsecond=432432) states = States( entity_id="sensor.temp", attributes=None, context_id=None, context_user_id=None, context_parent_id=None, origin_idx=None, last_updated=fixed_time, last_changed=fixed_time, last_updated_ts=None, last_changed_ts=None, ) assert "2016-07-09 11:00:00+00:00" in repr(states)
Test repr for an event without time_fired_ts.
def test_events_repr_without_timestamp() -> None: """Test repr for an event without time_fired_ts.""" fixed_time = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC, microsecond=432432) events = Events( event_type="any", event_data=None, origin_idx=None, time_fired=fixed_time, time_fired_ts=None, context_id=None, context_user_id=None, context_parent_id=None, ) assert "2016-07-09 11:00:00+00:00" in repr(events)
Test we handle broken json in state attributes.
def test_handling_broken_json_state_attributes( caplog: pytest.LogCaptureFixture, ) -> None: """Test we handle broken json in state attributes.""" state_attributes = StateAttributes( attributes_id=444, hash=1234, shared_attrs="{NOT_PARSE}" ) assert state_attributes.to_native() == {} assert "Error converting row to state attributes" in caplog.text
Test converting deleting state event to db state.
def test_from_event_to_delete_state() -> None: """Test converting deleting state event to db state.""" event = ha.Event( EVENT_STATE_CHANGED, { "entity_id": "sensor.temperature", "old_state": ha.State("sensor.temperature", "18"), "new_state": None, }, ) db_state = States.from_event(event) assert db_state.entity_id == "sensor.temperature" assert db_state.state == "" assert db_state.last_changed_ts is None assert db_state.last_updated_ts == pytest.approx(event.time_fired.timestamp())
Test loading a state from an invalid entity ID.
def test_states_from_native_invalid_entity_id() -> None: """Test loading a state from an invalid entity ID.""" state = States() state.entity_id = "test.invalid__id" state.attributes = "{}" with pytest.raises(InvalidEntityFormatError): state = state.to_native() state = state.to_native(validate_entity_id=False) assert state.entity_id == "test.invalid__id"
Test we can handle processing database datetimes to timestamps.
def test_process_datetime_to_timestamp(time_zone, hass: HomeAssistant) -> None:
    """Test we can handle processing database datetimes to timestamps."""
    hass.config.set_time_zone(time_zone)
    utc_now = dt_util.utcnow()
    assert process_datetime_to_timestamp(utc_now) == utc_now.timestamp()
    now = dt_util.now()
    assert process_datetime_to_timestamp(now) == now.timestamp()
Test we can handle processing database datetimes to timestamps. This test freezes time to make sure everything matches.
def test_process_datetime_to_timestamp_freeze_time(
    time_zone, hass: HomeAssistant
) -> None:
    """Test we can handle processing database datetimes to timestamps.

    This test freezes time to make sure everything matches.
    """
    hass.config.set_time_zone(time_zone)
    utc_now = dt_util.utcnow()
    with freeze_time(utc_now):
        epoch = utc_now.timestamp()
        assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch
        now = dt_util.now()
        assert process_datetime_to_timestamp(now) == epoch
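Both time-zone tests presumably receive time_zone from a parametrize decorator omitted from this excerpt; a hypothetical example set of IANA zone names.

    # Hypothetical parametrization; any valid IANA time zone names work.
    @pytest.mark.parametrize("time_zone", ["Europe/Berlin", "US/Hawaii", "UTC"])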
Test ulid_to_bytes_or_none.
def test_ulid_to_bytes_or_none(caplog: pytest.LogCaptureFixture) -> None: """Test ulid_to_bytes_or_none.""" assert ( ulid_to_bytes_or_none("01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1") == b"\x01w\xaf\xf9w\xe5\xf8~\x1f\x87\xe1\xf8~\x1f\x87\xe1" ) assert ulid_to_bytes_or_none("invalid") is None assert "invalid" in caplog.text assert ulid_to_bytes_or_none(None) is None
Test bytes_to_ulid_or_none.
def test_bytes_to_ulid_or_none(caplog: pytest.LogCaptureFixture) -> None: """Test bytes_to_ulid_or_none.""" assert ( bytes_to_ulid_or_none(b"\x01w\xaf\xf9w\xe5\xf8~\x1f\x87\xe1\xf8~\x1f\x87\xe1") == "01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1" ) assert bytes_to_ulid_or_none(b"invalid") is None assert "invalid" in caplog.text assert bytes_to_ulid_or_none(None) is None
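Taken together, the two tests above imply a lossless round trip; a minimal sketch reusing the same ULID.

    # Round-trip sketch: a valid ULID survives ulid -> bytes -> ulid intact.
    ulid = "01EYQZJXZ5Z1Z1Z1Z1Z1Z1Z1Z1"
    assert bytes_to_ulid_or_none(ulid_to_bytes_or_none(ulid)) == ulid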
Test RecorderPool gives the same connection in the creating thread.
def test_recorder_pool(caplog: pytest.LogCaptureFixture) -> None: """Test RecorderPool gives the same connection in the creating thread.""" engine = create_engine("sqlite://", poolclass=RecorderPool) get_session = sessionmaker(bind=engine) shutdown = False connections = [] def _get_connection_twice(): session = get_session() connections.append(session.connection().connection.driver_connection) session.close() if shutdown: engine.pool.shutdown() session = get_session() connections.append(session.connection().connection.driver_connection) session.close() caplog.clear() new_thread = threading.Thread(target=_get_connection_twice) new_thread.start() new_thread.join() assert "accesses the database without the database executor" in caplog.text assert connections[0] != connections[1] caplog.clear() new_thread = threading.Thread(target=_get_connection_twice, name=DB_WORKER_PREFIX) new_thread.start() new_thread.join() assert "accesses the database without the database executor" not in caplog.text assert connections[2] == connections[3] caplog.clear() new_thread = threading.Thread(target=_get_connection_twice, name="Recorder") new_thread.start() new_thread.join() assert "accesses the database without the database executor" not in caplog.text assert connections[4] == connections[5] shutdown = True caplog.clear() new_thread = threading.Thread(target=_get_connection_twice, name=DB_WORKER_PREFIX) new_thread.start() new_thread.join() assert "accesses the database without the database executor" not in caplog.text assert connections[6] != connections[7]
Pytest fixture to switch purge method.
def mock_use_sqlite(request): """Pytest fixture to switch purge method.""" with patch( "homeassistant.components.recorder.core.Recorder.dialect_name", return_value=SupportedDialect.SQLITE if request.param else SupportedDialect.MYSQL, ): yield
Add a state with state attributes (but no event row) to the database for testing.
def _add_state_with_state_attributes(
    session: Session,
    entity_id: str,
    state: str,
    timestamp: datetime,
    event_id: int,
) -> None:
    """Add a state with state attributes (but no event row) to the database for testing."""
    state_attrs = StateAttributes(
        hash=event_id, shared_attrs=json.dumps({entity_id: entity_id})
    )
    session.add(state_attrs)
    session.add(
        States(
            entity_id=entity_id,
            state=state,
            attributes=None,
            last_changed_ts=dt_util.utc_to_timestamp(timestamp),
            last_updated_ts=dt_util.utc_to_timestamp(timestamp),
            event_id=event_id,
            state_attributes=state_attrs,
        )
    )
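A minimal usage sketch for this helper, assuming an open recorder session; the entity ID and event_id are illustrative.

    # Hedged usage sketch: insert a fabricated state row for purge tests.
    with session_scope(hass=hass) as session:
        _add_state_with_state_attributes(
            session, "sensor.purge_me", "on", dt_util.utcnow(), event_id=1001
        )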
Fixture to initialize the db with the old schema 32.
def db_schema_32(): """Fixture to initialize the db with the old schema 32.""" with old_db_schema("32"): yield
Add state and state_changed event to database for testing.
def _add_state_and_state_changed_event( session: Session, entity_id: str, state: str, timestamp: datetime, event_id: int, ) -> None: """Add state and state_changed event to database for testing.""" state_attrs = StateAttributes( hash=event_id, shared_attrs=json.dumps({entity_id: entity_id}) ) session.add(state_attrs) session.add( States( entity_id=entity_id, state=state, attributes=None, last_changed_ts=dt_util.utc_to_timestamp(timestamp), last_updated_ts=dt_util.utc_to_timestamp(timestamp), event_id=event_id, state_attributes=state_attrs, ) ) session.add( Events( event_id=event_id, event_type=EVENT_STATE_CHANGED, event_data="{}", origin="LOCAL", time_fired_ts=dt_util.utc_to_timestamp(timestamp), ) )