response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Return entries that match an area.
def async_entries_for_area(
    registry: EntityRegistry, area_id: str
) -> list[RegistryEntry]:
    """Return the registry entries assigned to the given area."""
    # The registry keeps an index by area id; delegate to it.
    return registry.entities.get_entries_for_area_id(area_id)
Return entries that match a label.
def async_entries_for_label(
    registry: EntityRegistry, label_id: str
) -> list[RegistryEntry]:
    """Return the registry entries carrying the given label."""
    # The registry keeps an index by label; delegate to it.
    return registry.entities.get_entries_for_label(label_id)
Return entries that match a category in a scope.
def async_entries_for_category(
    registry: EntityRegistry, scope: str, category_id: str
) -> list[RegistryEntry]:
    """Return the registry entries assigned to a category within a scope."""
    matches: list[RegistryEntry] = []
    for entry in registry.entities.values():
        assigned = entry.categories.get(scope)
        # Only a truthy assigned category can match, mirroring the
        # registry convention that "" / None mean "no category".
        if assigned and assigned == category_id:
            matches.append(entry)
    return matches
Return entries that match a config entry.
def async_entries_for_config_entry(
    registry: EntityRegistry, config_entry_id: str
) -> list[RegistryEntry]:
    """Return the registry entries belonging to the given config entry."""
    # The registry keeps an index by config entry id; delegate to it.
    return registry.entities.get_entries_for_config_entry_id(config_entry_id)
Handle a config entry being disabled or enabled. Disable entities in the registry that are associated with a config entry when the config entry is disabled, enable entities in the registry that are associated with a config entry when the config entry is enabled and the entities are marked DISABLED_CONFIG_ENTRY.
def async_config_entry_disabled_by_changed(
    registry: EntityRegistry, config_entry: ConfigEntry
) -> None:
    """Handle a config entry being disabled or enabled.

    Disable entities in the registry that are associated with a config entry when
    the config entry is disabled, enable entities in the registry that are
    associated with a config entry when the config entry is enabled and the
    entities are marked DISABLED_CONFIG_ENTRY.
    """
    entries = async_entries_for_config_entry(registry, config_entry.entry_id)

    if config_entry.disabled_by:
        # Entry was disabled: disable every associated entity that is not
        # already disabled for some other reason (do not overwrite).
        for entry in entries:
            if not entry.disabled:
                registry.async_update_entity(
                    entry.entity_id, disabled_by=RegistryEntryDisabler.CONFIG_ENTRY
                )
    else:
        # Entry was (re-)enabled: only re-enable entities that this
        # mechanism disabled in the first place.
        for entry in entries:
            if entry.disabled_by is RegistryEntryDisabler.CONFIG_ENTRY:
                registry.async_update_entity(entry.entity_id, disabled_by=None)
Clean up the entity registry when entities are removed.
def _async_setup_cleanup(hass: HomeAssistant, registry: EntityRegistry) -> None: """Clean up device registry when entities removed.""" # pylint: disable-next=import-outside-toplevel from . import category_registry as cr, event, label_registry as lr @callback def _removed_from_registry_filter( event_data: lr.EventLabelRegistryUpdatedData | cr.EventCategoryRegistryUpdatedData, ) -> bool: """Filter all except for the remove action from registry events.""" return event_data["action"] == "remove" @callback def _handle_label_registry_update(event: lr.EventLabelRegistryUpdated) -> None: """Update entity that have a label that has been removed.""" registry.async_clear_label_id(event.data["label_id"]) hass.bus.async_listen( event_type=lr.EVENT_LABEL_REGISTRY_UPDATED, event_filter=_removed_from_registry_filter, listener=_handle_label_registry_update, ) @callback def _handle_category_registry_update( event: cr.EventCategoryRegistryUpdated, ) -> None: """Update entity that have a category that has been removed.""" registry.async_clear_category_id(event.data["scope"], event.data["category_id"]) hass.bus.async_listen( event_type=cr.EVENT_CATEGORY_REGISTRY_UPDATED, event_filter=_removed_from_registry_filter, listener=_handle_category_registry_update, ) @callback def cleanup(_: datetime) -> None: """Clean up entity registry.""" # Periodic purge of orphaned entities to avoid the registry # growing without bounds when there are lots of deleted entities registry.async_purge_expired_orphaned_entities() cancel = event.async_track_time_interval( hass, cleanup, timedelta(seconds=CLEANUP_INTERVAL) ) @callback def _on_homeassistant_stop(event: Event) -> None: """Cancel cleanup.""" cancel() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _on_homeassistant_stop)
Set up the entity restore mechanism.
def _async_setup_entity_restore(hass: HomeAssistant, registry: EntityRegistry) -> None:
    """Set up the entity restore mechanism.

    Removes restored states from the state machine when their registry
    entry is removed, and (at startup only) writes an "unavailable" state
    for every enabled registry entry that has no state yet.
    """

    @callback
    def cleanup_restored_states_filter(event_data: Mapping[str, Any]) -> bool:
        """Only let registry "remove" events through."""
        return bool(event_data["action"] == "remove")

    @callback
    def cleanup_restored_states(event: Event[EventEntityRegistryUpdatedData]) -> None:
        """Drop the restored state of an entity removed from the registry."""
        state = hass.states.get(event.data["entity_id"])
        # Only remove states that were restored (marked ATTR_RESTORED);
        # live states belong to a running integration and are left alone.
        if state is None or not state.attributes.get(ATTR_RESTORED):
            return
        hass.states.async_remove(event.data["entity_id"], context=event.context)

    hass.bus.async_listen(
        EVENT_ENTITY_REGISTRY_UPDATED,
        cleanup_restored_states,
        event_filter=cleanup_restored_states_filter,
    )

    # The unavailable-state pass below is only needed during startup; if
    # Home Assistant is already running there is nothing more to do.
    if hass.is_running:
        return

    @callback
    def _write_unavailable_states(_: Event) -> None:
        """Make sure state machine contains entry for each registered entity."""
        existing = set(hass.states.async_entity_ids())
        for entry in registry.entities.values():
            # Skip entities that already have a state or are disabled.
            if entry.entity_id in existing or entry.disabled:
                continue
            entry.write_unavailable_state(hass)

    hass.bus.async_listen(EVENT_HOMEASSISTANT_START, _write_unavailable_states)
Validate and resolve an entity id or UUID to an entity id. Raises vol.Invalid if the entity or UUID is invalid, or if the UUID is not associated with an entity registry item.
def async_validate_entity_id(registry: EntityRegistry, entity_id_or_uuid: str) -> str:
    """Validate and resolve an entity id or UUID to an entity id.

    Raises vol.Invalid if the entity or UUID is invalid, or if the UUID is not
    associated with an entity registry item.
    """
    # A syntactically valid entity id passes through unchanged.
    if valid_entity_id(entity_id_or_uuid):
        return entity_id_or_uuid
    # Otherwise treat the value as a registry UUID and resolve it.
    entry = registry.entities.get_entry(entity_id_or_uuid)
    if entry is None:
        raise vol.Invalid(f"Unknown entity registry entry {entity_id_or_uuid}")
    return entry.entity_id
Validate and resolve an entity id or UUID to an entity id. Returns None if the entity or UUID is invalid, or if the UUID is not associated with an entity registry item.
def async_resolve_entity_id(
    registry: EntityRegistry, entity_id_or_uuid: str
) -> str | None:
    """Validate and resolve an entity id or UUID to an entity id.

    Returns None if the entity or UUID is invalid, or if the UUID is not
    associated with an entity registry item.
    """
    # A syntactically valid entity id passes through unchanged.
    if valid_entity_id(entity_id_or_uuid):
        return entity_id_or_uuid
    # Otherwise treat the value as a registry UUID and resolve it.
    entry = registry.entities.get_entry(entity_id_or_uuid)
    return None if entry is None else entry.entity_id
Validate and resolve a list of entity ids or UUIDs to a list of entity ids. Returns a list with UUIDs resolved to entity_ids. Raises vol.Invalid if any item is invalid, or if any UUID is not associated with an entity registry item.
def async_validate_entity_ids(
    registry: EntityRegistry, entity_ids_or_uuids: list[str]
) -> list[str]:
    """Validate and resolve a list of entity ids or UUIDs to a list of entity ids.

    Returns a list with UUIDs resolved to entity_ids.

    Raises vol.Invalid if any item is invalid, or if any UUID is not
    associated with an entity registry item.
    """
    resolved: list[str] = []
    for item in entity_ids_or_uuids:
        # async_validate_entity_id raises vol.Invalid on any bad item.
        resolved.append(async_validate_entity_id(registry, item))
    return resolved
Convert an async event helper to a threaded one.
def threaded_listener_factory(
    async_factory: Callable[Concatenate[HomeAssistant, _P], Any],
) -> Callable[Concatenate[HomeAssistant, _P], CALLBACK_TYPE]:
    """Convert an async event helper to a threaded one.

    Wraps an event-loop-only helper so it can be called from a worker
    thread; registration and removal are marshalled onto the loop.
    """

    @wraps(async_factory)
    def factory(
        hass: HomeAssistant, *args: _P.args, **kwargs: _P.kwargs
    ) -> CALLBACK_TYPE:
        """Call async event helper safely."""
        if not isinstance(hass, HomeAssistant):
            raise TypeError("First parameter needs to be a hass instance")

        # Run the async helper inside the event loop and block this
        # thread until it returns the unsubscribe callback.
        async_remove = run_callback_threadsafe(
            hass.loop, partial(async_factory, hass, *args, **kwargs)
        ).result()

        def remove() -> None:
            """Threadsafe removal."""
            # The unsubscribe must also run inside the event loop.
            run_callback_threadsafe(hass.loop, async_remove).result()

        return remove

    return factory
Track specific state changes. entity_ids, from_state and to_state can be string or list. Use list to match multiple. Returns a function that can be called to remove the listener. If entity_ids are not MATCH_ALL along with from_state and to_state being None, async_track_state_change_event should be used instead as it is slightly faster. This function is deprecated and will be removed in Home Assistant 2025.5. Must be run within the event loop.
def async_track_state_change(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[
        [str, State | None, State | None], Coroutine[Any, Any, None] | None
    ],
    from_state: None | str | Iterable[str] = None,
    to_state: None | str | Iterable[str] = None,
) -> CALLBACK_TYPE:
    """Track specific state changes.

    entity_ids, from_state and to_state can be string or list.
    Use list to match multiple.

    Returns a function that can be called to remove the listener.

    If entity_ids are not MATCH_ALL along with from_state and to_state
    being None, async_track_state_change_event should be used instead
    as it is slightly faster.

    This function is deprecated and will be removed in Home Assistant 2025.5.

    Must be run within the event loop.
    """
    # Warn integrations (but not crash core) about the deprecated call.
    frame.report(
        "calls `async_track_state_change` instead of `async_track_state_change_event`"
        " which is deprecated and will be removed in Home Assistant 2025.5",
        error_if_core=False,
    )

    # Build the matchers only when a constraint was given; the filter
    # below checks `from_state`/`to_state` for None before using them.
    if from_state is not None:
        match_from_state = process_state_match(from_state)
    if to_state is not None:
        match_to_state = process_state_match(to_state)

    # Ensure it is a lowercase list with entity ids we want to match on
    if entity_ids == MATCH_ALL:
        pass
    elif isinstance(entity_ids, str):
        entity_ids = (entity_ids.lower(),)
    else:
        entity_ids = tuple(entity_id.lower() for entity_id in entity_ids)

    job = HassJob(action, f"track state change {entity_ids} {from_state} {to_state}")

    @callback
    def state_change_filter(event_data: EventStateChangedData) -> bool:
        """Return True when the change satisfies the from/to constraints."""
        if from_state is not None:
            old_state_str: str | None = None
            if (old_state := event_data["old_state"]) is not None:
                old_state_str = old_state.state
            if not match_from_state(old_state_str):
                return False

        if to_state is not None:
            new_state_str: str | None = None
            if (new_state := event_data["new_state"]) is not None:
                new_state_str = new_state.state
            if not match_to_state(new_state_str):
                return False

        return True

    @callback
    def state_change_dispatcher(event: Event[EventStateChangedData]) -> None:
        """Unpack the event and run the action job."""
        hass.async_run_hass_job(
            job,
            event.data["entity_id"],
            event.data["old_state"],
            event.data["new_state"],
        )

    @callback
    def state_change_listener(event: Event[EventStateChangedData]) -> None:
        """Filter then dispatch; used on the per-entity routed path."""
        if not state_change_filter(event.data):
            return
        state_change_dispatcher(event)

    if entity_ids != MATCH_ALL:
        # If we have a list of entity ids we use
        # async_track_state_change_event to route
        # by entity_id to avoid iterating through state change
        # events and creating jobs where the most
        # common outcome is to return right away because
        # the entity_id does not match since usually
        # only one or two listeners want that specific
        # entity_id.
        return async_track_state_change_event(hass, entity_ids, state_change_listener)

    return hass.bus.async_listen(
        EVENT_STATE_CHANGED,
        state_change_dispatcher,
        event_filter=state_change_filter,
    )
Track specific state change events indexed by entity_id. Unlike async_track_state_change, async_track_state_change_event passes the full event to the callback. In order to avoid having to iterate a long list of EVENT_STATE_CHANGED and fire and create a job for each one, we keep a dict of entity ids that care about the state change events so we can do a fast dict lookup to route events.
def async_track_state_change_event(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[[Event[EventStateChangedData]], Any],
    job_type: HassJobType | None = None,
) -> CALLBACK_TYPE:
    """Track specific state change events indexed by entity_id.

    Unlike async_track_state_change, async_track_state_change_event
    passes the full event to the callback.

    In order to avoid having to iterate a long list
    of EVENT_STATE_CHANGED and fire and create a job
    for each one, we keep a dict of entity ids that
    care about the state change events so we can
    do a fast dict lookup to route events.
    """
    lowered_ids = _async_string_to_lower_list(entity_ids)
    if not lowered_ids:
        # Nothing to track: return a no-op unsubscribe.
        return _remove_empty_listener
    return _async_track_state_change_event(hass, lowered_ids, action, job_type)
Dispatch to listeners.
def _async_dispatch_entity_id_event( hass: HomeAssistant, callbacks: dict[str, list[HassJob[[Event[EventStateChangedData]], Any]]], event: Event[EventStateChangedData], ) -> None: """Dispatch to listeners.""" if not (callbacks_list := callbacks.get(event.data["entity_id"])): return for job in callbacks_list.copy(): try: hass.async_run_hass_job(job, event) except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error while dispatching event for %s to %s", event.data["entity_id"], job, )
Filter state changes by entity_id.
def _async_state_change_filter( hass: HomeAssistant, callbacks: dict[str, list[HassJob[[Event[EventStateChangedData]], Any]]], event_data: EventStateChangedData, ) -> bool: """Filter state changes by entity_id.""" return event_data["entity_id"] in callbacks
async_track_state_change_event without lowercasing.
def _async_track_state_change_event(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[[Event[EventStateChangedData]], Any],
    job_type: HassJobType | None,
) -> CALLBACK_TYPE:
    """Variant of async_track_state_change_event that skips lowercasing."""
    tracker = _KEYED_TRACK_STATE_CHANGE
    return _async_track_event(tracker, hass, entity_ids, action, job_type)
Remove a listener that does nothing.
def _remove_empty_listener() -> None: """Remove a listener that does nothing."""
Remove listener.
def _remove_listener( hass: HomeAssistant, listeners_key: str, keys: Iterable[str], job: HassJob[[Event[_TypedDictT]], Any], callbacks: dict[str, list[HassJob[[Event[_TypedDictT]], Any]]], ) -> None: """Remove listener.""" for key in keys: callbacks[key].remove(job) if len(callbacks[key]) == 0: del callbacks[key] if not callbacks: hass.data[listeners_key]() del hass.data[listeners_key]
Track an event by a specific key. This function is intended for internal use only. The dispatcher_callable, filter_callable, event_type, and run_immediately must always be the same for the listener_key as the first call to this function will set the listener_key in hass.data.
def _async_track_event(
    tracker: _KeyedEventTracker[_TypedDictT],
    hass: HomeAssistant,
    keys: str | Iterable[str],
    action: Callable[[Event[_TypedDictT]], None],
    job_type: HassJobType | None,
) -> CALLBACK_TYPE:
    """Track an event by a specific key.

    This function is intended for internal use only.

    The dispatcher_callable, filter_callable, event_type, and run_immediately
    must always be the same for the listener_key as the first call to this
    function will set the listener_key in hass.data.
    """
    if not keys:
        # Nothing to track: return a no-op unsubscribe.
        return _remove_empty_listener
    hass_data = hass.data

    callbacks: defaultdict[str, list[HassJob[[Event[_TypedDictT]], Any]]] | None
    # Lazily create the shared key->jobs routing table for this tracker.
    if not (callbacks := hass_data.get(tracker.callbacks_key)):
        callbacks = hass_data[tracker.callbacks_key] = defaultdict(list)

    listeners_key = tracker.listeners_key

    # Install the single shared bus listener for this tracker on first use;
    # its unsubscribe is stored in hass.data under listeners_key.
    if tracker.listeners_key not in hass_data:
        hass_data[tracker.listeners_key] = hass.bus.async_listen(
            tracker.event_type,
            partial(tracker.dispatcher_callable, hass, callbacks),
            event_filter=partial(tracker.filter_callable, hass, callbacks),
        )

    job = HassJob(action, f"track {tracker.event_type} event {keys}", job_type=job_type)

    if isinstance(keys, str):
        # Almost all calls to this function use a single key
        # so we optimize for that case. We don't use setdefault
        # here because this function gets called ~20000 times
        # during startup, and we want to avoid the overhead of
        # creating empty lists and throwing them away.
        callbacks[keys].append(job)
        keys = [keys]
    else:
        for key in keys:
            callbacks[key].append(job)

    return partial(_remove_listener, hass, listeners_key, keys, job, callbacks)
Dispatch to listeners.
def _async_dispatch_old_entity_id_or_entity_id_event( hass: HomeAssistant, callbacks: dict[str, list[HassJob[[Event[EventEntityRegistryUpdatedData]], Any]]], event: Event[EventEntityRegistryUpdatedData], ) -> None: """Dispatch to listeners.""" if not ( callbacks_list := callbacks.get( # type: ignore[call-overload] # mypy bug? event.data.get("old_entity_id", event.data["entity_id"]) ) ): return for job in callbacks_list.copy(): try: hass.async_run_hass_job(job, event) except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error while dispatching event for %s to %s", event.data.get("old_entity_id", event.data["entity_id"]), job, )
Filter entity registry updates by entity_id.
def _async_entity_registry_updated_filter( hass: HomeAssistant, callbacks: dict[str, list[HassJob[[Event[EventEntityRegistryUpdatedData]], Any]]], event_data: EventEntityRegistryUpdatedData, ) -> bool: """Filter entity registry updates by entity_id.""" return event_data.get("old_entity_id", event_data["entity_id"]) in callbacks
Track specific entity registry updated events indexed by entity_id. Entities must be lower case. Similar to async_track_state_change_event.
def async_track_entity_registry_updated_event(
    hass: HomeAssistant,
    entity_ids: str | Iterable[str],
    action: Callable[[Event[EventEntityRegistryUpdatedData]], Any],
    job_type: HassJobType | None = None,
) -> CALLBACK_TYPE:
    """Track specific entity registry updated events indexed by entity_id.

    Entities must be lower case.

    Similar to async_track_state_change_event.
    """
    tracker = _KEYED_TRACK_ENTITY_REGISTRY_UPDATED
    return _async_track_event(tracker, hass, entity_ids, action, job_type)
Filter device registry updates by device_id.
def _async_device_registry_updated_filter( hass: HomeAssistant, callbacks: dict[str, list[HassJob[[Event[EventDeviceRegistryUpdatedData]], Any]]], event_data: EventDeviceRegistryUpdatedData, ) -> bool: """Filter device registry updates by device_id.""" return event_data["device_id"] in callbacks
Dispatch to listeners.
def _async_dispatch_device_id_event( hass: HomeAssistant, callbacks: dict[str, list[HassJob[[Event[EventDeviceRegistryUpdatedData]], Any]]], event: Event[EventDeviceRegistryUpdatedData], ) -> None: """Dispatch to listeners.""" if not (callbacks_list := callbacks.get(event.data["device_id"])): return for job in callbacks_list.copy(): try: hass.async_run_hass_job(job, event) except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error while dispatching event for %s to %s", event.data["device_id"], job, )
Track specific device registry updated events indexed by device_id. Similar to async_track_entity_registry_updated_event.
def async_track_device_registry_updated_event(
    hass: HomeAssistant,
    device_ids: str | Iterable[str],
    action: Callable[[Event[EventDeviceRegistryUpdatedData]], Any],
    job_type: HassJobType | None = None,
) -> CALLBACK_TYPE:
    """Track specific device registry updated events indexed by device_id.

    Similar to async_track_entity_registry_updated_event.
    """
    tracker = _KEYED_TRACK_DEVICE_REGISTRY_UPDATED
    return _async_track_event(tracker, hass, device_ids, action, job_type)
Dispatch domain event listeners.
def _async_dispatch_domain_event(
    hass: HomeAssistant,
    callbacks: dict[str, list[HassJob[[Event[EventStateChangedData]], Any]]],
    event: Event[EventStateChangedData],
) -> None:
    """Run the jobs registered for the event's domain and for MATCH_ALL."""
    domain = split_entity_id(event.data["entity_id"])[0]
    # Domain-specific listeners fire first, then the wildcard listeners.
    jobs = callbacks.get(domain, []) + callbacks.get(MATCH_ALL, [])
    for job in jobs:
        try:
            hass.async_run_hass_job(job, event)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error while processing event %s for domain %s", event, domain
            )
Filter state changes by entity_id.
def _async_domain_added_filter(
    hass: HomeAssistant,
    callbacks: dict[str, list[HassJob[[Event[EventStateChangedData]], Any]]],
    event_data: EventStateChangedData,
) -> bool:
    """Return True for entity-added events in a tracked domain."""
    if event_data["old_state"] is not None:
        # An old state means this is a change, not an addition.
        return False
    if MATCH_ALL in callbacks:
        return True
    return split_entity_id(event_data["entity_id"])[0] in callbacks
Track state change events when an entity is added to domains.
def async_track_state_added_domain(
    hass: HomeAssistant,
    domains: str | Iterable[str],
    action: Callable[[Event[EventStateChangedData]], Any],
    job_type: HassJobType | None = None,
) -> CALLBACK_TYPE:
    """Track state change events when an entity is added to domains."""
    lowered = _async_string_to_lower_list(domains)
    if not lowered:
        # Nothing to track: return a no-op unsubscribe.
        return _remove_empty_listener
    return _async_track_state_added_domain(hass, lowered, action, job_type)
Track state change events when an entity is added to domains.
def _async_track_state_added_domain(
    hass: HomeAssistant,
    domains: str | Iterable[str],
    action: Callable[[Event[EventStateChangedData]], Any],
    job_type: HassJobType | None,
) -> CALLBACK_TYPE:
    """Variant of async_track_state_added_domain that skips lowercasing."""
    tracker = _KEYED_TRACK_STATE_ADDED_DOMAIN
    return _async_track_event(tracker, hass, domains, action, job_type)
Filter state changes by entity_id.
def _async_domain_removed_filter(
    hass: HomeAssistant,
    callbacks: dict[str, list[HassJob[[Event[EventStateChangedData]], Any]]],
    event_data: EventStateChangedData,
) -> bool:
    """Return True for entity-removed events in a tracked domain."""
    if event_data["new_state"] is not None:
        # A new state means this is a change, not a removal.
        return False
    if MATCH_ALL in callbacks:
        return True
    return split_entity_id(event_data["entity_id"])[0] in callbacks
Track state change events when an entity is removed from domains.
def async_track_state_removed_domain(
    hass: HomeAssistant,
    domains: str | Iterable[str],
    action: Callable[[Event[EventStateChangedData]], Any],
    job_type: HassJobType | None = None,
) -> CALLBACK_TYPE:
    """Track state change events when an entity is removed from domains."""
    tracker = _KEYED_TRACK_STATE_REMOVED_DOMAIN
    return _async_track_event(tracker, hass, domains, action, job_type)
Track state changes with a TrackStates filter that can be updated. Parameters ---------- hass Home assistant object. track_states A TrackStates data class. action Callable to call with results. Returns ------- Object used to update the listeners (async_update_listeners) with a new TrackStates or cancel the tracking (async_remove).
def async_track_state_change_filtered(
    hass: HomeAssistant,
    track_states: TrackStates,
    action: Callable[[Event[EventStateChangedData]], Any],
) -> _TrackStateChangeFiltered:
    """Track state changes with a TrackStates filter that can be updated.

    Parameters
    ----------
    hass
        Home assistant object.
    track_states
        A TrackStates data class.
    action
        Callable to call with results.

    Returns
    -------
    Object used to update the listeners (async_update_listeners) with a new
    TrackStates or cancel the tracking (async_remove).
    """
    state_tracker = _TrackStateChangeFiltered(hass, track_states, action)
    state_tracker.async_setup()
    return state_tracker
Add a listener that fires when a template evaluates to 'true'. Listen for the result of the template becoming true, or a true-like string result, such as 'On', 'Open', or 'Yes'. If the template results in an error state when the value changes, this will be logged and not passed through. If the initial check of the template is invalid and results in an exception, the listener will still be registered but will only fire if the template result becomes true without an exception. Action arguments ---------------- entity_id ID of the entity that triggered the state change. old_state The old state of the entity that changed. new_state New state of the entity that changed. Parameters ---------- hass Home assistant object. template The template to calculate. action Callable to call with results. See above for arguments. variables Variables to pass to the template. Returns ------- Callable to unregister the listener.
def async_track_template(
    hass: HomeAssistant,
    template: Template,
    action: Callable[
        [str, State | None, State | None], Coroutine[Any, Any, None] | None
    ],
    variables: TemplateVarsType | None = None,
) -> CALLBACK_TYPE:
    """Add a listener that fires when a template evaluates to 'true'.

    Listen for the result of the template becoming true, or a true-like
    string result, such as 'On', 'Open', or 'Yes'. If the template results
    in an error state when the value changes, this will be logged and not
    passed through.

    If the initial check of the template is invalid and results in an
    exception, the listener will still be registered but will only
    fire if the template result becomes true without an exception.

    Action arguments
    ----------------
    entity_id
        ID of the entity that triggered the state change.
    old_state
        The old state of the entity that changed.
    new_state
        New state of the entity that changed.

    Parameters
    ----------
    hass
        Home assistant object.
    template
        The template to calculate.
    action
        Callable to call with results. See above for arguments.
    variables
        Variables to pass to the template.

    Returns
    -------
    Callable to unregister the listener.

    """
    job = HassJob(action, f"track template {template}")

    @callback
    def _template_changed_listener(
        event: Event[EventStateChangedData] | None,
        updates: list[TrackTemplateResult],
    ) -> None:
        """Check if condition is correct and run action."""
        track_result = updates.pop()

        template = track_result.template
        last_result = track_result.last_result
        result = track_result.result

        if isinstance(result, TemplateError):
            # Errors are logged, not forwarded to the action.
            _LOGGER.error(
                "Error while processing template: %s",
                template.template,
                exc_info=result,
            )
            return

        # Only fire on the false -> true edge: skip when the previous
        # result was already truthy, or when the new result is not truthy.
        if (
            not isinstance(last_result, TemplateError)
            and result_as_boolean(last_result)
            or not result_as_boolean(result)
        ):
            return

        # event is None when the update was not triggered by a state change.
        hass.async_run_hass_job(
            job,
            event and event.data["entity_id"],
            event and event.data["old_state"],
            event and event.data["new_state"],
        )

    info = async_track_template_result(
        hass, [TrackTemplate(template, variables)], _template_changed_listener
    )

    return info.async_remove
Add a listener that fires when the result of a template changes. The action will fire with the initial result from the template, and then whenever the output from the template changes. The template will be reevaluated if any states referenced in the last run of the template change, or if manually triggered. If the result of the evaluation is different from the previous run, the listener is passed the result. If the template results in a TemplateError, this will be returned to the listener the first time this happens but not for subsequent errors. Once the template returns to a non-error condition the result is sent to the action as usual. Parameters ---------- hass Home assistant object. track_templates An iterable of TrackTemplate. action Callable to call with results. strict When set to True, raise on undefined variables. log_fn If not None, template error messages will be logged by calling log_fn instead of the normal logging facility. has_super_template When set to True, the first template will block rendering of other templates if it doesn't render as True. Returns ------- Info object used to unregister the listener, and refresh the template.
def async_track_template_result(
    hass: HomeAssistant,
    track_templates: Sequence[TrackTemplate],
    action: TrackTemplateResultListener,
    strict: bool = False,
    log_fn: Callable[[int, str], None] | None = None,
    has_super_template: bool = False,
) -> TrackTemplateResultInfo:
    """Add a listener that fires when the result of a template changes.

    The action will fire with the initial result from the template, and
    then whenever the output from the template changes. The template will
    be reevaluated if any states referenced in the last run of the
    template change, or if manually triggered. If the result of the
    evaluation is different from the previous run, the listener is passed
    the result.

    If the template results in a TemplateError, this will be returned to
    the listener the first time this happens but not for subsequent errors.
    Once the template returns to a non-error condition the result is sent
    to the action as usual.

    Parameters
    ----------
    hass
        Home assistant object.
    track_templates
        An iterable of TrackTemplate.
    action
        Callable to call with results.
    strict
        When set to True, raise on undefined variables.
    log_fn
        If not None, template error messages will be logged by calling
        log_fn instead of the normal logging facility.
    has_super_template
        When set to True, the first template will block rendering of other
        templates if it doesn't render as True.

    Returns
    -------
    Info object used to unregister the listener, and refresh the template.

    """
    result_info = TrackTemplateResultInfo(
        hass, track_templates, action, has_super_template
    )
    result_info.async_setup(strict=strict, log_fn=log_fn)
    return result_info
Track the state of entities for a period and run an action. async_check_same_func decides, per state change, whether the tracked state still counts as the same; when it returns False the pending action is cancelled. Without entity_ids we track all state changes.
def async_track_same_state(
    hass: HomeAssistant,
    period: timedelta,
    action: Callable[[], Coroutine[Any, Any, None] | None],
    async_check_same_func: Callable[[str, State | None, State | None], bool],
    entity_ids: str | Iterable[str] = MATCH_ALL,
) -> CALLBACK_TYPE:
    """Track the state of entities for a period and run an action.

    async_check_same_func decides, per state change, whether the tracked
    state still counts as "the same"; when it returns False the pending
    action is cancelled.

    Without entity_ids we track all state changes.
    """
    # Unsub handles for the delay timer and the state-change listener.
    async_remove_state_for_cancel: CALLBACK_TYPE | None = None
    async_remove_state_for_listener: CALLBACK_TYPE | None = None

    job = HassJob(action, f"track same state {period} {entity_ids}")

    @callback
    def clear_listener() -> None:
        """Clear all unsub listener."""
        nonlocal async_remove_state_for_cancel, async_remove_state_for_listener

        if async_remove_state_for_listener is not None:
            async_remove_state_for_listener()
            async_remove_state_for_listener = None
        if async_remove_state_for_cancel is not None:
            async_remove_state_for_cancel()
            async_remove_state_for_cancel = None

    @callback
    def state_for_listener(now: Any) -> None:
        """Fire on state changes after a delay and calls action."""
        nonlocal async_remove_state_for_listener
        # The timer already fired; drop its handle before clearing so
        # clear_listener does not try to cancel it again.
        async_remove_state_for_listener = None
        clear_listener()
        hass.async_run_hass_job(job)

    @callback
    def state_for_cancel_listener(event: Event[EventStateChangedData]) -> None:
        """Fire on changes and cancel for listener if changed."""
        entity = event.data["entity_id"]
        from_state = event.data["old_state"]
        to_state = event.data["new_state"]

        # Any change judged "not the same" aborts the pending action.
        if not async_check_same_func(entity, from_state, to_state):
            clear_listener()

    async_remove_state_for_listener = async_call_later(hass, period, state_for_listener)

    if entity_ids == MATCH_ALL:
        async_remove_state_for_cancel = hass.bus.async_listen(
            EVENT_STATE_CHANGED, state_for_cancel_listener
        )
    else:
        async_remove_state_for_cancel = async_track_state_change_event(
            hass,
            entity_ids,
            state_for_cancel_listener,
        )

    return clear_listener
Add a listener that fires once at or after a specific point in time. The listener is passed the time it fires in local time.
def async_track_point_in_time(
    hass: HomeAssistant,
    action: HassJob[[datetime], Coroutine[Any, Any, None] | None]
    | Callable[[datetime], Coroutine[Any, Any, None] | None],
    point_in_time: datetime,
) -> CALLBACK_TYPE:
    """Add a listener that fires once at or after a specific point in time.

    The listener is passed the time it fires in local time.
    """
    if isinstance(action, HassJob):
        job = action
    else:
        job = HassJob(action, f"track point in time {point_in_time}")

    @callback
    def utc_converter(utc_now: datetime) -> None:
        """Convert passed in UTC now to local now."""
        hass.async_run_hass_job(job, dt_util.as_local(utc_now))

    # Wrap the action so it receives local time while the scheduling
    # itself is done in UTC.
    track_job = HassJob(
        utc_converter,
        name=f"{job.name} UTC converter",
        cancel_on_shutdown=job.cancel_on_shutdown,
        job_type=HassJobType.Callback,
    )
    return async_track_point_in_utc_time(hass, track_job, point_in_time)
Add a listener that fires once at or after a specific point in time. The listener is passed the time it fires in UTC time.
def async_track_point_in_utc_time(
    hass: HomeAssistant,
    action: HassJob[[datetime], Coroutine[Any, Any, None] | None]
    | Callable[[datetime], Coroutine[Any, Any, None] | None],
    point_in_time: datetime,
) -> CALLBACK_TYPE:
    """Add a listener that fires once at or after a specific point in time.

    The listener is passed the time it fires in UTC time.
    """
    # Ensure point_in_time is UTC before computing the fire timestamp.
    utc_point_in_time = dt_util.as_utc(point_in_time)

    if isinstance(action, HassJob):
        job = action
    else:
        job = HassJob(action, f"track point in utc time {utc_point_in_time}")

    tracker = _TrackPointUTCTime(
        hass, job, utc_point_in_time, utc_point_in_time.timestamp()
    )
    tracker.async_attach()
    return tracker.async_cancel
Run action.
def _run_async_call_action(
    hass: HomeAssistant, job: HassJob[[datetime], Coroutine[Any, Any, None] | None]
) -> None:
    """Run action.

    Shared timer callback for async_call_at/async_call_later: runs the job
    with the current UTC time (time_tracker_utcnow is patchable in tests).
    """
    hass.async_run_hass_job(job, time_tracker_utcnow())
Add a listener that fires at or after <loop_time>. The listener is passed the time it fires in UTC time.
def async_call_at(
    hass: HomeAssistant,
    action: HassJob[[datetime], Coroutine[Any, Any, None] | None]
    | Callable[[datetime], Coroutine[Any, Any, None] | None],
    loop_time: float,
) -> CALLBACK_TYPE:
    """Add a listener that fires at or after <loop_time>.

    The listener is passed the time it fires in UTC time.
    """
    if isinstance(action, HassJob):
        job = action
    else:
        job = HassJob(action, f"call_at {loop_time}")
    # Schedule on the event loop clock; cancelling the handle is the unsub.
    timer_handle = hass.loop.call_at(loop_time, _run_async_call_action, hass, job)
    return timer_handle.cancel
Add a listener that fires at or after <delay>. The listener is passed the time it fires in UTC time.
def async_call_later(
    hass: HomeAssistant,
    delay: float | timedelta,
    action: HassJob[[datetime], Coroutine[Any, Any, None] | None]
    | Callable[[datetime], Coroutine[Any, Any, None] | None],
) -> CALLBACK_TYPE:
    """Add a listener that fires at or after <delay>.

    The listener is passed the time it fires in UTC time.
    """
    # Normalize a timedelta delay to seconds before naming the job so the
    # job name and the loop arithmetic agree.
    seconds = delay.total_seconds() if isinstance(delay, timedelta) else delay
    if isinstance(action, HassJob):
        job = action
    else:
        job = HassJob(action, f"call_later {seconds}")
    loop = hass.loop
    timer_handle = loop.call_at(
        loop.time() + seconds, _run_async_call_action, hass, job
    )
    return timer_handle.cancel
Add a listener that fires repetitively at every timedelta interval. The listener is passed the time it fires in UTC time.
def async_track_time_interval(
    hass: HomeAssistant,
    action: Callable[[datetime], Coroutine[Any, Any, None] | None],
    interval: timedelta,
    *,
    name: str | None = None,
    cancel_on_shutdown: bool | None = None,
) -> CALLBACK_TYPE:
    """Add a listener that fires repetitively at every timedelta interval.

    The listener is passed the time it fires in UTC time.
    """
    seconds = interval.total_seconds()
    # Optional human-readable prefix in front of the generated job name.
    base_name = f"track time interval {seconds} {action}"
    job_name = f"{name}: {base_name}" if name else base_name

    interval_tracker = _TrackTimeInterval(
        hass, seconds, job_name, action, cancel_on_shutdown
    )
    interval_tracker.async_attach()
    return interval_tracker.async_cancel
Add a listener that will fire a specified offset from sunrise daily.
def async_track_sunrise(
    hass: HomeAssistant, action: Callable[[], None], offset: timedelta | None = None
) -> CALLBACK_TYPE:
    """Add a listener that will fire a specified offset from sunrise daily."""
    sunrise_job = HassJob(action, "track sunrise")
    sun_listener = SunListener(hass, sunrise_job, SUN_EVENT_SUNRISE, offset)
    sun_listener.async_attach()
    return sun_listener.async_detach
Add a listener that will fire a specified offset from sunset daily.
def async_track_sunset(
    hass: HomeAssistant, action: Callable[[], None], offset: timedelta | None = None
) -> CALLBACK_TYPE:
    """Add a listener that will fire a specified offset from sunset daily."""
    sunset_job = HassJob(action, "track sunset")
    sun_listener = SunListener(hass, sunset_job, SUN_EVENT_SUNSET, offset)
    sun_listener.async_attach()
    return sun_listener.async_detach
Add a listener that will fire every time the UTC or local time matches a pattern. The listener is passed the time it fires in UTC or local time.
def async_track_utc_time_change( hass: HomeAssistant, action: Callable[[datetime], Coroutine[Any, Any, None] | None], hour: Any | None = None, minute: Any | None = None, second: Any | None = None, local: bool = False, ) -> CALLBACK_TYPE: """Add a listener that will fire every time the UTC or local time matches a pattern. The listener is passed the time it fires in UTC or local time. """ # We do not have to wrap the function with time pattern matching logic # if no pattern given if all(val is None or val == "*" for val in (hour, minute, second)): # Previously this relied on EVENT_TIME_FIRED # which meant it would not fire right away because # the caller would always be misaligned with the call # time vs the fire time by < 1s. To preserve this # misalignment we use async_track_time_interval here return async_track_time_interval(hass, action, timedelta(seconds=1)) job = HassJob(action, f"track time change {hour}:{minute}:{second} local={local}") matching_seconds = dt_util.parse_time_expression(second, 0, 59) matching_minutes = dt_util.parse_time_expression(minute, 0, 59) matching_hours = dt_util.parse_time_expression(hour, 0, 23) # Avoid aligning all time trackers to the same fraction of a second # since it can create a thundering herd problem # https://github.com/home-assistant/core/issues/82231 microsecond = randint(RANDOM_MICROSECOND_MIN, RANDOM_MICROSECOND_MAX) listener_job_name = f"time change listener {hour}:{minute}:{second} {action}" track = _TrackUTCTimeChange( hass, (matching_seconds, matching_minutes, matching_hours), microsecond, local, job, listener_job_name, ) track.async_attach() return track.async_cancel
Add a listener that will fire every time the local time matches a pattern. The listener is passed the time it fires in local time.
def async_track_time_change(
    hass: HomeAssistant,
    action: Callable[[datetime], Coroutine[Any, Any, None] | None],
    hour: Any | None = None,
    minute: Any | None = None,
    second: Any | None = None,
) -> CALLBACK_TYPE:
    """Add a listener that will fire every time the local time matches a pattern.

    The listener is passed the time it fires in local time.
    """
    # Same as the UTC variant, with local-time matching enabled.
    return async_track_utc_time_change(
        hass, action, hour=hour, minute=minute, second=second, local=True
    )
Convert parameter to function that matches input against parameter.
def process_state_match( parameter: None | str | Iterable[str], invert: bool = False ) -> Callable[[str | None], bool]: """Convert parameter to function that matches input against parameter.""" if parameter is None or parameter == MATCH_ALL: return lambda _: not invert if isinstance(parameter, str) or not hasattr(parameter, "__iter__"): return lambda state: invert is not (state == parameter) parameter_set = set(parameter) return lambda state: invert is not (state in parameter_set)
Combine from multiple RenderInfo.
def _entities_domains_from_render_infos( render_infos: Iterable[RenderInfo], ) -> tuple[set[str], set[str]]: """Combine from multiple RenderInfo.""" entities: set[str] = set() domains: set[str] = set() for render_info in render_infos: if render_info.entities: entities.update(render_info.entities) if render_info.domains: domains.update(render_info.domains) if render_info.domains_lifecycle: domains.update(render_info.domains_lifecycle) return entities, domains
Determine if an all listener is needed from RenderInfo.
def _render_infos_needs_all_listener(render_infos: Iterable[RenderInfo]) -> bool: """Determine if an all listener is needed from RenderInfo.""" for render_info in render_infos: # Tracking all states if render_info.all_states or render_info.all_states_lifecycle: return True return False
Create a TrackStates dataclass from the latest RenderInfo.
def _render_infos_to_track_states(render_infos: Iterable[RenderInfo]) -> TrackStates:
    """Create a TrackStates dataclass from the latest RenderInfo.

    When any render info needs all states, entity/domain sets are left empty
    since the all-listener supersedes them.
    """
    if _render_infos_needs_all_listener(render_infos):
        return TrackStates(True, set(), set())

    entities, domains = _entities_domains_from_render_infos(render_infos)
    return TrackStates(False, entities, domains)
Determine if a template should be re-rendered from an event.
def _event_triggers_rerender( event: Event[EventStateChangedData], info: RenderInfo ) -> bool: """Determine if a template should be re-rendered from an event.""" entity_id = event.data["entity_id"] if info.filter(entity_id): return True if event.data["new_state"] is not None and event.data["old_state"] is not None: return False return bool(info.filter_lifecycle(entity_id))
Determine the rate limit for an event.
def _rate_limit_for_event( event: Event[EventStateChangedData], info: RenderInfo, track_template_: TrackTemplate, ) -> float | None: """Determine the rate limit for an event.""" # Specifically referenced entities are excluded # from the rate limit if event.data["entity_id"] in info.entities: return None if track_template_.rate_limit is not None: return track_template_.rate_limit rate_limit: float | None = info.rate_limit return rate_limit
Remove the domains and all_states from render info during a ratelimit.
def _suppress_domain_all_in_render_info(render_info: RenderInfo) -> RenderInfo: """Remove the domains and all_states from render info during a ratelimit.""" rate_limited_render_info = copy.copy(render_info) rate_limited_render_info.all_states = False rate_limited_render_info.all_states_lifecycle = False rate_limited_render_info.domains = set() rate_limited_render_info.domains_lifecycle = set() return rate_limited_render_info
Get floor registry.
def async_get(hass: HomeAssistant) -> FloorRegistry:
    """Get floor registry.

    The registry is stored in hass.data under DATA_REGISTRY at setup time.
    """
    registry = hass.data[DATA_REGISTRY]
    return cast(FloorRegistry, registry)
Return a logger by checking the current integration frame. If Python is unable to access the sources files, the call stack frame will be missing information, so let's guard by requiring a fallback name. https://github.com/home-assistant/core/issues/24982
def get_integration_logger(fallback_name: str) -> logging.Logger:
    """Return a logger by checking the current integration frame.

    If Python is unable to access the sources files, the call stack frame
    will be missing information, so let's guard by requiring a fallback name.
    https://github.com/home-assistant/core/issues/24982
    """
    try:
        frame = get_integration_frame()
    except MissingIntegrationFrame:
        # No integration on the stack (or sources unavailable).
        return logging.getLogger(fallback_name)

    package = (
        "custom_components"
        if frame.custom_integration
        else "homeassistant.components"
    )
    return logging.getLogger(f"{package}.{frame.integration}")
Return the current frame.
def get_current_frame(depth: int = 0) -> FrameType: """Return the current frame.""" # Add one to depth since get_current_frame is included return sys._getframe(depth + 1)
Return the frame, integration and integration path of the current stack frame.
def get_integration_frame(exclude_integrations: set | None = None) -> IntegrationFrame:
    """Return the frame, integration and integration path of the current stack frame.

    Walks up the call stack until a frame whose filename contains
    ``custom_components/`` or ``homeassistant/components/`` is found.

    :param exclude_integrations: integration names to skip while searching.
    :raises MissingIntegrationFrame: if no integration frame is on the stack.
    """
    found_frame = None
    if not exclude_integrations:
        exclude_integrations = set()
    frame: FrameType | None = get_current_frame()
    while frame is not None:
        filename = frame.f_code.co_filename
        for path in ("custom_components/", "homeassistant/components/"):
            try:
                index = filename.index(path)
                start = index + len(path)
                end = filename.index("/", start)
                # The path segment directly after the marker is the
                # integration name.
                integration = filename[start:end]
                if integration not in exclude_integrations:
                    found_frame = frame
                    break
            except ValueError:
                # Marker (or the trailing "/") not in this filename.
                continue
        if found_frame is not None:
            break
        frame = frame.f_back
    if found_frame is None:
        raise MissingIntegrationFrame
    found_module: str | None = None
    # Best-effort: map the frame's file back to a loaded module name.
    # dict() snapshots sys.modules to avoid mutation during iteration.
    for module, module_obj in dict(sys.modules).items():
        if not hasattr(module_obj, "__file__"):
            continue
        if module_obj.__file__ == found_frame.f_code.co_filename:
            found_module = module
            break
    # NOTE: `path`, `index` and `integration` deliberately retain their
    # values from the loop iteration that set found_frame.
    return IntegrationFrame(
        custom_integration=path == "custom_components/",
        integration=integration,
        module=found_module,
        relative_filename=found_frame.f_code.co_filename[index:],
        _frame=found_frame,
    )
Report incorrect usage. Async friendly.
def report(
    what: str,
    exclude_integrations: set | None = None,
    error_if_core: bool = True,
    level: int = logging.WARNING,
    log_custom_component_only: bool = False,
    error_if_integration: bool = False,
) -> None:
    """Report incorrect usage.

    Async friendly.

    :param what: description of the offending usage, spliced into the message.
    :param error_if_core: raise RuntimeError when no integration frame exists.
    :param log_custom_component_only: only report custom integrations.
    :param error_if_integration: raise (via _report_integration) instead of
        only logging when an integration is at fault.
    """
    try:
        integration_frame = get_integration_frame(
            exclude_integrations=exclude_integrations
        )
    except MissingIntegrationFrame as err:
        # No integration in the call stack: this is core (or untraceable)
        # code, handled according to error_if_core.
        msg = f"Detected code that {what}. Please report this issue."
        if error_if_core:
            raise RuntimeError(msg) from err
        if not log_custom_component_only:
            _LOGGER.warning(msg, stack_info=True)
        return

    if (
        error_if_integration
        or not log_custom_component_only
        or integration_frame.custom_integration
    ):
        _report_integration(what, integration_frame, level, error_if_integration)
Report incorrect usage in an integration. Async friendly.
def _report_integration( what: str, integration_frame: IntegrationFrame, level: int = logging.WARNING, error: bool = False, ) -> None: """Report incorrect usage in an integration. Async friendly. """ # Keep track of integrations already reported to prevent flooding key = f"{integration_frame.filename}:{integration_frame.line_number}" if not error and key in _REPORTED_INTEGRATIONS: return _REPORTED_INTEGRATIONS.add(key) hass: HomeAssistant | None = None with suppress(HomeAssistantError): hass = async_get_hass() report_issue = async_suggest_report_issue( hass, integration_domain=integration_frame.integration, module=integration_frame.module, ) integration_type = "custom " if integration_frame.custom_integration else "" _LOGGER.log( level, "Detected that %sintegration '%s' %s at %s, line %s: %s, please %s", integration_type, integration_frame.integration, what, integration_frame.relative_filename, integration_frame.line_number, integration_frame.line, report_issue, ) if not error: return raise RuntimeError( f"Detected that {integration_type}integration " f"'{integration_frame.integration}' {what} at " f"{integration_frame.relative_filename}, line " f"{integration_frame.line_number}: {integration_frame.line}. " f"Please {report_issue}." )
Mock a function to warn when it was about to be used.
def warn_use(func: _CallableT, what: str) -> _CallableT:
    """Mock a function to warn when it was about to be used.

    The replacement keeps the same (a)sync flavor as ``func`` so awaiting
    callers still work; it only calls report() and returns None.
    """
    if asyncio.iscoroutinefunction(func):

        @functools.wraps(func)
        async def report_use(*args: Any, **kwargs: Any) -> None:
            report(what)

    else:

        @functools.wraps(func)
        def report_use(*args: Any, **kwargs: Any) -> None:
            report(what)

    return cast(_CallableT, report_use)
Return entity_ids with group entity ids replaced by their members. Async friendly.
def expand_entity_ids(hass: HomeAssistant, entity_ids: Iterable[Any]) -> list[str]:
    """Return entity_ids with group entity ids replaced by their members.

    Async friendly. Order is preserved and duplicates are dropped
    (first occurrence wins). Groups are expanded recursively.
    """
    found_ids: list[str] = []
    for entity_id in entity_ids:
        # Ignore non-strings and the special "all"/"none" matchers.
        if not isinstance(entity_id, str) or entity_id in (
            ENTITY_MATCH_NONE,
            ENTITY_MATCH_ALL,
        ):
            continue

        entity_id = entity_id.lower()

        # If entity_id points at a group, expand it
        if entity_id.startswith(ENTITY_PREFIX):
            child_entities = get_entity_ids(hass, entity_id)
            if entity_id in child_entities:
                # Guard against a group that contains itself to avoid
                # infinite recursion; copy before mutating the attribute list.
                child_entities = list(child_entities)
                child_entities.remove(entity_id)
            found_ids.extend(
                ent_id
                for ent_id in expand_entity_ids(hass, child_entities)
                if ent_id not in found_ids
            )
        elif entity_id not in found_ids:
            found_ids.append(entity_id)

    return found_ids
Get members of this group. Async friendly.
def get_entity_ids(
    hass: HomeAssistant, entity_id: str, domain_filter: str | None = None
) -> list[str]:
    """Get members of this group.

    Async friendly. Returns [] when the entity is unknown or carries no
    member list; optionally keeps only members of domain_filter.
    """
    group_state = hass.states.get(entity_id)

    if not group_state or ATTR_ENTITY_ID not in group_state.attributes:
        return []

    members: list[str] = group_state.attributes[ATTR_ENTITY_ID]
    if not domain_filter:
        return members

    prefix = f"{domain_filter.lower()}."
    return [member for member in members if member.startswith(prefix)]
Wrap the handler classes.
def request_handler_factory(
    hass: HomeAssistant, view: HomeAssistantView, handler: Callable
) -> Callable[[web.Request], Awaitable[web.StreamResponse]]:
    """Wrap the handler classes.

    Builds an aiohttp request handler that enforces authentication, invokes
    the view handler (coroutine or HA callback) and converts its result
    (StreamResponse, (body, status) tuple, bytes, str or None) to a response.
    """
    is_coroutinefunction = asyncio.iscoroutinefunction(handler)
    assert is_coroutinefunction or is_callback(
        handler
    ), "Handler should be a coroutine or a callback."

    async def handle(request: web.Request) -> web.StreamResponse:
        """Handle incoming request."""
        if hass.is_stopping:
            # Reject new requests during shutdown.
            return web.Response(status=HTTPStatus.SERVICE_UNAVAILABLE)

        authenticated = request.get(KEY_AUTHENTICATED, False)

        if view.requires_auth and not authenticated:
            raise HTTPUnauthorized

        if _LOGGER.isEnabledFor(logging.DEBUG):
            _LOGGER.debug(
                "Serving %s to %s (auth: %s)",
                request.path,
                request.remote,
                authenticated,
            )

        try:
            # URL path placeholders (request.match_info) become kwargs.
            if is_coroutinefunction:
                result = await handler(request, **request.match_info)
            else:
                result = handler(request, **request.match_info)
        except vol.Invalid as err:
            raise HTTPBadRequest from err
        except exceptions.ServiceNotFound as err:
            raise HTTPInternalServerError from err
        except exceptions.Unauthorized as err:
            raise HTTPUnauthorized from err

        if isinstance(result, web.StreamResponse):
            # The method handler returned a ready-made Response, how nice of it
            return result

        status_code = HTTPStatus.OK

        if isinstance(result, tuple):
            # Handlers may return a (body, status_code) pair.
            result, status_code = result

        if isinstance(result, bytes):
            return web.Response(body=result, status=status_code)

        if isinstance(result, str):
            return web.Response(text=result, status=status_code)

        if result is None:
            return web.Response(body=b"", status=status_code)

        raise TypeError(
            f"Result should be None, string, bytes or StreamResponse. Got: {result}"
        )

    return handle
Return default httpx AsyncClient. This method must be run in the event loop.
def get_async_client(hass: HomeAssistant, verify_ssl: bool = True) -> httpx.AsyncClient:
    """Return default httpx AsyncClient.

    This method must be run in the event loop.

    One shared client per SSL-verification mode is cached in hass.data and
    created lazily on first use.
    """
    if verify_ssl:
        key = DATA_ASYNC_CLIENT
    else:
        key = DATA_ASYNC_CLIENT_NOVERIFY

    client: httpx.AsyncClient | None = hass.data.get(key)
    if client is None:
        client = create_async_httpx_client(hass, verify_ssl)
        hass.data[key] = client
    return client
Create a new httpx.AsyncClient with kwargs, i.e. for cookies. If auto_cleanup is False, the client will be automatically closed on homeassistant_stop. This method must be run in the event loop.
def create_async_httpx_client(
    hass: HomeAssistant,
    verify_ssl: bool = True,
    auto_cleanup: bool = True,
    ssl_cipher_list: SSLCipherList = SSLCipherList.PYTHON_DEFAULT,
    **kwargs: Any,
) -> httpx.AsyncClient:
    """Create a new httpx.AsyncClient with kwargs, i.e. for cookies.

    If auto_cleanup is True, the client will be
    automatically closed on homeassistant_stop.

    This method must be run in the event loop.
    """
    # Reuse the shared SSL context for the requested cipher list instead of
    # building a new one per client.
    ssl_context = (
        client_context(ssl_cipher_list)
        if verify_ssl
        else create_no_verify_ssl_context(ssl_cipher_list)
    )
    client = HassHttpXAsyncClient(
        verify=ssl_context,
        headers={USER_AGENT: SERVER_SOFTWARE},
        limits=DEFAULT_LIMITS,
        **kwargs,
    )

    # Replace aclose with a warning stub so integrations don't close the
    # shared client; keep the original for the shutdown hook below.
    original_aclose = client.aclose

    client.aclose = warn_use(  # type: ignore[method-assign]
        client.aclose, "closes the Home Assistant httpx client"
    )

    if auto_cleanup:
        _async_register_async_client_shutdown(hass, client, original_aclose)

    return client
Register httpx AsyncClient aclose on Home Assistant shutdown. This method must be run in the event loop.
def _async_register_async_client_shutdown(
    hass: HomeAssistant,
    client: httpx.AsyncClient,
    original_aclose: Callable[..., Any],
) -> None:
    """Register httpx AsyncClient aclose on Home Assistant shutdown.

    This method must be run in the event loop.
    """

    async def _close_client_on_stop(event: Event) -> None:
        """Close httpx client."""
        # Use the original aclose captured before it was wrapped by warn_use.
        await original_aclose()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _close_client_on_stop)
Return the icons json file location for a component. Ex: components/hue/icons.json
def _component_icons_path(integration: Integration) -> pathlib.Path: """Return the icons json file location for a component. Ex: components/hue/icons.json """ return integration.file_path / "icons.json"
Load and parse icons.json files.
def _load_icons_files(
    icons_files: dict[str, pathlib.Path],
) -> dict[str, dict[str, Any]]:
    """Load and parse icons.json files.

    Maps each component name to the parsed contents of its icons file.
    """
    loaded: dict[str, dict[str, Any]] = {}
    for component, icons_file in icons_files.items():
        loaded[component] = load_json_object(icons_file)
    return loaded
Return a battery icon valid identifier.
def icon_for_battery_level( battery_level: int | None = None, charging: bool = False ) -> str: """Return a battery icon valid identifier.""" icon = "mdi:battery" if battery_level is None: return f"{icon}-unknown" if charging and battery_level > 10: icon += f"-charging-{int(round(battery_level / 20 - 0.01)) * 20}" elif charging: icon += "-outline" elif battery_level <= 5: icon += "-alert" elif 5 < battery_level < 95: icon += f"-{int(round(battery_level / 10 - 0.01)) * 10}" return icon
Return a signal icon valid identifier.
def icon_for_signal_level(signal_level: int | None = None) -> str: """Return a signal icon valid identifier.""" if signal_level is None or signal_level == 0: return "mdi:signal-cellular-outline" if signal_level > 70: return "mdi:signal-cellular-3" if signal_level > 30: return "mdi:signal-cellular-2" return "mdi:signal-cellular-1"
Get a module.
def _get_module(cache: dict[str, ModuleType], name: str) -> ModuleType: """Get a module.""" cache[name] = importlib.import_module(name) return cache[name]
Process integration platforms for a component.
def _async_integration_platform_component_loaded(
    hass: HomeAssistant,
    integration_platforms: list[IntegrationPlatform],
    event: Event[EventComponentLoaded],
) -> None:
    """Process integration platforms for a component.

    Fired for every EVENT_COMPONENT_LOADED; dispatches each registered
    integration platform against the newly loaded component, using cached
    platform modules when possible to avoid creating a task.
    """
    # Platforms (entries with a ".") are not components; nothing to do.
    if "." in (component_name := event.data[ATTR_COMPONENT]):
        return

    integration = async_get_loaded_integration(hass, component_name)

    # First filter out platforms that the integration already processed.
    integration_platforms_by_name: dict[str, IntegrationPlatform] = {}
    for integration_platform in integration_platforms:
        if component_name in integration_platform.seen_components:
            continue
        integration_platform.seen_components.add(component_name)
        integration_platforms_by_name[integration_platform.platform_name] = (
            integration_platform
        )

    if not integration_platforms_by_name:
        return

    # Next, check which platforms exist for this integration.
    platforms_that_exist = integration.platforms_exists(integration_platforms_by_name)
    if not platforms_that_exist:
        return

    # If everything is already loaded, we can avoid creating a task.
    can_use_cache = True
    platforms: dict[str, ModuleType] = {}
    for platform_name in platforms_that_exist:
        if platform := integration.get_platform_cached(platform_name):
            platforms[platform_name] = platform
        else:
            can_use_cache = False
            break

    if can_use_cache:
        # Fast path: run synchronously with the cached modules.
        _process_integration_platforms(
            hass,
            integration,
            platforms,
            integration_platforms_by_name,
        )
        return

    # At least one of the platforms is not loaded, we need to load them
    # so we have to fall back to creating a task.
    hass.async_create_task_internal(
        _async_process_integration_platforms_for_component(
            hass, integration, platforms_that_exist, integration_platforms_by_name
        ),
        eager_start=True,
    )
Process integration platforms for a component. Only the platforms that are passed in will be processed.
def _process_integration_platforms(
    hass: HomeAssistant,
    integration: Integration,
    platforms: dict[str, ModuleType],
    integration_platforms_by_name: dict[str, IntegrationPlatform],
) -> list[asyncio.Future[Awaitable[None] | None]]:
    """Process integration platforms for a component.

    Only the platforms that are passed in will be processed; returns the
    futures of the jobs that actually started.
    """
    futures: list[asyncio.Future[Awaitable[None] | None]] = []
    for platform_name, platform in platforms.items():
        if not (integration_platform := integration_platforms_by_name[platform_name]):
            continue
        future = hass.async_run_hass_job(
            integration_platform.process_job,
            hass,
            integration.domain,
            platform,
        )
        if future:
            futures.append(future)
    return futures
Format error message.
def _format_err(name: str, platform_name: str, *args: Any) -> str: """Format error message.""" return f"Exception in {name} when processing platform '{platform_name}': {args}"
Register an intent with Home Assistant.
def async_register(hass: HomeAssistant, handler: IntentHandler) -> None:
    """Register an intent with Home Assistant.

    Replaces (with a warning) any handler already registered for the same
    intent type.
    """
    intents = hass.data.get(DATA_KEY)
    if intents is None:
        intents = hass.data[DATA_KEY] = {}

    assert handler.intent_type is not None, "intent_type cannot be None"

    if handler.intent_type in intents:
        _LOGGER.warning(
            "Intent %s is being overwritten by %s", handler.intent_type, handler
        )

    intents[handler.intent_type] = handler
Remove an intent from Home Assistant.
def async_remove(hass: HomeAssistant, intent_type: str) -> None:
    """Remove an intent from Home Assistant.

    A no-op when no intents are registered or the type is unknown.
    """
    intents = hass.data.get(DATA_KEY)
    if intents is None:
        return
    intents.pop(intent_type, None)
Return true if entity device class matches.
def _is_device_class( state: State, entity: entity_registry.RegistryEntry | None, device_classes: Collection[str], ) -> bool: """Return true if entity device class matches.""" # Try entity first if (entity is not None) and (entity.device_class is not None): # Entity device class can be None or blank as "unset" if entity.device_class in device_classes: return True # Fall back to state attribute device_class = state.attributes.get(ATTR_DEVICE_CLASS) return (device_class is not None) and (device_class in device_classes)
Return true if entity name or alias matches.
def _has_name( state: State, entity: entity_registry.RegistryEntry | None, name: str ) -> bool: """Return true if entity name or alias matches.""" if name in (state.entity_id, state.name.casefold()): return True # Check name/aliases if (entity is None) or (not entity.aliases): return False return any(name == alias.casefold() for alias in entity.aliases)
Find an area by id or name, checking aliases too.
def _find_area( id_or_name: str, areas: area_registry.AreaRegistry ) -> area_registry.AreaEntry | None: """Find an area by id or name, checking aliases too.""" area = areas.async_get_area(id_or_name) or areas.async_get_area_by_name(id_or_name) if area is not None: return area # Check area aliases for maybe_area in areas.areas.values(): if not maybe_area.aliases: continue for area_alias in maybe_area.aliases: if id_or_name == area_alias.casefold(): return maybe_area return None
Find an floor by id or name, checking aliases too.
def _find_floor( id_or_name: str, floors: floor_registry.FloorRegistry ) -> floor_registry.FloorEntry | None: """Find an floor by id or name, checking aliases too.""" floor = floors.async_get_floor(id_or_name) or floors.async_get_floor_by_name( id_or_name ) if floor is not None: return floor # Check floor aliases for maybe_floor in floors.floors.values(): if not maybe_floor.aliases: continue for floor_alias in maybe_floor.aliases: if id_or_name == floor_alias.casefold(): return maybe_floor return None
Filter state/entity pairs by an area.
def _filter_by_areas( states_and_entities: list[tuple[State, entity_registry.RegistryEntry | None]], areas: Iterable[area_registry.AreaEntry], devices: device_registry.DeviceRegistry, ) -> Iterable[tuple[State, entity_registry.RegistryEntry | None]]: """Filter state/entity pairs by an area.""" filter_area_ids: set[str | None] = {a.id for a in areas} entity_area_ids: dict[str, str | None] = {} for _state, entity in states_and_entities: if entity is None: continue if entity.area_id: # Use entity's area id first entity_area_ids[entity.id] = entity.area_id elif entity.device_id: # Fall back to device area if not set on entity device = devices.async_get(entity.device_id) if device is not None: entity_area_ids[entity.id] = device.area_id for state, entity in states_and_entities: if (entity is not None) and (entity_area_ids.get(entity.id) in filter_area_ids): yield (state, entity)
Find states that match the constraints.
def async_match_states(
    hass: HomeAssistant,
    name: str | None = None,
    area_name: str | None = None,
    area: area_registry.AreaEntry | None = None,
    floor_name: str | None = None,
    floor: floor_registry.FloorEntry | None = None,
    domains: Collection[str] | None = None,
    device_classes: Collection[str] | None = None,
    states: Iterable[State] | None = None,
    entities: entity_registry.EntityRegistry | None = None,
    areas: area_registry.AreaRegistry | None = None,
    floors: floor_registry.FloorRegistry | None = None,
    devices: device_registry.DeviceRegistry | None = None,
    assistant: str | None = None,
) -> Iterable[State]:
    """Find states that match the constraints.

    Generator: yields matching State objects. Registries default to the
    live hass registries when not supplied. An unknown floor_name or
    area_name ends the search with no results (after a warning).
    """
    if states is None:
        # All states
        states = hass.states.async_all()

    if entities is None:
        entities = entity_registry.async_get(hass)

    if devices is None:
        devices = device_registry.async_get(hass)

    if areas is None:
        areas = area_registry.async_get(hass)

    if floors is None:
        floors = floor_registry.async_get(hass)

    # Gather entities
    states_and_entities: list[tuple[State, entity_registry.RegistryEntry | None]] = []
    for state in states:
        entity = entities.async_get(state.entity_id)
        if (entity is not None) and entity.entity_category:
            # Skip diagnostic entities
            continue
        states_and_entities.append((state, entity))

    # Filter by domain and device class
    if domains:
        states_and_entities = [
            (state, entity)
            for state, entity in states_and_entities
            if state.domain in domains
        ]

    if device_classes:
        # Check device class in state attribute and in entity entry (if available)
        states_and_entities = [
            (state, entity)
            for state, entity in states_and_entities
            if _is_device_class(state, entity, device_classes)
        ]

    filter_areas: list[area_registry.AreaEntry] = []

    if (floor is None) and (floor_name is not None):
        # Look up floor by name
        floor = _find_floor(floor_name, floors)
        if floor is None:
            _LOGGER.warning("Floor not found: %s", floor_name)
            # Bare return in a generator: yields nothing.
            return

    if floor is not None:
        # A floor constraint selects every area on that floor.
        filter_areas = [
            a for a in areas.async_list_areas() if a.floor_id == floor.floor_id
        ]

    if (area is None) and (area_name is not None):
        # Look up area by name
        area = _find_area(area_name, areas)
        if area is None:
            _LOGGER.warning("Area not found: %s", area_name)
            return

    if area is not None:
        # An explicit area overrides the floor-derived area list.
        filter_areas = [area]

    if filter_areas:
        # Filter by states/entities by area
        states_and_entities = list(
            _filter_by_areas(states_and_entities, filter_areas, devices)
        )

    if assistant is not None:
        # Filter by exposure
        states_and_entities = [
            (state, entity)
            for state, entity in states_and_entities
            if async_should_expose(hass, assistant, state.entity_id)
        ]

    if name is not None:
        # Filter by name
        name = name.casefold()

        # Check states
        for state, entity in states_and_entities:
            if _has_name(state, entity, name):
                yield state

    else:
        # Not filtered by name
        for state, _entity in states_and_entities:
            yield state
Test if state supports a feature.
def async_test_feature(state: State, feature: int, feature_name: str) -> None:
    """Test if state supports a feature.

    Raises IntentHandleError when the feature bit is not set.
    """
    supported = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
    if not supported & feature:
        raise IntentHandleError(f"Entity {state.name} does not support {feature_name}")
Get issue registry.
def async_get(hass: HomeAssistant) -> IssueRegistry:
    """Get issue registry.

    The registry is stored in hass.data under DATA_REGISTRY at setup time.
    """
    registry = hass.data[DATA_REGISTRY]
    return cast(IssueRegistry, registry)
Create an issue, or replace an existing one.
def async_create_issue(
    hass: HomeAssistant,
    domain: str,
    issue_id: str,
    *,
    breaks_in_ha_version: str | None = None,
    data: dict[str, str | int | float | None] | None = None,
    is_fixable: bool,
    is_persistent: bool = False,
    issue_domain: str | None = None,
    learn_more_url: str | None = None,
    severity: IssueSeverity,
    translation_key: str,
    translation_placeholders: dict[str, str] | None = None,
) -> None:
    """Create an issue, or replace an existing one.

    Validates breaks_in_ha_version as CalVer before touching the registry.
    """
    if breaks_in_ha_version:
        # Verify the breaks_in_ha_version is a valid version string.
        AwesomeVersion(
            breaks_in_ha_version,
            ensure_strategy=AwesomeVersionStrategy.CALVER,
        )

    async_get(hass).async_get_or_create(
        domain,
        issue_id,
        breaks_in_ha_version=breaks_in_ha_version,
        data=data,
        is_fixable=is_fixable,
        is_persistent=is_persistent,
        issue_domain=issue_domain,
        learn_more_url=learn_more_url,
        severity=severity,
        translation_key=translation_key,
        translation_placeholders=translation_placeholders,
    )
Create an issue, or replace an existing one.
def create_issue(
    hass: HomeAssistant,
    domain: str,
    issue_id: str,
    *,
    breaks_in_ha_version: str | None = None,
    data: dict[str, str | int | float | None] | None = None,
    is_fixable: bool,
    is_persistent: bool = False,
    issue_domain: str | None = None,
    learn_more_url: str | None = None,
    severity: IssueSeverity,
    translation_key: str,
    translation_placeholders: dict[str, str] | None = None,
) -> None:
    """Create an issue, or replace an existing one."""
    # Schedule the callback on the event loop thread and block until done.
    callback = ft.partial(
        async_create_issue,
        hass,
        domain,
        issue_id,
        breaks_in_ha_version=breaks_in_ha_version,
        data=data,
        is_fixable=is_fixable,
        is_persistent=is_persistent,
        issue_domain=issue_domain,
        learn_more_url=learn_more_url,
        severity=severity,
        translation_key=translation_key,
        translation_placeholders=translation_placeholders,
    )
    return run_callback_threadsafe(hass.loop, callback).result()
Delete an issue. It is not an error to delete an issue that does not exist.
def async_delete_issue(hass: HomeAssistant, domain: str, issue_id: str) -> None:
    """Delete an issue.

    It is not an error to delete an issue that does not exist.
    """
    async_get(hass).async_delete(domain, issue_id)
Delete an issue. It is not an error to delete an issue that does not exist.
def delete_issue(hass: HomeAssistant, domain: str, issue_id: str) -> None:
    """Delete an issue.

    It is not an error to delete an issue that does not exist.
    """
    # Run on the event loop thread; block the caller until it completes.
    future = run_callback_threadsafe(
        hass.loop, async_delete_issue, hass, domain, issue_id
    )
    return future.result()
Ignore an issue. Will raise if the issue does not exist.
def async_ignore_issue(
    hass: HomeAssistant, domain: str, issue_id: str, ignore: bool
) -> None:
    """Ignore an issue.

    Will raise if the issue does not exist.
    """
    registry = async_get(hass)
    registry.async_ignore(domain, issue_id, ignore)
Convert Home Assistant objects. Hand other objects to the original method.
def json_encoder_default(obj: Any) -> Any:
    """Convert Home Assistant objects.

    Hand other objects to the original method.
    """
    # Branch order matters: an object may match several checks below.
    if hasattr(obj, "json_fragment"):
        return obj.json_fragment
    if isinstance(obj, (set, tuple)):
        return [*obj]
    if isinstance(obj, float):
        # Re-wrap so float subclasses come out as plain floats
        # (presumably for the serializer's benefit — plain floats
        # round-trip unchanged).
        return float(obj)
    if hasattr(obj, "as_dict"):
        return obj.as_dict()
    if isinstance(obj, Path):
        return obj.as_posix()
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError
Strip NUL from an object.
def _strip_null(obj: Any) -> Any: """Strip NUL from an object.""" if isinstance(obj, str): return obj.split("\0", 1)[0] if isinstance(obj, dict): return {key: _strip_null(o) for key, o in obj.items()} if isinstance(obj, list): return [_strip_null(o) for o in obj] return obj
Dump json bytes after terminating strings at the first NUL.
def json_bytes_strip_null(data: Any) -> bytes:
    """Dump json bytes after terminating strings at the first NUL."""
    # NUL characters should be very rare, so optimistically encode first
    # and only re-process when the escaped form shows up in the output.
    encoded = json_bytes(data)
    if b"\\u0000" in encoded:
        # Round-trip through the already-encoded result so Home Assistant
        # extensions (sets, tuples, ...) are reduced to plain JSON before
        # the NUL stripping pass.
        return json_bytes(_strip_null(orjson.loads(encoded)))
    return encoded
Dump json string. orjson supports serializing dataclasses natively which eliminates the need to implement as_dict in many places when the data is already in a dataclass. This works well as long as all the data in the dataclass can also be serialized. If it turns out to be a problem we can disable this with option `|= orjson.OPT_PASSTHROUGH_DATACLASS` and it will fallback to as_dict
def json_dumps(data: Any) -> str:
    r"""Dump json string.

    orjson supports serializing dataclasses natively which
    eliminates the need to implement as_dict in many places
    when the data is already in a dataclass. This works well
    as long as all the data in the dataclass can also be serialized.

    If it turns out to be a problem we can disable this with option
    \|= orjson.OPT_PASSTHROUGH_DATACLASS and it will fallback to as_dict
    """
    encoded = json_bytes(data)
    return encoded.decode("utf-8")
Dump json string with keys sorted.
def json_dumps_sorted(data: Any) -> str:
    """Dump json string with keys sorted."""
    option = orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS
    encoded = orjson.dumps(data, option=option, default=json_encoder_default)
    return encoded.decode("utf-8")
JSON encoder that uses orjson with hass defaults and returns a str.
def _orjson_default_encoder(data: Any) -> str:
    """JSON encoder that uses orjson with hass defaults and returns a str."""
    encoded = _orjson_bytes_default_encoder(data)
    return encoded.decode("utf-8")
JSON encoder that uses orjson with hass defaults and returns bytes.
def _orjson_bytes_default_encoder(data: Any) -> bytes:
    """JSON encoder that uses orjson with hass defaults and returns bytes."""
    option = orjson.OPT_INDENT_2 | orjson.OPT_NON_STR_KEYS
    return orjson.dumps(data, option=option, default=json_encoder_default)
Save JSON data to a file.
def save_json(
    filename: str,
    data: list | dict,
    private: bool = False,
    *,
    encoder: type[json.JSONEncoder] | None = None,
    atomic_writes: bool = False,
) -> None:
    """Save JSON data to a file.

    Args:
        filename: Path of the file to write.
        data: JSON-serializable payload (list or dict).
        private: Passed through to the file writer to restrict permissions.
        encoder: Optional custom ``json.JSONEncoder``; when omitted (or when
            the stock ``JSONEncoder`` is given) the fast orjson path is used.
        atomic_writes: Write via a temp file + rename when True.

    Raises:
        SerializationError: If the data cannot be serialized to JSON; the
            message names the file and the path(s) of the bad data.
    """
    dump: Callable[[Any], Any]
    try:
        # For backwards compatibility, if they pass in the
        # default json encoder we use _orjson_default_encoder
        # which is the orjson equivalent to the default encoder.
        if encoder and encoder is not JSONEncoder:
            # If they pass a custom encoder that is not the
            # default JSONEncoder, we use the slow path of json.dumps
            mode = "w"
            dump = json.dumps
            json_data: str | bytes = json.dumps(data, indent=2, cls=encoder)
        else:
            mode = "wb"
            dump = _orjson_default_encoder
            json_data = _orjson_bytes_default_encoder(data)
    except TypeError as error:
        formatted_data = format_unserializable_data(
            find_paths_unserializable_data(data, dump=dump)
        )
        # Name the target file so the log points at the failing write
        # (previously this logged the literal placeholder "(unknown)").
        msg = f"Failed to serialize to JSON: {filename}. Bad data at {formatted_data}"
        _LOGGER.error(msg)
        raise SerializationError(msg) from error

    method = write_utf8_file_atomic if atomic_writes else write_utf8_file
    method(filename, json_data, private, mode=mode)
Find the paths to unserializable data. This method is slow! Only use for error handling.
def find_paths_unserializable_data(
    bad_data: Any, *, dump: Callable[[Any], str] = json.dumps
) -> dict[str, Any]:
    """Find the paths to unserializable data.

    This method is slow! Only use for error handling.

    Returns a mapping from a JSONPath-like string (rooted at "$") to the
    offending object or dict key found at that location.
    """
    # Imported here, not at module level — presumably to avoid a circular
    # import with homeassistant.core (see the pylint disable).
    from homeassistant.core import (  # pylint: disable=import-outside-toplevel
        Event,
        State,
    )

    # Breadth-first walk: each queue entry is (object, path-so-far).
    to_process = deque([(bad_data, "$")])
    invalid = {}

    while to_process:
        obj, obj_path = to_process.popleft()

        try:
            # If the object serializes as-is, nothing inside it can be bad.
            dump(obj)
            continue
        except (ValueError, TypeError):
            pass

        # We convert objects with as_dict to their dict values
        # so we can find bad data inside it
        if hasattr(obj, "as_dict"):
            desc = obj.__class__.__name__
            # Annotate the path with identifying info for known HA types.
            if isinstance(obj, State):
                desc += f": {obj.entity_id}"
            elif isinstance(obj, Event):
                desc += f": {obj.event_type}"
            obj_path += f"({desc})"
            obj = obj.as_dict()

        if isinstance(obj, dict):
            for key, value in obj.items():
                try:
                    # Is key valid?
                    dump({key: None})
                except TypeError:
                    invalid[f"{obj_path}<key: {key}>"] = key
                else:
                    # Process value
                    to_process.append((value, f"{obj_path}.{key}"))
        elif isinstance(obj, list):
            for idx, value in enumerate(obj):
                to_process.append((value, f"{obj_path}[{idx}]"))
        else:
            # Leaf that failed to serialize and cannot be descended into.
            invalid[obj_path] = obj

    return invalid
Get label registry.
def async_get(hass: HomeAssistant) -> LabelRegistry:
    """Return the label registry stored on the hass instance."""
    registry = hass.data[DATA_REGISTRY]
    return cast(LabelRegistry, registry)
Test if state contains a valid location. Async friendly.
def has_location(state: State) -> bool:
    """Test if state contains a valid location.

    Async friendly.
    """
    if not isinstance(state, State):
        return False
    latitude = state.attributes.get(ATTR_LATITUDE)
    longitude = state.attributes.get(ATTR_LONGITUDE)
    # Both coordinates must be actual floats (ints are rejected, as before).
    return isinstance(latitude, float) and isinstance(longitude, float)
Return closest state to point. Async friendly.
def closest(latitude: float, longitude: float, states: Iterable[State]) -> State | None:
    """Return closest state to point.

    Async friendly.
    """
    candidates = [state for state in states if has_location(state)]
    if not candidates:
        return None

    def _distance_to(state: State) -> float:
        # Treat an unavailable distance (None) as 0, matching the
        # original "or 0" fallback.
        return (
            loc_util.distance(
                state.attributes.get(ATTR_LATITUDE),
                state.attributes.get(ATTR_LONGITUDE),
                latitude,
                longitude,
            )
            or 0
        )

    return min(candidates, key=_distance_to)