response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Bring a config entry up to current standards.
def _async_standardize_config_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Bring a config entry up to current standards."""
    # Entries created before the OAuth migration lack a token; force re-auth
    # so the user goes through the OAuth flow again.
    if CONF_TOKEN not in entry.data:
        raise ConfigEntryAuthFailed(
            "SimpliSafe OAuth standard requires re-authentication"
        )

    # Accumulate changes and apply them in a single update at the end.
    entry_updates = {}
    if not entry.unique_id:
        # If the config entry doesn't already have a unique ID, set one:
        entry_updates["unique_id"] = entry.data[CONF_USERNAME]
    if CONF_CODE in entry.data:
        # If an alarm code was provided as part of configuration.yaml, pop it out of
        # the config entry's data and move it to options:
        data = {**entry.data}
        entry_updates["data"] = data
        entry_updates["options"] = {
            **entry.options,
            CONF_CODE: data.pop(CONF_CODE),
        }
    if entry_updates:
        hass.config_entries.async_update_entry(entry, **entry_updates)
Set up the simulated sensor.
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the simulated sensor."""
    # Every option is optional in the schema; pass them straight through.
    sensor = SimulatedSensor(
        config.get(CONF_NAME),
        config.get(CONF_UNIT),
        config.get(CONF_AMP),
        config.get(CONF_MEAN),
        config.get(CONF_PERIOD),
        config.get(CONF_PHASE),
        config.get(CONF_FWHM),
        config.get(CONF_SEED),
        config.get(CONF_RELATIVE_TO_EPOCH),
    )
    add_entities([sensor], True)
Get the Sinch notification service.
def get_service(
    hass: HomeAssistant,
    config: ConfigType,
    discovery_info: DiscoveryInfoType | None = None,
) -> SinchNotificationService:
    """Get the Sinch notification service.

    The service is constructed directly from the platform configuration.
    """
    service = SinchNotificationService(config)
    return service
Process turn_on service params. Filters out unsupported params and validates the rest.
def process_turn_on_params(
    siren: SirenEntity, params: SirenTurnOnServiceParameters
) -> SirenTurnOnServiceParameters:
    """Process turn_on service params.

    Filters out unsupported params and validates the rest.
    """
    if not siren.supported_features & SirenEntityFeature.TONES:
        # Tones unsupported: silently drop any tone the caller passed.
        params.pop(ATTR_TONE, None)
    elif (tone := params.get(ATTR_TONE)) is not None:
        # Raise an exception if the specified tone isn't available
        # available_tones may be a list of names or a dict of id -> name;
        # a match against dict *values* is also accepted.
        is_tone_dict_value = bool(
            isinstance(siren.available_tones, dict)
            and tone in siren.available_tones.values()
        )
        if (
            not siren.available_tones
            or tone not in siren.available_tones
            and not is_tone_dict_value
        ):
            raise ValueError(
                f"Invalid tone specified for entity {siren.entity_id}: {tone}, "
                "check the available_tones attribute for valid tones to pass in"
            )
        # If available tones is a dict, and the tone provided is a dict value, we need
        # to transform it to the corresponding dict key before returning
        if is_tone_dict_value:
            assert isinstance(siren.available_tones, dict)
            params[ATTR_TONE] = next(
                key for key, value in siren.available_tones.items() if value == tone
            )
    if not siren.supported_features & SirenEntityFeature.DURATION:
        params.pop(ATTR_DURATION, None)
    if not siren.supported_features & SirenEntityFeature.VOLUME_SET:
        params.pop(ATTR_VOLUME_LEVEL, None)
    return params
Set up the Skybeacon sensor.
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Skybeacon sensor."""
    name = config.get(CONF_NAME)
    mac = config.get(CONF_MAC)
    _LOGGER.debug("Setting up")

    # A single BLE monitor thread feeds both the temperature and
    # humidity entities.
    mon = Monitor(hass, mac, name)
    add_entities([SkybeaconTemp(name, mon)])
    add_entities([SkybeaconHumid(name, mon)])

    def monitor_stop(_service_or_event):
        """Stop the monitor thread."""
        _LOGGER.info("Stopping monitor for %s", name)
        mon.terminate()

    # Ensure the monitor thread is torn down cleanly on HA shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)
    mon.start()
Return the filename of a passed URL.
def _async_get_filename_from_url(url: str) -> str: """Return the filename of a passed URL.""" parsed_url = urlparse(url) return os.path.basename(parsed_url.path)
Remove leading # symbols from each channel name in a list.
def _async_sanitize_channel_names(channel_list: list[str]) -> list[str]: """Remove any # symbols from a channel list.""" return [channel.lstrip("#") for channel in channel_list]
Create a device given a bed.
def device_from_bed(bed: SleepIQBed) -> DeviceInfo:
    """Build the device-registry entry for a SleepIQ bed."""
    return DeviceInfo(
        connections={(dr.CONNECTION_NETWORK_MAC, bed.mac_addr)},
        manufacturer="SleepNumber",
        model=bed.model,
        name=bed.name,
    )
Find the sleeper for a side or the first sleeper.
def sleeper_for_side(bed: SleepIQBed, side: str) -> SleepIQSleeper:
    """Find the sleeper for a side, defaulting to the bed's first sleeper."""
    return next(
        (candidate for candidate in bed.sleepers if candidate.side == side),
        bed.sleepers[0],
    )
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    # Keep the ordering of CAPABILITY_TO_ATTRIB; filter to what the device has.
    present = set(capabilities)
    return [capability for capability in CAPABILITY_TO_ATTRIB if capability in present]
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    supported = [
        Capability.air_conditioner_mode,
        Capability.demand_response_load_control,
        Capability.air_conditioner_fan_mode,
        Capability.switch,
        Capability.thermostat,
        Capability.thermostat_cooling_setpoint,
        Capability.thermostat_fan_mode,
        Capability.thermostat_heating_setpoint,
        Capability.thermostat_mode,
        Capability.thermostat_operating_state,
    ]
    # The legacy/deprecated all-in-one thermostat capability qualifies alone.
    if Capability.thermostat in capabilities:
        return supported
    # Otherwise the device must expose a complete thermostat capability set
    # or a complete air-conditioner capability set.
    required_sets = (
        [  # thermostat
            Capability.temperature_measurement,
            Capability.thermostat_cooling_setpoint,
            Capability.thermostat_heating_setpoint,
            Capability.thermostat_mode,
        ],
        [  # air conditioner
            Capability.air_conditioner_mode,
            Capability.air_conditioner_fan_mode,
            Capability.switch,
            Capability.temperature_measurement,
            Capability.thermostat_cooling_setpoint,
        ],
    )
    for required in required_sets:
        if all(capability in capabilities for capability in required):
            return supported
    return None
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    min_required = [
        Capability.door_control,
        Capability.garage_door_control,
        Capability.window_shade,
    ]
    # At least one cover-type capability is required.
    if not any(capability in capabilities for capability in min_required):
        return None
    # Return all capabilities supported/consumed.
    return [
        *min_required,
        Capability.battery,
        Capability.switch_level,
        Capability.window_shade_level,
    ]
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    # A switch capability is mandatory: it is the only way to turn it on/off.
    if Capability.switch not in capabilities:
        return None
    optional = [
        Capability.air_conditioner_fan_mode,
        Capability.fan_speed,
    ]
    present = [capability for capability in optional if capability in capabilities]
    # Without at least one fan-specific capability this is not a fan;
    # returning None keeps the platform from being set up for it.
    if not present:
        return None
    return [Capability.switch, *present]
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    # Must be able to be turned on/off.
    if Capability.switch not in capabilities:
        return None
    light_capabilities = (
        Capability.color_control,
        Capability.color_temperature,
        Capability.switch_level,
    )
    # At least one light-specific capability must be present.
    if not any(capability in capabilities for capability in light_capabilities):
        return None
    return [
        Capability.switch,
        Capability.switch_level,
        Capability.color_control,
        Capability.color_temperature,
    ]
Convert a value to a different scale.
def convert_scale(value, value_scale, target_scale, round_digits=4):
    """Convert a value from one scale to another, rounded to round_digits."""
    rescaled = value * target_scale / value_scale
    return round(rescaled, round_digits)
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    # Only the lock capability matters here.
    if Capability.lock not in capabilities:
        return None
    return [Capability.lock]
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    # Preserve CAPABILITY_TO_SENSORS ordering; keep only what the device has.
    present = set(capabilities)
    return [capability for capability in CAPABILITY_TO_SENSORS if capability in present]
Format the unique id for a config entry.
def format_unique_id(app_id: str, location_id: str) -> str:
    """Format the unique id for a config entry."""
    return "_".join((app_id, location_id))
Ensure Home Assistant is setup properly to receive webhooks.
def validate_webhook_requirements(hass: HomeAssistant) -> bool:
    """Ensure Home Assistant is setup properly to receive webhooks."""
    # A Nabu Casa subscription or an existing cloudhook is always sufficient.
    if cloud.async_active_subscription(hass):
        return True
    if hass.data[DOMAIN][CONF_CLOUDHOOK_URL] is not None:
        return True
    # Otherwise the locally generated webhook URL must be HTTPS.
    local_url = get_webhook_url(hass)
    return local_url.lower().startswith("https://")
Get the URL of the webhook. Return the cloudhook if available, otherwise local webhook.
def get_webhook_url(hass: HomeAssistant) -> str:
    """Get the URL of the webhook.

    Return the cloudhook if available, otherwise local webhook.
    """
    domain_data = hass.data[DOMAIN]
    cloudhook_url = domain_data[CONF_CLOUDHOOK_URL]
    if cloud.async_active_subscription(hass) and cloudhook_url is not None:
        return cloudhook_url
    return webhook.async_generate_url(hass, domain_data[CONF_WEBHOOK_ID])
Configure an individual SmartApp in hass. Register the SmartApp with the SmartAppManager so that hass will service lifecycle events (install, event, etc...). A unique SmartApp is created for each SmartThings account that is configured in hass.
def setup_smartapp(hass, app):
    """Configure an individual SmartApp in hass.

    Register the SmartApp with the SmartAppManager so that hass will
    service lifecycle events (install, event, etc...). A unique SmartApp
    is created for each SmartThings account that is configured in hass.
    """
    manager = hass.data[DOMAIN][DATA_MANAGER]
    existing = manager.smartapps.get(app.app_id)
    if existing:
        # Already registered for this account; reuse it.
        return existing
    registered = manager.register(app.app_id, app.webhook_public_key)
    registered.name = app.display_name
    registered.description = app.description
    registered.permissions.extend(APP_OAUTH_SCOPES)
    return registered
Return all capabilities supported if minimum required are present.
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    # Must be able to be turned on/off; meters are consumed opportunistically.
    if Capability.switch not in capabilities:
        return None
    return [Capability.switch, Capability.energy_meter, Capability.power_meter]
Return the name of the specified spa.
def get_spa_name(spa: smarttub.Spa) -> str:
    """Return the display name (brand + model) of the specified spa."""
    brand, model = spa.brand, spa.model
    return f"{brand} {model}"
Set up the smarty environment.
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the smarty environment."""
    conf = config[DOMAIN]

    host = conf[CONF_HOST]
    name = conf[CONF_NAME]

    _LOGGER.debug("Name: %s, host: %s", name, host)

    smarty = Smarty(host=host)

    # Shared state for the platforms loaded below.
    hass.data[DOMAIN] = {"api": smarty, "name": name}

    # Initial update
    smarty.update()

    # Load platforms
    discovery.load_platform(hass, Platform.FAN, DOMAIN, {}, config)
    discovery.load_platform(hass, Platform.SENSOR, DOMAIN, {}, config)
    discovery.load_platform(hass, Platform.BINARY_SENSOR, DOMAIN, {}, config)

    def poll_device_update(event_time):
        """Update Smarty device."""
        _LOGGER.debug("Updating Smarty device")
        if smarty.update():
            _LOGGER.debug("Update success")
            # Notify all entities listening on the dispatcher signal.
            dispatcher_send(hass, SIGNAL_UPDATE_SMARTY)
        else:
            _LOGGER.debug("Update failed")

    # Poll the unit every 30 seconds.
    track_time_interval(hass, poll_device_update, timedelta(seconds=30))

    return True
Get the mail notification service.
def get_service(
    hass: HomeAssistant,
    config: ConfigType,
    discovery_info: DiscoveryInfoType | None = None,
) -> MailNotificationService | None:
    """Get the mail notification service."""
    setup_reload_service(hass, DOMAIN, PLATFORMS)
    mail_service = MailNotificationService(
        config[CONF_SERVER],
        config[CONF_PORT],
        config[CONF_TIMEOUT],
        config[CONF_SENDER],
        config[CONF_ENCRYPTION],
        config.get(CONF_USERNAME),
        config.get(CONF_PASSWORD),
        config[CONF_RECIPIENT],
        config.get(CONF_SENDER_NAME),
        config[CONF_DEBUG],
        config[CONF_VERIFY_SSL],
    )
    # Verify SMTP connectivity up front; return None so the platform is not
    # created with a broken configuration.
    if mail_service.connection_is_valid():
        return mail_service

    return None
Build plaintext email.
def _build_text_msg(message):
    """Build plaintext email."""
    _LOGGER.debug("Building plain text email")
    msg = MIMEText(message)
    return msg
Create a message attachment. If MIMEImage is successful and content_id is passed (HTML), add images in-line. Otherwise add them as attachments.
def _attach_file(hass, atch_name, content_id=""):
    """Create a message attachment.

    If MIMEImage is successful and content_id is passed (HTML), add images in-line.
    Otherwise add them as attachments.
    """
    try:
        file_path = Path(atch_name).parent
        # Refuse paths outside the configured allowlist_external_dirs.
        if os.path.exists(file_path) and not hass.config.is_allowed_path(
            str(file_path)
        ):
            allow_list = "allowlist_external_dirs"
            file_name = os.path.basename(atch_name)
            url = "https://www.home-assistant.io/docs/configuration/basic/"
            raise ServiceValidationError(
                translation_domain=DOMAIN,
                translation_key="remote_path_not_allowed",
                translation_placeholders={
                    "allow_list": allow_list,
                    "file_path": file_path,
                    "file_name": file_name,
                    "url": url,
                },
            )
        with open(atch_name, "rb") as attachment_file:
            file_bytes = attachment_file.read()
    except FileNotFoundError:
        # Best-effort: a missing attachment is skipped, not fatal.
        _LOGGER.warning("Attachment %s not found. Skipping", atch_name)
        return None

    try:
        attachment = MIMEImage(file_bytes)
    except TypeError:
        # Not an image: fall back to a generic binary attachment.
        _LOGGER.warning(
            "Attachment %s has an unknown MIME type. Falling back to file",
            atch_name,
        )
        attachment = MIMEApplication(file_bytes, Name=os.path.basename(atch_name))
        attachment["Content-Disposition"] = (
            f'attachment; filename="{os.path.basename(atch_name)}"'
        )
    else:
        if content_id:
            # Referenced from the HTML body via cid:, so embed in-line.
            attachment.add_header("Content-ID", f"<{content_id}>")
        else:
            attachment.add_header(
                "Content-Disposition",
                f"attachment; filename={os.path.basename(atch_name)}",
            )
    return attachment
Build Multipart message with images as attachments.
def _build_multipart_msg(hass, message, images):
    """Build Multipart message with images as attachments."""
    # Fix: the debug text was corrupted in the original
    # ("...image attachme_build_html_msgnt(s)"); restore the intended wording.
    _LOGGER.debug("Building multipart email with image attachment(s)")
    msg = MIMEMultipart()
    body_txt = MIMEText(message)
    msg.attach(body_txt)
    for atch_name in images:
        # _attach_file returns None for missing files; skip those.
        attachment = _attach_file(hass, atch_name)
        if attachment:
            msg.attach(attachment)
    return msg
Build Multipart message with in-line images and rich HTML (UTF-8).
def _build_html_msg(hass, text, html, images):
    """Build Multipart message with in-line images and rich HTML (UTF-8)."""
    _LOGGER.debug("Building HTML rich email")
    msg = MIMEMultipart("related")
    alternative = MIMEMultipart("alternative")
    alternative.attach(MIMEText(text, _charset="utf-8"))
    alternative.attach(MIMEText(html, ATTR_HTML, _charset="utf-8"))
    msg.attach(alternative)

    for atch_name in images:
        # Use the base file name as the Content-ID so the HTML body can
        # reference the image in-line via cid:.
        attachment = _attach_file(hass, atch_name, os.path.basename(atch_name))
        if attachment:
            msg.attach(attachment)
    return msg
Register snapcast services.
def register_services():
    """Register snapcast services."""
    platform = entity_platform.async_get_current_platform()

    # Parameterless services dispatch straight to entity methods by name.
    platform.async_register_entity_service(SERVICE_SNAPSHOT, {}, "snapshot")
    platform.async_register_entity_service(SERVICE_RESTORE, {}, "async_restore")
    # Join requires the master entity to group with.
    platform.async_register_entity_service(
        SERVICE_JOIN, {vol.Required(ATTR_MASTER): cv.entity_id}, handle_async_join
    )
    platform.async_register_entity_service(SERVICE_UNJOIN, {}, handle_async_unjoin)
    platform.async_register_entity_service(
        SERVICE_SET_LATENCY,
        {vol.Required(ATTR_LATENCY): cv.positive_int},
        handle_set_latency,
    )
Convert snips builtin types to usable values.
def resolve_slot_values(slot):
    """Convert snips builtin types to usable values."""
    value_info = slot["value"]
    # Prefer the parsed value; fall back to the raw utterance text.
    value = value_info["value"] if "value" in value_info else slot["rawValue"]

    if slot.get("entity") == "snips/duration":
        # Durations arrive split into calendar fields; flatten to seconds.
        duration = timedelta(
            weeks=value_info["weeks"],
            days=value_info["days"],
            hours=value_info["hours"],
            minutes=value_info["minutes"],
            seconds=value_info["seconds"],
        )
        value = duration.total_seconds()
    return value
Create the SolarEdge Monitoring API sensor.
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Create the SolarEdge Monitoring API sensor."""
    ip_address = config[CONF_IP_ADDRESS]
    platform_name = config[CONF_NAME]

    # Create new SolarEdge object to retrieve data.
    api = SolarEdge(f"http://{ip_address}/")

    # Check if api can be reached and site is active.
    try:
        status = api.get_status()
        _LOGGER.debug("Credentials correct and site is active")
    except AttributeError:
        _LOGGER.error("Missing details data in solaredge status")
        return
    except (ConnectTimeout, HTTPError):
        _LOGGER.error("Could not retrieve details from SolarEdge API")
        return

    # Create solaredge data service which will retrieve and update the data.
    data = SolarEdgeData(hass, api)

    # Changing inverter temperature unit.
    # ("farenheit" spelling comes from the upstream API response.)
    inverter_temp_description = SENSOR_TYPE_INVERTER_TEMPERATURE
    if status.inverters.primary.temperature.units.farenheit:
        inverter_temp_description = dataclasses.replace(
            inverter_temp_description,
            native_unit_of_measurement=UnitOfTemperature.FAHRENHEIT,
        )

    # Create entities
    entities = [
        SolarEdgeSensor(platform_name, data, description)
        for description in (*SENSOR_TYPES, inverter_temp_description)
    ]

    # Meter sensors are optional; probing metersList raises IndexError
    # when the corresponding meter is absent.
    try:
        if status.metersList[0]:
            entities.extend(
                [
                    SolarEdgeSensor(platform_name, data, description)
                    for description in SENSOR_TYPES_ENERGY_IMPORT
                ]
            )
    except IndexError:
        _LOGGER.debug("Import meter sensors are not created")

    try:
        if status.metersList[1]:
            entities.extend(
                [
                    SolarEdgeSensor(platform_name, data, description)
                    for description in SENSOR_TYPES_ENERGY_EXPORT
                ]
            )
    except IndexError:
        _LOGGER.debug("Export meter sensors are not created")

    add_entities(entities, True)
Return the hosts already configured.
def solarlog_entries(hass: HomeAssistant):
    """Return the hosts already configured."""
    configured = hass.config_entries.async_entries(DOMAIN)
    return {entry.data[CONF_HOST] for entry in configured}
Check if the response returned from the Connect API is a success or not.
def is_api_response_success(api_response: dict) -> bool:
    """Check if the response returned from the Connect API is a success or not."""
    if "result" not in api_response:
        return False
    # The API reports success/failure in the "result" field, case-insensitively.
    return api_response["result"].lower() == "success"
Soma api call decorator.
def soma_api_call(
    api_call: Callable[[_SomaEntityT], Coroutine[Any, Any, dict]],
) -> Callable[[_SomaEntityT], Coroutine[Any, Any, dict]]:
    """Soma api call decorator.

    Wraps an entity API coroutine: tracks availability of both the SOMA
    Connect bridge and the device, logging only on state transitions, and
    returns an empty dict on any failure.
    """

    async def inner(self: _SomaEntityT) -> dict:
        response = {}
        try:
            response_from_api = await api_call(self)
        except RequestException:
            # Bridge unreachable; warn only on the transition to unavailable.
            if self.api_is_available:
                _LOGGER.warning("Connection to SOMA Connect failed")
                self.api_is_available = False
        else:
            if not self.api_is_available:
                self.api_is_available = True
                _LOGGER.info("Connection to SOMA Connect succeeded")

            if not is_api_response_success(response_from_api):
                # Bridge reachable but the device itself did not respond.
                if self.is_available:
                    self.is_available = False
                    _LOGGER.warning(
                        (
                            "Device is unreachable (%s). Error while fetching the"
                            " state: %s"
                        ),
                        self.name,
                        response_from_api["msg"],
                    )
            else:
                if not self.is_available:
                    self.is_available = True
                    _LOGGER.info("Device %s is now reachable", self.name)
                # Only a successful response is propagated to the caller.
                response = response_from_api
        return response

    return inner
Create the attributes for disk space.
def get_disk_space_attr(disks: list[Diskspace]) -> dict[str, str]:
    """Create the attributes for disk space."""
    attrs: dict[str, str] = {}
    bytes_per_unit = 1024**3
    for disk in disks:
        free = disk.freeSpace / bytes_per_unit
        total = disk.totalSpace / bytes_per_unit
        usage = free / total * 100
        attrs[disk.path] = (
            f"{free:.2f}/{total:.2f}{UnitOfInformation.GIGABYTES} ({usage:.2f}%)"
        )
    return attrs
Create the attributes for series queue.
def get_queue_attr(queue: SonarrQueue) -> dict[str, str]:
    """Create the attributes for series queue.

    Maps "<series title> SxxEyy" to the download completion percentage.
    """
    attrs: dict[str, str] = {}
    for item in queue.records:
        # A zero-byte item would divide by zero; treat it as 0% complete.
        remaining = 1 if item.size == 0 else item.sizeleft / item.size
        remaining_pct = 100 * (1 - remaining)
        # Fix: the attribute expression was mangled in the original
        # ("item.episode. episodeNumber"); normalized here.
        identifier = (
            f"S{item.episode.seasonNumber:02d}E{item.episode.episodeNumber:02d}"
        )
        attrs[f"{item.series.title} {identifier}"] = f"{remaining_pct:.2f}%"
    return attrs
Create the attributes for missing series.
def get_wanted_attr(wanted: SonarrWantedMissing) -> dict[str, str]:
    """Create the attributes for missing series."""
    attrs: dict[str, str] = {}
    for item in wanted.records:
        identifier = f"S{item.seasonNumber:02d}E{item.episodeNumber:02d}"
        name = f"{item.series.title} {identifier}"
        # Air dates come in naive-UTC; attach the zone, then localize.
        air_date_utc = item.airDateUtc.replace(tzinfo=dt_util.UTC)
        attrs[name] = dt_util.as_local(air_date_utc).isoformat()
    return attrs
Filter out specified UPnP errors and raise exceptions for service calls.
def soco_error(
    errorcodes: list[str] | None = None,
) -> Callable[[_FuncType[_T, _P, _R]], _ReturnFuncType[_T, _P, _R]]:
    """Filter out specified UPnP errors and raise exceptions for service calls."""

    def decorator(funct: _FuncType[_T, _P, _R]) -> _ReturnFuncType[_T, _P, _R]:
        """Decorate functions."""

        def wrapper(self: _T, *args: _P.args, **kwargs: _P.kwargs) -> _R | None:
            """Wrap for all soco UPnP exception."""
            # A SoCo positional argument, if given, identifies the target device.
            args_soco = next((arg for arg in args if isinstance(arg, SoCo)), None)
            try:
                result = funct(self, *args, **kwargs)
            except (OSError, SoCoException, SoCoUPnPException, Timeout) as err:
                error_code = getattr(err, "error_code", None)
                function = funct.__qualname__
                if errorcodes and error_code in errorcodes:
                    # Caller opted to ignore this UPnP error code.
                    _LOGGER.debug(
                        "Error code %s ignored in call to %s", error_code, function
                    )
                    return None
                if (target := _find_target_identifier(self, args_soco)) is None:
                    raise RuntimeError("Unexpected use of soco_error") from err
                message = f"Error calling {function} on {target}: {err}"
                raise SonosUpdateError(message) from err

            # A successful call proves the speaker is alive; broadcast activity.
            dispatch_soco = args_soco or self.soco  # type: ignore[union-attr]
            dispatcher_send(
                self.hass,
                f"{SONOS_SPEAKER_ACTIVITY}-{dispatch_soco.uid}",
                funct.__qualname__,
            )
            return result

        return wrapper

    return decorator
Extract the best available target identifier from the provided instance object.
def _find_target_identifier(instance: Any, fallback_soco: SoCo | None) -> str | None:
    """Extract the best available target identifier from the provided instance object."""
    entity_id = getattr(instance, "entity_id", None)
    if entity_id:
        # SonosEntity instance
        return entity_id
    zone_name = getattr(instance, "zone_name", None)
    if zone_name:
        # SonosSpeaker instance
        return zone_name
    speaker = getattr(instance, "speaker", None)
    if speaker:
        # Holds a SonosSpeaker instance attribute
        return speaker.zone_name
    soco = getattr(instance, "soco", fallback_soco)
    if soco:
        # Holds a SoCo instance attribute; only use attributes with no I/O.
        return soco._player_name or soco.ip_address  # pylint: disable=protected-access
    return None
Convert a Sonos hostname to a uid.
def hostname_to_uid(hostname: str) -> str:
    """Convert a Sonos hostname to a uid."""
    # Hostnames look like "Sonos-<id>.local." or "sonos<id>.local.".
    for prefix in ("Sonos-", "sonos"):
        if hostname.startswith(prefix):
            baseuid = hostname.removeprefix(prefix).replace(".local.", "")
            return f"{UID_PREFIX}{baseuid}{UID_POSTFIX}"
    raise ValueError(f"{hostname} is not a sonos device.")
Ensure I/O attributes are cached and return visible zones.
def sync_get_visible_zones(soco: SoCo) -> set[SoCo]:
    """Ensure I/O attributes are cached and return visible zones."""
    # Touching these properties forces the underlying network fetch so the
    # values are cached before visible_zones is consumed elsewhere.
    _ = soco.household_id
    _ = soco.uid
    return soco.visible_zones
Parse a time-span into number of seconds.
def _timespan_secs(timespan: str | None) -> None | int:
    """Parse a time-span into number of seconds."""
    # Sentinel strings (and None) indicate the value is not available.
    if timespan in UNAVAILABLE_VALUES:
        return None
    return int(time_period_str(timespan).total_seconds())
Get thumbnail URL.
def get_thumbnail_url_full(
    media: SonosMedia,
    is_internal: bool,
    get_browse_image_url: GetBrowseImageUrlType,
    media_content_type: str,
    media_content_id: str,
    media_image_id: str | None = None,
) -> str | None:
    """Get thumbnail URL."""
    if not is_internal:
        # External access: defer to the proxying browse-image URL builder.
        return urllib.parse.unquote(
            get_browse_image_url(
                media_content_type,
                media_content_id,
                media_image_id,
            )
        )
    # Internal access: resolve the item and use its album art directly.
    item = get_media(media.library, media_content_id, media_content_type)
    return urllib.parse.unquote(getattr(item, "album_art_uri", ""))
Filter media sources.
def media_source_filter(item: BrowseMedia) -> bool:
    """Keep only audio media sources."""
    content_type = item.media_content_type
    return content_type.startswith("audio/")
Create response payload for the provided media query.
def build_item_response(
    media_library: MusicLibrary, payload: dict[str, str], get_thumbnail_url=None
) -> BrowseMedia | None:
    """Create response payload for the provided media query."""
    if payload["search_type"] == MediaType.ALBUM and payload["idstring"].startswith(
        ("A:GENRE", "A:COMPOSER")
    ):
        # Albums reached via a genre/composer path are re-queried by album
        # artist, dropping the first two path segments.
        payload["idstring"] = "A:ALBUMARTIST/" + "/".join(
            payload["idstring"].split("/")[2:]
        )

    payload["idstring"] = urllib.parse.unquote(payload["idstring"])

    try:
        search_type = MEDIA_TYPES_TO_SONOS[payload["search_type"]]
    except KeyError:
        _LOGGER.debug(
            "Unknown media type received when building item response: %s",
            payload["search_type"],
        )
        return None

    media = media_library.browse_by_idstring(
        search_type,
        payload["idstring"],
        full_album_art_uri=True,
        max_items=0,
    )
    if media is None:
        return None

    thumbnail = None
    title = None

    # Fetch album info for titles and thumbnails
    # Can't be extracted from track info
    if (
        payload["search_type"] == MediaType.ALBUM
        and media[0].item_class == "object.item.audioItem.musicTrack"
    ):
        idstring = payload["idstring"]
        if idstring.startswith("A:ALBUMARTIST/"):
            search_type = SONOS_ALBUM_ARTIST
        elif idstring.startswith("A:ALBUM/"):
            search_type = SONOS_ALBUM
        item = get_media(media_library, idstring, search_type)
        title = getattr(item, "title", None)
        # NOTE(review): get_thumbnail_url defaults to None but is called
        # unconditionally here — callers appear to always pass it; confirm.
        thumbnail = get_thumbnail_url(search_type, payload["idstring"])

    if not title:
        # Fall back to the second path segment, then to the static mapping.
        try:
            title = urllib.parse.unquote(payload["idstring"].split("/")[1])
        except IndexError:
            title = LIBRARY_TITLES_MAPPING[payload["idstring"]]

    try:
        media_class = SONOS_TO_MEDIA_CLASSES[
            MEDIA_TYPES_TO_SONOS[payload["search_type"]]
        ]
    except KeyError:
        _LOGGER.debug("Unknown media type received %s", payload["search_type"])
        return None

    children = []
    for item in media:
        # Children of unmappable types are silently skipped.
        with suppress(UnknownMediaType):
            children.append(item_payload(item, get_thumbnail_url))

    return BrowseMedia(
        title=title,
        thumbnail=thumbnail,
        media_class=media_class,
        media_content_id=payload["idstring"],
        media_content_type=payload["search_type"],
        children=children,
        can_play=can_play(payload["search_type"]),
        can_expand=can_expand(payload["search_type"]),
    )
Create response payload for a single media item. Used by async_browse_media.
def item_payload(item: DidlObject, get_thumbnail_url=None) -> BrowseMedia:
    """Create response payload for a single media item.

    Used by async_browse_media.

    Raises UnknownMediaType when the item's type cannot be mapped to a
    media class.
    """
    media_type = get_media_type(item)
    try:
        media_class = SONOS_TO_MEDIA_CLASSES[media_type]
    except KeyError as err:
        _LOGGER.debug("Unknown media type received %s", media_type)
        raise UnknownMediaType from err
    content_id = get_content_id(item)
    thumbnail = None
    if getattr(item, "album_art_uri", None):
        # NOTE(review): get_thumbnail_url defaults to None but is called here
        # whenever album art exists — callers appear to always pass it; confirm.
        thumbnail = get_thumbnail_url(media_class, content_id)
    return BrowseMedia(
        title=item.title,
        thumbnail=thumbnail,
        media_class=media_class,
        media_content_id=content_id,
        media_content_type=SONOS_TO_MEDIA_TYPES[media_type],
        can_play=can_play(item.item_class),
        can_expand=can_expand(item),
    )
Create response payload to describe contents of a specific library. Used by async_browse_media.
def library_payload(media_library: MusicLibrary, get_thumbnail_url=None) -> BrowseMedia:
    """Create response payload to describe contents of a specific library.

    Used by async_browse_media.
    """
    children = []
    for item in media_library.browse():
        # Items of unmappable types are silently skipped.
        with suppress(UnknownMediaType):
            children.append(item_payload(item, get_thumbnail_url))
    return BrowseMedia(
        title="Music Library",
        media_class=MediaClass.DIRECTORY,
        media_content_id="library",
        media_content_type="library",
        can_play=False,
        can_expand=True,
        children=children,
    )
Create response payload describing the groups of favorites. Used by async_browse_media.
def favorites_payload(favorites: SonosFavorites) -> BrowseMedia:
    """Create response payload describing the groups of favorites.

    Used by async_browse_media.
    """
    children: list[BrowseMedia] = []

    # Group favorites by their underlying DIDL item class; each group becomes
    # an expandable folder.
    group_types: set[str] = {fav.reference.item_class for fav in favorites}
    for group_type in sorted(group_types):
        try:
            media_content_type = SONOS_TYPES_MAPPING[group_type]
            media_class = SONOS_TO_MEDIA_CLASSES[group_type]
        except KeyError:
            _LOGGER.debug("Unknown media type or class received %s", group_type)
            continue
        children.append(
            BrowseMedia(
                title=media_content_type.title(),
                media_class=media_class,
                media_content_id=group_type,
                media_content_type="favorites_folder",
                can_play=False,
                can_expand=True,
            )
        )
    return BrowseMedia(
        title="Favorites",
        media_class=MediaClass.DIRECTORY,
        media_content_id="",
        media_content_type="favorites",
        can_play=False,
        can_expand=True,
        children=children,
    )
Create response payload to describe all items of a type of favorite. Used by async_browse_media.
def favorites_folder_payload(
    favorites: SonosFavorites, media_content_id: str
) -> BrowseMedia:
    """Create response payload to describe all items of a type of favorite.

    Used by async_browse_media.
    """
    children: list[BrowseMedia] = []
    content_type = SONOS_TYPES_MAPPING[media_content_id]

    for favorite in favorites:
        # Only include favorites of the requested item class.
        if favorite.reference.item_class != media_content_id:
            continue
        children.append(
            BrowseMedia(
                title=favorite.title,
                media_class=SONOS_TO_MEDIA_CLASSES[favorite.reference.item_class],
                media_content_id=favorite.item_id,
                media_content_type="favorite_item_id",
                can_play=True,
                can_expand=False,
                thumbnail=getattr(favorite, "album_art_uri", None),
            )
        )
    return BrowseMedia(
        title=content_type.title(),
        media_class=MediaClass.DIRECTORY,
        media_content_id="",
        media_content_type="favorites",
        can_play=False,
        can_expand=True,
        children=children,
    )
Extract media type of item.
def get_media_type(item: DidlObject) -> str:
    """Extract media type of item."""
    if item.item_class == "object.item.audioItem.musicTrack":
        return SONOS_TRACKS
    if (
        item.item_class == "object.container.album.musicAlbum"
        and SONOS_TYPES_MAPPING.get(item.item_id.split("/")[0])
        in [
            SONOS_ALBUM_ARTIST,
            SONOS_GENRE,
        ]
    ):
        # NOTE(review): this branch maps by item_class while the fallback maps
        # by the item_id prefix — appears intentional for albums reached via
        # artist/genre; confirm against the mapping tables.
        return SONOS_TYPES_MAPPING[item.item_class]
    return SONOS_TYPES_MAPPING.get(item.item_id.split("/")[0], item.item_class)
Test if playable. Used by async_browse_media.
def can_play(item: str) -> bool:
    """Test if playable.

    Used by async_browse_media.
    """
    # Annotation fix: both call sites pass a string (a search_type or an
    # item_class), never a DidlObject — the value is used directly as the
    # lookup key, so the old DidlObject hint was wrong.
    return SONOS_TO_MEDIA_TYPES.get(item) in PLAYABLE_MEDIA_TYPES
Test if expandable. Used by async_browse_media.
def can_expand(item: DidlObject | str) -> bool:
    """Test if expandable.

    Used by async_browse_media.

    Accepts either a search-type string or a DidlObject; the old annotation
    omitted the string case even though it is explicitly handled below.
    """
    if isinstance(item, str):
        return SONOS_TYPES_MAPPING.get(item) in EXPANDABLE_MEDIA_TYPES
    if SONOS_TO_MEDIA_TYPES.get(item.item_class) in EXPANDABLE_MEDIA_TYPES:
        return True
    return SONOS_TYPES_MAPPING.get(item.item_id) in EXPANDABLE_MEDIA_TYPES
Extract content id or uri.
def get_content_id(item: DidlObject) -> str:
    """Extract content id or uri."""
    # Tracks are addressed by their playable URI; everything else by item id.
    if item.item_class == "object.item.audioItem.musicTrack":
        return cast(str, item.get_uri())
    return cast(str, item.item_id)
Fetch a single media/album.
def get_media(
    media_library: MusicLibrary, item_id: str, search_type: str
) -> MusicServiceItem | None:
    """Fetch a single media/album.

    Returns the first library item matching ``item_id`` for the given
    ``search_type``, or None if nothing matches.
    """
    _LOGGER.debug("get_media item_id [%s], search_type [%s]", item_id, search_type)
    # Translate HA media types into the Sonos library vocabulary.
    search_type = MEDIA_TYPES_TO_SONOS.get(search_type, search_type)
    if search_type == "playlists":
        # Format is S:TITLE or S:ITEM_ID
        splits = item_id.split(":")
        title = splits[1] if len(splits) > 1 else None
        # Match a playlist either by exact item id or by title.
        return next(
            (
                p
                for p in media_library.get_playlists()
                if (item_id == p.item_id or title == p.title)
            ),
            None,
        )

    # Normalize album ids onto the album-artist hierarchy.
    if not item_id.startswith("A:ALBUM") and search_type == SONOS_ALBUM:
        item_id = "A:ALBUMARTIST/" + "/".join(item_id.split("/")[2:])

    if item_id.startswith("A:ALBUM/") or search_type == "tracks":
        # Direct search by the (URL-encoded) final path segment.
        search_term = urllib.parse.unquote(item_id.split("/")[-1])
        matches = media_library.get_music_library_information(
            search_type, search_term=search_term, full_album_art_uri=True
        )
    else:
        # When requesting media by album_artist, composer, genre use the browse interface
        # to navigate the hierarchy. This occurs when invoked from media browser or service
        # calls
        # Example: A:ALBUMARTIST/Neil Young/Greatest Hits - get specific album
        # Example: A:ALBUMARTIST/Neil Young - get all albums
        # Others: composer, genre
        # A:<topic>/<name>/<optional title>
        splits = item_id.split("/")
        title = urllib.parse.unquote(splits[2]) if len(splits) > 2 else None
        browse_id_string = splits[0] + "/" + splits[1]
        matches = media_library.browse_by_idstring(
            search_type, browse_id_string, full_album_art_uri=True
        )
        if title:
            # NOTE(review): if no browse result matches the title this
            # produces matches == [None] and the function returns None below.
            result = next(
                (item for item in matches if (title == item.title)),
                None,
            )
            matches = [result]

    _LOGGER.debug(
        "get_media search_type [%s] item_id [%s] matches [%d]",
        search_type,
        item_id,
        len(matches),
    )
    if len(matches) > 0:
        return matches[0]
    return None
Represent a balance measure returned by SoCo as a number. SoCo returns a pair of volumes, one for the left side and one for the right side. When the two are equal, sound is centered; HA will show that as 0. When the left side is louder, HA will show a negative value, and a positive value means the right side is louder. Maximum absolute value is 100, which means only one side produces sound at all.
def _balance_to_number(state: tuple[int, int]) -> float: """Represent a balance measure returned by SoCo as a number. SoCo returns a pair of volumes, one for the left side and one for the right side. When the two are equal, sound is centered; HA will show that as 0. When the left side is louder, HA will show a negative value, and a positive value means the right side is louder. Maximum absolute value is 100, which means only one side produces sound at all. """ left, right = state return (right - left) * 100 // max(right, left)
Convert a balance value from -100 to 100 into SoCo format. 0 becomes (100, 100), fully enabling both sides. Note that the master volume control is separate, so this does not turn up the speakers to maximum volume. Negative values reduce the volume of the right side, and positive values reduce the volume of the left side. -100 becomes (100, 0), fully muting the right side, and +100 becomes (0, 100), muting the left side.
def _balance_from_number(value: float) -> tuple[int, int]: """Convert a balance value from -100 to 100 into SoCo format. 0 becomes (100, 100), fully enabling both sides. Note that the master volume control is separate, so this does not turn up the speakers to maximum volume. Negative values reduce the volume of the right side, and positive values reduce the volume of the left side. -100 becomes (100, 0), fully muting the right side, and +100 becomes (0, 100), muting the left side. """ left = min(100, 100 - int(value)) right = min(100, int(value) + 100) return left, right
Connect to Sony projector using network.
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Connect to Sony projector using network."""
    host = config[CONF_HOST]
    name = config[CONF_NAME]
    projector = pysdcp.Projector(host)

    # Sanity check the connection before registering the entity.
    try:
        projector.get_power()
    except ConnectionError:
        _LOGGER.error("Failed to connect to projector '%s'", host)
        return

    _LOGGER.debug("Validated projector '%s' OK", host)
    add_entities([SonyProjector(projector, name)], True)
Register the SpaceAPI with the HTTP interface.
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Register the SpaceAPI with the HTTP interface."""
    # Store the configured SpaceAPI payload for the view to serve.
    spaceapi_config = config[DOMAIN]
    hass.data[DATA_SPACEAPI] = spaceapi_config
    hass.http.register_view(APISpaceApiView)
    return True
Get the alarm state.
def _get_alarm_state(area: Area) -> str | None:
    """Get the alarm state."""
    # A verified alarm always wins over the configured arm mode.
    if area.verified_alarm:
        return STATE_ALARM_TRIGGERED

    return {
        AreaMode.UNSET: STATE_ALARM_DISARMED,
        AreaMode.PART_SET_A: STATE_ALARM_ARMED_HOME,
        AreaMode.PART_SET_B: STATE_ALARM_ARMED_NIGHT,
        AreaMode.FULL_SET: STATE_ALARM_ARMED_AWAY,
    }.get(area.mode)
Create response payload for the provided media query.
def build_item_response(  # noqa: C901
    spotify: Spotify,
    user: dict[str, Any],
    payload: dict[str, str | None],
    *,
    can_play_artist: bool,
) -> BrowseMedia | None:
    """Create response payload for the provided media query.

    Dispatches on the requested content type, fetches the matching
    Spotify library data, and wraps it in a BrowseMedia tree.
    Returns None when the query is incomplete or yields no media.
    """
    media_content_type = payload["media_content_type"]
    media_content_id = payload["media_content_id"]
    if media_content_type is None or media_content_id is None:
        return None

    title = None
    image = None
    media: dict[str, Any] | None = None
    items = []

    # Fetch items per content type; `media` stays None when the API
    # returns nothing, which short-circuits to None below.
    if media_content_type == BrowsableMedia.CURRENT_USER_PLAYLISTS:
        if media := spotify.current_user_playlists(limit=BROWSE_LIMIT):
            items = media.get("items", [])
    elif media_content_type == BrowsableMedia.CURRENT_USER_FOLLOWED_ARTISTS:
        if media := spotify.current_user_followed_artists(limit=BROWSE_LIMIT):
            items = media.get("artists", {}).get("items", [])
    elif media_content_type == BrowsableMedia.CURRENT_USER_SAVED_ALBUMS:
        if media := spotify.current_user_saved_albums(limit=BROWSE_LIMIT):
            items = [item["album"] for item in media.get("items", [])]
    elif media_content_type == BrowsableMedia.CURRENT_USER_SAVED_TRACKS:
        if media := spotify.current_user_saved_tracks(limit=BROWSE_LIMIT):
            items = [item["track"] for item in media.get("items", [])]
    elif media_content_type == BrowsableMedia.CURRENT_USER_SAVED_SHOWS:
        if media := spotify.current_user_saved_shows(limit=BROWSE_LIMIT):
            items = [item["show"] for item in media.get("items", [])]
    elif media_content_type == BrowsableMedia.CURRENT_USER_RECENTLY_PLAYED:
        if media := spotify.current_user_recently_played(limit=BROWSE_LIMIT):
            items = [item["track"] for item in media.get("items", [])]
    elif media_content_type == BrowsableMedia.CURRENT_USER_TOP_ARTISTS:
        if media := spotify.current_user_top_artists(limit=BROWSE_LIMIT):
            items = media.get("items", [])
    elif media_content_type == BrowsableMedia.CURRENT_USER_TOP_TRACKS:
        if media := spotify.current_user_top_tracks(limit=BROWSE_LIMIT):
            items = media.get("items", [])
    elif media_content_type == BrowsableMedia.FEATURED_PLAYLISTS:
        if media := spotify.featured_playlists(
            country=user["country"], limit=BROWSE_LIMIT
        ):
            items = media.get("playlists", {}).get("items", [])
    elif media_content_type == BrowsableMedia.CATEGORIES:
        if media := spotify.categories(country=user["country"], limit=BROWSE_LIMIT):
            items = media.get("categories", {}).get("items", [])
    elif media_content_type == "category_playlists":
        if (
            media := spotify.category_playlists(
                category_id=media_content_id,
                country=user["country"],
                limit=BROWSE_LIMIT,
            )
        ) and (category := spotify.category(media_content_id, country=user["country"])):
            title = category.get("name")
            image = fetch_image_url(category, key="icons")
            items = media.get("playlists", {}).get("items", [])
    elif media_content_type == BrowsableMedia.NEW_RELEASES:
        if media := spotify.new_releases(country=user["country"], limit=BROWSE_LIMIT):
            items = media.get("albums", {}).get("items", [])
    elif media_content_type == MediaType.PLAYLIST:
        if media := spotify.playlist(media_content_id):
            items = [item["track"] for item in media.get("tracks", {}).get("items", [])]
    elif media_content_type == MediaType.ALBUM:
        if media := spotify.album(media_content_id):
            items = media.get("tracks", {}).get("items", [])
    elif media_content_type == MediaType.ARTIST:
        if (media := spotify.artist_albums(media_content_id, limit=BROWSE_LIMIT)) and (
            artist := spotify.artist(media_content_id)
        ):
            title = artist.get("name")
            image = fetch_image_url(artist)
            items = media.get("items", [])
    elif media_content_type == MEDIA_TYPE_SHOW:
        if (media := spotify.show_episodes(media_content_id, limit=BROWSE_LIMIT)) and (
            show := spotify.show(media_content_id)
        ):
            title = show.get("name")
            image = fetch_image_url(show)
            items = media.get("items", [])

    if media is None:
        return None

    try:
        media_class = CONTENT_TYPE_MEDIA_CLASS[media_content_type]
    except KeyError:
        _LOGGER.debug("Unknown media type received: %s", media_content_type)
        return None

    # Categories get a special tree: each child is a playlist folder.
    if media_content_type == BrowsableMedia.CATEGORIES:
        media_item = BrowseMedia(
            can_expand=True,
            can_play=False,
            children_media_class=media_class["children"],
            media_class=media_class["parent"],
            media_content_id=media_content_id,
            media_content_type=f"{MEDIA_PLAYER_PREFIX}{media_content_type}",
            title=LIBRARY_MAP.get(media_content_id, "Unknown"),
        )

        media_item.children = []
        for item in items:
            try:
                item_id = item["id"]
            except KeyError:
                _LOGGER.debug("Missing ID for media item: %s", item)
                continue
            media_item.children.append(
                BrowseMedia(
                    can_expand=True,
                    can_play=False,
                    children_media_class=MediaClass.TRACK,
                    media_class=MediaClass.PLAYLIST,
                    media_content_id=item_id,
                    media_content_type=f"{MEDIA_PLAYER_PREFIX}category_playlists",
                    thumbnail=fetch_image_url(item, key="icons"),
                    title=item.get("name"),
                )
            )
        return media_item

    if title is None:
        title = LIBRARY_MAP.get(media_content_id, "Unknown")
        if "name" in media:
            title = media["name"]

    # Artists are only playable when the connected account supports it.
    can_play = media_content_type in PLAYABLE_MEDIA_TYPES and (
        media_content_type != MediaType.ARTIST or can_play_artist
    )

    browse_media = BrowseMedia(
        can_expand=True,
        can_play=can_play,
        children_media_class=media_class["children"],
        media_class=media_class["parent"],
        media_content_id=media_content_id,
        media_content_type=f"{MEDIA_PLAYER_PREFIX}{media_content_type}",
        thumbnail=image,
        title=title,
    )

    browse_media.children = []
    for item in items:
        try:
            browse_media.children.append(
                item_payload(item, can_play_artist=can_play_artist)
            )
        except (MissingMediaInformation, UnknownMediaType):
            # Skip items that cannot be rendered instead of failing the tree.
            continue

    if "images" in media:
        browse_media.thumbnail = fetch_image_url(media)

    return browse_media
Create response payload for a single media item. Used by async_browse_media.
def item_payload(item: dict[str, Any], *, can_play_artist: bool) -> BrowseMedia:
    """Create response payload for a single media item.

    Used by async_browse_media.
    """
    try:
        item_type = item["type"]
        item_uri = item["uri"]
    except KeyError as err:
        _LOGGER.debug("Missing type or URI for media item: %s", item)
        raise MissingMediaInformation from err

    try:
        item_class = CONTENT_TYPE_MEDIA_CLASS[item_type]
    except KeyError as err:
        _LOGGER.debug("Unknown media type received: %s", item_type)
        raise UnknownMediaType from err

    # Tracks and episodes are leaves; everything else can be browsed into.
    expandable = item_type not in (MediaType.TRACK, MediaType.EPISODE)
    playable = item_type in PLAYABLE_MEDIA_TYPES and (
        item_type != MediaType.ARTIST or can_play_artist
    )

    thumbnail = None
    if "images" in item:
        thumbnail = fetch_image_url(item)
    elif MediaType.ALBUM in item:
        thumbnail = fetch_image_url(item[MediaType.ALBUM])

    return BrowseMedia(
        can_expand=expandable,
        can_play=playable,
        children_media_class=item_class["children"],
        media_class=item_class["parent"],
        media_content_id=item_uri,
        media_content_type=f"{MEDIA_PLAYER_PREFIX}{item_type}",
        thumbnail=thumbnail,
        title=item.get("name", "Unknown"),
    )
Create response payload to describe contents of a specific library. Used by async_browse_media.
def library_payload(*, can_play_artist: bool) -> BrowseMedia:
    """Create response payload to describe contents of a specific library.

    Used by async_browse_media.
    """
    browse_media = BrowseMedia(
        can_expand=True,
        can_play=False,
        children_media_class=MediaClass.DIRECTORY,
        media_class=MediaClass.DIRECTORY,
        media_content_id="library",
        media_content_type=f"{MEDIA_PLAYER_PREFIX}library",
        title="Media Library",
    )
    # One child per top-level library entry; the type doubles as the URI.
    browse_media.children = [
        item_payload(
            {"name": name, "type": item_type, "uri": item_type},
            can_play_artist=can_play_artist,
        )
        for item_type, name in LIBRARY_MAP.items()
    ]
    return browse_media
Decorate Spotify calls to handle Spotify exception. A decorator that wraps the passed in function, catches Spotify errors, aiohttp exceptions and handles the availability of the media player.
def spotify_exception_handler(
    func: Callable[Concatenate[_SpotifyMediaPlayerT, _P], _R],
) -> Callable[Concatenate[_SpotifyMediaPlayerT, _P], _R | None]:
    """Decorate Spotify calls to handle Spotify exception.

    A decorator that wraps the passed in function, catches Spotify errors,
    aiohttp exceptions and handles the availability of the media player.
    """

    def wrapper(
        self: _SpotifyMediaPlayerT, *args: _P.args, **kwargs: _P.kwargs
    ) -> _R | None:
        # pylint: disable=protected-access
        try:
            result = func(self, *args, **kwargs)
        except requests.RequestException:
            # Network problems mark the entity unavailable but do not raise.
            self._attr_available = False
            return None
        except SpotifyException as exc:
            self._attr_available = False
            if exc.reason == "NO_ACTIVE_DEVICE":
                raise HomeAssistantError("No active playback device found") from None
            raise HomeAssistantError(f"Spotify error: {exc.reason}") from exc
        # A successful call re-marks the entity as available.
        self._attr_available = True
        return result

    return wrapper
Register system health callbacks.
def async_register(
    hass: HomeAssistant, register: system_health.SystemHealthRegistration
) -> None:
    """Register system health callbacks."""
    # Expose this integration's info callback on the System Health panel.
    register.async_register_info(system_health_info)
Return whether the media_content_type is a valid Spotify media_id.
def is_spotify_media_type(media_content_type: str) -> bool:
    """Return whether the media_content_type is a valid Spotify media_id."""
    # Spotify ids are distinguished by the media-player prefix.
    return media_content_type[: len(MEDIA_PLAYER_PREFIX)] == MEDIA_PLAYER_PREFIX
Return actual spotify media_content_type.
def resolve_spotify_media_type(media_content_type: str) -> str:
    """Return actual spotify media_content_type."""
    # Strip the media-player prefix when present, otherwise pass through.
    if media_content_type.startswith(MEDIA_PLAYER_PREFIX):
        return media_content_type[len(MEDIA_PLAYER_PREFIX) :]
    return media_content_type
Fetch image url.
def fetch_image_url(item: dict[str, Any], key="images") -> str | None:
    """Fetch image url."""
    images = item.get(key, [])
    # Only a non-empty list of image dicts yields a URL.
    if not isinstance(images, list) or not images:
        return None
    return images[0].get("url")
Extract spotify URI from media browser URL.
def spotify_uri_from_media_browser_url(media_content_id: str) -> str:
    """Extract spotify URI from media browser URL."""
    if not media_content_id or not media_content_id.startswith(MEDIA_PLAYER_PREFIX):
        return media_content_id
    # The last path segment of the media browser URL is the Spotify URI.
    return yarl.URL(media_content_id).name
Validate that value is a SQL SELECT query.
def validate_sql_select(value: str) -> str:
    """Validate that value is a SQL SELECT query."""
    statements = sqlparse.parse(value.lstrip().lstrip(";"))
    if len(statements) > 1:
        raise MultipleResultsFound
    if not statements or (query_type := statements[0].get_type()) == "UNKNOWN":
        raise ValueError
    if query_type != "SELECT":
        _LOGGER.debug("The SQL query %s is of type %s", statements, query_type)
        raise SQLParseError
    return str(statements[0])
Validate SQL query.
def validate_query(db_url: str, query: str, column: str) -> bool:
    """Validate SQL query.

    Runs the query once against the database and verifies that the
    configured column is present in every returned row.

    Raises ValueError when the query cannot be executed and
    NoSuchColumnError when the column is missing from the result.
    """
    engine = sqlalchemy.create_engine(db_url, future=True)
    sessmaker = scoped_session(sessionmaker(bind=engine, future=True))
    sess: Session = sessmaker()

    # Always release the session and dispose of the engine, whatever the
    # outcome; the previous implementation duplicated this cleanup on
    # every exit path.
    try:
        try:
            result: Result = sess.execute(sqlalchemy.text(query))
        except SQLAlchemyError as error:
            _LOGGER.debug("Execution error %s", error)
            raise ValueError(error) from error

        for res in result.mappings():
            if column not in res:
                _LOGGER.debug("Column `%s` is not returned by the query", column)
                raise NoSuchColumnError(
                    f"Column {column} is not returned by the query."
                )

            data = res[column]
            _LOGGER.debug("Return value from query: %s", data)

        return True
    finally:
        sess.close()
        engine.dispose()
Get or initialize domain data.
def _async_get_or_init_domain_data(hass: HomeAssistant) -> SQLData:
    """Get or initialize domain data.

    Creates the shared SQLData container on first use and registers a
    shutdown hook that disposes every cached database engine.
    """
    if DOMAIN in hass.data:
        sql_data: SQLData = hass.data[DOMAIN]
        return sql_data

    # Session makers are cached per db_url so multiple sensors can share
    # one engine per database.
    session_makers_by_db_url: dict[str, scoped_session] = {}

    #
    # Ensure we dispose of all engines at shutdown
    # to avoid unclean disconnects
    #
    # Shutdown all sessions in the executor since they will
    # do blocking I/O
    #
    def _shutdown_db_engines(event: Event) -> None:
        """Shutdown all database engines."""
        for sessmaker in session_makers_by_db_url.values():
            sessmaker.connection().engine.dispose()

    cancel_shutdown = hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, _shutdown_db_engines
    )

    sql_data = SQLData(cancel_shutdown, session_makers_by_db_url)
    hass.data[DOMAIN] = sql_data
    return sql_data
Validate the db_url and return a session maker. This does I/O and should be run in the executor.
def _validate_and_get_session_maker_for_db_url(db_url: str) -> scoped_session | None:
    """Validate the db_url and return a session maker.

    This does I/O and should be run in the executor.

    Returns None (after logging with credentials redacted) when the
    database cannot be reached, rather than raising.
    """
    sess: Session | None = None
    try:
        engine = sqlalchemy.create_engine(db_url, future=True)
        sessmaker = scoped_session(sessionmaker(bind=engine, future=True))
        # Run a dummy query just to test the db_url
        sess = sessmaker()
        sess.execute(sqlalchemy.text("SELECT 1;"))

    except SQLAlchemyError as err:
        _LOGGER.error(
            "Couldn't connect using %s DB_URL: %s",
            redact_credentials(db_url),
            redact_credentials(str(err)),
        )
        return None
    else:
        return sessmaker
    finally:
        # The probe session is closed on success and failure alike.
        if sess:
            sess.close()
Generate the lambda statement.
def _generate_lambda_stmt(query: str) -> StatementLambdaElement:
    """Generate the lambda statement.

    Wrapping the text clause in a lambda statement lets SQLAlchemy cache
    the compiled form of the query across executions.
    """
    text = sqlalchemy.text(query)
    return lambda_stmt(lambda: text, lambda_cache=_SQL_LAMBDA_CACHE)
Redact credentials from string data.
def redact_credentials(data: str | None) -> str:
    """Redact credentials from string data."""
    # Replace any user:password pair in a URL with placeholders.
    if data:
        return DB_URL_RE.sub("//****:****@", data)
    return "none"
Return the db_url provided if not empty, otherwise return the recorder db_url.
def resolve_db_url(hass: HomeAssistant, db_url: str | None) -> str:
    """Return the db_url provided if not empty, otherwise return the recorder db_url."""
    _LOGGER.debug("db_url: %s", redact_credentials(db_url))
    # Fall back to the recorder database for missing/blank configuration.
    if not db_url or db_url.isspace():
        return get_instance(hass).db_url
    return db_url
Validate that value is a SQL SELECT query.
def validate_sql_select(value: str) -> str:
    """Validate that value is a SQL SELECT query."""
    statements = sqlparse.parse(value.lstrip().lstrip(";"))
    if len(statements) > 1:
        raise vol.Invalid("Multiple SQL queries are not supported")
    if not statements or (query_type := statements[0].get_type()) == "UNKNOWN":
        raise vol.Invalid("Invalid SQL query")
    if query_type != "SELECT":
        _LOGGER.debug("The SQL query %s is of type %s", statements, query_type)
        raise vol.Invalid("Only SELECT queries allowed")
    return str(statements[0])
Remove db url from config if it matches recorder database.
def remove_configured_db_url_if_not_needed(
    hass: HomeAssistant, entry: ConfigEntry
) -> None:
    """Remove db url from config if it matches recorder database."""
    # Drop only the db_url option; keep everything else untouched.
    new_options = dict(entry.options)
    new_options.pop(CONF_DB_URL, None)
    hass.config_entries.async_update_entry(entry, options=new_options)
Content filter for media sources.
def media_source_content_filter(item: BrowseMedia) -> bool:
    """Content filter for media sources."""
    # Only audio content types pass the filter.
    return item.media_content_type[: len("audio/")] == "audio/"
Generate base schema.
def _base_schema(discovery_info=None):
    """Generate base schema."""
    schema = {}

    # Suggest the discovered host/port when discovery info is available.
    if discovery_info and CONF_HOST in discovery_info:
        schema[
            vol.Required(
                CONF_HOST,
                description={"suggested_value": discovery_info[CONF_HOST]},
            )
        ] = str
    else:
        schema[vol.Required(CONF_HOST)] = str

    if discovery_info and CONF_PORT in discovery_info:
        schema[
            vol.Required(
                CONF_PORT,
                default=DEFAULT_PORT,
                description={"suggested_value": discovery_info[CONF_PORT]},
            )
        ] = int
    else:
        schema[vol.Required(CONF_PORT, default=DEFAULT_PORT)] = int

    schema[vol.Optional(CONF_USERNAME)] = str
    schema[vol.Optional(CONF_PASSWORD)] = str
    schema[vol.Optional(CONF_HTTPS, default=False)] = bool

    return vol.Schema(schema)
Format error message.
def _format_err(name: str, *args: Any) -> str: """Format error message.""" return f"Exception in SSDP callback {name}: {args}"
Convert headers and description to discovery_info.
def discovery_info_from_headers_and_description(
    ssdp_device: SsdpDevice,
    combined_headers: CaseInsensitiveDict,
    info_desc: Mapping[str, Any],
) -> SsdpServiceInfo:
    """Convert headers and description to discovery_info.

    Builds an SsdpServiceInfo from the raw SSDP headers plus the parsed
    UPnP device description, filling in ST and UDN when absent.
    """
    ssdp_usn = combined_headers["usn"]
    ssdp_st = combined_headers.get_lower("st")
    # Copy the description into a plain dict regardless of its source type.
    if isinstance(info_desc, CaseInsensitiveDict):
        upnp_info = {**info_desc.as_dict()}
    else:
        upnp_info = {**info_desc}

    # Increase compatibility: depending on the message type,
    # either the ST (Search Target, from M-SEARCH messages)
    # or NT (Notification Type, from NOTIFY messages) header is mandatory
    if not ssdp_st:
        ssdp_st = combined_headers["nt"]

    # Ensure UPnP "udn" is set
    if ATTR_UPNP_UDN not in upnp_info:
        if udn := _udn_from_usn(ssdp_usn):
            upnp_info[ATTR_UPNP_UDN] = udn

    return SsdpServiceInfo(
        ssdp_usn=ssdp_usn,
        ssdp_st=ssdp_st,
        ssdp_ext=combined_headers.get_lower("ext"),
        ssdp_server=combined_headers.get_lower("server"),
        ssdp_location=combined_headers.get_lower("location"),
        ssdp_udn=combined_headers.get_lower("_udn"),
        ssdp_nt=combined_headers.get_lower("nt"),
        ssdp_headers=combined_headers,
        upnp=upnp_info,
        ssdp_all_locations=set(ssdp_device.locations),
    )
Get the UDN from the USN.
def _udn_from_usn(usn: str | None) -> str | None: """Get the UDN from the USN.""" if usn is None: return None if usn.startswith("uuid:"): return usn.split("::")[0] return None
Set up the Sterling Bank sensor platform.
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_devices: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Sterling Bank sensor platform."""
    sensors: list[StarlingBalanceSensor] = []
    for account in config[CONF_ACCOUNTS]:
        try:
            starling_account = StarlingAccount(
                account[CONF_ACCESS_TOKEN], sandbox=account[CONF_SANDBOX]
            )
            for balance_type in account[CONF_BALANCE_TYPES]:
                sensors.append(
                    StarlingBalanceSensor(
                        starling_account, account[CONF_NAME], balance_type
                    )
                )
        except requests.exceptions.HTTPError as error:
            # Skip this account but keep setting up the others.
            _LOGGER.error(
                "Unable to set up Starling account '%s': %s", account[CONF_NAME], error
            )
    add_devices(sensors, True)
Validate that the characteristic selected is valid for the source sensor type, throw if it isn't.
def valid_state_characteristic_configuration(config: dict[str, Any]) -> dict[str, Any]:
    """Validate that the characteristic selected is valid for the source sensor type, throw if it isn't."""
    is_binary = split_entity_id(config[CONF_ENTITY_ID])[0] == BINARY_SENSOR_DOMAIN
    characteristic = cast(str, config[CONF_STATE_CHARACTERISTIC])
    # Binary sensors support a different set of characteristics than numeric ones.
    supported = STATS_BINARY_SUPPORT if is_binary else STATS_NUMERIC_SUPPORT
    if characteristic not in supported:
        raise vol.ValueInvalid(
            f"The configured characteristic '{characteristic}' is not supported "
            "for the configured source sensor"
        )
    return config
Validate that max_age, sampling_size, or both are provided.
def valid_boundary_configuration(config: dict[str, Any]) -> dict[str, Any]:
    """Validate that max_age, sampling_size, or both are provided."""
    has_size = config.get(CONF_SAMPLES_MAX_BUFFER_SIZE) is not None
    has_age = config.get(CONF_MAX_AGE) is not None
    if not (has_size or has_age):
        raise vol.RequiredFieldInvalid(
            "The sensor configuration must provide 'max_age' and/or 'sampling_size'"
        )
    return config
Validate that if keep_last_sample is set, max_age must also be set.
def valid_keep_last_sample(config: dict[str, Any]) -> dict[str, Any]:
    """Validate that if keep_last_sample is set, max_age must also be set."""
    keep_last = config.get(CONF_KEEP_LAST_SAMPLE) is True
    if keep_last and config.get(CONF_MAX_AGE) is None:
        raise vol.RequiredFieldInvalid(
            "The sensor configuration must provide 'max_age' if 'keep_last_sample' is True"
        )
    return config
Set up the StatsD component.
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the StatsD component.

    Listens for all state changes on the event bus and forwards numeric
    states (and optionally numeric attributes) to StatsD as gauges, plus
    a per-entity counter.
    """
    conf = config[DOMAIN]
    host = conf.get(CONF_HOST)
    port = conf.get(CONF_PORT)
    sample_rate = conf.get(CONF_RATE)
    prefix = conf.get(CONF_PREFIX)
    value_mapping = conf.get(CONF_VALUE_MAP)
    show_attribute_flag = conf.get(CONF_ATTR)

    statsd_client = statsd.StatsClient(host=host, port=port, prefix=prefix)

    def statsd_event_listener(event):
        """Listen for new messages on the bus and sends them to StatsD."""
        if (state := event.data.get("new_state")) is None:
            return
        try:
            # An explicit value map takes precedence over numeric coercion.
            if value_mapping and state.state in value_mapping:
                _state = float(value_mapping[state.state])
            else:
                _state = state_helper.state_as_number(state)
        except ValueError:
            # Set the state to none and continue for any numeric attributes.
            _state = None
        states = dict(state.attributes)

        _LOGGER.debug("Sending %s", state.entity_id)

        if show_attribute_flag is True:
            if isinstance(_state, (float, int)):
                statsd_client.gauge(f"{state.entity_id}.state", _state, sample_rate)

            # Send attribute values
            for key, value in states.items():
                if isinstance(value, (float, int)):
                    # Use f-string for consistency with the rest of the module.
                    stat = f"{state.entity_id}.{key.replace(' ', '_')}"
                    statsd_client.gauge(stat, value, sample_rate)

        elif isinstance(_state, (float, int)):
            statsd_client.gauge(state.entity_id, _state, sample_rate)

        # Increment the count
        statsd_client.incr(state.entity_id, rate=sample_rate)

    hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)

    return True
Check if a 30303 discovery is a steamist device.
def async_is_steamist_device(device: Device30303) -> bool:
    """Check if a 30303 discovery is a steamist device.

    Steamist devices identify themselves via a model-specific
    hostname prefix.
    """
    # str.startswith accepts a tuple of prefixes; one call covers both models.
    return device.hostname.startswith(
        (MODEL_450_HOSTNAME_PREFIX, MODEL_550_HOSTNAME_PREFIX)
    )
Update a config entry from a discovery.
def async_update_entry_from_discovery(
    hass: HomeAssistant,
    entry: config_entries.ConfigEntry,
    device: Device30303,
) -> bool:
    """Update a config entry from a discovery.

    Fills in the unique id, name/title, and model when they are missing
    or placeholders. Returns True when the entry was actually changed.
    """
    data_updates: dict[str, Any] = {}
    updates: dict[str, Any] = {}
    # Adopt the MAC as the unique id when none is set yet.
    if not entry.unique_id:
        updates["unique_id"] = dr.format_mac(device.mac)
    # Replace a missing name or one that is just the IP address.
    if not entry.data.get(CONF_NAME) or is_ip_address(entry.data[CONF_NAME]):
        updates["title"] = data_updates[CONF_NAME] = device.name
    # Derive the model from the hostname prefix, e.g. "550HW-...".
    if not entry.data.get(CONF_MODEL) and "-" in device.hostname:
        data_updates[CONF_MODEL] = device.hostname.split("-", maxsplit=1)[0]
    if data_updates:
        updates["data"] = {**entry.data, **data_updates}
    if updates:
        return hass.config_entries.async_update_entry(entry, **updates)
    return False
Search a list of discoveries for one with a matching ip.
def async_find_discovery_by_ip(
    discoveries: list[Device30303], host: str
) -> Device30303 | None:
    """Search a list of discoveries for one with a matching ip."""
    return next(
        (discovery for discovery in discoveries if discovery.ipaddress == host),
        None,
    )
Check if a device was already discovered via a broadcast discovery.
def async_get_discovery(hass: HomeAssistant, host: str) -> Device30303 | None:
    """Check if a device was already discovered via a broadcast discovery."""
    previous_discoveries: list[Device30303] = hass.data[DOMAIN][DISCOVERY]
    return async_find_discovery_by_ip(previous_discoveries, host)
Trigger config flows for discovered devices.
def async_trigger_discovery(
    hass: HomeAssistant,
    discovered_devices: list[Device30303],
) -> None:
    """Trigger config flows for discovered devices."""
    for device in discovered_devices:
        discovery_data = {
            "ipaddress": device.ipaddress,
            "name": device.name,
            "mac": device.mac,
            "hostname": device.hostname,
        }
        discovery_flow.async_create_flow(
            hass,
            DOMAIN,
            context={"source": config_entries.SOURCE_INTEGRATION_DISCOVERY},
            data=discovery_data,
        )
Handle common flow input validation.
def validate_input(user_input: dict[str, str]) -> dict[str, str | int]:
    """Handle common flow input validation."""
    # The steam library holds the API key globally.
    steam.api.key.set(user_input[CONF_API_KEY])
    user_interface = steam.api.interface("ISteamUser")
    response = user_interface.GetPlayerSummaries(steamids=user_input[CONF_ACCOUNT])
    return response["response"]["players"]["player"][0]
Set up the StiebelEltron platform.
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the StiebelEltron platform."""
    domain_data = hass.data[STE_DOMAIN]
    add_entities([StiebelEltron(domain_data["name"], domain_data["ste_data"])], True)
Set up the STIEBEL ELTRON unit. Will automatically load climate platform.
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the STIEBEL ELTRON unit.

    Will automatically load climate platform.
    """
    domain_config = config[DOMAIN]
    unit_name = domain_config[CONF_NAME]
    modbus_client = hass.data[MODBUS_DOMAIN][domain_config[CONF_HUB]]
    hass.data[DOMAIN] = {
        "name": unit_name,
        "ste_data": StiebelEltronData(unit_name, modbus_client),
    }
    discovery.load_platform(hass, Platform.CLIMATE, DOMAIN, {}, config)
    return True
Find location of first box (or sub box if box_start provided) of given type.
def find_box(
    mp4_bytes: bytes, target_type: bytes, box_start: int = 0
) -> Generator[int, None, None]:
    """Find location of first box (or sub box if box_start provided) of given type."""
    if box_start:
        # Constrain the scan to the children of the box at box_start.
        box_end = box_start + int.from_bytes(
            mp4_bytes[box_start : box_start + 4], byteorder="big"
        )
        index = box_start + 8
    else:
        index = 0
        box_end = len(mp4_bytes)
    # A box header is 4 bytes of size followed by 4 bytes of type.
    while index <= box_end - 8:
        box_header = mp4_bytes[index : index + 8]
        if box_header[4:8] == target_type:
            yield index
        index += int.from_bytes(box_header[0:4], byteorder="big")
Get RFC 6381 codec string.
def get_codec_string(mp4_bytes: bytes) -> str:
    """Get RFC 6381 codec string.

    Walks every trak box under moov, inspects the sample description
    (stsd) box, and builds the comma-separated `codecs` parameter value
    for H.264 (avcC), H.265 (hvcC) and AAC (mp4a) tracks.
    """
    codecs = []

    # Find moov
    moov_location = next(find_box(mp4_bytes, b"moov"))

    # Find tracks
    for trak_location in find_box(mp4_bytes, b"trak", moov_location):
        # Drill down to media info
        mdia_location = next(find_box(mp4_bytes, b"mdia", trak_location))
        minf_location = next(find_box(mp4_bytes, b"minf", mdia_location))
        stbl_location = next(find_box(mp4_bytes, b"stbl", minf_location))
        stsd_location = next(find_box(mp4_bytes, b"stsd", stbl_location))

        # Get stsd box
        stsd_length = int.from_bytes(
            mp4_bytes[stsd_location : stsd_location + 4], byteorder="big"
        )
        stsd_box = mp4_bytes[stsd_location : stsd_location + stsd_length]

        # Base Codec
        codec = stsd_box[20:24].decode("utf-8")

        # Handle H264
        if (
            codec in ("avc1", "avc2", "avc3", "avc4")
            and stsd_length > 110
            and stsd_box[106:110] == b"avcC"
        ):
            profile = stsd_box[111:112].hex()
            compatibility = stsd_box[112:113].hex()
            # Cap level at 4.1 for compatibility with some Google Cast devices
            level = hex(min(stsd_box[113], 41))[2:]
            codec += "." + profile + compatibility + level

        # Handle H265
        elif (
            codec in ("hev1", "hvc1")
            and stsd_length > 110
            and stsd_box[106:110] == b"hvcC"
        ):
            tmp_byte = int.from_bytes(stsd_box[111:112], byteorder="big")

            # Profile Space
            codec += "."
            profile_space_map = {0: "", 1: "A", 2: "B", 3: "C"}
            profile_space = tmp_byte >> 6
            codec += profile_space_map[profile_space]
            general_profile_idc = tmp_byte & 31
            codec += str(general_profile_idc)

            # Compatibility: flags are encoded bit-reversed per RFC 6381
            codec += "."
            general_profile_compatibility = int.from_bytes(
                stsd_box[112:116], byteorder="big"
            )
            reverse = 0
            for i in range(32):
                reverse |= general_profile_compatibility & 1
                if i == 31:
                    break
                reverse <<= 1
                general_profile_compatibility >>= 1
            codec += hex(reverse)[2:]

            # Tier Flag
            if (tmp_byte & 32) >> 5 == 0:
                codec += ".L"
            else:
                codec += ".H"
            codec += str(int.from_bytes(stsd_box[122:123], byteorder="big"))

            # Constraint String
            has_byte = False
            constraint_string = ""
            for i in range(121, 115, -1):
                gci = int.from_bytes(stsd_box[i : i + 1], byteorder="big")
                if gci or has_byte:
                    constraint_string = "." + hex(gci)[2:] + constraint_string
                    has_byte = True
            codec += constraint_string

        # Handle Audio
        elif codec == "mp4a":
            oti = None
            dsi = None

            # Parse ES Descriptors
            oti_loc = stsd_box.find(b"\x04\x80\x80\x80")
            if oti_loc > 0:
                # Object type indication follows the descriptor tag.
                oti = stsd_box[oti_loc + 5 : oti_loc + 6].hex()
                codec += f".{oti}"

            dsi_loc = stsd_box.find(b"\x05\x80\x80\x80")
            if dsi_loc > 0:
                dsi_length = int.from_bytes(
                    stsd_box[dsi_loc + 4 : dsi_loc + 5], byteorder="big"
                )
                dsi_data = stsd_box[dsi_loc + 5 : dsi_loc + 5 + dsi_length]
                dsi0 = int.from_bytes(dsi_data[0:1], byteorder="big")
                # Audio object type; 31 signals the 6-bit extension form.
                dsi = (dsi0 & 248) >> 3
                if dsi == 31 and len(dsi_data) >= 2:
                    dsi1 = int.from_bytes(dsi_data[1:2], byteorder="big")
                    dsi = 32 + ((dsi0 & 7) << 3) + ((dsi1 & 224) >> 5)
                codec += f".{dsi}"

        codecs.append(codec)

    return ",".join(codecs)
Find location of moov atom in a BufferedIOBase mp4.
def find_moov(mp4_io: BufferedIOBase) -> int:
    """Find location of moov atom in a BufferedIOBase mp4."""
    location = 0
    while True:
        mp4_io.seek(location)
        header = mp4_io.read(8)
        # A short read or a zero size means we ran off the end of the file.
        if len(header) != 8 or header[0:4] == b"\x00\x00\x00\x00":
            raise HomeAssistantError("moov atom not found")
        if header[4:8] == b"moov":
            return location
        location += int.from_bytes(header[0:4], byteorder="big")
Read the init from a mp4 file.
def read_init(bytes_io: BufferedIOBase) -> bytes:
    """Read the init from a mp4 file."""
    moov_location = find_moov(bytes_io)
    bytes_io.seek(moov_location)
    # The first 4 bytes of the moov box give its total length.
    moov_length = int.from_bytes(bytes_io.read(4), byteorder="big")
    bytes_io.seek(0)
    # The init segment is everything up to and including the moov box.
    return bytes_io.read(moov_location + moov_length)