Check whether a datetime is ambiguous.
def _datetime_ambiguous(dattim: dt.datetime) -> bool:
    """Check whether a datetime is ambiguous."""
    assert dattim.tzinfo is not None
    opposite_fold = dattim.replace(fold=not dattim.fold)
    return _datetime_exists(dattim) and dattim.utcoffset() != opposite_fold.utcoffset()
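A datetime is ambiguous when a DST fall-back makes the same wall-clock time occur twice; the helper detects this by comparing the UTC offsets of the two folds. A minimal standalone illustration using the standard-library zoneinfo (the timezone and timestamp are arbitrary examples):

from datetime import datetime
from zoneinfo import ZoneInfo

# DST ended in New York on 2021-11-07 at 02:00, so 01:30 occurred twice.
# The two folds of the same wall-clock time map to different UTC offsets,
# which is exactly the condition _datetime_ambiguous checks for.
ambiguous = datetime(2021, 11, 7, 1, 30, tzinfo=ZoneInfo("America/New_York"))
print(ambiguous.utcoffset())                  # -1 day, 20:00:00 (EDT, fold=0)
print(ambiguous.replace(fold=1).utcoffset())  # -1 day, 19:00:00 (EST, fold=1)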
Try to parse the value into an Enum. Return None if parsing fails.
def try_parse_enum(cls: type[_EnumT], value: Any) -> _EnumT | None:
    """Try to parse the value into an Enum.

    Return None if parsing fails.
    """
    with contextlib.suppress(ValueError):
        return cls(value)
    return None
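For example, with the helper above (a minimal sketch; the Color enum is hypothetical):

from enum import Enum

class Color(Enum):
    RED = "red"
    BLUE = "blue"

# The helper returns the member on success and None on failure,
# so callers do not need their own try/except around cls(value).
assert try_parse_enum(Color, "red") is Color.RED
assert try_parse_enum(Color, "green") is None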
Log the stack of a thread that was still running at shutdown.
def _log_thread_running_at_shutdown(name: str, ident: int) -> None:
    """Log the stack of a thread that was still running at shutdown."""
    frames = sys._current_frames()  # pylint: disable=protected-access
    stack = frames.get(ident)
    formatted_stack = traceback.format_stack(stack)
    _LOGGER.warning(
        "Thread[%s] is still running at shutdown: %s",
        name,
        "".join(formatted_stack).strip(),
    )
Attempt to join or interrupt a set of threads.
def join_or_interrupt_threads(
    threads: set[Thread], timeout: float, log: bool
) -> set[Thread]:
    """Attempt to join or interrupt a set of threads."""
    joined = set()
    timeout_per_thread = timeout / len(threads)

    for thread in threads:
        thread.join(timeout=timeout_per_thread)

        if not thread.is_alive() or thread.ident is None:
            joined.add(thread)
            continue

        if log:
            _log_thread_running_at_shutdown(thread.name, thread.ident)

        with contextlib.suppress(SystemError):
            # SystemError at this stage is usually a race condition
            # where the thread happens to die right before we force
            # it to raise the exception
            async_raise(thread.ident, SystemExit)

    return joined
Write a file and rename it into place using atomicwrites. Writes all or nothing. This function uses fsync under the hood. It should only be used to write mission critical files as fsync can block for a few seconds or longer if the disk is busy. Using this function frequently will significantly negatively impact performance.
def write_utf8_file_atomic(
    filename: str, utf8_data: bytes | str, private: bool = False, mode: str = "w"
) -> None:
    """Write a file and rename it into place using atomicwrites.

    Writes all or nothing.

    This function uses fsync under the hood. It should
    only be used to write mission critical files as
    fsync can block for a few seconds or longer if the
    disk is busy.

    Using this function frequently will significantly
    negatively impact performance.
    """
    try:
        with AtomicWriter(filename, mode=mode, overwrite=True).open() as fdesc:
            if not private:
                os.fchmod(fdesc.fileno(), 0o644)
            fdesc.write(utf8_data)
    except OSError as error:
        _LOGGER.exception("Saving file failed: %s", filename)
        raise WriteError(error) from error
Write a file and rename it into place. Writes all or nothing.
def write_utf8_file(
    filename: str, utf8_data: bytes | str, private: bool = False, mode: str = "w"
) -> None:
    """Write a file and rename it into place.

    Writes all or nothing.
    """
    tmp_filename = ""
    encoding = "utf-8" if "b" not in mode else None
    try:
        # Modern versions of Python tempfile create this file with mode 0o600
        with tempfile.NamedTemporaryFile(
            mode=mode, encoding=encoding, dir=os.path.dirname(filename), delete=False
        ) as fdesc:
            fdesc.write(utf8_data)
            tmp_filename = fdesc.name
            if not private:
                os.fchmod(fdesc.fileno(), 0o644)
        os.replace(tmp_filename, filename)
    except OSError as error:
        _LOGGER.exception("Saving file failed: %s", filename)
        raise WriteError(error) from error
    finally:
        if os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError as err:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error(
                    "File replacement cleanup failed for %s while saving %s: %s",
                    tmp_filename,
                    filename,
                    err,
                )
Return a list of dataclass fields. Extracted from dataclasses._process_class.
def _class_fields(cls: type, kw_only: bool) -> list[tuple[str, Any, Any]]:
    """Return a list of dataclass fields.

    Extracted from dataclasses._process_class.
    """
    # pylint: disable=protected-access
    cls_annotations = cls.__dict__.get("__annotations__", {})

    cls_fields: list[dataclasses.Field[Any]] = []

    _dataclasses = sys.modules[dataclasses.__name__]
    for name, _type in cls_annotations.items():
        # See if this is a marker to change the value of kw_only.
        if dataclasses._is_kw_only(_type, _dataclasses) or (  # type: ignore[attr-defined]
            isinstance(_type, str)
            and dataclasses._is_type(  # type: ignore[attr-defined]
                _type,
                cls,
                _dataclasses,
                dataclasses.KW_ONLY,
                dataclasses._is_kw_only,  # type: ignore[attr-defined]
            )
        ):
            kw_only = True
        else:
            # Otherwise it's a field of some type.
            cls_fields.append(dataclasses._get_field(cls, name, _type, kw_only))  # type: ignore[attr-defined]

    return [(field.name, field.type, field) for field in cls_fields]
Parse JSON data. This adds a workaround for orjson not handling subclasses of str, https://github.com/ijl/orjson/issues/445.
def json_loads(__obj: bytes | bytearray | memoryview | str) -> JsonValueType:
    """Parse JSON data.

    This adds a workaround for orjson not handling subclasses of str,
    https://github.com/ijl/orjson/issues/445.
    """
    # Avoid isinstance overhead for the common case
    if type(__obj) not in (bytes, bytearray, memoryview, str) and isinstance(
        __obj, str
    ):
        return orjson.loads(str(__obj))  # type:ignore[no-any-return]
    return orjson.loads(__obj)
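A short sketch of the case the workaround covers (StrSubclass is a hypothetical stand-in for the str subclasses that triggered the orjson issue):

class StrSubclass(str):
    """Hypothetical str subclass, e.g. a wrapped template result."""

# orjson.loads rejects str subclasses, so json_loads converts them to a
# plain str first; both calls parse to the same value.
assert json_loads('{"a": 1}') == {"a": 1}
assert json_loads(StrSubclass('{"a": 1}')) == {"a": 1}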
Parse JSON data and ensure result is a list.
def json_loads_array(__obj: bytes | bytearray | memoryview | str) -> JsonArrayType:
    """Parse JSON data and ensure result is a list."""
    value: JsonValueType = json_loads(__obj)
    # Avoid isinstance overhead as we are not interested in list subclasses
    if type(value) is list:  # noqa: E721
        return value
    raise ValueError(f"Expected JSON to be parsed as a list got {type(value)}")
Parse JSON data and ensure result is a dictionary.
def json_loads_object(__obj: bytes | bytearray | memoryview | str) -> JsonObjectType:
    """Parse JSON data and ensure result is a dictionary."""
    value: JsonValueType = json_loads(__obj)
    # Avoid isinstance overhead as we are not interested in dict subclasses
    if type(value) is dict:  # noqa: E721
        return value
    raise ValueError(f"Expected JSON to be parsed as a dict got {type(value)}")
Load JSON data from a file. Defaults to returning empty dict if file is not found.
def load_json(
    filename: str | PathLike[str],
    default: JsonValueType = _SENTINEL,  # type: ignore[assignment]
) -> JsonValueType:
    """Load JSON data from a file.

    Defaults to returning empty dict if file is not found.
    """
    try:
        with open(filename, mode="rb") as fdesc:
            return orjson.loads(fdesc.read())  # type: ignore[no-any-return]
    except FileNotFoundError:
        # This is not a fatal error
        _LOGGER.debug("JSON file not found: %s", filename)
    except JSON_DECODE_EXCEPTIONS as error:
        _LOGGER.exception("Could not parse JSON content: %s", filename)
        raise HomeAssistantError(f"Error while loading {filename}: {error}") from error
    except OSError as error:
        _LOGGER.exception("JSON file reading failed: %s", filename)
        raise HomeAssistantError(f"Error while loading {filename}: {error}") from error
    return {} if default is _SENTINEL else default
Load JSON data from a file and return as list. Defaults to returning empty list if file is not found.
def load_json_array(
    filename: str | PathLike[str],
    default: JsonArrayType = _SENTINEL,  # type: ignore[assignment]
) -> JsonArrayType:
    """Load JSON data from a file and return as list.

    Defaults to returning empty list if file is not found.
    """
    if default is _SENTINEL:
        default = []
    value: JsonValueType = load_json(filename, default=default)
    # Avoid isinstance overhead as we are not interested in list subclasses
    if type(value) is list:  # noqa: E721
        return value
    _LOGGER.exception(
        "Expected JSON to be parsed as a list got %s in: %s", type(value), filename
    )
    raise HomeAssistantError(f"Expected JSON to be parsed as a list got {type(value)}")
Load JSON data from a file and return as dict. Defaults to returning empty dict if file is not found.
def load_json_object(
    filename: str | PathLike[str],
    default: JsonObjectType = _SENTINEL,  # type: ignore[assignment]
) -> JsonObjectType:
    """Load JSON data from a file and return as dict.

    Defaults to returning empty dict if file is not found.
    """
    if default is _SENTINEL:
        default = {}
    value: JsonValueType = load_json(filename, default=default)
    # Avoid isinstance overhead as we are not interested in dict subclasses
    if type(value) is dict:  # noqa: E721
        return value
    _LOGGER.exception(
        "Expected JSON to be parsed as a dict got %s in: %s", type(value), filename
    )
    raise HomeAssistantError(f"Expected JSON to be parsed as a dict got {type(value)}")
Save JSON data to a file.
def save_json(
    filename: str,
    data: list | dict,
    private: bool = False,
    *,
    encoder: type[json.JSONEncoder] | None = None,
    atomic_writes: bool = False,
) -> None:
    """Save JSON data to a file."""
    # pylint: disable-next=import-outside-toplevel
    from homeassistant.helpers.frame import report

    report(
        (
            "uses save_json from homeassistant.util.json module."
            " This is deprecated and will stop working in Home Assistant 2022.4, it"
            " should be updated to use homeassistant.helpers.json module instead"
        ),
        error_if_core=False,
    )

    # pylint: disable-next=import-outside-toplevel
    import homeassistant.helpers.json as json_helper

    json_helper.save_json(
        filename, data, private, encoder=encoder, atomic_writes=atomic_writes
    )
Format output of find_paths in a friendly way. Format is comma separated: <path>=<value>(<type>)
def format_unserializable_data(data: dict[str, Any]) -> str:
    """Format output of find_paths in a friendly way.

    Format is comma separated: <path>=<value>(<type>)
    """
    return ", ".join(f"{path}={value}({type(value)})" for path, value in data.items())
Find the paths to unserializable data. This method is slow! Only use for error handling.
def find_paths_unserializable_data(
    bad_data: Any, *, dump: Callable[[Any], str] = json.dumps
) -> dict[str, Any]:
    """Find the paths to unserializable data.

    This method is slow! Only use for error handling.
    """
    # pylint: disable-next=import-outside-toplevel
    from homeassistant.helpers.frame import report

    report(
        (
            "uses find_paths_unserializable_data from homeassistant.util.json module."
            " This is deprecated and will stop working in Home Assistant 2022.4, it"
            " should be updated to use homeassistant.helpers.json module instead"
        ),
        error_if_core=False,
    )

    # pylint: disable-next=import-outside-toplevel
    import homeassistant.helpers.json as json_helper

    return json_helper.find_paths_unserializable_data(bad_data, dump=dump)
Yield an ordered list of regions for a language based on country/code hints. Regions should be checked for support in the returned order if no other information is available.
def preferred_regions(
    language: str,
    country: str | None = None,
    code: str | None = None,
) -> Iterable[str]:
    """Yield an ordered list of regions for a language based on country/code hints.

    Regions should be checked for support in the returned order if no other
    information is available.
    """
    if country is not None:
        yield country.upper()

    if language == "en":
        # Prefer U.S. English if no country
        if country is None:
            yield "US"
    elif language == "zh":
        if code == "Hant":
            yield "HK"
            yield "TW"
        else:
            yield "CN"

    # fr -> fr-FR
    yield language.upper()
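Tracing the generator for a few inputs shows the intended ordering (outputs derived from the code above):

# No country hint: U.S. English is preferred, then the bare uppercase tag.
assert list(preferred_regions("en")) == ["US", "EN"]

# A Traditional Chinese script hint prefers Hong Kong and Taiwan.
assert list(preferred_regions("zh", code="Hant")) == ["HK", "TW", "ZH"]

# A country hint always comes first, uppercased.
assert list(preferred_regions("fr", country="ca")) == ["CA", "FR"]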
Return true if region is not known to be a script/code instead.
def is_region(language: str, region: str | None) -> bool:
    """Return true if region is not known to be a script/code instead."""
    if language == "es":
        return region != "419"

    if language == "sr":
        return region != "Latn"

    if language == "zh":
        return region not in ("Hans", "Hant")

    return True
Return true if two languages are considered the same.
def is_language_match(lang_1: str, lang_2: str) -> bool:
    """Return true if two languages are considered the same."""
    if lang_1 == lang_2:
        # Exact match
        return True

    if tuple(sorted([lang_1, lang_2])) in SAME_LANGUAGES:
        return True

    return False
Return a sorted list of matching language tags based on a target tag and country hint.
def matches(
    target: str, supported: Iterable[str], country: str | None = None
) -> list[str]:
    """Return a sorted list of matching language tags based on a target tag and country hint."""
    if target == MATCH_ALL:
        return list(supported)

    target_dialect = Dialect.parse(target)

    # Higher score is better
    scored = sorted(
        (
            (
                dialect := Dialect.parse(tag),
                target_dialect.score(dialect, country=country),
                tag,
            )
            for tag in supported
        ),
        key=operator.itemgetter(1),
        reverse=True,
    )

    # Score < 0 is not a match
    return [tag for _dialect, score, tag in scored if score[0] >= 0]
Intersect two sets of languages using is_match for aliases.
def intersect(languages_1: set[str], languages_2: set[str]) -> set[str]:
    """Intersect two sets of languages using is_match for aliases."""
    languages = set()
    for lang_1 in languages_1:
        for lang_2 in languages_2:
            if is_language_match(lang_1, lang_2):
                languages.add(lang_1)

    return languages
Calculate the distance in meters between two points. Async friendly.
def distance(
    lat1: float | None, lon1: float | None, lat2: float, lon2: float
) -> float | None:
    """Calculate the distance in meters between two points.

    Async friendly.
    """
    if lat1 is None or lon1 is None:
        return None
    result = vincenty((lat1, lon1), (lat2, lon2))
    if result is None:
        return None
    return result * 1000
Vincenty formula (inverse method) to calculate the distance. Result in kilometers or miles between two points on the surface of a spheroid. Async friendly.
def vincenty(
    point1: tuple[float, float], point2: tuple[float, float], miles: bool = False
) -> float | None:
    """Vincenty formula (inverse method) to calculate the distance.

    Result in kilometers or miles between two points on the surface of a
    spheroid.

    Async friendly.
    """
    # short-circuit coincident points
    if point1[0] == point2[0] and point1[1] == point2[1]:
        return 0.0

    U1 = math.atan((1 - FLATTENING) * math.tan(math.radians(point1[0])))
    U2 = math.atan((1 - FLATTENING) * math.tan(math.radians(point2[0])))
    L = math.radians(point2[1] - point1[1])
    Lambda = L

    sinU1 = math.sin(U1)
    cosU1 = math.cos(U1)
    sinU2 = math.sin(U2)
    cosU2 = math.cos(U2)

    for _ in range(MAX_ITERATIONS):
        sinLambda = math.sin(Lambda)
        cosLambda = math.cos(Lambda)
        sinSigma = math.sqrt(
            (cosU2 * sinLambda) ** 2 + (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2
        )
        if sinSigma == 0.0:
            return 0.0  # coincident points
        cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda
        sigma = math.atan2(sinSigma, cosSigma)
        sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
        cosSqAlpha = 1 - sinAlpha**2
        try:
            cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha
        except ZeroDivisionError:
            cos2SigmaM = 0
        C = FLATTENING / 16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha))
        LambdaPrev = Lambda
        Lambda = L + (1 - C) * FLATTENING * sinAlpha * (
            sigma
            + C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM**2))
        )
        if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD:
            break  # successful convergence
    else:
        return None  # failure to converge

    uSq = cosSqAlpha * (AXIS_A**2 - AXIS_B**2) / (AXIS_B**2)
    A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
    B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
    # fmt: off
    deltaSigma = B * sinSigma * (
        cos2SigmaM + B / 4 * (
            cosSigma * (-1 + 2 * cos2SigmaM ** 2) - B / 6 * cos2SigmaM *
            (-3 + 4 * sinSigma ** 2) * (-3 + 4 * cos2SigmaM ** 2)
        )
    )
    # fmt: on
    s = AXIS_B * A * (sigma - deltaSigma)

    s /= 1000  # Conversion of meters to kilometers

    if miles:
        s *= MILES_PER_KILOMETER  # kilometers to miles

    return round(s, 6)
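For example, assuming the module's WGS-84 constants (FLATTENING, AXIS_A, AXIS_B) are defined as usual, the helper can be called directly with (latitude, longitude) tuples; the figures below are approximate:

# Paris -> London along the WGS-84 ellipsoid, roughly 344 km.
paris = (48.8566, 2.3522)
london = (51.5074, -0.1278)
print(vincenty(paris, london))              # ~343.9 kilometers
print(vincenty(paris, london, miles=True))  # ~213.7 miles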
Migrate the existing log handlers to use the queue. This allows us to avoid blocking I/O and formatting messages in the event loop as log messages are written in another thread.
def async_activate_log_queue_handler(hass: HomeAssistant) -> None:
    """Migrate the existing log handlers to use the queue.

    This allows us to avoid blocking I/O and formatting messages
    in the event loop as log messages are written in another thread.
    """
    simple_queue: queue.SimpleQueue[logging.Handler] = queue.SimpleQueue()
    queue_handler = HomeAssistantQueueHandler(simple_queue)
    logging.root.addHandler(queue_handler)

    migrated_handlers: list[logging.Handler] = []
    for handler in logging.root.handlers[:]:
        if handler is queue_handler:
            continue
        logging.root.removeHandler(handler)
        migrated_handlers.append(handler)

    listener = logging.handlers.QueueListener(simple_queue, *migrated_handlers)
    queue_handler.listener = listener

    listener.start()
Log an exception with additional context.
def log_exception(format_err: Callable[[*_Ts], Any], *args: *_Ts) -> None:
    """Log an exception with additional context."""
    module = inspect.getmodule(inspect.stack(context=0)[1].frame)
    if module is not None:
        module_name = module.__name__
    else:
        # If Python is unable to access the source files, the call stack frame
        # will be missing information, so let's guard.
        # https://github.com/home-assistant/core/issues/24982
        module_name = __name__

    # Do not print the wrapper in the traceback
    frames = len(inspect.trace()) - 1
    exc_msg = traceback.format_exc(-frames)
    friendly_msg = format_err(*args)
    logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
Catch and log exception.
def _sync_wrapper(
    func: Callable[[*_Ts], Any], format_err: Callable[[*_Ts], Any], *args: *_Ts
) -> None:
    """Catch and log exception."""
    try:
        func(*args)
    except Exception:  # pylint: disable=broad-except
        log_exception(format_err, *args)
Catch and log exception.
def _callback_wrapper(
    func: Callable[[*_Ts], Any], format_err: Callable[[*_Ts], Any], *args: *_Ts
) -> None:
    """Catch and log exception."""
    try:
        func(*args)
    except Exception:  # pylint: disable=broad-except
        log_exception(format_err, *args)
Decorate a function func to catch and log exceptions. If func is a coroutine function, a coroutine function will be returned. If func is a callback, a callback will be returned.
def catch_log_exception(
    func: Callable[[*_Ts], Any],
    format_err: Callable[[*_Ts], Any],
    job_type: HassJobType | None = None,
) -> Callable[[*_Ts], None] | Callable[[*_Ts], Coroutine[Any, Any, None]]:
    """Decorate a function func to catch and log exceptions.

    If func is a coroutine function, a coroutine function will be returned.
    If func is a callback, a callback will be returned.
    """
    if job_type is None:
        job_type = get_hassjob_callable_job_type(func)

    if job_type is HassJobType.Coroutinefunction:
        async_func = cast(Callable[[*_Ts], Coroutine[Any, Any, None]], func)
        return wraps(async_func)(partial(_async_wrapper, async_func, format_err))  # type: ignore[return-value]

    if job_type is HassJobType.Callback:
        return wraps(func)(partial(_callback_wrapper, func, format_err))  # type: ignore[return-value]

    return wraps(func)(partial(_sync_wrapper, func, format_err))
Decorate a coroutine to catch and log exceptions.
def catch_log_coro_exception(
    target: Coroutine[Any, Any, _T], format_err: Callable[[*_Ts], Any], *args: *_Ts
) -> Coroutine[Any, Any, _T | None]:
    """Decorate a coroutine to catch and log exceptions."""

    async def coro_wrapper(*args: *_Ts) -> _T | None:
        """Catch and log exception."""
        try:
            return await target
        except Exception:  # pylint: disable=broad-except
            log_exception(format_err, *args)
            return None

    return coro_wrapper(*args)
Wrap a coroutine to catch and log exceptions. The exception will be logged together with a stacktrace of where the coroutine was wrapped. target: target coroutine.
def async_create_catching_coro(
    target: Coroutine[Any, Any, _T],
) -> Coroutine[Any, Any, _T | None]:
    """Wrap a coroutine to catch and log exceptions.

    The exception will be logged together with a stacktrace of where the
    coroutine was wrapped.

    target: target coroutine.
    """
    trace = traceback.extract_stack()
    return catch_log_coro_exception(
        target,
        lambda: "Exception in {} called from\n {}".format(
            target.__name__,
            "".join(traceback.format_list(trace[:-1])),
        ),
    )
Get line from cache or read from file.
def _get_line_from_cache(filename: str, lineno: int) -> str:
    """Get line from cache or read from file."""
    return (linecache.getline(filename, lineno) or "?").strip()
Warn if called inside the event loop. Raise if `strict` is True. The default advisory message is 'Use `await hass.async_add_executor_job()' Set `advise_msg` to an alternate message if the solution differs.
def check_loop(
    func: Callable[..., Any],
    check_allowed: Callable[[dict[str, Any]], bool] | None = None,
    strict: bool = True,
    strict_core: bool = True,
    advise_msg: str | None = None,
    **mapped_args: Any,
) -> None:
    """Warn if called inside the event loop. Raise if `strict` is True.

    The default advisory message is 'Use `await hass.async_add_executor_job()'
    Set `advise_msg` to an alternate message if the solution differs.
    """
    try:
        get_running_loop()
        in_loop = True
    except RuntimeError:
        in_loop = False

    if not in_loop:
        return

    if check_allowed is not None and check_allowed(mapped_args):
        return

    found_frame = None

    offender_frame = get_current_frame(2)
    offender_filename = offender_frame.f_code.co_filename
    offender_lineno = offender_frame.f_lineno
    offender_line = _get_line_from_cache(offender_filename, offender_lineno)

    try:
        integration_frame = get_integration_frame()
    except MissingIntegrationFrame:
        # Did not source from integration? Hard error.
        if not strict_core:
            _LOGGER.warning(
                "Detected blocking call to %s with args %s in %s, "
                "line %s: %s inside the event loop",
                func.__name__,
                mapped_args.get("args"),
                offender_filename,
                offender_lineno,
                offender_line,
            )
            return
        if found_frame is None:
            raise RuntimeError(  # noqa: TRY200
                f"Detected blocking call to {func.__name__} inside the event loop "
                f"in {offender_filename}, line {offender_lineno}: {offender_line}. "
                f"{advise_msg or 'Use `await hass.async_add_executor_job()`'}; "
                "This is causing stability issues. Please create a bug report at "
                f"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
            )

    hass: HomeAssistant | None = None
    with suppress(HomeAssistantError):
        hass = async_get_hass()
    report_issue = async_suggest_report_issue(
        hass,
        integration_domain=integration_frame.integration,
        module=integration_frame.module,
    )
    _LOGGER.warning(
        (
            "Detected blocking call to %s inside the event loop by %sintegration '%s' "
            "at %s, line %s: %s (offender: %s, line %s: %s), please %s"
        ),
        func.__name__,
        "custom " if integration_frame.custom_integration else "",
        integration_frame.integration,
        integration_frame.relative_filename,
        integration_frame.line_number,
        integration_frame.line,
        offender_filename,
        offender_lineno,
        offender_line,
        report_issue,
    )

    if strict:
        raise RuntimeError(
            "Blocking calls must be done in the executor or a separate thread;"
            f" {advise_msg or 'Use `await hass.async_add_executor_job()`'}; at"
            f" {integration_frame.relative_filename}, line {integration_frame.line_number}:"
            f" {integration_frame.line} "
            f"(offender: {offender_filename}, line {offender_lineno}: {offender_line})"
        )
Protect function from running in event loop.
def protect_loop(
    func: Callable[_P, _R],
    strict: bool = True,
    strict_core: bool = True,
    check_allowed: Callable[[dict[str, Any]], bool] | None = None,
) -> Callable[_P, _R]:
    """Protect function from running in event loop."""

    @functools.wraps(func)
    def protected_loop_func(*args: _P.args, **kwargs: _P.kwargs) -> _R:
        check_loop(
            func,
            strict=strict,
            strict_core=strict_core,
            check_allowed=check_allowed,
            args=args,
            kwargs=kwargs,
        )
        return func(*args, **kwargs)

    return protected_loop_func
Check if an address is a loopback address.
def is_loopback(address: IPv4Address | IPv6Address) -> bool:
    """Check if an address is a loopback address."""
    return address.is_loopback or address in IPV6_IPV4_LOOPBACK
Check if an address is a unique local non-loopback address.
def is_private(address: IPv4Address | IPv6Address) -> bool:
    """Check if an address is a unique local non-loopback address."""
    return any(address in network for network in PRIVATE_NETWORKS)
Check if an address is link-local (local but not necessarily unique).
def is_link_local(address: IPv4Address | IPv6Address) -> bool:
    """Check if an address is link-local (local but not necessarily unique)."""
    return address.is_link_local
Check if an address is on a local network.
def is_local(address: IPv4Address | IPv6Address) -> bool:
    """Check if an address is on a local network."""
    return is_loopback(address) or is_private(address) or is_link_local(address)
Check if an address is invalid.
def is_invalid(address: IPv4Address | IPv6Address) -> bool:
    """Check if an address is invalid."""
    return address.is_unspecified
Check if a given string is an IP address.
def is_ip_address(address: str) -> bool:
    """Check if a given string is an IP address."""
    try:
        ip_address(address)
    except ValueError:
        return False
    return True
Check if a given string is an IPv4 address.
def is_ipv4_address(address: str) -> bool:
    """Check if a given string is an IPv4 address."""
    try:
        IPv4Address(address)
    except ValueError:
        return False
    return True
Check if a given string is an IPv6 address.
def is_ipv6_address(address: str) -> bool:
    """Check if a given string is an IPv6 address."""
    try:
        IPv6Address(address)
    except ValueError:
        return False
    return True
Check if a given string is an IP address or valid hostname.
def is_host_valid(host: str) -> bool:
    """Check if a given string is an IP address or valid hostname."""
    if is_ip_address(host):
        return True
    if len(host) > 255:
        return False
    if re.match(r"^[0-9\.]+$", host):  # reject invalid IPv4
        return False
    if host.endswith("."):  # dot at the end is correct
        host = host[:-1]
    allowed = re.compile(r"(?!-)[A-Z\d\-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in host.split("."))
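A few illustrative checks against the rules above:

assert is_host_valid("192.168.1.1")        # valid IPv4 address
assert is_host_valid("home-assistant.io")  # valid hostname
assert not is_host_valid("256.0.0.1")      # all-numeric but not a real IPv4
assert not is_host_valid("-bad.example")   # labels may not start with a hyphen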
Normalize a given URL.
def normalize_url(address: str) -> str:
    """Normalize a given URL."""
    url = yarl.URL(address.rstrip("/"))
    if url.is_absolute() and url.is_default_port():
        return str(url.with_port(None))
    return str(url)
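The normalization strips trailing slashes and drops scheme-default ports, for example:

# Default ports (80 for http, 443 for https) are removed; explicit
# non-default ports are preserved.
assert normalize_url("http://example.org:80/") == "http://example.org"
assert normalize_url("https://example.org:8123/") == "https://example.org:8123"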
Return if we run in a virtual environment.
def is_virtual_env() -> bool:
    """Return if we run in a virtual environment."""
    # Check supports venv && virtualenv
    return getattr(sys, "base_prefix", sys.prefix) != sys.prefix or hasattr(
        sys, "real_prefix"
    )
Return True if we run in a docker env.
def is_docker_env() -> bool:
    """Return True if we run in a docker env."""
    return Path("/.dockerenv").exists()
Return a set of installed packages and versions.
def get_installed_versions(specifiers: set[str]) -> set[str]:
    """Return a set of installed packages and versions."""
    return {specifier for specifier in specifiers if is_installed(specifier)}
Check if a package is installed and will be loaded when we import it. Expected input is a pip compatible package specifier (requirement string) e.g. "package==1.0.0" or "package>=1.0.0,<2.0.0" For backward compatibility, it also accepts a URL with a fragment e.g. "git+https://github.com/pypa/pip#pip>=1" Returns True when the requirement is met. Returns False when the package is not installed or doesn't meet the requirement.
def is_installed(requirement_str: str) -> bool:
    """Check if a package is installed and will be loaded when we import it.

    Expected input is a pip compatible package specifier (requirement string)
    e.g. "package==1.0.0" or "package>=1.0.0,<2.0.0"

    For backward compatibility, it also accepts a URL with a fragment
    e.g. "git+https://github.com/pypa/pip#pip>=1"

    Returns True when the requirement is met.
    Returns False when the package is not installed or doesn't meet the requirement.
    """
    try:
        req = Requirement(requirement_str)
    except InvalidRequirement:
        if "#" not in requirement_str:
            _LOGGER.error("Invalid requirement '%s'", requirement_str)
            return False
        # This is likely a URL with a fragment
        # example: git+https://github.com/pypa/pip#pip>=1
        # fragment support was originally used to install zip files, and
        # we no longer do this in Home Assistant. However, custom
        # components started using it to install packages from git
        # urls, which would make it a breaking change to remove it.
        try:
            req = Requirement(urlparse(requirement_str).fragment)
        except InvalidRequirement:
            _LOGGER.error("Invalid requirement '%s'", requirement_str)
            return False

    try:
        if (installed_version := version(req.name)) is None:
            # This can happen when an install failed or
            # was aborted while in progress see
            # https://github.com/home-assistant/core/issues/47699
            _LOGGER.error(  # type: ignore[unreachable]
                "Installed version for %s resolved to None", req.name
            )
            return False
        return req.specifier.contains(installed_version, prereleases=True)
    except PackageNotFoundError:
        return False
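Example calls (results depend on what is installed in the local environment; the last specifier is a hypothetical package name):

# PEP 508 requirement strings are checked against installed metadata.
print(is_installed("pip>=20.0"))
# The legacy URL-with-fragment form is still accepted for custom components.
print(is_installed("git+https://github.com/pypa/pip#pip>=20.0"))
# Unknown distributions simply return False.
print(is_installed("some-hypothetical-package==1.0"))  # False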
Install a package on PyPi. Accepts pip compatible package strings. Return boolean if install successful.
def install_package(
    package: str,
    upgrade: bool = True,
    target: str | None = None,
    constraints: str | None = None,
    timeout: int | None = None,
) -> bool:
    """Install a package on PyPi. Accepts pip compatible package strings.

    Return boolean if install successful.
    """
    # Not using 'import pip; pip.main([])' because it breaks the logger
    _LOGGER.info("Attempting install of %s", package)
    env = os.environ.copy()
    args = [sys.executable, "-m", "pip", "install", "--quiet", package]
    if timeout:
        args += ["--timeout", str(timeout)]
    if upgrade:
        args.append("--upgrade")
    if constraints is not None:
        args += ["--constraint", constraints]
    if target:
        assert not is_virtual_env()
        # This only works if not running in venv
        args += ["--user"]
        env["PYTHONUSERBASE"] = os.path.abspath(target)
    _LOGGER.debug("Running pip command: args=%s", args)
    with Popen(
        args,
        stdin=PIPE,
        stdout=PIPE,
        stderr=PIPE,
        env=env,
        close_fds=False,  # required for posix_spawn
    ) as process:
        _, stderr = process.communicate()
        if process.returncode != 0:
            _LOGGER.error(
                "Unable to install package %s: %s",
                package,
                stderr.decode("utf-8").lstrip().strip(),
            )
            return False

    return True
Determine the percentage of an item in an ordered list. When using this utility for fan speeds, do not include "off" Given the list: ["low", "medium", "high", "very_high"], this function will return the following when the item is passed in: low: 25 medium: 50 high: 75 very_high: 100
def ordered_list_item_to_percentage(ordered_list: list[_T], item: _T) -> int:
    """Determine the percentage of an item in an ordered list.

    When using this utility for fan speeds, do not include "off"

    Given the list: ["low", "medium", "high", "very_high"], this
    function will return the following when the item is passed
    in:

        low: 25
        medium: 50
        high: 75
        very_high: 100
    """
    if item not in ordered_list:
        raise ValueError(f'The item "{item}" is not in "{ordered_list}"')

    list_len = len(ordered_list)
    list_position = ordered_list.index(item) + 1
    return (list_position * 100) // list_len
Find the item that most closely matches the percentage in an ordered list. When using this utility for fan speeds, do not include "off" Given the list: ["low", "medium", "high", "very_high"], this function will return the following when the percentage is passed in: 1-25: low 26-50: medium 51-75: high 76-100: very_high
def percentage_to_ordered_list_item(ordered_list: list[_T], percentage: int) -> _T:
    """Find the item that most closely matches the percentage in an ordered list.

    When using this utility for fan speeds, do not include "off"

    Given the list: ["low", "medium", "high", "very_high"], this
    function will return the following when the percentage is passed
    in:

        1-25: low
        26-50: medium
        51-75: high
        76-100: very_high
    """
    if not (list_len := len(ordered_list)):
        raise ValueError("The ordered list is empty")

    for offset, speed in enumerate(ordered_list):
        list_position = offset + 1
        upper_bound = (list_position * 100) // list_len
        if percentage <= upper_bound:
            return speed

    return ordered_list[-1]
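The two helpers are inverses at the bucket boundaries; a minimal sketch using the fan-speed list from the docstrings:

SPEEDS = ["low", "medium", "high", "very_high"]

assert ordered_list_item_to_percentage(SPEEDS, "medium") == 50
assert percentage_to_ordered_list_item(SPEEDS, 40) == "medium"

# Round-tripping an item through its percentage returns the same item.
assert percentage_to_ordered_list_item(
    SPEEDS, ordered_list_item_to_percentage(SPEEDS, "high")
) == "high"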
Given a range of low and high values convert a single value to a percentage. When using this utility for fan speeds, do not include 0 if it is off Given a low value of 1 and a high value of 255 this function will return: (1,255), 255: 100 (1,255), 127: 49 (1,255), 10: 3
def ranged_value_to_percentage(
    low_high_range: tuple[float, float], value: float
) -> int:
    """Given a range of low and high values convert a single value to a percentage.

    When using this utility for fan speeds, do not include 0 if it is off

    Given a low value of 1 and a high value of 255 this function
    will return (values floor to the nearest integer):

        (1,255), 255: 100
        (1,255), 127: 49
        (1,255), 10: 3
    """
    return scale_ranged_value_to_int_range(low_high_range, (1, 100), value)
Given a range of low and high values convert a percentage to a single value. When using this utility for fan speeds, do not include 0 if it is off Given a low value of 1 and a high value of 255 this function will return: (1,255), 100: 255 (1,255), 50: 127.5 (1,255), 4: 10.2
def percentage_to_ranged_value(
    low_high_range: tuple[float, float], percentage: float
) -> float:
    """Given a range of low and high values convert a percentage to a single value.

    When using this utility for fan speeds, do not include 0 if it is off

    Given a low value of 1 and a high value of 255 this function
    will return:

        (1,255), 100: 255
        (1,255), 50: 127.5
        (1,255), 4: 10.2
    """
    return scale_to_ranged_value((1, 100), low_high_range, percentage)
Draw a bounding box on an image. The bounding box is defined by the tuple (y_min, x_min, y_max, x_max) where the coordinates are floats in the range [0.0, 1.0] and relative to the width and height of the image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `(0.1, 0.2, 0.5, 0.9)`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
def draw_box(
    draw: ImageDraw,
    box: tuple[float, float, float, float],
    img_width: int,
    img_height: int,
    text: str = "",
    color: tuple[int, int, int] = (255, 255, 0),
) -> None:
    """Draw a bounding box on an image.

    The bounding box is defined by the tuple (y_min, x_min, y_max, x_max)
    where the coordinates are floats in the range [0.0, 1.0] and
    relative to the width and height of the image.

    For example, if an image is 100 x 200 pixels (height x width) and the
    bounding box is `(0.1, 0.2, 0.5, 0.9)`, the upper-left and bottom-right
    coordinates of the bounding box will be `(40, 10)` to `(180, 50)`
    (in (x,y) coordinates).
    """
    line_width = 3
    font_height = 8
    y_min, x_min, y_max, x_max = box
    (left, right, top, bottom) = (
        x_min * img_width,
        x_max * img_width,
        y_min * img_height,
        y_max * img_height,
    )
    draw.line(
        [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
        width=line_width,
        fill=color,
    )
    if text:
        draw.text(
            (left + line_width, abs(top - line_width - font_height)), text, fill=color
        )
Force kill a subprocess and wait for it to exit.
def kill_subprocess(process: subprocess.Popen[Any]) -> None:
    """Force kill a subprocess and wait for it to exit."""
    process.kill()
    process.communicate()
    process.wait()

    del process
Raise an exception when a read only dict is modified.
def _readonly(*args: Any, **kwargs: Any) -> Any:
    """Raise an exception when a read only dict is modified."""
    raise RuntimeError("Cannot modify ReadOnlyDict")
Given a range of low and high values convert a single value to another range. Given a source low value of 1 and a high value of 255 and a target range from 1 to 100 this function will return: (1,255), (1,100), 255: 100 (1,255), (1,100), 127: 49 (1,255), (1,100), 10: 3
def scale_ranged_value_to_int_range(
    source_low_high_range: tuple[float, float],
    target_low_high_range: tuple[float, float],
    value: float,
) -> int:
    """Given a range of low and high values convert a single value to another range.

    Given a source low value of 1 and a high value of 255 and
    a target range from 1 to 100 this function
    will return:

        (1,255), (1,100), 255: 100
        (1,255), (1,100), 127: 49
        (1,255), (1,100), 10: 3
    """
    source_offset = source_low_high_range[0] - 1
    target_offset = target_low_high_range[0] - 1
    return int(
        (value - source_offset)
        * states_in_range(target_low_high_range)
        // states_in_range(source_low_high_range)
        + target_offset
    )
Given a range of low and high values convert a single value to another range. Do not include 0 in a range if 0 means off, e.g. for brightness or fan speed. Given a source low value of 1 and a high value of 255 and a target range from 1 to 100 this function will return: (1,255), 255: 100 (1,255), 127: ~49.8039 (1,255), 10: ~3.9216
def scale_to_ranged_value(
    source_low_high_range: tuple[float, float],
    target_low_high_range: tuple[float, float],
    value: float,
) -> float:
    """Given a range of low and high values convert a single value to another range.

    Do not include 0 in a range if 0 means off,
    e.g. for brightness or fan speed.

    Given a source low value of 1 and a high value of 255 and
    a target range from 1 to 100 this function
    will return:

        (1,255), 255: 100
        (1,255), 127: ~49.8039
        (1,255), 10: ~3.9216
    """
    source_offset = source_low_high_range[0] - 1
    target_offset = target_low_high_range[0] - 1
    return (value - source_offset) * states_in_range(
        target_low_high_range
    ) / states_in_range(source_low_high_range) + target_offset
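Contrasting the two scaling helpers on the brightness-style range from the docstrings: the int variant floors, while the float variant preserves the fraction:

BRIGHTNESS = (1, 255)

# 127 out of 1..255 floors to 49 when mapped onto 1..100.
assert scale_ranged_value_to_int_range(BRIGHTNESS, (1, 100), 127) == 49
# The float variant keeps the precision (50% of 1..255 is 127.5).
assert scale_to_ranged_value((1, 100), BRIGHTNESS, 50) == 127.5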
Given a range of low and high values return how many states exist.
def states_in_range(low_high_range: tuple[float, float]) -> float:
    """Given a range of low and high values return how many states exist."""
    return low_high_range[1] - low_high_range[0] + 1
Given a range of low and high values return how many integer states exist.
def int_states_in_range(low_high_range: tuple[float, float]) -> int:
    """Given a range of low and high values return how many integer states exist."""
    return int(states_in_range(low_high_range))
Return an SSL context that does not verify the server certificate.
def create_no_verify_ssl_context(
    ssl_cipher_list: SSLCipherList = SSLCipherList.PYTHON_DEFAULT,
) -> ssl.SSLContext:
    """Return an SSL context that does not verify the server certificate."""
    return _create_no_verify_ssl_context(ssl_cipher_list=ssl_cipher_list)
Return an SSL context for making requests.
def client_context(
    ssl_cipher_list: SSLCipherList = SSLCipherList.PYTHON_DEFAULT,
) -> ssl.SSLContext:
    """Return an SSL context for making requests."""
    return _client_context(ssl_cipher_list=ssl_cipher_list)
Return the default SSL context.
def get_default_context() -> ssl.SSLContext:
    """Return the default SSL context."""
    return _DEFAULT_SSL_CONTEXT
Return the default SSL context that does not verify the server certificate.
def get_default_no_verify_context() -> ssl.SSLContext:
    """Return the default SSL context that does not verify the server certificate."""
    return _DEFAULT_NO_VERIFY_SSL_CONTEXT
Return an SSL context following the Mozilla recommendations. TLS configuration follows the best-practice guidelines specified here: https://wiki.mozilla.org/Security/Server_Side_TLS Modern guidelines are followed.
def server_context_modern() -> ssl.SSLContext:
    """Return an SSL context following the Mozilla recommendations.

    TLS configuration follows the best-practice guidelines specified here:
    https://wiki.mozilla.org/Security/Server_Side_TLS
    Modern guidelines are followed.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)

    context.minimum_version = ssl.TLSVersion.TLSv1_2

    context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE
    if hasattr(ssl, "OP_NO_COMPRESSION"):
        context.options |= ssl.OP_NO_COMPRESSION

    context.set_ciphers(SSL_CIPHER_LISTS[SSLCipherList.MODERN])

    return context
Return an SSL context following the Mozilla recommendations. TLS configuration follows the best-practice guidelines specified here: https://wiki.mozilla.org/Security/Server_Side_TLS Intermediate guidelines are followed.
def server_context_intermediate() -> ssl.SSLContext:
    """Return an SSL context following the Mozilla recommendations.

    TLS configuration follows the best-practice guidelines specified here:
    https://wiki.mozilla.org/Security/Server_Side_TLS
    Intermediate guidelines are followed.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)

    context.options |= (
        ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_CIPHER_SERVER_PREFERENCE
    )
    if hasattr(ssl, "OP_NO_COMPRESSION"):
        context.options |= ssl.OP_NO_COMPRESSION

    context.set_ciphers(SSL_CIPHER_LISTS[SSLCipherList.INTERMEDIATE])

    return context
Shutdown that will not deadlock.
def deadlock_safe_shutdown() -> None:
    """Shutdown that will not deadlock."""
    # threading._shutdown can deadlock forever
    # see https://github.com/justengel/continuous_threading#shutdown-update
    # for additional detail
    remaining_threads = [
        thread
        for thread in threading.enumerate()
        if thread is not threading.main_thread()
        and not thread.daemon
        and thread.is_alive()
    ]

    if not remaining_threads:
        return

    timeout_per_thread = THREADING_SHUTDOWN_TIMEOUT / len(remaining_threads)
    for thread in remaining_threads:
        try:
            thread.join(timeout_per_thread)
        except Exception as err:  # pylint: disable=broad-except
            _LOGGER.warning("Failed to join thread: %s", err)
Raise an exception in the threads with id tid.
def async_raise(tid: int, exctype: Any) -> None:
    """Raise an exception in the threads with id tid."""
    if not inspect.isclass(exctype):
        raise TypeError("Only types can be raised (not instances)")

    c_tid = ctypes.c_ulong(tid)  # changed in python 3.7+
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(c_tid, ctypes.py_object(exctype))

    if res == 1:
        return

    # "if it returns a number greater than one, you're in trouble,
    # and you should call it again with exc=NULL to revert the effect"
    ctypes.pythonapi.PyThreadState_SetAsyncExc(c_tid, None)
    raise SystemError("PyThreadState_SetAsyncExc failed")
Generate a ULID. This ulid should not be used for cryptographically secure operations. 01AN4Z07BY 79KA1307SR9X4MV3 |----------| |----------------| Timestamp Randomness 48bits 80bits This string can be loaded directly with https://github.com/ahawker/ulid import homeassistant.util.ulid as ulid_util import ulid ulid.parse(ulid_util.ulid())
def ulid(timestamp: float | None = None) -> str:
    """Generate a ULID.

    This ulid should not be used for cryptographically secure
    operations.

    01AN4Z07BY      79KA1307SR9X4MV3
    |----------|    |----------------|
     Timestamp          Randomness
       48bits             80bits

    This string can be loaded directly with https://github.com/ahawker/ulid

    import homeassistant.util.ulid as ulid_util
    import ulid
    ulid.parse(ulid_util.ulid())
    """
    return ulid_now() if timestamp is None else ulid_at_time(timestamp)
Check if the unit is valid for its type.
def _is_valid_unit(unit: str, unit_type: str) -> bool:
    """Check if the unit is valid for its type."""
    if unit_type == LENGTH:
        return unit in LENGTH_UNITS
    if unit_type == ACCUMULATED_PRECIPITATION:
        return unit in LENGTH_UNITS
    if unit_type == WIND_SPEED:
        return unit in WIND_SPEED_UNITS
    if unit_type == TEMPERATURE:
        return unit in TEMPERATURE_UNITS
    if unit_type == MASS:
        return unit in MASS_UNITS
    if unit_type == VOLUME:
        return unit in VOLUME_UNITS
    if unit_type == PRESSURE:
        return unit in PRESSURE_UNITS
    return False
Get unit system based on key.
def get_unit_system(key: str) -> UnitSystem:
    """Get unit system based on key."""
    if key == _CONF_UNIT_SYSTEM_US_CUSTOMARY:
        return US_CUSTOMARY_SYSTEM
    if key == _CONF_UNIT_SYSTEM_METRIC:
        return METRIC_SYSTEM
    raise ValueError(f"`{key}` is not a valid unit system key")
Convert deprecated unit system.
def _deprecated_unit_system(value: str) -> str:
    """Convert deprecated unit system."""
    if value == _CONF_UNIT_SYSTEM_IMPERIAL:
        # need to add warning in 2023.1
        return _CONF_UNIT_SYSTEM_US_CUSTOMARY
    return value
Generate a random UUID hex. This uuid should not be used for cryptographically secure operations.
def random_uuid_hex() -> str:
    """Generate a random UUID hex.

    This uuid should not be used for cryptographically secure
    operations.
    """
    return f"{getrandbits(32 * 4):032x}"
Wrap a function that returns old result if new result does not vary enough.
def ignore_variance(func: Callable[_P, _R], ignored_variance: Any) -> Callable[_P, _R]:
    """Wrap a function that returns old result if new result does not vary enough."""
    last_value: _R | None = None

    @functools.wraps(func)
    def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
        nonlocal last_value

        value = func(*args, **kwargs)
        if last_value is not None and abs(value - last_value) < ignored_variance:
            return last_value

        last_value = value
        return value

    return wrapper
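A sketch of the wrapper in use (the sensor function and threshold are hypothetical):

import random

# Suppress jitter below 0.5: repeated reads return the first value as
# long as the new reading stays within the ignored variance.
read_temperature = ignore_variance(lambda: 20.0 + random.uniform(-0.2, 0.2), 0.5)

first = read_temperature()
assert read_temperature() == first  # jitter under 0.5 is ignored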
Check if a filename is valid. Raises a ValueError if the filename is invalid.
def raise_if_invalid_filename(filename: str) -> None:
    """Check if a filename is valid.

    Raises a ValueError if the filename is invalid.
    """
    if RE_SANITIZE_FILENAME.sub("", filename) != filename:
        raise ValueError(f"{filename} is not a safe filename")
Check if a path is valid. Raises a ValueError if the path is invalid.
def raise_if_invalid_path(path: str) -> None:
    """Check if a path is valid.

    Raises a ValueError if the path is invalid.
    """
    if RE_SANITIZE_PATH.sub("", path) != path:
        raise ValueError(f"{path} is not a safe path")
Slugify a given text.
def slugify(text: str | None, *, separator: str = "_") -> str:
    """Slugify a given text."""
    if text == "" or text is None:
        return ""
    slug = unicode_slug.slugify(text, separator=separator)
    return "unknown" if slug == "" else slug
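Example results (the helper delegates to the slugify library imported as unicode_slug):

assert slugify("Living Room Lamp!") == "living_room_lamp"
assert slugify(None) == ""
# Input that slugifies to nothing falls back to "unknown".
assert slugify("!!!") == "unknown"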
Help creating a more readable string representation of objects.
def repr_helper(inp: Any) -> str:
    """Help creating a more readable string representation of objects."""
    if isinstance(inp, Mapping):
        return ", ".join(
            f"{repr_helper(key)}={repr_helper(item)}" for key, item in inp.items()
        )
    if isinstance(inp, datetime):
        return as_local(inp).isoformat()
    return str(inp)
Convert value to to_type, returning default if the conversion fails.
def convert(
    value: _T | None, to_type: Callable[[_T], _U], default: _U | None = None
) -> _U | None:
    """Convert value to to_type, returning default if the conversion fails."""
    try:
        return default if value is None else to_type(value)
    except (ValueError, TypeError):
        # If value could not be converted
        return default
Return a string that is not present in current_strings. If the preferred string exists, _2, _3, etc. are appended.
def ensure_unique_string(
    preferred_string: str, current_strings: Iterable[str] | KeysView[str]
) -> str:
    """Return a string that is not present in current_strings.

    If the preferred string exists, _2, _3, etc. are appended.
    """
    test_string = preferred_string
    current_strings_set = set(current_strings)

    tries = 1

    while test_string in current_strings_set:
        tries += 1
        test_string = f"{preferred_string}_{tries}"

    return test_string
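For example:

# The preferred string is taken verbatim when free; otherwise a numeric
# suffix is appended starting at _2.
assert ensure_unique_string("light", ["light", "light_2"]) == "light_3"
assert ensure_unique_string("switch", ["light"]) == "switch"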
Return a random string with letters and digits.
def get_random_string(length: int = 10) -> str:
    """Return a random string with letters and digits."""
    generator = random.SystemRandom()
    source_chars = string.ascii_letters + string.digits

    return "".join(generator.choice(source_chars) for _ in range(length))
Dump YAML to a string and remove null.
def dump(_dict: dict | list) -> str:
    """Dump YAML to a string and remove null."""
    return yaml.dump(
        _dict,
        default_flow_style=False,
        allow_unicode=True,
        sort_keys=False,
        Dumper=FastestAvailableSafeDumper,
    ).replace(": null\n", ":\n")
Save YAML to a file.
def save_yaml(path: str, data: dict) -> None:
    """Save YAML to a file."""
    # Dump before writing to not truncate the file if dumping fails
    str_data = dump(data)
    with open(path, "w", encoding="utf-8") as outfile:
        outfile.write(str_data)
Like BaseRepresenter.represent_mapping but does not issue the sort().
def represent_odict(  # type: ignore[no-untyped-def]
    dumper, tag, mapping, flow_style=None
) -> yaml.MappingNode:
    """Like BaseRepresenter.represent_mapping but does not issue the sort()."""
    value: list = []
    node = yaml.MappingNode(tag, value, flow_style=flow_style)
    if dumper.alias_key is not None:
        dumper.represented_objects[dumper.alias_key] = node
    best_style = True
    if hasattr(mapping, "items"):
        mapping = mapping.items()
    for item_key, item_value in mapping:
        node_key = dumper.represent_data(item_key)
        node_value = dumper.represent_data(item_value)
        if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
            best_style = False
        if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style):
            best_style = False
        value.append((node_key, node_value))
    if flow_style is None:
        if dumper.default_flow_style is not None:
            node.flow_style = dumper.default_flow_style
        else:
            node.flow_style = best_style
    return node
Add a representer to the dumper.
def add_representer(klass: Any, representer: Any) -> None:
    """Add a representer to the dumper."""
    FastestAvailableSafeDumper.add_representer(klass, representer)
Extract input from a structure.
def extract_inputs(obj: Any) -> set[str]:
    """Extract input from a structure."""
    found: set[str] = set()
    _extract_inputs(obj, found)
    return found
Extract input from a structure.
def _extract_inputs(obj: Any, found: set[str]) -> None:
    """Extract input from a structure."""
    if isinstance(obj, Input):
        found.add(obj.name)
        return

    if isinstance(obj, list):
        for val in obj:
            _extract_inputs(val, found)
        return

    if isinstance(obj, dict):
        for val in obj.values():
            _extract_inputs(val, found)
        return
Substitute values.
def substitute(obj: Any, substitutions: dict[str, Any]) -> Any:
    """Substitute values."""
    if isinstance(obj, Input):
        if obj.name not in substitutions:
            raise UndefinedSubstitution(obj.name)
        return substitutions[obj.name]

    if isinstance(obj, list):
        return [substitute(val, substitutions) for val in obj]

    if isinstance(obj, dict):
        return {key: substitute(val, substitutions) for key, val in obj.items()}

    return obj
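A minimal sketch of the two input helpers together, assuming Input is the blueprint input marker with a single name attribute, as used above:

config = {"entity": Input("motion_sensor"), "delay": 30}

# extract_inputs walks the structure and collects the input names.
assert extract_inputs(config) == {"motion_sensor"}

# substitute replaces each Input marker with the supplied value.
assert substitute(config, {"motion_sensor": "binary_sensor.hall"}) == {
    "entity": "binary_sensor.hall",
    "delay": 30,
}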
Load a YAML file.
def load_yaml(
    fname: str | os.PathLike[str], secrets: Secrets | None = None
) -> JSON_TYPE | None:
    """Load a YAML file."""
    try:
        with open(fname, encoding="utf-8") as conf_file:
            return parse_yaml(conf_file, secrets)
    except UnicodeDecodeError as exc:
        _LOGGER.error("Unable to read file %s: %s", fname, exc)
        raise HomeAssistantError(exc) from exc
Load a YAML file and ensure the top level is a dict. Raise if the top level is not a dict. Return an empty dict if the file is empty.
def load_yaml_dict(
    fname: str | os.PathLike[str], secrets: Secrets | None = None
) -> dict:
    """Load a YAML file and ensure the top level is a dict.

    Raise if the top level is not a dict.
    Return an empty dict if the file is empty.
    """
    loaded_yaml = load_yaml(fname, secrets)
    if loaded_yaml is None:
        loaded_yaml = {}
    if not isinstance(loaded_yaml, dict):
        raise YamlTypeError(f"YAML file {fname} does not contain a dict")
    return loaded_yaml
Parse YAML with the fastest available loader.
def parse_yaml(
    content: str | TextIO | StringIO, secrets: Secrets | None = None
) -> JSON_TYPE:
    """Parse YAML with the fastest available loader."""
    if not HAS_C_LOADER:
        return _parse_yaml_python(content, secrets)
    try:
        return _parse_yaml(FastSafeLoader, content, secrets)
    except yaml.YAMLError:
        # Loading failed, so we now load with the Python loader which has more
        # readable exceptions
        if isinstance(content, (StringIO, TextIO, TextIOWrapper)):
            # Rewind the stream so we can try again
            content.seek(0, 0)
        return _parse_yaml_python(content, secrets)
Parse YAML with the python loader (this is very slow).
def _parse_yaml_python(
    content: str | TextIO | StringIO, secrets: Secrets | None = None
) -> JSON_TYPE:
    """Parse YAML with the python loader (this is very slow)."""
    try:
        return _parse_yaml(PythonSafeLoader, content, secrets)
    except yaml.YAMLError as exc:
        _LOGGER.error(str(exc))
        raise HomeAssistantError(exc) from exc
Load a YAML file.
def _parse_yaml(
    loader: type[FastSafeLoader | PythonSafeLoader],
    content: str | TextIO,
    secrets: Secrets | None = None,
) -> JSON_TYPE:
    """Load a YAML file."""
    return yaml.load(content, Loader=lambda stream: loader(stream, secrets))
Add file reference information to an object.
def _add_reference(
    obj: dict | list | str | NodeDictClass | NodeListClass | NodeStrClass,
    loader: LoaderType,
    node: yaml.nodes.Node,
) -> NodeDictClass | NodeListClass | NodeStrClass:
    """Add file reference information to an object."""
    if isinstance(obj, list):
        obj = NodeListClass(obj)
    elif isinstance(obj, str):
        obj = NodeStrClass(obj)
    elif isinstance(obj, dict):
        obj = NodeDictClass(obj)
    try:  # suppress is much slower
        obj.__config_file__ = loader.get_name
        obj.__line__ = node.start_mark.line + 1
    except AttributeError:
        pass
    return obj
Load another YAML file and embed it using the !include tag. Example: device_tracker: !include device_tracker.yaml
def _include_yaml(loader: LoaderType, node: yaml.nodes.Node) -> JSON_TYPE:
    """Load another YAML file and embed it using the !include tag.

    Example:
        device_tracker: !include device_tracker.yaml
    """
    fname = os.path.join(os.path.dirname(loader.get_name), node.value)
    try:
        loaded_yaml = load_yaml(fname, loader.secrets)
        if loaded_yaml is None:
            loaded_yaml = NodeDictClass()
        return _add_reference(loaded_yaml, loader, node)
    except FileNotFoundError as exc:
        raise HomeAssistantError(
            f"{node.start_mark}: Unable to read file {fname}."
        ) from exc
Decide if a file is valid.
def _is_file_valid(name: str) -> bool:
    """Decide if a file is valid."""
    return not name.startswith(".")
Recursively load files in a directory.
def _find_files(directory: str, pattern: str) -> Iterator[str]:
    """Recursively load files in a directory."""
    for root, dirs, files in os.walk(directory, topdown=True):
        dirs[:] = [d for d in dirs if _is_file_valid(d)]
        for basename in sorted(files):
            if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):
                filename = os.path.join(root, basename)
                yield filename
Load multiple files from directory as a dictionary.
def _include_dir_named_yaml(loader: LoaderType, node: yaml.nodes.Node) -> NodeDictClass:
    """Load multiple files from directory as a dictionary."""
    mapping = NodeDictClass()
    loc = os.path.join(os.path.dirname(loader.get_name), node.value)
    for fname in _find_files(loc, "*.yaml"):
        filename = os.path.splitext(os.path.basename(fname))[0]
        if os.path.basename(fname) == SECRET_YAML:
            continue
        loaded_yaml = load_yaml(fname, loader.secrets)
        if loaded_yaml is None:
            # Special case, an empty file included by !include_dir_named is
            # treated as an empty dictionary
            loaded_yaml = NodeDictClass()
        mapping[filename] = loaded_yaml
    return _add_reference(mapping, loader, node)
Load multiple files from directory as a merged dictionary.
def _include_dir_merge_named_yaml(
    loader: LoaderType, node: yaml.nodes.Node
) -> NodeDictClass:
    """Load multiple files from directory as a merged dictionary."""
    mapping = NodeDictClass()
    loc = os.path.join(os.path.dirname(loader.get_name), node.value)
    for fname in _find_files(loc, "*.yaml"):
        if os.path.basename(fname) == SECRET_YAML:
            continue
        loaded_yaml = load_yaml(fname, loader.secrets)
        if isinstance(loaded_yaml, dict):
            mapping.update(loaded_yaml)
    return _add_reference(mapping, loader, node)
Load multiple files from directory as a list.
def _include_dir_list_yaml(
    loader: LoaderType, node: yaml.nodes.Node
) -> list[JSON_TYPE]:
    """Load multiple files from directory as a list."""
    loc = os.path.join(os.path.dirname(loader.get_name), node.value)
    return [
        loaded_yaml
        for f in _find_files(loc, "*.yaml")
        if os.path.basename(f) != SECRET_YAML
        and (loaded_yaml := load_yaml(f, loader.secrets)) is not None
    ]
Load multiple files from directory as a merged list.
def _include_dir_merge_list_yaml(
    loader: LoaderType, node: yaml.nodes.Node
) -> JSON_TYPE:
    """Load multiple files from directory as a merged list."""
    loc: str = os.path.join(os.path.dirname(loader.get_name), node.value)
    merged_list: list[JSON_TYPE] = []
    for fname in _find_files(loc, "*.yaml"):
        if os.path.basename(fname) == SECRET_YAML:
            continue
        loaded_yaml = load_yaml(fname, loader.secrets)
        if isinstance(loaded_yaml, list):
            merged_list.extend(loaded_yaml)
    return _add_reference(merged_list, loader, node)