def _describe_ou(ca: CloudAux, ou_id: str, **kwargs) -> Dict[str, str]:
"""Wrapper for organizations:DescribeOrganizationalUnit
Args:
ca: CloudAux instance
ou_id: organizational unit ID
"""
result = ca.call(
"organizations.client.describe_organizational_unit",
OrganizationalUnitId=ou_id,
**kwargs
)
    return result.get("OrganizationalUnit")

def _describe_account(ca: CloudAux, account_id: str, **kwargs) -> Dict[str, str]:
"""Wrapper for organizations:DescribeAccount
Args:
ca: CloudAux instance
account_id: AWS account ID
"""
result = ca.call(
"organizations.client.describe_account", AccountId=account_id, **kwargs
)
    return result.get("Account")

def _list_children_for_ou(
ca: CloudAux,
parent_id: str,
child_type: Literal["ACCOUNT", "ORGANIZATIONAL_UNIT"],
**kwargs
) -> List[Dict[str, Any]]:
"""Wrapper for organizations:ListChildren
Args:
ca: CloudAux instance
parent_id: ID of organization root or organizational unit
child_type: ACCOUNT or ORGANIZATIONAL_UNIT
"""
return ca.call(
"organizations.client.list_children",
ChildType=child_type,
ParentId=parent_id,
**kwargs
    )

def _list_org_roots(ca: CloudAux, **kwargs) -> List[Dict[str, Any]]:
"""Wrapper for organizations:ListRoots
Args:
ca: CloudAux instance
"""
    return ca.call("organizations.client.list_roots", **kwargs)

def _get_children_for_ou(ca: CloudAux, root_id: str) -> List[Dict[str, Any]]:
"""Recursively build OU structure
Args:
ca: CloudAux instance
root_id: ID of organization root or organizational unit
"""
children: List[Dict[str, Any]] = []
children.extend(_list_children_for_ou(ca, root_id, "ORGANIZATIONAL_UNIT"))
children.extend(_list_children_for_ou(ca, root_id, "ACCOUNT"))
for child in children:
child["Parent"] = root_id
if child["Type"] == "ORGANIZATIONAL_UNIT":
child.update(_describe_ou(ca, child["Id"]))
child["Children"] = _get_children_for_ou(ca, child["Id"])
else:
child.update(_describe_account(ca, child["Id"]))
    return children
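As an illustration (not part of the source), a small helper that walks the tree this function builds; the "Type", "Id", and "Children" keys come straight from the code above, while iter_account_ids itself is hypothetical:

from typing import Any, Dict, Iterator, List

def iter_account_ids(children: List[Dict[str, Any]]) -> Iterator[str]:
    # Walk the nested OU structure returned by _get_children_for_ou and
    # yield the IDs of every ACCOUNT leaf; OUs recurse into "Children".
    for child in children:
        if child["Type"] == "ACCOUNT":
            yield child["Id"]
        else:
            yield from iter_account_ids(child.get("Children", []))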
def detect_role_changes_and_update_cache(celery_app):
"""
    This function detects role changes via Amazon EventBridge rules and forces a refresh of the affected roles.
"""
log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
queue_arn = config.get(
"event_bridge.detect_role_changes_and_update_cache.queue_arn", ""
).format(region=config.region)
if not queue_arn:
raise MissingConfigurationValue(
"Unable to find required configuration value: "
"`event_bridge.detect_role_changes_and_update_cache.queue_arn`"
)
queue_name = queue_arn.split(":")[-1]
queue_account_number = queue_arn.split(":")[4]
queue_region = queue_arn.split(":")[3]
# Optionally assume a role before receiving messages from the queue
queue_assume_role = config.get(
"event_bridge.detect_role_changes_and_update_cache.assume_role"
)
sqs_client = boto3_cached_conn(
"sqs",
service_type="client",
region=queue_region,
retry_max_attempts=2,
account_number=queue_account_number,
assume_role=queue_assume_role,
client_kwargs=config.get("boto3.client_kwargs", {}),
)
queue_url_res = sqs_client.get_queue_url(QueueName=queue_name)
queue_url = queue_url_res.get("QueueUrl")
if not queue_url:
raise DataNotRetrievable(f"Unable to retrieve Queue URL for {queue_arn}")
roles_to_update = set()
messages = sqs_client.receive_message(
QueueUrl=queue_url, MaxNumberOfMessages=10
).get("Messages", [])
while messages:
processed_messages = []
for message in messages:
try:
message_body = json.loads(message["Body"])
try:
if "Message" in message_body:
decoded_message = json.loads(message_body["Message"])["detail"]
else:
decoded_message = message_body["detail"]
except Exception as e:
log.error(
{
**log_data,
"message": "Unable to process Cloudtrail message",
"message_body": message_body,
"error": str(e),
}
)
sentry_sdk.capture_exception()
continue
role_name = decoded_message["requestParameters"]["roleName"]
role_account_id = decoded_message.get(
"account", decoded_message.get("recipientAccountId")
)
role_arn = f"arn:aws:iam::{role_account_id}:role/{role_name}"
if role_arn not in roles_to_update:
celery_app.send_task(
"consoleme.celery_tasks.celery_tasks.refresh_iam_role",
args=[role_arn],
)
roles_to_update.add(role_arn)
except Exception as e:
log.error(
{**log_data, "error": str(e), "raw_message": message}, exc_info=True
)
sentry_sdk.capture_exception()
processed_messages.append(
{
"Id": message["MessageId"],
"ReceiptHandle": message["ReceiptHandle"],
}
)
sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=processed_messages)
messages = sqs_client.receive_message(
QueueUrl=queue_url, MaxNumberOfMessages=10
).get("Messages", [])
log.debug(
{
**log_data,
"num_roles": len(roles_to_update),
"message": "Triggered role cache update for roles that were created or changed",
}
)
    return roles_to_update
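For reference, a sketch of the message shape the loop above expects; the field names mirror the parsing code, the values are invented:

# Hypothetical EventBridge-to-SQS message body for an IAM role change.
# The loop reads roleName and the account ID from the "detail" payload.
example_body = {
    "detail": {
        "eventName": "UpdateRole",
        "requestParameters": {"roleName": "example-role"},
        "recipientAccountId": "123456789012",
    }
}
# Parsed result: arn:aws:iam::123456789012:role/example-role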
def get_session_name():
"""Set a session name if running locally."""
if platform.platform().lower().startswith("darwin"):
session_name = getpass.getuser()
return session_name
    return "roleupdater"

def update_role(event):
"""Update a role policy"""
with open(event, "r") as f:
event_data = json.load(f)
for e in event_data:
e["requestor"] = e["requestor"].format(requestor=get_session_name())
result = async_to_sync(update_role_handler)(event_data, None)
if result.get("success", False):
log.info("Role policy update successful")
else:
        log.info("Role policy update failed")

def get_actions_for_resource(resource_arn: str, statement: Dict) -> List[str]:
"""For the given resource and policy statement, return the actions that are
for that resource's service.
"""
results: List[str] = []
# Get service from resource
resource_service = get_service_from_arn(resource_arn)
# Get relevant actions from policy doc
actions = statement.get("Action", [])
actions = actions if isinstance(actions, list) else [actions]
for action in actions:
if action == "*":
results.append(action)
else:
if (
get_service_from_action(action) == resource_service
or action.lower() == "sts:assumerole"
and resource_service == "iam"
):
if action not in results:
results.append(action)
    return results
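A hypothetical call showing the filtering, assuming get_service_from_arn and get_service_from_action extract the service prefix ("s3", "sqs", ...) as their names suggest:

statement = {
    "Effect": "Allow",
    "Action": ["s3:GetObject", "s3:PutObject", "sqs:SendMessage"],
    "Resource": "*",
}
actions = get_actions_for_resource("arn:aws:s3:::example-bucket", statement)
# Only the S3 actions match the bucket's service:
# ["s3:GetObject", "s3:PutObject"]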
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
    os.environ["AWS_SESSION_TOKEN"] = "testing"

def sts(aws_credentials):
"""Mocked STS Fixture."""
from consoleme.config import config
with mock_sts():
yield boto3.client(
"sts", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
        )

def iam(aws_credentials):
"""Mocked IAM Fixture."""
from consoleme.config import config
with mock_iam():
yield boto3.client(
"iam", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
        )

def aws_config(aws_credentials):
"""Mocked Config Fixture."""
from consoleme.config import config
with mock_config():
yield boto3.client(
"config", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
        )

def s3(aws_credentials):
"""Mocked S3 Fixture."""
from consoleme.config import config
with mock_s3():
yield boto3.client(
"s3", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
        )

def ses(aws_credentials):
"""Mocked SES Fixture."""
from consoleme.config import config
with mock_ses():
client = boto3.client(
"ses", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
)
client.verify_email_address(EmailAddress="[email protected]")
    yield client

def sqs(aws_credentials):
"""Mocked SQS Fixture."""
from consoleme.config import config
with mock_sqs():
yield boto3.client(
"sqs", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
        )

def sns(aws_credentials):
    """Mocked SNS Fixture."""
from consoleme.config import config
with mock_sns():
yield boto3.client(
"sns", region_name="us-east-1", **config.get("boto3.client_kwargs", {})
        )

def dynamodb(aws_credentials):
"""Mocked DynamoDB Fixture."""
with mock_dynamodb():
# Remove the config value for the DynamoDB Server
from consoleme.config.config import CONFIG
old_value = CONFIG.config.pop("dynamodb_server", None)
yield boto3.client(
"dynamodb", region_name="us-east-1", **CONFIG.get("boto3.client_kwargs", {})
)
# Reset the config value:
        CONFIG.config["dynamodb_server"] = old_value

def retry():
"""Mock the retry library so that it doesn't retry."""
class MockRetry:
def __init__(self, *args, **kwargs):
pass
def call(self, f, *args, **kwargs):
return f(*args, **kwargs)
patch_retry = patch("retrying.Retrying", MockRetry)
yield patch_retry.start()
    patch_retry.stop()

def get_num_layer_for_convnext_single(var_name, depths):
"""
Each layer is assigned distinctive layer ids
"""
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
layer_id = sum(depths[:stage_id]) + 1
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
layer_id = sum(depths[:stage_id]) + block_id + 1
return layer_id
else:
        return sum(depths) + 1

def get_num_layer_for_convnext(var_name):
"""
Divide [3, 3, 27, 3] layers into 12 groups; each group is three
consecutive blocks, including possible neighboring downsample layers;
adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
"""
num_max_layer = 12
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
if stage_id == 0:
layer_id = 0
elif stage_id == 1 or stage_id == 2:
layer_id = stage_id + 1
elif stage_id == 3:
layer_id = 12
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
if stage_id == 0 or stage_id == 1:
layer_id = stage_id + 1
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = 12
return layer_id
else:
        return num_max_layer + 1
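A few sample mappings implied by the branches above (the parameter names are illustrative ConvNeXt-style prefixes):

assert get_num_layer_for_convnext("downsample_layers.0.0.weight") == 0
assert get_num_layer_for_convnext("stages.2.7.gamma") == 3 + 7 // 3  # group 5
assert get_num_layer_for_convnext("norm.weight") == 13  # num_max_layer + 1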
def str2bool(v):
"""
Converts string to bool type; enables command line
arguments in the format of '--arg1 true --arg2 false'
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
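Typical argparse usage (the flag name is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use_amp", type=str2bool, default=False)
args = parser.parse_args(["--use_amp", "true"])
assert args.use_amp is True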
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
    __builtin__.print = print

def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate with half-cycle cosine after warmup"""
if epoch < args.warmup_epochs:
lr = args.lr * epoch / args.warmup_epochs
else:
lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
(1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
    return lr
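A quick sanity check of the schedule's endpoints with a dummy optimizer; the args fields mirror the ones the function reads:

import math
import types
import torch

args = types.SimpleNamespace(lr=1e-3, min_lr=1e-6, warmup_epochs=5, epochs=50)
opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=args.lr)
assert adjust_learning_rate(opt, 0, args) == 0.0                       # warmup start
assert math.isclose(adjust_learning_rate(opt, 5, args), args.lr)       # warmup peak
assert math.isclose(adjust_learning_rate(opt, 50, args), args.min_lr)  # cosine floor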
def enable() -> None:
"""Enable the detection of blocking calls in the event loop."""
# Prevent urllib3 and requests doing I/O in event loop
HTTPConnection.putrequest = protect_loop( # type: ignore[method-assign]
HTTPConnection.putrequest
)
# Prevent sleeping in event loop. Non-strict since 2022.02
time.sleep = protect_loop(
time.sleep, strict=False, check_allowed=_check_sleep_call_allowed
)
# Currently disabled. pytz doing I/O when getting timezone.
# Prevent files being opened inside the event loop
# builtins.open = protect_loop(builtins.open)
if not _IN_TESTS:
# unittest uses `importlib.import_module` to do mocking
# so we cannot protect it if we are running tests
importlib.import_module = protect_loop(
importlib.import_module,
strict_core=False,
strict=False,
check_allowed=_check_import_call_allowed,
        )

def open_hass_ui(hass: core.HomeAssistant) -> None:
"""Open the UI."""
import webbrowser # pylint: disable=import-outside-toplevel
if hass.config.api is None or "frontend" not in hass.config.components:
_LOGGER.warning("Cannot launch the UI because frontend not loaded")
return
scheme = "https" if hass.config.api.use_ssl else "http"
url = str(
yarl.URL.build(scheme=scheme, host="127.0.0.1", port=hass.config.api.port)
)
if not webbrowser.open(url):
_LOGGER.warning(
"Unable to open the Home Assistant UI in a browser. Open it yourself at %s",
url,
        )

def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: int | None = None,
log_file: str | None = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = (
"%(asctime)s.%(msecs)03d %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
)
if not log_no_color:
try:
# pylint: disable-next=import-outside-toplevel
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wraps the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=FORMAT_DATETIME,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
# If the above initialization failed for any reason, setup the default
# formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=FORMAT_DATETIME, level=logging.INFO)
# Capture warnings.warn(...) and friends messages in logs.
# The standard destination for them is stderr, which may end up unnoticed.
# This way they're where other messages are, and can be filtered as usual.
logging.captureWarnings(True)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
logging.getLogger("httpx").setLevel(logging.WARNING)
sys.excepthook = lambda *args: logging.getLogger(None).exception(
"Uncaught exception", exc_info=args
)
threading.excepthook = lambda args: logging.getLogger(None).exception(
"Uncaught thread exception",
exc_info=( # type: ignore[arg-type]
args.exc_type,
args.exc_value,
args.exc_traceback,
),
)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
err_handler: (
logging.handlers.RotatingFileHandler
| logging.handlers.TimedRotatingFileHandler
)
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
else:
err_handler = _RotatingFileHandlerWithoutShouldRollOver(
err_log_path, backupCount=1
)
try:
err_handler.doRollover()
except OSError as err:
_LOGGER.error("Error rolling over log file: %s", err)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=FORMAT_DATETIME))
logger = logging.getLogger("")
logger.addHandler(err_handler)
logger.setLevel(logging.INFO if verbose else logging.WARNING)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
    async_activate_log_queue_handler(hass)

def _get_domains(hass: core.HomeAssistant, config: dict[str, Any]) -> set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
domains = {
domain for key in config if (domain := cv.domain_key(key)) != core.DOMAIN
}
# Add config entry and default domains
if not hass.config.recovery_mode:
domains.update(DEFAULT_INTEGRATIONS)
domains.update(hass.config_entries.async_domains())
else:
domains.update(DEFAULT_INTEGRATIONS_RECOVERY_MODE)
# Add domains depending on if the Supervisor is used or not
if "SUPERVISOR" in os.environ:
domains.update(DEFAULT_INTEGRATIONS_SUPERVISOR)
    return domains

def _no_duplicate_auth_provider(
configs: Sequence[dict[str, Any]],
) -> Sequence[dict[str, Any]]:
"""No duplicate auth provider config allowed in a list.
Each type of auth provider can only have one config without optional id.
Unique id is required if same type of auth provider used multiple times.
"""
config_keys: set[tuple[str, str | None]] = set()
for config in configs:
key = (config[CONF_TYPE], config.get(CONF_ID))
if key in config_keys:
raise vol.Invalid(
f"Duplicate auth provider {config[CONF_TYPE]} found. "
"Please add unique IDs "
"if you want to have the same auth provider twice"
)
config_keys.add(key)
    return configs
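Illustrative inputs, assuming CONF_TYPE and CONF_ID resolve to the "type" and "id" keys:

import voluptuous as vol

# OK: same provider type twice, disambiguated by unique ids.
_no_duplicate_auth_provider(
    [
        {"type": "homeassistant", "id": "primary"},
        {"type": "homeassistant", "id": "backup"},
    ]
)
# Raises vol.Invalid: two configs of the same type, neither with an id.
try:
    _no_duplicate_auth_provider([{"type": "homeassistant"}, {"type": "homeassistant"}])
except vol.Invalid as err:
    print(err)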
def _no_duplicate_auth_mfa_module(
configs: Sequence[dict[str, Any]],
) -> Sequence[dict[str, Any]]:
"""No duplicate auth mfa module item allowed in a list.
Each type of mfa module can only have one config without optional id.
A global unique id is required if same type of mfa module used multiple
times.
Note: this is different than auth provider
"""
config_keys: set[str] = set()
for config in configs:
key = config.get(CONF_ID, config[CONF_TYPE])
if key in config_keys:
raise vol.Invalid(
f"Duplicate mfa module {config[CONF_TYPE]} found. "
"Please add unique IDs "
"if you want to have the same mfa module twice"
)
config_keys.add(key)
    return configs

def _filter_bad_internal_external_urls(conf: dict) -> dict:
"""Filter internal/external URL with a path."""
for key in CONF_INTERNAL_URL, CONF_EXTERNAL_URL:
if key in conf and urlparse(conf[key]).path not in ("", "/"):
# We warn but do not fix, because if this was incorrectly configured,
# adjusting this value might impact security.
_LOGGER.warning(
"Invalid %s set. It's not allowed to have a path (/bla)", key
)
    return conf

def get_default_config_dir() -> str:
"""Put together the default configuration directory based on the OS."""
data_dir = os.path.expanduser("~")
    return os.path.join(data_dir, CONFIG_DIR_NAME)

def _write_default_config(config_dir: str) -> bool:
"""Write the default config."""
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
secret_path = os.path.join(config_dir, SECRET_YAML)
version_path = os.path.join(config_dir, VERSION_FILE)
automation_yaml_path = os.path.join(config_dir, AUTOMATION_CONFIG_PATH)
script_yaml_path = os.path.join(config_dir, SCRIPT_CONFIG_PATH)
scene_yaml_path = os.path.join(config_dir, SCENE_CONFIG_PATH)
# Writing files with YAML does not create the most human readable results
# So we're hard coding a YAML template.
try:
with open(config_path, "w", encoding="utf8") as config_file:
config_file.write(DEFAULT_CONFIG)
if not os.path.isfile(secret_path):
with open(secret_path, "w", encoding="utf8") as secret_file:
secret_file.write(DEFAULT_SECRETS)
with open(version_path, "w", encoding="utf8") as version_file:
version_file.write(__version__)
if not os.path.isfile(automation_yaml_path):
with open(automation_yaml_path, "w", encoding="utf8") as automation_file:
automation_file.write("[]")
if not os.path.isfile(script_yaml_path):
with open(script_yaml_path, "w", encoding="utf8"):
pass
if not os.path.isfile(scene_yaml_path):
with open(scene_yaml_path, "w", encoding="utf8"):
pass
except OSError:
print( # noqa: T201
f"Unable to create default configuration file {config_path}"
)
return False
    return True

def load_yaml_config_file(
config_path: str, secrets: Secrets | None = None
) -> dict[Any, Any]:
"""Parse a YAML configuration file.
Raises FileNotFoundError or HomeAssistantError.
This method needs to run in an executor.
"""
try:
conf_dict = load_yaml_dict(config_path, secrets)
except YamlTypeError as exc:
msg = (
f"The configuration file {os.path.basename(config_path)} "
"does not contain a dictionary"
)
_LOGGER.error(msg)
raise HomeAssistantError(msg) from exc
# Convert values to dictionaries if they are None
for key, value in conf_dict.items():
conf_dict[key] = value or {}
    return conf_dict

def process_ha_config_upgrade(hass: HomeAssistant) -> None:
"""Upgrade configuration if necessary.
This method needs to run in an executor.
"""
version_path = hass.config.path(VERSION_FILE)
try:
with open(version_path, encoding="utf8") as inp:
conf_version = inp.readline().strip()
except FileNotFoundError:
# Last version to not have this file
conf_version = "0.7.7"
if conf_version == __version__:
return
_LOGGER.info(
"Upgrading configuration directory from %s to %s", conf_version, __version__
)
version_obj = AwesomeVersion(conf_version)
if version_obj < AwesomeVersion("0.50"):
# 0.50 introduced persistent deps dir.
lib_path = hass.config.path("deps")
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
if version_obj < AwesomeVersion("0.92"):
# 0.92 moved google/tts.py to google_translate/tts.py
config_path = hass.config.path(YAML_CONFIG_FILE)
with open(config_path, encoding="utf-8") as config_file:
config_raw = config_file.read()
if TTS_PRE_92 in config_raw:
_LOGGER.info("Migrating google tts to google_translate tts")
config_raw = config_raw.replace(TTS_PRE_92, TTS_92)
try:
with open(config_path, "w", encoding="utf-8") as config_file:
config_file.write(config_raw)
except OSError:
_LOGGER.exception("Migrating to google_translate tts failed")
if version_obj < AwesomeVersion("0.94") and is_docker_env():
# In 0.94 we no longer install packages inside the deps folder when
# running inside a Docker container.
lib_path = hass.config.path("deps")
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
with open(version_path, "w", encoding="utf8") as outp:
        outp.write(__version__)

def async_log_schema_error(
exc: vol.Invalid,
domain: str,
config: dict,
hass: HomeAssistant,
link: str | None = None,
) -> None:
"""Log a schema validation error."""
message = format_schema_error(hass, exc, domain, config, link)
    _LOGGER.error(message)

def async_log_config_validator_error(
exc: vol.Invalid | HomeAssistantError,
domain: str,
config: dict,
hass: HomeAssistant,
link: str | None = None,
) -> None:
"""Log an error from a custom config validator."""
if isinstance(exc, vol.Invalid):
async_log_schema_error(exc, domain, config, hass, link)
return
message = format_homeassistant_error(hass, exc, domain, config, link)
    _LOGGER.error(message, exc_info=exc)

def _get_by_path(data: dict | list, items: list[str | int]) -> Any:
"""Access a nested object in root by item sequence.
Returns None in case of error.
"""
try:
return reduce(operator.getitem, items, data) # type: ignore[arg-type]
except (KeyError, IndexError, TypeError):
        return None
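For example:

config = {"automation": [{"trigger": {"platform": "state"}}]}
assert _get_by_path(config, ["automation", 0, "trigger", "platform"]) == "state"
assert _get_by_path(config, ["script", 0]) is None  # missing path -> None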
def find_annotation(
config: dict | list, path: list[str | int]
) -> tuple[str, int | str] | None:
"""Find file/line annotation for a node in config pointed to by path.
If the node pointed to is a dict or list, prefer the annotation for the key in
the key/value pair defining the dict or list.
If the node is not annotated, try the parent node.
"""
def find_annotation_for_key(
item: dict, path: list[str | int], tail: str | int
) -> tuple[str, int | str] | None:
for key in item:
if key == tail:
if annotation := _get_annotation(key):
return annotation
break
return None
def find_annotation_rec(
config: dict | list, path: list[str | int], tail: str | int | None
) -> tuple[str, int | str] | None:
item = _get_by_path(config, path)
if isinstance(item, dict) and tail is not None:
if tail_annotation := find_annotation_for_key(item, path, tail):
return tail_annotation
if (
isinstance(item, (dict, list))
and path
and (
key_annotation := find_annotation_for_key(
_get_by_path(config, path[:-1]), path[:-1], path[-1]
)
)
):
return key_annotation
if annotation := _get_annotation(item):
return annotation
if not path:
return None
tail = path.pop()
if annotation := find_annotation_rec(config, path, tail):
return annotation
return _get_annotation(item)
    return find_annotation_rec(config, list(path), None)

def _relpath(hass: HomeAssistant, path: str) -> str:
"""Return path relative to the Home Assistant config dir."""
    return os.path.relpath(path, hass.config.config_dir)

def stringify_invalid(
hass: HomeAssistant,
exc: vol.Invalid,
domain: str,
config: dict,
link: str | None,
max_sub_error_length: int,
) -> str:
"""Stringify voluptuous.Invalid.
This is an alternative to the custom __str__ implemented in
voluptuous.error.Invalid. The modifications are:
- Format the path delimited by -> instead of @data[]
- Prefix with domain, file and line of the error
- Suffix with a link to the documentation
- Give a more user friendly output for unknown options
- Give a more user friendly output for missing options
"""
if "." in domain:
integration_domain, _, platform_domain = domain.partition(".")
message_prefix = (
f"Invalid config for '{platform_domain}' from integration "
f"'{integration_domain}'"
)
else:
message_prefix = f"Invalid config for '{domain}'"
if domain != HA_DOMAIN and link:
message_suffix = f", please check the docs at {link}"
else:
message_suffix = ""
if annotation := find_annotation(config, exc.path):
message_prefix += f" at {_relpath(hass, annotation[0])}, line {annotation[1]}"
path = "->".join(str(m) for m in exc.path)
if exc.error_message == "extra keys not allowed":
return (
f"{message_prefix}: '{exc.path[-1]}' is an invalid option for '{domain}', "
f"check: {path}{message_suffix}"
)
if exc.error_message == "required key not provided":
return (
f"{message_prefix}: required key '{exc.path[-1]}' not provided"
f"{message_suffix}"
)
# This function is an alternative to the stringification done by
# vol.Invalid.__str__, so we need to call Exception.__str__ here
# instead of str(exc)
output = Exception.__str__(exc)
if error_type := exc.error_type:
output += " for " + error_type
offending_item_summary = repr(_get_by_path(config, exc.path))
if len(offending_item_summary) > max_sub_error_length:
offending_item_summary = (
f"{offending_item_summary[: max_sub_error_length - 3]}..."
)
return (
f"{message_prefix}: {output} '{path}', got {offending_item_summary}"
f"{message_suffix}"
    )

def humanize_error(
hass: HomeAssistant,
validation_error: vol.Invalid,
domain: str,
config: dict,
link: str | None,
max_sub_error_length: int = MAX_VALIDATION_ERROR_ITEM_LENGTH,
) -> str:
"""Provide a more helpful + complete validation error message.
This is a modified version of voluptuous.error.Invalid.__str__,
the modifications make some minor changes to the formatting.
"""
if isinstance(validation_error, vol.MultipleInvalid):
return "\n".join(
sorted(
humanize_error(
hass, sub_error, domain, config, link, max_sub_error_length
)
for sub_error in validation_error.errors
)
)
return stringify_invalid(
hass, validation_error, domain, config, link, max_sub_error_length
    )

def format_homeassistant_error(
hass: HomeAssistant,
exc: HomeAssistantError,
domain: str,
config: dict,
link: str | None = None,
) -> str:
"""Format HomeAssistantError thrown by a custom config validator."""
if "." in domain:
integration_domain, _, platform_domain = domain.partition(".")
message_prefix = (
f"Invalid config for '{platform_domain}' from integration "
f"'{integration_domain}'"
)
else:
message_prefix = f"Invalid config for '{domain}'"
# HomeAssistantError raised by custom config validator has no path to the
# offending configuration key, use the domain key as path instead.
if annotation := find_annotation(config, [domain]):
message_prefix += f" at {_relpath(hass, annotation[0])}, line {annotation[1]}"
message = f"{message_prefix}: {str(exc) or repr(exc)}"
if domain != HA_DOMAIN and link:
message += f", please check the docs at {link}"
    return message

def format_schema_error(
hass: HomeAssistant,
exc: vol.Invalid,
domain: str,
config: dict,
link: str | None = None,
) -> str:
"""Format configuration validation error."""
    return humanize_error(hass, exc, domain, config, link)

def _log_pkg_error(
hass: HomeAssistant, package: str, component: str | None, config: dict, message: str
) -> None:
"""Log an error while merging packages."""
message_prefix = f"Setup of package '{package}'"
if annotation := find_annotation(config, [HA_DOMAIN, CONF_PACKAGES, package]):
message_prefix += f" at {_relpath(hass, annotation[0])}, line {annotation[1]}"
    _LOGGER.error("%s failed: %s", message_prefix, message)

def _identify_config_schema(module: ComponentProtocol) -> str | None:
"""Extract the schema and identify list or dict based."""
if not isinstance(module.CONFIG_SCHEMA, vol.Schema):
return None
schema = module.CONFIG_SCHEMA.schema
if isinstance(schema, vol.All):
for subschema in schema.validators:
if isinstance(subschema, dict):
schema = subschema
break
else:
return None
try:
key = next(k for k in schema if k == module.DOMAIN)
except (TypeError, AttributeError, StopIteration):
return None
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error identifying config schema")
return None
if hasattr(key, "default") and not isinstance(
key.default, vol.schema_builder.Undefined
):
default_value = module.CONFIG_SCHEMA({module.DOMAIN: key.default()})[
module.DOMAIN
]
if isinstance(default_value, dict):
return "dict"
if isinstance(default_value, list):
return "list"
return None
domain_schema = schema[key]
t_schema = str(domain_schema)
if t_schema.startswith("{") or "schema_with_slug_keys" in t_schema:
return "dict"
if t_schema.startswith(("[", "All(<function ensure_list")):
return "list"
    return None

def _validate_package_definition(name: str, conf: Any) -> None:
"""Validate basic package definition properties."""
cv.slug(name)
    PACKAGE_DEFINITION_SCHEMA(conf)

def _recursive_merge(conf: dict[str, Any], package: dict[str, Any]) -> str | None:
"""Merge package into conf, recursively."""
duplicate_key: str | None = None
for key, pack_conf in package.items():
if isinstance(pack_conf, dict):
if not pack_conf:
continue
conf[key] = conf.get(key, OrderedDict())
duplicate_key = _recursive_merge(conf=conf[key], package=pack_conf)
elif isinstance(pack_conf, list):
conf[key] = cv.remove_falsy(
cv.ensure_list(conf.get(key)) + cv.ensure_list(pack_conf)
)
else:
if conf.get(key) is not None:
return key
conf[key] = pack_conf
    return duplicate_key
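A small illustration of the merge rules (dicts merge recursively, lists concatenate, and a conflicting scalar reports its key); it assumes the module's cv import is available:

conf = {"logger": {"default": "info"}, "automation": [{"alias": "a"}]}
package = {"logger": {"logs": {"homeassistant.core": "debug"}}, "automation": [{"alias": "b"}]}
assert _recursive_merge(conf, package) is None  # no scalar conflicts
# conf["logger"] now holds both "default" and "logs";
# conf["automation"] is the two-item concatenated list.
assert _recursive_merge({"name": "x"}, {"name": "y"}) == "name"  # duplicate scalar key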
def _get_log_message_and_stack_print_pref(
hass: HomeAssistant, domain: str, platform_exception: ConfigExceptionInfo
) -> tuple[str | None, bool, dict[str, str]]:
"""Get message to log and print stack trace preference."""
exception = platform_exception.exception
platform_path = platform_exception.platform_path
platform_config = platform_exception.config
link = platform_exception.integration_link
placeholders: dict[str, str] = {
"domain": domain,
"error": str(exception),
"p_name": platform_path,
}
show_stack_trace: bool | None = _CONFIG_LOG_SHOW_STACK_TRACE.get(
platform_exception.translation_key
)
if show_stack_trace is None:
# If no pre defined log_message is set, we generate an enriched error
# message, so we can notify about it during setup
show_stack_trace = False
if isinstance(exception, vol.Invalid):
log_message = format_schema_error(
hass, exception, platform_path, platform_config, link
)
if annotation := find_annotation(platform_config, exception.path):
placeholders["config_file"], line = annotation
placeholders["line"] = str(line)
else:
if TYPE_CHECKING:
assert isinstance(exception, HomeAssistantError)
log_message = format_homeassistant_error(
hass, exception, platform_path, platform_config, link
)
if annotation := find_annotation(platform_config, [platform_path]):
placeholders["config_file"], line = annotation
placeholders["line"] = str(line)
show_stack_trace = True
return (log_message, show_stack_trace, placeholders)
# Generate the log message from the English translations
log_message = async_get_exception_message(
HA_DOMAIN,
platform_exception.translation_key,
translation_placeholders=placeholders,
)
    return (log_message, show_stack_trace, placeholders)

def async_drop_config_annotations(
integration_config_info: IntegrationConfigInfo,
integration: Integration,
) -> ConfigType | None:
"""Remove file and line annotations from str items in component configuration."""
if (config := integration_config_info.config) is None:
return None
def drop_config_annotations_rec(node: Any) -> Any:
if isinstance(node, dict):
# Some integrations store metadata in custom dict classes, preserve those
tmp = dict(node)
node.clear()
node.update(
(drop_config_annotations_rec(k), drop_config_annotations_rec(v))
for k, v in tmp.items()
)
return node
if isinstance(node, list):
return [drop_config_annotations_rec(v) for v in node]
if isinstance(node, NodeStrClass):
return str(node)
return node
# Don't drop annotations from the homeassistant integration because it may
# have configuration for other integrations as packages.
if integration.domain in config and integration.domain != HA_DOMAIN:
drop_config_annotations_rec(config[integration.domain])
    return config

def async_handle_component_errors(
hass: HomeAssistant,
integration_config_info: IntegrationConfigInfo,
integration: Integration,
raise_on_failure: bool = False,
) -> None:
"""Handle component configuration errors from async_process_component_config.
In case of errors:
- Print the error messages to the log.
- Raise a ConfigValidationError if raise_on_failure is set.
"""
if not (config_exception_info := integration_config_info.exception_info_list):
return
platform_exception: ConfigExceptionInfo
domain = integration.domain
placeholders: dict[str, str]
for platform_exception in config_exception_info:
exception = platform_exception.exception
(
log_message,
show_stack_trace,
placeholders,
) = _get_log_message_and_stack_print_pref(hass, domain, platform_exception)
_LOGGER.error(
log_message,
exc_info=exception if show_stack_trace else None,
)
if not raise_on_failure:
return
if len(config_exception_info) == 1:
translation_key = platform_exception.translation_key
else:
translation_key = ConfigErrorTranslationKey.MULTIPLE_INTEGRATION_CONFIG_ERRORS
errors = str(len(config_exception_info))
placeholders = {
"domain": domain,
"errors": errors,
}
raise ConfigValidationError(
translation_key,
[platform_exception.exception for platform_exception in config_exception_info],
translation_domain=HA_DOMAIN,
translation_placeholders=placeholders,
    )

def config_per_platform(
config: ConfigType, domain: str
) -> Iterable[tuple[str | None, ConfigType]]:
"""Break a component config into different platforms.
For example, will find 'switch', 'switch 2', 'switch 3', .. etc
Async friendly.
"""
for config_key in extract_domain_configs(config, domain):
if not (platform_config := config[config_key]):
continue
if not isinstance(platform_config, list):
platform_config = [platform_config]
item: ConfigType
platform: str | None
for item in platform_config:
try:
platform = item.get(CONF_PLATFORM)
except AttributeError:
platform = None
            yield platform, item
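For instance, assuming cv.domain_key maps a "switch 2" key to the "switch" domain and CONF_PLATFORM is the "platform" key:

config = {
    "switch": {"platform": "demo"},
    "switch 2": [{"platform": "template"}, {"platform": "mqtt"}],
}
assert list(config_per_platform(config, "switch")) == [
    ("demo", {"platform": "demo"}),
    ("template", {"platform": "template"}),
    ("mqtt", {"platform": "mqtt"}),
]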
def extract_platform_integrations(
config: ConfigType, domains: set[str]
) -> dict[str, set[str]]:
"""Find all the platforms in a configuration.
Returns a dictionary with domain as key and a set of platforms as value.
"""
platform_integrations: dict[str, set[str]] = {}
for key, domain_config in config.items():
try:
domain = cv.domain_key(key)
except vol.Invalid:
continue
if domain not in domains:
continue
if not isinstance(domain_config, list):
domain_config = [domain_config]
for item in domain_config:
try:
platform = item.get(CONF_PLATFORM)
except AttributeError:
continue
if platform and isinstance(platform, Hashable):
platform_integrations.setdefault(domain, set()).add(platform)
    return platform_integrations

def extract_domain_configs(config: ConfigType, domain: str) -> Sequence[str]:
"""Extract keys from config for given domain name.
Async friendly.
"""
domain_configs = []
for key in config:
with suppress(vol.Invalid):
if cv.domain_key(key) != domain:
continue
domain_configs.append(key)
    return domain_configs

def config_without_domain(config: ConfigType, domain: str) -> ConfigType:
"""Return a config with all configuration for a domain removed."""
filter_keys = extract_domain_configs(config, domain)
    return {key: value for key, value in config.items() if key not in filter_keys}

def safe_mode_enabled(config_dir: str) -> bool:
"""Return if safe mode is enabled.
If safe mode is enabled, the safe mode file will be removed.
"""
safe_mode_path = os.path.join(config_dir, SAFE_MODE_FILENAME)
safe_mode = os.path.exists(safe_mode_path)
if safe_mode:
os.remove(safe_mode_path)
    return safe_mode

def _async_abort_entries_match(
other_entries: list[ConfigEntry], match_dict: dict[str, Any] | None = None
) -> None:
"""Abort if current entries match all data.
Requires `already_configured` in strings.json in user visible flows.
"""
if match_dict is None:
match_dict = {} # Match any entry
for entry in other_entries:
options_items = entry.options.items()
data_items = entry.data.items()
for kv in match_dict.items():
if kv not in options_items and kv not in data_items:
break
else:
            raise data_entry_flow.AbortFlow("already_configured")

def _handle_entry_updated_filter(
event_data: entity_registry.EventEntityRegistryUpdatedData,
) -> bool:
"""Handle entity registry entry update filter.
Only handle changes to "disabled_by".
If "disabled_by" was CONFIG_ENTRY, reload is not needed.
"""
return not (
event_data["action"] != "update"
or "disabled_by" not in event_data["changes"]
or event_data["changes"]["disabled_by"]
is entity_registry.RegistryEntryDisabler.CONFIG_ENTRY
    )

def split_entity_id(entity_id: str) -> tuple[str, str]:
"""Split a state entity ID into domain and object ID."""
domain, _, object_id = entity_id.partition(".")
if not domain or not object_id:
raise ValueError(f"Invalid entity ID {entity_id}")
    return domain, object_id
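For example:

assert split_entity_id("light.kitchen") == ("light", "kitchen")
# split_entity_id("kitchen") raises ValueError: there is no object ID.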
def valid_domain(domain: str) -> bool:
    """Test if a domain has a valid format."""
    return VALID_DOMAIN.match(domain) is not None

def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format.
Format: <domain>.<entity> where both are slugs.
"""
    return VALID_ENTITY_ID.match(entity_id) is not None

def validate_state(state: str) -> str:
    """Validate a state, raise if it is not valid."""
if len(state) > MAX_LENGTH_STATE_STATE:
raise InvalidStateError(
f"Invalid state with length {len(state)}. "
"State max length is 255 characters."
)
    return state

def callback(func: _CallableT) -> _CallableT:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, "_hass_callback", True)
    return func

def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
    return getattr(func, "_hass_callback", False) is True

def is_callback_check_partial(target: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop.
This version of is_callback will also check if the target is a partial
and walk the chain of partials to find the original function.
"""
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
    return is_callback(check_target)

def async_get_hass() -> HomeAssistant:
"""Return the HomeAssistant instance.
Raises HomeAssistantError when called from the wrong thread.
This should be used where it's very cumbersome or downright impossible to pass
hass to the code which needs it.
"""
if not _hass.hass:
raise HomeAssistantError("async_get_hass called from the wrong thread")
    return _hass.hass

def get_release_channel() -> ReleaseChannel:
"""Find release channel based on version number."""
version = __version__
if "dev0" in version:
return ReleaseChannel.DEV
if "dev" in version:
return ReleaseChannel.NIGHTLY
if "b" in version:
return ReleaseChannel.BETA
    return ReleaseChannel.STABLE

def get_hassjob_callable_job_type(target: Callable[..., Any]) -> HassJobType:
"""Determine the job type from the callable."""
# Check for partials to properly determine if coroutine function
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutinefunction(check_target):
return HassJobType.Coroutinefunction
if is_callback(check_target):
return HassJobType.Callback
if asyncio.iscoroutine(check_target):
raise ValueError("Coroutine not allowed to be passed to HassJob")
    return HassJobType.Executor

def _event_repr(
event_type: EventType[_DataT] | str, origin: EventOrigin, data: _DataT | None
) -> str:
"""Return the representation."""
if data:
return f"<Event {event_type}[{str(origin)[0]}]: {util.repr_helper(data)}>"
    return f"<Event {event_type}[{str(origin)[0]}]>"

def _verify_event_type_length_or_raise(event_type: EventType[_DataT] | str) -> None:
"""Verify the length of the event type and raise if too long."""
if len(event_type) > MAX_LENGTH_EVENT_EVENT_TYPE:
        raise MaxLengthExceeded(event_type, "event_type", MAX_LENGTH_EVENT_EVENT_TYPE)

def _map_error_to_schema_errors(
schema_errors: dict[str, Any],
error: vol.Invalid,
data_schema: vol.Schema,
) -> None:
"""Map an error to the correct position in the schema_errors.
Raises ValueError if the error path could not be found in the schema.
Limitation: Nested schemas are not supported and a ValueError will be raised.
"""
schema = data_schema.schema
error_path = error.path
if not error_path or (path_part := error_path[0]) not in schema:
raise ValueError("Could not find path in schema")
if len(error_path) > 1:
raise ValueError("Nested schemas are not supported")
# path_part can also be vol.Marker, but we need a string key
path_part_str = str(path_part)
    schema_errors[path_part_str] = error.error_message

def import_async_get_exception_message() -> (
Callable[[str, str, dict[str, str] | None], str]
):
"""Return a method that can fetch a translated exception message.
Defaults to English, requires translations to already be cached.
"""
# pylint: disable-next=import-outside-toplevel
from .helpers.translation import (
async_get_exception_message as async_get_exception_message_import,
)
    return async_get_exception_message_import

def async_setup(hass: HomeAssistant) -> None:
"""Set up the necessary data structures."""
_async_mount_config_dir(hass)
hass.data[DATA_COMPONENTS] = {}
hass.data[DATA_INTEGRATIONS] = {}
hass.data[DATA_MISSING_PLATFORMS] = {}
    hass.data[DATA_PRELOAD_PLATFORMS] = BASE_PRELOAD_PLATFORMS.copy()

def manifest_from_legacy_module(domain: str, module: ModuleType) -> Manifest:
"""Generate a manifest from a legacy module."""
return {
"domain": domain,
"name": domain,
"requirements": getattr(module, "REQUIREMENTS", []),
"dependencies": getattr(module, "DEPENDENCIES", []),
"codeowners": [],
    }

def async_process_zeroconf_match_dict(entry: dict[str, Any]) -> ZeroconfMatcher:
"""Handle backwards compat with zeroconf matchers."""
entry_without_type: dict[str, Any] = entry.copy()
del entry_without_type["type"]
# These properties keys used to be at the top level, we relocate
# them for backwards compat
for moved_prop in MOVED_ZEROCONF_PROPS:
if value := entry_without_type.pop(moved_prop, None):
_LOGGER.warning(
(
'Matching the zeroconf property "%s" at top-level is deprecated and'
" should be moved into a properties dict; Check the developer"
" documentation"
),
moved_prop,
)
if "properties" not in entry_without_type:
prop_dict: dict[str, str] = {}
entry_without_type["properties"] = prop_dict
else:
prop_dict = entry_without_type["properties"]
prop_dict[moved_prop] = value.lower()
    return cast(ZeroconfMatcher, entry_without_type)

def homekit_always_discover(iot_class: str | None) -> bool:
"""Return if we should always offer HomeKit control for a device."""
#
# Since we prefer local control, if the integration that is being
# discovered is cloud AND the HomeKit device is UNPAIRED we still
    # want to discover it.
#
# Additionally if the integration is polling, HKC offers a local
# push experience for the user to control the device so we want
# to offer that as well.
#
    return not iot_class or (iot_class.startswith("cloud") or "polling" in iot_class)

def async_register_preload_platform(hass: HomeAssistant, platform_name: str) -> None:
"""Register a platform to be preloaded."""
preload_platforms: list[str] = hass.data[DATA_PRELOAD_PLATFORMS]
if platform_name not in preload_platforms:
        preload_platforms.append(platform_name)

def _version_blocked(
integration_version: AwesomeVersion,
blocked_integration: BlockedIntegration,
) -> bool:
"""Return True if the integration version is blocked."""
if blocked_integration.lowest_good_version is None:
return True
if integration_version >= blocked_integration.lowest_good_version:
return False
    return True

def _resolve_integrations_from_root(
hass: HomeAssistant, root_module: ModuleType, domains: Iterable[str]
) -> dict[str, Integration]:
"""Resolve multiple integrations from root."""
integrations: dict[str, Integration] = {}
for domain in domains:
try:
integration = Integration.resolve_from_root(hass, root_module, domain)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error loading integration: %s", domain)
else:
if integration:
integrations[domain] = integration
    return integrations

def async_get_loaded_integration(hass: HomeAssistant, domain: str) -> Integration:
"""Get an integration which is already loaded.
Raises IntegrationNotLoaded if the integration is not loaded.
"""
cache = hass.data[DATA_INTEGRATIONS]
if TYPE_CHECKING:
cache = cast(dict[str, Integration | asyncio.Future[None]], cache)
int_or_fut = cache.get(domain, _UNDEF)
# Integration is never subclassed, so we can check for type
if type(int_or_fut) is Integration:
return int_or_fut
    raise IntegrationNotLoaded(domain)

def _load_file(
hass: HomeAssistant, comp_or_platform: str, base_paths: list[str]
) -> ComponentProtocol | None:
"""Try to load specified file.
Looks in config dir first, then built-in components.
Only returns it if also found to be valid.
Async friendly.
"""
cache: dict[str, ComponentProtocol] = hass.data[DATA_COMPONENTS]
module: ComponentProtocol | None
if module := cache.get(comp_or_platform):
return module
for path in (f"{base}.{comp_or_platform}" for base in base_paths):
try:
module = importlib.import_module(path)
# In Python 3 you can import files from directories that do not
# contain the file __init__.py. A directory is a valid module if
# it contains a file with the .py extension. In this case Python
# will succeed in importing the directory as a module and call it
# a namespace. We do not care about namespaces.
# This prevents that when only
# custom_components/switch/some_platform.py exists,
# the import custom_components.switch would succeed.
# __file__ was unset for namespaces before Python 3.7
if getattr(module, "__file__", None) is None:
continue
cache[comp_or_platform] = module
return cast(ComponentProtocol, module)
except ImportError as err:
# This error happens if for example custom_components/switch
# exists and we try to load switch.demo.
# Ignore errors for custom_components, custom_components.switch
# and custom_components.switch.demo.
white_listed_errors = []
parts = []
for part in path.split("."):
parts.append(part)
white_listed_errors.append(f"No module named '{'.'.join(parts)}'")
if str(err) not in white_listed_errors:
_LOGGER.exception(
"Error loading %s. Make sure all dependencies are installed", path
)
    return None

def bind_hass(func: _CallableT) -> _CallableT:
"""Decorate function to indicate that first argument is hass.
The use of this decorator is discouraged, and it should not be used
for new functions.
"""
setattr(func, "__bind_hass", True)
    return func

def _async_mount_config_dir(hass: HomeAssistant) -> None:
"""Mount config dir in order to load custom_component.
Async friendly but not a coroutine.
"""
sys.path.insert(0, hass.config.config_dir)
with suppress(ImportError):
import custom_components # pylint: disable=import-outside-toplevel # noqa: F401
sys.path.remove(hass.config.config_dir)
    sys.path_importer_cache.pop(hass.config.config_dir, None)

def _lookup_path(hass: HomeAssistant) -> list[str]:
"""Return the lookup paths for legacy lookups."""
if hass.config.recovery_mode or hass.config.safe_mode:
return [PACKAGE_BUILTIN]
    return [PACKAGE_CUSTOM_COMPONENTS, PACKAGE_BUILTIN]

def is_component_module_loaded(hass: HomeAssistant, module: str) -> bool:
"""Test if a component module is loaded."""
    return module in hass.data[DATA_COMPONENTS]

def async_get_issue_tracker(
hass: HomeAssistant | None,
*,
integration: Integration | None = None,
integration_domain: str | None = None,
module: str | None = None,
) -> str | None:
"""Return a URL for an integration's issue tracker."""
issue_tracker = (
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
if not integration and not integration_domain and not module:
# If we know nothing about the entity, suggest opening an issue on HA core
return issue_tracker
if not integration and (hass and integration_domain):
with suppress(IntegrationNotLoaded):
integration = async_get_loaded_integration(hass, integration_domain)
if integration and not integration.is_built_in:
return integration.issue_tracker
if module and "custom_components" in module:
return None
if integration:
integration_domain = integration.domain
if integration_domain:
issue_tracker += f"+label%3A%22integration%3A+{integration_domain}%22"
    return issue_tracker

def async_suggest_report_issue(
hass: HomeAssistant | None,
*,
integration: Integration | None = None,
integration_domain: str | None = None,
module: str | None = None,
) -> str:
"""Generate a blurb asking the user to file a bug report."""
issue_tracker = async_get_issue_tracker(
hass,
integration=integration,
integration_domain=integration_domain,
module=module,
)
if not issue_tracker:
if integration:
integration_domain = integration.domain
if not integration_domain:
return "report it to the custom integration author"
return (
f"report it to the author of the '{integration_domain}' "
"custom integration"
)
    return f"create a bug report at {issue_tracker}"

def _async_get_manager(hass: HomeAssistant) -> RequirementsManager:
"""Get the requirements manager."""
    return RequirementsManager(hass)

def async_clear_install_history(hass: HomeAssistant) -> None:
"""Forget the install history."""
    _async_get_manager(hass).install_failure_history.clear()

def pip_kwargs(config_dir: str | None) -> dict[str, Any]:
"""Return keyword arguments for PIP install."""
is_docker = pkg_util.is_docker_env()
kwargs = {
"constraints": os.path.join(os.path.dirname(__file__), CONSTRAINT_FILE),
"timeout": PIP_TIMEOUT,
}
if not (config_dir is None or pkg_util.is_virtual_env()) and not is_docker:
kwargs["target"] = os.path.join(config_dir, "deps")
    return kwargs

def _install_with_retry(requirement: str, kwargs: dict[str, Any]) -> bool:
"""Try to install a package up to MAX_INSTALL_FAILURES times."""
for _ in range(MAX_INSTALL_FAILURES):
if pkg_util.install_package(requirement, **kwargs):
return True
    return False

def _install_requirements_if_missing(
requirements: list[str], kwargs: dict[str, Any]
) -> tuple[set[str], set[str]]:
"""Install requirements if missing."""
installed: set[str] = set()
failures: set[str] = set()
for req in requirements:
if pkg_util.is_installed(req) or _install_with_retry(req, kwargs):
installed.add(req)
continue
failures.add(req)
    return installed, failures

def can_use_pidfd() -> bool:
"""Check if pidfd_open is available.
Back ported from cpython 3.12
"""
if not hasattr(os, "pidfd_open"):
return False
try:
pid = os.getpid()
os.close(os.pidfd_open(pid, 0))
except OSError:
# blocked by security policy like SECCOMP
return False
    return True

def _async_loop_exception_handler(_: Any, context: dict[str, Any]) -> None:
"""Handle all exception inside the core loop."""
kwargs = {}
if exception := context.get("exception"):
kwargs["exc_info"] = (type(exception), exception, exception.__traceback__)
logger = logging.getLogger(__package__)
if source_traceback := context.get("source_traceback"):
stack_summary = "".join(traceback.format_list(source_traceback))
logger.error(
"Error doing job: %s: %s",
context["message"],
stack_summary,
**kwargs, # type: ignore[arg-type]
)
return
logger.error(
"Error doing job: %s",
context["message"],
**kwargs, # type: ignore[arg-type]
    )

def _enable_posix_spawn() -> None:
"""Enable posix_spawn on Alpine Linux."""
if subprocess._USE_POSIX_SPAWN: # pylint: disable=protected-access
return
    # The subprocess module does not know about Alpine Linux/musl and will
    # use fork() instead of posix_spawn(), which is significantly less
    # efficient. This is a workaround to force posix_spawn() when using
    # musl, since cpython is not aware it is supported.
tag = next(packaging.tags.sys_tags())
# pylint: disable-next=protected-access
    subprocess._USE_POSIX_SPAWN = "musllinux" in tag.platform

def run(runtime_config: RuntimeConfig) -> int:
"""Run Home Assistant."""
_enable_posix_spawn()
asyncio.set_event_loop_policy(HassEventLoopPolicy(runtime_config.debug))
# Backport of cpython 3.9 asyncio.run with a _cancel_all_tasks that times out
loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(setup_and_run_hass(runtime_config))
finally:
try:
_cancel_all_tasks_with_timeout(loop, TASK_CANCELATION_TIMEOUT)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(loop.shutdown_default_executor())
finally:
asyncio.set_event_loop(None)
            loop.close()

def _cancel_all_tasks_with_timeout(
loop: asyncio.AbstractEventLoop, timeout: int
) -> None:
"""Adapted _cancel_all_tasks from python 3.9 with a timeout."""
to_cancel = asyncio.all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel("Final process shutdown")
loop.run_until_complete(asyncio.wait(to_cancel, timeout=timeout))
for task in to_cancel:
if task.cancelled():
continue
if not task.done():
_LOGGER.warning(
"Task could not be canceled and was still running after shutdown: %s",
task,
)
continue
if task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task,
}
            )

def async_notify_setup_error(
hass: HomeAssistant, component: str, display_link: str | None = None
) -> None:
"""Print a persistent notification.
This method must be run in the event loop.
"""
# pylint: disable-next=import-outside-toplevel
from .components import persistent_notification
if (errors := hass.data.get(DATA_PERSISTENT_ERRORS)) is None:
errors = hass.data[DATA_PERSISTENT_ERRORS] = {}
errors[component] = errors.get(component) or display_link
message = "The following integrations and platforms could not be set up:\n\n"
for name, link in errors.items():
show_logs = f"[Show logs](/config/logs?filter={name})"
part = f"[{name}]({link})" if link else name
message += f" - {part} ({show_logs})\n"
message += "\nPlease check your config and [logs](/config/logs)."
persistent_notification.async_create(
hass, message, "Invalid config", "invalid_config"
    )

def async_set_domains_to_be_loaded(hass: core.HomeAssistant, domains: set[str]) -> None:
"""Set domains that are going to be loaded from the config.
    This allows us to:
- Properly handle after_dependencies.
- Keep track of domains which will load but have not yet finished loading
"""
setup_done_futures: dict[str, asyncio.Future[bool]] = hass.data.setdefault(
DATA_SETUP_DONE, {}
)
    setup_done_futures.update({domain: hass.loop.create_future() for domain in domains})

def setup_component(hass: core.HomeAssistant, domain: str, config: ConfigType) -> bool:
"""Set up a component and all its dependencies."""
return asyncio.run_coroutine_threadsafe(
async_setup_component(hass, domain, config), hass.loop
    ).result()