code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
def __init__(
    self,
    *,
    environment_id: Optional[str] = None,
    configuration: Optional["JobConfiguration"] = None,
    template: Optional["JobTemplate"] = None,
    outbound_ip_addresses: Optional[List[str]] = None,
    event_stream_endpoint: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Initialize Container Apps Job properties.

    :keyword environment_id: Resource ID of environment.
    :paramtype environment_id: str
    :keyword configuration: Container Apps Job configuration properties.
    :paramtype configuration: ~azure.mgmt.appcontainers.models.JobConfiguration
    :keyword template: Container Apps job definition.
    :paramtype template: ~azure.mgmt.appcontainers.models.JobTemplate
    :keyword outbound_ip_addresses: Outbound IP Addresses of a container apps job.
    :paramtype outbound_ip_addresses: list[str]
    :keyword event_stream_endpoint: The endpoint of the eventstream of the container apps job.
    :paramtype event_stream_endpoint: str
    """
    super().__init__(**kwargs)
    self.environment_id = environment_id
    self.configuration = configuration
    self.template = template
    self.outbound_ip_addresses = outbound_ip_addresses
    self.event_stream_endpoint = event_stream_endpoint
:keyword environment_id: Resource ID of environment. :paramtype environment_id: str :keyword configuration: Container Apps Job configuration properties. :paramtype configuration: ~azure.mgmt.appcontainers.models.JobConfiguration :keyword template: Container Apps job definition. :paramtype template: ~azure.mgmt.appcontainers.models.JobTemplate :keyword outbound_ip_addresses: Outbound IP Addresses of a container apps job. :paramtype outbound_ip_addresses: list[str] :keyword event_stream_endpoint: The endpoint of the eventstream of the container apps job. :paramtype event_stream_endpoint: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    polling_interval: Optional[int] = None,
    min_executions: int = 0,
    max_executions: int = 100,
    rules: Optional[List["JobScaleRule"]] = None,
    **kwargs: Any
) -> None:
    """Initialize event-driven job scaling settings.

    :keyword polling_interval: Interval to check each event source in seconds. Defaults to 30s.
    :paramtype polling_interval: int
    :keyword min_executions: Minimum number of job executions that are created for a trigger,
     default 0.
    :paramtype min_executions: int
    :keyword max_executions: Maximum number of job executions that are created for a trigger,
     default 100.
    :paramtype max_executions: int
    :keyword rules: Scaling rules.
    :paramtype rules: list[~azure.mgmt.appcontainers.models.JobScaleRule]
    """
    super().__init__(**kwargs)
    self.polling_interval = polling_interval
    self.min_executions = min_executions
    self.max_executions = max_executions
    self.rules = rules
:keyword polling_interval: Interval to check each event source in seconds. Defaults to 30s. :paramtype polling_interval: int :keyword min_executions: Minimum number of job executions that are created for a trigger, default 0. :paramtype min_executions: int :keyword max_executions: Maximum number of job executions that are created for a trigger, default 100. :paramtype max_executions: int :keyword rules: Scaling rules. :paramtype rules: list[~azure.mgmt.appcontainers.models.JobScaleRule]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    name: Optional[str] = None,
    type: Optional[str] = None,
    metadata: Optional[JSON] = None,
    auth: Optional[List["ScaleRuleAuth"]] = None,
    **kwargs: Any
) -> None:
    """Initialize a scale rule.

    :keyword name: Scale Rule Name.
    :paramtype name: str
    :keyword type: Type of the scale rule eg: azure-servicebus, redis etc.
    :paramtype type: str
    :keyword metadata: Metadata properties to describe the scale rule.
    :paramtype metadata: JSON
    :keyword auth: Authentication secrets for the scale rule.
    :paramtype auth: list[~azure.mgmt.appcontainers.models.ScaleRuleAuth]
    """
    super().__init__(**kwargs)
    self.name = name
    self.type = type
    self.metadata = metadata
    self.auth = auth
:keyword name: Scale Rule Name. :paramtype name: str :keyword type: Type of the scale rule eg: azure-servicebus, redis etc. :paramtype type: str :keyword metadata: Metadata properties to describe the scale rule. :paramtype metadata: JSON :keyword auth: Authentication secrets for the scale rule. :paramtype auth: list[~azure.mgmt.appcontainers.models.ScaleRuleAuth]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["Job"], **kwargs: Any) -> None:
    """Initialize a collection of Job resources.

    :keyword value: Collection of resources. Required.
    :paramtype value: list[~azure.mgmt.appcontainers.models.Job]
    """
    super().__init__(**kwargs)
    self.value = value
    # next_link is read-only (server populated); never set by the caller.
    self.next_link = None
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.Job]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["Secret"], **kwargs: Any) -> None:
    """Initialize a collection of Secret resources.

    :keyword value: Collection of resources. Required.
    :paramtype value: list[~azure.mgmt.appcontainers.models.Secret]
    """
    super().__init__(**kwargs)
    self.value = value
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.Secret]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    init_containers: Optional[List["InitContainer"]] = None,
    containers: Optional[List["Container"]] = None,
    volumes: Optional[List["Volume"]] = None,
    **kwargs: Any
) -> None:
    """Initialize a Container App template.

    :keyword init_containers: List of specialized containers that run before app containers.
    :paramtype init_containers: list[~azure.mgmt.appcontainers.models.InitContainer]
    :keyword containers: List of container definitions for the Container App.
    :paramtype containers: list[~azure.mgmt.appcontainers.models.Container]
    :keyword volumes: List of volume definitions for the Container App.
    :paramtype volumes: list[~azure.mgmt.appcontainers.models.Volume]
    """
    super().__init__(**kwargs)
    self.init_containers = init_containers
    self.containers = containers
    self.volumes = volumes
:keyword init_containers: List of specialized containers that run before app containers. :paramtype init_containers: list[~azure.mgmt.appcontainers.models.InitContainer] :keyword containers: List of container definitions for the Container App. :paramtype containers: list[~azure.mgmt.appcontainers.models.Container] :keyword volumes: List of volume definitions for the Container App. :paramtype volumes: list[~azure.mgmt.appcontainers.models.Volume]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    allowed_groups: Optional[List[str]] = None,
    allowed_client_applications: Optional[List[str]] = None,
    **kwargs: Any
) -> None:
    """Initialize allowed-principals settings.

    :keyword allowed_groups: The list of the allowed groups.
    :paramtype allowed_groups: list[str]
    :keyword allowed_client_applications: The list of the allowed client applications.
    :paramtype allowed_client_applications: list[str]
    """
    super().__init__(**kwargs)
    self.allowed_groups = allowed_groups
    self.allowed_client_applications = allowed_client_applications
:keyword allowed_groups: The list of the allowed groups. :paramtype allowed_groups: list[str] :keyword allowed_client_applications: The list of the allowed client applications. :paramtype allowed_client_applications: list[str]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    value: Optional[List["Usage"]] = None,
    next_link: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Initialize a page of compute resource usages.

    :keyword value: The list of compute resource usages.
    :paramtype value: list[~azure.mgmt.appcontainers.models.Usage]
    :keyword next_link: The URI to fetch the next page of compute resource usage information.
     Call ListNext() with this to fetch the next page of compute resource usage information.
    :paramtype next_link: str
    """
    super().__init__(**kwargs)
    self.value = value
    self.next_link = next_link
:keyword value: The list of compute resource usages. :paramtype value: list[~azure.mgmt.appcontainers.models.Usage] :keyword next_link: The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information. :paramtype next_link: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, customer_id: Optional[str] = None, shared_key: Optional[str] = None, **kwargs: Any) -> None:
    """Initialize Log Analytics destination settings.

    :keyword customer_id: Log analytics customer id.
    :paramtype customer_id: str
    :keyword shared_key: Log analytics customer key.
    :paramtype shared_key: str
    """
    super().__init__(**kwargs)
    self.customer_id = customer_id
    self.shared_key = shared_key
:keyword customer_id: Log analytics customer id. :paramtype customer_id: str :keyword shared_key: Log analytics customer key. :paramtype shared_key: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    routes: Optional["LoginRoutes"] = None,
    token_store: Optional["TokenStore"] = None,
    preserve_url_fragments_for_logins: Optional[bool] = None,
    allowed_external_redirect_urls: Optional[List[str]] = None,
    cookie_expiration: Optional["CookieExpiration"] = None,
    nonce: Optional["Nonce"] = None,
    **kwargs: Any
) -> None:
    """Initialize login flow configuration.

    :keyword routes: The routes that specify the endpoints used for login and logout requests.
    :paramtype routes: ~azure.mgmt.appcontainers.models.LoginRoutes
    :keyword token_store: The configuration settings of the token store.
    :paramtype token_store: ~azure.mgmt.appcontainers.models.TokenStore
    :keyword preserve_url_fragments_for_logins: :code:`<code>true</code>` if the fragments from
     the request are preserved after the login request is made; otherwise,
     :code:`<code>false</code>`.
    :paramtype preserve_url_fragments_for_logins: bool
    :keyword allowed_external_redirect_urls: External URLs that can be redirected to as part of
     logging in or logging out of the app. Note that the query string part of the URL is ignored.
     This is an advanced setting typically only needed by Windows Store application backends.
     Note that URLs within the current domain are always implicitly allowed.
    :paramtype allowed_external_redirect_urls: list[str]
    :keyword cookie_expiration: The configuration settings of the session cookie's expiration.
    :paramtype cookie_expiration: ~azure.mgmt.appcontainers.models.CookieExpiration
    :keyword nonce: The configuration settings of the nonce used in the login flow.
    :paramtype nonce: ~azure.mgmt.appcontainers.models.Nonce
    """
    super().__init__(**kwargs)
    self.routes = routes
    self.token_store = token_store
    self.preserve_url_fragments_for_logins = preserve_url_fragments_for_logins
    self.allowed_external_redirect_urls = allowed_external_redirect_urls
    self.cookie_expiration = cookie_expiration
    self.nonce = nonce
:keyword routes: The routes that specify the endpoints used for login and logout requests. :paramtype routes: ~azure.mgmt.appcontainers.models.LoginRoutes :keyword token_store: The configuration settings of the token store. :paramtype token_store: ~azure.mgmt.appcontainers.models.TokenStore :keyword preserve_url_fragments_for_logins: :code:`<code>true</code>` if the fragments from the request are preserved after the login request is made; otherwise, :code:`<code>false</code>`. :paramtype preserve_url_fragments_for_logins: bool :keyword allowed_external_redirect_urls: External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored. This is an advanced setting typically only needed by Windows Store application backends. Note that URLs within the current domain are always implicitly allowed. :paramtype allowed_external_redirect_urls: list[str] :keyword cookie_expiration: The configuration settings of the session cookie's expiration. :paramtype cookie_expiration: ~azure.mgmt.appcontainers.models.CookieExpiration :keyword nonce: The configuration settings of the nonce used in the login flow. :paramtype nonce: ~azure.mgmt.appcontainers.models.Nonce
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, logout_endpoint: Optional[str] = None, **kwargs: Any) -> None:
    """Initialize login route endpoints.

    :keyword logout_endpoint: The endpoint at which a logout request should be made.
    :paramtype logout_endpoint: str
    """
    super().__init__(**kwargs)
    self.logout_endpoint = logout_endpoint
:keyword logout_endpoint: The endpoint at which a logout request should be made. :paramtype logout_endpoint: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, scopes: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Initialize the login scope list.

    :keyword scopes: A list of the scopes that should be requested while authenticating.
    :paramtype scopes: list[str]
    """
    super().__init__(**kwargs)
    self.scopes = scopes
:keyword scopes: A list of the scopes that should be requested while authenticating. :paramtype scopes: list[str]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    location: str,
    tags: Optional[Dict[str, str]] = None,
    properties: Optional["ManagedCertificateProperties"] = None,
    **kwargs: Any
) -> None:
    """Initialize a Managed Certificate resource.

    :keyword tags: Resource tags.
    :paramtype tags: dict[str, str]
    :keyword location: The geo-location where the resource lives. Required.
    :paramtype location: str
    :keyword properties: Certificate resource specific properties.
    :paramtype properties: ~azure.mgmt.appcontainers.models.ManagedCertificateProperties
    """
    # location/tags belong to the tracked-resource base class.
    super().__init__(tags=tags, location=location, **kwargs)
    self.properties = properties
:keyword tags: Resource tags. :paramtype tags: dict[str, str] :keyword location: The geo-location where the resource lives. Required. :paramtype location: str :keyword properties: Certificate resource specific properties. :paramtype properties: ~azure.mgmt.appcontainers.models.ManagedCertificateProperties
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["ManagedCertificate"], **kwargs: Any) -> None:
    """Initialize a collection of Managed Certificate resources.

    :keyword value: Collection of resources. Required.
    :paramtype value: list[~azure.mgmt.appcontainers.models.ManagedCertificate]
    """
    super().__init__(**kwargs)
    self.value = value
    # next_link is read-only (server populated); never set by the caller.
    self.next_link = None
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.ManagedCertificate]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
    """Initialize a tags-only patch payload.

    :keyword tags: Application-specific metadata in the form of key-value pairs.
    :paramtype tags: dict[str, str]
    """
    super().__init__(**kwargs)
    self.tags = tags
:keyword tags: Application-specific metadata in the form of key-value pairs. :paramtype tags: dict[str, str]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    subject_name: Optional[str] = None,
    domain_control_validation: Optional[Union[str, "ManagedCertificateDomainControlValidation"]] = None,
    **kwargs: Any
) -> None:
    """Initialize Managed Certificate properties.

    :keyword subject_name: Subject name of the certificate.
    :paramtype subject_name: str
    :keyword domain_control_validation: Selected type of domain control validation for managed
     certificates. Known values are: "CNAME", "HTTP", and "TXT".
    :paramtype domain_control_validation: str or
     ~azure.mgmt.appcontainers.models.ManagedCertificateDomainControlValidation
    """
    super().__init__(**kwargs)
    # Read-only, server-populated attributes start as None.
    self.provisioning_state = None
    self.subject_name = subject_name
    self.error = None
    self.domain_control_validation = domain_control_validation
    self.validation_token = None
:keyword subject_name: Subject name of the certificate. :paramtype subject_name: str :keyword domain_control_validation: Selected type of domain control validation for managed certificates. Known values are: "CNAME", "HTTP", and "TXT". :paramtype domain_control_validation: str or ~azure.mgmt.appcontainers.models.ManagedCertificateDomainControlValidation
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(  # pylint: disable=too-many-locals
    self,
    *,
    location: str,
    tags: Optional[Dict[str, str]] = None,
    kind: Optional[str] = None,
    dapr_ai_instrumentation_key: Optional[str] = None,
    dapr_ai_connection_string: Optional[str] = None,
    vnet_configuration: Optional["VnetConfiguration"] = None,
    app_logs_configuration: Optional["AppLogsConfiguration"] = None,
    zone_redundant: Optional[bool] = None,
    custom_domain_configuration: Optional["CustomDomainConfiguration"] = None,
    workload_profiles: Optional[List["WorkloadProfile"]] = None,
    keda_configuration: Optional["KedaConfiguration"] = None,
    dapr_configuration: Optional["DaprConfiguration"] = None,
    infrastructure_resource_group: Optional[str] = None,
    peer_authentication: Optional["ManagedEnvironmentPropertiesPeerAuthentication"] = None,
    peer_traffic_configuration: Optional["ManagedEnvironmentPropertiesPeerTrafficConfiguration"] = None,
    **kwargs: Any
) -> None:
    """Initialize a Managed Environment resource.

    :keyword tags: Resource tags.
    :paramtype tags: dict[str, str]
    :keyword location: The geo-location where the resource lives. Required.
    :paramtype location: str
    :keyword kind: Kind of the Environment.
    :paramtype kind: str
    :keyword dapr_ai_instrumentation_key: Azure Monitor instrumentation key used by Dapr to
     export Service to Service communication telemetry.
    :paramtype dapr_ai_instrumentation_key: str
    :keyword dapr_ai_connection_string: Application Insights connection string used by Dapr to
     export Service to Service communication telemetry.
    :paramtype dapr_ai_connection_string: str
    :keyword vnet_configuration: Vnet configuration for the environment.
    :paramtype vnet_configuration: ~azure.mgmt.appcontainers.models.VnetConfiguration
    :keyword app_logs_configuration: Cluster configuration which enables the log daemon to export
     app logs to a destination. Currently only "log-analytics" is supported.
    :paramtype app_logs_configuration: ~azure.mgmt.appcontainers.models.AppLogsConfiguration
    :keyword zone_redundant: Whether or not this Managed Environment is zone-redundant.
    :paramtype zone_redundant: bool
    :keyword custom_domain_configuration: Custom domain configuration for the environment.
    :paramtype custom_domain_configuration:
     ~azure.mgmt.appcontainers.models.CustomDomainConfiguration
    :keyword workload_profiles: Workload profiles configured for the Managed Environment.
    :paramtype workload_profiles: list[~azure.mgmt.appcontainers.models.WorkloadProfile]
    :keyword keda_configuration: The configuration of Keda component.
    :paramtype keda_configuration: ~azure.mgmt.appcontainers.models.KedaConfiguration
    :keyword dapr_configuration: The configuration of Dapr component.
    :paramtype dapr_configuration: ~azure.mgmt.appcontainers.models.DaprConfiguration
    :keyword infrastructure_resource_group: Name of the platform-managed resource group created
     for the Managed Environment to host infrastructure resources. If a subnet ID is provided,
     this resource group will be created in the same subscription as the subnet.
    :paramtype infrastructure_resource_group: str
    :keyword peer_authentication: Peer authentication settings for the Managed Environment.
    :paramtype peer_authentication:
     ~azure.mgmt.appcontainers.models.ManagedEnvironmentPropertiesPeerAuthentication
    :keyword peer_traffic_configuration: Peer traffic settings for the Managed Environment.
    :paramtype peer_traffic_configuration:
     ~azure.mgmt.appcontainers.models.ManagedEnvironmentPropertiesPeerTrafficConfiguration
    """
    # location/tags belong to the tracked-resource base class.
    super().__init__(tags=tags, location=location, **kwargs)
    self.kind = kind
    # Read-only, server-populated attributes start as None.
    self.provisioning_state = None
    self.dapr_ai_instrumentation_key = dapr_ai_instrumentation_key
    self.dapr_ai_connection_string = dapr_ai_connection_string
    self.vnet_configuration = vnet_configuration
    self.deployment_errors = None
    self.default_domain = None
    self.static_ip = None
    self.app_logs_configuration = app_logs_configuration
    self.zone_redundant = zone_redundant
    self.custom_domain_configuration = custom_domain_configuration
    self.event_stream_endpoint = None
    self.workload_profiles = workload_profiles
    self.keda_configuration = keda_configuration
    self.dapr_configuration = dapr_configuration
    self.infrastructure_resource_group = infrastructure_resource_group
    self.peer_authentication = peer_authentication
    self.peer_traffic_configuration = peer_traffic_configuration
:keyword tags: Resource tags. :paramtype tags: dict[str, str] :keyword location: The geo-location where the resource lives. Required. :paramtype location: str :keyword kind: Kind of the Environment. :paramtype kind: str :keyword dapr_ai_instrumentation_key: Azure Monitor instrumentation key used by Dapr to export Service to Service communication telemetry. :paramtype dapr_ai_instrumentation_key: str :keyword dapr_ai_connection_string: Application Insights connection string used by Dapr to export Service to Service communication telemetry. :paramtype dapr_ai_connection_string: str :keyword vnet_configuration: Vnet configuration for the environment. :paramtype vnet_configuration: ~azure.mgmt.appcontainers.models.VnetConfiguration :keyword app_logs_configuration: Cluster configuration which enables the log daemon to export app logs to a destination. Currently only "log-analytics" is supported. :paramtype app_logs_configuration: ~azure.mgmt.appcontainers.models.AppLogsConfiguration :keyword zone_redundant: Whether or not this Managed Environment is zone-redundant. :paramtype zone_redundant: bool :keyword custom_domain_configuration: Custom domain configuration for the environment. :paramtype custom_domain_configuration: ~azure.mgmt.appcontainers.models.CustomDomainConfiguration :keyword workload_profiles: Workload profiles configured for the Managed Environment. :paramtype workload_profiles: list[~azure.mgmt.appcontainers.models.WorkloadProfile] :keyword keda_configuration: The configuration of Keda component. :paramtype keda_configuration: ~azure.mgmt.appcontainers.models.KedaConfiguration :keyword dapr_configuration: The configuration of Dapr component. :paramtype dapr_configuration: ~azure.mgmt.appcontainers.models.DaprConfiguration :keyword infrastructure_resource_group: Name of the platform-managed resource group created for the Managed Environment to host infrastructure resources. 
If a subnet ID is provided, this resource group will be created in the same subscription as the subnet. :paramtype infrastructure_resource_group: str :keyword peer_authentication: Peer authentication settings for the Managed Environment. :paramtype peer_authentication: ~azure.mgmt.appcontainers.models.ManagedEnvironmentPropertiesPeerAuthentication :keyword peer_traffic_configuration: Peer traffic settings for the Managed Environment. :paramtype peer_traffic_configuration: ~azure.mgmt.appcontainers.models.ManagedEnvironmentPropertiesPeerTrafficConfiguration
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, mtls: Optional["Mtls"] = None, **kwargs: Any) -> None:
    """Initialize peer authentication settings.

    :keyword mtls: Mutual TLS authentication settings for the Managed Environment.
    :paramtype mtls: ~azure.mgmt.appcontainers.models.Mtls
    """
    super().__init__(**kwargs)
    self.mtls = mtls
:keyword mtls: Mutual TLS authentication settings for the Managed Environment. :paramtype mtls: ~azure.mgmt.appcontainers.models.Mtls
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    encryption: Optional["ManagedEnvironmentPropertiesPeerTrafficConfigurationEncryption"] = None,
    **kwargs: Any
) -> None:
    """Initialize peer traffic settings.

    :keyword encryption: Peer traffic encryption settings for the Managed Environment.
    :paramtype encryption:
     ~azure.mgmt.appcontainers.models.ManagedEnvironmentPropertiesPeerTrafficConfigurationEncryption
    """
    super().__init__(**kwargs)
    self.encryption = encryption
:keyword encryption: Peer traffic encryption settings for the Managed Environment. :paramtype encryption: ~azure.mgmt.appcontainers.models.ManagedEnvironmentPropertiesPeerTrafficConfigurationEncryption
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
    """Initialize peer traffic encryption settings.

    :keyword enabled: Boolean indicating whether the peer traffic encryption is enabled.
    :paramtype enabled: bool
    """
    super().__init__(**kwargs)
    self.enabled = enabled
:keyword enabled: Boolean indicating whether the peer traffic encryption is enabled. :paramtype enabled: bool
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["ManagedEnvironment"], **kwargs: Any) -> None:
    """Initialize a collection of Managed Environment resources.

    :keyword value: Collection of resources. Required.
    :paramtype value: list[~azure.mgmt.appcontainers.models.ManagedEnvironment]
    """
    super().__init__(**kwargs)
    self.value = value
    # next_link is read-only (server populated); never set by the caller.
    self.next_link = None
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.ManagedEnvironment]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    properties: Optional["ManagedEnvironmentStorageProperties"] = None,
    **kwargs: Any
) -> None:
    """Initialize a Managed Environment storage resource.

    :keyword properties: Storage properties.
    :paramtype properties: ~azure.mgmt.appcontainers.models.ManagedEnvironmentStorageProperties
    """
    super().__init__(**kwargs)
    self.properties = properties
:keyword properties: Storage properties. :paramtype properties: ~azure.mgmt.appcontainers.models.ManagedEnvironmentStorageProperties
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, azure_file: Optional["AzureFileProperties"] = None, **kwargs: Any) -> None:
    """Initialize storage properties.

    :keyword azure_file: Azure file properties.
    :paramtype azure_file: ~azure.mgmt.appcontainers.models.AzureFileProperties
    """
    super().__init__(**kwargs)
    self.azure_file = azure_file
:keyword azure_file: Azure file properties. :paramtype azure_file: ~azure.mgmt.appcontainers.models.AzureFileProperties
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["ManagedEnvironmentStorage"], **kwargs: Any) -> None:
    """Initialize a collection of storage resources.

    :keyword value: Collection of storage resources. Required.
    :paramtype value: list[~azure.mgmt.appcontainers.models.ManagedEnvironmentStorage]
    """
    super().__init__(**kwargs)
    self.value = value
:keyword value: Collection of storage resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.ManagedEnvironmentStorage]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    type: Union[str, "ManagedServiceIdentityType"],
    user_assigned_identities: Optional[Dict[str, "UserAssignedIdentity"]] = None,
    **kwargs: Any
) -> None:
    """Initialize a managed service identity.

    :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
     types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned",
     and "SystemAssigned,UserAssigned".
    :paramtype type: str or ~azure.mgmt.appcontainers.models.ManagedServiceIdentityType
    :keyword user_assigned_identities: The set of user assigned identities associated with the
     resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.  # pylint: disable=line-too-long
     The dictionary values can be empty objects ({}) in requests.
    :paramtype user_assigned_identities: dict[str,
     ~azure.mgmt.appcontainers.models.UserAssignedIdentity]
    """
    super().__init__(**kwargs)
    # principal_id/tenant_id are read-only (server populated).
    self.principal_id = None
    self.tenant_id = None
    self.type = type
    self.user_assigned_identities = user_assigned_identities
:keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned". :paramtype type: str or ~azure.mgmt.appcontainers.models.ManagedServiceIdentityType :keyword user_assigned_identities: The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. # pylint: disable=line-too-long The dictionary values can be empty objects ({}) in requests. :paramtype user_assigned_identities: dict[str, ~azure.mgmt.appcontainers.models.UserAssignedIdentity]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
    """
    :keyword enabled: Whether mutual TLS authentication is turned on.
    :paramtype enabled: bool
    """
    super().__init__(**kwargs)
    self.enabled = enabled
:keyword enabled: Boolean indicating whether the mutual TLS authentication is enabled. :paramtype enabled: bool
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    validate_nonce: Optional[bool] = None,
    nonce_expiration_interval: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword validate_nonce: :code:`<code>false</code>` to skip nonce validation while
     completing the login flow; otherwise, :code:`<code>true</code>`.
    :paramtype validate_nonce: bool
    :keyword nonce_expiration_interval: How long after the request is made the nonce
     should remain valid.
    :paramtype nonce_expiration_interval: str
    """
    super().__init__(**kwargs)
    self.validate_nonce = validate_nonce
    self.nonce_expiration_interval = nonce_expiration_interval
:keyword validate_nonce: :code:`<code>false</code>` if the nonce should not be validated while completing the login flow; otherwise, :code:`<code>true</code>`. :paramtype validate_nonce: bool :keyword nonce_expiration_interval: The time after the request is made when the nonce should expire. :paramtype nonce_expiration_interval: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    method: Optional[Literal["ClientSecretPost"]] = None,
    client_secret_setting_name: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword method: Method used to authenticate the user. The only accepted value is
     "ClientSecretPost".
    :paramtype method: str
    :keyword client_secret_setting_name: Name of the app setting holding the client
     secret for the custom Open ID Connect provider.
    :paramtype client_secret_setting_name: str
    """
    super().__init__(**kwargs)
    self.method = method
    self.client_secret_setting_name = client_secret_setting_name
:keyword method: The method that should be used to authenticate the user. Default value is "ClientSecretPost". :paramtype method: str :keyword client_secret_setting_name: The app setting that contains the client secret for the custom Open ID Connect provider. :paramtype client_secret_setting_name: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    authorization_endpoint: Optional[str] = None,
    token_endpoint: Optional[str] = None,
    issuer: Optional[str] = None,
    certification_uri: Optional[str] = None,
    well_known_open_id_configuration: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword authorization_endpoint: Endpoint used to make an authorization request.
    :paramtype authorization_endpoint: str
    :keyword token_endpoint: Endpoint used to request a token.
    :paramtype token_endpoint: str
    :keyword issuer: Endpoint that issues the token.
    :paramtype issuer: str
    :keyword certification_uri: Endpoint providing the keys needed to validate the token.
    :paramtype certification_uri: str
    :keyword well_known_open_id_configuration: Endpoint listing all of the provider's
     configuration endpoints.
    :paramtype well_known_open_id_configuration: str
    """
    super().__init__(**kwargs)
    self.authorization_endpoint = authorization_endpoint
    self.token_endpoint = token_endpoint
    self.issuer = issuer
    self.certification_uri = certification_uri
    self.well_known_open_id_configuration = well_known_open_id_configuration
:keyword authorization_endpoint: The endpoint to be used to make an authorization request. :paramtype authorization_endpoint: str :keyword token_endpoint: The endpoint to be used to request a token. :paramtype token_endpoint: str :keyword issuer: The endpoint that issues the token. :paramtype issuer: str :keyword certification_uri: The endpoint that provides the keys necessary to validate the token. :paramtype certification_uri: str :keyword well_known_open_id_configuration: The endpoint that contains all the configuration endpoints for the provider. :paramtype well_known_open_id_configuration: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    name_claim_type: Optional[str] = None,
    scopes: Optional[List[str]] = None,
    **kwargs: Any
) -> None:
    """
    :keyword name_claim_type: Name of the claim that contains the user's name.
    :paramtype name_claim_type: str
    :keyword scopes: Scopes to request while authenticating.
    :paramtype scopes: list[str]
    """
    super().__init__(**kwargs)
    self.name_claim_type = name_claim_type
    self.scopes = scopes
:keyword name_claim_type: The name of the claim that contains the user's name. :paramtype name_claim_type: str :keyword scopes: A list of the scopes that should be requested while authenticating. :paramtype scopes: list[str]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    client_id: Optional[str] = None,
    client_credential: Optional["OpenIdConnectClientCredential"] = None,
    open_id_connect_configuration: Optional["OpenIdConnectConfig"] = None,
    **kwargs: Any
) -> None:
    """
    :keyword client_id: Client id of the custom Open ID Connect provider.
    :paramtype client_id: str
    :keyword client_credential: Authentication credentials of the custom Open ID Connect
     provider.
    :paramtype client_credential: ~azure.mgmt.appcontainers.models.OpenIdConnectClientCredential
    :keyword open_id_connect_configuration: Configuration of the endpoints used for the
     custom Open ID Connect provider.
    :paramtype open_id_connect_configuration:
     ~azure.mgmt.appcontainers.models.OpenIdConnectConfig
    """
    super().__init__(**kwargs)
    self.client_id = client_id
    self.client_credential = client_credential
    self.open_id_connect_configuration = open_id_connect_configuration
:keyword client_id: The client id of the custom Open ID Connect provider. :paramtype client_id: str :keyword client_credential: The authentication credentials of the custom Open ID Connect provider. :paramtype client_credential: ~azure.mgmt.appcontainers.models.OpenIdConnectClientCredential :keyword open_id_connect_configuration: The configuration settings of the endpoints used for the custom Open ID Connect provider. :paramtype open_id_connect_configuration: ~azure.mgmt.appcontainers.models.OpenIdConnectConfig
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    name: Optional[str] = None,
    is_data_action: Optional[bool] = None,
    display: Optional["OperationDisplay"] = None,
    origin: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword name: Name of the operation.
    :paramtype name: str
    :keyword is_data_action: Whether the operation is a data action.
    :paramtype is_data_action: bool
    :keyword display: Display of the operation.
    :paramtype display: ~azure.mgmt.appcontainers.models.OperationDisplay
    :keyword origin: Origin of the operation.
    :paramtype origin: str
    """
    super().__init__(**kwargs)
    self.name = name
    self.is_data_action = is_data_action
    self.display = display
    self.origin = origin
:keyword name: Name of the operation. :paramtype name: str :keyword is_data_action: Indicates whether the operation is a data action. :paramtype is_data_action: bool :keyword display: Display of the operation. :paramtype display: ~azure.mgmt.appcontainers.models.OperationDisplay :keyword origin: Origin of the operation. :paramtype origin: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    provider: Optional[str] = None,
    resource: Optional[str] = None,
    operation: Optional[str] = None,
    description: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword provider: Resource provider of the operation.
    :paramtype provider: str
    :keyword resource: Resource of the operation.
    :paramtype resource: str
    :keyword operation: Localized friendly name of the operation.
    :paramtype operation: str
    :keyword description: Localized friendly description of the operation.
    :paramtype description: str
    """
    super().__init__(**kwargs)
    self.provider = provider
    self.resource = resource
    self.operation = operation
    self.description = description
:keyword provider: Resource provider of the operation. :paramtype provider: str :keyword resource: Resource of the operation. :paramtype resource: str :keyword operation: Localized friendly name for the operation. :paramtype operation: str :keyword description: Localized friendly description for the operation. :paramtype description: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    queue_name: Optional[str] = None,
    queue_length: Optional[int] = None,
    auth: Optional[List["ScaleRuleAuth"]] = None,
    **kwargs: Any
) -> None:
    """
    :keyword queue_name: Queue name.
    :paramtype queue_name: str
    :keyword queue_length: Queue length.
    :paramtype queue_length: int
    :keyword auth: Authentication secrets for the queue scale rule.
    :paramtype auth: list[~azure.mgmt.appcontainers.models.ScaleRuleAuth]
    """
    super().__init__(**kwargs)
    self.queue_name = queue_name
    self.queue_length = queue_length
    self.auth = auth
:keyword queue_name: Queue name. :paramtype queue_name: str :keyword queue_length: Queue length. :paramtype queue_length: int :keyword auth: Authentication secrets for the queue scale rule. :paramtype auth: list[~azure.mgmt.appcontainers.models.ScaleRuleAuth]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    server: Optional[str] = None,
    username: Optional[str] = None,
    password_secret_ref: Optional[str] = None,
    identity: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword server: Container Registry Server.
    :paramtype server: str
    :keyword username: Container Registry Username.
    :paramtype username: str
    :keyword password_secret_ref: Name of the Secret holding the registry login password.
    :paramtype password_secret_ref: str
    :keyword identity: Managed Identity used to authenticate with Azure Container
     Registry. Use the full resource ID for a user-assigned identity, or 'system' for the
     system-assigned identity.
    :paramtype identity: str
    """
    super().__init__(**kwargs)
    self.server = server
    self.username = username
    self.password_secret_ref = password_secret_ref
    self.identity = identity
:keyword server: Container Registry Server. :paramtype server: str :keyword username: Container Registry Username. :paramtype username: str :keyword password_secret_ref: The name of the Secret that contains the registry login password. :paramtype password_secret_ref: str :keyword identity: A Managed Identity to use to authenticate with Azure Container Registry. For user-assigned identities, use the full user-assigned identity Resource ID. For system-assigned identities, use 'system'. :paramtype identity: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    registry_url: Optional[str] = None,
    registry_user_name: Optional[str] = None,
    registry_password: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword registry_url: Registry server Url.
    :paramtype registry_url: str
    :keyword registry_user_name: Registry username.
    :paramtype registry_user_name: str
    :keyword registry_password: Registry secret.
    :paramtype registry_password: str
    """
    super().__init__(**kwargs)
    self.registry_url = registry_url
    self.registry_user_name = registry_user_name
    self.registry_password = registry_password
:keyword registry_url: registry server Url. :paramtype registry_url: str :keyword registry_user_name: registry username. :paramtype registry_user_name: str :keyword registry_password: registry secret. :paramtype registry_password: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    containers: Optional[List["ReplicaContainer"]] = None,
    init_containers: Optional[List["ReplicaContainer"]] = None,
    **kwargs: Any
) -> None:
    """
    :keyword containers: Containers collection under a replica.
    :paramtype containers: list[~azure.mgmt.appcontainers.models.ReplicaContainer]
    :keyword init_containers: Init containers collection under a replica.
    :paramtype init_containers: list[~azure.mgmt.appcontainers.models.ReplicaContainer]
    """
    super().__init__(**kwargs)
    # Read-only fields: populated by the service on responses, never by callers.
    self.created_time = None
    self.running_state = None
    self.running_state_details = None
    self.containers = containers
    self.init_containers = init_containers
:keyword containers: The containers collection under a replica. :paramtype containers: list[~azure.mgmt.appcontainers.models.ReplicaContainer] :keyword init_containers: The init containers collection under a replica. :paramtype init_containers: list[~azure.mgmt.appcontainers.models.ReplicaContainer]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["Replica"], **kwargs: Any) -> None:
    """
    :keyword value: Required. Collection of resources.
    :paramtype value: list[~azure.mgmt.appcontainers.models.Replica]
    """
    super().__init__(**kwargs)
    self.value = value
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.Replica]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    name: Optional[str] = None,
    container_id: Optional[str] = None,
    ready: Optional[bool] = None,
    started: Optional[bool] = None,
    restart_count: Optional[int] = None,
    **kwargs: Any
) -> None:
    """
    :keyword name: Name of the Container.
    :paramtype name: str
    :keyword container_id: Id of the Container.
    :paramtype container_id: str
    :keyword ready: Container ready status.
    :paramtype ready: bool
    :keyword started: Container start status.
    :paramtype started: bool
    :keyword restart_count: Container restart count.
    :paramtype restart_count: int
    """
    super().__init__(**kwargs)
    self.name = name
    self.container_id = container_id
    self.ready = ready
    self.started = started
    self.restart_count = restart_count
    # Read-only fields: populated by the service on responses, never by callers.
    self.running_state = None
    self.running_state_details = None
    self.log_stream_endpoint = None
    self.exec_endpoint = None
:keyword name: The Name of the Container. :paramtype name: str :keyword container_id: The Id of the Container. :paramtype container_id: str :keyword ready: The container ready status. :paramtype ready: bool :keyword started: The container start status. :paramtype started: bool :keyword restart_count: The container restart count. :paramtype restart_count: int
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["Revision"], **kwargs: Any) -> None:
    """
    :keyword value: Required. Collection of resources.
    :paramtype value: list[~azure.mgmt.appcontainers.models.Revision]
    """
    super().__init__(**kwargs)
    self.value = value
    # Read-only pagination link; populated by the service on responses.
    self.next_link = None
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.Revision]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    min_replicas: Optional[int] = None,
    max_replicas: int = 10,
    rules: Optional[List["ScaleRule"]] = None,
    **kwargs: Any
) -> None:
    """
    :keyword min_replicas: Optional. Minimum number of container replicas.
    :paramtype min_replicas: int
    :keyword max_replicas: Optional. Maximum number of container replicas; defaults to 10
     when not set.
    :paramtype max_replicas: int
    :keyword rules: Scaling rules.
    :paramtype rules: list[~azure.mgmt.appcontainers.models.ScaleRule]
    """
    super().__init__(**kwargs)
    self.min_replicas = min_replicas
    self.max_replicas = max_replicas
    self.rules = rules
:keyword min_replicas: Optional. Minimum number of container replicas. :paramtype min_replicas: int :keyword max_replicas: Optional. Maximum number of container replicas. Defaults to 10 if not set. :paramtype max_replicas: int :keyword rules: Scaling rules. :paramtype rules: list[~azure.mgmt.appcontainers.models.ScaleRule]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    name: Optional[str] = None,
    azure_queue: Optional["QueueScaleRule"] = None,
    custom: Optional["CustomScaleRule"] = None,
    http: Optional["HttpScaleRule"] = None,
    tcp: Optional["TcpScaleRule"] = None,
    **kwargs: Any
) -> None:
    """
    :keyword name: Scale Rule Name.
    :paramtype name: str
    :keyword azure_queue: Azure Queue based scaling.
    :paramtype azure_queue: ~azure.mgmt.appcontainers.models.QueueScaleRule
    :keyword custom: Custom scale rule.
    :paramtype custom: ~azure.mgmt.appcontainers.models.CustomScaleRule
    :keyword http: HTTP requests based scaling.
    :paramtype http: ~azure.mgmt.appcontainers.models.HttpScaleRule
    :keyword tcp: Tcp requests based scaling.
    :paramtype tcp: ~azure.mgmt.appcontainers.models.TcpScaleRule
    """
    super().__init__(**kwargs)
    self.name = name
    self.azure_queue = azure_queue
    self.custom = custom
    self.http = http
    self.tcp = tcp
:keyword name: Scale Rule Name. :paramtype name: str :keyword azure_queue: Azure Queue based scaling. :paramtype azure_queue: ~azure.mgmt.appcontainers.models.QueueScaleRule :keyword custom: Custom scale rule. :paramtype custom: ~azure.mgmt.appcontainers.models.CustomScaleRule :keyword http: HTTP requests based scaling. :paramtype http: ~azure.mgmt.appcontainers.models.HttpScaleRule :keyword tcp: Tcp requests based scaling. :paramtype tcp: ~azure.mgmt.appcontainers.models.TcpScaleRule
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    secret_ref: Optional[str] = None,
    trigger_parameter: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword secret_ref: Name of the secret the auth params are pulled from.
    :paramtype secret_ref: str
    :keyword trigger_parameter: Trigger Parameter that uses the secret.
    :paramtype trigger_parameter: str
    """
    super().__init__(**kwargs)
    self.secret_ref = secret_ref
    self.trigger_parameter = trigger_parameter
:keyword secret_ref: Name of the secret from which to pull the auth params. :paramtype secret_ref: str :keyword trigger_parameter: Trigger Parameter that uses the secret. :paramtype trigger_parameter: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    name: Optional[str] = None,
    value: Optional[str] = None,
    identity: Optional[str] = None,
    key_vault_url: Optional[str] = None,
    **kwargs: Any
) -> None:
    """
    :keyword name: Secret Name.
    :paramtype name: str
    :keyword value: Secret Value.
    :paramtype value: str
    :keyword identity: Resource ID of a managed identity used to authenticate with Azure
     Key Vault, or System for the system-assigned identity.
    :paramtype identity: str
    :keyword key_vault_url: Azure Key Vault URL of the secret referenced by the container
     app.
    :paramtype key_vault_url: str
    """
    super().__init__(**kwargs)
    self.name = name
    self.value = value
    self.identity = identity
    self.key_vault_url = key_vault_url
:keyword name: Secret Name. :paramtype name: str :keyword value: Secret Value. :paramtype value: str :keyword identity: Resource ID of a managed identity to authenticate with Azure Key Vault, or System to use a system-assigned identity. :paramtype identity: str :keyword key_vault_url: Azure Key Vault URL pointing to the secret referenced by the container app. :paramtype key_vault_url: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["ContainerAppSecret"], **kwargs: Any) -> None:
    """
    :keyword value: Required. Collection of resources.
    :paramtype value: list[~azure.mgmt.appcontainers.models.ContainerAppSecret]
    """
    super().__init__(**kwargs)
    self.value = value
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.ContainerAppSecret]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, secret_ref: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
    """
    :keyword secret_ref: Name of the Container App secret the value is pulled from.
    :paramtype secret_ref: str
    :keyword path: Path to project the secret to. When omitted, the path defaults to the
     name of the secret listed in secretRef.
    :paramtype path: str
    """
    super().__init__(**kwargs)
    self.secret_ref = secret_ref
    self.path = path
:keyword secret_ref: Name of the Container App secret from which to pull the secret value. :paramtype secret_ref: str :keyword path: Path to project secret to. If no path is provided, path defaults to name of secret listed in secretRef. :paramtype path: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, type: str, **kwargs: Any) -> None:
    """
    :keyword type: Required. Dev ContainerApp service type.
    :paramtype type: str
    """
    super().__init__(**kwargs)
    self.type = type
:keyword type: Dev ContainerApp service type. Required. :paramtype type: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, service_id: Optional[str] = None, name: Optional[str] = None, **kwargs: Any) -> None:
    """
    :keyword service_id: Resource id of the target service.
    :paramtype service_id: str
    :keyword name: Name of the service bind.
    :paramtype name: str
    """
    super().__init__(**kwargs)
    self.service_id = service_id
    self.name = name
:keyword service_id: Resource id of the target service. :paramtype service_id: str :keyword name: Name of the service bind. :paramtype name: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    repo_url: Optional[str] = None,
    branch: Optional[str] = None,
    github_action_configuration: Optional["GithubActionConfiguration"] = None,
    **kwargs: Any
) -> None:
    """
    :keyword repo_url: Repo url integrated with the ContainerApp.
    :paramtype repo_url: str
    :keyword branch: Branch that triggers the auto deployment.
    :paramtype branch: str
    :keyword github_action_configuration: Container App Revision Template with all
     possible settings, with defaults populated as they were at creation time when the
     user did not provide them.
    :paramtype github_action_configuration:
     ~azure.mgmt.appcontainers.models.GithubActionConfiguration
    """
    super().__init__(**kwargs)
    # Read-only field: populated by the service on responses, never by callers.
    self.operation_state = None
    self.repo_url = repo_url
    self.branch = branch
    self.github_action_configuration = github_action_configuration
:keyword repo_url: The repo url which will be integrated to ContainerApp. :paramtype repo_url: str :keyword branch: The branch which will trigger the auto deployment. :paramtype branch: str :keyword github_action_configuration: Container App Revision Template with all possible settings and the defaults if user did not provide them. The defaults are populated as they were at the creation time. :paramtype github_action_configuration: ~azure.mgmt.appcontainers.models.GithubActionConfiguration
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["SourceControl"], **kwargs: Any) -> None:
    """
    :keyword value: Required. Collection of resources.
    :paramtype value: list[~azure.mgmt.appcontainers.models.SourceControl]
    """
    super().__init__(**kwargs)
    self.value = value
    # Read-only pagination link; populated by the service on responses.
    self.next_link = None
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.SourceControl]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    created_by: Optional[str] = None,
    created_by_type: Optional[Union[str, "CreatedByType"]] = None,
    created_at: Optional[datetime.datetime] = None,
    last_modified_by: Optional[str] = None,
    last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
    last_modified_at: Optional[datetime.datetime] = None,
    **kwargs: Any
) -> None:
    """
    :keyword created_by: Identity that created the resource.
    :paramtype created_by: str
    :keyword created_by_type: Type of identity that created the resource. Known values
     are: "User", "Application", "ManagedIdentity", and "Key".
    :paramtype created_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType
    :keyword created_at: Timestamp of resource creation (UTC).
    :paramtype created_at: ~datetime.datetime
    :keyword last_modified_by: Identity that last modified the resource.
    :paramtype last_modified_by: str
    :keyword last_modified_by_type: Type of identity that last modified the resource.
     Known values are: "User", "Application", "ManagedIdentity", and "Key".
    :paramtype last_modified_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType
    :keyword last_modified_at: Timestamp of resource last modification (UTC).
    :paramtype last_modified_at: ~datetime.datetime
    """
    super().__init__(**kwargs)
    self.created_by = created_by
    self.created_by_type = created_by_type
    self.created_at = created_at
    self.last_modified_by = last_modified_by
    self.last_modified_by_type = last_modified_by_type
    self.last_modified_at = last_modified_at
:keyword created_by: The identity that created the resource. :paramtype created_by: str :keyword created_by_type: The type of identity that created the resource. Known values are: "User", "Application", "ManagedIdentity", and "Key". :paramtype created_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType :keyword created_at: The timestamp of resource creation (UTC). :paramtype created_at: ~datetime.datetime :keyword last_modified_by: The identity that last modified the resource. :paramtype last_modified_by: str :keyword last_modified_by_type: The type of identity that last modified the resource. Known values are: "User", "Application", "ManagedIdentity", and "Key". :paramtype last_modified_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType :keyword last_modified_at: The timestamp of resource last modification (UTC). :paramtype last_modified_at: ~datetime.datetime
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(
    self,
    *,
    metadata: Optional[Dict[str, str]] = None,
    auth: Optional[List["ScaleRuleAuth"]] = None,
    **kwargs: Any
) -> None:
    """
    :keyword metadata: Metadata properties describing the tcp scale rule.
    :paramtype metadata: dict[str, str]
    :keyword auth: Authentication secrets for the tcp scale rule.
    :paramtype auth: list[~azure.mgmt.appcontainers.models.ScaleRuleAuth]
    """
    super().__init__(**kwargs)
    self.metadata = metadata
    self.auth = auth
:keyword metadata: Metadata properties to describe tcp scale rule. :paramtype metadata: dict[str, str] :keyword auth: Authentication secrets for the tcp scale rule. :paramtype auth: list[~azure.mgmt.appcontainers.models.ScaleRuleAuth]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, revision_suffix: Optional[str] = None, termination_grace_period_seconds: Optional[int] = None, init_containers: Optional[List["InitContainer"]] = None, containers: Optional[List["Container"]] = None, scale: Optional["Scale"] = None, volumes: Optional[List["Volume"]] = None, service_binds: Optional[List["ServiceBind"]] = None, **kwargs: Any ) -> None: """ :keyword revision_suffix: User friendly suffix that is appended to the revision name. :paramtype revision_suffix: str :keyword termination_grace_period_seconds: Optional duration in seconds the Container App Instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. :paramtype termination_grace_period_seconds: int :keyword init_containers: List of specialized containers that run before app containers. :paramtype init_containers: list[~azure.mgmt.appcontainers.models.InitContainer] :keyword containers: List of container definitions for the Container App. :paramtype containers: list[~azure.mgmt.appcontainers.models.Container] :keyword scale: Scaling properties for the Container App. :paramtype scale: ~azure.mgmt.appcontainers.models.Scale :keyword volumes: List of volume definitions for the Container App. :paramtype volumes: list[~azure.mgmt.appcontainers.models.Volume] :keyword service_binds: List of container app services bound to the app. :paramtype service_binds: list[~azure.mgmt.appcontainers.models.ServiceBind] """ super().__init__(**kwargs) self.revision_suffix = revision_suffix self.termination_grace_period_seconds = termination_grace_period_seconds self.init_containers = init_containers self.containers = containers self.scale = scale self.volumes = volumes self.service_binds = service_binds
:keyword revision_suffix: User friendly suffix that is appended to the revision name. :paramtype revision_suffix: str :keyword termination_grace_period_seconds: Optional duration in seconds the Container App Instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. :paramtype termination_grace_period_seconds: int :keyword init_containers: List of specialized containers that run before app containers. :paramtype init_containers: list[~azure.mgmt.appcontainers.models.InitContainer] :keyword containers: List of container definitions for the Container App. :paramtype containers: list[~azure.mgmt.appcontainers.models.Container] :keyword scale: Scaling properties for the Container App. :paramtype scale: ~azure.mgmt.appcontainers.models.Scale :keyword volumes: List of volume definitions for the Container App. :paramtype volumes: list[~azure.mgmt.appcontainers.models.Volume] :keyword service_binds: List of container app services bound to the app. :paramtype service_binds: list[~azure.mgmt.appcontainers.models.ServiceBind]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, enabled: Optional[bool] = None, token_refresh_extension_hours: Optional[float] = None, azure_blob_storage: Optional["BlobStorageTokenStore"] = None, **kwargs: Any ) -> None: """ :keyword enabled: :code:`<code>true</code>` to durably store platform-specific security tokens that are obtained during login flows; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`. :paramtype enabled: bool :keyword token_refresh_extension_hours: The number of hours after session token expiration that a session token can be used to call the token refresh API. The default is 72 hours. :paramtype token_refresh_extension_hours: float :keyword azure_blob_storage: The configuration settings of the storage of the tokens if blob storage is used. :paramtype azure_blob_storage: ~azure.mgmt.appcontainers.models.BlobStorageTokenStore """ super().__init__(**kwargs) self.enabled = enabled self.token_refresh_extension_hours = token_refresh_extension_hours self.azure_blob_storage = azure_blob_storage
:keyword enabled: :code:`<code>true</code>` to durably store platform-specific security tokens that are obtained during login flows; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`. :paramtype enabled: bool :keyword token_refresh_extension_hours: The number of hours after session token expiration that a session token can be used to call the token refresh API. The default is 72 hours. :paramtype token_refresh_extension_hours: float :keyword azure_blob_storage: The configuration settings of the storage of the tokens if blob storage is used. :paramtype azure_blob_storage: ~azure.mgmt.appcontainers.models.BlobStorageTokenStore
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, revision_name: Optional[str] = None, weight: Optional[int] = None, latest_revision: bool = False, label: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword revision_name: Name of a revision. :paramtype revision_name: str :keyword weight: Traffic weight assigned to a revision. :paramtype weight: int :keyword latest_revision: Indicates that the traffic weight belongs to a latest stable revision. :paramtype latest_revision: bool :keyword label: Associates a traffic label with a revision. :paramtype label: str """ super().__init__(**kwargs) self.revision_name = revision_name self.weight = weight self.latest_revision = latest_revision self.label = label
:keyword revision_name: Name of a revision. :paramtype revision_name: str :keyword weight: Traffic weight assigned to a revision. :paramtype weight: int :keyword latest_revision: Indicates that the traffic weight belongs to a latest stable revision. :paramtype latest_revision: bool :keyword label: Associates a traffic label with a revision. :paramtype label: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, enabled: Optional[bool] = None, registration: Optional["TwitterRegistration"] = None, **kwargs: Any ) -> None: """ :keyword enabled: :code:`<code>false</code>` if the Twitter provider should not be enabled despite the set registration; otherwise, :code:`<code>true</code>`. :paramtype enabled: bool :keyword registration: The configuration settings of the app registration for the Twitter provider. :paramtype registration: ~azure.mgmt.appcontainers.models.TwitterRegistration """ super().__init__(**kwargs) self.enabled = enabled self.registration = registration
:keyword enabled: :code:`<code>false</code>` if the Twitter provider should not be enabled despite the set registration; otherwise, :code:`<code>true</code>`. :paramtype enabled: bool :keyword registration: The configuration settings of the app registration for the Twitter provider. :paramtype registration: ~azure.mgmt.appcontainers.models.TwitterRegistration
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, consumer_key: Optional[str] = None, consumer_secret_setting_name: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword consumer_key: The OAuth 1.0a consumer key of the Twitter application used for sign-in. This setting is required for enabling Twitter Sign-In. Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in. :paramtype consumer_key: str :keyword consumer_secret_setting_name: The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. :paramtype consumer_secret_setting_name: str """ super().__init__(**kwargs) self.consumer_key = consumer_key self.consumer_secret_setting_name = consumer_secret_setting_name
:keyword consumer_key: The OAuth 1.0a consumer key of the Twitter application used for sign-in. This setting is required for enabling Twitter Sign-In. Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in. :paramtype consumer_key: str :keyword consumer_secret_setting_name: The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. :paramtype consumer_secret_setting_name: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, current_value: float, limit: float, name: "UsageName", **kwargs: Any) -> None: """ :keyword current_value: The current usage of the resource. Required. :paramtype current_value: float :keyword limit: The maximum permitted usage of the resource. Required. :paramtype limit: float :keyword name: The name of the type of usage. Required. :paramtype name: ~azure.mgmt.appcontainers.models.UsageName """ super().__init__(**kwargs) self.current_value = current_value self.limit = limit self.name = name
:keyword current_value: The current usage of the resource. Required. :paramtype current_value: float :keyword limit: The maximum permitted usage of the resource. Required. :paramtype limit: float :keyword name: The name of the type of usage. Required. :paramtype name: ~azure.mgmt.appcontainers.models.UsageName
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: Optional[str] = None, localized_value: Optional[str] = None, **kwargs: Any) -> None: """ :keyword value: The name of the resource. :paramtype value: str :keyword localized_value: The localized name of the resource. :paramtype localized_value: str """ super().__init__(**kwargs) self.value = value self.localized_value = localized_value
:keyword value: The name of the resource. :paramtype value: str :keyword localized_value: The localized name of the resource. :paramtype localized_value: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, internal: Optional[bool] = None, infrastructure_subnet_id: Optional[str] = None, docker_bridge_cidr: Optional[str] = None, platform_reserved_cidr: Optional[str] = None, platform_reserved_dns_ip: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword internal: Boolean indicating the environment only has an internal load balancer. These environments do not have a public static IP resource. They must provide infrastructureSubnetId if enabling this property. :paramtype internal: bool :keyword infrastructure_subnet_id: Resource ID of a subnet for infrastructure components. Must not overlap with any other provided IP ranges. :paramtype infrastructure_subnet_id: str :keyword docker_bridge_cidr: CIDR notation IP range assigned to the Docker bridge, network. Must not overlap with any other provided IP ranges. :paramtype docker_bridge_cidr: str :keyword platform_reserved_cidr: IP range in CIDR notation that can be reserved for environment infrastructure IP addresses. Must not overlap with any other provided IP ranges. :paramtype platform_reserved_cidr: str :keyword platform_reserved_dns_ip: An IP address from the IP range defined by platformReservedCidr that will be reserved for the internal DNS server. :paramtype platform_reserved_dns_ip: str """ super().__init__(**kwargs) self.internal = internal self.infrastructure_subnet_id = infrastructure_subnet_id self.docker_bridge_cidr = docker_bridge_cidr self.platform_reserved_cidr = platform_reserved_cidr self.platform_reserved_dns_ip = platform_reserved_dns_ip
:keyword internal: Boolean indicating the environment only has an internal load balancer. These environments do not have a public static IP resource. They must provide infrastructureSubnetId if enabling this property. :paramtype internal: bool :keyword infrastructure_subnet_id: Resource ID of a subnet for infrastructure components. Must not overlap with any other provided IP ranges. :paramtype infrastructure_subnet_id: str :keyword docker_bridge_cidr: CIDR notation IP range assigned to the Docker bridge, network. Must not overlap with any other provided IP ranges. :paramtype docker_bridge_cidr: str :keyword platform_reserved_cidr: IP range in CIDR notation that can be reserved for environment infrastructure IP addresses. Must not overlap with any other provided IP ranges. :paramtype platform_reserved_cidr: str :keyword platform_reserved_dns_ip: An IP address from the IP range defined by platformReservedCidr that will be reserved for the internal DNS server. :paramtype platform_reserved_dns_ip: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, name: Optional[str] = None, storage_type: Optional[Union[str, "StorageType"]] = None, storage_name: Optional[str] = None, secrets: Optional[List["SecretVolumeItem"]] = None, mount_options: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword name: Volume name. :paramtype name: str :keyword storage_type: Storage type for the volume. If not provided, use EmptyDir. Known values are: "AzureFile", "EmptyDir", and "Secret". :paramtype storage_type: str or ~azure.mgmt.appcontainers.models.StorageType :keyword storage_name: Name of storage resource. No need to provide for EmptyDir and Secret. :paramtype storage_name: str :keyword secrets: List of secrets to be added in volume. If no secrets are provided, all secrets in collection will be added to volume. :paramtype secrets: list[~azure.mgmt.appcontainers.models.SecretVolumeItem] :keyword mount_options: Mount options used while mounting the AzureFile. Must be a comma-separated string. :paramtype mount_options: str """ super().__init__(**kwargs) self.name = name self.storage_type = storage_type self.storage_name = storage_name self.secrets = secrets self.mount_options = mount_options
:keyword name: Volume name. :paramtype name: str :keyword storage_type: Storage type for the volume. If not provided, use EmptyDir. Known values are: "AzureFile", "EmptyDir", and "Secret". :paramtype storage_type: str or ~azure.mgmt.appcontainers.models.StorageType :keyword storage_name: Name of storage resource. No need to provide for EmptyDir and Secret. :paramtype storage_name: str :keyword secrets: List of secrets to be added in volume. If no secrets are provided, all secrets in collection will be added to volume. :paramtype secrets: list[~azure.mgmt.appcontainers.models.SecretVolumeItem] :keyword mount_options: Mount options used while mounting the AzureFile. Must be a comma-separated string. :paramtype mount_options: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, volume_name: Optional[str] = None, mount_path: Optional[str] = None, sub_path: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword volume_name: This must match the Name of a Volume. :paramtype volume_name: str :keyword mount_path: Path within the container at which the volume should be mounted.Must not contain ':'. :paramtype mount_path: str :keyword sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). :paramtype sub_path: str """ super().__init__(**kwargs) self.volume_name = volume_name self.mount_path = mount_path self.sub_path = sub_path
:keyword volume_name: This must match the Name of a Volume. :paramtype volume_name: str :keyword mount_path: Path within the container at which the volume should be mounted.Must not contain ':'. :paramtype mount_path: str :keyword sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). :paramtype sub_path: str
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, name: str, workload_profile_type: str, minimum_count: Optional[int] = None, maximum_count: Optional[int] = None, **kwargs: Any ) -> None: """ :keyword name: Workload profile type for the workloads to run on. Required. :paramtype name: str :keyword workload_profile_type: Workload profile type for the workloads to run on. Required. :paramtype workload_profile_type: str :keyword minimum_count: The minimum capacity. :paramtype minimum_count: int :keyword maximum_count: The maximum capacity. :paramtype maximum_count: int """ super().__init__(**kwargs) self.name = name self.workload_profile_type = workload_profile_type self.minimum_count = minimum_count self.maximum_count = maximum_count
:keyword name: Workload profile type for the workloads to run on. Required. :paramtype name: str :keyword workload_profile_type: Workload profile type for the workloads to run on. Required. :paramtype workload_profile_type: str :keyword minimum_count: The minimum capacity. :paramtype minimum_count: int :keyword maximum_count: The maximum capacity. :paramtype maximum_count: int
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, properties: Optional["WorkloadProfileStatesProperties"] = None, **kwargs: Any ) -> None: """ :keyword properties: Workload Profile resource specific properties. :paramtype properties: ~azure.mgmt.appcontainers.models.WorkloadProfileStatesProperties """ super().__init__(**kwargs) self.properties = properties
:keyword properties: Workload Profile resource specific properties. :paramtype properties: ~azure.mgmt.appcontainers.models.WorkloadProfileStatesProperties
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__(self, *, value: List["WorkloadProfileStates"], **kwargs: Any) -> None: """ :keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.WorkloadProfileStates] """ super().__init__(**kwargs) self.value = value self.next_link = None
:keyword value: Collection of resources. Required. :paramtype value: list[~azure.mgmt.appcontainers.models.WorkloadProfileStates]
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def __init__( self, *, minimum_count: Optional[int] = None, maximum_count: Optional[int] = None, current_count: Optional[int] = None, **kwargs: Any ) -> None: """ :keyword minimum_count: Minimum count of instances. :paramtype minimum_count: int :keyword maximum_count: Maximum count of nodes. :paramtype maximum_count: int :keyword current_count: Current count of nodes. :paramtype current_count: int """ super().__init__(**kwargs) self.minimum_count = minimum_count self.maximum_count = maximum_count self.current_count = current_count
:keyword minimum_count: Minimum count of instances. :paramtype minimum_count: int :keyword maximum_count: Maximum count of nodes. :paramtype maximum_count: int :keyword current_count: Current count of nodes. :paramtype current_count: int
__init__
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/_sdk_models.py
MIT
def test_containerapp_create_with_environment_id(self, resource_group, laworkspace_customer_id, laworkspace_shared_key): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) env1 = self.create_random_name(prefix='env1', length=24) env2 = self.create_random_name(prefix='env2', length=24) app = self.create_random_name(prefix='yaml1', length=24) create_containerapp_env(self, env1, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) containerapp_env1 = self.cmd( 'containerapp env show -g {} -n {}'.format(resource_group, env1)).get_output_in_json() create_containerapp_env(self, env2, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) containerapp_env2 = self.cmd( 'containerapp env show -g {} -n {}'.format(resource_group, env2)).get_output_in_json() # test `az containerapp up` with --environment image = 'mcr.microsoft.com/azuredocs/aks-helloworld:v1' ca_name = self.create_random_name(prefix='containerapp', length=24) self.cmd('containerapp up -g {} -n {} --environment {} --image {}'.format(resource_group, ca_name, env2, image), expect_failure=False) self.cmd(f'containerapp show -g {resource_group} -n {ca_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env2["id"]), JMESPathCheck('properties.template.containers[0].image', image), ]) # test `az containerapp up` for existing containerapp without --environment image2 = 'mcr.microsoft.com/k8se/quickstart:latest' self.cmd('containerapp up -g {} -n {} --image {}'.format(resource_group, ca_name, image2), expect_failure=False) self.cmd(f'containerapp show -g {resource_group} -n {ca_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env2["id"]), JMESPathCheck('properties.template.containers[0].image', image2), ]) user_identity_name = 
self.create_random_name(prefix='containerapp-user', length=24) user_identity = self.cmd( 'identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json() user_identity_id = user_identity['id'] # the value in --yaml is used, warning for different value in --environmentId containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: environmentId: {containerapp_env1["id"]} configuration: activeRevisionsMode: Multiple ingress: external: false allowInsecure: false targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Auto ipSecurityRestrictions: - name: name ipAddressRange: "1.1.1.1/10" action: "Allow" template: revisionSuffix: myrevision terminationGracePeriodSeconds: 90 containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 1 maxReplicas: 3 rules: - http: auth: - secretRef: secretref triggerParameter: trigger metadata: concurrentRequests: '50' key: value name: http-scale-rule identity: type: UserAssigned userAssignedIdentities: {user_identity_id}: {{}} """ containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd( f'containerapp create -n {app} -g {resource_group} --environment {env2} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.configuration.ingress.external", False), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), 
JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.terminationGracePeriodSeconds", 90), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3), JMESPathCheck("properties.template.scale.rules[0].name", "http-scale-rule"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.key", "value"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"), ]) containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: configuration: activeRevisionsMode: Multiple ingress: external: false allowInsecure: false targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Auto ipSecurityRestrictions: - name: name ipAddressRange: "1.1.1.1/10" action: "Allow" template: revisionSuffix: myrevision terminationGracePeriodSeconds: 90 containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 1 maxReplicas: 3 rules: - http: auth: - secretRef: secretref triggerParameter: trigger metadata: concurrentRequests: '50' key: value name: http-scale-rule identity: type: UserAssigned userAssignedIdentities: {user_identity_id}: {{}} """ write_test_file(containerapp_file_name, containerapp_yaml_text) app2 = self.create_random_name(prefix='yaml2', length=24) self.cmd( f'containerapp create -n {app2} -g {resource_group} --environment {env2} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app2}', checks=[ 
JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env2["id"]), JMESPathCheck("properties.configuration.ingress.external", False), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), JMESPathCheck("properties.environmentId", containerapp_env2["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.terminationGracePeriodSeconds", 90), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3), JMESPathCheck("properties.template.scale.rules[0].name", "http-scale-rule"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.key", "value"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"), ]) clean_up_test_file(containerapp_file_name)
containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd( f'containerapp create -n {app} -g {resource_group} --environment {env2} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.configuration.ingress.external", False), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.terminationGracePeriodSeconds", 90), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3), JMESPathCheck("properties.template.scale.rules[0].name", "http-scale-rule"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.key", "value"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"), ]) containerapp_yaml_text = f
test_containerapp_create_with_environment_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_scenario.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_scenario.py
MIT
def test_container_app_mount_azurefile_e2e(self, resource_group, laworkspace_customer_id, laworkspace_shared_key): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) env = self.create_random_name(prefix='env', length=24) app = self.create_random_name(prefix='app1', length=24) storage = self.create_random_name(prefix='storage', length=24) share = self.create_random_name(prefix='share', length=10) self.cmd( f'az storage account create --resource-group {resource_group} --name {storage} --location {TEST_LOCATION} --kind StorageV2 --sku Standard_LRS --enable-large-file-share --output none') self.cmd( f'az storage share-rm create --resource-group {resource_group} --storage-account {storage} --name {share} --quota 1024 --enabled-protocols SMB --output none') create_containerapp_env(self, env, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) account_key = self.cmd(f'az storage account keys list -g {resource_group} -n {storage} --query "[0].value" ' '-otsv').output.strip() self.cmd(f'az containerapp env storage set -g {resource_group} -n {env} -a {storage} -k {account_key} -f {share} --storage-name {share} --access-mode ReadWrite') containerapp_env = self.cmd('containerapp env show -n {} -g {}'.format(env, resource_group), checks=[ JMESPathCheck('name', env) ]).get_output_in_json() containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps name: {app} resourceGroup: {resource_group} properties: managedEnvironmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Single ingress: external: true allowInsecure: true targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Auto template: containers: - image: mcr.microsoft.com/k8se/quickstart:latest name: acamounttest resources: cpu: 0.5 ephemeralStorage: 1Gi memory: 1Gi volumeMounts: - mountPath: /mnt/data volumeName: azure-files-volume subPath: sub volumes: - name: azure-files-volume storageType: 
AzureFile storageName: {share} mountOptions: uid=999,gid=999 """ containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd( f'az containerapp create -g {resource_group} --environment {env} -n {app} --yaml {containerapp_file_name}') self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) self.cmd('az containerapp revision list -g {} -n {}'.format(resource_group, app), checks=[ JMESPathCheck('[0].properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('[0].properties.template.volumes[0].storageName', share), JMESPathCheck('[0].properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('[0].properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('[0].properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('[0].properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('[0].properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) clean_up_test_file(containerapp_file_name) containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps name: {app} resourceGroup: {resource_group} properties: managedEnvironmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Single ingress: external: true allowInsecure: true 
targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Auto template: containers: - image: mcr.microsoft.com/k8se/quickstart:latest name: acamounttest resources: cpu: 0.5 ephemeralStorage: 1Gi memory: 1Gi volumeMounts: - mountPath: /mnt/data volumeName: azure-files-volume subPath: sub2 volumes: - name: azure-files-volume storageType: AzureFile storageName: {share} mountOptions: uid=1000,gid=1000 """ containerapp_file_name = f"{self._testMethodName}_containerapp_1.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd( f'az containerapp update -g {resource_group} -n {app} --yaml {containerapp_file_name}') self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=1000,gid=1000'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub2'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) self.cmd('az containerapp revision list -g {} -n {}'.format(resource_group, app), checks=[ JMESPathCheck('[1].properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('[1].properties.template.volumes[0].storageName', share), JMESPathCheck('[1].properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('[1].properties.template.volumes[0].mountOptions', 'uid=1000,gid=1000'), JMESPathCheck('[1].properties.template.containers[0].volumeMounts[0].subPath', 'sub2'), JMESPathCheck('[1].properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('[1].properties.template.containers[0].volumeMounts[0].volumeName', 
'azure-files-volume'), ]) clean_up_test_file(containerapp_file_name)
containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd( f'az containerapp create -g {resource_group} --environment {env} -n {app} --yaml {containerapp_file_name}') self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) self.cmd('az containerapp revision list -g {} -n {}'.format(resource_group, app), checks=[ JMESPathCheck('[0].properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('[0].properties.template.volumes[0].storageName', share), JMESPathCheck('[0].properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('[0].properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('[0].properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('[0].properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('[0].properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) clean_up_test_file(containerapp_file_name) containerapp_yaml_text = f
test_container_app_mount_azurefile_e2e
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_azurefile.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_azurefile.py
MIT
def test_containerappjob_create_with_yaml(self, resource_group, laworkspace_customer_id, laworkspace_shared_key): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) env = self.create_random_name(prefix='env', length=24) job = self.create_random_name(prefix='yaml', length=24) storage = self.create_random_name(prefix='storage', length=24) share = self.create_random_name(prefix='share', length=24) self.cmd( f'az storage account create --resource-group {resource_group} --name {storage} --location {TEST_LOCATION} --kind StorageV2 --sku Standard_LRS --enable-large-file-share --output none') self.cmd( f'az storage share-rm create --resource-group {resource_group} --storage-account {storage} --name {share} --quota 1024 --enabled-protocols SMB --output none') create_containerapp_env(self, env, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) containerapp_env = self.cmd( 'containerapp env show -g {} -n {}'.format(resource_group, env)).get_output_in_json() account_key = self.cmd(f'az storage account keys list -g {resource_group} -n {storage} --query "[0].value" ' '-otsv').output.strip() self.cmd( f'az containerapp env storage set -g {resource_group} -n {env} -a {storage} -k {account_key} -f {share} --storage-name {share} --access-mode ReadWrite') user_identity_name = self.create_random_name(prefix='containerapp-user', length=24) user_identity = self.cmd( 'identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json() user_identity_id = user_identity['id'] # test job create with yaml containerappjob_yaml_text = f""" location: {TEST_LOCATION} properties: environmentId: {containerapp_env["id"]} configuration: dapr: null eventTriggerConfig: null manualTriggerConfig: parallelism: 1 replicaCompletionCount: 1 registries: null replicaRetryLimit: 1 replicaTimeout: 100 scheduleTriggerConfig: null secrets: null triggerType: Manual template: containers: - env: - name: MY_ENV_VAR value: 
hello image: mcr.microsoft.com/k8se/quickstart-jobs:latest name: anfranci-azclitest-acaj1 resources: cpu: 0.5 ephemeralStorage: 1Gi memory: 1Gi volumeMounts: - mountPath: /mnt/data volumeName: azure-files-volume subPath: sub initContainers: - command: - /bin/sh - -c - sleep 150 image: k8seteste2e.azurecr.io/e2e-apps/kuar:green name: simple-sleep-container probes: - type: liveness httpGet: path: "/health" port: 8080 httpHeaders: - name: "Custom-Header" value: "liveness probe" initialDelaySeconds: 7 periodSeconds: 3 resources: cpu: "0.25" memory: 0.5Gi volumes: - name: azure-files-volume storageType: AzureFile storageName: {share} mountOptions: uid=999,gid=999 workloadProfileName: null identity: type: UserAssigned userAssignedIdentities: {user_identity_id}: {{}} """ containerappjob_file_name = f"{self._testMethodName}_containerappjob.yml" write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd(f'containerapp job create -n {job} -g {resource_group} --environment {env} --yaml {containerappjob_file_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), JMESPathCheck('identity.type', "UserAssigned"), JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), 
JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume') ]) self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), JMESPathCheck('identity.type', "UserAssigned"), JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume') ]) # wait for provisioning state of job to be succeeded before updating jobProvisioning = True timeout = time.time() + 60 * 1 # 1 minutes from now while (jobProvisioning): jobProvisioning = self.cmd("az containerapp job show --resource-group {} --name {}".format(resource_group, job)).get_output_in_json()[ 'properties']['provisioningState'] != "Succeeded" if (time.time() > timeout): break # test container app job update with yaml containerappjob_yaml_text = f""" location: {TEST_LOCATION} 
properties: environmentId: {containerapp_env["id"]} configuration: dapr: null eventTriggerConfig: null manualTriggerConfig: parallelism: 1 replicaCompletionCount: 1 registries: null replicaRetryLimit: 1 replicaTimeout: 200 scheduleTriggerConfig: null secrets: null triggerType: Manual template: containers: - env: - name: MY_ENV_VAR value: hello image: mcr.microsoft.com/k8se/quickstart-jobs:latest name: anfranci-azclitest-acaj1 resources: cpu: 0.75 ephemeralStorage: 1Gi memory: 1.5Gi volumeMounts: - mountPath: /mnt/data volumeName: azure-files-volume subPath: sub2 initContainers: - command: - /bin/sh - -c - sleep 150 image: k8seteste2e.azurecr.io/e2e-apps/kuar:green name: simple-sleep-container probes: - type: liveness httpGet: path: "/health" port: 8080 httpHeaders: - name: "Custom-Header" value: "liveness probe" initialDelaySeconds: 7 periodSeconds: 3 resources: cpu: "0.25" memory: 0.5Gi volumes: - name: azure-files-volume storageType: AzureFile storageName: {share} mountOptions: uid=1000,gid=1000 """ write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd(f'containerapp job update -n {job} -g {resource_group} --yaml {containerappjob_file_name}', checks=[ JMESPathCheck('properties.provisioningState', "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 200), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.75"), JMESPathCheck('properties.template.containers[0].resources.memory', "1.5Gi"), JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), 
JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=1000,gid=1000'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub2'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 200), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.75"), JMESPathCheck('properties.template.containers[0].resources.memory', "1.5Gi"), JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=1000,gid=1000'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub2'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume'), ]) # wait for provisioning state of job to be succeeded before updating jobProvisioning = True timeout = time.time() + 60 * 1 # 1 minutes from now while (jobProvisioning): jobProvisioning = self.cmd("az containerapp job show --resource-group {} --name {}".format(resource_group, job)).get_output_in_json()[ 'properties']['provisioningState'] != "Succeeded" if (time.time() > timeout): break # test update for job with yaml not containing environmentId containerappjob_yaml_text = f""" properties: configuration: 
replicaTimeout: 300 """ write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd(f'containerapp job update -n {job} -g {resource_group} --yaml {containerappjob_file_name} --no-wait') self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.configuration.replicaTimeout", 300) ]) clean_up_test_file(containerappjob_file_name)
containerappjob_file_name = f"{self._testMethodName}_containerappjob.yml" write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd(f'containerapp job create -n {job} -g {resource_group} --environment {env} --yaml {containerappjob_file_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), JMESPathCheck('identity.type', "UserAssigned"), JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume') ]) self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), 
JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), JMESPathCheck('identity.type', "UserAssigned"), JMESPathCheck('properties.template.volumes[0].storageType', 'AzureFile'), JMESPathCheck('properties.template.volumes[0].storageName', share), JMESPathCheck('properties.template.volumes[0].name', 'azure-files-volume'), JMESPathCheck('properties.template.volumes[0].mountOptions', 'uid=999,gid=999'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].subPath', 'sub'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'), JMESPathCheck('properties.template.containers[0].volumeMounts[0].volumeName', 'azure-files-volume') ]) # wait for provisioning state of job to be succeeded before updating jobProvisioning = True timeout = time.time() + 60 * 1 # 1 minutes from now while (jobProvisioning): jobProvisioning = self.cmd("az containerapp job show --resource-group {} --name {}".format(resource_group, job)).get_output_in_json()[ 'properties']['provisioningState'] != "Succeeded" if (time.time() > timeout): break # test container app job update with yaml containerappjob_yaml_text = f
test_containerappjob_create_with_yaml
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerappjob_executions.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerappjob_executions.py
MIT
def test_containerappjob_eventtriggered_create_with_yaml(self, resource_group, laworkspace_customer_id, laworkspace_shared_key): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) env = self.create_random_name(prefix='env', length=24) job = self.create_random_name(prefix='yaml', length=24) create_containerapp_env(self, env, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) containerapp_env = self.cmd( 'containerapp env show -g {} -n {}'.format(resource_group, env)).get_output_in_json() user_identity_name = self.create_random_name(prefix='containerapp-user', length=24) user_identity = self.cmd( 'identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json() user_identity_id = user_identity['id'] # test job create with yaml containerappjob_yaml_text = f""" location: {TEST_LOCATION} properties: environmentId: {containerapp_env["id"]} configuration: eventTriggerConfig: replicaCompletionCount: 1 parallelism: 1 scale: minExecutions: 1 maxExecutions: 11 rules: - name: github-runner-test type: github-runner metadata: github-runner: https://api.github.com owner: test_org runnerScope: repo repos: test_repo targetWorkflowQueueLength: "1" auth: - secretRef: personal-access-token triggerParameter: personalAccessToken replicaRetryLimit: 1 replicaTimeout: 100 secrets: - name: personal-access-token value: test_personal_access_token triggerType: Event template: containers: - env: - name: ACCESS_TOKEN secretRef: personal-access-token - name: DISABLE_RUNNER_UPDATE value: "true" - name: RUNNER_SCOPE value: repo - name: ORG_NAME value: test_org - name: ORG_RUNNER value: "false" - name: RUNNER_WORKDIR value: /tmp/runner - name: REPO_URL value: https://github.com/test_org/test_repo image: mcr.microsoft.com/k8se/quickstart-jobs:latest name: eventdriventjob resources: cpu: 0.5 ephemeralStorage: 1Gi memory: 1Gi volumeMounts: - volumeName: workdir mountPath: /tmp/github-runner-your-repo - 
volumeName: dockersock mountPath: /var/run/docker.sock volumes: - name: workdir storageType: EmptyDir - name: dockersock storageType: EmptyDir identity: type: UserAssigned userAssignedIdentities: {user_identity_id}: {{}} """ containerappjob_file_name = f"{self._testMethodName}_containerappjob.yml" write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd( f'containerapp job create -n {job} -g {resource_group} --environment {env} --yaml {containerappjob_file_name}') self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Event", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), JMESPathCheck('properties.configuration.eventTriggerConfig.replicaCompletionCount', 1), JMESPathCheck('properties.configuration.eventTriggerConfig.parallelism', 1), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.minExecutions', 1), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.maxExecutions', 11), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].type', "github-runner"), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].metadata.runnerScope', "repo"), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].auth[0].secretRef', "personal-access-token"), JMESPathCheck('identity.type', "UserAssigned"), ]) # wait for provisioning state of job to be succeeded before updating jobProvisioning = True timeout = time.time() + 60 * 1 # 1 minutes from now while (jobProvisioning): jobProvisioning = 
self.cmd("az containerapp job show --resource-group {} --name {}".format(resource_group, job)).get_output_in_json()[ 'properties']['provisioningState'] != "Succeeded" if (time.time() > timeout): break # test container app job update with yaml containerappjob_yaml_text = f""" location: {TEST_LOCATION} properties: environmentId: {containerapp_env["id"]} configuration: eventTriggerConfig: replicaCompletionCount: 2 parallelism: 2 scale: minExecutions: 0 maxExecutions: 95 rules: - name: github-runner-testv2 type: github-runner metadata: github-runner: https://api.github.com owner: test_org_1 runnerScope: repo repos: test_repo_1 targetWorkflowQueueLength: "1" auth: - secretRef: personal-access-token triggerParameter: personalAccessToken replicaRetryLimit: 2 replicaTimeout: 200 secrets: - name: personal-access-token value: test_personal_access_token triggerType: Event """ write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd(f'containerapp job update -n {job} -g {resource_group} --yaml {containerappjob_file_name} --no-wait') self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.configuration.triggerType", "Event", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 200), JMESPathCheck('properties.configuration.replicaRetryLimit', 2), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.configuration.eventTriggerConfig.replicaCompletionCount', 2), JMESPathCheck('properties.configuration.eventTriggerConfig.parallelism', 2), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.minExecutions', 0), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.maxExecutions', 95), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].name', "github-runner-testv2"), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].metadata.runnerScope', "repo"), 
JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].metadata.owner', "test_org_1"), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].auth[0].secretRef', "personal-access-token"), ]) clean_up_test_file(containerappjob_file_name)
containerappjob_file_name = f"{self._testMethodName}_containerappjob.yml" write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd( f'containerapp job create -n {job} -g {resource_group} --environment {env} --yaml {containerappjob_file_name}') self.cmd(f'containerapp job show -g {resource_group} -n {job}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.triggerType", "Event", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), JMESPathCheck('properties.configuration.eventTriggerConfig.replicaCompletionCount', 1), JMESPathCheck('properties.configuration.eventTriggerConfig.parallelism', 1), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.minExecutions', 1), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.maxExecutions', 11), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].type', "github-runner"), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].metadata.runnerScope', "repo"), JMESPathCheck('properties.configuration.eventTriggerConfig.scale.rules[0].auth[0].secretRef', "personal-access-token"), JMESPathCheck('identity.type', "UserAssigned"), ]) # wait for provisioning state of job to be succeeded before updating jobProvisioning = True timeout = time.time() + 60 * 1 # 1 minutes from now while (jobProvisioning): jobProvisioning = self.cmd("az containerapp job show --resource-group {} --name {}".format(resource_group, job)).get_output_in_json()[ 'properties']['provisioningState'] != "Succeeded" if (time.time() > timeout): break # test container app job update 
with yaml containerappjob_yaml_text = f
test_containerappjob_eventtriggered_create_with_yaml
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerappjob_executions.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerappjob_executions.py
MIT
def test_containerapp_up_dapr_e2e(self, resource_group): """ Ensure that dapr can be enabled if the app has been created using containerapp up """ self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) image = 'mcr.microsoft.com/azuredocs/aks-helloworld:v1' ca_name = self.create_random_name(prefix='containerapp', length=24) env = prepare_containerapp_env_for_app_e2e_tests(self) self.cmd( 'containerapp up -g {} -n {} --environment {} --image {}'.format( resource_group, ca_name, env, image)) self.cmd( 'containerapp dapr enable -g {} -n {} --dapr-app-id containerapp1 --dapr-app-port 80 ' '--dapr-app-protocol http --dal --dhmrs 6 --dhrbs 60 --dapr-log-level warn'.format( resource_group, ca_name), checks=[ JMESPathCheck('appId', "containerapp1"), JMESPathCheck('enabled', True) ])
Ensure that dapr can be enabled if the app has been created using containerapp up
test_containerapp_up_dapr_e2e
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_commands.py
MIT
def test_containerapp_create_with_yaml(self, resource_group): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) app = self.create_random_name(prefix='yaml', length=24) env_id = prepare_containerapp_env_for_app_e2e_tests(self) env_rg = parse_resource_id(env_id).get('resource_group') env_name = parse_resource_id(env_id).get('name') containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(env_rg, env_name)).get_output_in_json() user_identity_name = self.create_random_name(prefix='containerapp-user', length=24) user_identity = self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json() user_identity_id = user_identity['id'] # test managedEnvironmentId containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: managedEnvironmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Multiple ingress: external: true allowInsecure: false additionalPortMappings: - external: false targetPort: 12345 - external: false targetPort: 9090 exposedPort: 23456 targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Auto ipSecurityRestrictions: - name: name ipAddressRange: "1.1.1.1/10" action: "Allow" template: revisionSuffix: myrevision terminationGracePeriodSeconds: 90 containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 1 maxReplicas: 3 rules: - http: auth: - secretRef: secretref triggerParameter: trigger metadata: concurrentRequests: '50' key: value name: http-scale-rule identity: type: UserAssigned userAssignedIdentities: {user_identity_id}: {{}} """ containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp create -n {app} -g {resource_group} --environment {env_id} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g 
{resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[0].external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[0].targetPort", 12345), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].targetPort", 9090), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].exposedPort", 23456), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.terminationGracePeriodSeconds", 90), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3), JMESPathCheck("properties.template.scale.rules[0].name", "http-scale-rule"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.key", "value"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"), ]) # test environmentId containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: environmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Multiple ingress: external: 
true additionalPortMappings: [] allowInsecure: false targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Auto template: revisionSuffix: myrevision2 containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 0 maxReplicas: 3 rules: [] """ write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp update -n {app} -g {resource_group} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.additionalPortMappings", None), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision2"), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 0), JMESPathCheck("properties.template.scale.maxReplicas", 3), JMESPathCheck("properties.template.scale.rules", None) ]) # test update without environmentId containerapp_yaml_text = f""" configuration: activeRevisionsMode: Multiple ingress: external: false additionalPortMappings: - external: false targetPort: 321 - external: false targetPort: 8080 exposedPort: 1234 properties: template: revisionSuffix: myrevision3 """ write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp update -n {app} -g {resource_group} --yaml {containerapp_file_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), 
JMESPathCheck("properties.configuration.ingress.external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[0].external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[0].targetPort", 321), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].targetPort", 8080), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].exposedPort", 1234), ]) self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision3") ]) # test invalid yaml containerapp_yaml_text = f""" """ containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) try: self.cmd(f'containerapp create -n {app} -g {resource_group} --yaml {containerapp_file_name}') except Exception as ex: print(ex) self.assertTrue(isinstance(ex, ValidationError)) self.assertEqual(ex.error_msg, 'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.') pass clean_up_test_file(containerapp_file_name)
containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp create -n {app} -g {resource_group} --environment {env_id} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[0].external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[0].targetPort", 12345), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].external", False), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].targetPort", 9090), JMESPathCheck("properties.configuration.ingress.additionalPortMappings[1].exposedPort", 23456), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.terminationGracePeriodSeconds", 90), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3), JMESPathCheck("properties.template.scale.rules[0].name", "http-scale-rule"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"), JMESPathCheck("properties.template.scale.rules[0].http.metadata.key", "value"), JMESPathCheck("properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"), 
JMESPathCheck("properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"), ]) # test environmentId containerapp_yaml_text = f
test_containerapp_create_with_yaml
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_commands.py
MIT
def test_containerapp_create_with_vnet_yaml(self, resource_group): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) env = self.create_random_name(prefix='env', length=24) vnet = self.create_random_name(prefix='name', length=24) self.cmd(f"network vnet create --address-prefixes '14.0.0.0/23' -g {resource_group} -n {vnet}") sub_id = self.cmd(f"network vnet subnet create --address-prefixes '14.0.0.0/23' --delegations Microsoft.App/environments -n sub -g {resource_group} --vnet-name {vnet}").get_output_in_json()["id"] self.cmd(f'containerapp env create -g {resource_group} -n {env} --internal-only -s {sub_id}') containerapp_env = self.cmd(f'containerapp env show -g {resource_group} -n {env}').get_output_in_json() while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) containerapp_env = self.cmd(f'containerapp env show -g {resource_group} -n {env}').get_output_in_json() app = self.create_random_name(prefix='yaml', length=24) user_identity_name = self.create_random_name(prefix='containerapp-user', length=24) user_identity = self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json() user_identity_id = user_identity['id'] # test create containerapp transport: Tcp, with exposedPort containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: managedEnvironmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Multiple ingress: external: true exposedPort: 3000 allowInsecure: false targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Tcp template: revisionSuffix: myrevision containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 1 maxReplicas: 3 identity: type: UserAssigned userAssignedIdentities: {user_identity_id}: {{}} """ containerapp_file_name = f"{self._testMethodName}_containerapp.yml" 
write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp create -n {app} -g {resource_group} --environment {env} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.exposedPort", 3000), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3) ]) # test update containerapp transport: Tcp, with exposedPort containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: environmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Multiple ingress: external: true exposedPort: 9551 allowInsecure: false targetPort: 80 traffic: - latestRevision: true weight: 100 transport: Tcp template: revisionSuffix: myrevision containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 1 maxReplicas: 3 """ write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp update -n {app} -g {resource_group} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.exposedPort", 9551), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.containers[0].name", "nginx"), 
JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3) ]) # test create containerapp transport: http, with CORS policy containerapp_yaml_text = f""" location: {TEST_LOCATION} type: Microsoft.App/containerApps tags: tagname: value properties: environmentId: {containerapp_env["id"]} configuration: activeRevisionsMode: Multiple ingress: external: true allowInsecure: false clientCertificateMode: Require corsPolicy: allowedOrigins: [a, b] allowedMethods: [c, d] allowedHeaders: [e, f] exposeHeaders: [g, h] maxAge: 7200 allowCredentials: true targetPort: 80 ipSecurityRestrictions: - name: name ipAddressRange: "1.1.1.1/10" action: "Allow" traffic: - latestRevision: true weight: 100 transport: http template: revisionSuffix: myrevision containers: - image: nginx name: nginx env: - name: HTTP_PORT value: 80 command: - npm - start resources: cpu: 0.5 memory: 1Gi scale: minReplicas: 1 maxReplicas: 3 """ write_test_file(containerapp_file_name, containerapp_yaml_text) app2 = self.create_random_name(prefix='yaml', length=24) self.cmd(f'containerapp create -n {app2} -g {resource_group} --environment {env} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app2}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.clientCertificateMode", "Require"), JMESPathCheck("properties.configuration.ingress.corsPolicy.allowCredentials", True), JMESPathCheck("properties.configuration.ingress.corsPolicy.maxAge", 7200), JMESPathCheck("properties.configuration.ingress.corsPolicy.allowedHeaders[0]", "e"), JMESPathCheck("properties.configuration.ingress.corsPolicy.allowedMethods[0]", "c"), JMESPathCheck("properties.configuration.ingress.corsPolicy.allowedOrigins[0]", "a"), JMESPathCheck("properties.configuration.ingress.corsPolicy.exposeHeaders[0]", "g"), 
JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange", "1.1.1.1/10"), JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3) ]) clean_up_test_file(containerapp_file_name)
containerapp_file_name = f"{self._testMethodName}_containerapp.yml" write_test_file(containerapp_file_name, containerapp_yaml_text) self.cmd(f'containerapp create -n {app} -g {resource_group} --environment {env} --yaml {containerapp_file_name}') self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.configuration.ingress.external", True), JMESPathCheck("properties.configuration.ingress.exposedPort", 3000), JMESPathCheck("properties.environmentId", containerapp_env["id"]), JMESPathCheck("properties.template.revisionSuffix", "myrevision"), JMESPathCheck("properties.template.containers[0].name", "nginx"), JMESPathCheck("properties.template.scale.minReplicas", 1), JMESPathCheck("properties.template.scale.maxReplicas", 3) ]) # test update containerapp transport: Tcp, with exposedPort containerapp_yaml_text = f
test_containerapp_create_with_vnet_yaml
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerapp_commands.py
MIT
def test_containerappjob_create_with_environment_id(self, resource_group, laworkspace_customer_id, laworkspace_shared_key): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) env1 = self.create_random_name(prefix='env1', length=24) env2 = self.create_random_name(prefix='env2', length=24) job1 = self.create_random_name(prefix='yaml1', length=24) create_containerapp_env(self, env1, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) containerapp_env1 = self.cmd( 'containerapp env show -g {} -n {}'.format(resource_group, env1)).get_output_in_json() create_containerapp_env(self, env2, resource_group, logs_workspace=laworkspace_customer_id, logs_workspace_shared_key=laworkspace_shared_key) containerapp_env2 = self.cmd( 'containerapp env show -g {} -n {}'.format(resource_group, env2)).get_output_in_json() # the value in --yaml is used, warning for different value in --environmentId containerappjob_yaml_text = f""" location: {TEST_LOCATION} properties: environmentId: {containerapp_env1["id"]} configuration: dapr: null eventTriggerConfig: null manualTriggerConfig: parallelism: 1 replicaCompletionCount: 1 registries: null replicaRetryLimit: 1 replicaTimeout: 100 scheduleTriggerConfig: null secrets: null triggerType: Manual template: containers: - env: - name: MY_ENV_VAR value: hello image: mcr.microsoft.com/k8se/quickstart-jobs:latest name: anfranci-azclitest-acaj1 resources: cpu: 0.5 ephemeralStorage: 1Gi memory: 1Gi initContainers: - command: - /bin/sh - -c - sleep 150 image: k8seteste2e.azurecr.io/e2e-apps/kuar:green name: simple-sleep-container probes: - type: liveness httpGet: path: "/health" port: 8080 httpHeaders: - name: "Custom-Header" value: "liveness probe" initialDelaySeconds: 7 periodSeconds: 3 resources: cpu: "0.25" memory: 0.5Gi workloadProfileName: null """ containerappjob_file_name = f"{self._testMethodName}_containerappjob.yml" write_test_file(containerappjob_file_name, 
containerappjob_yaml_text) self.cmd( f'containerapp job create -n {job1} -g {resource_group} --environment {env2} --yaml {containerappjob_file_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), ]) self.cmd(f'containerapp job show -g {resource_group} -n {job1}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), ]) # test container app job update with yaml containerappjob_yaml_text = f""" location: {TEST_LOCATION} properties: configuration: dapr: null eventTriggerConfig: null manualTriggerConfig: parallelism: 1 replicaCompletionCount: 1 registries: null replicaRetryLimit: 1 replicaTimeout: 200 scheduleTriggerConfig: null secrets: null triggerType: Manual template: containers: - env: - name: MY_ENV_VAR value: hello image: mcr.microsoft.com/k8se/quickstart-jobs:latest name: anfranci-azclitest-acaj1 resources: cpu: 0.75 ephemeralStorage: 1Gi memory: 1.5Gi initContainers: 
- command: - /bin/sh - -c - sleep 150 image: k8seteste2e.azurecr.io/e2e-apps/kuar:green name: simple-sleep-container probes: - type: liveness httpGet: path: "/health" port: 8080 httpHeaders: - name: "Custom-Header" value: "liveness probe" initialDelaySeconds: 7 periodSeconds: 3 resources: cpu: "0.25" memory: 0.5Gi """ write_test_file(containerappjob_file_name, containerappjob_yaml_text) job2 = self.create_random_name(prefix='yaml2', length=24) self.cmd( f'containerapp job create -n {job2} -g {resource_group} --environment {env2} --yaml {containerappjob_file_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env2["id"]), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 200), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.75"), JMESPathCheck('properties.template.containers[0].resources.memory', "1.5Gi"), ]) self.cmd(f'containerapp job list -g {resource_group}', checks=[ JMESPathCheck("length(@)", 2), ]) clean_up_test_file(containerappjob_file_name)
containerappjob_file_name = f"{self._testMethodName}_containerappjob.yml" write_test_file(containerappjob_file_name, containerappjob_yaml_text) self.cmd( f'containerapp job create -n {job1} -g {resource_group} --environment {env2} --yaml {containerappjob_file_name}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), ]) self.cmd(f'containerapp job show -g {resource_group} -n {job1}', checks=[ JMESPathCheck("properties.provisioningState", "Succeeded"), JMESPathCheck("properties.environmentId", containerapp_env1["id"]), JMESPathCheck("properties.configuration.triggerType", "Manual", case_sensitive=False), JMESPathCheck('properties.configuration.replicaTimeout', 100), JMESPathCheck('properties.configuration.replicaRetryLimit', 1), JMESPathCheck('properties.template.containers[0].image', "mcr.microsoft.com/k8se/quickstart-jobs:latest"), JMESPathCheck('properties.template.containers[0].resources.cpu', "0.5"), JMESPathCheck('properties.template.containers[0].resources.memory', "1Gi"), ]) # test container app job update with yaml containerappjob_yaml_text = f
test_containerappjob_create_with_environment_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerappjob_crud.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/test_containerappjob_crud.py
MIT
def _list_role_assignments(cmd, workspace_name, role=None, assignee=None, scope=None,
                           resolve_assignee=True, item=None, item_type=None):
    """Return every role assignment in the workspace matching the given filters.

    Resolves the scope, role ID and assignee object ID, then pages through the
    service results by following ``x-ms-continuation`` tokens until exhausted.
    """
    if scope or item or item_type:
        scope = _build_role_scope(workspace_name, scope, item, item_type)

    role_id = _resolve_role_id(cmd, role, workspace_name)
    if resolve_assignee:
        object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True)
    else:
        object_id = assignee

    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)

    assignments = []
    continuation = ""
    while True:
        page, headers = client.list_role_assignments(
            role_id, object_id, scope,
            continuation_token_parameter=continuation,
            cls=cust_help.get_deserialized_and_headers)
        assignments.extend(page.value)
        # An empty continuation token means the final page has been read.
        continuation = headers['x-ms-continuation']
        if not continuation:
            break
    return assignments
Prepare scope, role ID and resolve object ID from Graph API.
_list_role_assignments
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py
MIT
def create_role_assignment(cmd, workspace_name, role, assignee=None, assignee_object_id=None,
                           scope=None, assignee_principal_type=None, item_type=None, item=None,
                           assignment_id=None):
    """Validate the argument combination, then delegate to _create_role_assignment.

    When the service reports that an identical assignment already exists, the
    call is treated as idempotent and the matching assignments are returned.
    """
    if assignment_id and not is_guid(assignment_id):
        raise InvalidArgumentValueError('usage error: --id GUID')
    # Exactly one of --assignee / --assignee-object-id must be supplied.
    if bool(assignee) == bool(assignee_object_id):
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    # Principal type only makes sense together with an explicit object id.
    if assignee_principal_type and not assignee_object_id:
        raise ArgumentUsageError('usage error: --assignee-object-id GUID [--assignee-principal-type]')
    # --item and --item-type must be supplied together.
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')

    try:
        return _create_role_assignment(cmd, workspace_name, role,
                                       assignee or assignee_object_id, scope, item, item_type,
                                       resolve_assignee=(not assignee_object_id),
                                       assignee_principal_type=assignee_principal_type,
                                       assignment_id=assignment_id)
    except Exception as ex:  # pylint: disable=broad-except
        if _error_caused_by_role_assignment_exists(ex):  # for idempotent
            return list_role_assignments(cmd, workspace_name, role=role, assignee=assignee,
                                         assignee_object_id=assignee_object_id, scope=scope,
                                         item=item, item_type=item_type)
        raise
Check parameters are provided correctly, then call _create_role_assignment.
create_role_assignment
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py
MIT
def _create_role_assignment(cmd, workspace_name, role, assignee, scope=None, item=None,
                            item_type=None, resolve_assignee=True, assignee_principal_type=None,
                            assignment_id=None):
    """Create a role assignment after resolving scope, role ID and assignee object ID."""
    scope = _build_role_scope(workspace_name, scope, item, item_type)
    role_id = _resolve_role_id(cmd, role, workspace_name)
    if resolve_assignee:
        object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True)
    else:
        object_id = assignee

    assignment_client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    # Generate a fresh GUID when the caller did not supply an assignment id.
    effective_id = assignment_id if assignment_id is not None else _gen_guid()
    return assignment_client.create_role_assignment(effective_id, role_id, object_id, scope,
                                                    assignee_principal_type)
Prepare scope, role ID and resolve object ID from Graph API.
_create_role_assignment
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py
MIT
def sqlpool_security_alert_policy_update(
        cmd,
        instance,
        state=None,
        storage_account=None,
        storage_endpoint=None,
        storage_account_access_key=None,
        retention_days=None,
        email_addresses=None,
        disabled_alerts=None,
        email_account_admins=None,
        security_alert_policy_name=None):
    '''
    Updates a SQL pool's security alert policy. Custom update function to apply parameters to instance.
    '''
    # Apply state
    if state:
        instance.state = SecurityAlertPolicyState[state.lower()]
    enabled = instance.state.value.lower() == SecurityAlertPolicyState.enabled.value.lower()  # pylint: disable=no-member

    # Set storage-related properties
    _sqlpool_security_policy_update(
        cmd.cli_ctx,
        instance,
        enabled,
        storage_account,
        storage_endpoint,
        storage_account_access_key,
        False)

    # Set other properties
    if retention_days:
        instance.retention_days = retention_days

    if email_addresses:
        instance.email_addresses = email_addresses

    if disabled_alerts:
        instance.disabled_alerts = disabled_alerts

    if email_account_admins:
        instance.email_account_admins = email_account_admins

    if security_alert_policy_name:
        # BUG FIX: the previous code assigned the literal string
        # 'security_alert_policy_name' instead of the parameter's value,
        # so the caller-supplied name was silently discarded.
        instance.security_alert_policy_name = security_alert_policy_name

    return instance
Updates a SQL pool's security alert policy. Custom update function to apply parameters to instance.
sqlpool_security_alert_policy_update
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolsecurityalertpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolsecurityalertpolicy.py
MIT
def _sqlpool_security_policy_update(
        cli_ctx,
        instance,
        enabled,
        storage_account,
        storage_endpoint,
        storage_account_access_key,
        use_secondary_key):
    """
    Common code for updating audit and threat detection policy.
    """
    # The two ways of identifying the storage target are mutually exclusive.
    if storage_endpoint and storage_account:
        raise CLIError('--storage-endpoint and --storage-account cannot both be specified.')

    # Determine the storage endpoint: either taken verbatim, or resolved
    # from the storage account name via ARM.
    if storage_endpoint:
        instance.storage_endpoint = storage_endpoint
    if storage_account:
        account_resource_group = _find_storage_account_resource_group(cli_ctx, storage_account)
        instance.storage_endpoint = _get_storage_endpoint(cli_ctx, storage_account, account_resource_group)

    # Determine the storage access key.
    if storage_account_access_key:
        # Access key is specified explicitly by the caller.
        instance.storage_account_access_key = storage_account_access_key
    elif enabled:
        # Access key is not specified, but state is Enabled.
        # If state is Enabled, then access key property is required in PUT. However access key is
        # readonly (GET returns empty string for access key), so we need to determine the value
        # and then PUT it back. (We don't want the user to be forced to specify this, because that
        # would be very annoying when updating non-storage-related properties).
        # This doesn't work if the user used generic update args, i.e. `--set state=Enabled`
        # instead of `--state Enabled`, since the generic update args are applied after this custom
        # function, but at least we tried.
        if not storage_account:
            storage_account = _get_storage_account_name(instance.storage_endpoint)
        account_resource_group = _find_storage_account_resource_group(cli_ctx, storage_account)
        instance.storage_account_access_key = _get_storage_key(
            cli_ctx, storage_account, account_resource_group, use_secondary_key)
Common code for updating audit and threat detection policy.
_sqlpool_security_policy_update
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolsecurityalertpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolsecurityalertpolicy.py
MIT
def restore_sql_pool(cmd, client, resource_group_name, workspace_name, sql_pool_name, destination_name,
                     performance_level=None, restore_point_in_time=None, source_database_deletion_date=None,
                     storage_account_type=None, tags=None, no_wait=False, **kwargs):
    """
    Restores an existing or deleted SQL pool (i.e. create with 'Restore'
    or 'PointInTimeRestore' create mode.) Custom function makes create mode
    more convenient.
    """
    # One of the two timestamps is required to pick a restore flavour.
    if not restore_point_in_time and not source_database_deletion_date:
        raise CLIError('Either --time or --deleted-time must be specified.')

    # 'Restore' recovers a deleted pool; 'PointInTimeRestore' clones a live one.
    if source_database_deletion_date is not None:
        create_mode = SynapseSqlCreateMode.Restore
    else:
        create_mode = SynapseSqlCreateMode.PointInTimeRestore

    source_pool = client.get(resource_group_name, workspace_name, sql_pool_name)

    # Default the performance level (SKU) to that of the source pool.
    effective_performance_level = performance_level if performance_level is not None else source_pool.sku.name

    # Build the ARM resource ID of the source database.
    source_database_id = _construct_database_resource_id(
        cmd.cli_ctx, resource_group_name, workspace_name, sql_pool_name)

    destination_pool = SqlPool(
        sku=Sku(name=effective_performance_level),
        location=source_pool.location,
        create_mode=create_mode,
        restore_point_in_time=restore_point_in_time,
        storage_account_type=storage_account_type,
        source_database_id=source_database_id,
        tags=tags)

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, workspace_name,
                       destination_name, destination_pool)
Restores an existing or deleted SQL pool (i.e. create with 'Restore' or 'PointInTimeRestore' create mode.) Custom function makes create mode more convenient.
restore_sql_pool
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpool.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpool.py
MIT
def sql_pool_show_connection_string(
        cmd,
        client_provider,
        sql_pool_name='<sql pool name>',
        workspace_name='<workspace name>',
        auth_type=SqlPoolConnectionClientAuthenticationType.SqlPassword.value):
    """
    Builds a SQL connection string for a specified client provider.

    :param client_provider: which client library the string is for
        (ADO.NET, JDBC, ODBC, PHP sqlsrv, PHP PDO).
    :param sql_pool_name: SQL pool name; a '<sql pool name>' placeholder is
        emitted when not supplied.
    :param workspace_name: workspace name; a '<workspace name>' placeholder is
        emitted when not supplied.
    :param auth_type: authentication flavour (SQL password, AAD password,
        AAD integrated).
    :raises CLIError: for provider/auth combinations that are not supported
        (the PHP drivers only support SQL password authentication).
    """
    # Derive the dedicated SQL endpoint suffix from the cloud's Synapse
    # development endpoint suffix (e.g. 'dev.azuresynapse.net' -> 'sql...').
    workspace_sql_pool_compute_suffix = cmd.cli_ctx.cloud.suffixes.synapse_analytics_endpoint.replace('dev', 'sql')

    # Values interpolated into the selected template below.
    conn_str_props = {
        'workspace': workspace_name,
        'workspace_fqdn': '{}{}'.format(workspace_name, workspace_sql_pool_compute_suffix),
        'workspace_suffix': workspace_sql_pool_compute_suffix,
        'sql_pool': sql_pool_name
    }

    # Template table keyed by (client provider, auth type). Entries that are
    # CLIError instances mark unsupported combinations and are raised below.
    formats = {
        SqlPoolConnectionClientType.AdoDotNet: {
            SqlPoolConnectionClientAuthenticationType.SqlPassword:
                'Server=tcp:{workspace_fqdn},1433;Initial Catalog={sql_pool};Persist Security Info=False;'
                'User ID=<username>;Password=<password>;MultipleActiveResultSets=False;Encrypt=True;'
                'TrustServerCertificate=False;Connection Timeout=30;',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryPassword:
                'Server=tcp:{workspace_fqdn},1433;Initial Catalog={sql_pool};Persist Security Info=False;'
                'User ID=<username>;Password=<password>;MultipleActiveResultSets=False;Encrypt=True;'
                'TrustServerCertificate=False;Authentication="Active Directory Password";',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryIntegrated:
                'Server=tcp:{workspace_fqdn},1433;Initial Catalog={sql_pool};Persist Security Info=False;'
                'User ID=<username>;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;'
                'Authentication="Active Directory Integrated";'
        },
        SqlPoolConnectionClientType.Jdbc: {
            SqlPoolConnectionClientAuthenticationType.SqlPassword:
                'jdbc:sqlserver://{workspace_fqdn}:1433;database={sql_pool};user=<username>@{workspace};'
                'password=<password>;encrypt=true;trustServerCertificate=false;'
                'hostNameInCertificate=*{workspace_suffix};loginTimeout=30;',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryPassword:
                'jdbc:sqlserver://{workspace_fqdn}:1433;database={sql_pool};user=<username>;'
                'password=<password>;encrypt=true;trustServerCertificate=false;'
                'hostNameInCertificate=*{workspace_suffix};loginTimeout=30;authentication=ActiveDirectoryPassword',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryIntegrated:
                'jdbc:sqlserver://{workspace_fqdn}:1433;database={sql_pool};'
                'encrypt=true;trustServerCertificate=false;'
                'hostNameInCertificate=*{workspace_suffix};loginTimeout=30;Authentication=ActiveDirectoryIntegrated',
        },
        SqlPoolConnectionClientType.Odbc: {
            SqlPoolConnectionClientAuthenticationType.SqlPassword:
                'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{workspace_fqdn},1433;'
                'Database={sql_pool};Uid=<username>;Pwd=<password>;Encrypt=yes;'
                'TrustServerCertificate=no;Connection Timeout=30;',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryPassword:
                'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{workspace_fqdn},1433;'
                'Database={sql_pool};Uid=<username>;Pwd=<password>;Encrypt=yes;'
                'TrustServerCertificate=no;Connection Timeout=30;Authentication=ActiveDirectoryPassword',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryIntegrated:
                'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{workspace_fqdn},1433;'
                'Database={sql_pool};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;'
                'Authentication=ActiveDirectoryIntegrated',
        },
        SqlPoolConnectionClientType.Php: {
            # pylint: disable=line-too-long
            SqlPoolConnectionClientAuthenticationType.SqlPassword:
                '$connectionOptions = array("UID"=>"<username>@{workspace}", "PWD"=>"<password>", "Database"=>{sql_pool}, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:{workspace_fqdn},1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryPassword:
                CLIError('PHP sqlsrv driver only supports SQL Password authentication.'),
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryIntegrated:
                CLIError('PHP sqlsrv driver only supports SQL Password authentication.'),
        },
        SqlPoolConnectionClientType.PhpPdo: {
            # pylint: disable=line-too-long
            SqlPoolConnectionClientAuthenticationType.SqlPassword:
                '$conn = new PDO("sqlsrv:server = tcp:{workspace_fqdn},1433; Database = {sql_pool}; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");',
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryPassword:
                CLIError('PHP Data Object (PDO) driver only supports SQL Password authentication.'),
            SqlPoolConnectionClientAuthenticationType.ActiveDirectoryIntegrated:
                CLIError('PHP Data Object (PDO) driver only supports SQL Password authentication.'),
        }
    }

    f = formats[client_provider][auth_type]

    if isinstance(f, Exception):
        # Error: unsupported provider/auth combination.
        raise f

    # Success: interpolate workspace/pool values into the chosen template.
    return f.format(**conn_str_props)
Builds a SQL connection string for a specified client provider.
sql_pool_show_connection_string
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpool.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpool.py
MIT
def create_workspace_sql_aad_admin(cmd, client, resource_group_name, workspace_name, login_name, object_id,
                                   no_wait=False):
    """
    Set a Workspace SQL AD admin.
    """
    # Look up the workspace to obtain its ARM resource ID.
    workspace_client = cf_synapse_client_workspace_factory(cmd.cli_ctx)
    workspace = workspace_client.get(resource_group_name, workspace_name)

    admin_info = WorkspaceAadAdminInfo(
        id=workspace.id,
        login=login_name,
        sid=object_id,
        administrator_type=AdministratorType,
        tenant_id=get_tenant_id())

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, workspace_name,
                       admin_info)
Set a Workspace SQL AD admin.
create_workspace_sql_aad_admin
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/workspacesqlaadadmin.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/workspacesqlaadadmin.py
MIT
def update_workspace_sql_aad_admin(instance, login_name=None, object_id=None):
    """
    Update a Workspace SQL AD admin.

    Overwrites login/sid only when a truthy value is supplied; otherwise the
    existing value on *instance* is kept. Returns *instance*.
    """
    if login_name:
        instance.login = login_name
    if object_id:
        instance.sid = object_id
    return instance
Update a Workspace SQL AD admin.
update_workspace_sql_aad_admin
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/workspacesqlaadadmin.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/workspacesqlaadadmin.py
MIT
def sqlpool_blob_auditing_policy_update(
        cmd,
        instance,
        workspace_name,
        resource_group_name,
        sql_pool_name,
        state=None,
        blob_storage_target_state=None,
        storage_account=None,
        storage_endpoint=None,
        storage_account_access_key=None,
        storage_account_subscription_id=None,
        is_storage_secondary_key_in_use=None,
        retention_days=None,
        audit_actions_and_groups=None,
        log_analytics_target_state=None,
        log_analytics_workspace_resource_id=None,
        event_hub_target_state=None,
        event_hub_authorization_rule_id=None,
        event_hub=None,
        is_azure_monitor_target_enabled=None):
    """
    Updates a sql pool blob auditing policy. Custom update function to apply
    parameters to instance.
    """
    # Delegate to the shared audit-policy helper; SQL pool audit events are
    # logged under the 'SQLSecurityAuditEvents' diagnostic category.
    _audit_policy_update(
        cmd=cmd,
        instance=instance,
        workspace_name=workspace_name,
        resource_group_name=resource_group_name,
        sql_pool_name=sql_pool_name,
        state=state,
        category_name='SQLSecurityAuditEvents',
        blob_storage_target_state=blob_storage_target_state,
        storage_account=storage_account,
        storage_endpoint=storage_endpoint,
        storage_account_access_key=storage_account_access_key,
        storage_account_subscription_id=storage_account_subscription_id,
        is_storage_secondary_key_in_use=is_storage_secondary_key_in_use,
        retention_days=retention_days,
        audit_actions_and_groups=audit_actions_and_groups,
        is_azure_monitor_target_enabled=is_azure_monitor_target_enabled,
        log_analytics_target_state=log_analytics_target_state,
        log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
        event_hub_target_state=event_hub_target_state,
        event_hub_authorization_rule_id=event_hub_authorization_rule_id,
        event_hub_name=event_hub)

    return instance
Updates a sql pool blob auditing policy. Custom update function to apply parameters to instance.
sqlpool_blob_auditing_policy_update
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _audit_policy_update_apply_blob_storage_details(
        cmd,
        instance,
        blob_storage_target_state,
        retention_days,
        storage_account,
        storage_account_access_key,
        storage_endpoint,
        storage_account_subscription_id):
    '''
    Apply blob storage details on policy update
    '''
    # Not every policy model exposes this flag; default to the primary key.
    if hasattr(instance, 'is_storage_secondary_key_in_use'):
        is_storage_secondary_key_in_use = instance.is_storage_secondary_key_in_use
    else:
        is_storage_secondary_key_in_use = False

    if blob_storage_target_state is None:
        # Blob storage target state was not changed by the caller.
        if not instance.storage_endpoint:
            # Original audit policy has no storage_endpoint
            instance.storage_endpoint = None
            instance.storage_account_access_key = None
        else:
            # Resolve storage_account_access_key based on original storage_endpoint
            storage_account = _get_storage_account_name(instance.storage_endpoint)
            storage_resource_group = _find_storage_account_resource_group(cmd.cli_ctx, storage_account)

            instance.storage_account_access_key = _get_storage_key(
                cli_ctx=cmd.cli_ctx,
                storage_account=storage_account,
                resource_group_name=storage_resource_group,
                use_secondary_key=is_storage_secondary_key_in_use)
    elif _is_audit_policy_state_enabled(blob_storage_target_state):
        # A storage account name overrides any endpoint/subscription args.
        if storage_account is not None:
            storage_resource_group = _find_storage_account_resource_group(cmd.cli_ctx, storage_account)
            storage_endpoint = _get_storage_endpoint(cmd.cli_ctx, storage_account, storage_resource_group)
            storage_account_subscription_id = _find_storage_account_subscription_id(cmd.cli_ctx, storage_account)

        if storage_endpoint is not None:
            instance.storage_endpoint = storage_endpoint

        if storage_account_subscription_id is not None:
            instance.storage_account_subscription_id = storage_account_subscription_id

        if storage_account_access_key is not None:
            instance.storage_account_access_key = storage_account_access_key
        elif storage_endpoint is not None:
            # Resolve storage_account if not provided
            if storage_account is None:
                storage_account = _get_storage_account_name(storage_endpoint)

            storage_resource_group = _find_storage_account_resource_group(cmd.cli_ctx, storage_account)

            # Resolve storage_account_access_key based on storage_account.
            # Bug fix: use the guarded local value instead of reading
            # instance.is_storage_secondary_key_in_use directly, which may not
            # exist on every policy model (see hasattr check above).
            instance.storage_account_access_key = _get_storage_key(
                cli_ctx=cmd.cli_ctx,
                storage_account=storage_account,
                resource_group_name=storage_resource_group,
                use_secondary_key=is_storage_secondary_key_in_use)

        if retention_days is not None:
            instance.retention_days = retention_days
    else:
        # Blob storage auditing was explicitly disabled: clear storage settings.
        instance.storage_endpoint = None
        instance.storage_account_access_key = None
Apply blob storage details on policy update
_audit_policy_update_apply_blob_storage_details
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _find_storage_account_resource_id(cli_ctx, name):
    '''
    Finds a storage account's full ARM resource ID by querying the ARM
    resource cache.

    Why do we have to do this: so we know the resource group in order to later
    query the storage API to determine the account's keys and endpoint.
    Why isn't this just a command line parameter: because if it was a command
    line parameter then the customer would need to specify storage resource
    group just to update some unrelated property, which is annoying and makes
    no sense to the customer.

    Raises CLIError when zero or multiple accounts match, or when the match is
    a classic (unsupported) storage account.
    '''
    arm_type = 'Microsoft.Storage/storageAccounts'
    classic_arm_type = 'Microsoft.ClassicStorage/storageAccounts'
    query = "name eq '{}' and (resourceType eq '{}' or resourceType eq '{}')".format(
        name, arm_type, classic_arm_type)

    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    matches = list(resource_client.resources.list(filter=query))

    # Exactly one non-classic account must match.
    if not matches:
        raise CLIError("No storage account with name '{}' was found.".format(name))
    if len(matches) > 1:
        raise CLIError("Multiple storage accounts with name '{}' were found.".format(name))

    match = matches[0]
    if match.type == classic_arm_type:
        raise CLIError("The storage account with name '{}' is a classic storage account which is"
                       " not supported by this command. Use a non-classic storage account or"
                       " specify storage endpoint and key instead.".format(name))

    return match.id
Finds a storage account's resource group by querying ARM resource cache. Why do we have to do this: so we know the resource group in order to later query the storage API to determine the account's keys and endpoint. Why isn't this just a command line parameter: because if it was a command line parameter then the customer would need to specify storage resource group just to update some unrelated property, which is annoying and makes no sense to the customer.
_find_storage_account_resource_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _find_storage_account_resource_group(cli_ctx, name):
    """
    Finds a storage account's resource group by querying ARM resource cache.

    Why do we have to do this: so we know the resource group in order to later
    query the storage API to determine the account's keys and endpoint. Why
    isn't this just a command line parameter: because then the customer would
    need to specify the storage resource group just to update some unrelated
    property, which is annoying and makes no sense to the customer.
    """
    # The resource group is segment 4 of
    # '/subscriptions/<sub>/resourceGroups/<rg>/providers/...'.
    resource_id = _find_storage_account_resource_id(cli_ctx, name)
    return resource_id.split('/')[4]
Finds a storage account's resource group by querying ARM resource cache. Why do we have to do this: so we know the resource group in order to later query the storage API to determine the account's keys and endpoint. Why isn't this just a command line parameter: because if it was a command line parameter then the customer would need to specify storage resource group just to update some unrelated property, which is annoying and makes no sense to the customer.
_find_storage_account_resource_group
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _find_storage_account_subscription_id(cli_ctx, name):
    """
    Finds a storage account's subscription id by querying the ARM resource
    cache.

    Why do we have to do this: so we know the subscription the account lives
    in without forcing the customer to pass it as a command line parameter,
    which would be annoying when updating some unrelated property.

    (Docstring corrected: the original text was copy-pasted from the
    resource-group helper, but this function returns the subscription id.)
    """
    resource_id = _find_storage_account_resource_id(cli_ctx, name)
    # Segment 2 of '/subscriptions/<sub>/resourceGroups/...' is the
    # subscription GUID.
    return resource_id.split('/')[2]
Finds a storage account's resource group by querying ARM resource cache. Why do we have to do this: so we know the resource group in order to later query the storage API to determine the account's keys and endpoint. Why isn't this just a command line parameter: because if it was a command line parameter then the customer would need to specify storage resource group just to update some unrelated property, which is annoying and makes no sense to the customer.
_find_storage_account_subscription_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _get_storage_account_name(storage_endpoint): """ Determines storage account name from endpoint url string. e.g. 'https://mystorage.blob.core.windows.net' -> 'mystorage' """ from urllib.parse import urlparse return urlparse(storage_endpoint).netloc.split('.')[0]
Determines storage account name from endpoint url string. e.g. 'https://mystorage.blob.core.windows.net' -> 'mystorage'
_get_storage_account_name
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _get_storage_endpoint(
        cli_ctx,
        storage_account,
        resource_group_name):
    """
    Gets storage account endpoint by querying storage ARM API.

    :raises CLIError: if the account exposes no blob endpoint.
    """
    from azure.mgmt.storage import StorageManagementClient

    # Get storage account properties.
    client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    account = client.storage_accounts.get_properties(
        resource_group_name=resource_group_name,
        account_name=storage_account)

    # Get the blob endpoint from the account's primary endpoints.
    # pylint: disable=no-member
    endpoints = account.primary_endpoints
    try:
        return endpoints.blob
    except AttributeError as ex:
        # Fix: chain the original AttributeError so the root cause is
        # preserved (pylint raise-missing-from).
        raise CLIError("The storage account with name '{}' (id '{}') has no blob endpoint. Use a"
                       " different storage account.".format(account.name, account.id)) from ex
Gets storage account endpoint by querying storage ARM API.
_get_storage_endpoint
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _get_storage_key(
        cli_ctx,
        storage_account,
        resource_group_name,
        use_secondary_key):
    """
    Gets storage account key by querying storage ARM API.
    """
    from azure.mgmt.storage import StorageManagementClient

    # List both account keys via the storage ARM API.
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    key_list = storage_client.storage_accounts.list_keys(
        resource_group_name=resource_group_name,
        account_name=storage_account)

    # Key 0 is the primary key; key 1 is the secondary key.
    # pylint: disable=no-member
    if use_secondary_key:
        return key_list.keys[1].value
    return key_list.keys[0].value
Gets storage account key by querying storage ARM API.
_get_storage_key
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _get_diagnostic_settings(
        cmd, resource_group_name, workspace_name, sql_pool_name=None):
    '''
    Common code to get workspace or sqlpool diagnostic settings
    '''
    # Resolve the resource URI for either the workspace itself or one of
    # its SQL pools (when sql_pool_name is given).
    resource_uri = _get_diagnostic_settings_url(
        cmd=cmd,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        sql_pool_name=sql_pool_name)

    monitor_client = cf_monitor(cmd.cli_ctx)
    return list(monitor_client.diagnostic_settings.list(resource_uri))
Common code to get workspace or sqlpool diagnostic settings
_get_diagnostic_settings
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _audit_policy_update_rollback(
        cmd,
        workspace_name,
        resource_group_name,
        sql_pool_name,
        rollback_data):
    '''
    Rollback diagnostic settings change

    *rollback_data* is a list of (action, diagnostic_setting) pairs recorded
    before the change; 'create'/'update' entries are re-applied as-is and any
    other action means the setting must be deleted.
    '''
    resource_uri = _get_diagnostic_settings_url(
        cmd=cmd,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        sql_pool_name=sql_pool_name)

    monitor_client = cf_monitor(cmd.cli_ctx)

    for action, setting in rollback_data:
        if action in ('create', 'update'):
            # Restore the previous diagnostic setting exactly as recorded.
            create_diagnostics_settings(
                client=monitor_client.diagnostic_settings,
                name=setting.name,
                resource_uri=resource_uri,
                logs=setting.logs,
                metrics=setting.metrics,
                event_hub=setting.event_hub_name,
                event_hub_rule=setting.event_hub_authorization_rule_id,
                storage_account=setting.storage_account_id,
                workspace=setting.workspace_id)
        else:
            # 'delete': the setting did not exist before the change.
            monitor_client.diagnostic_settings.delete(resource_uri, setting.name)
Rollback diagnostic settings change
_audit_policy_update_rollback
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _audit_policy_update_apply_azure_monitor_target_enabled(
        instance,
        diagnostic_settings,
        category_name,
        log_analytics_target_state,
        event_hub_target_state):
    '''
    Apply value of is_azure_monitor_target_enabled on policy update
    '''
    # If neither azure-monitor target was touched there is nothing to do.
    if log_analytics_target_state is None and event_hub_target_state is None:
        return

    # Enabling either target enables the azure-monitor flag outright.
    if _is_audit_policy_state_enabled(log_analytics_target_state) or \
            _is_audit_policy_state_enabled(event_hub_target_state):
        instance.is_azure_monitor_target_enabled = True
        return

    # Sort received diagnostic settings by name and inspect the first audit
    # setting, to keep the result consistent between command executions.
    diagnostic_settings.sort(key=lambda d: d.name)
    first_audit_setting = _fetch_first_audit_diagnostic_setting(diagnostic_settings, category_name)

    if first_audit_setting is None:
        workspace_id = None
        event_hub_rule_id = None
    else:
        workspace_id = first_audit_setting.workspace_id
        event_hub_rule_id = first_audit_setting.event_hub_authorization_rule_id

    # An explicitly disabled target no longer counts towards the flag.
    if _is_audit_policy_state_disabled(log_analytics_target_state):
        workspace_id = None
    if _is_audit_policy_state_disabled(event_hub_target_state):
        event_hub_rule_id = None

    instance.is_azure_monitor_target_enabled = \
        workspace_id is not None or event_hub_rule_id is not None
Apply value of is_azure_monitor_target_enabled on policy update
_audit_policy_update_apply_azure_monitor_target_enabled
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _audit_policy_update_diagnostic_settings(
        cmd,
        workspace_name,
        resource_group_name,
        sql_pool_name=None,
        diagnostic_settings=None,
        category_name=None,
        log_analytics_target_state=None,
        log_analytics_workspace_resource_id=None,
        event_hub_target_state=None,
        event_hub_authorization_rule_id=None,
        event_hub_name=None):
    '''
    Update audit policy's diagnostic settings

    Creates, updates, deletes or "splits" the Azure Monitor diagnostic setting that
    carries the audit category (SQLSecurityAuditEvents or DevOpsOperationsAudit) so
    it matches the requested log-analytics / event-hub target states.

    Returns a list of rollback tuples ``(action, diagnostic_setting)`` where action
    is one of "create" / "update" / "delete" describing how to undo the change, or
    None when nothing was modified.
    '''
    # Fetch all audit diagnostic settings
    audit_diagnostic_settings = _fetch_all_audit_diagnostic_settings(diagnostic_settings, category_name)
    num_of_audit_diagnostic_settings = len(audit_diagnostic_settings)

    # If more than 1 audit diagnostic settings found then throw error
    if num_of_audit_diagnostic_settings > 1:
        raise CLIError('Multiple audit diagnostics settings are already enabled')

    diagnostic_settings_url = _get_diagnostic_settings_url(
        cmd=cmd,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        sql_pool_name=sql_pool_name)

    azure_monitor_client = cf_monitor(cmd.cli_ctx)

    # If no audit diagnostic settings found then create one if azure monitor is enabled
    if num_of_audit_diagnostic_settings == 0:
        if _is_audit_policy_state_enabled(log_analytics_target_state) or \
                _is_audit_policy_state_enabled(event_hub_target_state):
            created_diagnostic_setting = _audit_policy_create_diagnostic_setting(
                cmd=cmd,
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                sql_pool_name=sql_pool_name,
                category_name=category_name,
                log_analytics_target_state=log_analytics_target_state,
                log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
                event_hub_target_state=event_hub_target_state,
                event_hub_authorization_rule_id=event_hub_authorization_rule_id,
                event_hub_name=event_hub_name)

            # Return rollback data tuple
            return [("delete", created_diagnostic_setting)]

        # azure monitor is disabled - there is nothing to do
        return None

    # This leaves us with case when num_of_audit_diagnostic_settings is 1
    audit_diagnostic_setting = audit_diagnostic_settings[0]

    # Initialize actually updated azure monitor fields:
    # a target state of None means "keep what the existing setting already has";
    # a disabled target state clears the corresponding destination.
    if log_analytics_target_state is None:
        log_analytics_workspace_resource_id = audit_diagnostic_setting.workspace_id
    elif _is_audit_policy_state_disabled(log_analytics_target_state):
        log_analytics_workspace_resource_id = None

    if event_hub_target_state is None:
        event_hub_authorization_rule_id = audit_diagnostic_setting.event_hub_authorization_rule_id
        event_hub_name = audit_diagnostic_setting.event_hub_name
    elif _is_audit_policy_state_disabled(event_hub_target_state):
        event_hub_authorization_rule_id = None
        event_hub_name = None

    is_azure_monitor_target_enabled = log_analytics_workspace_resource_id is not None or \
        event_hub_authorization_rule_id is not None

    # Does the existing setting also carry categories other than the audit one?
    has_other_categories = next((log for log in audit_diagnostic_setting.logs
                                 if log.enabled and log.category != category_name), None) is not None

    # If there is no other categories except SQLSecurityAuditEvents\DevOpsOperationsAudit update or delete
    # the existing single diagnostic settings
    if not has_other_categories:
        # If azure monitor is enabled then update existing single audit diagnostic setting
        if is_azure_monitor_target_enabled:
            create_diagnostics_settings(
                client=azure_monitor_client.diagnostic_settings,
                name=audit_diagnostic_setting.name,
                resource_uri=diagnostic_settings_url,
                logs=audit_diagnostic_setting.logs,
                metrics=audit_diagnostic_setting.metrics,
                event_hub=event_hub_name,
                event_hub_rule=event_hub_authorization_rule_id,
                storage_account=audit_diagnostic_setting.storage_account_id,
                workspace=log_analytics_workspace_resource_id)

            # Return rollback data tuple
            return [("update", audit_diagnostic_setting)]

        # Azure monitor is disabled, delete existing single audit diagnostic setting
        azure_monitor_client.diagnostic_settings.delete(diagnostic_settings_url, audit_diagnostic_setting.name)

        # Return rollback data tuple
        return [("create", audit_diagnostic_setting)]

    # In case there are other categories in the existing single audit diagnostic setting a "split" must be performed:
    # 1. Disable SQLSecurityAuditEvents\DevOpsOperationsAudit category in found audit diagnostic setting
    # 2. Create new diagnostic setting with SQLSecurityAuditEvents\DevOpsOperationsAudit category,
    #    i.e. audit diagnostic setting

    # Build updated logs list with disabled SQLSecurityAuditEvents\DevOpsOperationsAudit category
    updated_logs = []

    LogSettings = cmd.get_models(
        'LogSettings',
        resource_type=ResourceType.MGMT_MONITOR,
        operation_group='diagnostic_settings')

    RetentionPolicy = cmd.get_models(
        'RetentionPolicy',
        resource_type=ResourceType.MGMT_MONITOR,
        operation_group='diagnostic_settings')

    for log in audit_diagnostic_setting.logs:
        if log.category == category_name:
            updated_logs.append(LogSettings(category=log.category, enabled=False,
                                            retention_policy=RetentionPolicy(enabled=False, days=0)))
        else:
            updated_logs.append(log)

    # Update existing diagnostic settings (audit category disabled, everything else untouched)
    create_diagnostics_settings(
        client=azure_monitor_client.diagnostic_settings,
        name=audit_diagnostic_setting.name,
        resource_uri=diagnostic_settings_url,
        logs=updated_logs,
        metrics=audit_diagnostic_setting.metrics,
        event_hub=audit_diagnostic_setting.event_hub_name,
        event_hub_rule=audit_diagnostic_setting.event_hub_authorization_rule_id,
        storage_account=audit_diagnostic_setting.storage_account_id,
        workspace=audit_diagnostic_setting.workspace_id)

    # Add original 'audit_diagnostic_settings' to rollback_data list
    rollback_data = [("update", audit_diagnostic_setting)]

    # Create new diagnostic settings with enabled SQLSecurityAuditEvents\DevOpsOperationsAudit category
    # only if azure monitor is enabled
    if is_azure_monitor_target_enabled:
        created_diagnostic_setting = _audit_policy_create_diagnostic_setting(
            cmd=cmd,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            sql_pool_name=sql_pool_name,
            category_name=category_name,
            log_analytics_target_state=log_analytics_target_state,
            log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
            event_hub_target_state=event_hub_target_state,
            event_hub_authorization_rule_id=event_hub_authorization_rule_id,
            event_hub_name=event_hub_name)

        # Add 'created_diagnostic_settings' to rollback_data list in reverse order
        rollback_data.insert(0, ("delete", created_diagnostic_setting))

    return rollback_data
Update audit policy's diagnostic settings
_audit_policy_update_diagnostic_settings
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _audit_policy_create_diagnostic_setting(
        cmd,
        resource_group_name,
        workspace_name,
        sql_pool_name=None,
        category_name=None,
        log_analytics_target_state=None,
        log_analytics_workspace_resource_id=None,
        event_hub_target_state=None,
        event_hub_authorization_rule_id=None,
        event_hub_name=None):
    '''
    Create audit diagnostic setting, i.e. containing single category - SQLSecurityAuditEvents or DevOpsOperationsAudit
    '''
    import inspect

    # Detect recorded-test execution by scanning the call stack for known test methods.
    test_methods = ["test_sql_ws_audit_policy_logentry_eventhub", "test_sql_pool_audit_policy_logentry_eventhub"]
    running_under_test = any(frame.function in test_methods for frame in inspect.stack())

    # For test environment the name should be constant, i.e. match the name written in recorded yaml file;
    # otherwise make it unique with a random suffix.
    if running_under_test:
        setting_name = category_name
        if log_analytics_target_state is not None:
            setting_name += '_LogAnalytics'
        if event_hub_target_state is not None:
            setting_name += '_EventHub'
    else:
        import uuid
        setting_name = category_name + '_' + str(uuid.uuid4())

    resource_uri = _get_diagnostic_settings_url(
        cmd=cmd,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        sql_pool_name=sql_pool_name)

    monitor_client = cf_monitor(cmd.cli_ctx)

    LogSettings = cmd.get_models(
        'LogSettings',
        resource_type=ResourceType.MGMT_MONITOR,
        operation_group='diagnostic_settings')
    RetentionPolicy = cmd.get_models(
        'RetentionPolicy',
        resource_type=ResourceType.MGMT_MONITOR,
        operation_group='diagnostic_settings')

    # Single enabled log category; retention is managed elsewhere so it is disabled here.
    audit_log = LogSettings(
        category=category_name,
        enabled=True,
        retention_policy=RetentionPolicy(enabled=False, days=0))

    return create_diagnostics_settings(
        client=monitor_client.diagnostic_settings,
        name=setting_name,
        resource_uri=resource_uri,
        logs=[audit_log],
        metrics=None,
        event_hub=event_hub_name,
        event_hub_rule=event_hub_authorization_rule_id,
        storage_account=None,
        workspace=log_analytics_workspace_resource_id)
Create audit diagnostic setting, i.e. containing single category - SQLSecurityAuditEvents or DevOpsOperationsAudit
_audit_policy_create_diagnostic_setting
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _audit_policy_validate_arguments(
        state=None,
        blob_storage_target_state=None,
        storage_account=None,
        storage_endpoint=None,
        storage_account_access_key=None,
        retention_days=None,
        log_analytics_target_state=None,
        log_analytics_workspace_resource_id=None,
        event_hub_target_state=None,
        event_hub_authorization_rule_id=None,
        event_hub_name=None):
    '''
    Validate input arguments

    Raises CLIError when the supplied combination of audit-policy arguments is
    inconsistent (e.g. target-specific arguments given while that target, or the
    whole policy, is disabled).
    '''
    blob_storage_arguments_provided = blob_storage_target_state is not None or \
        storage_account is not None or storage_endpoint is not None or \
        storage_account_access_key is not None or \
        retention_days is not None

    log_analytics_arguments_provided = log_analytics_target_state is not None or \
        log_analytics_workspace_resource_id is not None

    event_hub_arguments_provided = event_hub_target_state is not None or \
        event_hub_authorization_rule_id is not None or \
        event_hub_name is not None

    if not state and not blob_storage_arguments_provided and \
            not log_analytics_arguments_provided and not event_hub_arguments_provided:
        raise CLIError('Either state or blob storage or log analytics or event hub arguments are missing')

    if _is_audit_policy_state_enabled(state) and \
            blob_storage_target_state is None and log_analytics_target_state is None and \
            event_hub_target_state is None:
        raise CLIError('One of the following arguments must be enabled:'
                       ' blob-storage-target-state, log-analytics-target-state, event-hub-target-state')

    # BUGFIX: this check previously tested only 'event_hub_name', so passing
    # event-hub-target-state or event-hub-authorization-rule-id together with a
    # disabled state silently slipped through validation. Use the aggregated
    # event_hub_arguments_provided flag, matching the blob-storage and
    # log-analytics handling above.
    if _is_audit_policy_state_disabled(state) and \
            (blob_storage_arguments_provided or
             log_analytics_arguments_provided or
             event_hub_arguments_provided):
        raise CLIError('No additional arguments should be provided once state is disabled')

    if _is_audit_policy_state_none_or_disabled(blob_storage_target_state) and \
            (storage_account is not None or storage_endpoint is not None or
             storage_account_access_key is not None):
        raise CLIError('Blob storage account arguments cannot be specified'
                       ' if blob-storage-target-state is not provided or disabled')

    if _is_audit_policy_state_enabled(blob_storage_target_state):
        # Exactly one of storage-account / storage-endpoint must identify the target
        if storage_account is not None and storage_endpoint is not None:
            raise CLIError('storage-account and storage-endpoint cannot be provided at the same time')

        if storage_account is None and storage_endpoint is None:
            raise CLIError('Either storage-account or storage-endpoint must be provided')

    # Server upper limit
    max_retention_days = 3285

    if retention_days is not None and \
            (int(retention_days) <= 0 or int(retention_days) >= max_retention_days):
        raise CLIError('retention-days must be a positive number greater than zero and lower than {}'
                       .format(max_retention_days))

    if _is_audit_policy_state_none_or_disabled(log_analytics_target_state) and \
            log_analytics_workspace_resource_id is not None:
        raise CLIError('Log analytics workspace resource id cannot be specified'
                       ' if log-analytics-target-state is not provided or disabled')

    if _is_audit_policy_state_enabled(log_analytics_target_state) and \
            log_analytics_workspace_resource_id is None:
        raise CLIError('Log analytics workspace resource id must be specified'
                       ' if log-analytics-target-state is enabled')

    if _is_audit_policy_state_none_or_disabled(event_hub_target_state) and \
            (event_hub_authorization_rule_id is not None or event_hub_name is not None):
        raise CLIError('Event hub arguments cannot be specified if event-hub-target-state is not provided or disabled')

    if _is_audit_policy_state_enabled(event_hub_target_state) and event_hub_authorization_rule_id is None:
        raise CLIError('event-hub-authorization-rule-id must be specified if event-hub-target-state is enabled')
Validate input arguments
_audit_policy_validate_arguments
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def _get_diagnostic_settings(
        cmd, resource_group_name, workspace_name, sql_pool_name=None):
    '''
    Common code to get server or database diagnostic settings
    '''
    # Resolve the resource URI first; sql_pool_name=None yields the workspace-level URI.
    resource_uri = _get_diagnostic_settings_url(
        cmd=cmd,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        sql_pool_name=sql_pool_name)

    monitor_client = cf_monitor(cmd.cli_ctx)

    # Materialize the paged response into a plain list for callers.
    return list(monitor_client.diagnostic_settings.list(resource_uri))
Common code to get server or database diagnostic settings
_get_diagnostic_settings
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def workspace_audit_policy_show(
        cmd, client, workspace_name, resource_group_name):
    '''
    Show workspace audit policy
    '''
    # Workspace-level audit policy is tracked under the SQLSecurityAuditEvents category.
    audit_category = 'SQLSecurityAuditEvents'
    return _audit_policy_show(
        cmd=cmd,
        client=client,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        category_name=audit_category)
Show workspace audit policy
workspace_audit_policy_show
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT
def sqlpool_audit_policy_show(
        cmd, client, workspace_name, resource_group_name, sql_pool_name):
    '''
    Show sql pool audit policy
    '''
    # Pool-level audit policy is tracked under the SQLSecurityAuditEvents category.
    audit_category = 'SQLSecurityAuditEvents'
    return _audit_policy_show(
        cmd=cmd,
        client=client,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        sql_pool_name=sql_pool_name,
        category_name=audit_category)
Show sql pool audit policy
sqlpool_audit_policy_show
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py
MIT