Dataset columns:
  code: string, lengths 26 to 870k
  docstring: string, lengths 1 to 65.6k
  func_name: string, lengths 1 to 194
  language: string, 1 class
  repo: string, lengths 8 to 68
  path: string, lengths 5 to 194
  url: string, lengths 46 to 254
  license: string, 4 classes
def _get_versions(self, location):
    """Return the previous and current Kubernetes minor release versions, such as ("1.11.6", "1.12.4")."""
    supported_versions = self.cmd(
        "az aks get-versions -l {} --query 'values[*].patchVersions.keys(@)[]'".format(location)
    ).get_output_in_json()
    sorted_supported_versions = sorted(supported_versions, key=version_to_tuple, reverse=True)
    upgrade_version = sorted_supported_versions[0]
    # find the first version that doesn't start with the latest major.minor. prefix
    prefix = upgrade_version[:upgrade_version.rfind('.')]
    create_version = next(x for x in sorted_supported_versions if not x.startswith(prefix))
    return create_version, upgrade_version
Return the previous and current Kubernetes minor release versions, such as ("1.11.6", "1.12.4").
_get_versions
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
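The sorting key `version_to_tuple` used by the helpers above is referenced but not included in these rows; a minimal sketch, assuming it simply maps a dotted version string to a tuple of ints so that versions compare numerically rather than lexically:

def version_to_tuple(version):
    # Assumed helper: '1.12.4' -> (1, 12, 4), so sorted(..., key=version_to_tuple) orders numerically.
    return tuple(map(int, version.split('.')))

With a key like this, '1.9.1' sorts below '1.12.4', which a plain string comparison would get wrong.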
def _get_lts_versions(self, location):
    """Return the AKS versions that are marked as LTS in ascending order."""
    lts_versions = self.cmd(
        '''az aks get-versions -l {} --query "values[?contains(capabilities.supportPlan, 'AKSLongTermSupport')].patchVersions.keys(@)[]"'''.format(location)
    ).get_output_in_json()
    sorted_lts_versions = sorted(lts_versions, key=version_to_tuple, reverse=False)
    return sorted_lts_versions
Return the AKS versions that are marked as LTS in ascending order.
_get_lts_versions
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def _get_newer_non_lts_version(self, location, version):
    """Return the nearest newer non-LTS version of the specified version."""
    supported_versions = self.cmd(
        '''az aks get-versions -l {} --query "values[?!(contains(capabilities.supportPlan, 'AKSLongTermSupport'))].patchVersions.keys(@)[]"'''.format(location)
    ).get_output_in_json()
    newer_versions = [x for x in supported_versions if version_to_tuple(x) > version_to_tuple(version)]
    sorted_newer_versions = sorted(newer_versions, key=version_to_tuple, reverse=False)
    return sorted_newer_versions[0] if newer_versions else None
Return the nearest newer non-LTS version of the specified version.
_get_newer_non_lts_version
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_azure_service_mesh_enable_disable(self, resource_group, resource_group_location):
    """This test case exercises enabling and disabling the service mesh profile.

    It creates a cluster without the Azure service mesh profile, then enables it by running
    `aks mesh enable` and disables it by running `aks mesh disable`.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'ssh_key_value': self.generate_ssh_keys(),
        'revision': self._get_asm_supported_revision(resource_group_location),
    })

    # create cluster without --enable-azure-service-mesh
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '--aks-custom-headers=AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureServiceMeshPreview ' \
                 '--ssh-key-value={ssh_key_value} --output=json'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
    ])

    # enable azure service mesh
    update_cmd = 'aks mesh enable --resource-group={resource_group} --name={name} --revision={revision}'
    self.cmd(update_cmd, checks=[
        self.check('serviceMeshProfile.mode', 'Istio'),
    ])

    # disable azure service mesh
    update_cmd = 'aks mesh disable --resource-group={resource_group} --name={name} --yes'
    self.cmd(update_cmd, checks=[
        self.check('serviceMeshProfile.mode', 'Disabled'),
    ])

    # delete the cluster
    delete_cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait'
    self.cmd(delete_cmd, checks=[
        self.is_empty(),
    ])
This test case exercises enabling and disabling the service mesh profile. It creates a cluster without the Azure service mesh profile, then enables it by running `aks mesh enable` and disables it by running `aks mesh disable`.
test_aks_azure_service_mesh_enable_disable
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
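Several of these tests call `self._get_asm_supported_revision(...)`, which is defined elsewhere in the test class and not shown in these rows. A hedged sketch of what it plausibly does, reusing the `aks mesh get-revisions` output shape confirmed by `test_aks_azure_service_mesh_get_revisions` further down; the per-entry 'revision' key is an assumption:

def _get_asm_supported_revision(self, location):
    # Hypothetical sketch: pick one supported mesh revision for the location.
    revisions_cmd = 'aks mesh get-revisions -l {}'.format(location)
    revisions = self.cmd(revisions_cmd).get_output_in_json()
    # 'meshRevisions' is confirmed by the get-revisions test; 'revision' per entry is assumed.
    return revisions['meshRevisions'][0]['revision']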
def test_aks_azure_service_mesh_with_ingress_gateway(self, resource_group, resource_group_location):
    """This test case exercises enabling and disabling an ingress gateway.

    It creates a cluster with the Azure service mesh profile. After that, we enable an ingress
    gateway, then disable it.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'ssh_key_value': self.generate_ssh_keys(),
        'revision': self._get_asm_supported_revision(resource_group_location),
    })

    # create cluster with --enable-azure-service-mesh
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '--aks-custom-headers=AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureServiceMeshPreview ' \
                 '--ssh-key-value={ssh_key_value} ' \
                 '--enable-azure-service-mesh --revision={revision} --output=json'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('serviceMeshProfile.mode', 'Istio'),
    ])

    # enable ingress gateway
    update_cmd = 'aks mesh enable-ingress-gateway --resource-group={resource_group} --name={name} ' \
                 '--ingress-gateway-type Internal'
    self.cmd(update_cmd, checks=[
        self.check('serviceMeshProfile.mode', 'Istio'),
        self.check('serviceMeshProfile.istio.components.ingressGateways[0].mode', 'Internal'),
        self.check('serviceMeshProfile.istio.components.ingressGateways[0].enabled', True)
    ])

    # disable ingress gateway
    update_cmd = 'aks mesh disable-ingress-gateway --resource-group={resource_group} --name={name} ' \
                 '--ingress-gateway-type Internal --yes'
    self.cmd(update_cmd, checks=[
        self.check('serviceMeshProfile.mode', 'Istio'),
        self.check('serviceMeshProfile.istio.components.ingressGateways[0].mode', 'Internal'),
        self.check('serviceMeshProfile.istio.components.ingressGateways[0].enabled', None)
    ])

    # delete the cluster
    delete_cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait'
    self.cmd(delete_cmd, checks=[
        self.is_empty(),
    ])
This test case exercises enabling and disabling an ingress gateway. It creates a cluster with azure service mesh profile. After that, we enable an ingress gateway, then disable it.
test_aks_azure_service_mesh_with_ingress_gateway
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_azure_service_mesh_canary_upgrade(
    self, resource_group, resource_group_location
):
    """This test case exercises canary upgrade with the mesh upgrade command.

    It creates a cluster, enables azure service mesh, fetches the available upgrade revision,
    upgrades the cluster, then completes the upgrade.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name("cliakstest", 16)
    installed_revision = self._get_asm_supported_revision(resource_group_location)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "name": aks_name,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
            "revision": installed_revision,
        }
    )

    # create cluster with --enable-azure-service-mesh
    create_cmd = (
        "aks create --resource-group={resource_group} --name={name} --location={location} "
        "--aks-custom-headers=AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureServiceMeshPreview "
        "--ssh-key-value={ssh_key_value} "
        "--enable-azure-service-mesh --revision={revision} --output=json"
    )
    aks_cluster_create = self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("serviceMeshProfile.mode", "Istio"),
            self.exists("serviceMeshProfile.istio.revisions")
        ],
    ).get_output_in_json()
    cluster_create_revisions = aks_cluster_create["serviceMeshProfile"]["istio"]["revisions"]
    assert len(cluster_create_revisions) == 1
    assert installed_revision in cluster_create_revisions

    # get upgrades
    upgrade_revision = self._get_asm_upgrade_version(resource_group, "{name}")
    self.kwargs.update(
        {
            "upgrade_revision": upgrade_revision,
        }
    )

    # upgrade start
    upgrade_start_cmd = (
        "aks mesh upgrade start --revision {upgrade_revision} --resource-group={resource_group} --name={name}"
    )
    aks_cluster_upgrade_start = self.cmd(
        upgrade_start_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("serviceMeshProfile.mode", "Istio"),
        ],
    ).get_output_in_json()
    upgrade_start_revisions = aks_cluster_upgrade_start["serviceMeshProfile"]["istio"]["revisions"]
    print(upgrade_start_revisions)
    assert len(upgrade_start_revisions) == 2
    assert installed_revision in upgrade_start_revisions and upgrade_revision in upgrade_start_revisions

    # upgrade rollback
    upgrade_rollback_cmd = (
        "aks mesh upgrade rollback --resource-group={resource_group} --name={name} --yes"
    )
    aks_cluster_upgrade_rollback = self.cmd(
        upgrade_rollback_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("serviceMeshProfile.mode", "Istio"),
        ],
    ).get_output_in_json()
    upgrade_rollback_revisions = aks_cluster_upgrade_rollback["serviceMeshProfile"]["istio"]["revisions"]
    assert len(upgrade_rollback_revisions) == 1
    assert installed_revision in upgrade_rollback_revisions

    # upgrade start again
    self.cmd(
        upgrade_start_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("serviceMeshProfile.mode", "Istio"),
        ],
    )

    # upgrade complete
    upgrade_complete_cmd = (
        "aks mesh upgrade complete --resource-group={resource_group} --name={name} --yes"
    )
    aks_cluster_upgrade_complete = self.cmd(
        upgrade_complete_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("serviceMeshProfile.mode", "Istio"),
        ],
    ).get_output_in_json()
    upgrade_complete_revisions = aks_cluster_upgrade_complete["serviceMeshProfile"]["istio"]["revisions"]
    assert len(upgrade_complete_revisions) == 1
    assert upgrade_revision in upgrade_complete_revisions

    # delete the cluster
    delete_cmd = (
        "aks delete --resource-group={resource_group} --name={name} --yes --no-wait"
    )
    self.cmd(
        delete_cmd,
        checks=[
            self.is_empty(),
        ],
    )
This test case exercises canary upgrade with the mesh upgrade command. It creates a cluster, enables azure service mesh, fetches the available upgrade revision, upgrades the cluster, then completes the upgrade.
test_aks_azure_service_mesh_canary_upgrade
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
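The canary upgrade test above also calls `self._get_asm_upgrade_version(...)`, defined elsewhere in the class. A hypothetical sketch, assuming it queries `aks mesh get-upgrades` for the cluster; note that only the `compatibleWith` key is confirmed by the get-upgrades test below, so the 'upgrades' key used here is purely an assumption:

def _get_asm_upgrade_version(self, resource_group, name):
    # Hypothetical sketch: return one available mesh upgrade revision for the cluster.
    upgrades_cmd = 'aks mesh get-upgrades --resource-group={} --name={}'.format(resource_group, name)
    upgrades = self.cmd(upgrades_cmd).get_output_in_json()
    return upgrades['upgrades'][0]  # 'upgrades' key name is an assumption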
def test_aks_azure_service_mesh_with_pluginca(self, resource_group, resource_group_location):
    """This test case exercises providing plugin CA params with the mesh enable command.

    It creates a cluster, enables azure service mesh with plugin CA params, then deletes the cluster.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    akv_resource_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo'
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'ssh_key_value': self.generate_ssh_keys(),
        'akv_resource_id': akv_resource_id,
        'revision': self._get_asm_supported_revision(resource_group_location),
    })

    # create cluster
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '--aks-custom-headers=AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureServiceMeshPreview ' \
                 '--ssh-key-value={ssh_key_value}'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded')
    ])

    # enable azurekeyvaultsecretsprovider addon
    enable_cmd = 'aks enable-addons --addons azure-keyvault-secrets-provider --resource-group={resource_group} --name={name} -o json'
    self.cmd(enable_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check(
            'addonProfiles.azureKeyvaultSecretsProvider.enabled', True),
        self.check(
            'addonProfiles.azureKeyvaultSecretsProvider.config.enableSecretRotation', "false")
    ])

    # enable azure service mesh with pluginca
    update_cmd = 'aks mesh enable --resource-group={resource_group} --name={name} ' \
                 '--key-vault-id {akv_resource_id} ' \
                 '--ca-cert-object-name my-ca-cert ' \
                 '--ca-key-object-name my-ca-key ' \
                 '--cert-chain-object-name my-cert-chain ' \
                 '--root-cert-object-name my-root-cert ' \
                 '--revision {revision}'
    self.cmd(update_cmd, checks=[
        self.check('serviceMeshProfile.mode', 'Istio'),
        self.check('serviceMeshProfile.istio.certificateAuthority.plugin.keyVaultId', akv_resource_id),
        self.check('serviceMeshProfile.istio.certificateAuthority.plugin.certObjectName', 'my-ca-cert'),
        self.check('serviceMeshProfile.istio.certificateAuthority.plugin.keyObjectName', 'my-ca-key'),
        self.check('serviceMeshProfile.istio.certificateAuthority.plugin.rootCertObjectName', 'my-root-cert'),
        self.check('serviceMeshProfile.istio.certificateAuthority.plugin.certChainObjectName', 'my-cert-chain')
    ])

    # delete the cluster
    delete_cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait'
    self.cmd(delete_cmd, checks=[
        self.is_empty(),
    ])
This test case exercises providing plugin CA params with the mesh enable command. It creates a cluster, enables azure service mesh with plugin CA params, then deletes the cluster.
test_aks_azure_service_mesh_with_pluginca
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_azure_service_mesh_get_revisions(self):
    """This test case exercises getting all the available revisions for the location."""
    revisions_cmd = 'aks mesh get-revisions -l westus2'
    revisions = self.cmd(revisions_cmd).get_output_in_json()
    assert len(revisions['meshRevisions']) > 0
This test case exercises getting all the available revisions for the location.
test_aks_azure_service_mesh_get_revisions
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_azure_service_mesh_get_upgrades(self, resource_group, resource_group_location):
    """This test case exercises getting all the possible upgrades for azure service mesh enabled on the cluster.

    It creates a cluster, enables azure service mesh, then gets all the possible upgrades.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'ssh_key_value': self.generate_ssh_keys(),
        'revision': self._get_asm_supported_revision(resource_group_location),
    })

    # create cluster
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '--aks-custom-headers=AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureServiceMeshPreview ' \
                 '--ssh-key-value={ssh_key_value}'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded')
    ])

    # enable azure service mesh
    enable_cmd = 'aks mesh enable --resource-group={resource_group} --name={name} --revision={revision}'
    self.cmd(enable_cmd, checks=[
        self.check('serviceMeshProfile.mode', 'Istio'),
    ])

    upgrades_cmd = 'aks mesh get-upgrades --resource-group={resource_group} --name={name}'
    upgrades = self.cmd(upgrades_cmd).get_output_in_json()
    assert 'compatibleWith' in upgrades and len(upgrades['compatibleWith']) > 0

    # delete the cluster
    delete_cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait'
    self.cmd(delete_cmd, checks=[
        self.is_empty(),
    ])
This test case exercises getting all the possible upgrades for azure service mesh enabled on the cluster. It creates a cluster, enables azure service mesh, then gets all the possible upgrades.
test_aks_azure_service_mesh_get_upgrades
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_update_upgrade_settings(self, resource_group, resource_group_location):
    """This test case exercises enabling and disabling forceUpgrade override in cluster upgradeSettings."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'ssh_key_value': self.generate_ssh_keys(),
    })

    # create
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '--enable-managed-identity ' \
                 '--ssh-key-value={ssh_key_value}'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
        self.not_exists('upgradeSettings')
    ])

    # update upgrade settings
    self.cmd('aks update --resource-group={resource_group} --name={name} --upgrade-override-until 2020-01-01T22:30:17+00:00', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.not_exists('upgradeSettings.overrideSettings.forceUpgrade'),
        self.exists('upgradeSettings.overrideSettings.until')
    ])
    self.cmd('aks update --resource-group={resource_group} --name={name} --enable-force-upgrade', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('upgradeSettings.overrideSettings.forceUpgrade', True),
        self.exists('upgradeSettings.overrideSettings.until')
    ])
    self.cmd('aks update --resource-group={resource_group} --name={name} --enable-force-upgrade --upgrade-override-until 2020-02-22T22:30:17+00:00', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('upgradeSettings.overrideSettings.forceUpgrade', True),
        self.check('upgradeSettings.overrideSettings.until', '2020-02-22T22:30:17+00:00')
    ])
    self.cmd('aks update --resource-group={resource_group} --name={name} --disable-force-upgrade', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('upgradeSettings.overrideSettings.forceUpgrade', False),
        self.check('upgradeSettings.overrideSettings.until', '2020-02-22T22:30:17+00:00')
    ])

    # delete
    self.cmd(
        'aks delete -g {resource_group} -n {name} --yes --no-wait', checks=[self.is_empty()])
This test case exercises enabling and disabling forceUpgrade override in cluster upgradeSettings.
test_aks_update_upgrade_settings
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_upgrade_upgrade_settings(self, resource_group, resource_group_location):
    """This test case exercises enabling and disabling forceUpgrade override in cluster upgradeSettings."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'ssh_key_value': self.generate_ssh_keys(),
        'if_match': '*',
        'if_none_match': '*',
    })

    # create
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '--enable-managed-identity ' \
                 '--ssh-key-value={ssh_key_value} --if-none-match={if_none_match}'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
        self.not_exists('upgradeSettings')
    ])

    # upgrade upgrade settings
    self.cmd('aks upgrade --resource-group={resource_group} --name={name} --upgrade-override-until 2020-01-01T22:30:17+00:00 --yes --if-match={if_match}', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.not_exists('upgradeSettings.overrideSettings.forceUpgrade'),
        self.exists('upgradeSettings.overrideSettings.until')
    ])
    self.cmd('aks upgrade --resource-group={resource_group} --name={name} --enable-force-upgrade --yes', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('upgradeSettings.overrideSettings.forceUpgrade', True),
        self.exists('upgradeSettings.overrideSettings.until')
    ])
    self.cmd('aks upgrade --resource-group={resource_group} --name={name} --enable-force-upgrade --upgrade-override-until 2020-02-22T22:30:17+00:00 --yes', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('upgradeSettings.overrideSettings.forceUpgrade', True),
        self.check('upgradeSettings.overrideSettings.until', '2020-02-22T22:30:17+00:00')
    ])
    self.cmd('aks upgrade --resource-group={resource_group} --name={name} --disable-force-upgrade --yes', checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check('upgradeSettings.overrideSettings.forceUpgrade', False),
        self.check('upgradeSettings.overrideSettings.until', '2020-02-22T22:30:17+00:00')
    ])

    self.kwargs.update(
        {
            "if_match": "",
        }
    )
    # delete
    self.cmd(
        'aks delete -g {resource_group} -n {name} --yes --no-wait --if-match={if_match}', checks=[self.is_empty()])
This test case exercises enabling and disabling forceUpgrade override in cluster upgradeSettings.
test_aks_upgrade_upgrade_settings
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_upgrade_with_tier_switch(self, resource_group, resource_group_location):
    """This test case exercises creating a cluster on the LTS tier and switching it to the standard tier with the upgrade command."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name('cliakstest', 16)
    lts_versions = self._get_lts_versions(resource_group_location)
    if len(lts_versions) == 0:
        self.skipTest('No LTS versions found in the location')
    create_version = lts_versions[0]
    upgrade_version = self._get_newer_non_lts_version(resource_group_location, create_version)
    if upgrade_version is None:
        self.skipTest('No newer non-LTS versions found in the location')
    self.kwargs.update({
        'resource_group': resource_group,
        'name': aks_name,
        'location': resource_group_location,
        'k8s_version': create_version,
        'upgrade_k8s_version': upgrade_version,
        'ssh_key_value': self.generate_ssh_keys(),
    })

    # create with LTS premium tier
    create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} ' \
                 '-k {k8s_version} --enable-managed-identity ' \
                 '--ssh-key-value={ssh_key_value} --tier premium --k8s-support-plan AKSLongTermSupport'
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check("sku.tier", "Premium"),
        self.check("supportPlan", "AKSLongTermSupport"),
    ])

    # AKSLongTermSupport support plan does not work with standard tier
    fail_upgrade_cmd = (
        "aks upgrade --resource-group={resource_group} --name={name} "
        "--tier standard -k {upgrade_k8s_version} --yes"
    )
    self.cmd(fail_upgrade_cmd, expect_failure=True)

    # upgrade to standard tier with the KubernetesOfficial support plan
    upgrade_cmd = (
        "aks upgrade --resource-group={resource_group} --name={name} "
        "--tier standard --k8s-support-plan KubernetesOfficial -k {upgrade_k8s_version} --yes"
    )
    self.cmd(upgrade_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
        self.check("sku.tier", "Standard"),
        self.check("supportPlan", "KubernetesOfficial"),
    ])

    # delete
    self.cmd(
        'aks delete -g {resource_group} -n {name} --yes --no-wait', checks=[self.is_empty()])
This test case exercises creating a cluster on the LTS tier and switching it to the standard tier with the upgrade command.
test_aks_upgrade_with_tier_switch
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_create_with_app_routing_enabled(
    self, resource_group, resource_group_location
):
    """This test case exercises creating an AKS cluster with app routing addon enabled."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # create cluster with app routing addon enabled
    aks_name = self.create_random_name("cliakstest", 16)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )
    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value} --enable-app-routing "
    )
    self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
        ],
    )
This test case exercises creating an AKS cluster with app routing addon enabled.
test_aks_create_with_app_routing_enabled
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_approuting_enable_disable(
    self, resource_group, resource_group_location
):
    """This test case exercises enabling and disabling app routing addon in an AKS cluster."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0

    # create cluster without app routing
    aks_name = self.create_random_name("cliakstest", 16)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )
    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value}"
    )
    self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
        ],
    )

    # enable app routing
    enable_app_routing_cmd = (
        "aks approuting enable --resource-group={resource_group} --name={aks_name}"
    )
    self.cmd(
        enable_app_routing_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
        ],
    )

    # disable app routing
    disable_app_routing_cmd = "aks approuting disable --resource-group={resource_group} --name={aks_name} --yes"
    self.cmd(
        disable_app_routing_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", False),
        ],
    )

    # delete cluster
    delete_cmd = "aks delete --resource-group={resource_group} --name={aks_name} --yes --no-wait"
    self.cmd(delete_cmd, checks=[self.is_empty()])
This test case exercises enabling and disabling app routing addon in an AKS cluster.
test_aks_approuting_enable_disable
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_approuting_enable_with_keyvault_secrets_provider_addon_and_keyvault_id(
    self, resource_group, resource_group_location
):
    """This test case exercises enabling app routing addon in an AKS cluster along with keyvault secrets provider addon."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0

    # create cluster
    aks_name = self.create_random_name("cliakstest", 16)
    kv_name = self.create_random_name("cliakstestkv", 16)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "kv_name": kv_name,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    # create keyvault with rbac auth enabled
    create_keyvault_cmd = "keyvault create --resource-group={resource_group} --location={location} --name={kv_name} --enable-rbac-authorization=true"
    keyvault = self.cmd(
        create_keyvault_cmd,
        checks=[
            self.check("properties.provisioningState", "Succeeded"),
            self.check("properties.enableRbacAuthorization", True),
            self.check("name", kv_name),
        ],
    ).get_output_in_json()
    keyvault_id = keyvault["id"]
    self.kwargs.update({"keyvault_id": keyvault_id})

    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value} "
    )
    self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
        ],
    )

    # enable app routing with keyvault secrets provider addon enabled
    enable_app_routing_cmd = "aks approuting enable --enable-kv --attach-kv {keyvault_id} --resource-group={resource_group} --name={aks_name}"
    self.cmd(
        enable_app_routing_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
            self.check("addonProfiles.azureKeyvaultSecretsProvider.enabled", True),
        ],
    )

    # delete cluster
    delete_cmd = "aks delete --resource-group={resource_group} --name={aks_name} --yes --no-wait"
    self.cmd(delete_cmd, checks=[self.is_empty()])
This test case exercises enabling app routing addon in an AKS cluster along with keyvault secrets provider addon.
test_aks_approuting_enable_with_keyvault_secrets_provider_addon_and_keyvault_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_approuting_update(self, resource_group, resource_group_location):
    """This test case exercises updating app routing addon in an AKS cluster."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0

    aks_name = self.create_random_name("cliakstest", 16)
    kv_name = self.create_random_name("cliakstestkv", 16)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "kv_name": kv_name,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    # create keyvault with rbac auth enabled
    create_keyvault_cmd = "keyvault create --resource-group={resource_group} --location={location} --name={kv_name} --enable-rbac-authorization=true"
    keyvault = self.cmd(
        create_keyvault_cmd,
        checks=[
            self.check("properties.provisioningState", "Succeeded"),
            self.check("properties.enableRbacAuthorization", True),
            self.check("name", kv_name),
        ],
    ).get_output_in_json()
    keyvault_id = keyvault["id"]
    self.kwargs.update({"keyvault_id": keyvault_id})

    # create cluster with app routing enabled
    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value} --enable-app-routing"
    )
    result = self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
        ],
    ).get_output_in_json()
    object_id = result["ingressProfile"]["webAppRouting"]["identity"]["objectId"]
    self.kwargs.update({"object_id": object_id})

    # update with enable_rbac_authorization flag in keyvault set to true
    update_cmd = (
        "aks approuting update --resource-group={resource_group} --name={aks_name} --enable-kv "
        "--attach-kv {keyvault_id}"
    )
    self.cmd(
        update_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
            self.check("addonProfiles.azureKeyvaultSecretsProvider.enabled", True),
        ],
    )

    # update keyvault with rbac auth disabled
    update_keyvault_cmd = "keyvault update --resource-group={resource_group} --name={kv_name} --enable-rbac-authorization=false"
    self.cmd(
        update_keyvault_cmd,
        checks=[
            self.check("properties.provisioningState", "Succeeded"),
            self.check("properties.enableRbacAuthorization", False),
            self.check("name", kv_name),
        ],
    )

    # update with enable_rbac_authorization flag in keyvault set to false
    update_cmd = (
        "aks approuting update --resource-group={resource_group} --name={aks_name} "
        "--attach-kv {keyvault_id}"
    )
    self.cmd(
        update_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
            self.check("addonProfiles.azureKeyvaultSecretsProvider.enabled", True),
        ],
    )

    check_access_policy_cmd = "az keyvault show --resource-group={resource_group} --name={kv_name} --query \"properties.accessPolicies[?objectId=='{object_id}']\" -o json"
    self.cmd(
        check_access_policy_cmd,
        checks=[
            self.check("length(@)", 1),
            self.check("[0].objectId", "{object_id}"),
            self.check("[0].permissions.certificates", ["Get"]),
            self.check("[0].permissions.keys", None),
            self.check("[0].permissions.secrets", ["Get"]),
            self.check("[0].permissions.storage", None),
        ],
    )

    # delete cluster
    delete_cmd = "aks delete --resource-group={resource_group} --name={aks_name} --yes --no-wait"
    self.cmd(delete_cmd, checks=[self.is_empty()])
This test case exercises updating app routing addon in an AKS cluster.
test_aks_approuting_update
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_approuting_update_with_monitoring_addon_enabled(self, resource_group, resource_group_location):
    """This test case exercises updating app routing addon in an AKS cluster with monitoring addon enabled."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0

    aks_name = self.create_random_name("cliakstest", 16)
    kv_name = self.create_random_name("cliakstestkv", 16)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "kv_name": kv_name,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    # create keyvault with rbac auth enabled
    create_keyvault_cmd = "keyvault create --resource-group={resource_group} --location={location} --name={kv_name} --enable-rbac-authorization=true"
    keyvault = self.cmd(
        create_keyvault_cmd,
        checks=[
            self.check("properties.provisioningState", "Succeeded"),
            self.check("properties.enableRbacAuthorization", True),
            self.check("name", kv_name),
        ],
    ).get_output_in_json()
    keyvault_id = keyvault["id"]
    self.kwargs.update({"keyvault_id": keyvault_id})

    # create cluster with app routing and monitoring addon enabled
    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value} --enable-app-routing --enable-addons monitoring"
    )
    self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("addonProfiles.omsagent.enabled", True),
            self.check("ingressProfile.webAppRouting.enabled", True),
        ],
    ).get_output_in_json()

    # update with enable_rbac_authorization flag in keyvault set to true
    update_cmd = (
        "aks approuting update --resource-group={resource_group} --name={aks_name} --enable-kv "
        "--attach-kv {keyvault_id}"
    )
    self.cmd(
        update_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
            self.check("addonProfiles.azureKeyvaultSecretsProvider.enabled", True),
        ],
    )

    # delete cluster
    delete_cmd = "aks delete --resource-group={resource_group} --name={aks_name} --yes --no-wait"
    self.cmd(delete_cmd, checks=[self.is_empty()])
This test case exercises updating app routing addon in an AKS cluster with monitoring addon enabled.
test_aks_approuting_update_with_monitoring_addon_enabled
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_approuting_zone_add_delete_list(
    self, resource_group, resource_group_location
):
    """This test case exercises adding, deleting, and listing DNS zones for the app routing addon in an AKS cluster."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0

    aks_name = self.create_random_name("cliakstest", 16)
    dns_zone_1 = self.create_random_name("cliakstest", 16) + ".com"
    dns_zone_2 = self.create_random_name("cliakstest", 16) + ".com"
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "dns_zone_1": dns_zone_1,
            "dns_zone_2": dns_zone_2,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    create_dns_zone_cmd_1 = "network dns zone create --resource-group={resource_group} --name {dns_zone_1}"
    dns_zone_result = self.cmd(
        create_dns_zone_cmd_1,
        checks=[
            self.check("name", dns_zone_1),
        ],
    ).get_output_in_json()
    dns_zone_id_1 = dns_zone_result["id"]

    create_dns_zone_cmd_2 = "network dns zone create --resource-group={resource_group} --name {dns_zone_2}"
    dns_zone_result = self.cmd(
        create_dns_zone_cmd_2,
        checks=[
            self.check("name", dns_zone_2),
        ],
    ).get_output_in_json()
    dns_zone_id_2 = dns_zone_result["id"]

    self.kwargs.update(
        {"dns_zone_id_1": dns_zone_id_1, "dns_zone_id_2": dns_zone_id_2}
    )

    # create cluster with app routing enabled
    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value} --enable-app-routing"
    )
    self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
            self.check("ingressProfile.webAppRouting.dnsZoneResourceIds", None),
        ],
    )

    # add dns zone
    add_dns_zone_cmd = "aks approuting zone add --resource-group={resource_group} --name={aks_name} --ids {dns_zone_id_1}"
    self.cmd(
        add_dns_zone_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check(
                "ingressProfile.webAppRouting.dnsZoneResourceIds[0]", dns_zone_id_1
            ),
        ],
    )

    # add dns zone with --attach-zones flag
    add_dns_zone_cmd = "aks approuting zone add --resource-group={resource_group} --name={aks_name} --ids {dns_zone_id_2} --attach-zones"
    self.cmd(
        add_dns_zone_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check(
                "ingressProfile.webAppRouting.dnsZoneResourceIds[0]", dns_zone_id_1
            ),
            self.check(
                "ingressProfile.webAppRouting.dnsZoneResourceIds[1]", dns_zone_id_2
            ),
        ],
    )

    # list dns zone
    list_dns_zone_cmd = "aks approuting zone list --resource-group={resource_group} --name={aks_name}"
    self.cmd(
        list_dns_zone_cmd,
        checks=[
            self.check("length(@)", 2),
            self.check("[0].resource_group", resource_group),
            self.check("[0].type", "dnszones"),
            self.check("[0].name", dns_zone_1),
            self.check("[1].resource_group", resource_group),
            self.check("[1].type", "dnszones"),
            self.check("[1].name", dns_zone_2),
        ],
    )

    # delete dns zone
    delete_dns_zone_cmd = "aks approuting zone delete --resource-group={resource_group} --name={aks_name} --ids {dns_zone_id_1} --yes"
    self.cmd(
        delete_dns_zone_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check(
                "ingressProfile.webAppRouting.dnsZoneResourceIds[0]", dns_zone_id_2
            ),
        ],
    )

    # delete cluster
    delete_cmd = "aks delete --resource-group={resource_group} --name={aks_name} --yes --no-wait"
    self.cmd(delete_cmd, checks=[self.is_empty()])
This test case exercises adding, deleting, and listing DNS zones for the app routing addon in an AKS cluster.
test_aks_approuting_zone_add_delete_list
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def test_aks_approuting_zone_update(self, resource_group, resource_group_location):
    """This test case exercises updating zones for the app routing addon in an AKS cluster."""
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0

    aks_name = self.create_random_name("cliakstest", 16)
    dns_zone_1 = self.create_random_name("cliakstest", 16) + ".com"
    dns_zone_2 = self.create_random_name("cliakstest", 16) + ".com"
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "aks_name": aks_name,
            "dns_zone_1": dns_zone_1,
            "dns_zone_2": dns_zone_2,
            "location": resource_group_location,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    create_dns_zone_cmd_1 = "network dns zone create --resource-group={resource_group} --name {dns_zone_1}"
    dns_zone_result_1 = self.cmd(
        create_dns_zone_cmd_1,
        checks=[
            self.check("name", dns_zone_1),
        ],
    ).get_output_in_json()
    dns_zone_id_1 = dns_zone_result_1["id"]

    create_dns_zone_cmd_2 = "network dns zone create --resource-group={resource_group} --name {dns_zone_2}"
    dns_zone_result_2 = self.cmd(
        create_dns_zone_cmd_2,
        checks=[
            self.check("name", dns_zone_2),
        ],
    ).get_output_in_json()
    dns_zone_id_2 = dns_zone_result_2["id"]

    self.kwargs.update(
        {"dns_zone_id_1": dns_zone_id_1, "dns_zone_id_2": dns_zone_id_2}
    )

    # create cluster with app routing enabled
    create_cmd = (
        "aks create --resource-group={resource_group} --name={aks_name} --location={location} "
        "--ssh-key-value={ssh_key_value} --enable-app-routing"
    )
    self.cmd(
        create_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("ingressProfile.webAppRouting.enabled", True),
            self.check("ingressProfile.webAppRouting.dnsZoneResourceIds", None),
        ],
    )

    # add dns zone
    add_dns_zone_cmd = "aks approuting zone add --resource-group={resource_group} --name={aks_name} --ids {dns_zone_id_1}"
    self.cmd(
        add_dns_zone_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check(
                "ingressProfile.webAppRouting.dnsZoneResourceIds[0]", dns_zone_id_1
            ),
        ],
    )

    # update dns zone
    update_dns_zone_cmd = "aks approuting zone update --resource-group={resource_group} --name={aks_name} --ids {dns_zone_id_2}"
    self.cmd(
        update_dns_zone_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check(
                "ingressProfile.webAppRouting.dnsZoneResourceIds[0]", dns_zone_id_2
            ),
        ],
    )

    # delete cluster
    delete_cmd = "aks delete --resource-group={resource_group} --name={aks_name} --yes --no-wait"
    self.cmd(delete_cmd, checks=[self.is_empty()])
This test case exercises updating zones for the app routing addon in an AKS cluster.
test_aks_approuting_zone_update
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_aks_commands.py
MIT
def validate_lab_vm_create(cmd, args):
    """ Validates parameters for lab vm create and updates args. """
    formula = None
    collection = [args.image, args.formula]
    if not _single(collection):
        raise ArgumentUsageError("usage error: [--image name --image-type type | --formula name]")
    if args.formula and (args.image or args.image_type):
        raise ArgumentUsageError("usage error: [--image name --image-type type | --formula name]")

    if args.formula:
        formula = _get_formula(cmd.cli_ctx, args)

    _validate_location(cmd.cli_ctx, args)
    _validate_expiration_date(args)
    _validate_other_parameters(args, formula)
    validate_artifacts(cmd, args)
    _validate_image_argument(cmd.cli_ctx, args, formula)
    _validate_network_parameters(cmd.cli_ctx, args, formula)
    validate_authentication_type(args, formula)
Validates parameters for lab vm create and updates args.
validate_lab_vm_create
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
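The predicates `_single` and `_any` used by the validators above and below are defined elsewhere in validators.py; minimal sketches consistent with how they are called here (exactly-one-truthy and any-truthy over a list), offered as assumptions rather than the module's actual definitions:

def _single(collection):
    # Assumed helper: True when exactly one element of the collection is truthy.
    return sum(1 for x in collection if x) == 1


def _any(collection):
    # Assumed helper: True when at least one element of the collection is truthy.
    return any(collection)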
def validate_lab_vm_list(cmd, args):
    """ Validates parameters for lab vm list and updates args. """
    from azure.mgmt.core.tools import resource_id, is_valid_resource_id
    filters = has_value(args.filters) or False
    environment = has_value(args.environment) or False
    args_all = has_value(args.all) or False
    claimable = has_value(args.claimable) or False

    collection = [filters, args_all, claimable]
    if _any(collection) and not _single(collection):
        raise ArgumentUsageError("usage error: [--filters FILTER | [[--all | --claimable][--environment ENVIRONMENT]]")

    collection = [filters, environment]
    if _any(collection) and not _single(collection):
        raise ArgumentUsageError("usage error: [--filters FILTER | [[--all | --claimable][--environment ENVIRONMENT]]")

    if has_value(args.filters):
        return

    # Retrieve all the vms of the lab
    if args_all and args.all.to_serialized_data() is True:
        args.filters = None
    # Retrieve all the vms claimable by user
    elif claimable and args.claimable.to_serialized_data() is True:
        args.filters = 'properties/allowClaim'
    # Default to retrieving users vms only
    else:
        # Find out owner object id
        if not has_value(args.object_id):
            args.filters = "Properties/ownerObjectId eq '{}'".format(_get_owner_object_id(cmd.cli_ctx))

    if environment:
        if not is_valid_resource_id(args.environment.to_serialized_data()):
            from azure.cli.core.commands.client_factory import get_subscription_id
            args.environment = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                           resource_group=args.resource_group.to_serialized_data(),
                                           namespace='Microsoft.DevTestLab',
                                           type='labs',
                                           name=args.lab_name.to_serialized_data(),
                                           child_type_1='users',
                                           child_name_1=_get_owner_object_id(cmd.cli_ctx),
                                           child_type_2='environments',
                                           child_name_2=args.environment.to_serialized_data())
        if not filters:
            args.filters = "Properties/environmentId eq '{}'".format(args.environment.to_serialized_data())
        else:
            args.filters = "{} and Properties/environmentId eq '{}'".format(args.filters.to_serialized_data(),
                                                                            args.environment.to_serialized_data())
Validates parameters for lab vm list and updates args.
validate_lab_vm_list
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def _validate_location(cli_ctx, args):
    """ Selects the default location of the lab when location is not provided. """
    if not has_value(args.location):
        from .custom import LabGet
        result = LabGet(cli_ctx=cli_ctx)(command_args={
            "name": args.lab_name.to_serialized_data(),
            "resource_group": args.resource_group.to_serialized_data()
        })
        args.location = result['location']
Selects the default location of the lab when location is not provided.
_validate_location
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def _validate_expiration_date(args):
    """ Validates expiration date if provided. """
    if has_value(args.expiration_date):
        import datetime
        import dateutil.parser
        if datetime.datetime.utcnow() >= dateutil.parser.parse(args.expiration_date.to_serialized_data()):
            raise ArgumentUsageError(
                "Expiration date '{}' must be in future.".format(args.expiration_date.to_serialized_data()))
Validates expiration date if provided.
_validate_expiration_date
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def _validate_network_parameters(cli_ctx, args, formula=None):
    """ Updates args for virtual network and subnet parameters """
    from .aaz.latest.lab.vnet import List as LabVnetList, Get as LabVnetGet
    lab_vnet = None

    if formula and formula.get('formulaContent'):
        if formula['formulaContent'].get('labVirtualNetworkId'):
            args.vnet_name = \
                args.vnet_name or \
                formula['formulaContent']['labVirtualNetworkId'].split('/')[-1]
        if formula['formulaContent'].get('labSubnetName'):
            args.lab_subnet_name = args.lab_subnet_name or formula['formulaContent']['labSubnetName']
            args.disallow_public_ip_address = formula['formulaContent']['disallowPublicIpAddress']

    # User did not provide vnet and not selected from formula
    if not has_value(args.vnet_name):
        lab_vnets = LabVnetList(cli_ctx=cli_ctx)(command_args={
            "lab_name": args.lab_name.to_serialized_data(),
            "resource_group": args.resource_group.to_serialized_data(),
            "top": 1
        })
        lab_vnets = list(lab_vnets)
        if not lab_vnets:
            err = "Unable to find any virtual network in the '{}' lab.".format(args.lab_name)
            raise HttpResponseError(err)
        lab_vnet = lab_vnets[0]
        args.vnet_name = lab_vnet['name']
        args.lab_virtual_network_id = lab_vnet['id']
    # User did provide vnet or has been selected from formula
    else:
        lab_vnet = LabVnetGet(cli_ctx=cli_ctx)(command_args={
            "lab_name": args.lab_name.to_serialized_data(),
            "name": args.vnet_name.to_serialized_data(),
            "resource_group": args.resource_group.to_serialized_data(),
            "top": 1
        })
        args.lab_virtual_network_id = lab_vnet['id']

    # User did not provide subnet and not selected from formula
    if not has_value(args.subnet):
        args.lab_subnet_name = lab_vnet['subnetOverrides'][0]['labSubnetName']

    _validate_ip_configuration(args, lab_vnet)
Updates args for virtual network and subnet parameters
_validate_network_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def _validate_ip_configuration(args, lab_vnet=None):
    """ Updates args with network_interface & disallow_public_ip_address """
    # case 1: User selecting "shared" ip configuration
    if args.ip_configuration.to_serialized_data() == 'shared':
        rule = _inbound_rule_from_os(args)
        public_ip_config = {'inboundNatRules': [rule]}
        nic_properties = {'sharedPublicIpAddressConfiguration': public_ip_config}
        args.network_interface = nic_properties
        args.disallow_public_ip_address = True
    # case 2: User selecting "public" ip configuration
    elif args.ip_configuration.to_serialized_data() == 'public':
        args.disallow_public_ip_address = False
    # case 3: User selecting "private" ip configuration
    elif args.ip_configuration.to_serialized_data() == 'private':
        args.disallow_public_ip_address = True
    # case 4: User did not select any ip configuration preference
    elif not has_value(args.ip_configuration):
        # case 5: lab virtual network was selected from user's option / formula default then use it for look-up
        if lab_vnet:
            # Default to shared ip configuration based on os type only if inbound nat rules exist on the
            # shared configuration of the selected lab's virtual network
            if lab_vnet.get('subnetOverrides') and \
                    lab_vnet['subnetOverrides'][0].get('sharedPublicIpAddressConfiguration') and \
                    lab_vnet['subnetOverrides'][0]['sharedPublicIpAddressConfiguration'].get('allowedPorts'):
                rule = _inbound_rule_from_os(args)
                public_ip_config = {'inboundNatRules': [rule]}
                nic_properties = {'sharedPublicIpAddressConfiguration': public_ip_config}
                args.network_interface = nic_properties
                args.disallow_public_ip_address = True
            elif lab_vnet.get('subnetOverrides') and lab_vnet['subnetOverrides'][0].get(
                    'usePublicIpAddressPermission') == 'Allow':
                args.disallow_public_ip_address = False
            else:
                args.disallow_public_ip_address = True
    # case 6: User selecting invalid value for ip configuration
    else:
        raise ArgumentUsageError("incorrect value for ip-configuration: {}".format(
            args.ip_configuration.to_serialized_data()))
Updates args with network_interface & disallow_public_ip_address
_validate_ip_configuration
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
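`_inbound_rule_from_os` is defined elsewhere in the module. A hypothetical sketch, assuming it builds a shared-public-IP inbound NAT rule whose backend port depends on the VM OS (RDP for Windows, SSH otherwise); both the port mapping and the rule's dictionary shape are assumptions:

def _inbound_rule_from_os(args):
    # Hypothetical sketch: map OS type to an inbound NAT rule for the shared public IP configuration.
    if args.os_type.to_serialized_data().lower() == 'windows':
        return {'transportProtocol': 'Tcp', 'backendPort': 3389}  # RDP (assumed shape)
    return {'transportProtocol': 'Tcp', 'backendPort': 22}  # SSH (assumed shape)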
def _validate_image_argument(cli_ctx, args, formula=None):
    """ Update args for image based on image or formula """
    if formula and formula.get('formulaContent'):
        if formula['formulaContent'].get('galleryImageReference'):
            gallery_image_reference = formula['formulaContent']['galleryImageReference']
            args.gallery_image_reference = {
                'offer': gallery_image_reference['offer'],
                'publisher': gallery_image_reference['publisher'],
                'os_type': gallery_image_reference['osType'],
                'sku': gallery_image_reference['sku'],
                'version': gallery_image_reference['version']
            }
            args.os_type = gallery_image_reference['osType']
            return
        if formula['formulaContent'].get('customImageId'):
            # Custom image id from the formula is in the form of "customimages/{name}"
            args.image = formula['formulaContent']['customImageId'].split('/')[-1]
            args.image_type = 'custom'

    if args.image_type == 'gallery':
        _use_gallery_image(cli_ctx, args)
    elif args.image_type == 'custom':
        _use_custom_image(cli_ctx, args)
    else:
        raise ArgumentUsageError("incorrect value for image-type: '{}'. Allowed values: gallery or custom".format(
            args.image_type.to_serialized_data()))
Update args for image based on image or formula
_validate_image_argument
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def _use_gallery_image(cli_ctx, args):
    """ Retrieve gallery image from lab and update args """
    from .aaz.latest.lab.gallery_image import List as GalleryImageList
    odata_filter = ODATA_NAME_FILTER.format(args.image.to_serialized_data())
    gallery_images = GalleryImageList(cli_ctx=cli_ctx)(command_args={
        "lab_name": args.lab_name.to_serialized_data(),
        "resource_group": args.resource_group.to_serialized_data(),
        "filter": odata_filter
    })
    gallery_images = list(gallery_images)
    if not gallery_images:
        err = "Unable to find image name '{}' in the '{}' lab Gallery.".format(args.image.to_serialized_data(),
                                                                               args.lab_name.to_serialized_data())
        raise HttpResponseError(err)
    if len(gallery_images) > 1:
        err = "Found more than 1 image with name '{}'. Please pick one from {}"
        raise HttpResponseError(err.format(args.image.to_serialized_data(), [x['name'] for x in gallery_images]))
    image_reference = gallery_images[0]['imageReference']
    args.gallery_image_reference = {
        'offer': image_reference['offer'],
        'publisher': image_reference['publisher'],
        'os_type': image_reference['osType'],
        'sku': image_reference['sku'],
        'version': image_reference['version']
    }
    args.os_type = image_reference['osType']
Retrieve gallery image from lab and update args
_use_gallery_image
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
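`ODATA_NAME_FILTER` is a module-level constant not shown in these rows. Given that it is formatted with a single name and passed as an OData `filter`, it is presumably something along these lines:

# Assumed constant: OData filter template that matches a resource by name.
ODATA_NAME_FILTER = "name eq '{}'"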
def _use_custom_image(cli_ctx, args):
    """ Retrieve custom image from lab and update args """
    from azure.mgmt.core.tools import is_valid_resource_id
    if is_valid_resource_id(args.image.to_serialized_data()):
        args.custom_image_id = args.image
    else:
        from .aaz.latest.lab.custom_image import List as CustomImageList
        odata_filter = ODATA_NAME_FILTER.format(args.image.to_serialized_data())
        custom_images = CustomImageList(cli_ctx=cli_ctx)(command_args={
            "lab_name": args.lab_name.to_serialized_data(),
            "resource_group": args.resource_group.to_serialized_data(),
            "filter": odata_filter
        })
        custom_images = list(custom_images)
        if not custom_images:
            err = "Unable to find custom image name '{}' in the '{}' lab.".format(args.image.to_serialized_data(),
                                                                                  args.lab_name.to_serialized_data())
            raise HttpResponseError(err)
        if len(custom_images) > 1:
            err = "Found more than 1 image with name '{}'. Please pick one from {}"
            raise HttpResponseError(err.format(args.image.to_serialized_data(), [x['name'] for x in custom_images]))
        args.custom_image_id = custom_images[0]['id']

        if custom_images[0].get('vm'):
            if custom_images[0]['vm'].get('windowsOsInfo'):
                os_type = "Windows"
            else:
                os_type = "Linux"
        elif custom_images[0].get('vhd'):
            os_type = custom_images[0]['vhd']['os_type']
        else:
            raise HttpResponseError("OS type cannot be inferred from the custom image {}".format(custom_images[0]['id']))
        args.os_type = os_type
Retrieve custom image from lab and update args
_use_custom_image
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def _get_formula(cli_ctx, args):
    """ Retrieve formula image from lab """
    from .aaz.latest.lab.formula import List as FormulaList
    odata_filter = ODATA_NAME_FILTER.format(args.formula)
    formula_images = FormulaList(cli_ctx=cli_ctx)(command_args={
        "lab_name": args.lab_name.to_serialized_data(),
        "resource_group": args.resource_group.to_serialized_data(),
        "filter": odata_filter
    })
    formula_images = list(formula_images)
    if not formula_images:
        err = "Unable to find formula name '{}' in the '{}' lab.".format(args.formula, args.lab_name)
        raise HttpResponseError(err)
    if len(formula_images) > 1:
        err = "Found more than 1 formula with name '{}'. Please pick one from {}"
        raise HttpResponseError(err.format(args.formula, [x['name'] for x in formula_images]))
    return formula_images[0]
Retrieve formula image from lab
_get_formula
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/validators.py
MIT
def claim_vm(cmd, lab_name=None, name=None, resource_group_name=None): """ Command to claim a VM in the Azure DevTest Lab""" if name is None: from .aaz.latest.lab import ClaimAnyVm result = ClaimAnyVm(cli_ctx=cmd.cli_ctx)(command_args={ "name": lab_name, "resource_group": resource_group_name }) return result result = LabVmClaim(cli_ctx=cmd.cli_ctx)(command_args={ "name": name, "lab_name": lab_name, "resource_group": resource_group_name }) return result
Command to claim a VM in the Azure DevTest Lab
claim_vm
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/custom.py
MIT
def export_artifacts(formula): """ Exports artifacts from the given formula. This method removes some of the properties of the artifact model as they do not play an important part for users in create or read contexts. """ artifacts = [] if formula and formula.get('formulaContent') and formula['formulaContent'].get('artifacts'): artifacts = formula['formulaContent']['artifacts'] for artifact in formula['formulaContent']['artifacts']: artifact.pop('status', None) artifact.pop('deploymentStatusMessage', None) artifact.pop('vmExtensionStatusMessage', None) artifact.pop('installTime', None) return artifacts
Exports artifacts from the given formula. This method removes some of the properties of the artifact model as they do not play an important part for users in create or read contexts.
export_artifacts
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/lab/_format.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/lab/_format.py
MIT
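The same cleanup export_artifacts performs, sketched as a standalone function over a plain artifact list.

# Keys that only describe deployment status at runtime and are dropped
# from create/read output.
RUNTIME_STATUS_KEYS = ('status', 'deploymentStatusMessage', 'vmExtensionStatusMessage', 'installTime')

def strip_runtime_status(artifacts):
    for artifact in artifacts:
        for key in RUNTIME_STATUS_KEYS:
            artifact.pop(key, None)
    return artifacts

print(strip_runtime_status([{'artifactId': '/artifacts/a1', 'status': 'Succeeded', 'installTime': '2019-01-01'}]))
# [{'artifactId': '/artifacts/a1'}]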
def create_bot_json(cmd, client, resource_group_name, resource_name, logger, app_password=None, # pylint:disable=too-many-locals raw_bot_properties=None, password_only=True): """ :param cmd: :param client: :param resource_group_name: :param resource_name: :param logger: :param app_password: :param raw_bot_properties: :return: Dictionary """ if not raw_bot_properties: raw_bot_properties = client.bots.get( resource_group_name=resource_group_name, resource_name=resource_name ) # Initialize names bot_file and secret to capture botFilePath and botFileSecret values from the application's # settings. bot_file = None bot_file_secret = None profile = Profile(cli_ctx=cmd.cli_ctx) if not app_password: site_name = WebAppOperations.get_bot_site_name(raw_bot_properties.properties.endpoint) app_settings = WebAppOperations.get_app_settings( cmd=cmd, resource_group_name=resource_group_name, name=site_name ) app_password_values = [item['value'] for item in app_settings if item['name'] == 'MicrosoftAppPassword'] app_password = app_password_values[0] if app_password_values else None if not app_password: bot_file_values = [item['value'] for item in app_settings if item['name'] == 'botFilePath'] bot_file = bot_file_values[0] if bot_file_values else None bot_file_secret_values = [item['value'] for item in app_settings if item['name'] == 'botFileSecret'] bot_file_secret = bot_file_secret_values[0] if bot_file_secret_values else None if not bot_file and not app_password: bot_site_name = WebAppOperations.get_bot_site_name(raw_bot_properties.properties.endpoint) scm_url = WebAppOperations.get_scm_url(cmd, resource_group_name, bot_site_name, None) # TODO: Reevaluate "Public-or-Gov" Azure logic. is_public_azure = ('azurewebsites.net' in raw_bot_properties.properties.endpoint or '.net' in raw_bot_properties.properties.endpoint or '.com' in raw_bot_properties.properties.endpoint) host = 'https://portal.azure.com/' if is_public_azure else 'https://portal.azure.us/' subscription_id = get_subscription_id(cmd.cli_ctx) tenant_id = profile.get_subscription(subscription=client.config.subscription_id)['tenantId'] settings_url = host + '#@{}/resource/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/app_settings'.format(tenant_id, subscription_id, resource_group_name, resource_name) # pylint: disable=line-too-long logger.warning('"MicrosoftAppPassword" and "botFilePath" not found in application settings') logger.warning('To see your bot\'s application settings, visit %s' % settings_url) logger.warning('To visit your deployed bot\'s code on Azure, visit Kudu for your bot at %s' % scm_url) elif not app_password and bot_file: # We have the information we need to obtain the MSA App app password via bot file data from Kudu. kudu_client = KuduClient(cmd, resource_group_name, resource_name, raw_bot_properties, logger) bot_file_data = kudu_client.get_bot_file(bot_file) app_password = BotJsonFormatter.__decrypt_bot_file(bot_file_data, bot_file_secret, logger, password_only) return { 'type': 'abs', 'id': raw_bot_properties.name, 'name': raw_bot_properties.properties.display_name, 'appId': raw_bot_properties.properties.msa_app_id, 'appPassword': app_password, 'endpoint': raw_bot_properties.properties.endpoint, 'resourceGroup': str(resource_group_name), 'tenantId': profile.get_subscription(subscription=client._config.subscription_id)['tenantId'], # pylint:disable=protected-access 'subscriptionId': client._config.subscription_id, # pylint:disable=protected-access 'serviceName': resource_name }
:param cmd: :param client: :param resource_group_name: :param resource_name: :param logger: :param app_password: :param raw_bot_properties: :return: Dictionary
create_bot_json
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py
MIT
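A tiny sketch of the app-settings lookup pattern create_bot_json uses repeatedly: pull the first value for a named setting out of the site's settings list, or None when absent.

def get_app_setting(app_settings, name):
    # app_settings is the list of {'name': ..., 'value': ...} dicts returned
    # by the web app's settings API.
    values = [item['value'] for item in app_settings if item['name'] == name]
    return values[0] if values else None

settings = [{'name': 'MicrosoftAppPassword', 'value': 'secret'}]
print(get_app_setting(settings, 'MicrosoftAppPassword'))  # secret
print(get_app_setting(settings, 'botFilePath'))           # None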
def __decrypt_bot_file(bot_file_data, bot_file_secret, logger, password_only=True): """Decrypt .bot file retrieved from Kudu. :param bot_file_data: :param bot_file_secret: :param logger: :return: """ services = bot_file_data['services'] decrypt = BotJsonFormatter.__decrypt if password_only: # Get all endpoints that have potentially valid appPassword values endpoints = [service for service in services if service.get('type') == 'endpoint' and service.get('appPassword')] # Reduce the retrieved endpoints to just their passwords app_passwords = [e['appPassword'] for e in endpoints] if len(app_passwords) == 1: return decrypt(bot_file_secret, app_passwords[0], logger) if len(app_passwords) > 1: logger.info('More than one Microsoft App Password found in bot file. Evaluating if more than one ' 'unique App Password exists.') app_passwords = [decrypt(bot_file_secret, pw, logger) for pw in app_passwords] unique_passwords = list(Counter(app_passwords)) # pylint:disable=too-many-function-args if len(unique_passwords) == 1: logger.info('One unique Microsoft App Password found, returning password.') return unique_passwords[0] logger.warning('More than one unique Microsoft App Password found in the bot file, please ' 'manually retrieve your bot file from Kudu to retrieve this information.') logger.warning('No Microsoft App Password returned.') return '' logger.warning('No Microsoft App Passwords found in bot file.') return '' for service in services: # For Azure Blob Storage if service.get('connectionString'): service['connectionString'] = decrypt(bot_file_secret, service['connectionString'], logger) # For LUIS and Dispatch if service.get('authoringKey'): service['authoringKey'] = decrypt(bot_file_secret, service['authoringKey'], logger) # For LUIS and QnA Maker if service.get('subscriptionKey'): service['subscriptionKey'] = decrypt(bot_file_secret, service['subscriptionKey'], logger) # For QnA Maker if service.get('endpointKey'): service['endpointKey'] = decrypt(bot_file_secret, service['endpointKey'], logger) # For connecting to the bot if service.get('appPassword'): service['appPassword'] = decrypt(bot_file_secret, service['appPassword'], logger) # For Application Insights if service.get('instrumentationKey'): service['instrumentationKey'] = decrypt(bot_file_secret, service['instrumentationKey'], logger) if service.get('apiKeys'): for apiKey in service['apiKeys']: service['apiKeys'][apiKey] = decrypt(bot_file_secret, service['apiKeys'][apiKey], logger) # For Cosmos DB if service.get('key'): service['key'] = decrypt(bot_file_secret, service['key'], logger) # For generic services if service.get('configuration') and isinstance(service.get('configuration'), dict): for key in service['configuration']: service['configuration'][key] = decrypt(bot_file_secret, service['configuration'][key], logger) return services
Decrypt .bot file retrieved from Kudu. :param bot_file_data: :param bot_file_secret: :param logger: :return:
__decrypt_bot_file
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py
MIT
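A hedged sketch of the password-only branch above: gather candidate appPassword values from endpoint services, decrypt them, and return one only when it is unique. The identity-function default stands in for the private decrypt helper.

from collections import Counter

def unique_app_password(services, decrypt=lambda value: value):
    candidates = [decrypt(service['appPassword']) for service in services
                  if service.get('type') == 'endpoint' and service.get('appPassword')]
    # Counter preserves insertion order and deduplicates in one step.
    unique = list(Counter(candidates))
    return unique[0] if len(unique) == 1 else ''

services = [{'type': 'endpoint', 'appPassword': 'pw1'},
            {'type': 'endpoint', 'appPassword': 'pw1'}]
print(unique_app_password(services))  # pw1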
def __retrieve_node_v4_publish_zip(): """Retrieves required IIS Node.js v4 BotBuilder SDK deployment files from Azure. :return: zipfile.ZipFile instance """ response = requests.get('https://icscratch.blob.core.windows.net/bot-packages/node_v4_publish.zip') import io return zipfile.ZipFile(io.BytesIO(response.content))
Retrieves required IIS Node.js v4 BotBuilder SDK deployment files from Azure. :return: zipfile.ZipFile instance
__retrieve_node_v4_publish_zip
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/bot_publish_prep.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/bot_publish_prep.py
MIT
def __prepare_configuration_file(cmd, resource_group_name, kudu_client, folder_path): """For bots without a bot file.""" # If no bot file exists, create the language specific configuration file from the bot's Application Settings app_settings = WebAppOperations.get_app_settings( cmd=cmd, resource_group_name=resource_group_name, name=kudu_client.bot_site_name ) # Ignorable Application Settings, these are only used on Azure: ignorable_settings = ['BotEnv', 'WEBSITE_NODE_DEFAULT_VERSION', 'SCM_DO_BUILD_DURING_DEPLOYMENT'] if os.path.exists(os.path.join(folder_path, 'package.json')): logger.info('Detected runtime as Node.js. Package.json present at %s. Creating .env file in that ' 'folder.', folder_path) with open(os.path.join(folder_path, '.env'), 'w+') as f: for setting in app_settings: if setting['name'] not in ignorable_settings: f.write('{0}={1}\n'.format(setting['name'], setting['value'])) f.close() else: app_settings_path = os.path.join(folder_path, 'appsettings.json') logger.info('Detected language as CSharp. Loading app settings from %s.', app_settings_path) appsettings_content = {setting['name']: setting['value'] for setting in app_settings if setting['name'] not in ignorable_settings} existing = None if not os.path.exists(app_settings_path): logger.info('App settings not found at %s, defaulting app settings to {}.', app_settings_path) existing = {} else: with open(app_settings_path, 'r') as f: existing = json.load(f) with open(os.path.join(app_settings_path), 'w+') as f: for key, value in appsettings_content.items(): existing[key] = value f.write(json.dumps(existing))
For bots without a bot file.
__prepare_configuration_file
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/custom.py
MIT
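A minimal sketch of the configuration-file generation above, over a plain list of {'name', 'value'} settings. The writes are real file-system writes, so point folder_path at a scratch directory when trying it.

import json
import os

# Settings that only matter on Azure and are excluded from local config files.
IGNORABLE_SETTINGS = {'BotEnv', 'WEBSITE_NODE_DEFAULT_VERSION', 'SCM_DO_BUILD_DURING_DEPLOYMENT'}

def write_local_config(folder_path, app_settings):
    settings = {s['name']: s['value'] for s in app_settings if s['name'] not in IGNORABLE_SETTINGS}
    if os.path.exists(os.path.join(folder_path, 'package.json')):
        # Node.js bot: one KEY=VALUE per line in a .env file.
        with open(os.path.join(folder_path, '.env'), 'w') as f:
            f.writelines('{0}={1}\n'.format(k, v) for k, v in settings.items())
    else:
        # C# bot: merge into appsettings.json, preserving existing keys.
        path = os.path.join(folder_path, 'appsettings.json')
        existing = {}
        if os.path.exists(path):
            with open(path) as f:
                existing = json.load(f)
        existing.update(settings)
        with open(path, 'w') as f:
            json.dump(existing, f)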
def get_bot(cmd, client, resource_group_name, resource_name, bot_json=None): """Retrieves the bot's properties. If called with the '--msbot' flag, the operation outputs a collection of data that can be piped into a .bot file. This method is directly called via "bot show" :param cmd: :param client: :param resource_group_name: :param resource_name: :param bot_json: """ raw_bot_properties = client.bots.get( resource_group_name=resource_group_name, resource_name=resource_name ) if bot_json: return BotJsonFormatter.create_bot_json(cmd, client, resource_group_name, resource_name, logger, raw_bot_properties=raw_bot_properties) return raw_bot_properties
Retrieves the bot's properties. If called with the '--msbot' flag, the operation outputs a collection of data that can be piped into a .bot file. This method is directly called via "bot show" :param cmd: :param client: :param resource_group_name: :param resource_name: :param bot_json:
get_bot
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/custom.py
MIT
def create_connection(client, resource_group_name, resource_name, connection_name, client_id, client_secret, scopes, service_provider_name, parameters=None): """Create a custom OAuth service provider. This method is directly called via "bot authsetting create" :param client: :param resource_group_name: :param resource_name: :param connection_name: :param client_id: :param client_secret: :param scopes: :param service_provider_name: :param parameters: :return: """ service_provider = get_service_providers(client, name=service_provider_name) if not service_provider: raise CLIError('Invalid Service Provider Name passed. Use "az bot authsetting list-providers" ' 'command to see all available providers') connection_parameters = [] if parameters: for parameter in parameters: pair = parameter.split('=', 1) if len(pair) == 1: raise CLIError('usage error: --parameters STRING=STRING STRING=STRING') connection_parameters.append(ConnectionSettingParameter(key=pair[0], value=pair[1])) setting = ConnectionSetting( location='global', properties=ConnectionSettingProperties( client_id=client_id, client_secret=client_secret, scopes=scopes, service_provider_id=service_provider.properties.id, parameters=connection_parameters ) ) return client.bot_connection.create(resource_group_name, resource_name, connection_name, setting)
Create a custom OAuth service provider. This method is directly called via "bot authsetting create" :param client: :param resource_group_name: :param resource_name: :param connection_name: :param client_id: :param client_secret: :param scopes: :param service_provider_name: :param parameters: :return:
create_connection
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/custom.py
MIT
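The "--parameters KEY=VALUE" parsing from create_connection, sketched standalone with the same usage-error behavior.

def parse_connection_parameters(parameters):
    parsed = []
    for parameter in parameters or []:
        # Split on the first '=' only, so values may themselves contain '='.
        pair = parameter.split('=', 1)
        if len(pair) == 1:
            raise ValueError('usage error: --parameters STRING=STRING STRING=STRING')
        parsed.append({'key': pair[0], 'value': pair[1]})
    return parsed

print(parse_connection_parameters(['loginUri=https://login.microsoftonline.com/', 'tenantId=common']))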
def get_service_providers(client, name=None): """Gets supported OAuth Service providers. This method is directly called via "bot authsetting list-providers" :param client: :param name: :return: """ service_provider_response = client.bot_connection.list_service_providers() name = name and name.lower() if name: try: return next(item for item in service_provider_response.value if item.properties.service_provider_name.lower() == name.lower()) except StopIteration: raise CLIError('A service provider with the name {0} was not found'.format(name)) return service_provider_response
Gets supported OAuth Service providers. This method is directly called via "bot authsetting list-providers" :param client: :param name: :return:
get_service_providers
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/custom.py
MIT
def prepare_publish(cmd, client, resource_group_name, resource_name, sln_name, proj_file_path, code_dir=None, # pylint:disable=too-many-statements version='v3'): """Adds PostDeployScripts folder with necessary scripts to deploy v3 bot to Azure. This method is directly called via "bot prepare-publish" :param cmd: :param client: :param resource_group_name: :param resource_name: :param sln_name: :param proj_file_path: :param code_dir: :param version: :return: """ # The prepare-publish process for v3 bots and v4 bots differ, so if the user specifies a v4 version, end the command # and inform user of az bot publish. if version == 'v4': raise CLIError('\'az bot prepare-publish\' is only for v3 bots. Please use \'az bot publish\' to prepare and ' 'publish a v4 bot.') logger.warning('WARNING: `az bot prepare-publish` is in maintenance mode for v3 bots as support for creating v3 ' 'SDK bots via `az bot create` will be discontinued on August 1st, 2019. We encourage developers ' 'to move to creating and deploying v4 bots.\n\nFor more information on creating and deploying v4 bots, ' 'please visit https://aka.ms/create-and-deploy-v4-bot\n\nFor more information on v3 bot ' 'creation deprecation, please visit this blog post: ' 'https://blog.botframework.com/2019/06/07/v3-bot-broadcast-message/') bot = client.bots.get( resource_group_name=resource_group_name, resource_name=resource_name ) if bot.kind == 'bot': raise CLIError('Prepare Publish is not supported for registration only bots.') if not code_dir: code_dir = os.getcwd() logger.warning('Parameter --code-dir not provided, defaulting to current working directory, %s. For more ' 'information, run \'az bot prepare-publish -h\'', code_dir) code_dir = code_dir.strip() if not os.path.isdir(code_dir): raise CLIError('Please supply a valid directory path containing your source code. ' 'Path {0} does not exist.'.format(code_dir)) os.chdir(code_dir) # Ensure that the directory does not contain appropriate post deploy scripts folder if 'PostDeployScripts' in os.listdir(code_dir): raise CLIError('Post deploy azure scripts are already in Place.') # Download bot source download_path = download_app(cmd, client, resource_group_name, resource_name) shutil.copytree(os.path.join(download_path['downloadPath'], 'PostDeployScripts'), 'PostDeployScripts') # If javascript, we need these files there for Azure WebApps to start if os.path.exists(os.path.join('PostDeployScripts', 'publish.js.template')): logger.info('Detected language javascript.') shutil.copy(os.path.join(download_path['downloadPath'], 'iisnode.yml'), 'iisnode.yml') shutil.copy(os.path.join(download_path['downloadPath'], 'publish.js'), 'publish.js') shutil.copy(os.path.join(download_path['downloadPath'], 'web.config'), 'web.config') # If C#, we need other set of files for the WebApp to start including build.cmd else: logger.info('Detected language CSharp.') solution_path = None csproj_path = None old_namev4 = 'AspNetCore-EchoBot-With-State' old_namev3 = 'Microsoft.Bot.Sample.SimpleEchoBot' shutil.copy(os.path.join(download_path['downloadPath'], 'build.cmd'), 'build.cmd') shutil.copy(os.path.join(download_path['downloadPath'], '.deployment'), '.deployment') # "deploy.cmd.template" does not exist for v4 bots. If the next section of code fails due to deploy.cmd.template # not being found, it is most likely due to trying to call prepare-publish on a v4 bot. # Inform the user of the potential problem and raise the error to exit the process. try: shutil.copyfile(os.path.join(download_path['downloadPath'], 'PostDeployScripts', 'deploy.cmd.template'), 'deploy.cmd') except OSError as error: # FileNotFoundError introduced in Python 3 logger.error('"deploy.cmd.template" not found. This may be due to calling \'az bot prepare-publish\' on a ' 'v4 bot. To prepare and publish a v4 bot, please instead use \'az bot publish\'.') raise CLIError(error) # Find solution and project name for root, _, files in os.walk(os.curdir): if solution_path and csproj_path: break for fileName in files: if solution_path and csproj_path: break if fileName == sln_name: solution_path = os.path.relpath(os.path.join(root, fileName)) if fileName == proj_file_path: csproj_path = os.path.relpath(os.path.join(root, fileName)) # Read deploy script contents with open('deploy.cmd') as f: content = f.read() logger.info('Visual studio solution detected: %s.', solution_path) logger.info('Visual studio project detected: %s.', csproj_path) # Using the deploy.cmd as a template, adapt it to use our solution and csproj with open('deploy.cmd', 'w') as f: content = content.replace(old_namev3 + '.sln', solution_path) content = content.replace(old_namev3 + '.csproj', csproj_path) content = content.replace(old_namev4 + '.sln', solution_path) content = content.replace(old_namev4 + '.csproj', csproj_path) f.write(content) shutil.rmtree(download_path['downloadPath']) logger.info('Bot prepare publish completed successfully.')
Adds PostDeployScripts folder with necessary scripts to deploy v3 bot to Azure. This method is directly called via "bot prepare-publish" :param cmd: :param client: :param resource_group_name: :param resource_name: :param sln_name: :param proj_file_path: :param code_dir: :param version: :return:
prepare_publish
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/custom.py
MIT
def download_bot_zip(self, file_save_path, folder_path): """Download bot's source code from Kudu. This method looks for the zipped source code in the site/clirepo/ folder on Kudu. If the code is not there, the contents of site/wwwroot are zipped and then downloaded. :param file_save_path: string :param folder_path: string :return: None """ if not self.__initialized: self.__initialize() headers = self.__get_application_octet_stream_headers() # Download source code in zip format from Kudu response = requests.get(self.__scm_url + '/api/zip/site/clirepo/', headers=headers) # If the status_code is not 200, the source code was not successfully retrieved. # Run the prepareSrc.cmd to zip up the code and prepare it for download. if response.status_code != 200: # try getting the bot from wwwroot instead payload = { 'command': 'PostDeployScripts\\prepareSrc.cmd {0}'.format(self.__password), 'dir': r'site\wwwroot' } prepareSrc_response = requests.post(self.__scm_url + '/api/command', data=json.dumps(payload), headers=self.__get_application_json_headers()) HttpResponseValidator.check_response_status(prepareSrc_response) # Overwrite previous "response" with bot-src.zip. response = requests.get(self.__scm_url + '/api/vfs/site/bot-src.zip', headers=headers) HttpResponseValidator.check_response_status(response) download_path = os.path.join(file_save_path, 'download.zip') with open(os.path.join(file_save_path, 'download.zip'), 'wb') as f: f.write(response.content) zip_ref = zipfile.ZipFile(download_path) zip_ref.extractall(folder_path) zip_ref.close() os.remove(download_path)
Download bot's source code from Kudu. This method looks for the zipped source code in the site/clirepo/ folder on Kudu. If the code is not there, the contents of site/wwwroot are zipped and then downloaded. :param file_save_path: string :param folder_path: string :return: None
download_bot_zip
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
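A generic sketch of the final download-and-extract step in download_bot_zip; zip_url and headers are placeholders rather than real Kudu values, and extraction happens in memory instead of via a temporary download.zip.

import io
import zipfile
import requests

def download_and_extract(zip_url, headers, folder_path):
    response = requests.get(zip_url, headers=headers)
    response.raise_for_status()
    # Wrap the raw bytes in a file-like object so ZipFile can read them.
    with zipfile.ZipFile(io.BytesIO(response.content)) as zip_ref:
        zip_ref.extractall(folder_path)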
def get_bot_file(self, bot_file): """Retrieve the .bot file from Kudu. :param bot_file: :return: """ if not self.__initialized: self.__initialize() if bot_file.startswith('./') or bot_file.startswith('.\\'): bot_file = bot_file[2:] # Format backslashes to forward slashes and URL escape bot_file = quote(bot_file.replace('\\', '/')) request_url = self.__scm_url + '/api/vfs/site/wwwroot/' + bot_file self.__logger.info('Attempting to retrieve .bot file content from %s' % request_url) response = requests.get(request_url, headers=self.__get_application_octet_stream_headers()) HttpResponseValidator.check_response_status(response) self.__logger.info('Bot file successfully retrieved from Kudu.') return json.loads(response.text)
Retrieve the .bot file from Kudu. :param bot_file: :return:
get_bot_file
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
def install_node_dependencies(self): """Installs Node.js dependencies at `site/wwwroot/` for Node.js bots. This method is only called when the detected bot is a Node.js bot. :return: Dictionary with results of the HTTP Kudu request """ if not self.__initialized: self.__initialize() payload = { 'command': 'npm install', 'dir': r'site\wwwroot' } # npm install can take a very long time to complete. By default, Azure's load balancer will terminate # connections after 230 seconds of no inbound or outbound packets. This timeout is not configurable. try: response = requests.post(self.__scm_url + '/api/command', data=json.dumps(payload), headers=self.__get_application_json_headers()) HttpResponseValidator.check_response_status(response) except CLIError as e: if response.status_code == 500 and 'The request timed out.' in response.text: self.__logger.warning('npm install is taking longer than expected and did not finish within the ' 'Azure-specified timeout of 230 seconds.') self.__logger.warning('The installation is likely still in progress. This is a known issue, please wait' ' a short while before messaging your bot. You can also visit Kudu to manually ' 'install the npm dependencies. (https://github.com/projectkudu/kudu/wiki)') self.__logger.warning('Your Kudu website for this bot is: %s' % self.__scm_url) self.__logger.warning('\nYou can also use `--keep-node-modules` in your `az bot publish` command to ' 'not `npm install` the dependencies for the bot on Kudu.') subscription_id = get_subscription_id(self.__cmd.cli_ctx) self.__logger.warning('Alternatively, you can configure your Application Settings for the App Service ' 'to build during zipdeploy by using the following command:\n az webapp config ' 'appsettings set -n %s -g %s --subscription %s --settings ' 'SCM_DO_BUILD_DURING_DEPLOYMENT=true' % (self.bot_site_name, self.__resource_group_name, subscription_id)) else: raise e return response.json()
Installs Node.js dependencies at `site/wwwroot/` for Node.js bots. This method is only called when the detected bot is a Node.js bot. :return: Dictionary with results of the HTTP Kudu request
install_node_dependencies
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
def publish(self, zip_file_path, timeout, keep_node_modules, detected_language): """Publishes zipped bot source code to Kudu. Performs the following steps: 1. Empties the `site/clirepo/` folder on Kudu 2. Pushes the code to `site/clirepo/` 3. Deploys the code via the zipdeploy API. (https://github.com/projectkudu/kudu/wiki/REST-API#zip-deployment) 4. Gets the results of the latest Kudu deployment :param zip_file_path: :param timeout: :param keep_node_modules: :param detected_language: :return: Dictionary with results of the latest deployment """ if not self.__initialized: self.__initialize() self.__empty_source_folder() headers = self.__get_application_octet_stream_headers() with open(zip_file_path, 'rb') as fs: zip_content = fs.read() response = requests.put(self.__scm_url + '/api/zip/site/clirepo', headers=headers, data=zip_content) HttpResponseValidator.check_response_status(response) return self.__enable_zip_deploy(zip_file_path, timeout, keep_node_modules, detected_language)
Publishes zipped bot source code to Kudu. Performs the following steps: 1. Empties the `site/clirepo/` folder on Kudu 2. Pushes the code to `site/clirepo/` 3. Deploys the code via the zipdeploy API. (https://github.com/projectkudu/kudu/wiki/REST-API#zip-deployment) 4. Gets the results of the latest Kudu deployment :param zip_file_path: :param timeout: :param keep_node_modules: :param detected_language: :return: Dictionary with results of the latest deployment
publish
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
def __empty_source_folder(self): """Remove the `clirepo/` folder from Kudu. This method is called from KuduClient.publish() in preparation for uploading the user's local source code. After removing the folder from Kudu, the method performs another request to recreate the `clirepo/` folder. :return: """ # The `clirepo/` folder contains the zipped up source code payload = { 'command': 'rm -rf clirepo && mkdir clirepo', 'dir': r'site' } headers = self.__get_application_json_headers() response = requests.post(self.__scm_url + '/api/command', data=json.dumps(payload), headers=headers) HttpResponseValidator.check_response_status(response)
Remove the `clirepo/` folder from Kudu. This method is called from KuduClient.publish() in preparation for uploading the user's local source code. After removing the folder from Kudu, the method performs another request to recreate the `clirepo/` folder. :return:
__empty_source_folder
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
def __empty_wwwroot_folder_except_for_node_modules(self): """Empty site/wwwroot/ folder but retain node_modules folder. :return: """ self.__logger.info('Removing all files and folders from "site/wwwroot/" except for node_modules.') payload = { 'command': '(for /D %i in (.\\*) do if not %~nxi == node_modules rmdir /s/q %i) && (for %i in (.\\*) ' 'del %i)', 'dir': r'site\wwwroot' } headers = self.__get_application_json_headers() response = requests.post(self.__scm_url + '/api/command', data=json.dumps(payload), headers=headers) HttpResponseValidator.check_response_status(response) self.__logger.info('All files and folders successfully removed from "site/wwwroot/" except for node_modules.')
Empty site/wwwroot/ folder but retain node_modules folder. :return:
__empty_wwwroot_folder_except_for_node_modules
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
def __empty_wwwroot_folder(self): # pylint: disable=unused-private-member """Empty the site/wwwroot/ folder from Kudu. Empties the site/wwwroot/ folder by removing the entire directory, and then recreating it. Called when publishing a bot to Kudu. """ self.__logger.info('Emptying the "site/wwwroot/" folder on Kudu in preparation for publishing.') payload = { 'command': 'rm -rf wwwroot && mkdir wwwroot', 'dir': r'site' } headers = self.__get_application_json_headers() response = requests.post(self.__scm_url + '/api/command', data=json.dumps(payload), headers=headers) HttpResponseValidator.check_response_status(response) self.__logger.info('"site/wwwroot/" successfully emptied.')
Empty the site/wwwroot/ folder from Kudu. Empties the site/wwwroot/ folder by removing the entire directory, and then recreating it. Called when publishing a bot to Kudu.
__empty_wwwroot_folder
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
def __enable_zip_deploy(self, zip_file_path, timeout, keep_node_modules, detected_language): """Pushes local bot's source code in zip format to Kudu for deployment. This method deploys the zipped bot source code via Kudu's zipdeploy API. This API does not run any build processes such as `npm install`, `dotnet restore`, `dotnet publish`, etc. :param zip_file_path: string :return: Dictionary with results of the latest deployment """ zip_url = self.__scm_url + '/api/zipdeploy?isAsync=true' headers = self.__get_application_octet_stream_headers() if not keep_node_modules or detected_language == 'Csharp': self.__empty_source_folder() else: self.__empty_wwwroot_folder_except_for_node_modules() with open(os.path.realpath(os.path.expanduser(zip_file_path)), 'rb') as fs: zip_content = fs.read() self.__logger.info('Source code read, uploading to Kudu.') r = requests.post(zip_url, data=zip_content, headers=headers) if r.status_code != 202: raise CLIError("Zip deployment {} failed with status code '{}' and reason '{}'".format( zip_url, r.status_code, r.text)) self.__logger.info('Retrieving current deployment info.') # On successful deployment navigate to the app, display the latest deployment JSON response. return self.__check_zip_deployment_status(timeout)
Pushes local bot's source code in zip format to Kudu for deployment. This method deploys the zipped bot source code via Kudu's zipdeploy API. This API does not run any build processes such as `npm install`, `dotnet restore`, `dotnet publish`, etc. :param zip_file_path: string :return: Dictionary with results of the latest deployment
__enable_zip_deploy
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
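A hedged sketch of the two Kudu calls this client leans on: running a shell command through /api/command and pushing a zip through /api/zipdeploy. scm_url and headers are placeholders for the site's SCM endpoint and its basic-auth headers; the endpoints themselves follow the documented Kudu REST API.

import json
import requests

def run_kudu_command(scm_url, headers, command, cwd):
    payload = {'command': command, 'dir': cwd}
    response = requests.post(scm_url + '/api/command', data=json.dumps(payload), headers=headers)
    response.raise_for_status()
    return response.json()

def zip_deploy(scm_url, headers, zip_bytes):
    # isAsync=true makes Kudu answer 202 immediately; deployment status must be polled.
    response = requests.post(scm_url + '/api/zipdeploy?isAsync=true', data=zip_bytes, headers=headers)
    if response.status_code != 202:
        raise RuntimeError('Zip deployment failed: {} {}'.format(response.status_code, response.text))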
def __initialize(self): """Generates necessary data for performing calls to Kudu based off of data passed in on initialization. :return: None """ user_name, password = WebAppOperations.get_site_credential(self.__cmd.cli_ctx, self.__resource_group_name, self.bot_site_name, None) # Store the password for download_bot_zip: self.__password = password self.__scm_url = WebAppOperations.get_scm_url(self.__cmd, self.__resource_group_name, self.bot_site_name, None) self.__auth_headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password)) self.__initialized = True
Generates necessary data for performing calls to Kudu based off of data passed in on initialization. :return: None
__initialize
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/kudu_client.py
MIT
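The basic-auth header construction from __initialize, shown standalone: urllib3's make_headers turns "user:password" into an Authorization header dict that can be passed straight to requests. The credentials below are placeholders.

import urllib3

headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format('deployment_user', 'deployment_password'))
print(headers)  # {'authorization': 'Basic ...'}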
def send_message(self, text, retry_count=3): """Send raw text to bot framework using direct line api""" url = '/'.join([self._base_url, 'conversations', self._conversationid, 'activities']) json_payload = { 'conversationId': self._conversationid, 'type': 'message', 'from': {'id': 'user1'}, 'text': text } success = False current_retry = 0 while not success and current_retry < retry_count: bot_response = requests.post(url, headers=self._headers, json=json_payload) current_retry += 1 if bot_response.status_code == 200: success = True return bot_response
Send raw text to bot framework using direct line api
send_message
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/tests/latest/test_bot_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/tests/latest/test_bot_commands.py
MIT
def get_message(self, retry_count=3): """Get a response message back from the bot framework using direct line api""" url = '/'.join([self._base_url, 'conversations', self._conversationid, 'activities']) url = url + '?watermark=' + self._watermark success = False current_retry = 0 while not success and current_retry < retry_count: bot_response = requests.get(url, headers=self._headers, json={'conversationId': self._conversationid}) current_retry += 1 if bot_response.status_code == 200: success = True json_response = bot_response.json() if 'watermark' in json_response: self._watermark = json_response['watermark'] if 'activities' in json_response: activities_count = len(json_response['activities']) if activities_count > 0: return bot_response, json_response['activities'][activities_count - 1]['text'] else: return bot_response, "No new messages" return bot_response, "error contacting bot for response"
Get a response message back from the bot framework using direct line api
get_message
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/tests/latest/test_bot_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/tests/latest/test_bot_commands.py
MIT
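A usage sketch of the Direct Line test client whose methods appear above, assuming the class opens a conversation in its constructor; the secret is a placeholder.

client = DirectLineClient('<direct-line-secret>')
send_response = client.send_message('Hi')
assert send_response.status_code == 200
response, text = client.get_message()
print(text)  # the bot's latest reply, or "No new messages"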
def __talk_to_bot(self, message_text='Hi', expected_text=None): """Enables direct line channel, sends a message to the bot, and if expected_text is provided, verifies that the bot's answer matches it.""" # This setting is for local testing, specifying an app id and password. Set it to true to test directline. # For automation, we set it to false by default to avoid handling keys for now. use_directline = False # It is not possible to talk to the bot in playback mode. if self.is_live and use_directline: result = self.cmd('az bot directline create -g {rg} -n {botname}', checks=[ self.check('properties.properties.sites[0].siteName', 'Default Site') ]) json_output = json.loads(result.output) directline_key = json_output['properties']['properties']['sites'][0]['key'] directline_client = DirectLineClient(directline_key) send_result = directline_client.send_message(message_text) if send_result.status_code != 200: self.fail("Failed to send message to bot through directline api. Response:" + json.dumps(send_result.json())) response, text = directline_client.get_message() if response.status_code != 200: self.fail("Failed to receive message from bot through directline api. Error:" + response.json()) if expected_text: self.assertTrue(expected_text in text, "Bot response does not match expectation: " + text + expected_text)
Enables direct line channel, sends a message to the bot, and if expected_text is provided, verifies that the bot's answer matches it.
__talk_to_bot
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/botservice/tests/latest/test_bot_commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/botservice/tests/latest/test_bot_commands.py
MIT
def list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type, backup_management_type="AzureWorkload", container_name=None, protectable_item_type=None, server_name=None): if backup_management_type != "AzureWorkload": raise ValidationError(""" Only supported value of backup-management-type is 'AzureWorkload' for this command. """) container_uri = None if container_name: if custom_help.is_native_name(container_name): container_uri = container_name else: container_client = backup_protection_containers_cf(cmd.cli_ctx) container = show_container(cmd, container_client, container_name, resource_group_name, vault_name, backup_management_type) custom_help.validate_container(container) if isinstance(container, list): raise ValidationError(""" Multiple containers with same Friendly Name found. Please give native names instead. """) container_uri = container.name return custom_wl.list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type, backup_management_type, container_uri, protectable_item_type, server_name)
List protectable items of the given workload type in a Recovery Services vault. Only the 'AzureWorkload' backup management type is supported; a friendly container name, if provided, is resolved to its native name before delegating to custom_wl.list_protectable_items.
list_protectable_items
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_base.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_base.py
MIT
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name, rp_name=None, target_item_name=None, log_point_in_time=None, target_server_type=None, target_server_name=None, workload_type=None, backup_management_type="AzureWorkload", from_full_rp_name=None, filepath=None, target_container_name=None, target_resource_group=None, target_vault_name=None, target_subscription_id=None, target_instance_name=None): target_subscription = get_subscription_id(cmd.cli_ctx) if target_subscription_id is not None: vault_csr_state = custom.get_vault_csr_state(vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)) if vault_csr_state is None or vault_csr_state == "Enabled": target_subscription = target_subscription_id else: raise ArgumentUsageError( """ Cross Subscription Restore is not allowed on this Vault. Please either enable CSR on the vault or try restoring in the same subscription. """) target_resource_group = resource_group_name if target_resource_group is None else target_resource_group target_vault_name = vault_name if target_vault_name is None else target_vault_name target_container_name = container_name if target_container_name is None else target_container_name target_item = None if target_item_name is not None: protectable_items_client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient, subscription_id=target_subscription).backup_protectable_items target_item = show_protectable_instance( cmd, protectable_items_client, target_resource_group, target_vault_name, target_server_name, target_server_type, workload_type, target_container_name, target_subscription, target_instance_name, "AzureWorkload") target_container = None if target_container_name is not None: container_client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient, subscription_id=target_subscription).backup_protection_containers target_container = common.show_container(cmd, container_client, target_container_name, target_resource_group, target_vault_name, backup_management_type) if isinstance(target_container, list): raise ValidationError(""" Multiple containers with same Friendly Name found. Please give native names instead. """) return custom_wl.show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name, rp_name, target_item, target_item_name, log_point_in_time, from_full_rp_name, filepath, target_container, target_resource_group, target_vault_name, target_subscription)
Build the recovery configuration for restoring an Azure Workload item. Validates that Cross Subscription Restore is enabled on the vault when a target subscription is given, resolves the target resource group, vault, container and item, and delegates to custom_wl.show_recovery_config.
show_recovery_config
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_base.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_base.py
MIT
def list_protectable_items_in_subscription(cmd, client, resource_group_name, vault_name, workload_type, backup_management_type="AzureWorkload", container_name=None, protectable_item_type=None, server_name=None, subscription_id=None): if backup_management_type != "AzureWorkload": raise ValidationError(""" Only supported value of backup-management-type is 'AzureWorkload' for this command. """) container_uri = None if container_name: if custom_help.is_native_name(container_name): container_uri = container_name else: container_client = (backup_protection_containers_cf(cmd.cli_ctx) if subscription_id is None else get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient, subscription_id=subscription_id).backup_protection_containers) container = show_container(cmd, container_client, container_name, resource_group_name, vault_name, backup_management_type) custom_help.validate_container(container) if isinstance(container, list): raise ValidationError(""" Multiple containers with same Friendly Name found. Please give native names instead. """) container_uri = container.name return custom_wl.list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type, backup_management_type, container_uri, protectable_item_type, server_name, subscription_id)
List protectable items of the given workload type in a Recovery Services vault, optionally in another subscription. Only the 'AzureWorkload' backup management type is supported; a friendly container name, if provided, is resolved to its native name first.
list_protectable_items_in_subscription
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_base.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_base.py
MIT
def list_wl_policies(client, resource_group_name, vault_name, workload_type, backup_management_type): if workload_type is None: raise RequiredArgumentMissingError( """ Workload type is required for Azure Workload. Use --workload-type. """) if backup_management_type is None: raise CLIError( """ Backup Management Type needs to be specified for Azure Workload. """) workload_type = _check_map(workload_type, workload_type_map) filter_string = cust_help.get_filter_string({ 'backupManagementType': backup_management_type, 'workloadType': workload_type}) policies = client.list(vault_name, resource_group_name, filter_string) return cust_help.get_list_from_paged_response(policies)
List backup policies for an Azure Workload in a Recovery Services vault. Both the workload type and the backup management type are required; the workload type is normalized before building the OData filter string.
list_wl_policies
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
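A sketch of what cust_help.get_filter_string is assumed to produce here: an OData filter that "and"-joins each key against its quoted value, skipping None entries. The real helper lives in the backup module's custom_help and may differ in detail.

def get_filter_string(filters):
    return ' and '.join("{0} eq '{1}'".format(key, value)
                        for key, value in filters.items() if value is not None)

print(get_filter_string({'backupManagementType': 'AzureWorkload', 'workloadType': 'SQLDataBase'}))
# backupManagementType eq 'AzureWorkload' and workloadType eq 'SQLDataBase'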
def register_wl_container(cmd, client, vault_name, resource_group_name, workload_type, resource_id, container_type): if not cust_help.is_id(resource_id): raise CLIError( """ Resource ID is not a valid one. """) workload_type = _check_map(workload_type, workload_type_map) container_name = _get_protectable_container_name(cmd, resource_group_name, vault_name, resource_id) if container_name is None or not cust_help.is_native_name(container_name): filter_string = cust_help.get_filter_string({'backupManagementType': "AzureWorkload"}) # refresh containers and try to get the protectable container object again refresh_result = client.refresh(vault_name, resource_group_name, fabric_name, filter=filter_string, cls=cust_help.get_pipeline_response) cust_help.track_refresh_operation(cmd.cli_ctx, refresh_result, vault_name, resource_group_name) container_name = _get_protectable_container_name(cmd, resource_group_name, vault_name, resource_id) if container_name is None or not cust_help.is_native_name(container_name): raise ResourceNotFoundError( """ Container unavailable or already registered. """) properties = AzureVMAppContainerProtectionContainer(backup_management_type=container_type, source_resource_id=resource_id, workload_type=workload_type) param = ProtectionContainerResource(properties=properties) # Trigger register and wait for completion result = client.begin_register(vault_name, resource_group_name, fabric_name, container_name, param, cls=cust_help.get_pipeline_response, polling=False).result() return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, container_name)
Register a workload container with a Recovery Services vault. Validates the resource ID, refreshes the container list if the protectable container is not yet visible, then triggers the register operation and tracks it to completion.
register_wl_container
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
def re_register_wl_container(cmd, client, vault_name, resource_group_name, workload_type, container_name, container_type): workload_type = _check_map(workload_type, workload_type_map) if not cust_help.is_native_name(container_name): raise CLIError( """ Container name passed cannot be a friendly name. Please pass a native container name. """) backup_cf = backup_protection_containers_cf(cmd.cli_ctx) containers = common.list_containers(backup_cf, resource_group_name, vault_name, container_type) source_resource_id = None for container in containers: if container.name == container_name: source_resource_id = container.properties.source_resource_id break if not source_resource_id: raise CLIError( """ No such registered container exists. """) properties = AzureVMAppContainerProtectionContainer(backup_management_type=container_type, workload_type=workload_type, operation_type='Reregister', source_resource_id=source_resource_id) param = ProtectionContainerResource(properties=properties) # Trigger register and wait for completion result = client.begin_register(vault_name, resource_group_name, fabric_name, container_name, param, cls=cust_help.get_pipeline_response, polling=False).result() return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, container_name)
Re-register an already registered workload container with a Recovery Services vault. Requires the native container name, looks up the container's source resource ID, and triggers a 'Reregister' operation.
re_register_wl_container
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
def set_policy(cmd, client, resource_group_name, vault_name, policy, policy_name, fix_for_inconsistent_items, tenant_id=None, is_critical_operation=False): if policy_name is None: raise CLIError( """ Policy name is required for set policy. """) if policy is not None: policy_object = cust_help.get_policy_from_json(client, policy) if is_critical_operation: existing_policy = common.show_policy(client, resource_group_name, vault_name, policy_name) if cust_help.is_retention_duration_decreased(existing_policy, policy_object, "AzureWorkload"): # update the payload with critical operation and add auxiliary header for cross tenant case if tenant_id is not None: client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient, aux_tenants=[tenant_id]).protection_policies policy_object.properties.resource_guard_operation_requests = [ cust_help.get_resource_guard_operation_request(cmd.cli_ctx, resource_group_name, vault_name, "updatePolicy")] else: if fix_for_inconsistent_items: policy_object = common.show_policy(client, resource_group_name, vault_name, policy_name) policy_object.properties.make_policy_consistent = True else: raise CLIError( """ Please provide policy object. """) return client.create_or_update(vault_name, resource_group_name, policy_name, policy_object)
Create or update an Azure Workload backup policy. Requires a policy name; with fix_for_inconsistent_items the existing policy is reused and marked consistent, and for critical operations a ResourceGuard operation request is attached, with cross-tenant support when tenant_id is given.
set_policy
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
def show_protectable_instance(items, server_name, protectable_item_type, instance_name=None): if server_name is None: raise RequiredArgumentMissingError(""" Server name missing. Please provide a valid server name using --target-server-name. """) if protectable_item_type is None: az_error = RequiredArgumentMissingError(""" Protectable item type missing. Please provide a valid protectable item type name using --target-server-type. """) recommendation_text = "{} are the allowed values.".format(str(list(protectable_item_type_map.keys()))) az_error.set_recommendation(recommendation_text) raise az_error protectable_item_type = _check_map(protectable_item_type, protectable_item_type_map) # Protectable Item Type filter filtered_items = [item for item in items if item.properties.protectable_item_type is not None and item.properties.protectable_item_type.lower() == protectable_item_type.lower()] # Server Name filter filtered_items = [item for item in filtered_items if hasattr(item.properties, 'server_name') and item.properties.server_name.lower() == server_name.lower()] # Instance Name filter, if it is passed if instance_name: filtered_items = [item for item in items if item.name.lower() == instance_name.lower()] return cust_help.get_none_one_or_many(filtered_items)
Select a single protectable instance from a list of protectable items by filtering on protectable item type, server name and, optionally, instance name. Raises RequiredArgumentMissingError when the server name or the protectable item type is missing.
show_protectable_instance
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
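The three filters from show_protectable_instance, sketched over plain dicts. Note that, as in the source above, the instance-name filter runs over the full item list rather than the already-filtered one.

def filter_protectable_items(items, server_name, item_type, instance_name=None):
    filtered = [i for i in items
                if (i.get('protectableItemType') or '').lower() == item_type.lower()]
    filtered = [i for i in filtered
                if (i.get('serverName') or '').lower() == server_name.lower()]
    if instance_name:
        # Mirrors the source: this filter is applied to `items`, not `filtered`.
        filtered = [i for i in items if i['name'].lower() == instance_name.lower()]
    return filtered

items = [{'name': 'sqlinstance;mssqlserver', 'protectableItemType': 'SQLInstance', 'serverName': 'vm1'}]
print(filter_protectable_items(items, 'vm1', 'SQLInstance'))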
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until, backup_type, enable_compression=False): if backup_type is None: raise RequiredArgumentMissingError("Backup type missing. Please provide a valid backup type using " "--backup-type argument.") message = "For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy" if retain_until is None: if backup_type.lower() == 'copyonlyfull': logger.warning("The default value for retain-until for backup-type CopyOnlyFull is 30 days.") retain_until = datetime.now(timezone.utc) + timedelta(days=30) if backup_type.lower() == 'full': logger.warning("The default value for retain-until for backup-type Full is 45 days.") retain_until = datetime.now(timezone.utc) + timedelta(days=45) else: if backup_type.lower() in ['differential', 'log']: retain_until = None logger.warning(message) container_uri = cust_help.get_protection_container_uri_from_id(item.id) item_uri = cust_help.get_protected_item_uri_from_id(item.id) backup_item_type = item_uri.split(';')[0] if not (cust_help.is_sql(backup_item_type) or cust_help.is_hana(backup_item_type)) and enable_compression: raise CLIError( """ Enable compression is only applicable for SQLDataBase and SAPHanaDatabase item types. """) if cust_help.is_hana(backup_item_type) and backup_type.lower() in ['log', 'copyonlyfull', 'incremental']: raise CLIError( """ Backup type cannot be Log, CopyOnlyFull, Incremental for SAPHanaDatabase Adhoc backup. """) properties = AzureWorkloadBackupRequest(backup_type=backup_type, enable_compression=enable_compression, recovery_point_expiry_time_in_utc=retain_until) param = BackupRequestResource(properties=properties) # Trigger backup and wait for completion result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri, param, cls=cust_help.get_pipeline_response) return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
backup_now
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
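The retain-until defaulting above can be exercised in isolation; this is a minimal sketch of just that branch (the 30/45-day defaults mirror the function, everything else is stripped out):

from datetime import datetime, timedelta, timezone

def default_retain_until(backup_type, retain_until=None):
    # Mirrors backup_now's defaulting: CopyOnlyFull -> 30 days, Full -> 45 days,
    # Differential/Log -> policy-driven (None).
    if retain_until is None:
        if backup_type.lower() == 'copyonlyfull':
            return datetime.now(timezone.utc) + timedelta(days=30)
        if backup_type.lower() == 'full':
            return datetime.now(timezone.utc) + timedelta(days=45)
        return None
    if backup_type.lower() in ['differential', 'log']:
        return None  # overridden by the underlying policy
    return retain_until

print(default_retain_until('Full'))  # roughly now + 45 days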
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name,
                         rp_name, target_item, target_item_name, log_point_in_time, from_full_rp_name,
                         filepath, target_container, target_resource_group, target_vault_name, target_subscription):
    if log_point_in_time is not None:
        datetime_type(log_point_in_time)

    if restore_mode == 'AlternateWorkloadRestore':
        _check_none_and_many(target_item, "Target Item")

        protectable_item_type = target_item.properties.protectable_item_type
        if protectable_item_type.lower() not in ["sqlinstance", "saphanasystem"]:
            raise CLIError(
                """
                Target Item must be either of type HANAInstance or SQLInstance.
                """)

    if restore_mode == 'RestoreAsFiles' and target_container is None:
        raise CLIError("Target Container must be provided.")

    if rp_name is None and log_point_in_time is None:
        raise CLIError(
            """
            Log point in time or recovery point name must be provided.
            """)

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_name, item_name, "AzureWorkload")
    cust_help.validate_item(item)
    item_type = item.properties.workload_type
    item_name = item.name

    if not cust_help.is_sql(item_type) and not cust_help.is_hana(item_type):
        raise CLIError(
            """
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    # Mapping of restore mode
    restore_mode_map = {'OriginalWorkloadRestore': 'OriginalLocation',
                        'AlternateWorkloadRestore': 'AlternateLocation',
                        'RestoreAsFiles': 'AlternateLocation'}

    if rp_name is None and restore_mode == "RestoreAsFiles" and from_full_rp_name is not None:
        rp_name = from_full_rp_name
    rp_name = rp_name if rp_name is not None else 'DefaultRangeRecoveryPoint'

    if rp_name == 'DefaultRangeRecoveryPoint':
        recovery_points = list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item,
                                                  None, None, True)
        recovery_points = [rp for rp in recovery_points if rp.name == rp_name]

        if recovery_points == []:
            raise CLIError(
                """
                Invalid input.
                """)

        recovery_point = recovery_points[0]
    else:
        recovery_point = common.show_recovery_point(cmd, client, resource_group_name, vault_name, container_name,
                                                    item_name, rp_name, item_type,
                                                    backup_management_type="AzureWorkload")

    alternate_directory_paths = []
    if 'sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore':
        items = list_workload_items(cmd, target_vault_name, target_resource_group, target_subscription,
                                    target_container.name)
        for titem in items:
            if titem.properties.friendly_name == target_item.properties.friendly_name:
                if titem.properties.server_name == target_item.properties.server_name:
                    for path in recovery_point.properties.extended_info.data_directory_paths:
                        target_path = cust_help.get_target_path(path.type, path.path, path.logical_name,
                                                                titem.properties.data_directory_paths)
                        alternate_directory_paths.append((path.type, path.path, path.logical_name, target_path))

    db_name = None
    if restore_mode == 'AlternateWorkloadRestore':
        friendly_name = target_item.properties.friendly_name
        db_name = friendly_name + '/' + target_item_name

    container_id = None
    if restore_mode == 'AlternateWorkloadRestore':
        container_id = '/'.join(target_item.id.split('/')[:-2])

    if not ('sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore'):
        alternate_directory_paths = None

    recovery_mode = None
    if restore_mode == 'RestoreAsFiles':
        recovery_mode = 'FileRecovery'
        container_id = target_container.id

    return {
        'restore_mode': restore_mode_map[restore_mode],
        'container_uri': item.properties.container_name,
        'item_uri': item_name,
        'recovery_point_id': recovery_point.name,
        'log_point_in_time': log_point_in_time,
        'item_type': 'SQL' if 'sql' in item_type.lower() else 'SAPHana',
        'workload_type': item_type,
        'source_resource_id': item.properties.source_resource_id,
        'database_name': db_name,
        'container_id': container_id,
        'recovery_mode': recovery_mode,
        'filepath': filepath,
        'alternate_directory_paths': alternate_directory_paths}
show_recovery_config
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
def _check_map(item_type, item_type_map):
    if item_type is None:
        if item_type_map == workload_type_map:
            az_error = RequiredArgumentMissingError("""
            Workload type missing. Please enter a valid workload type using --workload-type.
            """)
            recommendation_text = "{} are the allowed values.".format(str(list(item_type_map.keys())))
            az_error.set_recommendation(recommendation_text)
            raise az_error
        if item_type_map == protectable_item_type_map:
            az_error = RequiredArgumentMissingError("""
            Protectable item type missing. Please enter a valid protectable item type using
            --protectable-item-type.
            """)
            recommendation_text = "{} are the allowed values.".format(str(list(item_type_map.keys())))
            az_error.set_recommendation(recommendation_text)
            raise az_error
        raise RequiredArgumentMissingError("Item type missing. Enter a valid item type.")
    if item_type_map.get(item_type) is not None:
        return item_type_map[item_type]
    error_text = "{} is an invalid argument.".format(item_type)
    recommendation_text = "{} are the allowed values.".format(str(list(item_type_map.keys())))
    az_error = InvalidArgumentValueError(error_text)
    az_error.set_recommendation(recommendation_text)
    raise az_error
_check_map
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
MIT
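To see the lookup-or-raise behaviour of _check_map without the CLI error types, a toy equivalent (the map below is illustrative, not the real workload_type_map):

type_map = {'MSSQL': 'SQLDataBase', 'SAPHANA': 'SAPHanaDatabase'}

def check_map(item_type, item_type_map):
    if item_type is None:
        raise ValueError('Item type missing. Enter a valid item type.')
    if item_type_map.get(item_type) is not None:
        return item_type_map[item_type]
    raise ValueError('{} is an invalid argument. {} are the allowed values.'.format(
        item_type, list(item_type_map.keys())))

print(check_map('MSSQL', type_map))  # SQLDataBase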
def datetime_type(string):
    """ Validate UTC datetime in accepted format. Examples: 31-12-2017, 31-12-2017-05:30:00 """
    accepted_date_formats = ['%d-%m-%Y', '%d-%m-%Y-%H:%M:%S']
    for form in accepted_date_formats:
        try:
            return datetime.strptime(string, form)
        except ValueError:  # checks next format
            pass
    raise InvalidArgumentValueError("""
    Input '{}' is not valid. Valid example: 31-12-2017, 31-12-2017-05:30:00
    """.format(string))
Validate UTC datetime in accepted format. Examples: 31-12-2017, 31-12-2017-05:30:00
datetime_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/_validators.py
MIT
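Example inputs for datetime_type, shown with the standard library directly (the same formats the validator accepts):

from datetime import datetime

for s in ('31-12-2017', '31-12-2017-05:30:00'):
    for fmt in ('%d-%m-%Y', '%d-%m-%Y-%H:%M:%S'):
        try:
            print(s, '->', datetime.strptime(s, fmt))
            break
        except ValueError:
            continue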
def validate_wl_restore(item, item_type, restore_mode, recovery_mode):
    # if source_resource_id is None or source_resource_id.lower() != item.properties.source_resource_id.lower():
    #     raise InvalidArgumentValueError("""
    #         The source_resource_id specified in recovery config file is incorrect. Please correct it and retry the
    #         operation. Correct value should be - {}.
    #         """.format(item.properties.source_resource_id))

    # if workload_type is None or workload_type.lower() != item.properties.workload_type.lower():
    #     raise InvalidArgumentValueError("""
    #         The workload_type specified in recovery config file is incorrect. Please correct it and retry the
    #         operation. Correct value should be - {}.
    #         """.format(item.properties.workload_type))

    if item_type is None or item_type.lower() not in ['sql', 'saphana']:
        raise InvalidArgumentValueError("""
            The item_type specified in recovery config file is incorrect. Please correct it and retry the
            operation. Allowed values are: 'SQL', 'SAPHana'.
            """)

    if item_type.lower() not in item.properties.workload_type.lower():
        raise InvalidArgumentValueError("""
            The item_type and workload_type specified in recovery config file does not match. Please correct either
            of them and retry the operation.
            """)

    if restore_mode not in ['OriginalLocation', 'AlternateLocation']:
        raise InvalidArgumentValueError("""
            The restore_mode specified in recovery config file is incorrect. Please correct it and retry the
            operation. Allowed values are: 'OriginalLocation', 'AlternateLocation'.
            """)

    if recovery_mode is not None and recovery_mode != 'FileRecovery':
        raise InvalidArgumentValueError("""
            The recovery_mode specified in recovery config file is incorrect. Please correct it and retry the
            operation.
            """)
validate_wl_restore
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/_validators.py
MIT
def validate_czr(backup_config_response, recovery_point, use_secondary_region):
    backup_storage_redundancy = backup_config_response.properties.storage_type
    cross_region_restore_flag = backup_config_response.properties.cross_region_restore_flag
    if (cross_region_restore_flag or backup_storage_redundancy == StorageType.ZONE_REDUNDANT):
        if recovery_point.tier_type is not None and (
                recovery_point.tier_type == "VaultStandard" or
                recovery_point.tier_type == "SnapshotAndVaultStandard"):
            if backup_storage_redundancy != StorageType.ZONE_REDUNDANT:
                if recovery_point.properties.zones is None:
                    raise ArgumentUsageError("""
                    Please ensure that either the vault storage redundancy is ZoneRedundant or the recovery point
                    is zone pinned, or remove --target-zone argument.
                    """)
                if not use_secondary_region:
                    raise ArgumentUsageError("""
                    Please ensure that either the vault storage redundancy is ZoneRedundant or the restore is not
                    to the primary region, or remove --target-zone argument.
                    """)
        else:
            raise ArgumentUsageError("""
            Please ensure that the given RP tier type is either 'VaultStandard' or 'SnapshotAndVaultStandard',
            or remove --target-zone argument.""")
    else:
        raise ArgumentUsageError("""
        Please ensure either the vault storage redundancy is ZoneRedundant or the vault has CRR enabled or try
        removing --target-zone argument.
        """)
validate_czr
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/_validators.py
MIT
def list_items(cmd, client, resource_group_name, vault_name, workload_type=None, container_name=None,
               container_type=None, use_secondary_region=None):
    workload_type = _check_map(workload_type, workload_type_map)
    filter_string = custom_help.get_filter_string({
        'backupManagementType': container_type,
        'itemType': workload_type})

    if use_secondary_region:
        if container_type is None:
            raise RequiredArgumentMissingError(
                """
                Provide --backup-management-type to list protected items in secondary region
                """)
        if container_type and container_type.lower() in crr_not_supported_bmt:
            raise InvalidArgumentValueError(
                """
                --use-secondary-region flag is not supported for the --backup-management-type provided.
                Please either remove the flag or query for any other backup-management-type.
                """)
        client = backup_protected_items_crr_cf(cmd.cli_ctx)
    items = client.list(vault_name, resource_group_name, filter_string)
    paged_items = custom_help.get_list_from_paged_response(items)

    if container_name:
        if custom_help.is_native_name(container_name):
            return [item for item in paged_items if
                    _is_container_name_match(item, container_name)]
        return [item for item in paged_items if
                item.properties.container_name.lower().split(';')[-1] == container_name.lower()]

    return paged_items
list_items
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom_common.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom_common.py
MIT
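The container-name filter above compares only the last ';'-separated segment of the fully qualified container name; a quick standalone check of that rule (the names below are made up for illustration):

names = ['IaasVMContainer;iaasvmcontainerv2;my-rg;my-vm', 'IaasVMContainer;iaasvmcontainerv2;other-rg;other-vm']
target = 'MY-VM'
print([n for n in names if n.lower().split(';')[-1] == target.lower()])  # first entry only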
def remove_identity(client, resource_group_name, vault_name, system_assigned=None, user_assigned=None):
    vault_details = client.get(resource_group_name, vault_name)

    curr_identity_details = vault_details.identity
    curr_identity_type = 'none'
    user_assigned_identity = None
    identity_type = 'none'

    if curr_identity_details is not None:
        curr_identity_type = curr_identity_details.type.lower()

    if user_assigned is not None:
        if curr_identity_type not in ["userassigned", "systemassigned, userassigned"]:
            raise ArgumentUsageError(
                """
                There are no user assigned identities to be removed.
                """)
        userid = None
        remove_count_of_userMSI = 0
        totaluserMSI = 0
        user_assigned_identity = {}
        # Count how many of the vault's user assigned identities were requested for removal.
        for element in curr_identity_details.user_assigned_identities.keys():
            if element in user_assigned:
                remove_count_of_userMSI += 1
            totaluserMSI += 1
        if not user_assigned:
            remove_count_of_userMSI = totaluserMSI
        # Identities to remove are sent with a None value.
        for userMSI in user_assigned:
            user_assigned_identity[userMSI] = userid
        if system_assigned is not None:
            if curr_identity_type != "systemassigned, userassigned":
                raise ArgumentUsageError(
                    """
                    System assigned identity is not enabled for Recovery Services Vault.
                    """)
            if remove_count_of_userMSI == totaluserMSI:
                identity_type = 'none'
                user_assigned_identity = None
            else:
                identity_type = "userassigned"
        else:
            if curr_identity_type == 'systemassigned, userassigned':
                if remove_count_of_userMSI == totaluserMSI:
                    identity_type = 'systemassigned'
                    user_assigned_identity = None
                else:
                    identity_type = 'systemassigned,userassigned'
            else:
                if remove_count_of_userMSI == totaluserMSI:
                    identity_type = 'none'
                    user_assigned_identity = None
                else:
                    identity_type = 'userassigned'
    elif system_assigned is not None:
        return _remove_system_identity(client, resource_group_name, vault_name, curr_identity_type)
    else:
        raise RequiredArgumentMissingError(
            """
            Invalid parameters, no operation specified.
            """)

    identity_data = IdentityData(type=identity_type, user_assigned_identities=user_assigned_identity)
    vault = PatchVault(identity=identity_data)
    return client.begin_update(resource_group_name, vault_name, vault)
remove_identity
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom.py
MIT
def update_encryption(cmd, client, resource_group_name, vault_name, encryption_key_id, infrastructure_encryption=None,
                      mi_user_assigned=None, mi_system_assigned=None, tenant_id=None):
    keyVaultproperties = CmkKeyVaultProperties(key_uri=encryption_key_id)

    vault_details = client.get(resource_group_name, vault_name)
    encryption_details = backup_resource_encryption_config_cf(cmd.cli_ctx).get(vault_name, resource_group_name)
    encryption_type = encryption_details.properties.encryption_at_rest_type
    identity_details = vault_details.identity
    identity_type = 'none'

    if identity_details is not None:
        identity_type = identity_details.type.lower()
    if identity_details is None or identity_type == 'none':
        raise ValidationError(
            """
            Please enable identities of Recovery Services Vault
            """)

    if encryption_type != "CustomerManaged":
        if mi_system_assigned is None and mi_user_assigned is None:
            raise RequiredArgumentMissingError(
                """
                Please provide user assigned identity id using --identity-id parameter or set
                --use-system-assigned flag
                """)
        if infrastructure_encryption is None:
            infrastructure_encryption = "Disabled"
    if mi_user_assigned is not None and mi_system_assigned:
        raise MutuallyExclusiveArgumentError(
            """
            Both --identity-id and --use-system-assigned parameters can't be given at the same time.
            """)

    kekIdentity = None
    is_identity_present = False
    if mi_user_assigned is not None:
        if identity_type not in ["userassigned", "systemassigned, userassigned"]:
            raise ArgumentUsageError(
                """
                Please add user assigned identity for Recovery Services Vault.
                """)
        if mi_user_assigned in identity_details.user_assigned_identities.keys():
            is_identity_present = True
        if not is_identity_present:
            raise InvalidArgumentValueError(
                """
                This user assigned identity is not available for Recovery Services Vault.
                """)

    if mi_system_assigned:
        if identity_type not in ["systemassigned", "systemassigned, userassigned"]:
            raise ArgumentUsageError(
                """
                Please make sure that system assigned identity is enabled for Recovery Services Vault
                """)
    if mi_user_assigned is not None or mi_system_assigned:
        kekIdentity = CmkKekIdentity(user_assigned_identity=mi_user_assigned,
                                     use_system_assigned_identity=mi_system_assigned)
    encryption_data = VaultPropertiesEncryption(key_vault_properties=keyVaultproperties, kek_identity=kekIdentity,
                                                infrastructure_encryption=infrastructure_encryption)
    vault_properties = VaultProperties(encryption=encryption_data)
    vault = PatchVault(properties=vault_properties)
    if cust_help.has_resource_guard_mapping(cmd.cli_ctx, resource_group_name, vault_name,
                                            "RecoveryServicesModifyEncryptionSettings"):
        # Cross tenant scenario
        if tenant_id is not None:
            client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesClient,
                                             aux_tenants=[tenant_id]).vaults
        vault.properties.resource_guard_operation_requests = [cust_help.get_resource_guard_operation_request(
            cmd.cli_ctx, resource_group_name, vault_name, "RecoveryServicesModifyEncryptionSettings")]
    client.begin_update(resource_group_name, vault_name, vault).result()
update_encryption
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom.py
MIT
def set_policy(cmd, client, resource_group_name, vault_name, policy, policy_name, tenant_id=None,
               is_critical_operation=False):
    policy_object = cust_help.get_policy_from_json(client, policy)
    retention_range_in_days = policy_object.properties.instant_rp_retention_range_in_days
    schedule_run_frequency = policy_object.properties.schedule_policy.schedule_run_frequency

    # Validating range of days input
    if retention_range_in_days is not None:
        if policy_object.properties.policy_type != 'V2':
            if schedule_run_frequency == ScheduleRunType.weekly and retention_range_in_days != 5:
                raise InvalidArgumentValueError(
                    """
                    Retention policy range must be equal to 5.
                    """)
            if schedule_run_frequency == ScheduleRunType.daily and (retention_range_in_days > 5 or
                                                                    retention_range_in_days < 1):
                raise InvalidArgumentValueError(
                    """
                    Retention policy range must be between 1 to 5.
                    """)
        else:
            if (retention_range_in_days > 30 or retention_range_in_days < 1):
                raise InvalidArgumentValueError(
                    """
                    Retention policy range must be between 1 to 30.
                    """)
    if policy_name is None:
        policy_name = policy_object.name

    additional_properties = policy_object.properties.additional_properties
    if 'instantRpDetails' in additional_properties:
        policy_object.properties.instant_rp_details = additional_properties['instantRpDetails']

    if is_critical_operation:
        existing_policy = common.show_policy(client, resource_group_name, vault_name, policy_name)
        if cust_help.is_retention_duration_decreased(existing_policy, policy_object, "AzureIaasVM"):
            # update the payload with critical operation and add auxiliary header for cross tenant case
            if tenant_id is not None:
                client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient,
                                                 aux_tenants=[tenant_id]).protection_policies
            policy_object.properties.resource_guard_operation_requests = [
                cust_help.get_resource_guard_operation_request(cmd.cli_ctx, resource_group_name, vault_name,
                                                               "updatePolicy")]
    return client.create_or_update(vault_name, resource_group_name, policy_name, policy_object)
set_policy
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom.py
MIT
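The instant-RP retention rules above reduce to a small table (V1 weekly: exactly 5; V1 daily: 1-5; V2: 1-30); a minimal standalone re-statement covering the weekly/daily cases:

def validate_instant_rp_retention(days, policy_type, frequency):
    # Mirrors set_policy's checks; returns True when the value is acceptable.
    if policy_type != 'V2':
        if frequency == 'Weekly':
            return days == 5
        if frequency == 'Daily':
            return 1 <= days <= 5
    return 1 <= days <= 30

print(validate_instant_rp_retention(5, 'V1', 'Weekly'))   # True
print(validate_instant_rp_retention(7, 'V1', 'Daily'))    # False
print(validate_instant_rp_retention(30, 'V2', 'Daily'))   # True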
def enable_protection_for_vm(cmd, client, resource_group_name, vault_name, vm, policy_name, diskslist=None,
                             disk_list_setting=None, exclude_all_data_disks=None):
    vm_name, vm_rg = cust_help.get_resource_name_and_rg(resource_group_name, vm)
    vm = virtual_machines_cf(cmd.cli_ctx).get(vm_rg, vm_name)
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)

    logger.warning('Ignite (November) 2023 onwards Virtual Machine deployments using PS and CLI will default to '
                   'security type Trusted Launch. Please ensure Policy Name used with "az backup '
                   'protection enable-for-vm" command is of type Enhanced Policy for Trusted Launch VMs. Non-Trusted '
                   'Launch Virtual Machines will not be impacted by this change. To know more about default change '
                   'and Trusted Launch, please visit https://aka.ms/TLaD.')

    # throw error if policy has more than 1000 protected VMs.
    if policy.properties.protected_items_count >= 1000:
        raise CLIError("Cannot configure backup for more than 1000 VMs per policy")

    if vm.location.lower() != vault.location.lower():
        raise CLIError(
            """
            The VM should be in the same location as that of the Recovery Services vault to enable protection.
            """)

    if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
        raise CLIError(
            """
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to protect the workload.
            """)

    if (hasattr(vm, 'security_profile') and hasattr(vm.security_profile, 'security_type') and
            vm.security_profile.security_type is not None and
            vm.security_profile.security_type.lower() == 'trustedlaunch'):
        if policy.properties.policy_type != 'V2':
            raise InvalidArgumentValueError(
                """
                Trusted VM can only be protected using Enhanced Policy. Please provide a valid
                IaasVM Enhanced Policy in --policy-name argument.
                """)

    # Get protectable item.
    protectable_item = _get_protectable_item_for_vm(cmd.cli_ctx, vault_name, resource_group_name, vm_name, vm_rg)
    if protectable_item is None:
        raise CLIError(
            """
            The specified Azure Virtual Machine Not Found. Possible causes are
               1. VM does not exist
               2. The VM name or the Service name needs to be case sensitive
               3. VM is already Protected with same or other Vault.
                  Please Unprotect VM first and then try to protect it again.
            Please contact Microsoft for further assistance.
            """)

    # Construct enable protection request object
    container_uri = cust_help.get_protection_container_uri_from_id(protectable_item.id)
    item_uri = cust_help.get_protectable_item_uri_from_id(protectable_item.id)
    vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
    vm_item_properties.policy_id = policy.id
    vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id

    if disk_list_setting is not None and exclude_all_data_disks is not None:
        raise MutuallyExclusiveArgumentError(
            """
            Both --disk-list-setting and --exclude-all-data-disks can not be provided together.
            """)
    if disk_list_setting is not None:
        if diskslist is None:
            raise CLIError("Please provide LUNs of disks that will be included or excluded.")
        is_inclusion_list = False
        if disk_list_setting == "include":
            is_inclusion_list = True
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=diskslist,
                                                            is_inclusion_list=is_inclusion_list)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties
    elif exclude_all_data_disks:
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=[], is_inclusion_list=True)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties

    vm_item = ProtectedItemResource(properties=vm_item_properties)

    # Trigger enable protection and wait for completion
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, vm_item, cls=cust_help.get_pipeline_response)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
enable_protection_for_vm
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom.py
MIT
def _get_alr_restore_mode(target_vm_name, target_vnet_name, target_vnet_resource_group, target_subnet_name,
                          target_resource_group):
    if (target_vm_name is None and target_vnet_name is None and target_vnet_resource_group is None and
            target_subnet_name is None):
        return 'RestoreDisks'
    if not (target_vm_name is None or target_vnet_name is None or target_vnet_resource_group is None or
            target_subnet_name is None):
        if target_resource_group is None:
            raise RequiredArgumentMissingError(
                """
                --target-resource-group is required for ALR. Please specify a valid --target-resource-group.
                """)
        return 'AlternateLocation'
    raise RequiredArgumentMissingError(
        """
        Target VM details are not specified completely. Please make sure all these parameters are specified:
        --target-vm-name, --target-vnet-name, --target-vnet-resource-group, --target-subnet-name.
        """)
_get_alr_restore_mode
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom.py
MIT
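The mode resolution above follows a simple rule: all four target params absent means RestoreDisks, all present (plus a target resource group) means AlternateLocation, anything partial is an error. A standalone sketch of the same decision:

def alr_mode(vm, vnet, vnet_rg, subnet, target_rg):
    params = (vm, vnet, vnet_rg, subnet)
    if all(p is None for p in params):
        return 'RestoreDisks'
    if all(p is not None for p in params):
        if target_rg is None:
            raise ValueError('--target-resource-group is required for ALR.')
        return 'AlternateLocation'
    raise ValueError('Target VM details are not specified completely.')

print(alr_mode(None, None, None, None, None))          # RestoreDisks
print(alr_mode('vm', 'vnet', 'rg', 'subnet', 'trg'))   # AlternateLocation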
def restore_disks(cmd, client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account,
                  target_resource_group=None, restore_to_staging_storage_account=None, restore_only_osdisk=None,
                  diskslist=None, restore_as_unmanaged_disks=None, use_secondary_region=None, rehydration_duration=15,
                  rehydration_priority=None, disk_encryption_set_id=None, mi_system_assigned=None,
                  mi_user_assigned=None, target_zone=None, restore_mode='AlternateLocation', target_vm_name=None,
                  target_vnet_name=None, target_vnet_resource_group=None, target_subnet_name=None,
                  target_subscription_id=None, storage_account_resource_group=None, restore_to_edge_zone=None,
                  tenant_id=None, disk_access_option=None, target_disk_access_id=None):
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    vault_location = vault.location
    vault_identity = vault.identity

    target_subscription = get_subscription_id(cmd.cli_ctx)
    if target_subscription_id is not None and restore_mode == "AlternateLocation":
        vault_csr_state = get_vault_csr_state(vault)
        if vault_csr_state is None or vault_csr_state == "Enabled":
            target_subscription = target_subscription_id
        else:
            raise ArgumentUsageError(
                """
                Cross Subscription Restore is not allowed on this Vault. Please either enable CSR on the vault or
                try restoring in the same subscription.
                """)
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name, container_name,
                     item_name, "AzureIaasVM", "VM", use_secondary_region)
    cust_help.validate_item(item)

    recovery_point = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name, vault_name,
                                         container_name, item_name, rp_name, "AzureIaasVM", "VM",
                                         use_secondary_region)

    common.fetch_tier_for_rp(recovery_point)

    validators.validate_archive_restore(recovery_point, rehydration_priority)

    encryption = backup_resource_encryption_config_cf(cmd.cli_ctx).get(vault_name, resource_group_name)

    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # Original Storage Account Restore Logic
    use_original_storage_account = _should_use_original_storage_account(recovery_point,
                                                                        restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning(
            """
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to
            given storage account.
            """)

    # Construct trigger restore request object
    if storage_account_resource_group is None:
        storage_account_resource_group = resource_group_name
    sa_name, sa_rg = cust_help.get_resource_name_and_rg(storage_account_resource_group, storage_account)
    _storage_account_id = _get_storage_account_id(cmd.cli_ctx, target_subscription, sa_name, sa_rg)
    _source_resource_id = item.properties.source_resource_id
    target_rg_id = None

    if restore_mode == "AlternateLocation":
        restore_mode = _get_alr_restore_mode(target_vm_name, target_vnet_name, target_vnet_resource_group,
                                             target_subnet_name, target_resource_group)

        if restore_as_unmanaged_disks and target_resource_group is not None:
            raise MutuallyExclusiveArgumentError(
                """
                Both restore_as_unmanaged_disks and target_resource_group can't be specified.
                Please give only one parameter and retry.
                """)

        if recovery_point.properties.is_managed_virtual_machine:
            if target_resource_group is not None:
                target_rg_id = "/subscriptions/" + target_subscription + "/resourceGroups/" + target_resource_group
            if not restore_as_unmanaged_disks and target_resource_group is None:
                logger.warning(
                    """
                    The disks of the managed VM will be restored as unmanaged since targetRG parameter is not
                    provided. This will NOT leverage the instant restore functionality. Hence it can be
                    significantly slow based on given storage account. To leverage instant restore, provide the
                    target RG parameter. Otherwise, provide the intent next time by passing the
                    --restore-as-unmanaged-disks parameter
                    """)

    _validate_restore_disk_parameters(restore_only_osdisk, diskslist)
    restore_disk_lun_list = None
    if restore_only_osdisk:
        restore_disk_lun_list = []

    if diskslist:
        restore_disk_lun_list = diskslist

    validators.validate_mi_used_for_restore_disks(vault_identity, mi_system_assigned, mi_user_assigned)

    trigger_restore_properties = _get_trigger_restore_properties(rp_name, vault_location, _storage_account_id,
                                                                 _source_resource_id, target_rg_id,
                                                                 use_original_storage_account, restore_disk_lun_list,
                                                                 rehydration_duration, rehydration_priority,
                                                                 None if recovery_point.
                                                                 properties.recovery_point_tier_details is None else
                                                                 recovery_point.tier_type, disk_encryption_set_id,
                                                                 encryption, recovery_point, mi_system_assigned,
                                                                 mi_user_assigned, restore_mode)

    _set_trigger_restore_properties(cmd, trigger_restore_properties, target_vm_name, target_vnet_name,
                                    target_vnet_resource_group, target_subnet_name, vault_name, resource_group_name,
                                    recovery_point, target_zone, target_rg_id, _source_resource_id, restore_mode,
                                    target_subscription, use_secondary_region)

    # Edge zones-specific code. Not using existing set/get properties code as it is messy and prone to errors
    trigger_restore_properties = _set_edge_zones_trigger_restore_properties(cmd, trigger_restore_properties,
                                                                            restore_to_edge_zone,
                                                                            recovery_point, target_subscription,
                                                                            use_secondary_region, restore_mode)

    trigger_restore_properties = _set_pe_restore_trigger_restore_properties(cmd, trigger_restore_properties,
                                                                            disk_access_option, target_disk_access_id,
                                                                            recovery_point, use_secondary_region)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        validators.validate_crr(target_rg_id, rehydration_priority)
        azure_region = secondary_region_map[vault_location]
        crr_access_token = _get_crr_access_token(cmd, azure_region, vault_name, resource_group_name, container_uri,
                                                 item_uri, rp_name)
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()

        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    if cust_help.has_resource_guard_mapping(cmd.cli_ctx, resource_group_name, vault_name, "RecoveryServicesRestore"):
        # Cross Tenant scenario
        if tenant_id is not None:
            client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient,
                                             aux_tenants=[tenant_id]).restores
        trigger_restore_request.properties.resource_guard_operation_requests = [
            cust_help.get_resource_guard_operation_request(
                cmd.cli_ctx, resource_group_name, vault_name, "RecoveryServicesRestore")]

    # Trigger restore
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  rp_name, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
restore_disks
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/backup/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/backup/custom.py
MIT
def _configure_db_dw_params(arg_ctx):
    """
    Configures params that are based on `Database` resource and therefore apply to one or more DB/DW create/update
    commands. The idea is that this does some basic configuration of each property. Each command can then
    potentially build on top of this (e.g. to give a parameter more specific help text) and .ignore() parameters
    that aren't applicable.

    Normally these param configurations would be implemented at the command group level, but these params are used
    across 2 different param groups - `sql db` and `sql dw`. So extracting it out into this common function
    prevents duplication.
    """

    arg_ctx.argument('max_size_bytes', arg_type=max_size_bytes_param_type)

    arg_ctx.argument('elastic_pool_id', arg_type=elastic_pool_id_param_type)

    arg_ctx.argument('compute_model', arg_type=compute_model_param_type)

    arg_ctx.argument('auto_pause_delay', arg_type=auto_pause_delay_param_type)

    arg_ctx.argument('min_capacity', arg_type=min_capacity_param_type)

    arg_ctx.argument('read_scale', arg_type=read_scale_param_type)

    arg_ctx.argument('high_availability_replica_count', arg_type=read_replicas_param_type)

    creation_arg_group = 'Creation'

    arg_ctx.argument('collation',
                     arg_group=creation_arg_group,
                     help='The collation of the database.')

    arg_ctx.argument('catalog_collation',
                     arg_group=creation_arg_group,
                     arg_type=get_enum_type(CatalogCollationType),
                     help='Collation of the metadata catalog.')

    # WideWorldImportersStd and WideWorldImportersFull cannot be successfully created.
    # AdventureWorksLT is the only sample name that is actually supported.
    arg_ctx.argument('sample_name',
                     arg_group=creation_arg_group,
                     arg_type=get_enum_type([SampleName.adventure_works_lt]),
                     help='The name of the sample schema to apply when creating this database.')

    arg_ctx.argument('license_type',
                     arg_type=get_enum_type(DatabaseLicenseType),
                     help='The license type to apply for this database. '
                          '``LicenseIncluded`` if you need a license, or ``BasePrice`` '
                          'if you have a license and are eligible for the Azure Hybrid Benefit.')

    arg_ctx.argument('zone_redundant', arg_type=zone_redundant_param_type)

    arg_ctx.argument('preferred_enclave_type', arg_type=preferred_enclave_param_type)

    arg_ctx.argument('assign_identity', arg_type=database_assign_identity_param_type)

    arg_ctx.argument('encryption_protector', arg_type=database_encryption_protector_param_type)

    arg_ctx.argument('keys', arg_type=database_keys_param_type)

    arg_ctx.argument('keys_to_remove', arg_type=database_keys_to_remove_param_type)

    arg_ctx.argument('user_assigned_identity_id', arg_type=database_user_assigned_identity_param_type)

    arg_ctx.argument('federated_client_id', arg_type=database_federated_client_id_param_type)

    arg_ctx.argument('expand_keys', arg_type=database_expand_keys_param_type)

    arg_ctx.argument('availability_zone', arg_type=database_availability_zone_param_type)

    arg_ctx.argument('use_free_limit', arg_type=database_use_free_limit)

    arg_ctx.argument('free_limit_exhaustion_behavior', arg_type=database_free_limit_exhaustion_behavior)

    arg_ctx.argument('encryption_protector_auto_rotation',
                     arg_type=database_encryption_protector_auto_rotation_param_type)

    arg_ctx.argument('manual_cutover', arg_type=manual_cutover_param_type)

    arg_ctx.argument('perform_cutover', arg_type=perform_cutover_param_type)
Configures params that are based on `Database` resource and therefore apply to one or more DB/DW create/update commands. The idea is that this does some basic configuration of each property. Each command can then potentially build on top of this (e.g. to give a parameter more specific help text) and .ignore() parameters that aren't applicable. Normally these param configurations would be implemented at the command group level, but these params are used across 2 different param groups - `sql db` and `sql dw`. So extracting it out into this common function prevents duplication.
_configure_db_dw_params
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/_params.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/_params.py
MIT
def _configure_db_dw_create_params(
        arg_ctx,
        engine,
        create_mode):
    """
    Configures params for db/dw create commands.

    The PUT database REST API has many parameters and many modes (`create_mode`) that control
    which parameters are valid. To make it easier for CLI users to get the param combinations
    correct, these create modes are separated into different commands (e.g.: create, copy,
    restore, etc).

    On top of that, some create modes and some params are not allowed if the database edition is
    DataWarehouse. For this reason, regular database commands are separated from datawarehouse
    commands (`db` vs `dw`.)

    As a result, the param combination matrix is a little complicated.

    When adding a new param, we want to make sure that the param is visible for the appropriate
    commands. We also want to avoid duplication. Instead of spreading out & duplicating the param
    definitions across all the different commands, it has been more effective to define this
    reusable function.

    The main task here is to create extra params based on the `Database` model, then .ignore() the
    params that aren't applicable to the specified engine and create mode. There is also some minor
    tweaking of help text to make the help text more specific to creation.

    engine: Engine enum value (e.g. `db`, `dw`)
    create_mode: Valid CreateMode enum value (e.g. `default`, `copy`, etc)
    """

    # *** Step 0: Validation ***

    # DW does not support all create modes. Check that engine and create_mode are consistent.
    if engine == Engine.dw and create_mode not in [
            CreateMode.default,
            CreateMode.point_in_time_restore,
            CreateMode.restore]:
        raise ValueError('Engine {} does not support create mode {}'.format(engine, create_mode))

    # *** Step 1: Create extra params ***

    # Create args that will be used to build up the Database object
    #
    # IMPORTANT: It is very easy to add a new parameter and accidentally forget to .ignore() it in
    # some commands that it is not applicable to. Therefore, when adding a new param, you should
    # compare command help before & after your change.
    # e.g.:
    #
    #   # Get initial help text
    #   git checkout dev
    #   $file = 'help_original.txt'
    #   az sql db create -h >> $file
    #   az sql db copy -h >> $file
    #   az sql db restore -h >> $file
    #   az sql db replica create -h >> $file
    #   az sql db update -h >> $file
    #   az sql dw create -h >> $file
    #   az sql dw update -h >> $file
    #
    #   # Get updated help text
    #   git checkout mybranch
    #   $file = 'help_updated.txt'
    #   az sql db create -h >> $file
    #   az sql db copy -h >> $file
    #   az sql db restore -h >> $file
    #   az sql db replica create -h >> $file
    #   az sql db update -h >> $file
    #   az sql dw create -h >> $file
    #   az sql dw update -h >> $file
    #
    # Then compare 'help_original.txt' <-> 'help_updated.txt' in your favourite text diff tool.
    create_args_for_complex_type(
        arg_ctx, 'parameters', Database, [
            'catalog_collation',
            'collation',
            'elastic_pool_id',
            'license_type',
            'max_size_bytes',
            'name',
            'restore_point_in_time',
            'sample_name',
            'sku',
            'source_database_deletion_date',
            'tags',
            'zone_redundant',
            'auto_pause_delay',
            'min_capacity',
            'compute_model',
            'read_scale',
            'high_availability_replica_count',
            'requested_backup_storage_redundancy',
            'maintenance_configuration_id',
            'is_ledger_on',
            'preferred_enclave_type',
            'assign_identity',
            'encryption_protector',
            'keys',
            'user_assigned_identity_id',
            'federated_client_id',
            'availability_zone',
            'encryption_protector_auto_rotation',
            'use_free_limit',
            'free_limit_exhaustion_behavior',
            'manual_cutover',
            'perform_cutover'
        ])

    # Create args that will be used to build up the Database's Sku object
    create_args_for_complex_type(
        arg_ctx, 'sku', Sku, [
            'capacity',
            'family',
            'name',
            'tier',
        ])

    # *** Step 2: Apply customizations specific to create (as opposed to update) ***

    arg_ctx.argument('name',  # Note: this is sku name, not database name
                     options_list=['--service-objective', '--service-level-objective'],
                     arg_group=sku_arg_group,
                     required=False,
                     help='The service objective for the new database. For example: ' +
                     (db_service_objective_examples if engine == Engine.db else dw_service_objective_examples))

    arg_ctx.argument('elastic_pool_id',
                     help='The name or resource id of the elastic pool to create the database in.')

    arg_ctx.argument('requested_backup_storage_redundancy', arg_type=backup_storage_redundancy_param_type)

    arg_ctx.argument('maintenance_configuration_id', arg_type=maintenance_configuration_id_param_type)

    arg_ctx.argument('is_ledger_on', arg_type=ledger_on_param_type)

    arg_ctx.argument('preferred_enclave_type', arg_type=preferred_enclave_param_type)

    arg_ctx.argument('assign_identity', arg_type=database_assign_identity_param_type)

    arg_ctx.argument('encryption_protector', arg_type=database_encryption_protector_param_type)

    arg_ctx.argument('keys', arg_type=database_keys_param_type)

    arg_ctx.argument('user_assigned_identity_id', arg_type=database_user_assigned_identity_param_type)

    arg_ctx.argument('federated_client_id', arg_type=database_federated_client_id_param_type)

    arg_ctx.argument('encryption_protector_auto_rotation',
                     arg_type=database_encryption_protector_auto_rotation_param_type)

    # *** Step 3: Ignore params that are not applicable (based on engine & create mode) ***

    # Only applicable to default create mode. Also only applicable to db.
    if create_mode != CreateMode.default or engine != Engine.db:
        arg_ctx.ignore('sample_name')
        arg_ctx.ignore('catalog_collation')
        arg_ctx.ignore('maintenance_configuration_id')
        arg_ctx.ignore('is_ledger_on')
        arg_ctx.ignore('use_free_limit')
        arg_ctx.ignore('free_limit_exhaustion_behavior')

    # Only applicable to point in time restore or deleted restore create mode.
    if create_mode not in [CreateMode.restore, CreateMode.point_in_time_restore]:
        arg_ctx.ignore('restore_point_in_time', 'source_database_deletion_date')

    # 'collation', 'tier', and 'max_size_bytes' are ignored (or rejected) when creating a copy
    # or secondary because their values are determined by the source db.
    if create_mode in [CreateMode.copy, CreateMode.secondary]:
        arg_ctx.ignore('collation', 'tier', 'max_size_bytes')

    # collation and max_size_bytes are ignored when restoring because their values are determined by
    # the source db.
    if create_mode in [
            CreateMode.restore,
            CreateMode.point_in_time_restore,
            CreateMode.RECOVERY,
            CreateMode.RESTORE_LONG_TERM_RETENTION_BACKUP]:
        arg_ctx.ignore('collation', 'max_size_bytes')

    # 'manual_cutover' and 'perform_cutover' are ignored when creating a database,
    # as they are only applicable during update
    if create_mode in CreateMode:
        arg_ctx.ignore('manual_cutover', 'perform_cutover')

    if engine == Engine.dw:
        # Elastic pool is only for SQL DB.
        arg_ctx.ignore('elastic_pool_id')

        # Edition is always 'DataWarehouse'
        arg_ctx.ignore('tier')

        # License types do not yet exist for DataWarehouse
        arg_ctx.ignore('license_type')

        # Preferred enclave types do not yet exist for DataWarehouse
        arg_ctx.ignore('preferred_enclave_type')

        # Family is not applicable to DataWarehouse
        arg_ctx.ignore('family')

        # Identity is not applicable to DataWarehouse
        arg_ctx.ignore('assign_identity')

        # Encryption Protector is not applicable to DataWarehouse
        arg_ctx.ignore('encryption_protector')

        # Keys is not applicable to DataWarehouse
        arg_ctx.ignore('keys')

        # User Assigned Identities is not applicable to DataWarehouse
        arg_ctx.ignore('user_assigned_identity_id')

        # Federated client id is not applicable to DataWarehouse
        arg_ctx.ignore('federated_client_id')

        # Encryption Protector auto rotation is not applicable to DataWarehouse
        arg_ctx.ignore('encryption_protector_auto_rotation')

        # Provisioning with capacity is not applicable to DataWarehouse
        arg_ctx.ignore('capacity')

        # Serverless offerings are not applicable to DataWarehouse
        arg_ctx.ignore('auto_pause_delay')
        arg_ctx.ignore('min_capacity')
        arg_ctx.ignore('compute_model')

        # Free limit parameters are not applicable to DataWarehouse
        arg_ctx.ignore('use_free_limit')
        arg_ctx.ignore('free_limit_exhaustion_behavior')

        # ReadScale properties are not valid for DataWarehouse
        # --read-replica-count was accidentally included in previous releases and
        # therefore is hidden using `deprecate_info` instead of `ignore`
        arg_ctx.ignore('read_scale')
        arg_ctx.ignore('high_availability_replica_count')

        arg_ctx.argument('read_replica_count',
                         options_list=['--read-replica-count'],
                         deprecate_info=arg_ctx.deprecate(hide=True))

        # Zone redundant was accidentally included in previous releases and
        # therefore is hidden using `deprecate_info` instead of `ignore`
        arg_ctx.argument('zone_redundant',
                         options_list=['--zone-redundant'],
                         deprecate_info=arg_ctx.deprecate(hide=True))

        # Manual-cutover and Perform-cutover are not valid for DataWarehouse
        arg_ctx.ignore('manual_cutover')
        arg_ctx.ignore('perform_cutover')
Configures params for db/dw create commands. The PUT database REST API has many parameters and many modes (`create_mode`) that control which parameters are valid. To make it easier for CLI users to get the param combinations correct, these create modes are separated into different commands (e.g.: create, copy, restore, etc). On top of that, some create modes and some params are not allowed if the database edition is DataWarehouse. For this reason, regular database commands are separated from datawarehouse commands (`db` vs `dw`.) As a result, the param combination matrix is a little complicated. When adding a new param, we want to make sure that the param is visible for the appropriate commands. We also want to avoid duplication. Instead of spreading out & duplicating the param definitions across all the different commands, it has been more effective to define this reusable function. The main task here is to create extra params based on the `Database` model, then .ignore() the params that aren't applicable to the specified engine and create mode. There is also some minor tweaking of help text to make the help text more specific to creation. engine: Engine enum value (e.g. `db`, `dw`) create_mode: Valid CreateMode enum value (e.g. `default`, `copy`, etc)
_configure_db_dw_create_params
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/_params.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/_params.py
MIT
def _expansion_validator_impl(namespace):
    '''
    The validator creates an argument of a given type from a specific set of arguments from the CLI command.
    :param namespace: The argparse namespace represents the CLI arguments.
    :return: The argument of specific type.
    '''
    ns = vars(namespace)
    kwargs = {k: ns[k] for k in ns if k in set(expanded_arguments)}
    setattr(namespace, assigned_arg, model_type(**kwargs))
The validator creates an argument of a given type from a specific set of arguments from the CLI command. :param namespace: The argparse namespace represents the CLI arguments. :return: The argument of specific type.
create_args_for_complex_type.create_args_for_complex_type.get_complex_argument_processor._expansion_validator_impl
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/_validators.py
MIT
def get_complex_argument_processor(expanded_arguments, assigned_arg, model_type):
    '''
    Return a validator which will aggregate multiple arguments to one complex argument.
    '''
    def _expansion_validator_impl(namespace):
        '''
        The validator creates an argument of a given type from a specific set of arguments from the CLI command.
        :param namespace: The argparse namespace represents the CLI arguments.
        :return: The argument of specific type.
        '''
        ns = vars(namespace)
        kwargs = {k: ns[k] for k in ns if k in set(expanded_arguments)}
        setattr(namespace, assigned_arg, model_type(**kwargs))
    return _expansion_validator_impl
Return a validator which will aggregate multiple arguments to one complex argument.
create_args_for_complex_type.get_complex_argument_processor
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/_validators.py
MIT
def create_args_for_complex_type(arg_ctx, dest, model_type, arguments):
    '''
    Creates args that will be combined into an object by an arg validator.
    '''
    from knack.arguments import ignore_type
    from knack.introspection import option_descriptions

    def get_complex_argument_processor(expanded_arguments, assigned_arg, model_type):
        '''
        Return a validator which will aggregate multiple arguments to one complex argument.
        '''
        def _expansion_validator_impl(namespace):
            '''
            The validator creates an argument of a given type from a specific set of arguments from the CLI command.
            :param namespace: The argparse namespace represents the CLI arguments.
            :return: The argument of specific type.
            '''
            ns = vars(namespace)
            kwargs = {k: ns[k] for k in ns if k in set(expanded_arguments)}
            setattr(namespace, assigned_arg, model_type(**kwargs))
        return _expansion_validator_impl

    # Fetch the documentation for model parameters first. For models, which are classes derived
    # from msrest.serialization.Model and used in the SDK API to carry parameters, the
    # documentation of their properties is attached to the classes instead of constructors.
    parameter_docs = option_descriptions(model_type)

    for name in arguments:
        # Get the validation map from the model type in order to determine
        # whether the argument should be required
        validation = model_type._validation.get(name, None)  # pylint: disable=protected-access
        required = validation.get('required', False) if validation else False

        # Generate the command line argument name from the property name
        options_list = ['--' + name.replace('_', '-')]

        # Get the help text from the model type
        help_text = parameter_docs.get(name, None)

        # Create the additional command line argument
        arg_ctx.extra(
            name,
            required=required,
            options_list=options_list,
            help=help_text)

    # Rename the original command line argument and ignore it (i.e. make invisible)
    # so that it does not show up on command line and does not conflict with any other
    # arguments.
    dest_option = ['--__{}'.format(dest.upper())]
    arg_ctx.argument(dest,
                     arg_type=ignore_type,
                     options_list=dest_option,
                     # The argument is hidden from the command line, but its value
                     # will be populated by this validator.
                     validator=get_complex_argument_processor(arguments, dest, model_type))
Creates args that will be combined into an object by an arg validator.
create_args_for_complex_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/_validators.py
MIT
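The validator pattern above (expand a model into flat CLI args, then fold them back into one object) can be demonstrated without knack; a minimal sketch with argparse and a stand-in model class:

import argparse

class Sku:  # stand-in for the SDK model
    def __init__(self, name=None, tier=None, capacity=None):
        self.name, self.tier, self.capacity = name, tier, capacity

def get_complex_argument_processor(expanded_arguments, assigned_arg, model_type):
    def _impl(namespace):
        # Gather the expanded flat args back into one model instance.
        ns = vars(namespace)
        kwargs = {k: ns[k] for k in ns if k in set(expanded_arguments)}
        setattr(namespace, assigned_arg, model_type(**kwargs))
    return _impl

parser = argparse.ArgumentParser()
for name in ('name', 'tier', 'capacity'):
    parser.add_argument('--' + name)
args = parser.parse_args(['--name', 'S0', '--tier', 'Standard'])
get_complex_argument_processor(('name', 'tier', 'capacity'), 'sku', Sku)(args)
print(args.sku.name, args.sku.tier)  # S0 Standard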
def _get_server_location(cli_ctx, server_name, resource_group_name):
    '''
    Returns the location (i.e. Azure region) that the specified server is in.
    '''
    server_client = get_sql_servers_operations(cli_ctx, None)

    # pylint: disable=no-member
    return server_client.get(
        server_name=server_name,
        resource_group_name=resource_group_name).location
Returns the location (i.e. Azure region) that the specified server is in.
_get_server_location
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_managed_restorable_dropped_database_backup_short_term_retention_client(cli_ctx):
    '''
    Returns client for managed restorable dropped databases.
    '''
    server_client = \
        get_sql_restorable_dropped_database_managed_backup_short_term_retention_policies_operations(cli_ctx, None)

    # pylint: disable=no-member
    return server_client
Returns client for managed restorable dropped databases.
_get_managed_restorable_dropped_database_backup_short_term_retention_client
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_managed_instance_location(cli_ctx, managed_instance_name, resource_group_name):
    '''
    Returns the location (i.e. Azure region) that the specified managed instance is in.
    '''
    managed_instance_client = get_sql_managed_instances_operations(cli_ctx, None)

    # pylint: disable=no-member
    return managed_instance_client.get(
        managed_instance_name=managed_instance_name,
        resource_group_name=resource_group_name).location
Returns the location (i.e. Azure region) that the specified managed instance is in.
_get_managed_instance_location
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_location_capability(cli_ctx, location, group):
    '''
    Gets the location capability for a location and verifies that it is available.
    '''
    capabilities_client = get_sql_capabilities_operations(cli_ctx, None)
    location_capability = capabilities_client.list_by_location(location, group)
    _assert_capability_available(location_capability)
    return location_capability
Gets the location capability for a location and verifies that it is available.
_get_location_capability
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _any_sku_values_specified(sku):
    '''
    Returns True if the sku object has any properties that are specified
    (i.e. not None).
    '''
    # Note: this is a truthiness check, so falsy values such as 0 or '' are
    # treated the same as None (i.e. as "not specified").
    return any(val for key, val in sku.__dict__.items())
Returns True if the sku object has any properties that are specified (i.e. not None).
_any_sku_values_specified
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
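A short demonstration of why the explicit None check matters, using a hypothetical Sku stand-in: falsy values such as 0 are still explicitly specified, and plain truthiness would misclassify them.

class Sku:
    # Hypothetical stand-in for the SDK Sku model.
    def __init__(self, name=None, tier=None, capacity=None):
        self.name, self.tier, self.capacity = name, tier, capacity

def any_values_specified(sku):
    # 'is not None' distinguishes "unspecified" from falsy-but-specified.
    return any(val is not None for val in sku.__dict__.values())

assert not any_values_specified(Sku())
assert any_values_specified(Sku(capacity=0))  # truthiness alone would say False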
def _compute_model_matches(sku_name, compute_model):
    '''
    Returns True if the sku name matches the compute model.
    Please update this function if compute_model gains more than two values.
    '''

    if (_is_serverless_slo(sku_name) and compute_model == ComputeModelType.serverless):
        return True

    if (not _is_serverless_slo(sku_name) and compute_model != ComputeModelType.serverless):
        return True

    return False
Returns True if the sku name matches the compute model. Please update this function if compute_model gains more than two values.
_compute_model_matches
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _is_serverless_slo(sku_name): ''' Returns True if the sku name is a serverless sku. ''' return "_S_" in sku_name
Returns True if the sku name is a serverless sku.
_is_serverless_slo
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_default_server_version(location_capabilities): ''' Gets the default server version capability from the full location capabilities response. If none have 'default' status, gets the first capability that has 'available' status. If there is no default or available server version, falls back to server version 12.0 in order to maintain compatibility with older Azure CLI releases (2.0.25 and earlier). ''' server_versions = location_capabilities.supported_server_versions def is_v12(capability): return capability.name == "12.0" return _get_default_capability(server_versions, fallback_predicate=is_v12)
Gets the default server version capability from the full location capabilities response. If none have 'default' status, gets the first capability that has 'available' status. If there is no default or available server version, falls back to server version 12.0 in order to maintain compatibility with older Azure CLI releases (2.0.25 and earlier).
_get_default_server_version
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_default_capability(capabilities, fallback_predicate=None): ''' Gets the first capability in the collection that has 'default' status. If none have 'default' status, gets the first capability that has 'available' status. ''' logger.debug('_get_default_capability: %s', capabilities) # Get default capability r = next((c for c in capabilities if c.status == CapabilityStatus.DEFAULT), None) if r: logger.debug('_get_default_capability found default: %s', r) return r # No default capability, so fallback to first available capability r = next((c for c in capabilities if c.status == CapabilityStatus.AVAILABLE), None) if r: logger.debug('_get_default_capability found available: %s', r) return r # No available capability, so use custom fallback if fallback_predicate: logger.debug('_get_default_capability using fallback') r = next((c for c in capabilities if fallback_predicate(c)), None) if r: logger.debug('_get_default_capability found fallback: %s', r) return r # No custom fallback, so we have to throw an error. logger.debug('_get_default_capability failed') raise CLIError('Provisioning is restricted in this region. Please choose a different region.')
Gets the first capability in the collection that has 'default' status. If none have 'default' status, gets the first capability that has 'available' status.
_get_default_capability
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
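A minimal sketch of the default-then-available-then-fallback selection order, using a hypothetical namedtuple in place of the SDK capability model and plain status strings.

from collections import namedtuple

Capability = namedtuple('Capability', ['name', 'status'])

def pick_default(capabilities, fallback_predicate=None):
    # Mirrors the selection order above: Default, then Available, then fallback.
    for predicate in (lambda c: c.status == 'Default',
                      lambda c: c.status == 'Available',
                      fallback_predicate):
        if predicate:
            match = next((c for c in capabilities if predicate(c)), None)
            if match:
                return match
    raise ValueError('Provisioning is restricted in this region.')

caps = [Capability('11.0', 'Visible'), Capability('12.0', 'Available')]
# No 'Default' entry, so the first 'Available' capability wins.
assert pick_default(caps).name == '12.0'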
def _assert_capability_available(capability): ''' Asserts that the capability is available (or default). Throws CLIError if the capability is unavailable. ''' logger.debug('_assert_capability_available: %s', capability) if not is_available(capability.status): raise CLIError(capability.reason)
Asserts that the capability is available (or default). Throws CLIError if the capability is unavailable.
_assert_capability_available
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def is_available(status):
    '''
    Returns True if the capability status is available (including default).

    There are three capability statuses:
        VISIBLE: customer can see the slo but cannot use it
        AVAILABLE: customer can see the slo and can use it
        DEFAULT: customer can see the slo and can use it

    Thus, checking that the status is not VISIBLE is sufficient to determine availability.
    '''

    return status != CapabilityStatus.VISIBLE
Returns True if the capability status is available (including default). There are three capability statuses: VISIBLE: customer can see the slo but cannot use it AVAILABLE: customer can see the slo and can use it DEFAULT: customer can see the slo and can use it Thus, checking that the status is not VISIBLE is sufficient to determine availability.
is_available
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _filter_available(capabilities): ''' Filters out the capabilities by removing values that are not available. ''' return [c for c in capabilities if is_available(c.status)]
Filters out the capabilities by removing values that are not available.
_filter_available
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _find_edition_capability(sku, supported_editions): ''' Finds the DB edition capability in the collection of supported editions that matches the requested sku. If the sku has no edition specified, returns the default edition. (Note: tier and edition mean the same thing.) ''' logger.debug('_find_edition_capability: %s; %s', sku, supported_editions) if sku.tier: # Find requested edition capability try: return next(e for e in supported_editions if e.name == sku.tier) except StopIteration: candidate_editions = [e.name for e in supported_editions] raise CLIError('Could not find tier {}. Supported tiers are: {}'.format( sku.tier, candidate_editions )) else: # Find default edition capability return _get_default_capability(supported_editions)
Finds the DB edition capability in the collection of supported editions that matches the requested sku. If the sku has no edition specified, returns the default edition. (Note: tier and edition mean the same thing.)
_find_edition_capability
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _find_family_capability(sku, supported_families):
    '''
    Finds the family capability in the collection of supported families
    that matches the requested sku.

    If the edition has no family specified, returns the default family.
    '''
    logger.debug('_find_family_capability: %s; %s', sku, supported_families)

    if sku.family:
        # Find requested family capability
        try:
            return next(f for f in supported_families if f.name == sku.family)
        except StopIteration:
            candidate_families = [f.name for f in supported_families]

            raise CLIError('Could not find family {}. Supported families are: {}'.format(
                sku.family, candidate_families
            ))
    else:
        # Find default family capability
        return _get_default_capability(supported_families)
Finds the family capability in the collection of supported families that matches the requested sku. If the edition has no family specified, returns the default family.
_find_family_capability
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _find_performance_level_capability(sku, supported_service_level_objectives, allow_reset_family, compute_model=None): ''' Finds the DB or elastic pool performance level (i.e. service objective) in the collection of supported service objectives that matches the requested sku's family and capacity. If the sku has no capacity or family specified, returns the default service objective. ''' logger.debug('_find_performance_level_capability: %s, %s, allow_reset_family: %s, compute_model: %s', sku, supported_service_level_objectives, allow_reset_family, compute_model) if sku.capacity: try: # Find requested service objective based on capacity & family. # Note that for non-vcore editions, family is None. return next(slo for slo in supported_service_level_objectives if ((slo.sku.family == sku.family) or (slo.sku.family is None and allow_reset_family)) and int(slo.sku.capacity) == int(sku.capacity) and _compute_model_matches(slo.sku.name, compute_model)) except StopIteration: if allow_reset_family: raise CLIError( "Could not find sku in tier '{tier}' with capacity {capacity}." " Supported capacities for '{tier}' are: {capacities}." " Please specify one of these supported values for capacity.".format( tier=sku.tier, capacity=sku.capacity, capacities=[slo.sku.capacity for slo in supported_service_level_objectives] )) raise CLIError( "Could not find sku in tier '{tier}' with family '{family}', capacity {capacity}." " Supported families & capacities for '{tier}' are: {skus}. Please specify one of these" " supported combinations of family and capacity." " And ensure that the sku supports '{compute_model}' compute model.".format( tier=sku.tier, family=sku.family, capacity=sku.capacity, skus=[(slo.sku.family, slo.sku.capacity) for slo in supported_service_level_objectives], compute_model=compute_model )) elif sku.family: # Error - cannot find based on family alone. raise CLIError('If --family is specified, --capacity must also be specified.') else: # Find default service objective return _get_default_capability(supported_service_level_objectives)
Finds the DB or elastic pool performance level (i.e. service objective) in the collection of supported service objectives that matches the requested sku's family and capacity. If the sku has no capacity or family specified, returns the default service objective.
_find_performance_level_capability
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
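A minimal sketch of the (family, capacity) matching at the heart of the lookup above, with hypothetical namedtuple stand-ins for the SDK service-objective types; the compute-model check and error reporting are omitted for brevity.

from collections import namedtuple

SloSku = namedtuple('SloSku', ['name', 'family', 'capacity'])
Slo = namedtuple('Slo', ['sku'])

def find_slo(slos, family, capacity, allow_reset_family=False):
    # Family must match exactly, unless the slo has no family and
    # resetting the family is allowed (e.g. a tier change to a
    # family-less tier).
    return next(
        (s for s in slos
         if (s.sku.family == family or (s.sku.family is None and allow_reset_family))
         and int(s.sku.capacity) == int(capacity)),
        None)

slos = [Slo(SloSku('GP_Gen5_2', 'Gen5', 2)), Slo(SloSku('S3', None, 100))]
assert find_slo(slos, 'Gen5', 2).sku.name == 'GP_Gen5_2'
assert find_slo(slos, 'Gen5', 100, allow_reset_family=True).sku.name == 'S3'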
def _db_elastic_pool_update_sku(
        cmd,
        instance,
        service_objective,
        tier,
        family,
        capacity,
        find_sku_from_capabilities_func,
        compute_model=None):
    '''
    Updates the sku of a DB or elastic pool.
    '''

    # Set sku name
    if service_objective:
        instance.sku = Sku(name=service_objective)

    # Set tier
    allow_reset_family = False
    if tier:
        if not service_objective:
            # Wipe out old sku name so that it does not conflict with new tier
            instance.sku.name = None

        instance.sku.tier = tier

        if instance.sku.family and not family:
            # If we are changing tier and the old sku has a family but the
            # new family is unspecified, allow the sku search to wipe out the family.
            #
            # This is needed so that tier can be successfully changed from
            # a tier that has family (e.g. GeneralPurpose) to a tier that has
            # no family (e.g. Standard).
            allow_reset_family = True

    # Set family
    if family:
        if not service_objective:
            # Wipe out old sku name so that it does not conflict with new family
            instance.sku.name = None
        instance.sku.family = family

    # Set capacity
    if capacity:
        instance.sku.capacity = capacity

    # Wipe out the sku name if the serverless vs provisioned db offering changed,
    # but only if the sku name has not been wiped by earlier logic and a new
    # compute model has been requested.
    if instance.sku.name and compute_model:
        if not _compute_model_matches(instance.sku.name, compute_model):
            instance.sku.name = None

    # If the sku name was wiped out by any of the above, resolve the requested sku name
    # using capabilities.
    if not instance.sku.name:
        instance.sku = find_sku_from_capabilities_func(
            cmd.cli_ctx, instance.location, instance.sku,
            allow_reset_family=allow_reset_family, compute_model=compute_model)
Updates the sku of a DB or elastic pool.
_db_elastic_pool_update_sku
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
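A minimal sketch of the name-wiping behavior when only the tier changes, with a hypothetical Sku stand-in; the capabilities lookup that would re-resolve the name is left out.

class Sku:
    # Hypothetical stand-in for the SDK Sku model.
    def __init__(self, name=None, tier=None, family=None, capacity=None):
        self.name, self.tier, self.family, self.capacity = name, tier, family, capacity

sku = Sku(name='GP_Gen5_2', tier='GeneralPurpose', family='Gen5', capacity=2)

# A tier change without a new service objective clears the old name so the
# capabilities lookup can re-resolve it; because the old sku carried a family
# and none was requested, the family may be reset as well.
requested_tier, requested_family = 'Standard', None
sku.name = None
sku.tier = requested_tier
allow_reset_family = bool(sku.family and not requested_family)
assert sku.name is None and allow_reset_family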
def _get_tenant_id(): ''' Gets tenantId from current subscription. ''' from azure.cli.core._profile import Profile profile = Profile() sub = profile.get_subscription() return sub['tenantId']
Gets tenantId from current subscription.
_get_tenant_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_service_principal_object_from_type(servicePrincipalType): ''' Gets the service principal object from type. ''' servicePrincipalResult = None if (servicePrincipalType is not None and (servicePrincipalType == ServicePrincipalType.system_assigned.value or servicePrincipalType == ServicePrincipalType.none.value)): servicePrincipalResult = ServicePrincipal(type=servicePrincipalType) return servicePrincipalResult
Gets the service principal object from type.
_get_service_principal_object_from_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _get_identity_object_from_type(
        assignIdentityIsPresent,
        resourceIdentityType,
        userAssignedIdentities,
        existingResourceIdentity):
    '''
    Gets the resource identity type.
    '''
    identityResult = None

    if resourceIdentityType is not None and resourceIdentityType == ResourceIdType.none.value:
        identityResult = ResourceIdentity(type=ResourceIdType.none.value)
        return identityResult

    if assignIdentityIsPresent and resourceIdentityType is not None:
        # When UMI is of type SystemAssigned,UserAssigned
        if resourceIdentityType == ResourceIdType.system_assigned_user_assigned.value:
            umiDict = None

            if userAssignedIdentities is None:
                raise CLIError('The list of user assigned identity ids needs to be passed if the '
                               'IdentityType is UserAssigned or SystemAssignedUserAssigned.')

            if existingResourceIdentity is not None and existingResourceIdentity.user_assigned_identities is not None:
                identityResult = _get_sys_assigned_user_assigned_identity(userAssignedIdentities,
                                                                          existingResourceIdentity)

            # Create scenarios
            else:
                for identity in userAssignedIdentities:
                    if umiDict is None:
                        umiDict = {identity: UserIdentity()}
                    else:
                        umiDict[identity] = UserIdentity()  # pylint: disable=unsupported-assignment-operation

                identityResult = ResourceIdentity(type=ResourceIdType.system_assigned_user_assigned.value,
                                                  user_assigned_identities=umiDict)

        # When UMI is of type UserAssigned
        if resourceIdentityType == ResourceIdType.user_assigned.value:
            umiDict = None

            if userAssignedIdentities is None:
                raise CLIError('The list of user assigned identity ids needs to be passed if the '
                               'IdentityType is UserAssigned or SystemAssignedUserAssigned.')

            if existingResourceIdentity is not None and existingResourceIdentity.user_assigned_identities is not None:
                identityResult = _get__user_assigned_identity(userAssignedIdentities, existingResourceIdentity)

            else:
                for identity in userAssignedIdentities:
                    if umiDict is None:
                        umiDict = {identity: UserIdentity()}
                    else:
                        umiDict[identity] = UserIdentity()  # pylint: disable=unsupported-assignment-operation

                identityResult = ResourceIdentity(type=ResourceIdType.user_assigned.value,
                                                  user_assigned_identities=umiDict)

        if resourceIdentityType == ResourceIdType.system_assigned.value:
            identityResult = ResourceIdentity(type=ResourceIdType.system_assigned.value)

    elif assignIdentityIsPresent:
        identityResult = ResourceIdentity(type=ResourceIdType.system_assigned.value)

    if assignIdentityIsPresent is False and existingResourceIdentity is not None:
        identityResult = existingResourceIdentity

    return identityResult
Gets the resource identity type.
_get_identity_object_from_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
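The incremental dictionary construction above is equivalent to a comprehension; a short sketch with a hypothetical UserIdentity stand-in:

class UserIdentity:
    # Hypothetical stand-in for the SDK UserIdentity model.
    pass

user_assigned_identities = [
    '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/'
    'providers/Microsoft.ManagedIdentity/userAssignedIdentities/mi1',
]
umi_dict = {identity: UserIdentity() for identity in user_assigned_identities}
assert list(umi_dict) == user_assigned_identities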
def _get_database_identity( userAssignedIdentities): ''' Gets the resource identity type for the database. ''' databaseIdentity = None if userAssignedIdentities is None: raise CLIError('The list of user assigned identity ids needs to be passed for database CMK') umiDict = None for umi in userAssignedIdentities: if umiDict is None: umiDict = {umi: DatabaseUserIdentity()} else: umiDict[umi] = DatabaseUserIdentity() # pylint: disable=unsupported-assignment-operation from azure.mgmt.sql.models import DatabaseIdentity # pylint: disable=redefined-outer-name databaseIdentity = DatabaseIdentity(type=ResourceIdType.user_assigned.value, user_assigned_identities=umiDict) return databaseIdentity
Gets the resource identity type for the database.
_get_database_identity
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
def _failover_group_update_common(
        instance,
        failover_policy=None,
        grace_period=None):
    '''
    Updates the failover group grace period and failover policy. Common logic for both Sterling and Managed Instance
    '''

    if failover_policy is not None:
        instance.read_write_endpoint.failover_policy = failover_policy

    if instance.read_write_endpoint.failover_policy == FailoverPolicyType.manual.value:
        # Manual failover has no grace period.
        grace_period = None
        instance.read_write_endpoint.failover_with_data_loss_grace_period_minutes = grace_period

    if grace_period is not None:
        # grace_period is supplied in hours; the API expects minutes.
        grace_period = int(grace_period) * 60
        instance.read_write_endpoint.failover_with_data_loss_grace_period_minutes = grace_period
Updates the failover group grace period and failover policy. Common logic for both Sterling and Managed Instance
_failover_group_update_common
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
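A minimal sketch of the two grace-period rules, using a hypothetical Endpoint stand-in: a manual failover policy discards the grace period, and an explicit grace period (supplied in hours) is stored in minutes.

class Endpoint:
    # Hypothetical stand-in for the read/write endpoint model.
    failover_policy = 'Automatic'
    failover_with_data_loss_grace_period_minutes = 60

ep = Endpoint()

# Switching to manual failover discards any grace period.
ep.failover_policy = 'Manual'
ep.failover_with_data_loss_grace_period_minutes = None

# An automatic policy with a 2-hour grace period is stored as 120 minutes.
ep.failover_policy = 'Automatic'
ep.failover_with_data_loss_grace_period_minutes = int(2) * 60
assert ep.failover_with_data_loss_grace_period_minutes == 120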
def _complete_maintenance_configuration_id(cli_ctx, argument_value=None): ''' Completes maintenance configuration id from short to full type if needed ''' from azure.mgmt.core.tools import resource_id, is_valid_resource_id from azure.cli.core.commands.client_factory import get_subscription_id if argument_value and not is_valid_resource_id(argument_value): return resource_id( subscription=get_subscription_id(cli_ctx), namespace='Microsoft.Maintenance', type='publicMaintenanceConfigurations', name=argument_value) return argument_value
Completes maintenance configuration id from short to full type if needed
_complete_maintenance_configuration_id
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
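A usage sketch of the short-name expansion, with a placeholder subscription id instead of one resolved from cli_ctx; resource_id and is_valid_resource_id are the same azure.mgmt.core.tools helpers used above.

from azure.mgmt.core.tools import is_valid_resource_id, resource_id

short_name = 'SQL_Default'  # hypothetical public maintenance configuration name
if not is_valid_resource_id(short_name):
    full_id = resource_id(
        subscription='00000000-0000-0000-0000-000000000000',  # placeholder
        namespace='Microsoft.Maintenance',
        type='publicMaintenanceConfigurations',
        name=short_name)
assert full_id.endswith('/publicMaintenanceConfigurations/SQL_Default')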
def _get_server_dns_suffx(cli_ctx): ''' Gets the DNS suffix for servers in this Azure environment. ''' # Allow dns suffix to be overridden by environment variable for testing purposes from os import getenv return getenv('_AZURE_CLI_SQL_DNS_SUFFIX', default=cli_ctx.cloud.suffixes.sql_server_hostname)
Gets the DNS suffix for servers in this Azure environment.
_get_server_dns_suffx
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/sql/custom.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py
MIT
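A small sketch of the environment-variable override; '.database.windows.net' is the public-cloud SQL server suffix and stands in here for cli_ctx.cloud.suffixes.sql_server_hostname.

import os

# Override the suffix for testing, exactly as the helper above allows.
os.environ['_AZURE_CLI_SQL_DNS_SUFFIX'] = '.database.example.test'
suffix = os.getenv('_AZURE_CLI_SQL_DNS_SUFFIX', default='.database.windows.net')
assert suffix == '.database.example.test'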