code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _validate_lock_params_match_lock(
lock_client, name, resource_group, resource_provider_namespace, parent_resource_path,
resource_type, resource_name):
"""
Locks are scoped to subscription, resource group or resource.
However, the az list command returns all locks for the current scopes
and all lower scopes (e.g. resource group level also includes resource locks).
This can lead to a confusing user experience where the user specifies a lock
name and assumes that it will work, even if they haven't given the right
scope. This function attempts to validate the parameters and help the
user find the right scope, by first finding the lock, and then inferring
what its parameters should be.
"""
locks = lock_client.management_locks.list_at_subscription_level()
found_count = 0 # locks at different levels can have the same name
lock_resource_id = None
for lock in locks:
if lock.name == name:
found_count = found_count + 1
lock_resource_id = lock.id
if found_count == 1:
# If we only found one lock, let's validate that the parameters are correct,
# if we found more than one, we'll assume the user knows what they're doing
# TODO: Add validation for that case too?
resource = parse_resource_id(lock_resource_id)
_resource_group = resource.get('resource_group', None)
_resource_namespace = resource.get('namespace', None)
if _resource_group is None:
return
if resource_group and resource_group.lower() != _resource_group.lower():
raise CLIError(
'Unexpected --resource-group for lock {}, expected {}'.format(
name, _resource_group))
if _resource_namespace is None or _resource_namespace == 'Microsoft.Authorization':
return
if resource_provider_namespace != _resource_namespace:
raise CLIError(
'Unexpected --namespace for lock {}, expected {}'.format(name, _resource_namespace))
if resource.get('child_type_2', None) is None:
_resource_type = resource.get('type', None)
_resource_name = resource.get('name', None)
else:
if resource.get('child_type_3', None) is None:
_resource_type = resource.get('child_type_1', None)
_resource_name = resource.get('child_name_1', None)
parent = resource['type'] + '/' + resource['name']
else:
_resource_type = resource.get('child_type_2', None)
_resource_name = resource.get('child_name_2', None)
parent = (resource['type'] + '/' + resource['name'] + '/' +
resource['child_type_1'] + '/' + resource['child_name_1'])
if parent != parent_resource_path:
raise CLIError(
'Unexpected --parent for lock {}, expected {}'.format(
name, parent))
if resource_type != _resource_type:
raise CLIError('Unexpected --resource-type for lock {}, expected {}'.format(
name, _resource_type))
if resource_name != _resource_name:
raise CLIError('Unexpected --resource-name for lock {}, expected {}'.format(
name, _resource_name)) | Locks are scoped to subscription, resource group or resource.
However, the az list command returns all locks for the current scopes
and all lower scopes (e.g. resource group level also includes resource locks).
This can lead to a confusing user experience where the user specifies a lock
name and assumes that it will work, even if they haven't given the right
scope. This function attempts to validate the parameters and help the
user find the right scope, by first finding the lock, and then inferring
what its parameters should be. | _validate_lock_params_match_lock | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
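For readers unfamiliar with `parse_resource_id`, the sketch below is illustrative only (the subscription, group, and lock names are made up, and the import path may be `msrestazure.tools` on older CLI versions); it shows why the validator can return early for a resource-group-scoped lock: the parsed namespace is `Microsoft.Authorization`.

```python
# Illustrative sketch, not part of the Azure CLI source: how parse_resource_id
# decomposes a resource-group-level lock id (all names below are made up).
from azure.mgmt.core.tools import parse_resource_id  # msrestazure.tools in older versions

lock_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
           '/resourceGroups/myRG/providers/Microsoft.Authorization/locks/myLock')
parts = parse_resource_id(lock_id)
# Roughly: {'resource_group': 'myRG', 'namespace': 'Microsoft.Authorization',
#           'type': 'locks', 'name': 'myLock', ...}
# Because the namespace is Microsoft.Authorization, _validate_lock_params_match_lock
# returns early and only --resource-group is checked for such locks.
print(parts['resource_group'], parts['namespace'], parts['type'])
```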
def list_locks(cmd, resource_group=None,
resource_provider_namespace=None, parent_resource_path=None, resource_type=None,
resource_name=None, filter_string=None):
"""
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
:param filter_string: A query filter to use to restrict the results.
:type filter_string: str
"""
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
if resource_group is None:
return lock_client.management_locks.list_at_subscription_level(filter=filter_string)
if resource_name is None:
return lock_client.management_locks.list_at_resource_group_level(
resource_group, filter=filter_string)
return lock_client.management_locks.list_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, filter=filter_string) | :param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
:param filter_string: A query filter to use to restrict the results.
:type filter_string: str | list_locks | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def get_lock(cmd, lock_name=None, resource_group=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, ids=None):
"""
:param name: The name of the lock.
:type name: str
"""
if ids:
kwargs_list = []
for id_arg in ids:
try:
kwargs_list.append(_parse_lock_id(id_arg))
except AttributeError:
logger.error('az lock show: error: argument --ids: invalid ResourceId value: \'%s\'', id_arg)
return
results = [get_lock(cmd, **kwargs) for kwargs in kwargs_list]
return results[0] if len(results) == 1 else results
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
_validate_lock_params_match_lock(lock_client, lock_name, resource_group,
resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
if resource_group is None:
return _call_subscription_get(cmd, lock_client, lock_name)
if resource_name is None:
return lock_client.management_locks.get_at_resource_group_level(resource_group, lock_name)
if cmd.supported_api_version(max_api='2015-01-01'):
lock_list = list_locks(cmd, resource_group, resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
return next((lock for lock in lock_list if lock.name == lock_name), None)
return lock_client.management_locks.get_at_resource_level(
resource_group, resource_provider_namespace,
parent_resource_path or '', resource_type, resource_name, lock_name) | :param name: The name of the lock.
:type name: str | get_lock | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def delete_lock(cmd, lock_name=None, resource_group=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, ids=None):
"""
:param name: The name of the lock.
:type name: str
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
"""
if ids:
kwargs_list = []
for id_arg in ids:
try:
kwargs_list.append(_parse_lock_id(id_arg))
except AttributeError:
logger.error('az lock delete: error: argument --ids: invalid ResourceId value: \'%s\'', id_arg)
return
results = [delete_lock(cmd, **kwargs) for kwargs in kwargs_list]
return results[0] if len(results) == 1 else results
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
_validate_lock_params_match_lock(lock_client, lock_name, resource_group,
resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
if resource_group is None:
return lock_client.management_locks.delete_at_subscription_level(lock_name)
if resource_name is None:
return lock_client.management_locks.delete_at_resource_group_level(
resource_group, lock_name)
return lock_client.management_locks.delete_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name) | :param name: The name of the lock.
:type name: str
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str | delete_lock | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def create_lock(cmd, lock_name, level,
resource_group=None, resource_provider_namespace=None, notes=None,
parent_resource_path=None, resource_type=None, resource_name=None):
"""
:param name: The name of the lock.
:type name: str
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
:param notes: Notes about this lock.
:type notes: str
"""
ManagementLockObject = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_LOCKS, 'ManagementLockObject', mod='models')
parameters = ManagementLockObject(level=level, notes=notes)
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
if resource_group is None:
return lock_client.management_locks.create_or_update_at_subscription_level(lock_name, parameters)
if resource_name is None:
return lock_client.management_locks.create_or_update_at_resource_group_level(
resource_group, lock_name, parameters)
return lock_client.management_locks.create_or_update_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name, parameters) | :param name: The name of the lock.
:type name: str
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
:param notes: Notes about this lock.
:type notes: str | create_lock | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def update_lock(cmd, lock_name=None, resource_group=None, resource_provider_namespace=None, notes=None,
parent_resource_path=None, resource_type=None, resource_name=None, level=None, ids=None):
"""
Allows updates to the lock-type(level) and the notes of the lock
"""
if ids:
kwargs_list = []
for id_arg in ids:
try:
kwargs_list.append(_parse_lock_id(id_arg))
except AttributeError:
logger.error('az lock update: error: argument --ids: invalid ResourceId value: \'%s\'', id_arg)
return
results = [update_lock(cmd, level=level, notes=notes, **kwargs) for kwargs in kwargs_list]
return results[0] if len(results) == 1 else results
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
_validate_lock_params_match_lock(lock_client, lock_name, resource_group, resource_provider_namespace,
parent_resource_path, resource_type, resource_name)
if resource_group is None:
params = _call_subscription_get(cmd, lock_client, lock_name)
_update_lock_parameters(params, level, notes)
return lock_client.management_locks.create_or_update_at_subscription_level(lock_name, params)
if resource_name is None:
params = lock_client.management_locks.get_at_resource_group_level(resource_group, lock_name)
_update_lock_parameters(params, level, notes)
return lock_client.management_locks.create_or_update_at_resource_group_level(
resource_group, lock_name, params)
if cmd.supported_api_version(max_api='2015-01-01'):
lock_list = list_locks(cmd, resource_group, resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
return next((lock for lock in lock_list if lock.name == lock_name), None)
params = lock_client.management_locks.get_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name)
_update_lock_parameters(params, level, notes)
return lock_client.management_locks.create_or_update_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name, params) | Allows updates to the lock-type(level) and the notes of the lock | update_lock | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def invoke_action(self, action, request_body):
"""
Formats Url if none provided and sends the POST request with the url and request-body.
"""
from azure.core.polling import LROPoller
from azure.mgmt.core.polling.arm_polling import ARMPolling
query_parameters = {}
serialize = self.rcf.resources._serialize # pylint: disable=protected-access
client = self.rcf.resources._client # pylint: disable=protected-access
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/' \
'{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/{action}'
if self.resource_id:
url = client.format_url(
'{resource_id}/{action}',
resource_id=self.resource_id,
action=serialize.url("action", action, 'str'))
else:
url = client.format_url(
url,
resourceGroupName=serialize.url(
"resource_group_name", self.resource_group_name, 'str',
max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
resourceProviderNamespace=serialize.url(
"resource_provider_namespace", self.resource_provider_namespace, 'str'),
parentResourcePath=serialize.url(
"parent_resource_path", self.parent_resource_path, 'str', skip_quote=True),
resourceType=serialize.url("resource_type", self.resource_type, 'str', skip_quote=True),
resourceName=serialize.url("resource_name", self.resource_name, 'str'),
subscriptionId=serialize.url(
"self._config.subscription_id", self.rcf.resources._config.subscription_id, 'str'),
action=serialize.url("action", action, 'str'))
# Construct parameters
query_parameters['api-version'] = serialize.query("api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
# This value of accept_language comes from the fixed configuration in the AzureConfiguration in track 1.
header_parameters['accept-language'] = 'en-US'
body_content_kwargs = {}
body_content_kwargs['content'] = json.loads(request_body) if request_body else None
def deserialization_cb(pipeline_response):
return json.loads(pipeline_response.http_response.text())
request = client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = client._pipeline.run(request, stream=False)
return LROPoller(client=client, initial_response=pipeline_response, deserialization_callback=deserialization_cb,
polling_method=ARMPolling()) | Formats Url if none provided and sends the POST request with the url and request-body. | invoke_action | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def pack(cmd, template_file):
"""
Packs the specified template and its referenced artifacts for use in a Template Spec.
:param template_file: The path to the template spec .json file.
:type name: str
"""
root_template_file_path = os.path.abspath(template_file)
context = PackingContext(os.path.dirname(root_template_file_path))
template_content = read_file_content(template_file)
template_json = json.loads(json.dumps(process_template(template_content)))
_pack_artifacts(cmd, root_template_file_path, context)
return PackagedTemplate(template_json, getattr(context, 'Artifact')) | Packs the specified template and its referenced artifacts for use in a Template Spec.
:param template_file: The path to the template spec .json file.
:type name: str | pack | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_packing_engine.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_packing_engine.py | MIT |
def _pack_artifacts(cmd, template_abs_file_path, context):
"""
Recursively packs the specified template and its referenced artifacts and
adds the artifact(s) to the current packing context.
:param template_abs_file_path: The path to the template spec .json file to pack.
:type template_abs_file_path : str
:param context: The packing context of the current packing operation
:type context: PackingContext
:param artifactableTemplateObj : The packageable template object
:type artifactableTemplateObj : JSON
"""
original_directory = getattr(context, 'CurrentDirectory')
try:
context.CurrentDirectory = os.path.dirname(template_abs_file_path)
template_content = read_file_content(template_abs_file_path)
artifactable_template_obj = _remove_comments_from_json(template_content)
template_link_to_artifact_objs = _get_template_links_to_artifacts(cmd, artifactable_template_obj,
includeNested=True)
for template_link_obj in template_link_to_artifact_objs:
relative_path = str(template_link_obj['relativePath'])
if not relative_path:
continue
# This is a templateLink to a local template... Get the absolute path of the
# template based on its relative path from the current template directory and
# make sure it exists:
abs_local_path = os.path.join(getattr(context, 'CurrentDirectory'), relative_path)
if not os.path.isfile(abs_local_path):
raise CLIError('File ' + abs_local_path + ' not found.')
# Let's make sure we're not referencing a file outside of our root directory
# hierarchy. We won't allow such references for security purposes:
if (not os.path.commonpath([getattr(context, 'RootTemplateDirectory')]) ==
os.path.commonpath([getattr(context, 'RootTemplateDirectory'), abs_local_path])):
raise BadRequestError('Unable to handle the reference to file ' + abs_local_path + ' from ' +
template_abs_file_path +
' because it exists outside of the root template directory of ' +
getattr(context, 'RootTemplateDirectory'))
# Convert the template relative path to one that is relative to our root
# directory path, and then if we haven't already processed that template into
# an artifact elsewhere, we'll do so here...
as_relative_path = _absolute_to_relative_path(getattr(context, 'RootTemplateDirectory'), abs_local_path)
duplicateFile = False
for prev_added_artifact in getattr(context, 'Artifact'):
prev_added_artifact = os.path.join(getattr(context, 'RootTemplateDirectory'),
getattr(prev_added_artifact, 'path'))
if os.path.samefile(prev_added_artifact, abs_local_path):
duplicateFile = True
continue
if duplicateFile:
continue
_pack_artifacts(cmd, abs_local_path, context)
LinkedTemplateArtifact = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_TEMPLATESPECS,
'LinkedTemplateArtifact', mod='models')
template_content = read_file_content(abs_local_path)
template_json = json.loads(json.dumps(process_template(template_content)))
artifact = LinkedTemplateArtifact(path=as_relative_path, template=template_json)
context.Artifact.append(artifact)
finally:
context.CurrentDirectory = original_directory | Recursively packs the specified template and its referenced artifacts and
adds the artifact(s) to the current packing context.
:param template_abs_file_path: The path to the template spec .json file to pack.
:type template_abs_file_path : str
:param context: The packing context of the current packing operation
:type context: PackingContext
:param artifactableTemplateObj : The packageable template object
:type artifactableTemplateObj : JSON | _pack_artifacts | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_packing_engine.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_packing_engine.py | MIT |
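The directory-containment check inside `_pack_artifacts` can be hard to parse at a glance; the standalone sketch below shows the same `os.path.commonpath` idea under assumed file names (none of the paths come from the function above).

```python
# Minimal sketch of the containment check used by _pack_artifacts: a referenced
# file is accepted only if the root template directory is the common prefix of
# {root, candidate}. All paths below are made-up examples.
import os

root = os.path.abspath('templates')
inside = os.path.join(root, 'nested', 'storage.json')
outside = os.path.abspath(os.path.join(root, '..', 'secrets.json'))

def is_under_root(root_dir, candidate):
    return os.path.commonpath([root_dir]) == os.path.commonpath([root_dir, candidate])

print(is_under_root(root, inside))   # True  -> would be packed as an artifact
print(is_under_root(root, outside))  # False -> would trigger BadRequestError above
```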
def _normalize_directory_seperators_for_local_file_system(abs_file_path):
"""
Simply normalizes directory path separators in the specified path
to match those of the local filesystem(s).
"""
# Windows based:
if os.sep == '\\':
return str(abs_file_path).replace(os.altsep, '\\')
# Unix/Other based:
return str(abs_file_path).replace('\\', os.sep) | Simply normalizes directory path separators in the specified path
to match those of the local filesystem(s). | _normalize_directory_seperators_for_local_file_system | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_packing_engine.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_packing_engine.py | MIT |
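A quick illustration of the helper above, using a made-up mixed-separator path; the result depends on the platform the CLI runs on.

```python
# Illustrative call only.
mixed = 'artifacts/nested\\azuredeploy.json'
normalized = _normalize_directory_seperators_for_local_file_system(mixed)
# Windows (os.sep == '\\'): 'artifacts\\nested\\azuredeploy.json'
# POSIX   (os.sep == '/'):  'artifacts/nested/azuredeploy.json'
print(normalized)
```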
def validate_sqlvm_group(cmd, namespace):
'''
Validates if name or id has been provided. If name has been provided, it assumes the group is in the same resource group.
'''
group = namespace.sql_virtual_machine_group_resource_id
if group and not is_valid_resource_id(group):
namespace.sql_virtual_machine_group_resource_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.SqlVirtualMachine', type='sqlVirtualMachineGroups',
name=group
) | Validates if name or id has been provided. If name has been provided, it assumes the group is in the same resource group. | validate_sqlvm_group | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_sqlvm_list(cmd, namespace):
'''
Validates if name or id has been provided. If name has been provided, it assumes the vm is in the same resource group.
'''
vms = namespace.sql_virtual_machine_instances
for n, sqlvm in enumerate(vms):
if sqlvm and not is_valid_resource_id(sqlvm):
# add the correct resource id
namespace.sql_virtual_machine_instances[n] = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.SqlVirtualMachine', type='sqlVirtualMachines',
name=sqlvm
) | Validates if name or id has been provided. If name has been provided, it assumes the vm is in the same resource group. | validate_sqlvm_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_load_balancer(cmd, namespace):
'''
Validates if name or id has been provided. If name has been provided, it assumes the load balancer is in the same group.
'''
lb = namespace.load_balancer_resource_id
if not is_valid_resource_id(lb):
namespace.load_balancer_resource_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network', type='loadBalancers',
name=lb
) | Validates if name or id has been provided. If name has been provided, it assumes the load balancer is in the same group. | validate_load_balancer | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_public_ip_address(cmd, namespace):
'''
Validates if name or id has been provided. If name has been provided, it assumes the public ip address is in the same group.
'''
public_ip = namespace.public_ip_address_resource_id
if public_ip and not is_valid_resource_id(public_ip):
namespace.public_ip_address_resource_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network', type='publicIPAddresses',
name=public_ip
) | Validates if name or id has been provided. If name has been provided, it assumes the public ip address is in the same group. | validate_public_ip_address | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_subnet(cmd, namespace):
'''
Validates if name or id has been provided. If name has been provided, it assumes the subnet belongs to the given virtual network in the same resource group.
'''
subnet = namespace.subnet_resource_id
vnet = namespace.vnet_name
if vnet and '/' in vnet:
raise InvalidArgumentValueError("incorrect usage: --subnet ID | --subnet NAME --vnet-name NAME")
subnet_is_id = is_valid_resource_id(subnet)
if (subnet_is_id and vnet) or (not subnet_is_id and not vnet):
raise MutuallyExclusiveArgumentError("incorrect usage: --subnet ID | --subnet NAME --vnet-name NAME")
if not subnet_is_id and vnet:
namespace.subnet_resource_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network', type='virtualNetworks',
name=vnet, child_type_1='subnets',
child_name_1=subnet
) | Validates if name or id has been provided. If name has been provided, it assumes the subnet belongs to the given virtual network in the same resource group. | validate_subnet | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
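To make the `--subnet NAME --vnet-name NAME` branch concrete, here is a hedged sketch of the resource ID the validator builds; all names are made up, and the import path may be `msrestazure.tools` on older CLI versions.

```python
# Illustrative only: what validate_subnet constructs when given names instead of an id.
from azure.mgmt.core.tools import resource_id  # msrestazure.tools in older versions

subnet_id = resource_id(
    subscription='00000000-0000-0000-0000-000000000000',  # assumed subscription id
    resource_group='myRG',
    namespace='Microsoft.Network', type='virtualNetworks',
    name='myVnet', child_type_1='subnets', child_name_1='mySubnet')
# -> /subscriptions/.../resourceGroups/myRG/providers/Microsoft.Network
#        /virtualNetworks/myVnet/subnets/mySubnet
print(subnet_id)
```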
def validate_sqlmanagement(namespace):
'''
Validates that when the SQL management mode is NoAgent, the image SKU and image offer are also provided.
'''
sql_mgmt_mode = namespace.sql_management_mode
if (sql_mgmt_mode == "NoAgent" and (namespace.sql_image_sku is None or namespace.sql_image_offer is None)):
raise RequiredArgumentMissingError("usage error: --sql-mgmt-type NoAgent --image-sku NAME --image-offer NAME") | Validates if sql management mode provided, the offer type and sku type has to be provided. | validate_sqlmanagement | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_least_privilege_mode(namespace):
'''
Validates that when least privilege mode is Enabled, the SQL management mode is Full.
'''
least_privilege_mode = namespace.least_privilege_mode
if (least_privilege_mode == "Enabled" and (namespace.sql_management_mode is None or namespace.sql_management_mode != "Full")):
raise RequiredArgumentMissingError("usage error: --least-privilege-mode Enabled --sql-mgmt-type Full") | Validates if least privilege mode provided, management mode is Full | validate_least_privilege_mode | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_expand(namespace):
'''
Concatenates expand parameters
'''
if namespace.expand is not None:
namespace.expand = ",".join(namespace.expand) | Concatenates expand parameters | validate_expand | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_assessment(namespace):
'''
Validates assessment settings
'''
enable_assessment = namespace.enable_assessment
enable_assessment_schedule = namespace.enable_assessment_schedule
assessment_weekly_interval = namespace.assessment_weekly_interval
assessment_monthly_occurrence = namespace.assessment_monthly_occurrence
assessment_day_of_week = namespace.assessment_day_of_week
assessment_start_time_local = namespace.assessment_start_time_local
is_assessment_schedule_provided = False
if (assessment_weekly_interval is not None or assessment_monthly_occurrence is not None or
assessment_day_of_week is not None or assessment_start_time_local is not None):
is_assessment_schedule_provided = True
# Should we add new validations for workspace rg, name, agent rg here?
# Validate conflicting settings
if (enable_assessment_schedule is False and is_assessment_schedule_provided):
raise InvalidArgumentValueError("Assessment schedule settings cannot be provided while enable-assessment-schedule is False")
# Validate conflicting settings
if (enable_assessment is False and is_assessment_schedule_provided):
raise InvalidArgumentValueError("Assessment schedule settings cannot be provided while enable-assessment is False")
# Validate necessary fields for Assessment schedule
if is_assessment_schedule_provided:
if (assessment_weekly_interval is not None and assessment_monthly_occurrence is not None):
raise MutuallyExclusiveArgumentError("Both assessment-weekly-interval and assessment-montly-occurrence cannot be provided at the same time for Assessment schedule")
if (assessment_weekly_interval is None and assessment_monthly_occurrence is None):
raise RequiredArgumentMissingError("Either assessment-weekly-interval or assessment-montly-occurrence must be provided for Assessment schedule")
if assessment_day_of_week is None:
raise RequiredArgumentMissingError("assessment-day-of-week must be provided for Assessment schedule")
if assessment_start_time_local is None:
raise RequiredArgumentMissingError("assessment-start-time-local must be provided for Assessment schedule") | Validates assessment settings | validate_assessment | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_assessment_start_time_local(namespace):
'''
Validates assessment start time format
'''
assessment_start_time_local = namespace.assessment_start_time_local
TIME_FORMAT = '%H:%M'
if assessment_start_time_local:
try:
datetime.strptime(assessment_start_time_local, TIME_FORMAT)
except ValueError:
raise InvalidArgumentValueError("assessment-start-time-local input '{}' is not valid time. Valid example: 19:30".format(assessment_start_time_local)) | Validates assessment start time format | validate_assessment_start_time_local | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def validate_azure_ad_authentication(cmd, namespace):
""" Validates Azure AD authentication.
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param namespace: The argparse namespace representing the arguments.
:type namespace: argparse.Namespace.
"""
skip_client_validation = False
if hasattr(namespace, "skip_client_validation"):
skip_client_validation = getattr(namespace, "skip_client_validation")
if skip_client_validation is True:
logger.warning('Skipping client-side validation ...')
return
logger.debug("Validate Azure AD authentication from client-side:")
# SQL VM Azure AD authentication is currently only supported on Azure Public Cloud
from azure.cli.core.cloud import AZURE_PUBLIC_CLOUD
if cmd.cli_ctx.cloud.name != AZURE_PUBLIC_CLOUD.name:
raise InvalidArgumentValueError("Azure AD authentication is not supported in {}".format(cmd.ctx_cli.cloud.name))
# validate the SQL VM supports Azure AD authentication, i.e. it is on Windows platform and is SQL 2022 or later
# this validation will take place in RP call
# validate the MSI is valid on the Azure virtual machine
principal_id = _validate_msi_valid_on_vm(cmd.cli_ctx, namespace)
logger.debug("Validate Azure AD authentication: the managed identity is valid with a principalId %s.", principal_id)
# validate the MSI has appropriate permission to query Microsoft Graph API
_validate_msi_with_enough_permission(cmd.cli_ctx, principal_id)
logger.debug("Validate Azure AD authentication: the managed identity has required Graph API permission.") | Validates Azure AD authentication.
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param namespace: The argparse namespace representing the arguments.
:type namespace: argparse.Namespace. | validate_azure_ad_authentication | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def _validate_msi_valid_on_vm(cli_ctx, namespace):
""" Validate the MSI is valid on the Azure virtual machine return the principalId of the MSI
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param namespace: The argparse namespace representing the arguments.
:type namespace: argparse.Namespace.
:return: The principalId of the MSI if found on this VM.
:rtype: str
"""
logger.debug("Validate Azure AD authentication regarding the validity of the managed identity.")
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
compute_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE)
# Retrieve the vm instance. This is a rest call to the server and deserialization afterwards
# therefore there is a greater chance of encountering an exception. Instead of propagating the exception
# to the caller directly, we will throw our own InvalidArgumentValueError with more context
# information.
try:
# Azure virtual machine has the same name as the SQL VM
vm = compute_client.virtual_machines.get(namespace.resource_group_name, namespace.sql_virtual_machine_name)
except Exception as ex:
raise InvalidArgumentValueError("Unable to validate Azure AD authentication due to retrieving the Azure virtual machine instance encountering an error: {}.".format(ex)) from ex
# The system-assigned MSI case.
if namespace.msi_client_id is None:
if vm.identity is None or not hasattr(vm.identity, 'principal_id') or getattr(vm.identity, 'principal_id') is None:
az_error = InvalidArgumentValueError("Enable Azure AD authentication with system-assigned managed identity but the system-assigned managed identity is not enabled on this Azure virtual machine.")
az_error.set_recommendation("Enable the system-assigned managed identity on the Azure virtual machine: {}.".format(namespace.sql_virtual_machine_name))
raise az_error
return vm.identity.principal_id
# The user-assigned MSI case.
if vm.identity is None or not hasattr(vm.identity, 'user_assigned_identities') or getattr(vm.identity, 'user_assigned_identities') is None:
az_error = InvalidArgumentValueError("Enable Azure AD authentication with user-assigned managed identity {}, but the managed identity is not attached to this Azure virtual machine.".format(namespace.msi_client_id))
az_error.set_recommendation("Attach the user-assigned managed identity {} to the Azure virtual machine {}.".format(namespace.msi_client_id, namespace.sql_virtual_machine_name))
raise az_error
for umi in vm.identity.user_assigned_identities.values():
if umi.client_id == namespace.msi_client_id:
return umi.principal_id
az_error = InvalidArgumentValueError("Enable Azure AD authentication with user-assigned managed identity {}, but the managed identity is not attached to this Azure virtual machine.".format(namespace.msi_client_id))
az_error.set_recommendation("Attach the user-assigned managed identity {} to the Azure virtual machine {}.".format(namespace.msi_client_id, namespace.sql_virtual_machine_name))
raise az_error | Validate the MSI is valid on the Azure virtual machine and return the principalId of the MSI
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param namespace: The argparse namespace represents the arguments.
:type namespace: argpase.Namespace.
:return: The principalId of the MSI if found on this VM.
:rtype: str | _validate_msi_valid_on_vm | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def _validate_msi_with_enough_permission(cli_ctx, principal_id):
""" Validate the MSI has enough permissions to query Microsoft Graph API, which are needed for SQL server to
carry out the Azure AD authentication.
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param principal_id: The principalId of the MSI.
:type principal_id: str.
"""
logger.debug("Validate Azure AD authentication regarding required Graph API permission.")
directory_roles = _directory_role_list(cli_ctx, principal_id)
# If the MSI is assigned the "Directory Readers" role, it has enough permissions.
if any(role["displayName"] == "Directory Readers" for role in directory_roles):
return
# If the MSI is not assigned the "Directory Readers" role, check the app roles.
# Retrieve the app role Id for User.Read.All, Application.Read.All, GroupMember.Read.All roles.
app_role_id_map = _find_role_id(cli_ctx)
logger.debug("Validate Azure AD authentication: app role to app role Id map:%s.", str(app_role_id_map))
# Retrieve all the role assignments assigned to the MSI
app_role_assignments = _app_role_assignment_list(cli_ctx, principal_id)
all_assigned_role_ids = [assignment["appRoleId"] for assignment in app_role_assignments]
# Find all the missing roles.
required_role_names = [USER_READ_ALL, APPLICATION_READ_ALL, GROUP_MEMBER_READ_ALL]
missing_roles = [role_name for role_name in required_role_names if app_role_id_map[role_name] not in all_assigned_role_ids]
if len(missing_roles) > 0:
az_error = InvalidArgumentValueError("The managed identity is lack of the following roles for Azure AD authentication: {}.".format(", ".join(missing_roles)))
az_error.set_recommendation("Grant the managed identity EITHER the Directory.Readers role OR the three App roles 'User.Read.All', 'Application.Read.All', 'GroupMember.Read.All'")
raise az_error | Validate the MSI has enough permissions to query Microsoft Graph API, which are needed for SQL server to
carry out the Azure AD authentication.
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param principal_id: The principalId of the MSI.
:type principal_id: str. | _validate_msi_with_enough_permission | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def _send(cli_ctx, method, url, param=None, body=None):
""" Send the HTTP requet to the url
Copied from src/azure-cli/azure/cli/command_modules/role/_msgrpah/_graph_client.py with minor modification
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param method: The HTTP method.
:type method: str.
:param url: The target HTTP url.
:type url: str.
:param param: The HTTP query parameters.
:type param: str.
:param body: The HTTP body as python object.
:type body: object.
"""
from azure.cli.core.util import send_raw_request
# Get the Microsoft Graph API endpoint from CLI metadata
# https://graph.microsoft.com/ (AzureCloud)
graph_endpoint = cli_ctx.cloud.endpoints.microsoft_graph_resource_id.rstrip('/')
graph_resource = cli_ctx.cloud.endpoints.microsoft_graph_resource_id
# Microsoft Graph API version to use
MICROSOFT_GRAPH_API_VERSION = "v1.0"
url = f'{graph_endpoint}/{MICROSOFT_GRAPH_API_VERSION}{url}'
if body:
body = json.dumps(body)
list_result = []
is_list_result = False
while True:
try:
r = send_raw_request(cli_ctx, method, url, resource=graph_resource, uri_parameters=param, body=body)
except Exception as ex:
raise InvalidArgumentValueError(MICROSOFT_GRAPH_API_ERROR.format(ex)) from ex
if r.text:
if 'InternalServerError' in r.text:
return None
dic = r.json()
# The result is a list. Add value to list_result.
if 'value' in dic:
is_list_result = True
list_result.extend(dic['value'])
# Follow nextLink if available
if '@odata.nextLink' in dic:
url = dic['@odata.nextLink']
continue
# Result a list
if is_list_result:
# 'value' can be empty list [], so we can't determine if the result is a list only by
# bool(list_result)
return list_result
# Return a single object
return r.json()
return None | Send the HTTP request to the url
Copied from src/azure-cli/azure/cli/command_modules/role/_msgrpah/_graph_client.py with minor modification
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:param method: The HTTP method.
:type method: str.
:param url: The target HTTP url.
:type url: str.
:param param: The HTTP query parameters.
:type param: str.
:param body: The HTTP body as python object.
:type body: object. | _send | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
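The role-lookup helpers `_directory_role_list` and `_app_role_assignment_list` used earlier are not shown in this excerpt. As a non-authoritative sketch of how such a helper could be built on `_send` (the endpoint is the documented Microsoft Graph route for app role assignments; the helper's exact name and signature here are assumptions):

```python
# Hedged sketch only -- not the actual Azure CLI implementation.
def _app_role_assignment_list(cli_ctx, principal_id):
    # GET /servicePrincipals/{id}/appRoleAssignments lists the Graph app roles
    # granted to the managed identity's service principal.
    return _send(cli_ctx, 'GET', '/servicePrincipals/{}/appRoleAssignments'.format(principal_id))
```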
def _find_role_id(cli_ctx):
""" Find the appRoleId for the following three app roles User.Read.All, Application.Read.All, GroupMember.Read.All.
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:return: The app role name to appRoleId map
:rtype: dict
"""
app_role_id_map = {}
MICROSOFT_GRAPH_URL = "/servicePrincipals?$filter=displayName%20eq%20'Microsoft%20Graph'"
try:
service_principals = _send(cli_ctx, "GET", MICROSOFT_GRAPH_URL)
except Exception as ex: # pylint: disable=broad-except
raise InvalidArgumentValueError(MICROSOFT_GRAPH_API_ERROR.format(ex)) from ex
# If we failed to find the Microsoft Graph service application, fail the validation.
# This in fact should not happen.
if service_principals is None or len(service_principals) == 0:
error_message = "Querying Microsoft Graph API failed to find the service principal of Microsoft Graph Application"
raise InvalidArgumentValueError(MICROSOFT_GRAPH_API_ERROR.format(error_message))
app_roles = service_principals[0]['appRoles']
for app_role in app_roles:
if app_role["value"] == USER_READ_ALL:
app_role_id_map[USER_READ_ALL] = app_role["id"]
elif app_role["value"] == APPLICATION_READ_ALL:
app_role_id_map[APPLICATION_READ_ALL] = app_role["id"]
elif app_role["value"] == GROUP_MEMBER_READ_ALL:
app_role_id_map[GROUP_MEMBER_READ_ALL] = app_role["id"]
# If we failed to find all role definitions, fail the validation.
# This in fact should not happen.
if len(app_role_id_map) < 3:
required_role_defs = [USER_READ_ALL, APPLICATION_READ_ALL, GROUP_MEMBER_READ_ALL]
missing_role_defs = [role for role in required_role_defs if role not in app_role_id_map]
error_message = "Querying Microsoft Graph API failed to find the following roles: {}.".format(", ".join(missing_role_defs))
logger.warning(error_message)
raise InvalidArgumentValueError(MICROSOFT_GRAPH_API_ERROR.format(error_message))
return app_role_id_map | Find the appRoleId for the following three app roles User.Read.All, Application.Read.All, GroupMember.Read.All.
:param cli_ctx: The CLI context.
:type cli_ctx: AzCli.
:return: The app role name to appRoleId map
:rtype: dict | _find_role_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_validators.py | MIT |
def sqlvm_list(
client,
resource_group_name=None):
'''
Lists all SQL virtual machines in a resource group or subscription.
'''
if resource_group_name:
# List all sql vms in the resource group
return client.list_by_resource_group(
resource_group_name=resource_group_name)
# List all sql vms in the subscription
return client.list() | Lists all SQL virtual machines in a resource group or subscription. | sqlvm_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_group_list(
client,
resource_group_name=None):
'''
Lists all SQL virtual machine groups in a resource group or subscription.
'''
if resource_group_name:
# List all sql vm groups in the resource group
return client.list_by_resource_group(
resource_group_name=resource_group_name)
# List all sql vm groups in the subscription
return client.list() | Lists all SQL virtual machine groups in a resource group or subscription. | sqlvm_group_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_group_create(
client,
cmd,
sql_virtual_machine_group_name,
resource_group_name,
sql_image_offer,
sql_image_sku,
domain_fqdn,
cluster_operator_account,
sql_service_account,
storage_account_url,
cluster_subnet_type="SingleSubnet",
storage_account_key=None,
location=None,
cluster_bootstrap_account=None,
file_share_witness_path=None,
ou_path=None,
tags=None):
'''
Creates a SQL virtual machine group.
'''
tags = tags or {}
if not storage_account_key:
storage_account_key = prompt_pass('Storage Key: ', confirm=True)
# Create the windows server failover cluster domain profile object.
wsfc_domain_profile_object = WsfcDomainProfile(
domain_fqdn=domain_fqdn,
ou_path=ou_path,
cluster_bootstrap_account=cluster_bootstrap_account,
cluster_operator_account=cluster_operator_account,
sql_service_account=sql_service_account,
file_share_witness_path=file_share_witness_path,
storage_account_url=storage_account_url,
storage_account_primary_key=storage_account_key,
cluster_subnet_type=cluster_subnet_type)
sqlvm_group_object = SqlVirtualMachineGroup(
sql_image_offer=sql_image_offer,
sql_image_sku=sql_image_sku,
wsfc_domain_profile=wsfc_domain_profile_object,
location=location,
tags=tags)
# Since it's a running operation, we will do the put and then the get to
# display the instance.
LongRunningOperation(
cmd.cli_ctx)(
sdk_no_wait(
False,
client.begin_create_or_update,
resource_group_name,
sql_virtual_machine_group_name,
sqlvm_group_object))
return client.get(resource_group_name, sql_virtual_machine_group_name) | Creates a SQL virtual machine group. | sqlvm_group_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_group_update(
instance,
domain_fqdn=None,
cluster_operator_account=None,
sql_service_account=None,
storage_account_url=None,
storage_account_key=None,
cluster_bootstrap_account=None,
file_share_witness_path=None,
ou_path=None,
tags=None,
cluster_subnet_type=None):
'''
Updates a SQL virtual machine group.
'''
if domain_fqdn is not None:
instance.wsfc_domain_profile.domain_fqdn = domain_fqdn
if cluster_operator_account is not None:
instance.wsfc_domain_profile.cluster_operator_account = cluster_operator_account
if cluster_bootstrap_account is not None:
instance.wsfc_domain_profile.cluster_bootstrap_account = cluster_bootstrap_account
if sql_service_account is not None:
instance.wsfc_domain_profile.sql_service_account = sql_service_account
if storage_account_url is not None:
instance.wsfc_domain_profile.storage_account_url = storage_account_url
if storage_account_key is not None:
instance.wsfc_domain_profile.storage_account_primary_key = storage_account_key
if storage_account_url and not storage_account_key:
instance.wsfc_domain_profile.storage_account_primary_key = prompt_pass(
'Storage Key: ', confirm=True)
if file_share_witness_path is not None:
instance.wsfc_domain_profile.file_share_witness_path = file_share_witness_path
if ou_path is not None:
instance.wsfc_domain_profile.ou_path = ou_path
if cluster_subnet_type is not None:
instance.wsfc_domain_profile.cluster_subnet_type = cluster_subnet_type
if tags is not None:
instance.tags = tags
return instance | Updates a SQL virtual machine group. | sqlvm_group_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_aglistener_create(client, cmd, availability_group_listener_name, sql_virtual_machine_group_name,
resource_group_name, availability_group_name, ip_address, subnet_resource_id,
load_balancer_resource_id, probe_port, sql_virtual_machine_instances, port=1433,
public_ip_address_resource_id=None, vnet_name=None): # pylint: disable=unused-argument
'''
Creates an availability group listener
'''
# Create the private ip address
private_ip_object = PrivateIPAddress(ip_address=ip_address,
subnet_resource_id=subnet_resource_id
if is_valid_resource_id(subnet_resource_id) else None)
# Create the load balancer configurations
load_balancer_object = LoadBalancerConfiguration(
private_ip_address=private_ip_object,
public_ip_address_resource_id=public_ip_address_resource_id,
load_balancer_resource_id=load_balancer_resource_id,
probe_port=probe_port,
sql_virtual_machine_instances=sql_virtual_machine_instances)
# Create the availability group listener object
ag_listener_object = AvailabilityGroupListener(
availability_group_name=availability_group_name,
load_balancer_configurations=[load_balancer_object],
port=port)
LongRunningOperation(
cmd.cli_ctx)(
sdk_no_wait(
False,
client.begin_create_or_update,
resource_group_name,
sql_virtual_machine_group_name,
availability_group_listener_name,
ag_listener_object))
return client.get(
resource_group_name,
sql_virtual_machine_group_name,
availability_group_listener_name) | Creates an availability group listener | sqlvm_aglistener_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def aglistener_update(instance, sql_virtual_machine_instances=None):
'''
Updates an availability group listener
'''
# Get the list of all current machines in the ag listener
if sql_virtual_machine_instances:
instance.load_balancer_configurations[0].sql_virtual_machine_instances = sql_virtual_machine_instances
return instance | Updates an availability group listener | aglistener_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_create(
client,
cmd,
sql_virtual_machine_name,
resource_group_name,
sql_server_license_type=None,
location=None,
sql_image_sku=None,
enable_auto_patching=None,
sql_management_mode="LightWeight",
least_privilege_mode=None,
day_of_week=None,
maintenance_window_starting_hour=None,
maintenance_window_duration=None,
enable_auto_backup=None,
enable_encryption=False,
retention_period=None,
storage_account_url=None,
storage_access_key=None,
backup_password=None,
backup_system_dbs=False,
backup_schedule_type=None,
full_backup_frequency=None,
full_backup_start_time=None,
full_backup_window_hours=None,
log_backup_frequency=None,
enable_key_vault_credential=None,
credential_name=None,
azure_key_vault_url=None,
service_principal_name=None,
service_principal_secret=None,
connectivity_type=None,
port=None,
sql_auth_update_username=None,
sql_auth_update_password=None,
sql_workload_type=None,
enable_r_services=None,
tags=None,
sql_image_offer=None):
'''
Creates a SQL virtual machine.
'''
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
virtual_machine_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.Compute',
type='virtualMachines',
name=sql_virtual_machine_name)
tags = tags or {}
# If customer has provided any auto_patching settings, enabling plugin
# should be True
if (day_of_week or maintenance_window_duration or maintenance_window_starting_hour):
enable_auto_patching = True
auto_patching_object = AutoPatchingSettings(
enable=enable_auto_patching,
day_of_week=day_of_week,
maintenance_window_starting_hour=maintenance_window_starting_hour,
maintenance_window_duration=maintenance_window_duration)
# If customer has provided any auto_backup settings, enabling plugin
# should be True
if (enable_encryption or retention_period or storage_account_url or storage_access_key or backup_password or
backup_system_dbs or backup_schedule_type or full_backup_frequency or full_backup_start_time or
full_backup_window_hours or log_backup_frequency):
enable_auto_backup = True
if not storage_access_key:
storage_access_key = prompt_pass('Storage Key: ', confirm=True)
if enable_encryption and not backup_password:
backup_password = prompt_pass('Backup Password: ', confirm=True)
auto_backup_object = AutoBackupSettings(
enable=enable_auto_backup,
enable_encryption=enable_encryption if enable_auto_backup else None,
retention_period=retention_period,
storage_account_url=storage_account_url,
storage_access_key=storage_access_key,
password=backup_password,
backup_system_dbs=backup_system_dbs if enable_auto_backup else None,
backup_schedule_type=backup_schedule_type,
full_backup_frequency=full_backup_frequency,
full_backup_start_time=full_backup_start_time,
full_backup_window_hours=full_backup_window_hours,
log_backup_frequency=log_backup_frequency)
# If customer has provided any key_vault_credential settings, enabling
# plugin should be True
if (credential_name or azure_key_vault_url or service_principal_name or service_principal_secret):
enable_key_vault_credential = True
if not service_principal_secret:
service_principal_secret = prompt_pass(
'Service Principal Secret: ', confirm=True)
keyvault_object = KeyVaultCredentialSettings(
enable=enable_key_vault_credential,
credential_name=credential_name,
azure_key_vault_url=azure_key_vault_url,
service_principal_name=service_principal_name,
service_principal_secret=service_principal_secret)
connectivity_object = SqlConnectivityUpdateSettings(
port=port,
connectivity_type=connectivity_type,
sql_auth_update_user_name=sql_auth_update_username,
sql_auth_update_password=sql_auth_update_password)
workload_type_object = SqlWorkloadTypeUpdateSettings(
sql_workload_type=sql_workload_type)
additional_features_object = AdditionalFeaturesServerConfigurations(
is_r_services_enabled=enable_r_services)
server_configuration_object = ServerConfigurationsManagementSettings(
sql_connectivity_update_settings=connectivity_object,
sql_workload_type_update_settings=workload_type_object,
additional_features_server_configurations=additional_features_object)
sqlvm_object = SqlVirtualMachine(
location=location,
virtual_machine_resource_id=virtual_machine_resource_id,
sql_server_license_type=sql_server_license_type,
least_privilege_mode=least_privilege_mode,
sql_image_sku=sql_image_sku,
sql_management=sql_management_mode,
sql_image_offer=sql_image_offer,
auto_patching_settings=auto_patching_object,
auto_backup_settings=auto_backup_object,
key_vault_credential_settings=keyvault_object,
server_configurations_management_settings=server_configuration_object,
tags=tags)
# Since this is a long-running operation, we do the put and then a get to
# display the resulting instance.
LongRunningOperation(
cmd.cli_ctx)(
sdk_no_wait(
False,
client.begin_create_or_update,
resource_group_name,
sql_virtual_machine_name,
sqlvm_object))
return client.get(resource_group_name, sql_virtual_machine_name) | Creates a SQL virtual machine. | sqlvm_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
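Worth calling out in the creation logic above: supplying any individual auto-patching, auto-backup, or key-vault sub-setting implicitly switches the corresponding feature on. The snippet below is a minimal, standalone Python sketch of that implicit-enable rule; the helper name is illustrative only and is not part of the module.

def implies_enable(explicit_enable, *sub_settings):
    # Mirrors sqlvm_create: if the caller supplied any sub-setting,
    # the feature is treated as enabled even though the explicit flag
    # was never passed.
    if any(sub_settings):
        return True
    return bool(explicit_enable)

# Supplying only a patching day implicitly enables auto-patching.
print(implies_enable(None, 'Monday', None, None))  # True
# Nothing supplied: the feature stays disabled.
print(implies_enable(None, None, None, None))      # False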
def sqlvm_update(
cmd,
instance,
sql_virtual_machine_name,
resource_group_name,
sql_server_license_type=None,
sql_image_sku=None,
least_privilege_mode=None,
enable_auto_patching=None,
day_of_week=None,
maintenance_window_starting_hour=None,
maintenance_window_duration=None,
enable_auto_backup=None,
enable_encryption=False,
retention_period=None,
storage_account_url=None,
prompt=True,
storage_access_key=None,
backup_password=None,
backup_system_dbs=False,
backup_schedule_type=None,
sql_management_mode=None,
full_backup_frequency=None,
full_backup_start_time=None,
full_backup_window_hours=None,
log_backup_frequency=None,
enable_key_vault_credential=None,
credential_name=None,
azure_key_vault_url=None,
service_principal_name=None,
service_principal_secret=None,
connectivity_type=None,
port=None,
sql_workload_type=None,
enable_r_services=None,
tags=None,
enable_assessment=None,
enable_assessment_schedule=None,
assessment_weekly_interval=None,
assessment_monthly_occurrence=None,
assessment_day_of_week=None,
assessment_start_time_local=None,
workspace_name=None,
workspace_rg=None,
workspace_sub=None,
agent_rg=None):
'''
Updates a SQL virtual machine.
'''
if tags is not None:
instance.tags = tags
if sql_server_license_type is not None:
instance.sql_server_license_type = sql_server_license_type
if sql_image_sku is not None:
instance.sql_image_sku = sql_image_sku
if sql_management_mode is not None:
instance.sql_management = sql_management_mode
if least_privilege_mode is not None:
instance.least_privilege_mode = least_privilege_mode
if (enable_auto_patching is not None or day_of_week is not None or maintenance_window_starting_hour is not None or maintenance_window_duration is not None):
enable_auto_patching = enable_auto_patching if enable_auto_patching is False else True
instance.auto_patching_settings = AutoPatchingSettings(
enable=enable_auto_patching,
day_of_week=day_of_week,
maintenance_window_starting_hour=maintenance_window_starting_hour,
maintenance_window_duration=maintenance_window_duration)
if (enable_auto_backup is not None or enable_encryption or retention_period is not None or storage_account_url is not None or
storage_access_key is not None or backup_password is not None or backup_system_dbs or backup_schedule_type is not None or
full_backup_frequency is not None or full_backup_start_time is not None or full_backup_window_hours is not None or
log_backup_frequency is not None):
enable_auto_backup = enable_auto_backup if enable_auto_backup is False else True
if not storage_access_key:
storage_access_key = prompt_pass('Storage Key: ', confirm=True)
if enable_encryption and not backup_password:
backup_password = prompt_pass('Backup Password: ', confirm=True)
instance.auto_backup_settings = AutoBackupSettings(
enable=enable_auto_backup,
enable_encryption=enable_encryption if enable_auto_backup else None,
retention_period=retention_period,
storage_account_url=storage_account_url,
storage_access_key=storage_access_key,
password=backup_password,
backup_system_dbs=backup_system_dbs if enable_auto_backup else None,
backup_schedule_type=backup_schedule_type,
full_backup_frequency=full_backup_frequency,
full_backup_start_time=full_backup_start_time,
full_backup_window_hours=full_backup_window_hours,
log_backup_frequency=log_backup_frequency)
if (enable_key_vault_credential is not None or credential_name is not None or azure_key_vault_url is not None or
service_principal_name is not None or service_principal_secret is not None):
enable_key_vault_credential = enable_key_vault_credential if enable_key_vault_credential is False else True
if not service_principal_secret:
service_principal_secret = prompt_pass(
'Service Principal Secret: ', confirm=True)
instance.key_vault_credential_settings = KeyVaultCredentialSettings(
enable=enable_key_vault_credential,
credential_name=credential_name,
service_principal_name=service_principal_name,
service_principal_secret=service_principal_secret,
azure_key_vault_url=azure_key_vault_url)
instance.server_configurations_management_settings = ServerConfigurationsManagementSettings()
if (connectivity_type is not None or port is not None):
instance.server_configurations_management_settings.sql_connectivity_update_settings = SqlConnectivityUpdateSettings(
connectivity_type=connectivity_type, port=port)
if sql_workload_type is not None:
instance.server_configurations_management_settings.sql_workload_type_update_settings = SqlWorkloadTypeUpdateSettings(
sql_workload_type=sql_workload_type)
if enable_r_services is not None:
instance.server_configurations_management_settings.additional_features_server_configurations = AdditionalFeaturesServerConfigurations(
is_r_services_enabled=enable_r_services)
# If none of the settings was modified, reset
# server_configurations_management_settings to be null
if (instance.server_configurations_management_settings.sql_connectivity_update_settings is None and
instance.server_configurations_management_settings.sql_workload_type_update_settings is None and
instance.server_configurations_management_settings.sql_storage_update_settings is None and
instance.server_configurations_management_settings.additional_features_server_configurations is None):
instance.server_configurations_management_settings = None
set_assessment_properties(cmd,
instance,
enable_assessment,
enable_assessment_schedule,
assessment_weekly_interval,
assessment_monthly_occurrence,
assessment_day_of_week,
assessment_start_time_local,
resource_group_name,
sql_virtual_machine_name,
workspace_rg,
workspace_name,
workspace_sub,
agent_rg)
return instance | Updates a SQL virtual machine. | sqlvm_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
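The update path uses a tri-state pattern for the enable flags: once any related setting is supplied, the expression `enable_auto_patching if enable_auto_patching is False else True` collapses both None (flag omitted) and True to enabled, while an explicit False is preserved so the feature can still be switched off. A standalone sketch of that expression:

def resolve_enable(flag):
    # Same expression as in sqlvm_update: only an explicit False survives.
    return flag if flag is False else True

for value in (None, True, False):
    print(value, '->', resolve_enable(value))
# None -> True
# True -> True
# False -> False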
def sqlvm_enable_azure_ad_auth(client, cmd, sql_virtual_machine_name, resource_group_name, msi_client_id=None, skip_client_validation=None):
''' Enable Azure AD authentication on a SQL virtual machine.
:param cmd: The CLI command.
:type cmd: AzCliCommand.
:param sql_virtual_machine_name: The name of the SQL virtual machine.
:type sql_virtual_machine_name: str.
:param resource_group_name: The resource group name
:type resource_group_name: str.
:param msi_client_id: The clientId of the managed identity used in Azure AD authentication.
None means system-assigned managed identity
:type: str.
:param skip_client_validation: Whether to skip the client side validation. The server side validation always happens.
This parameter is used in the validation and ignored here.
:type: bool.
:return: The updated Sql Virtual Machine instance.
:rtype: SqlVirtualMachine.
'''
sqlvm_object = client.get(resource_group_name, sql_virtual_machine_name)
if sqlvm_object.server_configurations_management_settings is None:
sqlvm_object.server_configurations_management_settings = ServerConfigurationsManagementSettings()
sqlvm_object.server_configurations_management_settings.azure_ad_authentication_settings = AADAuthenticationSettings(client_id=msi_client_id if msi_client_id else '')
# Since this is a long-running operation, we do the put and then a get to display the resulting instance.
LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False, client.begin_create_or_update,
resource_group_name, sql_virtual_machine_name, sqlvm_object))
return client.get(resource_group_name, sql_virtual_machine_name) | Enable Azure AD authentication on a SQL virtual machine.
:param cmd: The CLI command.
:type cmd: AzCliCommand.
:param sql_virtual_machine_name: The name of the SQL virtual machine.
:type sql_virtual_machine_name: str.
:param resource_group_name: The resource group name
:type resource_group_name: str.
:param msi_client_id: The clientId of the managed identity used in Azure AD authentication.
None means system-assigned managed identity
:type: str.
:param skip_client_validation: Whether to skip the client side validation. The server side validation always happens.
This parameter is used in the validation and ignored here.
:type: bool.
:return: The updated Sql Virtual Machine instance.
:rtype: SqlVirtualMachine. | sqlvm_enable_azure_ad_auth | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
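As the docstring notes, omitting --msi-client-id means the system-assigned identity is used; the settings object encodes that as an empty client id. A tiny illustrative helper (not part of the module) showing the mapping:

def aad_client_id(msi_client_id=None):
    # Mirrors AADAuthenticationSettings(client_id=msi_client_id if msi_client_id else ''):
    # an empty string signals the system-assigned managed identity.
    return msi_client_id if msi_client_id else ''

print(repr(aad_client_id()))                                        # '' -> system-assigned
print(repr(aad_client_id('11111111-2222-3333-4444-555555555555')))  # user-assigned client id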
def validate_azure_ad_auth(cmd, sql_virtual_machine_name, resource_group_name, msi_client_id=None):
''' Validate whether Azure AD authentication is ready on a SQL virtual machine.
The validation logic lives in the validator method; if the SQL virtual machine passes the validator,
it is considered valid for Azure AD authentication.
:param cmd: The CLI command.
:type cmd: AzCliCommand.
:param resource_group_name: The resource group name
:type resource_group_name: str.
:param msi_client_id: The clientId of the managed identity used in Azure AD authentication.
None means system-assigned managed identity
:type: str.
:return: A message confirming that the SQL virtual machine passed validation.
:rtype: str.
'''
passing_validation_message = "Sql virtual machine {} is valid for Azure AD authentication.".format(sql_virtual_machine_name)
return passing_validation_message | Validate whether Azure AD authentication is ready on a SQL virtual machine.
The validation logic lives in the validator method; if the SQL virtual machine passes the validator,
it is considered valid for Azure AD authentication.
:param cmd: The CLI command.
:type cmd: AzCliCommand.
:param resource_group_name: The resource group name
:type resource_group_name: str.
:param msi_client_id: The clientId of the managed identity used in Azure AD authentication.
None means system-assigned managed identity
:type: str.
:return: A message confirming that the SQL virtual machine passed validation.
:rtype: str. | validate_azure_ad_auth | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_add_to_group(
client,
cmd,
sql_virtual_machine_name,
resource_group_name,
sql_virtual_machine_group_resource_id,
sql_service_account_password=None,
cluster_operator_account_password=None,
cluster_bootstrap_account_password=None):
'''
Adds a SQL virtual machine to a group.
'''
sqlvm_object = client.get(resource_group_name, sql_virtual_machine_name)
if not sql_service_account_password:
sql_service_account_password = prompt_pass(
'SQL Service account password: ', confirm=True)
if not cluster_operator_account_password:
cluster_operator_account_password = prompt_pass(
'Cluster operator account password: ',
confirm=True,
help_string='Password to authenticate with the domain controller.')
sqlvm_object.sql_virtual_machine_group_resource_id = sql_virtual_machine_group_resource_id
sqlvm_object.wsfc_domain_credentials = WsfcDomainCredentials(
cluster_bootstrap_account_password=cluster_bootstrap_account_password,
cluster_operator_account_password=cluster_operator_account_password,
sql_service_account_password=sql_service_account_password)
# Since this is a long-running operation, we do the put and then a get to
# display the resulting instance.
LongRunningOperation(
cmd.cli_ctx)(
sdk_no_wait(
False,
client.begin_create_or_update,
resource_group_name,
sql_virtual_machine_name,
sqlvm_object))
return client.get(resource_group_name, sql_virtual_machine_name) | Adds a SQL virtual machine to a group. | sqlvm_add_to_group | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def sqlvm_remove_from_group(
client,
cmd,
sql_virtual_machine_name,
resource_group_name):
'''
Removes a SQL virtual machine from a group.
'''
sqlvm_object = client.get(resource_group_name, sql_virtual_machine_name)
sqlvm_object.sql_virtual_machine_group_resource_id = None
sqlvm_object.wsfc_domain_credentials = None
# Since this is a long-running operation, we do the put and then a get to
# display the resulting instance.
LongRunningOperation(
cmd.cli_ctx)(
sdk_no_wait(
False,
client.begin_create_or_update,
resource_group_name,
sql_virtual_machine_name,
sqlvm_object))
return client.get(resource_group_name, sql_virtual_machine_name) | Removes a SQL virtual machine from a group. | sqlvm_remove_from_group | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
def set_assessment_properties(
cmd,
instance,
enable_assessment,
enable_assessment_schedule,
assessment_weekly_interval,
assessment_monthly_occurrence,
assessment_day_of_week,
assessment_start_time_local,
resource_group_name,
sql_virtual_machine_name,
workspace_rg,
workspace_name,
workspace_sub,
agent_rg):
'''
Set assessment properties to be sent in sql vm update
'''
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import random_string, send_raw_request
from azure.cli.command_modules.vm._vm_utils import ArmTemplateBuilder20190401
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from ._assessment_data_source import table_name
from itertools import count
# If assessment.schedule settings are provided but enable schedule is
# skipped, then ensure schedule is enabled
if (enable_assessment_schedule is None and (
assessment_weekly_interval is not None or assessment_monthly_occurrence or assessment_day_of_week or assessment_start_time_local)):
enable_assessment_schedule = True
# If assessment schedule is enabled but enable assessment is skipped, then
# ensure assessment is enabled
if (enable_assessment_schedule is not None and enable_assessment is None):
enable_assessment = True
if enable_assessment is not None:
instance.assessment_settings = AssessmentSettings()
instance.assessment_settings.enable = enable_assessment
if enable_assessment_schedule is not None:
instance.assessment_settings.schedule = Schedule()
instance.assessment_settings.schedule.enable = enable_assessment_schedule
if enable_assessment_schedule:
instance.assessment_settings.schedule.weekly_interval = assessment_weekly_interval
instance.assessment_settings.schedule.monthly_occurrence = assessment_monthly_occurrence
instance.assessment_settings.schedule.day_of_week = assessment_day_of_week
instance.assessment_settings.schedule.start_time = assessment_start_time_local
# Validate and deploy pre-requisites if necessary
# 1. Log Analytics extension for given workspace
# 2. Custom log definition on workspace
if enable_assessment:
workspace_id = None
curr_subscription = get_subscription_id(cmd.cli_ctx)
# Raise error if workspace arguments not provided by user
if workspace_name is None or workspace_rg is None:
raise RequiredArgumentMissingError(
'Assessment requires a Log Analytics workspace and Log Analytics extension on VM - '
'workspace name and workspace resource group must be specified to deploy pre-requisites.')
if agent_rg is None:
agent_rg = resource_group_name
# raise Warning -
# raise RequiredArgumentMissingError(
# 'Assessment requires a Resource Group to deploy the AMA Agent resources- '
if workspace_sub is None:
# raise warning => --workspace-sub not provided. Using current
# subscription to find LA WS
workspace_sub = curr_subscription
api_version = "2021-12-01-preview"
la_url = f"https://management.azure.com/subscriptions/{workspace_sub}/resourcegroups/{workspace_rg}/providers/Microsoft.OperationalInsights/workspaces/{workspace_name}?api-version={api_version}"
try:
la_response = send_raw_request(
cmd.cli_ctx, method="GET", url=la_url)
except Exception as e:
raise AzureResponseError(
f'Could not connect to the LA workspace - Error {e}.'
' If the workspace is not in the same subscription as the VM, use the --workspace-sub parameter')
la_response = la_response.json()
workspace_id = la_response['properties']['customerId']
workspace_loc = la_response['location']
workspace_res_id = la_response['id']
# Validate the agent_rg -> Check if DCR + DCE exist already
ama_sub = curr_subscription
url = f"https://management.azure.com/subscriptions/{ama_sub}/resourceGroups/{agent_rg}/providers/Microsoft.Insights/dataCollectionRules?api-version=2022-06-01"
try:
dcr_response = send_raw_request(cmd.cli_ctx, method="GET", url=url)
except HTTPError as e:
raise AzureResponseError(
f'An HTTP error occurred: {e}. '
f'Could not connect to the provided agent resource group {agent_rg}. Ensure the resource group is in the same subscription as {ama_sub}')
# response contains list of dcr's
dcr_response = dcr_response.json()
dcr_list = dcr_response['value']
dcr_found = False
# get list of all dcr names found
dcr_name_list = []
for dcr in dcr_list:
# Validate each dcr. Validation passes = reuse dcr
# Fully qualified resource url that can be used
dcr_id = dcr['id']
# Define the regex pattern for extracting the required fields
dcr_pattern = r'/subscriptions/([^/]+)/resourceGroups/([^/]+)/.*?/dataCollectionRules/([^/]+)'
# Search the id in dcr_List for the fields
dcr_match = re.search(dcr_pattern, dcr_id)
if dcr_match:
dcr_subId = dcr_match.group(1)
dcr_rg = dcr_match.group(2)
dcr_name = dcr_match.group(3)
dcr_name_list.append(dcr_name)
# Validate DCR Name with regex before continuing
dcr_name_pattern = re.compile(
r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}_[a-z0-9]+_DCR_\d+$",
re.IGNORECASE)
if dcr_name_pattern.match(dcr_name):
url = f"https://management.azure.com/subscriptions/{dcr_subId}/resourceGroups/{dcr_rg}/providers/Microsoft.Insights/dataCollectionRules/{dcr_name}?api-version=2022-06-01"
try:
dcr_response = send_raw_request(
cmd.cli_ctx, method="GET", url=url)
except HTTPError:
# continue as we couldn't connect to DCR. If all
# connections fail we create new anyway
continue
else:
# If DCR Name doesn't match then skip to check next DCR
continue
# Validate dcr response
dcr_response = dcr_response.json()
dcr_location = dcr_response['location']
dce_endpoint_id = dcr_response['properties']['dataCollectionEndpointId']
dcr_source_filePattern = dcr_response['properties']['dataSources']['logFiles'][0]['filePatterns'][0]
dcr_custom_log = dcr_response['properties']['dataFlows'][0]['outputStream']
# Custom-SqlAssessment_CL
dcr_la_id = dcr_response['properties']['destinations']['logAnalytics'][0]['workspaceId']
# CustomerId is the workspace Id GUID. ResourceId is full qualified resource path
# dcr_la_name = dcr_response['properties']['destinations']['logAnalytics'][0]['name']
# workspace name is arbitrary name given by DCR resource metadata
dcr_found = validate_dcr(
cmd,
dcr_location,
workspace_loc,
dcr_source_filePattern,
dcr_custom_log,
dcr_la_id,
workspace_id,
dce_endpoint_id)
if dcr_found:
validated_dcr_res_id = dcr_id
break
# Match all the stuff
# collect a list of all dcr names found in this rg and ensure no
# collision in new create name
# Validate_DCR():
# dcr follows naming convention
# Sample DCR NAME => 0009fc4d-e310-4e40-8e63-c48a23e9cdc1_eastus_DCR_1
# dcr location = la workspace location as they must be in same
# New Custom table deployment workflow:
# Check if old table exists - if yes - run POST command.
# If does not exist add to the deployment template
log_exists = does_custom_log_exist(
cmd, workspace_name, workspace_rg, workspace_sub)
# Check if log exists. V1 log checked with above. V2 log checked with
# GET https://management.azure.com/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}?api-version=2021-12-01-preview
# **This may be redundant if above checks v2 table by default **
custom_table_url = f"https://management.azure.com/subscriptions/{curr_subscription}/resourceGroups/{workspace_rg}/providers/Microsoft.OperationalInsights/workspaces/{workspace_name}/tables/{table_name}?api-version=2021-12-01-preview"
try:
response = send_raw_request(
cmd.cli_ctx, method="GET", url=custom_table_url)
if response.status_code == 200:
log_exists = True
elif response.status_code == 404:
log_exists = log_exists or False
else:
return False
except HTTPError:
log_exists = log_exists or False
if log_exists:
# Run a POST on the existing table for migration
# POST
# https://management.azure.com/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}/migrate?api-version=2021-12-01-preview
try:
# Does a GET on the dce to ensure no http errors - suffices
table_migration_url = f"https://management.azure.com/subscriptions/{workspace_sub}/resourceGroups/{workspace_rg}/providers/Microsoft.OperationalInsights/workspaces/{workspace_name}/tables/{table_name}/migrate?api-version=2021-12-01-preview"
send_raw_request(
cmd.cli_ctx,
method="POST",
url=table_migration_url)
except Exception as e:
raise AzureResponseError(
f"Old Custom Log detected. Migrating to Custom Table failed for url {table_migration_url}. Exception: {e}")
else:
# Define the endpoint URL
url = f"/subscriptions/{workspace_sub}/resourceGroups/{workspace_rg}/providers/Microsoft.OperationalInsights/workspaces/{workspace_name}/tables/{table_name}?api-version=2021-12-01-preview"
# Define the request headers
headers = [
'Content-Type=application/json'
]
body = create_custom_table()
try:
send_raw_request(
cmd.cli_ctx,
method='PUT',
url=url,
headers=headers,
body=body)
except Exception as e:
raise AzureResponseError(
f"Creating new Custom Table failed with error {e}")
if not dcr_found:
# Create DCE, DCR, DCRA, AMA Agent
dce_name = ""
dcr_name = ""
dcra_name = ""
# These resources must be deployed to a Resource Group in the same
# region as the LA workspace
# we must do get req and loop on dce till we get an http error so we know it does not exist
# else increase x and try again
base_url = f"https://management.azure.com/subscriptions/{curr_subscription}/resourceGroups/{agent_rg}/providers/Microsoft.Insights/dataCollectionEndpoints/"
api_version = "?api-version=2022-06-01"
for index in count(start=1):
dce_name = f"{workspace_loc}-DCE-{index}"
dce_url = f"{base_url}{dce_name}{api_version}"
if not does_name_exist(cmd, dce_url):
break
dce_res = f"/subscriptions/{curr_subscription}/resourceGroups/{agent_rg}/providers/Microsoft.Insights/dataCollectionEndpoints/{dce_name}"
base_url = f"https://management.azure.com/subscriptions/{curr_subscription}/resourceGroups/{agent_rg}/providers/Microsoft.Insights/dataCollectionRules/"
api_version = "?api-version=2022-06-01"
for index in count(start=1):
dcr_name = f"{workspace_id}_{workspace_loc}_DCR_{index}"
dcr_url = f"{base_url}{dcr_name}{api_version}"
if not does_name_exist(cmd, dcr_url):
break
master_template = ArmTemplateBuilder20190401()
dce = build_dce_resource(dce_name, workspace_loc)
master_template.add_resource(dce)
dcr = build_dcr_resource(
dcr_name,
workspace_loc,
workspace_name,
workspace_res_id,
dce_res,
dce_name)
master_template.add_resource(dcr)
# amainstall = build_ama_install_resource(sql_virtual_machine_name, vm.location, resource_group_name, curr_subscription)
# master_template.add_resource(amainstall)
# /subscriptions/0009fc4d-e310-4e40-8e63-c48a23e9cdc1/resourceGroups/abhaga-iaasrg/providers/Microsoft.Insights/dataCollectionRules/0009fc4d-e310-4e40-8e63-c48a23e9cdc1_eastus_DCR_1
dcr_resource_id = f"/subscriptions/{curr_subscription}/resourceGroups/{agent_rg}/providers/Microsoft.Insights/dataCollectionRules/{dcr_name}"
# GET
# https://management.azure.com/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}?api-version=2022-06-01
# dcrlinkage = build_dcr_vm_linkage_resource(sql_virtual_machine_name, dcra_name, dcr_resource_id, dcr_name)
# For DCRA do a put request after template deployment
# PUT https://management.azure.com/subscriptions/703362b3-f278-4e4b-9179-c76eaf41ffc2/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVm/providers/Microsoft.Insights/dataCollectionRuleAssociations/myAssociation?api-version=2022-06-01
# {
# "properties": {
# "dataCollectionRuleId": "/subscriptions/703362b3-f278-4e4b-9179-c76eaf41ffc2/resourceGroups/myResourceGroup/providers/Microsoft.Insights/dataCollectionRules/myCollectionRule"
# }
# }
# master_template.add_resource(dcrlinkage)
template = master_template.build()
# deploy ARM template
deployment_name = 'vm_deploy_' + random_string(32)
client = get_mgmt_service_client(
cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
DeploymentProperties = cmd.get_models(
'DeploymentProperties',
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models(
'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
# creates the AMA DEPLOYMENT
LongRunningOperation(
cmd.cli_ctx)(
client.begin_create_or_update(
agent_rg,
deployment_name,
deployment))
# amainstall = build_ama_install_resource(sql_virtual_machine_name, vm.location, resource_group_name, curr_subscription)
# master_template.add_resource(amainstall)
create_ama_and_dcra(cmd, curr_subscription, resource_group_name, sql_virtual_machine_name, workspace_id, workspace_loc, dcr_resource_id)
else:
# DCR and DCE were validated
# build ARM template for linkage resource and AMA installation
create_ama_and_dcra(cmd, curr_subscription, resource_group_name, sql_virtual_machine_name, workspace_id, workspace_loc, validated_dcr_res_id)
return
elif enable_assessment is False:
# Delete DCRA
# Otherwise AssessmentSetting payload is set above
# GET DCRA ATTACHED TO VM: Validate for Assessment and delete
# Unless we can track assessment dcra resource id.
# GET
# https://management.azure.com/subscriptions/703362b3-f278-4e4b-9179-c76eaf41ffc2/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVm/providers/Microsoft.Insights/dataCollectionRuleAssociations?api-version=2022-06-01
vm_sub = get_subscription_id(cmd.cli_ctx)
vm_rg = resource_group_name
vm_name = sql_virtual_machine_name
dcra_get_url = f"https://management.azure.com/subscriptions/{vm_sub}/resourceGroups/{vm_rg}/providers/Microsoft.Compute/virtualMachines/{vm_name}/providers/Microsoft.Insights/dataCollectionRuleAssociations?api-version=2022-06-01"
try:
# GET on VM DCRA endpoint to list all DCRA attached to this VM
dcra_list = send_raw_request(
cmd.cli_ctx, method="GET", url=dcra_get_url)
except HTTPError:
# No DCRA Found. Assessment is disabled through AMA.
return
dcra_list = dcra_list.json()
if 'value' in dcra_list and not dcra_list['value']:
# Raise warning or message saying no DCRA found
return
for dcra in dcra_list['value']:
dcra_name = dcra['name']
pattern = re.compile(
r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}_[a-z0-9]+_DCRA_\d+$",
re.IGNORECASE)
if pattern.match(dcra_name):
# Match from response the values and add to url then delete
curr_subscription = get_subscription_id(cmd.cli_ctx)
resourceUri = f"subscriptions/{curr_subscription}/resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines/{sql_virtual_machine_name}"
# DELETE
# https://management.azure.com/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}?api-version=2022-06-01
dcra_url = f"https://management.azure.com/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{dcra_name}?api-version=2022-06-01"
send_raw_request(
cmd.cli_ctx, method="DELETE", url=dcra_url)
return | Set assessment properties to be sent in sql vm update | set_assessment_properties | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/custom.py | MIT |
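The assessment plumbing above decides whether to reuse or delete monitoring resources purely by naming convention: <workspace GUID>_<location>_DCR_<n> for data collection rules (and _DCRA_ for the associations). A standalone check against that pattern, using placeholder names rather than real resources:

import re

DCR_NAME_PATTERN = re.compile(
    r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}_[a-z0-9]+_DCR_\d+$",
    re.IGNORECASE)

# The GUID and region below are placeholders.
print(bool(DCR_NAME_PATTERN.match(
    "00000000-0000-0000-0000-000000000000_eastus_DCR_1")))  # True
print(bool(DCR_NAME_PATTERN.match("my-custom-dcr")))         # False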
def get_workspace_id_from_log_analytics_extension(
cmd, resource_group_name, sql_virtual_machine_name):
'''
Get workspace id from Log Analytics extension on VM
'''
extensions = list_extensions(
cmd,
resource_group_name,
sql_virtual_machine_name)
for ext in extensions:
if (ext.publisher == WINDOWS_LA_EXT_PUBLISHER and
ext.type_properties_type == WINDOWS_LA_EXT_NAME):
return ext.settings.get('workspaceId', None)
return None | Get workspace id from Log Analytics extension on VM | get_workspace_id_from_log_analytics_extension | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | MIT |
def set_log_analytics_extension(
cmd,
resource_group_name,
vm_name,
workspace_rg,
workspace_name):
'''
Deploy Log Analytics extension to the Windows VM
'''
log_client = _get_log_analytics_client(cmd)
# Get workspace id and key
customer_id = log_client.workspaces.get(
workspace_rg, workspace_name).customer_id
settings = {
'workspaceId': customer_id,
'stopOnMultipleConnections': 'true'
}
primary_shared_key = log_client.shared_keys.get_shared_keys(
workspace_rg, workspace_name).primary_shared_key
protected_settings = {
'workspaceKey': primary_shared_key
}
return set_extension(cmd, resource_group_name, vm_name,
WINDOWS_LA_EXT_NAME,
WINDOWS_LA_EXT_PUBLISHER,
WINDOWS_LA_EXT_VERSION,
settings,
protected_settings), customer_id | Deploy Log Analytics extension to the Windows VM | set_log_analytics_extension | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | MIT |
def get_workspace_in_sub(cmd, workspace_id):
'''
Get workspace details for given workspace id
'''
log_client = _get_log_analytics_client(cmd)
obj_list = log_client.workspaces.list()
workspaces = list(obj_list) if isinstance(
obj_list, ItemPaged) else obj_list # Convert iterable to list
return next((w for w in workspaces if w.customer_id == workspace_id), None) | Get workspace details for given workspace id | get_workspace_in_sub | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | MIT |
def does_custom_log_exist(cmd, workspace_name, workspace_rg, workspace_sub):
'''
Check whether the custom log definition required by the assessment feature already exists
'''
subscription_id = workspace_sub
data_sources_client = cf_log_analytics_data_sources(
cmd.cli_ctx, subscription_id)
try:
# Verify if required custom log definition already exists. Same for custom table.
# Does this detect new custom tables? Checks if LA workspace has this
# data source
data_sources_client.get(workspace_rg, workspace_name, data_source_name)
except HttpResponseError as err:
# Required custom log definition does not exist so deploy it
if err.status_code == 404:
return False
raise err
return True | Check whether the custom log definition required by the assessment feature already exists | does_custom_log_exist | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_util.py | MIT |
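does_custom_log_exist follows a common ARM idiom: issue the GET, treat a 404 as "does not exist", and re-raise anything else so real failures are not swallowed. A generic sketch of that idiom with a stand-in error type (the fetch callable and error class are placeholders, not the Azure SDK):

class FakeHttpError(Exception):
    def __init__(self, status_code):
        super().__init__(f"HTTP {status_code}")
        self.status_code = status_code

def resource_exists(fetch):
    # fetch is any zero-argument callable that raises FakeHttpError on failure.
    try:
        fetch()
    except FakeHttpError as err:
        if err.status_code == 404:
            return False   # not found: the resource simply is not there
        raise              # any other status is a genuine error; surface it
    return True

def found():
    return {"name": "SqlAssessment"}

def missing():
    raise FakeHttpError(404)

print(resource_exists(found))    # True
print(resource_exists(missing))  # False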
def transform_sqlvm_group_output(result):
'''
Transforms the result of SQL virtual machine group to eliminate unnecessary parameters.
'''
from collections import OrderedDict
from azure.mgmt.core.tools import parse_resource_id
try:
resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
wsfc_object = format_wsfc_domain_profile(result.wsfc_domain_profile)
# Create a dictionary with the relevant parameters
output = OrderedDict([('id', result.id),
('location', result.location),
('name', result.name),
('provisioningState', result.provisioning_state),
('sqlImageOffer', result.sql_image_offer),
('sqlImageSku', result.sql_image_sku),
('resourceGroup', resource_group),
('wsfcDomainProfile', wsfc_object),
('tags', result.tags)])
return output
except AttributeError:
from msrest.pipeline import ClientRawResponse
# Return the response object if the formatting fails
return None if isinstance(result, ClientRawResponse) else result | Transforms the result of SQL virtual machine group to eliminate unnecessary parameters. | transform_sqlvm_group_output | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def transform_sqlvm_group_list(group_list):
'''
Formats the list of results from a SQL virtual machine group
'''
return [transform_sqlvm_group_output(v) for v in group_list] | Formats the list of results from a SQL virtual machine group | transform_sqlvm_group_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def transform_sqlvm_output(result):
'''
Transforms the result of SQL virtual machine to eliminate unnecessary parameters.
'''
from collections import OrderedDict
from azure.mgmt.core.tools import parse_resource_id
try:
resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
# Create a dictionary with the relevant parameters
output = OrderedDict([('id', result.id),
('location', result.location),
('name', result.name),
('provisioningState', result.provisioning_state),
('sqlImageOffer', result.sql_image_offer),
('sqlImageSku', result.sql_image_sku),
('sqlManagement', result.sql_management),
('leastPrivilegeMode', result.least_privilege_mode),
('resourceGroup', resource_group),
('sqlServerLicenseType', result.sql_server_license_type),
('virtualMachineResourceId', result.virtual_machine_resource_id),
('tags', result.tags)])
# Note, wsfcDomainCredentials will not display
if result.sql_virtual_machine_group_resource_id is not None:
output['sqlVirtualMachineGroupResourceId'] = result.sql_virtual_machine_group_resource_id
if result.auto_patching_settings is not None:
output['autoPatchingSettings'] = format_auto_patching_settings(result.auto_patching_settings)
if result.auto_backup_settings is not None:
output['autoBackupSettings'] = format_auto_backup_settings(result.auto_backup_settings)
if result.server_configurations_management_settings is not None:
output['serverConfigurationsManagementSettings'] = format_server_configuration_management_settings(result.server_configurations_management_settings)
if result.key_vault_credential_settings is not None:
output['keyVaultCredentialSettings'] = format_key_vault_credential_settings(result.key_vault_credential_settings)
if result.assessment_settings is not None:
output['assessmentSettings'] = format_assessment_settings(result.assessment_settings)
return output
except AttributeError:
from msrest.pipeline import ClientRawResponse
# Return the response object if the formatting fails
return None if isinstance(result, ClientRawResponse) else result | Transforms the result of SQL virtual machine to eliminate unnecessary parameters. | transform_sqlvm_output | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def transform_sqlvm_list(vm_list):
'''
Formats the list of results from a SQL virtual machine
'''
return [transform_sqlvm_output(v) for v in vm_list] | Formats the list of results from a SQL virtual machine | transform_sqlvm_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def transform_aglistener_output(result):
'''
Transforms the result of Availability Group Listener to eliminate unnecessary parameters.
'''
from collections import OrderedDict
from azure.mgmt.core.tools import parse_resource_id
try:
resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
# Create a dictionary with the relevant parameters
output = OrderedDict([('id', result.id),
('name', result.name),
('provisioningState', result.provisioning_state),
('port', result.port),
('resourceGroup', resource_group)])
# Note, wsfcDomainCredentials will not display
if result.load_balancer_configurations is not None:
output['loadBalancerConfigurations'] = format_load_balancer_configuration_list(result.load_balancer_configurations)
return output
except AttributeError:
from msrest.pipeline import ClientRawResponse
# Return the response object if the formatting fails
return None if isinstance(result, ClientRawResponse) else result | Transforms the result of Availability Group Listener to eliminate unnecessary parameters. | transform_aglistener_output | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def transform_aglistener_list(ag_list):
'''
Formats the list of results from an Availability Group Listener
'''
return [transform_aglistener_output(v) for v in ag_list] | Formats the list of results from an Availability Group Listener | transform_aglistener_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_wsfc_domain_profile(result):
'''
Formats the WSFCDomainProfile object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.cluster_bootstrap_account is not None:
order_dict['clusterBootstrapAccount'] = result.cluster_bootstrap_account
if result.domain_fqdn is not None:
order_dict['domainFqdn'] = result.domain_fqdn
if result.ou_path is not None:
order_dict['ouPath'] = result.ou_path
if result.cluster_operator_account is not None:
order_dict['clusterOperatorAccount'] = result.cluster_operator_account
if result.file_share_witness_path is not None:
order_dict['fileShareWitnessPath'] = result.file_share_witness_path
if result.sql_service_account is not None:
order_dict['sqlServiceAccount'] = result.sql_service_account
if result.storage_account_url is not None:
order_dict['storageAccountUrl'] = result.storage_account_url
return order_dict | Formats the WSFCDomainProfile object removing arguments that are empty | format_wsfc_domain_profile | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
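The format_* helpers in this module all repeat the same shape: copy each attribute into an OrderedDict only when it is not None. As a design note, that repetition could be collapsed into one generic helper; the sketch below is illustrative only (the helper name and field map are made up for the example), not how the module is actually written.

from collections import OrderedDict

def format_non_null(obj, field_map):
    # field_map maps attribute names to camelCase output keys;
    # attributes that are None (or absent) are skipped.
    return OrderedDict(
        (key, getattr(obj, attr))
        for attr, key in field_map.items()
        if getattr(obj, attr, None) is not None)

class Demo:
    enable = True
    day_of_week = None

print(format_non_null(Demo(), {"enable": "enable", "day_of_week": "dayOfWeek"}))
# OrderedDict([('enable', True)])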
def format_additional_features_server_configurations(result):
'''
Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.is_r_services_enabled is not None:
order_dict['isRServicesEnabled'] = result.is_r_services_enabled
return order_dict | Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty | format_additional_features_server_configurations | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_auto_backup_settings(result):
'''
Formats the AutoBackupSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.enable is not None:
order_dict['enable'] = result.enable
if result.enable_encryption is not None:
order_dict['enableEncryption'] = result.enable_encryption
if result.retention_period is not None:
order_dict['retentionPeriod'] = result.retention_period
if result.storage_account_url is not None:
order_dict['storageAccountUrl'] = result.storage_account_url
if result.backup_system_dbs is not None:
order_dict['backupSystemDbs'] = result.backup_system_dbs
if result.backup_schedule_type is not None:
order_dict['backupScheduleType'] = result.backup_schedule_type
if result.full_backup_frequency is not None:
order_dict['fullBackupFrequency'] = result.full_backup_frequency
if result.full_backup_start_time is not None:
order_dict['fullBackupStartTime'] = result.full_backup_start_time
if result.full_backup_window_hours is not None:
order_dict['fullBackupWindowHours'] = result.full_backup_window_hours
if result.log_backup_frequency is not None:
order_dict['logBackupFrequency'] = result.log_backup_frequency
return order_dict | Formats the AutoBackupSettings object removing arguments that are empty | format_auto_backup_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_auto_patching_settings(result):
'''
Formats the AutoPatchingSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.enable is not None:
order_dict['enable'] = result.enable
if result.day_of_week is not None:
order_dict['dayOfWeek'] = result.day_of_week
if result.maintenance_window_starting_hour is not None:
order_dict['maintenanceWindowStartingHour'] = result.maintenance_window_starting_hour
if result.maintenance_window_duration is not None:
order_dict['maintenanceWindowDuration'] = result.maintenance_window_duration
return order_dict | Formats the AutoPatchingSettings object removing arguments that are empty | format_auto_patching_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_key_vault_credential_settings(result):
'''
Formats the KeyVaultCredentialSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.enable is not None:
order_dict['enable'] = result.enable
if result.credential_name is not None:
order_dict['credentialName'] = result.credential_name
if result.azure_key_vault_url is not None:
order_dict['azureKeyVaultUrl'] = result.azure_key_vault_url
return order_dict | Formats the KeyVaultCredentialSettings object removing arguments that are empty | format_key_vault_credential_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_load_balancer_configuration_list(lb_list):
'''
Formats the list of results from a load balancer configuration
'''
return [format_load_balancer_configuration(v) for v in lb_list] | Formats the list of results from a load balancer configuration | format_load_balancer_configuration_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_load_balancer_configuration(result):
'''
Formats the LoadBalancerConfiguration object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.private_ip_address is not None:
order_dict['privateIpAddress'] = format_private_ip_address(result.private_ip_address)
if result.public_ip_address_resource_id is not None:
order_dict['publicIpAddressResourceId'] = result.public_ip_address_resource_id
if result.load_balancer_resource_id is not None:
order_dict['loadBalancerResourceId'] = result.load_balancer_resource_id
if result.probe_port is not None:
order_dict['probePort'] = result.probe_port
if result.sql_virtual_machine_instances is not None:
order_dict['sqlVirtualMachineInstances'] = result.sql_virtual_machine_instances
return order_dict | Formats the LoadBalancerConfiguration object removing arguments that are empty | format_load_balancer_configuration | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_private_ip_address(result):
'''
Formats the PrivateIPAddress object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.ip_address is not None:
order_dict['ipAddress'] = result.ip_address
if result.subnet_resource_id is not None:
order_dict['subnetResourceId'] = result.subnet_resource_id
return order_dict | Formats the PrivateIPAddress object removing arguments that are empty | format_private_ip_address | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_server_configuration_management_settings(result):
'''
Formats the ServerConfigurationsManagementSettings object removing arguments that are empty
'''
from collections import OrderedDict
order_dict = OrderedDict()
settings = format_sql_connectivity_update_settings(result.sql_connectivity_update_settings)
if settings:
order_dict['sqlConnectivityUpdateSettings'] = settings
settings = format_sql_workload_type_update_settings(result.sql_workload_type_update_settings)
if settings:
order_dict['sqlWorkloadTypeUpdateSettings'] = settings
settings = format_sql_storage_update_settings(result.sql_storage_update_settings)
if settings:
order_dict['sqlStorageUpdateSettings'] = settings
settings = format_additional_features_server_configurations(result.additional_features_server_configurations)
if settings:
order_dict['additionalFeaturesServerConfigurations'] = settings
settings = format_azure_ad_authentication_settings(result.azure_ad_authentication_settings)
if settings:
order_dict['azureAdAuthenticationSettings'] = settings
return order_dict | Formats the ServerConfigurationsManagementSettings object removing arguments that are empty | format_server_configuration_management_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_sql_connectivity_update_settings(result):
'''
Formats the SqlConnectivityUpdateSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.connectivity_type is not None:
order_dict['connectivityType'] = result.connectivity_type
if result.port is not None:
order_dict['port'] = result.port
if result.sql_auth_update_user_name is not None:
order_dict['sqlAuthUpdateUserName'] = result.sql_auth_update_user_name
return order_dict | Formats the SqlConnectivityUpdateSettings object removing arguments that are empty | format_sql_connectivity_update_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_sql_storage_update_settings(result):
'''
Formats the SqlStorageUpdateSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.disk_count is not None:
order_dict['diskCount'] = result.disk_count
if result.disk_configuration_type is not None:
order_dict['diskConfigurationType'] = result.disk_configuration_type
return order_dict | Formats the SqlStorageUpdateSettings object removing arguments that are empty | format_sql_storage_update_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_sql_workload_type_update_settings(result):
'''
Formats the SqlWorkloadTypeUpdateSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.sql_workload_type is not None:
order_dict['sqlWorkloadType'] = result.sql_workload_type
return order_dict | Formats the SqlWorkloadTypeUpdateSettings object removing arguments that are empty | format_sql_workload_type_update_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_assessment_settings(result):
'''
Formats the AssessmentSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.enable is not None:
order_dict['enable'] = result.enable
schedule = format_assessment_schedule(result.schedule)
if schedule:
order_dict['schedule'] = schedule
return order_dict | Formats the AssessmentSettings object removing arguments that are empty | format_assessment_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_assessment_schedule(result):
'''
Formats the AssessmentSchedule object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.enable is not None:
order_dict['enable'] = result.enable
if result.weekly_interval is not None:
order_dict['weeklyInterval'] = result.weekly_interval
if result.monthly_occurrence is not None:
order_dict['monthlyOccurrence'] = result.monthly_occurrence
if result.day_of_week is not None:
order_dict['dayOfWeek'] = result.day_of_week
if result.start_time is not None:
order_dict['startTimeLocal'] = result.start_time
return order_dict | Formats the AssessmentSchedule object removing arguments that are empty | format_assessment_schedule | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def format_azure_ad_authentication_settings(result):
'''
Formats the AzureAD authentication object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result is not None and result.client_id is not None:
order_dict['clientId'] = result.client_id
return order_dict | Formats the AzureAD authentication object removing arguments that are empty | format_azure_ad_authentication_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/_format.py | MIT |
def id_generator(self, size=6, chars=string.ascii_lowercase + string.digits):
'''
DNS name must conform to the following regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
'''
import random
return random.choice(string.ascii_lowercase) + ''.join(random.choice(chars) for _ in range(size)) | DNS name must conform to the following regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$. | id_generator | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py | MIT |
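A quick standalone check that names built the way id_generator builds them satisfy the DNS constraint quoted in its docstring (same construction, validated against the stated regular expression):

import random
import re
import string

def make_dns_name(size=6, chars=string.ascii_lowercase + string.digits):
    # Same construction as id_generator: a guaranteed lowercase first character
    # followed by `size` lowercase letters or digits.
    return random.choice(string.ascii_lowercase) + ''.join(
        random.choice(chars) for _ in range(size))

DNS_NAME = re.compile(r'^[a-z][a-z0-9-]{1,61}[a-z0-9]$')
name = make_dns_name()
print(name, bool(DNS_NAME.match(name)))  # e.g. aq3k9x True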
def test_sqlvm_aad_auth_negative(self, resource_group, resource_group_location, sqlvm2019, sqlvm2022):
"""
Because the positive test cases require an Azure AD Global Administrator or Privileged Role Administrator role to grant
the necessary permissions, this automated test concentrates on negative test cases
covering the validation of Azure AD authentication.
"""
# Test create sqlvm2019
self.cmd('sql vm create -n {} -g {} -l {} --license-type {}'
.format(sqlvm2019, resource_group, resource_group_location, 'PAYG'),
checks=[
JMESPathCheck('name', sqlvm2019),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sqlServerLicenseType', 'PAYG'),
JMESPathCheck('sqlManagement', 'LightWeight')
]).get_output_in_json()
# Test create sqlvm2022
self.cmd('sql vm create -n {} -g {} -l {} --license-type {}'
.format(sqlvm2022, resource_group, resource_group_location, 'PAYG'),
checks=[
JMESPathCheck('name', sqlvm2022),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sqlServerLicenseType', 'PAYG'),
JMESPathCheck('sqlManagement', 'LightWeight')
]).get_output_in_json()
# Create user-assigned managed identity to attach to the virtual machine
attached_identity = self.cmd('identity create -n {} -g {}'.format('attached_msi', resource_group)).get_output_in_json()
self.cmd('vm identity assign -n {} -g {} --identities {}'.format(sqlvm2022, resource_group, attached_identity['name']))
# Create user-assigned managed identity not attached to any virtual machine
unattached_identity = self.cmd('identity create -n {} -g {}'.format('other_msi', resource_group)).get_output_in_json()
# Test both enable and validate commands
commands = ["enable-azure-ad-auth", "validate-azure-ad-auth"]
for command in commands:
validate_sql2019 = 'sql vm {} -n {} -g {}'.format(command, sqlvm2019, resource_group)
# Assert customer cannot enable Azure AD authentication on SQL Server 2019
# with self.assertRaisesRegex(InvalidArgumentValueError, "Azure AD authentication requires SQL Server 2022 on Windows platform"):
# self.cmd(validate_sql2019)
# this validation will happen in RP call, so skipping this check.
validate_system_msi = 'sql vm {} -n {} -g {}'.format(command, sqlvm2022, resource_group)
validate_attached_msi = 'sql vm {} -n {} -g {} --msi-client-id {}'.format(command, sqlvm2022, resource_group, attached_identity['clientId'])
validate_unattached_msi = 'sql vm {} -n {} -g {} --msi-client-id {}'.format(command, sqlvm2022, resource_group, unattached_identity['clientId'])
# Assert customer cannot enable Azure AD authentication with system-assigned MSI but the system-asigned MSI is not enabled on the VM
with self.assertRaisesRegex(InvalidArgumentValueError, "Enable Azure AD authentication with system-assigned managed identity "\
"but the system-assigned managed identity is not enabled on this Azure virtual machine."):
self.cmd(validate_system_msi)
# Assert customer cannot enable Azure AD authentication with user-assigned MSI but the user-asigned MSI is not attached on the VM
with self.assertRaisesRegex(InvalidArgumentValueError, "Enable Azure AD authentication with user-assigned managed identity {}, "\
"but the managed identity is not attached to this Azure virtual machine.".format(unattached_identity['clientId'])):
self.cmd(validate_unattached_msi)
# Enable the system-assigned managed identity on the VM
self.cmd('vm identity assign -n {} -g {} --identities [system]'.format(sqlvm2022, resource_group))
# Assert customer cannot enable Azure AD authentication with system-assigned MSI
# if the system-assigned managed identity does not have enough permission
with self.assertRaisesRegex(InvalidArgumentValueError, "The managed identity is lack of the following roles for Azure AD authentication: "\
"User.Read.All, Application.Read.All, GroupMember.Read.All."):
self.cmd(validate_system_msi)
# Disable the system-assigned managed identity on the VM
self.cmd('vm identity remove -n {} -g {} --identities [system]'.format(sqlvm2022, resource_group))
# Assert customer cannot enable Azure AD authentication with user-assigned MSI
# if the user-assigned managed identity does not have enough permission
with self.assertRaisesRegex(InvalidArgumentValueError, "The managed identity is lack of the following roles for Azure AD authentication: "\
"User.Read.All, Application.Read.All, GroupMember.Read.All."):
                self.cmd(validate_attached_msi) | Because an Azure AD Global Administrator or Privileged Role Administrator role is required to grant the
necessary permissions for the positive test cases, this automated test case concentrates on negative test cases
covering the validation of Azure AD authentication | test_sqlvm_aad_auth_negative | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py | MIT |
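A hedged sketch of the positive path that the docstring says cannot be automated here (granting the required Graph roles needs a Global Administrator); only the command format is taken from the test above, and the VM name, resource group and client id are placeholders.

# Hypothetical positive path (not part of the recorded test): once the user-assigned MSI
# has been granted User.Read.All, Application.Read.All and GroupMember.Read.All,
# the same command format used above would enable Azure AD authentication.
enable_aad_auth = 'sql vm enable-azure-ad-auth -n {} -g {} --msi-client-id {}'.format(
    'clisqlvm2022', 'my-resource-group', '00000000-0000-0000-0000-000000000000')
# Inside a ScenarioTest this would run as: self.cmd(enable_aad_auth)
print(enable_aad_auth)
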
def update_search_service(instance, partition_count=0, replica_count=0, public_network_access=None,
ip_rules=None, identity_type=None, disable_local_auth=None, auth_options=None,
aad_auth_failure_mode=None):
"""
Update partition and replica of the given search service.
:param partition_count: Number of partitions in the search service.
:param replica_count: Number of replicas in the search service.
:param public_network_access: Public accessibility to the search service;
allowed values are "enabled" or "disabled".
    :param ip_rules: Public IP(v4) addresses or CIDR ranges to the search service, separated by commas (',') or
                     semicolons (';'); if only spaces (' '), ',' or ';' are provided, any existing IP rules will be
                     nullified and no public IP rules are applied. These IP rules are applicable only when
public_network_access is "enabled".
:param identity_type: The identity type; possible values include: "None", "SystemAssigned".
:param disable_local_auth: If calls to the search service will not be permitted to utilize
API keys for authentication.
This cannot be combined with auth_options
:param auth_options: Options for authenticating calls to the search service;
possible values include "aadOrApiKey", "apiKeyOnly";
This cannot be combined with disable_local_auth.
:param aad_auth_failure_mode: Describes response code from calls to the search service that failed authentication;
possible values include "http401WithBearerChallenge", "http403";
This cannot be combined with disable_local_auth.
"""
from azure.mgmt.search.models import NetworkRuleSet, IpRule, Identity
import re
replica_count = int(replica_count)
partition_count = int(partition_count)
if replica_count > 0:
instance.replica_count = replica_count
if partition_count > 0:
instance.partition_count = partition_count
if public_network_access:
if (public_network_access.lower() not in ["enabled", "disabled"]):
raise UnrecognizedArgumentError(
"SearchService.PublicNetworkAccess: only [enabled, disabled] are allowed")
instance.public_network_access = public_network_access
if ip_rules:
_ip_rules = []
_ip_rules_array = re.split(';|,', ip_rules)
for _ip_rule in _ip_rules_array:
if _ip_rule:
_ip_rules.append(IpRule(value=_ip_rule))
instance.network_rule_set = NetworkRuleSet(ip_rules=_ip_rules)
if identity_type:
_identity = Identity(type=identity_type)
instance.identity = _identity
setup_search_auth(instance, disable_local_auth, auth_options, aad_auth_failure_mode)
return instance | Update partition and replica of the given search service.
:param partition_count: Number of partitions in the search service.
:param replica_count: Number of replicas in the search service.
:param public_network_access: Public accessibility to the search service;
allowed values are "enabled" or "disabled".
    :param ip_rules: Public IP(v4) addresses or CIDR ranges to the search service, separated by commas (',') or
                     semicolons (';'); if only spaces (' '), ',' or ';' are provided, any existing IP rules will be
                     nullified and no public IP rules are applied. These IP rules are applicable only when
public_network_access is "enabled".
:param identity_type: The identity type; possible values include: "None", "SystemAssigned".
:param disable_local_auth: If calls to the search service will not be permitted to utilize
API keys for authentication.
This cannot be combined with auth_options
:param auth_options: Options for authenticating calls to the search service;
possible values include "aadOrApiKey", "apiKeyOnly";
This cannot be combined with disable_local_auth.
:param aad_auth_failure_mode: Describes response code from calls to the search service that failed authentication;
possible values include "http401WithBearerChallenge", "http403";
This cannot be combined with disable_local_auth. | update_search_service | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/search/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/search/custom.py | MIT |
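A minimal usage sketch, assuming the azure-cli and azure-mgmt-search packages are installed; the SimpleNamespace stands in for the SearchService instance that the generic update command would normally pass in.

# Minimal sketch: exercise update_search_service against a stand-in service object.
from types import SimpleNamespace
from azure.cli.command_modules.search.custom import update_search_service

instance = SimpleNamespace(replica_count=1, partition_count=1, public_network_access=None,
                           network_rule_set=None, identity=None, disable_local_auth=None,
                           auth_options=None)
updated = update_search_service(instance, partition_count=2, replica_count=3,
                                public_network_access='enabled',
                                ip_rules='10.0.0.0/24;40.112.0.1')
# partition/replica counts are updated and the two IP rules are parsed into IpRule objects
print(updated.partition_count, updated.replica_count,
      [r.value for r in updated.network_rule_set.ip_rules])
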
def update_private_endpoint_connection(cmd, resource_group_name, search_service_name, private_endpoint_connection_name,
private_link_service_connection_status,
private_link_service_connection_description,
private_link_service_connection_actions_required):
"""
Update an existing private endpoint connection in a Search service in the given resource group.
:param resource_group_name: Name of resource group.
:param search_service_name: Name of the search service.
:param private_endpoint_connection_name: Name of the private endpoint connection resource;
for example: {the name of the private endpoint resource}.{guid}.
:param private_link_service_connection_status: The updated status of the private endpoint connection resource.
Possible values include: "Pending", "Approved", "Rejected", "Disconnected".
:param private_link_service_connection_description: Custom description when updating
the private endpoint connection resource.
:param private_link_service_connection_actions_required: Custom 'actions required' message when updating
the private endpoint connection resource.
"""
from azure.mgmt.search.models import PrivateEndpointConnection, \
PrivateEndpointConnectionProperties, PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState
from azure.cli.command_modules.search._client_factory import cf_search_private_endpoint_connections
_client = cf_search_private_endpoint_connections(cmd.cli_ctx, None)
_private_endpoint_connection = PrivateEndpointConnection()
_private_link_service_connection_state = PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState(
status=private_link_service_connection_status,
description=private_link_service_connection_description,
actions_required=private_link_service_connection_actions_required
)
_private_endpoint_connection_properties = PrivateEndpointConnectionProperties(
private_link_service_connection_state=_private_link_service_connection_state)
_private_endpoint_connection.id = private_endpoint_connection_name
_private_endpoint_connection.properties = _private_endpoint_connection_properties
return _client.update(resource_group_name, search_service_name, private_endpoint_connection_name,
_private_endpoint_connection) | Update an existing private endpoint connection in a Search service in the given resource group.
:param resource_group_name: Name of resource group.
:param search_service_name: Name of the search service.
:param private_endpoint_connection_name: Name of the private endpoint connection resource;
for example: {the name of the private endpoint resource}.{guid}.
:param private_link_service_connection_status: The updated status of the private endpoint connection resource.
Possible values include: "Pending", "Approved", "Rejected", "Disconnected".
:param private_link_service_connection_description: Custom description when updating
the private endpoint connection resource.
:param private_link_service_connection_actions_required: Custom 'actions required' message when updating
the private endpoint connection resource. | update_private_endpoint_connection | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/search/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/search/custom.py | MIT |
def create_shared_private_link_resource(cmd, resource_group_name, search_service_name,
shared_private_link_resource_name, shared_private_link_resource_id,
shared_private_link_resource_group_id,
shared_private_link_resource_request_message="Please approve",
no_wait=False):
"""
Create shared privatelink resources in a Search service in the given resource group.
:param resource_group_name: Name of resource group.
:param search_service_name: Name of the search service.
:param shared_private_link_resource_name: Name of the shared private link resource.
:param shared_private_link_resource_id: Fully qualified resource ID for the resource.
for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/
{resourceProviderNamespace}/{resourceType}/{resourceName}.
:param shared_private_link_resource_group_id: The group id of the resource; for example: blob, sql or vault.
:param shared_private_link_resource_request_message: Custom request message when creating or updating the shared
privatelink resources.
"""
from azure.mgmt.search.models import SharedPrivateLinkResource, SharedPrivateLinkResourceProperties
from azure.cli.command_modules.search._client_factory import cf_search_shared_private_link_resources
_client = cf_search_shared_private_link_resources(cmd.cli_ctx, None)
_shared_private_link_resource = SharedPrivateLinkResource()
_shared_private_link_resource.name = shared_private_link_resource_name
_shared_private_link_resource.properties = SharedPrivateLinkResourceProperties(
private_link_resource_id=shared_private_link_resource_id,
group_id=shared_private_link_resource_group_id,
request_message=shared_private_link_resource_request_message
)
return sdk_no_wait(no_wait, _client.begin_create_or_update, resource_group_name,
search_service_name, shared_private_link_resource_name, _shared_private_link_resource) | Create shared privatelink resources in a Search service in the given resource group.
:param resource_group_name: Name of resource group.
:param search_service_name: Name of the search service.
:param shared_private_link_resource_name: Name of the shared private link resource.
:param shared_private_link_resource_id: Fully qualified resource ID for the resource.
for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/
{resourceProviderNamespace}/{resourceType}/{resourceName}.
:param shared_private_link_resource_group_id: The group id of the resource; for example: blob, sql or vault.
:param shared_private_link_resource_request_message: Custom request message when creating or updating the shared
privatelink resources. | create_shared_private_link_resource | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/search/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/search/custom.py | MIT |
def update_shared_private_link_resource(cmd, resource_group_name, search_service_name,
shared_private_link_resource_name, shared_private_link_resource_id,
shared_private_link_resource_group_id,
shared_private_link_resource_request_message,
no_wait=False):
"""
Update shared privatelink resources in a Search service in the given resource group.
:param resource_group_name: Name of resource group.
:param search_service_name: Name of the search service.
:param shared_private_link_resource_name: Name of the shared private link resource.
:param shared_private_link_resource_id: Fully qualified resource ID for the resource;
for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/
{resourceProviderNamespace}/{resourceType}/{resourceName}.
:param shared_private_link_resource_group_id: The group id of the resource; for example: blob, sql or vault.
:param shared_private_link_resource_request_message: Custom request message when creating or updating the shared
privatelink resources.
"""
from azure.mgmt.search.models import SharedPrivateLinkResource, SharedPrivateLinkResourceProperties
from azure.cli.command_modules.search._client_factory import cf_search_shared_private_link_resources
_client = cf_search_shared_private_link_resources(cmd.cli_ctx, None)
_shared_private_link_resource = SharedPrivateLinkResource()
_shared_private_link_resource.name = shared_private_link_resource_name
_shared_private_link_resource.properties = SharedPrivateLinkResourceProperties(
private_link_resource_id=shared_private_link_resource_id,
group_id=shared_private_link_resource_group_id,
request_message=shared_private_link_resource_request_message
)
return sdk_no_wait(no_wait, _client.begin_create_or_update, resource_group_name,
search_service_name, shared_private_link_resource_name, _shared_private_link_resource) | Update shared privatelink resources in a Search service in the given resource group.
:param resource_group_name: Name of resource group.
:param search_service_name: Name of the search service.
:param shared_private_link_resource_name: Name of the shared private link resource.
:param shared_private_link_resource_id: Fully qualified resource ID for the resource;
for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/
{resourceProviderNamespace}/{resourceType}/{resourceName}.
:param shared_private_link_resource_group_id: The group id of the resource; for example: blob, sql or vault.
:param shared_private_link_resource_request_message: Custom request message when creating or updating the shared
privatelink resources. | update_shared_private_link_resource | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/search/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/search/custom.py | MIT |
def setup_search_auth(instance, disable_local_auth, auth_options, aad_auth_failure_mode):
"""
Add auth options to a search service
:param disable_local_auth: If calls to the search service will not be permitted to utilize
API keys for authentication.
This cannot be combined with auth_options
:param auth_options: Options for authenticating calls to the search service;
possible values include "aadOrApiKey", "apiKeyOnly";
This cannot be combined with disable_local_auth.
:param aad_auth_failure_mode: Describes response code from calls to the search service that failed authentication;
possible values include "http401WithBearerChallenge", "http403";
This cannot be combined with disable_local_auth.
"""
# Done in aaz by default
if (disable_local_auth is not None and disable_local_auth not in [True, False]):
raise UnrecognizedArgumentError(
"SearchService.DisableLocalAuth: only [True, False] are allowed")
# Done by argument define
if (auth_options is not None and auth_options not in ["aadOrApiKey", "apiKeyOnly"]):
raise UnrecognizedArgumentError(
"SearchService.AuthOptions: only [aadOrApiKey, apiKeyOnly] are allowed")
# Done in aaz by default
if (aad_auth_failure_mode is not None and aad_auth_failure_mode not in ["http401WithBearerChallenge", "http403"]):
raise UnrecognizedArgumentError(
"SearchService.AuthOptions.AadAuthFailureMode: only "
"[http401WithBearerChallenge, http403] are allowed")
# Done in pre_operations
if disable_local_auth and auth_options:
raise MutuallyExclusiveArgumentError("Both the DisableLocalAuth and AuthOptions parameters "
"can't be given at the same time")
if disable_local_auth and aad_auth_failure_mode:
raise MutuallyExclusiveArgumentError("Both the DisableLocalAuth and AadAuthFailureMode parameters "
"can't be given at the same time")
if auth_options == "apiKeyOnly" and aad_auth_failure_mode:
raise MutuallyExclusiveArgumentError("Both an AuthOptions value of apiKeyOnly and an AadAuthFailureMode "
"can't be given at the same time")
if auth_options == "aadOrApiKey" and not aad_auth_failure_mode:
raise RequiredArgumentMissingError("An AuthOptions value of aadOrApiKey requires "
"an AadAuthFailureMode parameter")
instance.disable_local_auth = disable_local_auth
if auth_options:
instance.auth_options = {}
instance.auth_options[auth_options] = {}
if aad_auth_failure_mode:
instance.auth_options[auth_options]["aadAuthFailureMode"] = aad_auth_failure_mode | Add auth options to a search service
:param disable_local_auth: If calls to the search service will not be permitted to utilize
API keys for authentication.
This cannot be combined with auth_options
:param auth_options: Options for authenticating calls to the search service;
possible values include "aadOrApiKey", "apiKeyOnly";
This cannot be combined with disable_local_auth.
:param aad_auth_failure_mode: Describes response code from calls to the search service that failed authentication;
possible values include "http401WithBearerChallenge", "http403";
This cannot be combined with disable_local_auth. | setup_search_auth | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/search/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/search/custom.py | MIT |
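A small sketch of the combination rules enforced by setup_search_auth, assuming azure-cli is installed; it shows one valid aadOrApiKey pairing and one rejected one.

from types import SimpleNamespace
from azure.cli.command_modules.search.custom import setup_search_auth
from azure.cli.core.azclierror import RequiredArgumentMissingError

svc = SimpleNamespace(disable_local_auth=None, auth_options=None)
# Valid: "aadOrApiKey" must be paired with an explicit failure mode.
setup_search_auth(svc, None, 'aadOrApiKey', 'http403')
print(svc.auth_options)  # {'aadOrApiKey': {'aadAuthFailureMode': 'http403'}}
# Invalid: "aadOrApiKey" without a failure mode is rejected.
try:
    setup_search_auth(SimpleNamespace(), None, 'aadOrApiKey', None)
except RequiredArgumentMissingError as ex:
    print(ex)
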
def _get_certificate(client, vault_base_url, certificate_name):
""" Download a certificate from a KeyVault. """
cert = client.get_certificate(vault_base_url, certificate_name, '')
return cert | Download a certificate from a KeyVault. | _get_certificate | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/servicefabric/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/servicefabric/custom.py | MIT |
def validate_storage_account_id(cmd, namespace):
"""Validate storage account name"""
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.core.tools import is_valid_resource_id, resource_id
if namespace.storage_account:
if not is_valid_resource_id(namespace.storage_account):
namespace.storage_account = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=namespace.storage_account
) | Validate storage account name | validate_storage_account_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
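A sketch of the ID expansion this validator performs, assuming azure-mgmt-core is installed; the subscription, resource group and account names are placeholders.

from azure.mgmt.core.tools import is_valid_resource_id, resource_id

account = 'mystorageaccount'
if not is_valid_resource_id(account):
    account = resource_id(subscription='00000000-0000-0000-0000-000000000000',
                          resource_group='my-rg',
                          namespace='Microsoft.Storage', type='storageAccounts',
                          name=account)
# /subscriptions/.../resourceGroups/my-rg/providers/Microsoft.Storage/storageAccounts/mystorageaccount
print(account)
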
def datetime_format(value):
"""Validate the correct format of a datetime string and deserialize."""
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
try:
datetime_obj = Deserializer.deserialize_iso(value)
except DeserializationError:
message = "Argument {} is not a valid ISO-8601 datetime format"
raise ValueError(message.format(value))
return datetime_obj | Validate the correct format of a datetime string and deserialize. | datetime_format | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
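A small sketch of the same parse-or-raise behaviour, assuming the msrest package is installed; the function body is restated locally so the snippet is self-contained.

from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError

def parse_iso_datetime(value):
    # mirrors datetime_format: deserialize an ISO-8601 string or raise ValueError
    try:
        return Deserializer.deserialize_iso(value)
    except DeserializationError:
        raise ValueError("Argument {} is not a valid ISO-8601 datetime format".format(value))

print(parse_iso_datetime('2023-01-31T12:00:00Z'))  # datetime object
try:
    parse_iso_datetime('31/01/2023')
except ValueError as ex:
    print(ex)
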
def validate_archive_window_length(ns):
"""Validate the correct format of a datetime string and the range."""
if ns.archive_window_length is not None:
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
try:
datetime_obj = Deserializer.deserialize_duration(ns.archive_window_length)
except DeserializationError:
message = "archive-window-length {} is not a valid ISO-8601 duration format"
raise ValueError(message.format(ns.archive_window_length))
minwindow = Deserializer.deserialize_duration("PT5M")
maxwindow = Deserializer.deserialize_duration("PT25H")
if datetime_obj < minwindow or datetime_obj > maxwindow:
message = "archive-window-length '{}' is not in the range of PT5M and PT25H"\
.format(ns.archive_window_length)
raise ValueError(message) | Validate the correct format of a datetime string and the range. | validate_archive_window_length | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
def validate_key_frame_interval_duration(ns):
"""Validate the correct format of a datetime string and the range."""
if ns.key_frame_interval_duration is not None:
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
try:
datetime_obj = Deserializer.deserialize_duration(ns.key_frame_interval_duration)
except DeserializationError:
message = "key-frame-interval-duration {} is not a valid ISO-8601 duration format"
raise ValueError(message.format(ns.key_frame_interval_duration))
minwindow = Deserializer.deserialize_duration("PT1S")
maxwindow = Deserializer.deserialize_duration("PT30S")
if datetime_obj < minwindow or datetime_obj > maxwindow:
message = "key-frame-interval-duration '{}' is not in the range of PT1S and PT30S"\
.format(ns.key_frame_interval_duration)
raise ValueError(message) | Validate the correct format of a datetime string and the range. | validate_key_frame_interval_duration | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
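A sketch of the duration range check, assuming azure-cli (and therefore msrest) is installed; the SimpleNamespace mimics the parsed argparse namespace.

from types import SimpleNamespace
from azure.cli.command_modules.ams._validators import validate_key_frame_interval_duration

# In range [PT1S, PT30S]: passes silently.
validate_key_frame_interval_duration(SimpleNamespace(key_frame_interval_duration='PT2S'))
# Out of range: raises ValueError.
try:
    validate_key_frame_interval_duration(SimpleNamespace(key_frame_interval_duration='PT45S'))
except ValueError as ex:
    print(ex)
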
def validate_correlation_data(ns):
""" Extracts multiple space-separated correlation data in key[=value] format """
if isinstance(ns.correlation_data, list):
correlation_data_dict = {}
for item in ns.correlation_data:
correlation_data_dict.update(validate_tag(item))
ns.correlation_data = correlation_data_dict | Extracts multiple space-separated correlation data in key[=value] format | validate_correlation_data | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
def validate_token_claim(ns):
""" Extracts multiple space-separated token claims in key[=value] format """
if isinstance(ns.token_claims, list):
token_claims_dict = {}
for item in ns.token_claims:
token_claims_dict.update(validate_tag(item))
ns.token_claims = token_claims_dict | Extracts multiple space-separated token claims in key[=value] format | validate_token_claim | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
def validate_output_assets(ns):
""" Extracts multiple space-separated output assets in key[=value] format """
def _get_asset(asset_string):
from azure.mgmt.media.models import JobOutputAsset
name_and_label = asset_string.split('=')
if len(name_and_label) <= 1:
message = "Output assets are not in correct format. Output assets should be in 'assetName=label'" \
" format. An asset without label can be sent like this: 'assetName='"
raise ValueError(message)
name = name_and_label[0]
label = name_and_label[1]
return JobOutputAsset(asset_name=name, label=label)
if isinstance(ns.output_assets, list):
ns.output_assets = list(map(_get_asset, ns.output_assets)) | Extracts multiple space-separated output assets in key[=value] format | validate_output_assets | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/_validators.py | MIT |
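A sketch of the 'assetName=label' parsing, assuming azure-cli and azure-mgmt-media are installed; the asset names are placeholders.

from types import SimpleNamespace
from azure.cli.command_modules.ams._validators import validate_output_assets

ns = SimpleNamespace(output_assets=['encodedAsset=archive', 'thumbnailAsset='])
validate_output_assets(ns)
for asset in ns.output_assets:  # now JobOutputAsset objects
    print(asset.asset_name, repr(asset.label))
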
def list_role_assignments(cmd, assignee_object_id, scope=None):
'''
:param include_groups: include extra assignments to the groups of which the user is a
    member (transitively).
'''
graph_client = _graph_client_factory(cmd.cli_ctx)
factory = _auth_client_factory(cmd.cli_ctx)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
assignments = _search_role_assignments(assignments_client, assignee_object_id)
subscription_id = get_subscription_id(cmd.cli_ctx)
results = todict(assignments) if assignments else []
if not results:
return []
    # 1. fill in friendly names to make the results understandable.
# (it's possible that associated roles and principals were deleted, and we just do nothing.)
# 2. fill in role names
role_defs = list(definitions_client.list(
scope=(scope if scope else '/subscriptions/' + subscription_id)))
role_dics = {i.id: i.role_name for i in role_defs}
for i in results:
if role_dics.get(i['roleDefinitionId']):
i['roleDefinitionName'] = role_dics[i['roleDefinitionId']]
# fill in principal names
principal_ids = set(i['principalId'] for i in results if i['principalId'])
if principal_ids:
try:
principals = _get_object_stubs(graph_client, principal_ids)
principal_dics = {i.get('principalId'): _get_displayable_name(i) for i in principals}
for i in [r for r in results if not r.get('principalName')]:
i['principalName'] = ''
if principal_dics.get(i['principalId']):
i['principalName'] = principal_dics[i['principalId']]
except (HttpResponseError, GraphError) as ex:
# failure on resolving principal due to graph permission should not fail the whole thing
logger.info("Failed to resolve graph object information per error '%s'", ex)
return results | :param include_groups: include extra assignments to the groups of which the user is a
    member (transitively). | list_role_assignments | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/ams/operations/sp.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/ams/operations/sp.py | MIT |
def _get_log_record_list(log_fp, p_id):
"""
Get list of records / messages in the log file
:param log_fp: log file object
:param p_id: process id of command
:return:
"""
prev_record = None
log_record_list = []
for line in log_fp:
# attempt to extract log data
log_record = CommandLogFile._get_info_from_log_line(line, p_id)
if log_record: # if new record parsed, add old record to the list
if prev_record:
log_record_list.append(prev_record)
prev_record = log_record
elif prev_record: # otherwise this is a continuation of a log record, add to prev record
new_log_msg = prev_record.log_msg + line
prev_record = CommandLogFile._LogRecordType(p_id=prev_record.p_id, date_time=prev_record.date_time,
# pylint: disable=line-too-long
level=prev_record.level, logger=prev_record.logger,
log_msg=new_log_msg)
if prev_record:
log_record_list.append(prev_record)
return log_record_list | Get list of records / messages in the log file
:param log_fp: log file object
:param p_id: process id of command
:return: | _get_command_data_from_metadata._get_log_record_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/feedback/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/feedback/custom.py | MIT |
def _get_command_data_from_metadata(self): # pylint: disable=too-many-statements
def _get_log_record_list(log_fp, p_id):
"""
Get list of records / messages in the log file
:param log_fp: log file object
:param p_id: process id of command
:return:
"""
prev_record = None
log_record_list = []
for line in log_fp:
# attempt to extract log data
log_record = CommandLogFile._get_info_from_log_line(line, p_id)
if log_record: # if new record parsed, add old record to the list
if prev_record:
log_record_list.append(prev_record)
prev_record = log_record
elif prev_record: # otherwise this is a continuation of a log record, add to prev record
new_log_msg = prev_record.log_msg + line
prev_record = CommandLogFile._LogRecordType(p_id=prev_record.p_id, date_time=prev_record.date_time,
# pylint: disable=line-too-long
level=prev_record.level, logger=prev_record.logger,
log_msg=new_log_msg)
if prev_record:
log_record_list.append(prev_record)
return log_record_list
if not self.metadata_tup:
return {}
_EXT_NAME_PREFIX = "extension name:"
_EXT_VERS_PREFIX = "extension version:"
file_name = self.metadata_tup.file_path
p_id = self.metadata_tup.p_id
try:
with open(file_name, 'r') as log_fp:
log_record_list = _get_log_record_list(log_fp, p_id)
except OSError:
logger.debug("Failed to open command log file %s", file_name)
return {}
if not log_record_list:
logger.debug("No command log messages found in file %s", file_name)
return {}
log_data = {}
# 1. Figure out whether the command was successful or not. Last log record should be the exit code
try:
status_msg = log_record_list[-1].log_msg.strip()
if status_msg.startswith("exit code"):
idx = status_msg.index(":") # raises ValueError
exit_code = int(log_record_list[-1].log_msg[idx + 1:].strip())
log_data["success"] = bool(not exit_code)
except (IndexError, ValueError):
logger.debug("Couldn't extract exit code from command log %s.", file_name)
# 2. If there are any errors, this is a failed command. Log the errors
# 3. Also get extension information.
for record in log_record_list:
errors = log_data.setdefault("errors", []) # log_data["errors"]
if record.level.lower() == "error":
log_data["success"] = False
errors.append(record.log_msg)
poss_ext_msg = record.log_msg.strip()
if record.level.lower() == "info":
if poss_ext_msg.startswith(_EXT_NAME_PREFIX):
log_data["extension_name"] = poss_ext_msg[len(_EXT_NAME_PREFIX):].strip()
elif poss_ext_msg.startswith(_EXT_VERS_PREFIX):
log_data["extension_version"] = poss_ext_msg[len(_EXT_VERS_PREFIX):].strip()
# 4. Get command args string. from first record
try:
command_args_msg = log_record_list[0].log_msg.strip()
if command_args_msg.lower().startswith("command args:"):
idx = command_args_msg.index(":")
log_data["command_args"] = command_args_msg[idx + 1:].strip()
else:
raise ValueError
except (IndexError, ValueError):
logger.debug("Couldn't get command args from command log %s.", file_name)
return log_data | Get list of records / messages in the log file
:param log_fp: log file object
:param p_id: process id of command
:return: | _get_command_data_from_metadata | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/feedback/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/feedback/custom.py | MIT |
def _get_info_from_log_line(line, p_id):
"""
Extract log line information based on the following command log format in azlogging.py
lfmt = logging.Formatter('%(process)d | %(created)s | %(levelname)s | %(name)s | %(message)s')
:param line: the line from the log file.
:return: returned parsed line information or None
"""
if not line.startswith(_CMD_LOG_LINE_PREFIX):
return None
line = line[len(_CMD_LOG_LINE_PREFIX):]
parts = line.split("|", 4)
if len(parts) != 5: # there must be 5 items
return None
for i, part in enumerate(parts):
parts[i] = part.strip()
if i == 0:
parts[0] = int(parts[0])
if parts[0] != p_id: # ensure that this is indeed a valid log.
return None
# add newline at end of log
if not parts[-1].endswith("\n"):
parts[-1] += "\n"
return CommandLogFile._LogRecordType(*parts) | Extract log line information based on the following command log format in azlogging.py
lfmt = logging.Formatter('%(process)d | %(created)s | %(levelname)s | %(name)s | %(message)s')
:param line: the line from the log file.
:return: returned parsed line information or None | _get_info_from_log_line | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/feedback/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/feedback/custom.py | MIT |
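A sketch that builds and parses one command-log line using the prefix constant referenced above, assuming azure-cli is installed; the timestamp and message are made up.

from azure.cli.command_modules.feedback.custom import CommandLogFile, _CMD_LOG_LINE_PREFIX

p_id = 1234
line = '{}{} | 1700000000.0 | INFO | cli.azure.cli.core | command args: group list\n'.format(
    _CMD_LOG_LINE_PREFIX, p_id)
record = CommandLogFile._get_info_from_log_line(line, p_id)
print(record.level, record.logger, record.log_msg.strip())
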
def _get_az_version_summary():
"""
    This depends on the output format of get_az_version_string not changing; add tests to make this and other methods more robust.
:return: az version info
"""
az_vers_string = get_az_version_string()[0]
# Remove consecutive spaces
import re
az_vers_string = re.sub(' +', ' ', az_vers_string)
# Add each line until 'python location'
lines = az_vers_string.splitlines()
# First line is azure-cli
new_lines = [lines[0], '']
# Only add lines between 'Extensions:' and 'Python location'
extension_line = -1
python_line = -1
for i, line in enumerate(lines):
if 'extensions:' in line.lower():
extension_line = i
if 'python location' in line.lower():
python_line = i
break
new_lines.extend(lines[extension_line:python_line])
# Remove last line which is empty
new_lines.pop()
return "\n".join(new_lines) | This depends on get_az_version_string not being changed, add some tests to make this and other methods more robust.
:return: az version info | _get_az_version_summary | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/feedback/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/feedback/custom.py | MIT |
def set_recommendation(self, recommendation):
"""" Set manual recommendations for the error.
Command module or extension authors could call this method to provide recommendations,
the recommendations will be printed after the error message, one recommendation per line
"""
if isinstance(recommendation, str):
self.recommendations.append(recommendation)
elif isinstance(recommendation, list):
            self.recommendations.extend(recommendation) | Set manual recommendations for the error.
Command module or extension authors could call this method to provide recommendations,
the recommendations will be printed after the error message, one recommendation per line | set_recommendation | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/azclierror.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/azclierror.py | MIT |
def set_aladdin_recommendation(self, recommendations):
""" Set aladdin recommendations for the error.
One item should be a tuple with the form: (recommendation, description)
"""
self.aladdin_recommendations.extend(recommendations) | Set aladdin recommendations for the error.
One item should be a tuple with the form: (recommendation, description) | set_aladdin_recommendation | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/azclierror.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/azclierror.py | MIT |
def get_examples(self, command, parser, is_group):
"""Get examples of a certain command from the help file.
Get the text of the example, strip the newline character and
return a list of commands which start with the given command name.
"""
nouns = command.split(' ')[1:]
self.update_loaders_with_help_file_contents(nouns)
delimiters = ' '.join(nouns)
help_file = self.command_help_cls(self, delimiters, parser) if not is_group \
else self.group_help_cls(self, delimiters, parser)
help_file.load(parser)
def strip_command(command):
command = command.replace('\\\n', '')
contents = [item for item in command.split(' ') if item]
return ' '.join(contents).strip()
examples = []
for example in help_file.examples:
if example.command and example.name:
examples.append({
'command': strip_command(example.command),
'description': example.name
})
return examples | Get examples of a certain command from the help file.
Get the text of the example, strip the newline character and
return a list of commands which start with the given command name. | get_examples | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_help.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_help.py | MIT |
def _detect_adfs_authority(authority_url, tenant):
"""Prepare authority and tenant for Azure Identity with ADFS support.
If `authority_url` ends with '/adfs', `tenant` will be set to 'adfs'. For example:
'https://adfs.redmond.azurestack.corp.microsoft.com/adfs'
-> ('https://adfs.redmond.azurestack.corp.microsoft.com/', 'adfs')
"""
authority_url = authority_url.rstrip('/')
if authority_url.endswith('/adfs'):
authority_url = authority_url[:-len('/adfs')]
# The custom tenant is discarded in ADFS environment
tenant = 'adfs'
return authority_url, tenant | Prepare authority and tenant for Azure Identity with ADFS support.
If `authority_url` ends with '/adfs', `tenant` will be set to 'adfs'. For example:
'https://adfs.redmond.azurestack.corp.microsoft.com/adfs'
-> ('https://adfs.redmond.azurestack.corp.microsoft.com/', 'adfs') | _detect_adfs_authority | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
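A short sketch of both branches, assuming azure-cli is installed; the hostnames are placeholders.

from azure.cli.core._profile import _detect_adfs_authority

# ADFS: the '/adfs' suffix is stripped and the custom tenant is discarded.
print(_detect_adfs_authority('https://adfs.contoso.com/adfs', 'my-tenant'))
# Non-ADFS: authority and tenant are returned unchanged (minus any trailing slash).
print(_detect_adfs_authority('https://login.microsoftonline.com/', 'my-tenant'))
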
def _attach_token_tenant(subscription, tenant, tenant_id_description=None):
"""Attach the token tenant information to the subscription. CLI uses tenant_id to know which token should be used
to access the subscription.
This function supports multiple APIs:
- v2016_06_01: Subscription doesn't have tenant_id
- v2022_12_01:
- Subscription has tenant_id representing the home tenant ID, mapped to home_tenant_id
- TenantIdDescription has default_domain, mapped to tenant_default_domain
- TenantIdDescription has display_name, mapped to tenant_display_name
"""
if hasattr(subscription, "tenant_id"):
setattr(subscription, 'home_tenant_id', subscription.tenant_id)
setattr(subscription, 'tenant_id', tenant)
# Attach tenant_default_domain, if available
if tenant_id_description and hasattr(tenant_id_description, "default_domain"):
setattr(subscription, 'tenant_default_domain', tenant_id_description.default_domain)
# Attach display_name, if available
if tenant_id_description and hasattr(tenant_id_description, "display_name"):
setattr(subscription, 'tenant_display_name', tenant_id_description.display_name) | Attach the token tenant information to the subscription. CLI uses tenant_id to know which token should be used
to access the subscription.
This function supports multiple APIs:
- v2016_06_01: Subscription doesn't have tenant_id
- v2022_12_01:
- Subscription has tenant_id representing the home tenant ID, mapped to home_tenant_id
- TenantIdDescription has default_domain, mapped to tenant_default_domain
- TenantIdDescription has display_name, mapped to tenant_display_name | _attach_token_tenant | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def __init__(self, cli_ctx=None, storage=None):
"""Class to manage CLI's accounts (profiles) and identities (credentials).
:param cli_ctx: The CLI context
:param storage: A dict to store accounts, by default persisted to ~/.azure/azureProfile.json as JSON
"""
from azure.cli.core import get_default_cli
self.cli_ctx = cli_ctx or get_default_cli()
self._storage = storage or ACCOUNT
self._authority = self.cli_ctx.cloud.endpoints.active_directory
from .auth.util import resource_to_scopes
self._arm_scope = resource_to_scopes(self.cli_ctx.cloud.endpoints.active_directory_resource_id) | Class to manage CLI's accounts (profiles) and identities (credentials).
:param cli_ctx: The CLI context
:param storage: A dict to store accounts, by default persisted to ~/.azure/azureProfile.json as JSON | __init__ | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def login(self,
interactive,
username,
password,
is_service_principal,
tenant,
scopes=None,
use_device_code=False,
allow_no_subscriptions=False,
use_cert_sn_issuer=None,
show_progress=False,
**kwargs):
"""
For service principal, `password` is a dict returned by ServicePrincipalAuth.build_credential
"""
if not scopes:
scopes = self._arm_scope
identity = _create_identity_instance(self.cli_ctx, self._authority, tenant_id=tenant)
user_identity = None
if interactive:
if not use_device_code and not can_launch_browser():
logger.info('No web browser is available. Fall back to device code.')
use_device_code = True
if not use_device_code and is_github_codespaces():
logger.info('GitHub Codespaces is detected. Fall back to device code.')
use_device_code = True
if use_device_code:
user_identity = identity.login_with_device_code(scopes=scopes, **kwargs)
else:
user_identity = identity.login_with_auth_code(scopes=scopes, **kwargs)
else:
if not is_service_principal:
user_identity = identity.login_with_username_password(username, password, scopes=scopes, **kwargs)
else:
identity.login_with_service_principal(username, password, scopes=scopes)
# We have finished login. Let's find all subscriptions.
if show_progress:
message = ('Retrieving subscriptions for the selection...' if tenant else
'Retrieving tenants and subscriptions for the selection...')
print(f"\n{message}")
if user_identity:
username = user_identity['username']
subscription_finder = SubscriptionFinder(self.cli_ctx)
# Create credentials
if user_identity:
credential = identity.get_user_credential(username)
else:
credential = identity.get_service_principal_credential(username)
if tenant:
subscriptions = subscription_finder.find_using_specific_tenant(tenant, credential)
else:
subscriptions = subscription_finder.find_using_common_tenant(username, credential)
if not subscriptions and not allow_no_subscriptions:
raise CLIError("No subscriptions found for {}.".format(username))
if allow_no_subscriptions:
t_list = [s.tenant_id for s in subscriptions]
bare_tenants = [t for t in subscription_finder.tenants if t not in t_list]
tenant_accounts = self._build_tenant_level_accounts(bare_tenants)
subscriptions.extend(tenant_accounts)
if not subscriptions:
return []
consolidated = self._normalize_properties(username, subscriptions,
is_service_principal, bool(use_cert_sn_issuer))
self._set_subscriptions(consolidated)
return deepcopy(consolidated) | For service principal, `password` is a dict returned by ServicePrincipalAuth.build_credential | login | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def get_login_credentials(self, subscription_id=None, aux_subscriptions=None, aux_tenants=None):
"""Get a credential compatible with Track 2 SDK."""
if aux_tenants and aux_subscriptions:
raise CLIError("Please specify only one of aux_subscriptions and aux_tenants, not both")
account = self.get_subscription(subscription_id)
managed_identity_type, managed_identity_id = Profile._parse_managed_identity_account(account)
if in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
# Cloud Shell
from .auth.msal_credentials import CloudShellCredential
# The credential must be wrapped by CredentialAdaptor so that it can work with Track 1 SDKs.
sdk_cred = CredentialAdaptor(CloudShellCredential())
elif managed_identity_type:
# managed identity
if _on_azure_arc():
from .auth.msal_credentials import ManagedIdentityCredential
# The credential must be wrapped by CredentialAdaptor so that it can work with Track 1 SDKs.
sdk_cred = CredentialAdaptor(ManagedIdentityCredential())
else:
# The resource is merely used by msrestazure to get the first access token.
# It is not actually used in an API invocation.
sdk_cred = MsiAccountTypes.msi_auth_factory(
managed_identity_type, managed_identity_id,
self.cli_ctx.cloud.endpoints.active_directory_resource_id)
else:
# user and service principal
external_tenants = []
if aux_tenants:
external_tenants = [tenant for tenant in aux_tenants if tenant != account[_TENANT_ID]]
if aux_subscriptions:
ext_subs = [aux_sub for aux_sub in aux_subscriptions if aux_sub != subscription_id]
for ext_sub in ext_subs:
sub = self.get_subscription(ext_sub)
if sub[_TENANT_ID] != account[_TENANT_ID]:
external_tenants.append(sub[_TENANT_ID])
credential = self._create_credential(account)
external_credentials = []
for external_tenant in external_tenants:
external_credentials.append(self._create_credential(account, tenant_id=external_tenant))
sdk_cred = CredentialAdaptor(credential, auxiliary_credentials=external_credentials)
return (sdk_cred,
str(account[_SUBSCRIPTION_ID]),
str(account[_TENANT_ID])) | Get a credential compatible with Track 2 SDK. | get_login_credentials | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def _new_account(self):
"""Build an empty Subscription which will be used as a tenant account.
API version doesn't matter as only specified attributes are preserved by _normalize_properties."""
from azure.cli.core.profiles import ResourceType, get_sdk
SubscriptionType = get_sdk(self.cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS,
'Subscription', mod='models')
s = SubscriptionType()
s.state = 'Enabled'
return s | Build an empty Subscription which will be used as a tenant account.
API version doesn't matter as only specified attributes are preserved by _normalize_properties. | _new_account | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def _create_credential(self, account, tenant_id=None, client_id=None):
"""Create a credential object driven by MSAL
:param account: The CLI account to create credential for
:param tenant_id: If not None, override tenantId from 'account'
:param client_id: Client ID of another public client application
:return:
"""
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
tenant_id = tenant_id or account[_TENANT_ID]
identity = _create_identity_instance(self.cli_ctx, self._authority, tenant_id=tenant_id, client_id=client_id)
# User
if user_type == _USER:
return identity.get_user_credential(username_or_sp_id)
# Service Principal
if user_type == _SERVICE_PRINCIPAL:
return identity.get_service_principal_credential(username_or_sp_id)
raise NotImplementedError | Create a credential object driven by MSAL
:param account: The CLI account to create credential for
:param tenant_id: If not None, override tenantId from 'account'
:param client_id: Client ID of another public client application
:return: | _create_credential | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def get_sp_auth_info(self, subscription_id=None, name=None, password=None, cert_file=None):
"""Generate a JSON for --json-auth argument when used in:
- az ad sp create-for-rbac --json-auth
"""
from collections import OrderedDict
account = self.get_subscription(subscription_id)
# is the credential created through command like 'create-for-rbac'?
result = OrderedDict()
result['clientId'] = name
if password:
result['clientSecret'] = password
else:
result['clientCertificate'] = cert_file
result['subscriptionId'] = subscription_id or account[_SUBSCRIPTION_ID]
result[_TENANT_ID] = account[_TENANT_ID]
endpoint_mappings = OrderedDict() # use OrderedDict to control the output sequence
endpoint_mappings['active_directory'] = 'activeDirectoryEndpointUrl'
endpoint_mappings['resource_manager'] = 'resourceManagerEndpointUrl'
endpoint_mappings['active_directory_graph_resource_id'] = 'activeDirectoryGraphResourceId'
endpoint_mappings['sql_management'] = 'sqlManagementEndpointUrl'
endpoint_mappings['gallery'] = 'galleryEndpointUrl'
endpoint_mappings['management'] = 'managementEndpointUrl'
from azure.cli.core.cloud import CloudEndpointNotSetException
for e in endpoint_mappings:
try:
result[endpoint_mappings[e]] = getattr(get_active_cloud(self.cli_ctx).endpoints, e)
except CloudEndpointNotSetException:
result[endpoint_mappings[e]] = None
return result | Generate a JSON for --json-auth argument when used in:
- az ad sp create-for-rbac --json-auth | get_sp_auth_info | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def find_using_specific_tenant(self, tenant, credential, tenant_id_description=None):
"""List subscriptions that can be accessed from a specific tenant.
If called from find_using_common_tenant, tenant_id_description is TenantIdDescription retrieved from
'Tenants - List' REST API. If directly called, tenant_id_description is None.
"""
client = self._create_subscription_client(credential)
# https://learn.microsoft.com/en-us/rest/api/resources/subscriptions/list
subscriptions = client.subscriptions.list()
all_subscriptions = []
for s in subscriptions:
_attach_token_tenant(s, tenant, tenant_id_description=tenant_id_description)
all_subscriptions.append(s)
self.tenants.append(tenant)
return all_subscriptions | List subscriptions that can be accessed from a specific tenant.
If called from find_using_common_tenant, tenant_id_description is TenantIdDescription retrieved from
'Tenants - List' REST API. If directly called, tenant_id_description is None. | find_using_specific_tenant | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def _transform_subscription_for_multiapi(s, s_dict):
"""
Transforms properties from Subscriptions - List 2019-06-01 and later to the subscription dict.
:param s: subscription object
:param s_dict: subscription dict
"""
if hasattr(s, 'home_tenant_id'):
s_dict[_HOME_TENANT_ID] = s.home_tenant_id
if hasattr(s, 'tenant_default_domain'):
s_dict[_TENANT_DEFAULT_DOMAIN] = s.tenant_default_domain
if hasattr(s, 'tenant_display_name'):
s_dict[_TENANT_DISPLAY_NAME] = s.tenant_display_name
if hasattr(s, 'managed_by_tenants'):
if s.managed_by_tenants is None:
s_dict[_MANAGED_BY_TENANTS] = None
else:
s_dict[_MANAGED_BY_TENANTS] = [{_TENANT_ID: t.tenant_id} for t in s.managed_by_tenants] | Transforms properties from Subscriptions - List 2019-06-01 and later to the subscription dict.
:param s: subscription object
:param s_dict: subscription dict | _transform_subscription_for_multiapi | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def _create_identity_instance(cli_ctx, authority, tenant_id=None, client_id=None):
"""Lazily import and create Identity instance to avoid unnecessary imports."""
from .auth.identity import Identity
from .util import should_encrypt_token_cache
encrypt = should_encrypt_token_cache(cli_ctx)
# EXPERIMENTAL: Use core.use_msal_http_cache=False to turn off MSAL HTTP cache.
use_msal_http_cache = cli_ctx.config.getboolean('core', 'use_msal_http_cache', fallback=True)
# On Windows, use core.enable_broker_on_windows=false to disable broker (WAM) for authentication.
enable_broker_on_windows = cli_ctx.config.getboolean('core', 'enable_broker_on_windows', fallback=True)
from .telemetry import set_broker_info
set_broker_info(enable_broker_on_windows)
# PREVIEW: In Azure Stack environment, use core.instance_discovery=false to disable MSAL's instance discovery.
instance_discovery = cli_ctx.config.getboolean('core', 'instance_discovery', True)
return Identity(authority, tenant_id=tenant_id, client_id=client_id,
encrypt=encrypt,
use_msal_http_cache=use_msal_http_cache,
enable_broker_on_windows=enable_broker_on_windows,
instance_discovery=instance_discovery) | Lazily import and create Identity instance to avoid unnecessary imports. | _create_identity_instance | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/_profile.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/_profile.py | MIT |
def get_error_type(error_msg):
"""The the error type of the failed command from the error message.
The error types are only consumed by aladdin service for better recommendations.
"""
error_type = AladdinUserFaultType.Unknown
if not error_msg:
return error_type.value
error_msg = error_msg.lower()
if 'unrecognized' in error_msg:
error_type = AladdinUserFaultType.UnrecognizedArguments
elif 'expected one argument' in error_msg or 'expected at least one argument' in error_msg \
or 'value required' in error_msg:
error_type = AladdinUserFaultType.ExpectedArgument
elif 'misspelled' in error_msg:
error_type = AladdinUserFaultType.UnknownSubcommand
elif 'arguments are required' in error_msg or 'argument required' in error_msg:
error_type = AladdinUserFaultType.MissingRequiredParameters
if '_subcommand' in error_msg:
error_type = AladdinUserFaultType.MissingRequiredSubcommand
elif '_command_package' in error_msg:
error_type = AladdinUserFaultType.UnableToParseCommandInput
elif 'not found' in error_msg or 'could not be found' in error_msg \
or 'resource not found' in error_msg:
error_type = AladdinUserFaultType.AzureResourceNotFound
if 'storage_account' in error_msg or 'storage account' in error_msg:
error_type = AladdinUserFaultType.StorageAccountNotFound
elif 'resource_group' in error_msg or 'resource group' in error_msg:
error_type = AladdinUserFaultType.ResourceGroupNotFound
elif 'pattern' in error_msg or 'is not a valid value' in error_msg or 'invalid' in error_msg:
error_type = AladdinUserFaultType.InvalidParameterValue
if 'jmespath_type' in error_msg:
error_type = AladdinUserFaultType.InvalidJMESPathQuery
elif 'datetime_type' in error_msg:
error_type = AladdinUserFaultType.InvalidDateTimeArgumentValue
elif '--output' in error_msg:
error_type = AladdinUserFaultType.InvalidOutputType
elif 'resource_group' in error_msg:
error_type = AladdinUserFaultType.InvalidResourceGroupName
elif 'storage_account' in error_msg:
error_type = AladdinUserFaultType.InvalidAccountName
elif "validation error" in error_msg:
error_type = AladdinUserFaultType.ValidationError
    return error_type.value | Get the error type of the failed command from the error message.
    The error types are only consumed by the Aladdin service for better recommendations. | get_error_type | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/command_recommender.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/command_recommender.py | MIT |
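A sketch of the classification, assuming azure-cli is installed; the messages below are made-up examples of parser errors.

from azure.cli.core.command_recommender import get_error_type

print(get_error_type('the following arguments are required: --name'))
print(get_error_type("'blobb' is misspelled or not recognized by the system."))
print(get_error_type(None))  # falls back to the Unknown fault type
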
def __init__(self, command, parameters, extension, error_msg, cli_ctx):
"""
:param command: The command name in user's input.
:type command: str
        :param parameters: The raw parameters in user's input.
:type parameters: list
:param extension: The extension name in user's input if the command comes from an extension.
:type extension: str
:param error_msg: The error message of the failed command.
:type error_msg: str
:param cli_ctx: CLI context when parser fails.
:type cli_ctx: knack.cli.CLI
"""
self.command = command.strip()
self.parameters = parameters
self.extension = extension
self.error_msg = error_msg
self.cli_ctx = cli_ctx
# the item is a dict with the form {'command': #, 'description': #}
self.help_examples = []
# the item is a dict with the form {'command': #, 'description': #, 'link': #}
self.aladdin_recommendations = [] | :param command: The command name in user's input.
:type command: str
        :param parameters: The raw parameters in user's input.
:type parameters: list
:param extension: The extension name in user's input if the command comes from an extension.
:type extension: str
:param error_msg: The error message of the failed command.
:type error_msg: str
:param cli_ctx: CLI context when parser fails.
:type cli_ctx: knack.cli.CLI | __init__ | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/command_recommender.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/command_recommender.py | MIT |
def set_help_examples(self, examples):
"""Set help examples.
:param examples: The examples from CLI help file.
:type examples: list
"""
self.help_examples.extend(examples) | Set help examples.
:param examples: The examples from CLI help file.
:type examples: list | set_help_examples | python | Azure/azure-cli | src/azure-cli-core/azure/cli/core/command_recommender.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli-core/azure/cli/core/command_recommender.py | MIT |