code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _audit_policy_show(
cmd,
client,
resource_group_name,
workspace_name,
sql_pool_name=None,
category_name=None):
'''
Common code to get workspace or sqlpool audit policy including diagnostic settings
'''
# Request audit policy
if sql_pool_name is None:
audit_policy = client.get(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
blob_auditing_policy_name='default')
else:
audit_policy = client.get(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
audit_policy.blob_storage_target_state = BlobAuditingPolicyState.disabled
audit_policy.event_hub_target_state = BlobAuditingPolicyState.disabled
audit_policy.log_analytics_target_state = BlobAuditingPolicyState.disabled
    # If the audit policy's state is disabled, there is nothing to do
if _is_audit_policy_state_disabled(audit_policy.state):
return audit_policy
if not audit_policy.storage_endpoint:
audit_policy.blob_storage_target_state = BlobAuditingPolicyState.disabled
else:
audit_policy.blob_storage_target_state = BlobAuditingPolicyState.enabled
# If 'is_azure_monitor_target_enabled' is false there is no reason to request diagnostic settings
if not audit_policy.is_azure_monitor_target_enabled:
return audit_policy
# Request diagnostic settings
diagnostic_settings = _get_diagnostic_settings(
cmd=cmd, resource_group_name=resource_group_name,
workspace_name=workspace_name, sql_pool_name=sql_pool_name)
# Sort received diagnostic settings by name and get first element to ensure consistency between command executions
diagnostic_settings.sort(key=lambda d: d.name)
audit_diagnostic_setting = _fetch_first_audit_diagnostic_setting(diagnostic_settings, category_name)
    # Initialize Azure Monitor properties
if audit_diagnostic_setting is not None:
if audit_diagnostic_setting.workspace_id is not None:
audit_policy.log_analytics_target_state = BlobAuditingPolicyState.enabled
audit_policy.log_analytics_workspace_resource_id = audit_diagnostic_setting.workspace_id
if audit_diagnostic_setting.event_hub_authorization_rule_id is not None:
audit_policy.event_hub_target_state = BlobAuditingPolicyState.enabled
audit_policy.event_hub_authorization_rule_id = audit_diagnostic_setting.event_hub_authorization_rule_id
audit_policy.event_hub_name = audit_diagnostic_setting.event_hub_name
return audit_policy | Common code to get workspace or sqlpool audit policy including diagnostic settings | _audit_policy_show | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolblobauditingpolicy.py | MIT |
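The helper `_is_audit_policy_state_disabled` used above is not shown in this row; here is a minimal sketch of what it plausibly does, assuming `BlobAuditingPolicyState` serializes to the strings 'Enabled'/'Disabled' (an assumption, not the actual Azure CLI implementation):

```python
def _is_audit_policy_state_disabled(state):
    # Hypothetical sketch: treat a missing state or the literal 'Disabled'
    # (case-insensitive) as disabled.
    return state is None or str(state).lower() == 'disabled'
```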
def sqlpool_sensitivity_label_update(
cmd,
client,
sql_pool_name,
workspace_name,
schema_name,
table_name,
column_name,
resource_group_name,
label_name=None,
information_type=None):
'''
Updates a sensitivity label. Custom update function to apply parameters to instance.
'''
# Get the information protection policy
from azure.mgmt.security import SecurityCenter
from azure.core.exceptions import HttpResponseError
security_center_client = get_mgmt_service_client(cmd.cli_ctx, SecurityCenter, asc_location="centralus")
information_protection_policy = security_center_client.information_protection_policies.get(
scope=_create_scope(), information_protection_policy_name="effective")
sensitivity_label = SensitivityLabel()
# Get the current label
try:
current_label = client.get(
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
SensitivityLabelSource.current)
# Initialize with existing values
sensitivity_label.label_name = current_label.label_name
sensitivity_label.label_id = current_label.label_id
sensitivity_label.information_type = current_label.information_type
sensitivity_label.information_type_id = current_label.information_type_id
    except HttpResponseError as ex:
        if 'SensitivityLabelsLabelNotFound' not in str(ex):
            raise
# Find the label id and information type id in the policy by the label name provided
if label_name:
label_id = next((id for id in information_protection_policy.labels
if information_protection_policy.labels[id].display_name.lower() ==
label_name.lower()),
None)
if label_id is None:
raise CLIError('The provided label name was not found in the information protection policy.')
sensitivity_label.label_id = label_id
sensitivity_label.label_name = label_name
if information_type:
information_type_id = next((id for id in information_protection_policy.information_types
if information_protection_policy.information_types[id].display_name.lower() ==
information_type.lower()),
None)
if information_type_id is None:
raise CLIError('The provided information type was not found in the information protection policy.')
sensitivity_label.information_type_id = information_type_id
sensitivity_label.information_type = information_type
return client.create_or_update(
resource_group_name, workspace_name, sql_pool_name, schema_name, table_name, column_name, sensitivity_label) | Updates a sensitivity label. Custom update function to apply parameters to instance. | sqlpool_sensitivity_label_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolsensitivitylabel.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/synapse/manual/operations/sqlpoolsensitivitylabel.py | MIT |
def rgetattr(obj, attr, *args):
"""See https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects"""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return reduce(_getattr, [obj] + attr.split('.')) | See https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects | rgetattr | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/netappfiles/validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/netappfiles/validators.py | MIT |
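A short, self-contained usage sketch of `rgetattr` on a nested object (the classes here are illustrative only):

```python
from functools import reduce

def rgetattr(obj, attr, *args):
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return reduce(_getattr, [obj] + attr.split('.'))

class Inner:
    value = 42

class Outer:
    inner = Inner()

assert rgetattr(Outer(), 'inner.value') == 42
# The optional default applies at every step of the dotted path:
assert rgetattr(Outer(), 'inner.missing', None) is None
```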
def create_role_assignment(cmd, role, scope,
assignee=None, assignee_object_id=None,
assignee_principal_type=None, description=None,
condition=None, condition_version=None, assignment_name=None):
"""Check parameters are provided correctly, then call _create_role_assignment."""
if bool(assignee) == bool(assignee_object_id):
raise CLIError('usage error: --assignee STRING | --assignee-object-id GUID')
if assignee_principal_type and not assignee_object_id:
raise CLIError('usage error: --assignee-object-id GUID --assignee-principal-type TYPE')
# If condition is set and condition-version is empty, condition-version defaults to "2.0".
if condition and not condition_version:
condition_version = "2.0"
# If condition-version is set, condition must be set as well.
if condition_version and not condition:
raise CLIError('usage error: When --condition-version is set, --condition must be set as well.')
if assignee:
object_id, principal_type = _resolve_object_id_and_type(cmd.cli_ctx, assignee, fallback_to_object_id=True)
else:
object_id = assignee_object_id
if assignee_principal_type:
# If principal type is provided, nothing to resolve, do not call Graph
principal_type = assignee_principal_type
else:
# Try best to get principal type
logger.warning('RBAC service might reject creating role assignment without --assignee-principal-type '
'in the future. Better to specify --assignee-principal-type manually.')
principal_type = _get_principal_type_from_object_id(cmd.cli_ctx, assignee_object_id)
try:
return _create_role_assignment(cmd.cli_ctx, role, object_id, scope=scope, resolve_assignee=False,
assignee_principal_type=principal_type, description=description,
condition=condition, condition_version=condition_version,
assignment_name=assignment_name)
except Exception as ex: # pylint: disable=broad-except
if _error_caused_by_role_assignment_exists(ex): # for idempotent
return list_role_assignments(cmd, assignee=assignee, role=role, scope=scope)[0]
raise | Check parameters are provided correctly, then call _create_role_assignment. | create_role_assignment | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None,
resolve_assignee=True, assignee_principal_type=None, description=None,
condition=None, condition_version=None, assignment_name=None):
"""Prepare scope, role ID and resolve object ID from Graph API."""
assignment_name = assignment_name or _gen_guid()
factory = _auth_client_factory(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope,
assignments_client._config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
worker = MultiAPIAdaptor(cli_ctx)
return worker.create_role_assignment(assignments_client, assignment_name, role_id, object_id, scope,
assignee_principal_type, description=description,
condition=condition, condition_version=condition_version) | Prepare scope, role ID and resolve object ID from Graph API. | _create_role_assignment | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def list_role_assignments(cmd, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False,
show_all=False, include_groups=False, include_classic_administrators=False):
'''
:param include_groups: include extra assignments to the groups of which the user is a
    member (transitively).
'''
if include_classic_administrators:
logger.warning(CLASSIC_ADMINISTRATOR_WARNING)
graph_client = _graph_client_factory(cmd.cli_ctx)
authorization_client = _auth_client_factory(cmd.cli_ctx, scope)
assignments_client = authorization_client.role_assignments
definitions_client = authorization_client.role_definitions
if show_all:
if resource_group_name or scope:
raise CLIError('group or scope are not required when --all is used')
scope = None
else:
scope = _build_role_scope(resource_group_name, scope,
definitions_client._config.subscription_id)
assignments = _search_role_assignments(cmd.cli_ctx, assignments_client, definitions_client,
scope, assignee, role,
include_inherited, include_groups)
results = todict(assignments) if assignments else []
if include_classic_administrators:
results += _backfill_assignments_for_co_admins(cmd.cli_ctx, authorization_client, assignee)
if not results:
return []
    # 1. Fill in logical names to make the results understandable.
    #    (It's possible that associated roles and principals were deleted; in that case we just do nothing.)
    # 2. Fill in role names.
role_defs = list(definitions_client.list(
scope=scope or ('/subscriptions/' + definitions_client._config.subscription_id)))
worker = MultiAPIAdaptor(cmd.cli_ctx)
role_dics = {i.id: worker.get_role_property(i, 'role_name') for i in role_defs}
for i in results:
if not i.get('roleDefinitionName'):
if role_dics.get(worker.get_role_property(i, 'roleDefinitionId')):
worker.set_role_property(i, 'roleDefinitionName',
role_dics[worker.get_role_property(i, 'roleDefinitionId')])
else:
i['roleDefinitionName'] = None # the role definition might have been deleted
# fill in principal names
principal_ids = set(worker.get_role_property(i, 'principalId')
for i in results if worker.get_role_property(i, 'principalId'))
if principal_ids:
try:
principals = _get_object_stubs(graph_client, principal_ids)
principal_dics = {i[ID]: _get_displayable_name(i) for i in principals}
for i in [r for r in results if not r.get('principalName')]:
i['principalName'] = ''
if principal_dics.get(worker.get_role_property(i, 'principalId')):
worker.set_role_property(i, 'principalName',
principal_dics[worker.get_role_property(i, 'principalId')])
except (HttpResponseError, GraphError) as ex:
            # A failure to resolve principals due to missing Graph permissions should not fail the whole command
logger.info("Failed to resolve graph object information per error '%s'", ex)
for r in results:
if not r.get('additionalProperties'): # remove the useless "additionalProperties"
r.pop('additionalProperties', None)
return results | :param include_groups: include extra assignments to the groups of which the user is a
member (transitively). | list_role_assignments | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def _resolve_application(client, identifier):
"""Resolve an application's id (previously known as objectId) from
- appId
- id (returned as-is)
- identifierUris
"""
if is_guid(identifier):
# it is either app id or object id, let us verify
result = client.application_list(filter="appId eq '{}'".format(identifier))
# If not found, this looks like an object id
return result[0][ID] if result else identifier
result = client.application_list(filter="identifierUris/any(s:s eq '{}')".format(identifier))
if not result:
error = CLIError("Application with identifier URI '{}' doesn't exist".format(identifier))
error.status_code = 404 # Make sure CLI returns 3
raise error
return result[0][ID] | Resolve an application's id (previously known as objectId) from
- appId
- id (returned as-is)
- identifierUris | _resolve_application | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
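Both `_resolve_application` and `_resolve_service_principal` (below) lean on `is_guid` from `azure.cli.core.util`. A plausible minimal equivalent, shown only to illustrate the check (an assumption, not the library's actual code):

```python
import uuid

def is_guid(value):
    # A GUID is anything uuid.UUID can parse, e.g. '00000003-0000-0000-c000-000000000000'.
    try:
        uuid.UUID(value)
        return True
    except ValueError:
        return False
```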
def _resolve_service_principal(client, identifier):
"""Resolve a service principal's id (previously known as objectId) from
- servicePrincipalNames (contains appId and identifierUris of the corresponding app)
- id (returned as-is)
"""
result = client.service_principal_list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier))
if result:
return result[0][ID]
if is_guid(identifier):
return identifier # assume an object id
error = CLIError("Service principal '{}' doesn't exist".format(identifier))
error.status_code = 404 # Make sure CLI returns 3
raise error | Resolve a service principal's id (previously known as objectId) from
- servicePrincipalNames (contains appId and identifierUris of the corresponding app)
- id (returned as-is) | _resolve_service_principal | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def _application_add_password(client, app, display_name, start_datetime, end_datetime):
"""Let graph service generate a random password."""
body = _build_add_password_credential_body(display_name, start_datetime, end_datetime)
result = client.application_add_password(app[ID], body)
return result | Let graph service generate a random password. | _application_add_password | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def _reset_credential(cmd, graph_object, add_password_func, remove_password_func, patch_func,
create_cert=False, cert=None, years=None,
end_date=None, keyvault=None, append=False, display_name=None):
# pylint: disable=too-many-locals
"""Reset passwordCredentials and keyCredentials properties for application or service principal.
Application and service principal share the same interface for operating credentials.
:param graph_object: The application or service principal object (dict).
:param add_password_func: Add password API function.
:param remove_password_func: Remove password API function.
:param patch_func: Patch API function. Used to update keyCredentials.
"""
# https://learn.microsoft.com/en-us/graph/api/resources/passwordcredential
# https://learn.microsoft.com/en-us/graph/api/resources/keycredential
# Only displayName should be set.
# For passwordCredential, customKeyIdentifier is not applicable.
# For keyCredential, customKeyIdentifier is automatically computed by Graph service as certificate thumbprint.
# https://github.com/Azure/azure-cli/issues/20561
app_start_date = datetime.datetime.now(datetime.timezone.utc)
if years is not None and end_date is not None:
raise CLIError('usage error: --years | --end-date')
if end_date is None:
years = years or 1
app_end_date = app_start_date + relativedelta(years=years)
else:
app_end_date = dateutil.parser.parse(end_date)
if app_end_date.tzinfo is None:
app_end_date = app_end_date.replace(tzinfo=datetime.timezone.utc)
years = (app_end_date - app_start_date).days / 365
# Created password
password = None
# Created certificate
cert_file = None
if not append:
        # Delete all existing passwords
for cred in graph_object['passwordCredentials']:
body = {"keyId": cred['keyId']}
remove_password_func(graph_object[ID], body)
# By default, add password
if not (cert or create_cert):
body = _build_add_password_credential_body(display_name, app_start_date, app_end_date)
add_password_result = add_password_func(graph_object[ID], body)
password = add_password_result['secretText']
# key_id = add_password_result['keyId']
else:
public_cert_string, cert_file, cert_start_date, cert_end_date = \
_process_certificate(cmd.cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
keyvault)
app_start_date, app_end_date, cert_start_date, cert_end_date = \
_validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date)
key_creds = []
if append:
key_creds = graph_object['keyCredentials']
new_key_creds = _build_key_credentials(
key_value=public_cert_string, start_date=app_start_date, end_date=app_end_date, display_name=display_name)
# key_id = new_key_creds[0]['keyId']
key_creds.extend(new_key_creds)
patch_body = {
'keyCredentials': key_creds
}
patch_func(graph_object[ID], patch_body)
# Keep backward compatibility
# TODO: Should we return the passwordCredential or keyCredential directly?
result = {
'appId': graph_object['appId'],
# 'keyId': key_id,
'password': password
}
if cert_file:
result['fileWithCertAndPrivateKey'] = cert_file
logger.warning(CREDENTIAL_WARNING)
return result | Reset passwordCredentials and keyCredentials properties for application or service principal.
Application and service principal share the same interface for operating credentials.
:param graph_object: The application or service principal object (dict).
:param add_password_func: Add password API function.
:param remove_password_func: Remove password API function.
:param patch_func: Patch API function. Used to update keyCredentials. | _reset_credential | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
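The validity-window logic in `_reset_credential` is easy to get wrong, so here it is extracted into a standalone sketch (the helper name is made up) that shows the `--years`/`--end-date` interaction in isolation:

```python
import datetime
import dateutil.parser
from dateutil.relativedelta import relativedelta

def compute_credential_window(years=None, end_date=None):
    # Mirrors the function above: --years and --end-date are mutually exclusive;
    # with neither given, the credential lasts one year from now.
    start = datetime.datetime.now(datetime.timezone.utc)
    if years is not None and end_date is not None:
        raise ValueError('usage error: --years | --end-date')
    if end_date is None:
        end = start + relativedelta(years=years or 1)
    else:
        end = dateutil.parser.parse(end_date)
        if end.tzinfo is None:
            end = end.replace(tzinfo=datetime.timezone.utc)  # naive dates assumed UTC
    return start, end
```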
def _match_odata_type(odata_type, user_input):
"""Compare the @odata.type property of the object with the user's input.
For example, a service principal object has
"@odata.type": "#microsoft.graph.servicePrincipal"
"""
odata_type = odata_type.lower()
user_input = user_input.lower()
# Full match "#microsoft.graph.servicePrincipal" == "#microsoft.graph.servicePrincipal"
# Partial match "#microsoft.graph.servicePrincipal" ~= "servicePrincipal"
return odata_type == user_input or odata_type.split('.')[-1] == user_input | Compare the @odata.type property of the object with the user's input.
For example, a service principal object has
"@odata.type": "#microsoft.graph.servicePrincipal" | _match_odata_type | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def _open(location):
"""Open a file that only the current user can access."""
    # The 0o600 mode is effectively a no-op on Windows, and that is fine.
return os.open(location, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600) | Open a file that only the current user can access. | _open | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
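`_open` returns a raw file descriptor rather than a file object, so callers would typically wrap it with `os.fdopen`; an illustrative pattern (the wrapper name is hypothetical):

```python
import os

def write_private_file(location, content):
    # fdopen takes ownership of the 0o600 descriptor and closes it on exit.
    with os.fdopen(_open(location), 'w') as f:
        f.write(content)
```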
def create_user(client, user_principal_name, display_name, password,
mail_nickname=None, immutable_id=None, force_change_password_next_sign_in=False):
'''
:param mail_nickname: mail alias. default to user principal name
'''
mail_nickname = mail_nickname or user_principal_name.split('@')[0]
body = {}
_set_user_properties(body, user_principal_name=user_principal_name, display_name=display_name, password=password,
mail_nickname=mail_nickname, immutable_id=immutable_id,
force_change_password_next_sign_in=force_change_password_next_sign_in, account_enabled=True)
return client.user_create(body) | :param mail_nickname: mail alias. default to user principal name | create_user | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def list_groups(client, display_name=None, query_filter=None):
"""List groups in the directory"""
sub_filters = []
if query_filter:
sub_filters.append(query_filter)
if display_name:
sub_filters.append("startswith(displayName,'{}')".format(display_name))
return client.group_list(filter=' and '.join(sub_filters) if sub_filters else None) | List groups in the directory | list_groups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
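For reference, the OData filter assembled by `list_groups` when both arguments are supplied looks like this (the `--filter` value is made up):

```python
sub_filters = ["securityEnabled eq true",              # hypothetical --filter value
               "startswith(displayName,'cli-group')"]  # derived from --display-name
assert ' and '.join(sub_filters) == \
    "securityEnabled eq true and startswith(displayName,'cli-group')"
```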
def _build_directory_object_json(client, object_id):
"""Get JSON representation of the id of the directoryObject."""
body = {
"@odata.id": client.get_object_url(object_id)
}
return body | Get JSON representation of the id of the directoryObject. | _build_directory_object_json | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def _get_member_groups(get_member_group_func, identifier, security_enabled_only):
"""Call 'directoryObject: getMemberGroups' API with specified get_member_group_func.
https://learn.microsoft.com/en-us/graph/api/directoryobject-getmembergroups
"""
body = {
"securityEnabledOnly": security_enabled_only
}
return get_member_group_func(identifier, body) | Call 'directoryObject: getMemberGroups' API with specified get_member_group_func.
https://learn.microsoft.com/en-us/graph/api/directoryobject-getmembergroups | _get_member_groups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/custom.py | MIT |
def get_role_property(self, obj, property_name): # pylint: disable=no-self-use
"""Get property for RoleDefinition and RoleAssignment object."""
# 2015-07-01 RoleDefinition: flattened, RoleAssignment: unflattened
# 2018-01-01-preview RoleDefinition: flattened
# 2020-04-01-preview RoleAssignment: flattened
# 2022-04-01 RoleDefinition: flattened RoleAssignment: flattened
# Get property_name from properties if the model is unflattened.
if isinstance(obj, dict):
if 'properties' in obj:
obj = obj['properties']
return obj[property_name]
if hasattr(obj, 'properties'):
obj = obj.properties
return getattr(obj, property_name) | Get property for RoleDefinition and RoleAssignment object. | get_role_property | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/_multi_api_adaptor.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/_multi_api_adaptor.py | MIT |
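`get_role_property` never touches `self`, so its flattened-vs-unflattened handling can be demonstrated standalone (sample payloads made up for the example):

```python
def get_role_property(obj, property_name):
    # Same logic as the method above, extracted for illustration.
    if isinstance(obj, dict):
        if 'properties' in obj:
            obj = obj['properties']
        return obj[property_name]
    if hasattr(obj, 'properties'):
        obj = obj.properties
    return getattr(obj, property_name)

assert get_role_property({'roleName': 'Reader'}, 'roleName') == 'Reader'                  # flattened
assert get_role_property({'properties': {'roleName': 'Reader'}}, 'roleName') == 'Reader'  # unflattened
```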
def set_role_property(self, obj, property_name, property_value): # pylint: disable=no-self-use
"""Set property for RoleDefinition and RoleAssignment object.
    Luckily this function is only called for a RoleAssignment `obj` returned by the service, and `properties`
has been processed, either by being flattened or set. We can definitively know whether `obj` is flattened
or not.
There is NO use case where `obj` is provided by the user and `properties` has not been processed.
    In such a case, we won't be able to decide if `obj` is flattened or not."""
if isinstance(obj, dict):
if 'properties' in obj:
obj = obj['properties']
obj[property_name] = property_value
else:
if hasattr(obj, 'properties'):
obj = obj.properties
            setattr(obj, property_name, property_value) | Set property for RoleDefinition and RoleAssignment object.
Luckily this function is only called for a RoleAssignment `obj` returned by the service, and `properties`
has been processed, either by being flattened or set. We can definitively know whether `obj` is flattened
or not.
There is NO use case where `obj` is provided by the user and `properties` has not been processed.
In such a case, we won't be able to decide if `obj` is flattened or not. | set_role_property | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/_multi_api_adaptor.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/_multi_api_adaptor.py | MIT |
def get_current_identity_object_id(cli_ctx):
"""This function tries to get the current identity's object ID following below steps:
1. First try to resolve with /me API: https://learn.microsoft.com/en-us/graph/api/user-get
2. If failed, try to resolve with either
- /users API: https://learn.microsoft.com/en-us/graph/api/user-list
- /servicePrincipals API: https://learn.microsoft.com/en-us/graph/api/serviceprincipal-list
If all of these attempts fail, return None.
"""
from azure.cli.command_modules.role import graph_client_factory, GraphError
graph_client = graph_client_factory(cli_ctx)
try:
return _get_current_user_object_id(graph_client)
except GraphError:
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx)
# To align with _get_current_user_object_id, only look up the current upn/spn, so
# cli_ctx.data['subscription_id'] should not be used in get_subscription.
# Otherwise, it may result in looking up a upn/spn different from the current login context.
subscription = profile.get_subscription()
return _get_object_id_from_subscription(graph_client, subscription) | This function tries to get the current identity's object ID following below steps:
1. First try to resolve with /me API: https://learn.microsoft.com/en-us/graph/api/user-get
2. If failed, try to resolve with either
- /users API: https://learn.microsoft.com/en-us/graph/api/user-list
- /servicePrincipals API: https://learn.microsoft.com/en-us/graph/api/serviceprincipal-list
If all of these attempts fail, return None. | get_current_identity_object_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/util.py | MIT |
def retry(func, sleep_duration=ROLE_COMMAND_SLEEP_DURATION, max_retry=ROLE_COMMAND_MAX_RETRY):
"""Retry func until success."""
    # Due to unstable role definition APIs: https://github.com/Azure/azure-cli/issues/3187
import time
while True:
try:
return func()
except (AssertionError, CLIError):
# AssertionError is raised by checks in self.cmd or self.assert*
# CLIError is raised by failed command execution
if max_retry > 0:
max_retry -= 1
time.sleep(sleep_duration)
else:
raise | Retry func until success. | retry | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/util.py | MIT |
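Typical use wraps a flaky check in a callable so the whole thing is re-evaluated on each attempt. A self-contained sketch (the failing function is made up; `CLIError` is assumed to come from `knack.util`, and the default `max_retry` is assumed to allow at least two retries):

```python
import itertools
from knack.util import CLIError

attempts = itertools.count()

def flaky_check():
    # Fails twice (e.g. waiting for role assignment propagation), then succeeds.
    if next(attempts) < 2:
        raise CLIError('not yet propagated')
    return 'ok'

assert retry(flaky_check, sleep_duration=0) == 'ok'  # succeeds on the third attempt
```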
def escape_apply_kwargs(val):
"""Replace {} as {{}} so that val is preserved after _apply_kwargs."""
    return val.replace('{', "{{").replace('}', "}}") | Replace '{' and '}' with '{{' and '}}' so that val is preserved after _apply_kwargs. | escape_apply_kwargs | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/util.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/util.py | MIT |
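A quick demonstration of the escaping (pure string manipulation, so safe to run standalone):

```python
val = escape_apply_kwargs('{"location": "eastus"}')
assert val == '{{"location": "eastus"}}'
# str.format-style substitution now leaves the literal braces intact:
assert val.format() == '{"location": "eastus"}'
```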
def _test_credential(self, object_type):
"""Test app/sp credential commands. Make sure app_id has been configured in self.kwargs."""
self.kwargs['object_type'] = object_type
# Test password
self.cmd('ad {object_type} credential reset --id {app_id} --append --years 2 --display-name key1',
checks=self.check('appId', '{app_id}'))
result = self.cmd('ad {object_type} credential list --id {app_id}',
checks=self.check('length([*])', 1)).get_output_in_json()
key_id = result[0]['keyId']
self.cmd('ad {object_type} credential reset --id {app_id} --append --display-name key2')
self.cmd('ad {object_type} credential list --id {app_id}', checks=[
self.check('length([*])', 2),
# Graph API reverses the order of insertion
self.check('[0].displayName', 'key2'),
self.check('[1].displayName', 'key1')
])
self.cmd('ad {object_type} credential delete --id {app_id} --key-id ' + key_id)
self.cmd('ad {object_type} credential list --id {app_id}', checks=self.check('length([*])', 1))
        # Try using --end-date
self.cmd('ad {object_type} credential reset --id {app_id} --end-date "2100-12-31T11:59:59+00:00"')
self.cmd('ad {object_type} credential list --id {app_id}',
checks=self.check('[0].endDateTime', '2100-12-31T11:59:59Z'))
self.cmd('ad {object_type} credential reset --id {app_id} --end-date "2100-12-31"')
self.cmd('ad {object_type} credential list --id {app_id}',
checks=self.check('[0].endDateTime', '2100-12-31T00:00:00Z')) | Test app/sp credential commands. Make sure app_id has been configured in self.kwargs. | _test_credential | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
def test_app_scenario(self):
"""
- Test creating application with its properties.
- Test creating application first and update its properties.
"""
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
# identifierUris must be on verified domain
# https://learn.microsoft.com/en-us/azure/active-directory/develop/security-best-practices-for-app-registration#appid-uri-configuration
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}',
'homepage': 'https://myapp.com/',
'web_redirect_uri_1': 'http://localhost/webtest1',
'web_redirect_uri_2': 'http://localhost/webtest2',
'public_client_redirect_uri_1': 'http://localhost/publicclienttest1',
'public_client_redirect_uri_2': 'http://localhost/publicclienttest2',
'key_value': TEST_CERTIFICATE,
'app_roles': TEST_APP_ROLES,
'optional_claims': TEST_OPTIONAL_CLAIMS,
'required_resource_accesses': TEST_REQUIRED_RESOURCE_ACCESS,
'service_management_reference': '96524024-75b0-497b-ab38-0381399a6a9d',
'requested_access_token_version': 2
})
# Create
result = self.cmd(
'ad app create --display-name {display_name} '
'--identifier-uris {identifier_uri} '
'--is-fallback-public-client True '
'--service-management-reference {service_management_reference} '
'--sign-in-audience AzureADMultipleOrgs '
# api
'--requested-access-token-version {requested_access_token_version} '
# web
'--web-home-page-url {homepage} '
'--web-redirect-uris {web_redirect_uri_1} {web_redirect_uri_2} '
'--enable-access-token-issuance true --enable-id-token-issuance true '
# publicClient
'--public-client-redirect-uris {public_client_redirect_uri_1} {public_client_redirect_uri_2} '
# keyCredentials
'--key-value {key_value} '
# JSON properties
"--app-roles '{app_roles}' "
"--optional-claims '{optional_claims}' "
"--required-resource-accesses '{required_resource_accesses}'",
checks=[
self.check('displayName', '{display_name}'),
self.check('identifierUris[0]', '{identifier_uri}'),
self.check('isFallbackPublicClient', True),
self.check('serviceManagementReference', '{service_management_reference}'),
self.check('signInAudience', 'AzureADMultipleOrgs'),
self.check('api.requestedAccessTokenVersion', '{requested_access_token_version}'),
self.check('web.homePageUrl', '{homepage}'),
self.check('web.redirectUris[0]', '{web_redirect_uri_1}'),
self.check('web.redirectUris[1]', '{web_redirect_uri_2}'),
self.check('web.implicitGrantSettings.enableIdTokenIssuance', True),
self.check('web.implicitGrantSettings.enableAccessTokenIssuance', True),
self.check('publicClient.redirectUris[0]', '{public_client_redirect_uri_1}'),
self.check('publicClient.redirectUris[1]', '{public_client_redirect_uri_2}'),
self.check('length(keyCredentials)', 1),
self.check('length(appRoles)', 2),
self.check('length(optionalClaims)', 3),
self.check('length(requiredResourceAccess)', 2)
]).get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd('ad app delete --id {app_id}')
self.cmd('ad app show --id {app_id}', expect_failure=True)
# Create, then update
display_name_2 = self.create_random_name(prefix='azure-cli-test', length=30)
display_name_3 = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name_2': display_name_2,
'display_name_3': display_name_3,
'identifier_uri_3': f'api://{display_name_3}',
})
# Graph cannot create app with same identifierUris even after deleting the previous one. Still confirming with
# service team.
result = self.cmd('ad app create --display-name {display_name_2}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd(
'ad app update --id {app_id} --display-name {display_name_3} '
'--identifier-uris {identifier_uri_3} '
'--is-fallback-public-client True '
'--service-management-reference {service_management_reference} '
# signInAudience can't be PATCHed currently due to service issue. PATCH first fails with 404, then 500
# '--sign-in-audience AzureADMultipleOrgs '
# api
'--requested-access-token-version {requested_access_token_version} '
# web
'--web-home-page-url {homepage} '
'--web-redirect-uris {web_redirect_uri_1} {web_redirect_uri_2} '
'--enable-access-token-issuance true --enable-id-token-issuance true '
# keyCredentials
'--key-value {key_value} '
# publicClient
'--public-client-redirect-uris {public_client_redirect_uri_1} {public_client_redirect_uri_2} '
"--app-roles '{app_roles}' "
"--optional-claims '{optional_claims}' "
"--required-resource-accesses '{required_resource_accesses}'"
)
self.cmd(
'ad app show --id {app_id}',
checks=[
self.check('displayName', '{display_name_3}'),
self.check('identifierUris[0]', '{identifier_uri_3}'),
self.check('isFallbackPublicClient', True),
self.check('serviceManagementReference', '{service_management_reference}'),
# self.check('signInAudience', 'AzureADMultipleOrgs'),
# api
self.check('api.requestedAccessTokenVersion', '{requested_access_token_version}'),
# web
self.check('web.homePageUrl', '{homepage}'),
# redirectUris doesn't preserve item order.
# self.check('web.redirectUris[0]', '{web_redirect_uri_1}'),
# self.check('web.redirectUris[1]', '{web_redirect_uri_2}'),
self.check('length(web.redirectUris)', 2),
self.check('web.implicitGrantSettings.enableIdTokenIssuance', True),
self.check('web.implicitGrantSettings.enableAccessTokenIssuance', True),
# self.check('publicClient.redirectUris[0]', '{public_client_redirect_uri_1}'),
# self.check('publicClient.redirectUris[1]', '{public_client_redirect_uri_2}'),
self.check('length(publicClient.redirectUris)', 2),
self.check('length(keyCredentials)', 1),
self.check('length(appRoles)', 2),
self.check('length(optionalClaims)', 3),
self.check('length(requiredResourceAccess)', 2)
]).get_output_in_json()
# Update with generic update
self.cmd('ad app update --id {app_id} --set isDeviceOnlyAuthSupported=true')
self.cmd('ad app show --id {app_id}', checks=self.check('isDeviceOnlyAuthSupported', True))
self.cmd('ad app delete --id {app_id}')
self.cmd('ad app show --id {app_id}', expect_failure=True) | - Test creating application with its properties.
- Test creating application first and update its properties. | test_app_scenario | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
def test_app_resolution(self):
"""Test application can be resolved with identifierUris, appId, or id."""
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}'
})
app = self.cmd('ad app create --display-name {display_name} '
'--identifier-uris {identifier_uri}').get_output_in_json()
self.kwargs['app_id'] = app['appId']
self.kwargs['id'] = app['id']
# Show with appId
self.cmd('ad app show --id {app_id}', checks=[self.check('displayName', '{display_name}')])
# Show with id
self.cmd('ad app show --id {id}', checks=[self.check('displayName', '{display_name}')])
# Show with identifierUris
self.cmd('ad app show --id {identifier_uri}', checks=[self.check('displayName', '{display_name}')])
self.cmd('ad app delete --id {app_id}') | Test application can be resolved with identifierUris, appId, or id. | test_app_resolution | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
def get_required_resource_access(required_resource_access_list, resource_app_id):
"""Search for the RequiredResourceAccess from required_resource_access(list) by resourceAppId."""
return next(
filter(lambda a: a['resourceAppId'] == resource_app_id, required_resource_access_list), None) | Search for the RequiredResourceAccess from required_resource_access(list) by resourceAppId. | test_app_permission.get_required_resource_access | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
def test_app_permission(self):
if not self._get_signed_in_user():
return
self.kwargs = {
'display_name': self.create_random_name('cli-app-', 15),
# Microsoft Graph
'microsoft_graph_api': '00000003-0000-0000-c000-000000000000',
            # Azure Service Management
'azure_service_management_api': '797f4846-ba00-4fd7-ba43-dac1f8f63013',
}
# Look up for permission IDs
graph_sp = self.cmd('ad sp show --id {microsoft_graph_api}').get_output_in_json()
        # Delegated permission Application.Read.All
self.kwargs['microsoft_graph_permission1'] = _get_id_from_value(
graph_sp['oauth2PermissionScopes'], 'Application.Read.All')
# Application permission Application.ReadWrite.OwnedBy
self.kwargs['microsoft_graph_permission2'] = _get_id_from_value(
graph_sp['appRoles'], 'Application.ReadWrite.OwnedBy')
arm_sp = self.cmd('ad sp show --id {azure_service_management_api}').get_output_in_json()
# Delegated permission user_impersonation
self.kwargs['azure_service_management_permission'] = _get_id_from_value(
            arm_sp['oauth2PermissionScopes'], 'user_impersonation')
result = self.cmd('ad sp create-for-rbac --name {display_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
# Test add permissions using a list
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}=Scope {microsoft_graph_permission2}=Role')
self.cmd('ad app permission add --id {app_id} --api {azure_service_management_api} '
'--api-permissions {azure_service_management_permission}=Scope')
permissions = self.cmd(
'ad app permission list --id {app_id}', checks=[self.check('length([*])', 2)]).get_output_in_json()
# Sample result (required_resource_access):
# "requiredResourceAccess": [
# {
# "resourceAccess": [
# {
# "id": "41094075-9dad-400e-a0bd-54e686782033",
# "type": "Scope"
# }
# ],
# "resourceAppId": "797f4846-ba00-4fd7-ba43-dac1f8f63013"
# },
# {
# "resourceAccess": [
# {
# "id": "c79f8feb-a9db-4090-85f9-90d820caa0eb",
# "type": "Scope"
# },
# {
# "id": "18a4783c-866b-4cc7-a460-3d5e5662c884",
# "type": "Role"
# }
# ],
# "resourceAppId": "00000003-0000-0000-c000-000000000000"
# }
# ],
microsoft_graph_permission1_object = {
"id": self.kwargs['microsoft_graph_permission1'],
"type": "Scope"}
microsoft_graph_permission2_object = {
"id": self.kwargs['microsoft_graph_permission2'],
"type": "Role"}
azure_service_management_permission_object = {
"id": self.kwargs['azure_service_management_permission'],
"type": "Scope"}
def get_required_resource_access(required_resource_access_list, resource_app_id):
"""Search for the RequiredResourceAccess from required_resource_access(list) by resourceAppId."""
return next(
filter(lambda a: a['resourceAppId'] == resource_app_id, required_resource_access_list), None)
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
azure_service_management_api_object = get_required_resource_access(
permissions, self.kwargs['azure_service_management_api'])
# Check initial `permission add` is correct
self.assertEqual(microsoft_graph_api_object['resourceAccess'],
[microsoft_graph_permission1_object, microsoft_graph_permission2_object])
self.assertEqual(azure_service_management_api_object['resourceAccess'],
[azure_service_management_permission_object])
# Test delete first permission (ResourceAccess) from microsoft_graph_api.
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} --api-permissions {microsoft_graph_permission1}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
# microsoft_graph_permission1 (ResourceAccess) is deleted and
# microsoft_graph_permission2 (ResourceAccess) remains
self.assertEqual(microsoft_graph_api_object['resourceAccess'], [microsoft_graph_permission2_object])
# Test delete remaining permission (ResourceAccess) from microsoft_graph_api.
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} --api-permissions {microsoft_graph_permission2}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
# microsoft_graph_api (RequiredResourceAccess) is removed automatically
self.assertIsNone(microsoft_graph_api_object)
# Add back microsoft_graph_permission1 and microsoft_graph_permission2
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}=Scope {microsoft_graph_permission2}=Role')
# Delete both microsoft_graph_permission1 and microsoft_graph_permission2 at the same time
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1} {microsoft_graph_permission2}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
# microsoft_graph_api (RequiredResourceAccess) is removed automatically
self.assertIsNone(microsoft_graph_api_object)
# Test delete 1 api azure_service_management_api (RequiredResourceAccess)
self.cmd('ad app permission delete --id {app_id} --api {azure_service_management_api}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
azure_service_management_api_object = get_required_resource_access(permissions, self.kwargs['azure_service_management_api'])
self.assertIsNone(azure_service_management_api_object)
# Test delete non-existing api
self.cmd('ad app permission delete --id {app_id} --api 11111111-0000-0000-c000-000000000000')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
self.assertEqual(permissions, [])
# Test delete api permission from non-existing api
self.cmd('ad app permission delete --id {app_id} '
'--api 11111111-0000-0000-c000-000000000000 '
'--api-permissions {microsoft_graph_permission1} {microsoft_graph_permission2}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
self.assertEqual(permissions, [])
# Test delete non-existing api permission from existing api
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}=Scope {microsoft_graph_permission2}=Role')
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} --api-permissions 22222222-0000-0000-c000-000000000000')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
self.assertEqual(microsoft_graph_api_object['resourceAccess'],
[microsoft_graph_permission1_object, microsoft_graph_permission2_object])
# Test permission type '=Scope' is missing
from azure.cli.core.azclierror import ArgumentUsageError
with self.assertRaisesRegex(ArgumentUsageError, 'both permission id and type'):
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}') | Search for the RequiredResourceAccess from required_resource_access(list) by resourceAppId. | test_app_permission | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
def test_service_principal_scenario(self):
"""
- Test service principal creation.
- Test service principal can be resolved with servicePrincipalNames (appId and identifierUris) or id.
"""
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}'
})
# Create
app = self.cmd('ad app create --display-name {display_name} '
'--identifier-uris {identifier_uri}').get_output_in_json()
self.kwargs['app_id'] = app['appId']
sp = self.cmd('ad sp create --id {app_id}',
checks=[
self.check('appId', app['appId']),
self.check('appDisplayName', app['displayName']),
self.check('servicePrincipalNames[0]', '{app_id}')
]).get_output_in_json()
self.kwargs['id'] = sp['id']
# Show with appId as one of servicePrincipalNames
self.cmd('ad sp show --id {app_id}')
# Show with identifierUri as one of servicePrincipalNames
self.cmd('ad sp show --id {identifier_uri}')
# Show with id
self.cmd('ad sp show --id {id}')
# Update with generic update
self.cmd('ad sp update --id {id} --set appRoleAssignmentRequired=true')
self.cmd('ad sp show --id {id}', checks=self.check('appRoleAssignmentRequired', True))
self.cmd('ad sp delete --id {app_id}')
self.cmd('ad app delete --id {app_id}')
self.cmd('ad sp show --id {app_id}', expect_failure=True)
self.cmd('ad app show --id {app_id}', expect_failure=True) | - Test service principal creation.
- Test service principal can be resolved with servicePrincipalNames (appId and identifierUris) or id. | test_service_principal_scenario | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
def _get_id_from_value(permissions, value):
"""Get id from value for appRoles or oauth2PermissionScopes."""
# https://learn.microsoft.com/en-us/graph/api/resources/serviceprincipal?view=graph-rest-1.0#properties
return next(p['id'] for p in permissions if p['value'] == value) | Get id from value for appRoles or oauth2PermissionScopes. | _get_id_from_value | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py | MIT |
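A usage sketch of `_get_id_from_value` with made-up sample data shaped like a Graph `oauth2PermissionScopes` entry:

```python
permissions = [
    {'id': 'e1fe6dd8-ba31-4d61-89e7-88639da4683d', 'value': 'User.Read'},  # sample entry
]
assert _get_id_from_value(permissions, 'User.Read') == 'e1fe6dd8-ba31-4d61-89e7-88639da4683d'
```

Note that `next` without a default raises `StopIteration` when the value is absent.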
def get_object_url(self, object_id_or_url, api_version=V1_0):
"""The object URL should be in the form of https://graph.microsoft.com/v1.0/directoryObjects/{id}
If object_id_or_url is a GUID, convert it to a URL.
Otherwise, it may already be a URL, use it as-is.
"""
from azure.cli.core.util import is_guid
return f'{self._endpoint}/{api_version}/directoryObjects/{object_id_or_url}' if is_guid(object_id_or_url) \
else object_id_or_url | The object URL should be in the form of https://graph.microsoft.com/v1.0/directoryObjects/{id}
If object_id_or_url is a GUID, convert it to a URL.
Otherwise, it may already be a URL, use it as-is. | get_object_url | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/_msgrpah/_graph_client.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/_msgrpah/_graph_client.py | MIT |
def set_object_properties(object_type, graph_object, **kwargs):
"""Set properties of the graph object according to its property map.
:param object_type: String representation of the object type. One of 'application', 'user', 'group'.
:param graph_object: Dict representing the graph object.
:param kwargs: CLI argument name-value pairs.
"""
# This design of passing the string representation of the object type mimics Azure Python SDK:
# body_content = self._serialize.body(parameters, 'ApplicationCreateParameters')
property_map = _object_type_to_property_map[object_type]
for arg, value in kwargs.items():
if value is not None:
property_path = property_map[arg]
# If property path is a list, such as web/implicitGrantSettings/enableIdTokenIssuance,
# create intermediate sub-objects if not present
if isinstance(property_path, list):
sub_object = graph_object
for property_name in property_path[0:-1]:
sub_object = sub_object.setdefault(property_name, {})
sub_object[property_path[-1]] = value
else:
graph_object[property_path] = value | Set properties of the graph object according to its property map.
:param object_type: String representation of the object type. One of 'application', 'user', 'group'.
:param graph_object: Dict representing the graph object.
:param kwargs: CLI argument name-value pairs. | set_object_properties | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/role/_msgrpah/_graph_objects.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/role/_msgrpah/_graph_objects.py | MIT |
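A worked example of the nested-path handling, using a made-up slice of the property map (the real keys live in `_object_type_to_property_map`, which is not shown here):

```python
property_map = {
    'display_name': 'displayName',                                                          # flat path
    'enable_id_token_issuance': ['web', 'implicitGrantSettings', 'enableIdTokenIssuance'],  # nested path
}

graph_object = {}
for arg, value in {'display_name': 'my-app', 'enable_id_token_issuance': True}.items():
    path = property_map[arg]
    if isinstance(path, list):
        sub = graph_object
        for name in path[:-1]:
            sub = sub.setdefault(name, {})  # create intermediate sub-objects on demand
        sub[path[-1]] = value
    else:
        graph_object[path] = value

assert graph_object == {
    'displayName': 'my-app',
    'web': {'implicitGrantSettings': {'enableIdTokenIssuance': True}},
}
```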
def test_eh_namespace_premium_standard(self, resource_group):
self.kwargs.update({
'loc': 'eastus',
'rg': resource_group,
'namespacename': self.create_random_name(prefix='eventhubs-nscli', length=20),
'namespacename1': self.create_random_name(prefix='eventhubs-nscli', length=20),
'namespacename2': self.create_random_name(prefix='eventhubs-nscli', length=20),
'namespacename3': self.create_random_name(prefix='eventhubs-nscli', length=20),
'loc1': 'East US',
'loc2': 'AustraliaEast',
'loc3': 'TaiwanNorth',
'clusterid': '/subscriptions/326100e2-f69d-4268-8503-075374f62b6e/resourceGroups/test-migration21/providers/Microsoft.EventHub/clusters/cluster91',
'clusterid2': '/subscriptions/326100e2-f69d-4268-8503-075374f62b6e/resourceGroups/AutomatedPowershellTesting/providers/Microsoft.EventHub/clusters/TestClusterAutomatic'
})
# Check for the NameSpace name Availability
self.cmd('eventhubs namespace exists --name {namespacename}', checks=[self.check('nameAvailable', True)])
# Create standard namespace with autoinflate enabled
namespace = self.cmd('eventhubs namespace create --name {namespacename} --resource-group {rg} '
'--capacity 10 --maximum-throughput-units 18 --sku Standard --location {loc} '
'--zone-redundant --tags k1=v1 k2=v2 --enable-auto-inflate --disable-local-auth '
'--enable-kafka --minimum-tls-version 1.1').get_output_in_json()
self.assertEqual(10, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(18, namespace['maximumThroughputUnits'])
self.assertEqual('1.1', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
# Set auto inflate enabled to false and true using update command
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--enable-auto-inflate false --maximum-throughput-units 0').get_output_in_json()
self.assertEqual(10, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(0, namespace['maximumThroughputUnits'])
self.assertEqual('1.1', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertFalse(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--enable-auto-inflate --maximum-throughput-units 18').get_output_in_json()
self.assertEqual(10, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(18, namespace['maximumThroughputUnits'])
self.assertEqual('1.1', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
# Update Capacity of standard namespace
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--capacity 12').get_output_in_json()
self.assertEqual(12, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(18, namespace['maximumThroughputUnits'])
self.assertEqual('1.1', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
# Update max throughput units
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--maximum-throughput-units 25').get_output_in_json()
self.assertEqual(12, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(25, namespace['maximumThroughputUnits'])
self.assertEqual('1.1', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
# Update Minimum tls version
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--minimum-tls-version 1.0').get_output_in_json()
self.assertEqual(12, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(25, namespace['maximumThroughputUnits'])
self.assertEqual('1.0', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
# Update Minimum tls version
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--minimum-tls-version 1.2').get_output_in_json()
self.assertEqual(12, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(25, namespace['maximumThroughputUnits'])
self.assertEqual('1.2', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--disable-local-auth false').get_output_in_json()
self.assertEqual(12, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(25, namespace['maximumThroughputUnits'])
self.assertEqual('1.2', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertFalse(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
namespace = self.cmd('eventhubs namespace update --name {namespacename} --resource-group {rg} '
'--disable-local-auth').get_output_in_json()
self.assertEqual(12, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(25, namespace['maximumThroughputUnits'])
self.assertEqual('1.2', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertTrue(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertTrue(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(2, len(namespace['tags']))
# Create default standard namespace
namespace = self.cmd('eventhubs namespace create --resource-group {rg} --name {namespacename1} '
'--location {loc} --sku Standard').get_output_in_json()
self.assertEqual(1, namespace['sku']['capacity'])
self.assertEqual('Standard', namespace['sku']['name'])
self.assertEqual(0, namespace['maximumThroughputUnits'])
self.assertEqual('1.2', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertFalse(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertFalse(namespace['disableLocalAuth'])
self.assertFalse(namespace['zoneRedundant'])
self.assertEqual(0, len(namespace['tags']))
# Create premium namespace
namespace = self.cmd('eventhubs namespace create --resource-group {rg} --name {namespacename2} '
'--location {loc} --sku Premium').get_output_in_json()
self.assertEqual(1, namespace['sku']['capacity'])
self.assertEqual('Premium', namespace['sku']['name'])
self.assertEqual(0, namespace['maximumThroughputUnits'])
self.assertEqual('1.2', namespace['minimumTlsVersion'])
self.assertEqual(self.kwargs['loc1'].strip().replace(' ', '').lower(), namespace['location'].strip().replace(' ', '').lower())
self.assertFalse(namespace['isAutoInflateEnabled'])
self.assertTrue(namespace['kafkaEnabled'])
self.assertFalse(namespace['disableLocalAuth'])
self.assertTrue(namespace['zoneRedundant'])
self.assertEqual(0, len(namespace['tags']))
        # Create a namespace with geo-replication enabled
namespace = self.cmd('eventhubs namespace create --resource-group {rg} --name {namespacename3} '
'--location {loc3} --sku Standard --geo-data-replication-config cluster-arm-id={clusterid} role-type=Primary location-name={loc3} '
'--cluster-arm-id {clusterid}').get_output_in_json()
namespace = self.cmd('eventhubs namespace replica add --resource-group {rg} --name {namespacename3} '
'--geo-data-replication-config cluster-arm-id={clusterid2} role-type=Secondary location-name={loc2} ').get_output_in_json()
self.assertEqual(2, len(namespace['geoDataReplication']['locations']))
namespace = self.cmd('eventhubs namespace update --resource-group {rg} --name {namespacename3} '
'--max-replication-lag-duration-in-seconds 300').get_output_in_json()
self.assertEqual(300, namespace['geoDataReplication']['maxReplicationLagDurationInSeconds'])
time.sleep(600)
namespace = self.cmd('eventhubs namespace failover --name {namespacename3} --resource-group {rg} '
'--primary-location {loc2} ').get_output_in_json()
'''namespace = self.cmd('eventhubs namespace replica remove --resource-group {rg} --name {namespacename3} '
'--geo-data-replication-config cluster-arm-id={clusterid2} role-type=Secondary location-name={loc2} ').get_output_in_json()'''
        # List namespaces within the resource group
        self.cmd('eventhubs namespace list --resource-group {rg}')
        # List all namespaces within the subscription
        # self.cmd('eventhubs namespace list')
        # Delete the namespaces
        self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename}')
        self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename1}')
        self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename2}') | namespace = self.cmd('eventhubs namespace replica remove --resource-group {rg} --name {namespacename3} '
'--geo-data-replication-config cluster-arm-id={clusterid2} role-type=Secondary location-name={loc2} ').get_output_in_json() | test_eh_namespace_premium_standard | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/eventhubs/tests/latest/test_eventhub_commands_namespace_premium_test.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/eventhubs/tests/latest/test_eventhub_commands_namespace_premium_test.py | MIT |
def test_eh_create_update(self, resource_group):
self.kwargs.update({
'loc': 'westus2',
'rg': resource_group,
'namespacename': self.create_random_name(prefix='eventhubs-nscli', length=20),
'namespacename1': self.create_random_name(prefix='eventhubs-nscli', length=20),
'namespacename2': self.create_random_name(prefix='eventhubs-nscli', length=20),
'tags': {'tag1: value1', 'tag2: value2'},
'sku': 'Standard',
'tier': 'Standard',
'eventhubname1': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
'eventhubname2': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
'eventhubname3': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
'eventhubname4': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
'eventhubname5': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
'eventhubname6': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
'isautoinflateenabled': 'True',
'maximumthroughputunits': 4,
'messageretentionindays': 4,
'partitioncount': 4,
'identity1': self.create_random_name(prefix='eh-identity1', length=20),
'identity2': self.create_random_name(prefix='eh-identity2', length=20),
'destinationname': 'EventHubArchive.AzureBlockBlob',
'storageaccount': self.create_random_name(prefix='storageehnscli', length=20),
'containername': self.create_random_name(prefix='container-nscli', length=20),
'blobcontainer': 'container01',
'storageaccount1': self.create_random_name(prefix='storageehnscli', length=20),
'containername1': self.create_random_name(prefix='container-nscli', length=20),
'blobcontainer1': 'container02',
'storageaccount2': self.create_random_name(prefix='storageehnscli', length=20),
'containername2': self.create_random_name(prefix='container-nscli', length=20),
'blobcontainer2': 'container03',
'capturesizelimit': 314572799,
'archinvenameformat': '{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}'
})
storage_account = self.cmd('storage account create -n {storageaccount} -g {rg} -l westus --sku Standard_LRS').get_output_in_json()
self.kwargs.update({'storageid': storage_account['id']})
container = self.cmd('storage container create -n {containername} -g {rg} --account-name {storageaccount}').get_output_in_json()
self.cmd('eventhubs namespace create --resource-group {rg} --name {namespacename} --location {loc} --tags {tags} --sku {sku} --enable-auto-inflate {isautoinflateenabled} --maximum-throughput-units {maximumthroughputunits}')
eh1 = self.cmd('eventhubs eventhub create -g {rg} -n {eventhubname1} --namespace-name {namespacename} --retention-time-in-hours 24 --partition-count 15 --enable-capture true --capture-interval 100 --capture-size-limit 314572799 '
'--destination-name {destinationname} --storage-account {storageid} --blob-container {containername} --archive-name-format {archinvenameformat} --cleanup-policy Delete').get_output_in_json()
self.assertEqual(eh1['name'], self.kwargs['eventhubname1'])
self.assertEqual(eh1['retentionDescription']['retentionTimeInHours'], 24)
self.assertEqual(eh1['partitionCount'], 15)
self.assertEqual(eh1['captureDescription']['enabled'], True)
self.assertEqual(eh1['captureDescription']['intervalInSeconds'], 100)
self.assertEqual(eh1['captureDescription']['sizeLimitInBytes'], self.kwargs['capturesizelimit'])
self.assertEqual(eh1['captureDescription']['destination']['archiveNameFormat'], self.kwargs['archinvenameformat'])
self.assertEqual(eh1['captureDescription']['destination']['blobContainer'], self.kwargs['containername'])
self.assertEqual(eh1['captureDescription']['destination']['name'], self.kwargs['destinationname'])
self.assertEqual(eh1['captureDescription']['destination']['storageAccountResourceId'], self.kwargs['storageid'])
eh1 = self.cmd(
'eventhubs eventhub update -g {rg} -n {eventhubname1} --namespace-name {namespacename} --enable-capture false').get_output_in_json()
self.assertEqual(eh1['name'], self.kwargs['eventhubname1'])
self.assertEqual(eh1['messageRetentionInDays'], 1)
self.assertEqual(eh1['partitionCount'], 15)
self.assertEqual(eh1['captureDescription']['enabled'], False)
self.assertEqual(eh1['captureDescription']['intervalInSeconds'], 100)
self.assertEqual(eh1['captureDescription']['sizeLimitInBytes'], self.kwargs['capturesizelimit'])
self.assertEqual(eh1['captureDescription']['destination']['archiveNameFormat'],
self.kwargs['archinvenameformat'])
self.assertEqual(eh1['captureDescription']['destination']['blobContainer'], self.kwargs['containername'])
self.assertEqual(eh1['captureDescription']['destination']['name'], self.kwargs['destinationname'])
self.assertEqual(eh1['captureDescription']['destination']['storageAccountResourceId'], self.kwargs['storageid'])
eh2 = self.cmd(
'eventhubs eventhub create -g {rg} -n {eventhubname2} --namespace-name {namespacename} --partition-count 15').get_output_in_json()
self.assertEqual(eh2['name'], self.kwargs['eventhubname2'])
self.assertEqual(eh2['partitionCount'], 15)
eh2 = self.cmd(
'eventhubs eventhub update -g {rg} -n {eventhubname2} --namespace-name {namespacename} --enable-capture --capture-interval 100 --capture-size-limit 314572799 '
'--destination-name {destinationname} --storage-account {storageid} --blob-container {containername} --archive-name-format {archinvenameformat}').get_output_in_json()
self.assertEqual(eh2['name'], self.kwargs['eventhubname2'])
self.assertEqual(eh2['partitionCount'], 15)
self.assertEqual(eh2['captureDescription']['enabled'], True)
self.assertEqual(eh2['captureDescription']['intervalInSeconds'], 100)
self.assertEqual(eh2['captureDescription']['sizeLimitInBytes'], self.kwargs['capturesizelimit'])
self.assertEqual(eh2['captureDescription']['destination']['archiveNameFormat'],
self.kwargs['archinvenameformat'])
self.assertEqual(eh2['captureDescription']['destination']['blobContainer'], self.kwargs['containername'])
self.assertEqual(eh2['captureDescription']['destination']['name'], self.kwargs['destinationname'])
self.assertEqual(eh2['captureDescription']['destination']['storageAccountResourceId'], self.kwargs['storageid'])
self.cmd(
'eventhubs namespace create --resource-group {rg} --name {namespacename1} --location {loc} --tags {tags} --sku Premium ')
eh3 = self.cmd(
'eventhubs eventhub create -g {rg} -n {eventhubname3} --namespace-name {namespacename1} --cleanup-policy Delete --retention-time 7 ').get_output_in_json()
self.assertEqual(eh3['name'], self.kwargs['eventhubname3'])
self.assertEqual(eh3['retentionDescription']['cleanupPolicy'], "Delete")
self.assertEqual(eh3['retentionDescription']['retentionTimeInHours'], 7)
eh4 = self.cmd(
'eventhubs eventhub create -g {rg} -n {eventhubname4} --namespace-name {namespacename1} --cleanup-policy Compact').get_output_in_json()
self.assertEqual(eh4['name'], self.kwargs['eventhubname4'])
self.assertEqual(eh4['retentionDescription']['cleanupPolicy'], "Compact")
storage_account1 = self.cmd(
'storage account create -n {storageaccount1} -g {rg} -l westus --sku Standard_LRS').get_output_in_json()
self.kwargs.update({'storageid1': storage_account1['id']})
container = self.cmd(
'storage container create -n {containername1} -g {rg} --account-name {storageaccount1}').get_output_in_json()
eh5 = self.cmd(
'eventhubs eventhub create -g {rg} -n {eventhubname5} --namespace-name {namespacename} --partition-count 15 --enable-capture true --capture-interval 100 --capture-size-limit 314572799 '
'--destination-name {destinationname} --storage-account {storageid1} --blob-container {containername1} --archive-name-format {archinvenameformat} --cleanup-policy Compact').get_output_in_json()
self.assertEqual(eh5['name'], self.kwargs['eventhubname5'])
self.assertEqual(eh5['partitionCount'], 15)
self.assertEqual(eh5['captureDescription']['enabled'], True)
self.assertEqual(eh5['captureDescription']['intervalInSeconds'], 100)
self.assertEqual(eh5['captureDescription']['sizeLimitInBytes'], self.kwargs['capturesizelimit'])
self.assertEqual(eh5['captureDescription']['destination']['archiveNameFormat'],
self.kwargs['archinvenameformat'])
self.assertEqual(eh5['captureDescription']['destination']['blobContainer'], self.kwargs['containername1'])
self.assertEqual(eh5['captureDescription']['destination']['name'], self.kwargs['destinationname'])
self.assertEqual(eh5['captureDescription']['destination']['storageAccountResourceId'], self.kwargs['storageid1'])
identity2 = self.cmd('identity create --name {identity2} --resource-group {rg}').get_output_in_json()
self.kwargs.update({'id2': identity2['id']})
self.kwargs.update({'id3': identity2['principalId']})
storage_account2 = self.cmd(
'storage account create -n {storageaccount2} -g {rg} -l westus --sku Standard_RAGRS ').get_output_in_json()
self.kwargs.update({'storageid2': storage_account2['id']})
container = self.cmd(
'storage container create -n {containername2} -g {rg} --account-name {storageaccount2}').get_output_in_json()
'''self.cmd(
'az role assignment create --assignee {id3} --role "Storage Blob Data Contributor" --scope {storageid2}')'''
self.cmd(
'eventhubs namespace create --resource-group {rg} --name {namespacename2} --location {loc} --tags {tags} --sku Premium '
'--mi-user-assigned {id2}').get_output_in_json()
time.sleep(200)
eh6 = self.cmd(
'eventhubs eventhub create -g {rg} -n {eventhubname6} --namespace-name {namespacename2} --partition-count 15 --enable-capture true --capture-interval 100 --capture-size-limit 314572799 '
'--destination-name {destinationname} --storage-account {storageid2} --blob-container {containername2} --archive-name-format {archinvenameformat} '
'--mi-user-assigned {id2}').get_output_in_json()
self.cmd('eventhubs eventhub delete --resource-group {rg} --namespace-name {namespacename1} --name {eventhubname4}')
self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename}')
self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename1}')
self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename2}') | self.cmd(
'az role assignment create --assignee {id3} --role "Storage Blob Data Contributor" --scope {storageid2}') | test_eh_create_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/eventhubs/tests/latest/test_eventhub_entity_crud.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/eventhubs/tests/latest/test_eventhub_entity_crud.py | MIT |
def datetime_type(string):
""" Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ', '%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
return datetime.strptime(string, form)
except ValueError:
continue
raise ValueError("Input '{}' not valid. Valid example: 2017-02-11T23:59:59Z".format(string)) | Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 | datetime_type | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/consumption/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/consumption/_validators.py | MIT |
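A quick usage sketch of the accepted formats; this is not part of the CLI source — the helper below mirrors the fall-through loop in datetime_type so the snippet runs standalone.
from datetime import datetime

ACCEPTED_DATE_FORMATS = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ', '%Y-%m-%dT%HZ', '%Y-%m-%d']

def parse_utc(string):
    # Same strategy as datetime_type above: try each form, return the first match
    for form in ACCEPTED_DATE_FORMATS:
        try:
            return datetime.strptime(string, form)
        except ValueError:
            continue
    raise ValueError("Input '{}' not valid.".format(string))

for sample in ('2017-12-31T01:11:59Z', '2017-12-31T01:11Z', '2017-12-31T01Z', '2017-12-31'):
    print(sample, '->', parse_utc(sample))  # omitted time fields default to zero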
def validate_both_start_end_dates(args):
"""Validates the existence of both start and end dates in the parameter or neither"""
if bool(has_value(args.start_date)) != bool(has_value(args.end_date)):
raise CLIError("usage error: Both --start-date and --end-date need to be supplied or neither.") | Validates the existence of both start and end dates in the parameter or neither | validate_both_start_end_dates | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/consumption/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/consumption/custom.py | MIT |
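The bool-inequality above is a compact XOR: it errors exactly when one of the two dates is supplied without the other. A minimal standalone illustration, with plain strings standing in for the CLI's argument objects:
def check_dates(start_date, end_date):
    # True != False (or False != True) means exactly one date was supplied
    if bool(start_date) != bool(end_date):
        raise ValueError("Both --start-date and --end-date need to be supplied or neither.")

check_dates('2017-01-01', '2017-02-01')  # ok: both supplied
check_dates(None, None)                  # ok: neither supplied
check_dates('2017-01-01', None)          # raises ValueError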
def validate_reservation_summary(self):
"""lowercase the data grain for comparison"""
args = self.ctx.args
data_grain = args.grain.to_serialized_data().lower()
if data_grain not in ('daily', 'monthly'):
raise CLIError("usage error: --grain can be either daily or monthly.")
if data_grain == 'daily' and (not has_value(args.start_date) or not has_value(args.end_date)):
raise CLIError("usage error: Both --start-date and --end-date need to be supplied for daily grain.") | lowercase the data grain for comparison | validate_reservation_summary | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/consumption/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/consumption/custom.py | MIT |
def datetime_type(string):
""" Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ', '%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
return datetime.strptime(string, form)
except ValueError:
continue
raise ValueError("Input '{}' not valid. Valid example: 2017-02-11T23:59:59Z".format(string)) | Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 | datetime_type | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/consumption/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/consumption/custom.py | MIT |
def _resolve_api_version(rcf, resource_provider_namespace, parent_resource_path, resource_type):
"""
This is copied from src/azure-cli/azure/cli/command_modules/resource/custom.py in Azure/azure-cli
"""
from azure.cli.core.parser import IncorrectUsageError
provider = rcf.providers.get(resource_provider_namespace)
# If available, we will use parent resource's api-version
resource_type_str = (parent_resource_path.split('/')[0] if parent_resource_path else resource_type)
rt = [t for t in provider.resource_types if t.resource_type.lower() == resource_type_str.lower()]
if not rt:
raise IncorrectUsageError('Resource type {} not found.'.format(resource_type_str))
if len(rt) == 1 and rt[0].api_versions:
npv = [v for v in rt[0].api_versions if 'preview' not in v.lower()]
return npv[0] if npv else rt[0].api_versions[0]
raise IncorrectUsageError(
'API version is required and could not be resolved for resource {}'.format(resource_type)) | This is copied from src/azure-cli/azure/cli/command_modules/resource/custom.py in Azure/azure-cli | _resolve_api_version | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/_validators.py | MIT |
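The selection rule in the last few lines prefers the first non-preview API version and falls back to a preview one only when nothing else exists. A sketch with made-up version strings:
api_versions = ['2023-05-01-preview', '2022-09-01', '2022-01-01']
npv = [v for v in api_versions if 'preview' not in v.lower()]
print(npv[0] if npv else api_versions[0])  # -> 2022-09-01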
def validate_ip_tags(namespace):
""" Extracts multiple space-separated tags in TYPE=VALUE format """
if namespace.ip_tags:
ip_tags = []
for item in namespace.ip_tags:
tag_type, tag_value = item.split('=', 1)
ip_tags.append({"ip_tag_type": tag_type, "tag": tag_value})
namespace.ip_tags = ip_tags | Extracts multiple space-separated tags in TYPE=VALUE format | validate_ip_tags | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/_validators.py | MIT |
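A minimal sketch of the TYPE=VALUE parsing performed above; the Namespace stand-in and the tag values are illustrative, not taken from the CLI.
from argparse import Namespace

ns = Namespace(ip_tags=['FirstPartyUsage=/Sql', 'RoutingPreference=Internet'])
# split('=', 1) keeps any '=' inside the value intact
ns.ip_tags = [
    {"ip_tag_type": tag_type, "tag": tag_value}
    for tag_type, tag_value in (item.split('=', 1) for item in ns.ip_tags)
]
print(ns.ip_tags)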
def get_public_ip_validator(has_type_field=False, allow_none=False, allow_new=False,
default_none=False):
""" Retrieves a validator for public IP address. Accepting all defaults will perform a check
for an existing name or ID with no ARM-required -type parameter. """
from azure.mgmt.core.tools import is_valid_resource_id, resource_id
def simple_validator(cmd, namespace):
if namespace.public_ip_address:
is_list = isinstance(namespace.public_ip_address, list)
def _validate_name_or_id(public_ip):
# determine if public_ip_address is name or ID
is_id = is_valid_resource_id(public_ip)
return public_ip if is_id else resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='publicIPAddresses',
name=public_ip)
if is_list:
for i, public_ip in enumerate(namespace.public_ip_address):
namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
else:
namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)
def complex_validator_with_type(cmd, namespace):
get_folded_parameter_validator(
'public_ip_address', 'Microsoft.Network/publicIPAddresses', '--public-ip-address',
allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
return complex_validator_with_type if has_type_field else simple_validator | Retrieves a validator for public IP address. Accepting all defaults will perform a check
for an existing name or ID with no ARM-required -type parameter. | get_public_ip_validator | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/_validators.py | MIT |
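The core of simple_validator is the name-or-ID folding; the same expansion can be exercised directly (requires the azure-mgmt-core package; the subscription and group values below are placeholders):
from azure.mgmt.core.tools import is_valid_resource_id, resource_id

def fold_public_ip(public_ip, subscription, resource_group):
    if is_valid_resource_id(public_ip):
        return public_ip  # already a full ARM ID, pass through unchanged
    return resource_id(subscription=subscription, resource_group=resource_group,
                       namespace='Microsoft.Network', type='publicIPAddresses',
                       name=public_ip)

print(fold_public_ip('pubip1', '00000000-0000-0000-0000-000000000000', 'rg1'))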
def add_waf_managed_rule_set(cmd, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
Add managed rule set to the WAF policy managed rules.
Visit: https://learn.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
if rules is None:
managed_rule_overrides = []
else:
managed_rule_overrides = rules
rule_group_override = None
if rule_group_name is not None:
rule_group_override = {
"rule_group_name": rule_group_name,
"rules": managed_rule_overrides
}
if rule_group_override is None:
rule_group_overrides = []
else:
rule_group_overrides = [rule_group_override]
new_managed_rule_set = {
"rule_set_type": rule_set_type,
"rule_set_version": rule_set_version,
"rule_group_overrides": rule_group_overrides
}
from .aaz.latest.network.application_gateway.waf_policy import Update
class WAFManagedRuleSetAdd(Update):
def pre_instance_update(self, instance):
for rule_set in instance.properties.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
# add one rule
rule_override.rules.extend(managed_rule_overrides)
break
else:
# add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# add new rule set
instance.properties.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return WAFManagedRuleSetAdd(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"name": policy_name
}) | Add managed rule set to the WAF policy managed rules.
Visit: https://learn.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules | add_waf_managed_rule_set | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/custom.py | MIT |
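For reference, the dictionary assembled above takes this shape once populated; the type, version, group, and rule values here are examples drawn from the public CRS documentation, not defaults of the command.
new_managed_rule_set = {
    "rule_set_type": "OWASP",
    "rule_set_version": "3.2",
    "rule_group_overrides": [{
        "rule_group_name": "REQUEST-921-PROTOCOL-ATTACK",
        # each entry in `rules` overrides one rule in the group
        "rules": [{"rule_id": "921110", "state": "Disabled"}],
    }],
}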
def update_waf_managed_rule_set(cmd, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
    Update (override) an existing rule set within the WAF policy's managed rules.
"""
managed_rule_overrides = rules if rules else None
rule_group_override = {
"rule_group_name": rule_group_name,
"rules": managed_rule_overrides
} if managed_rule_overrides else None
if rule_group_override is None:
rule_group_overrides = []
else:
rule_group_overrides = [rule_group_override]
new_managed_rule_set = {
"rule_set_type": rule_set_type,
"rule_set_version": rule_set_version,
"rule_group_overrides": rule_group_overrides
}
from .aaz.latest.network.application_gateway.waf_policy import Update
class WAFManagedRuleSetUpdate(Update):
def pre_instance_update(self, instance):
updated_rule_set = None
for rule_set in instance.properties.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
updated_rule_set = rule_set
break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((g for g in rule_set.rule_group_overrides if g.rule_group_name == rule_group_name), None)
if rg:
rg.rules = managed_rule_overrides
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
new_managed_rule_sets = []
for rule_set in instance.properties.managed_rules.managed_rule_sets:
if rule_set == updated_rule_set:
continue
new_managed_rule_sets.append(rule_set)
new_managed_rule_sets.append(new_managed_rule_set)
instance.properties.managed_rules.managed_rule_sets = new_managed_rule_sets
return WAFManagedRuleSetUpdate(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"name": policy_name
    }) | Update (override) an existing rule set within the WAF policy's managed rules. | update_waf_managed_rule_set | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/custom.py | MIT
def remove_waf_managed_rule_set(cmd, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
    Remove a managed rule set by rule group name if rule_group_name is specified. Otherwise, remove the whole rule set.
"""
from .aaz.latest.network.application_gateway.waf_policy import Update
class WAFManagedRuleSetRemove(Update):
def pre_instance_update(self, instance):
delete_rule_set = None
for rule_set in instance.properties.managed_rules.managed_rule_sets:
                if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
# remove one rule from rule group
is_removed = False
new_rule_group_overrides = []
for rg in rule_set.rule_group_overrides:
if rg.rule_group_name == rule_group_name and not is_removed:
is_removed = True
continue
new_rule_group_overrides.append(rg)
if not is_removed:
err_msg = f"Rule set group [{rule_group_name}] is not found."
raise ResourceNotFoundError(err_msg)
rule_set.rule_group_overrides = new_rule_group_overrides
if delete_rule_set:
new_managed_rule_sets = []
for rule_set in instance.properties.managed_rules.managed_rule_sets:
if rule_set == delete_rule_set:
continue
new_managed_rule_sets.append(rule_set)
instance.properties.managed_rules.managed_rule_sets = new_managed_rule_sets
return WAFManagedRuleSetRemove(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"name": policy_name
    }) | Remove a managed rule set by rule group name if rule_group_name is specified. Otherwise, remove the whole rule set. | remove_waf_managed_rule_set | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/custom.py | MIT
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
:param parent_zone: the parent zone name / FQDN of the parent zone.
if parent zone name is mentioned, assume current subscription and resource group.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone["nameServers"]:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr) | :param child_zone: the zone object corresponding to the child that is created.
:param parent_zone: the parent zone name / FQDN of the parent zone.
if parent zone name is mentioned, assume current subscription and resource group.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone | add_dns_delegation | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/custom.py | MIT |
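A minimal sketch of how the delegation record name is derived above: strip the parent zone suffix from the child zone FQDN (zone names here are illustrative).
child_zone_name = 'team.contoso.com'
parent_zone_name = 'contoso.com'
if child_zone_name.endswith(parent_zone_name):
    record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
    print(record_set_name)  # -> 'team': the NS record set added to the parent zone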
def test_nw_flow_log_show(self, resource_group, resource_group_location, storage_account):
"""
This test is used to demonstrate different outputs between the new and deprecating parameters
:param resource_group:
:param resource_group_location:
:param storage_account:
:return:
"""
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'nsg': 'nsg1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test2',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
nsg_info = self.cmd('network nsg create -g {rg} -n {nsg}').get_output_in_json()
self.kwargs.update({
'nsg_id': nsg_info['NewNSG']['id']
})
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location {location} '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nsg {nsg} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
        # This output is formatted as a new Azure Management Resource.
self.cmd('network watcher flow-log show --location {location} --name {flow_log}', checks=[
self.check('name', self.kwargs['flow_log']),
self.check('enabled', True),
self.check('format.type', 'JSON'),
self.check('format.version', 1),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False),
])
        # This output comes from the deprecating --nsg parameter
self.cmd('network watcher flow-log show --nsg {nsg_id}', checks=[
self.check('enabled', True),
self.check('format.type', 'JSON'),
self.check('format.version', 1),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False)
]) | This test is used to demonstrate different outputs between the new and deprecating parameters
:param resource_group:
:param resource_group_location:
:param storage_account:
:return: | test_nw_flow_log_show | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/latest/test_nw_flow_log.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/latest/test_nw_flow_log.py | MIT |
def test_manage_appgw_private_endpoint(self, resource_group):
"""
Add/Remove/Show/List Private Link
"""
self.kwargs.update({
'appgw': 'appgw',
'appgw_private_link_for_public': 'appgw_private_link_for_public',
'appgw_private_link_for_private': 'appgw_private_link_for_private',
'appgw_private_link_subnet_for_public': 'appgw_private_link_subnet_for_public',
'appgw_private_link_subnet_for_private': 'appgw_private_link_subnet_for_private',
'appgw_public_ip': 'public_ip',
'appgw_private_ip': 'private_ip',
'appgw_private_endpoint_for_public': 'appgw_private_endpoint_for_public',
'appgw_private_endpoint_for_private': 'appgw_private_endpoint_for_private',
'appgw_private_endpoint_vnet': 'appgw_private_endpoint_vnet',
'appgw_private_endpoint_subnet_for_public': 'appgw_private_endpoint_subnet_for_public',
'appgw_private_endpoint_subnet_for_private': 'appgw_private_endpoint_subnet_for_private',
'appgw_private_endpoint_connection_for_public': 'appgw_private_endpoint_connection_for_public',
'appgw_private_endpoint_connection_for_private': 'appgw_private_endpoint_connection_for_private'
})
        # Enabling the private link feature on Application Gateway requires a public IP with Standard tier
self.cmd('network public-ip create -g {rg} -n {appgw_public_ip} --sku Standard')
        # Create an application gateway without --enable-private-link
self.cmd('network application-gateway create -g {rg} -n {appgw} '
'--sku Standard_v2 '
'--public-ip-address {appgw_public_ip} '
'--priority 1001')
# Add one private link
self.cmd('network application-gateway private-link add -g {rg} '
'--gateway-name {appgw} '
'--name {appgw_private_link_for_public} '
'--frontend-ip appGatewayFrontendIP '
'--subnet {appgw_private_link_subnet_for_public} '
'--subnet-prefix 10.0.4.0/24')
show_appgw_data = self.cmd('network application-gateway show -g {rg} -n {appgw}').get_output_in_json()
self.kwargs.update({
'appgw_id': show_appgw_data['id']
})
self.cmd('network application-gateway private-link show -g {rg} --gateway-name {appgw} '
'--name {appgw_private_link_for_public}')
self.cmd('network application-gateway private-link list -g {rg} --gateway-name {appgw} ')
private_link_resource = self.cmd('network private-link-resource list --id {appgw_id}').get_output_in_json()
self.assertEqual(len(private_link_resource), 1)
self.assertEqual(private_link_resource[0]['name'], 'appGatewayFrontendIP')
self.kwargs.update({
'private_link_group_id_for_public': private_link_resource[0]['properties']['groupId']
})
# Prepare the first vnet to be connected to
self.cmd('network vnet create -g {rg} '
'--name {appgw_private_endpoint_vnet} '
'--subnet-name {appgw_private_endpoint_subnet_for_public}')
        # Enabling a private endpoint on a vnet requires --disable-private-endpoint-network-policies=true
self.cmd('network vnet subnet update -g {rg} '
'--vnet-name {appgw_private_endpoint_vnet} '
'--name {appgw_private_endpoint_subnet_for_public} '
'--disable-private-endpoint-network-policies true')
# Create the first private endpoint against this application gateway's public IP
self.cmd('network private-endpoint create -g {rg} '
'--name {appgw_private_endpoint_for_public} '
'--connection-name {appgw_private_endpoint_connection_for_public} '
'--vnet-name {appgw_private_endpoint_vnet} '
'--subnet {appgw_private_endpoint_subnet_for_public} '
'--private-connection-resource-id {appgw_id} '
'--group-id {private_link_group_id_for_public}')
# ------------------------------------------------------------------------------------------
# Add another frontend IP
self.cmd('network application-gateway frontend-ip create -g {rg} '
'--gateway-name {appgw} '
'--name {appgw_private_ip} '
'--vnet-name "{appgw}Vnet" '
'--subnet default '
'--private-ip-address 10.0.0.11')
# Add another private link
self.cmd('network application-gateway private-link add -g {rg} '
'--gateway-name {appgw} '
'--name {appgw_private_link_for_private} '
'--frontend-ip {appgw_private_ip} '
'--subnet {appgw_private_link_subnet_for_private} '
'--subnet-prefix 10.0.5.0/24')
self.cmd('network application-gateway private-link show -g {rg} --gateway-name {appgw} '
'--name {appgw_private_link_for_private}')
self.cmd('network application-gateway private-link list -g {rg} --gateway-name {appgw} ')
self.cmd('network application-gateway frontend-port create -g {rg} '
'--gateway {appgw} '
'--name privatePort '
'--port 8080 ')
        # Another HTTP listener for the private IP is necessary to set up the private link properly
self.cmd('network application-gateway http-listener create -g {rg} '
'--gateway-name {appgw} '
'--name privateHTTPListener '
'--frontend-port privatePort '
'--frontend-ip {appgw_private_ip} ')
# Associate a rule for private http listener
self.cmd('network application-gateway rule create -g {rg} '
'--gateway {appgw} '
'--name privateRule '
'--http-listener privateHTTPListener '
'--priority 1002')
private_link_resource = self.cmd('network private-link-resource list --id {appgw_id}').get_output_in_json()
self.assertEqual(len(private_link_resource), 2)
self.assertEqual(private_link_resource[1]['name'], self.kwargs['appgw_private_ip'])
self.kwargs.update({
'private_link_group_id_for_private': private_link_resource[1]['properties']['groupId']
})
# Prepare the second vnet to be connected to
self.cmd('network vnet subnet create -g {rg} '
'--vnet-name {appgw_private_endpoint_vnet} '
'--name {appgw_private_endpoint_subnet_for_private} '
'--address-prefixes 10.0.6.0/24')
        # Enabling a private endpoint on a vnet requires --disable-private-endpoint-network-policies=true
self.cmd('network vnet subnet update -g {rg} '
'--vnet-name {appgw_private_endpoint_vnet} '
'--name {appgw_private_endpoint_subnet_for_private} '
'--disable-private-endpoint-network-policies true')
# Create the second private endpoint against this application gateway's private IP
self.cmd('network private-endpoint create -g {rg} '
'--name {appgw_private_endpoint_for_private} '
'--connection-name {appgw_private_endpoint_connection_for_private} '
'--vnet-name {appgw_private_endpoint_vnet} '
'--subnet {appgw_private_endpoint_subnet_for_private} '
'--private-connection-resource-id {appgw_id} '
'--group-id {private_link_group_id_for_private}')
        # The private links cannot be removed until all private endpoint connections are removed
# self.cmd('network application-gateway private-link remove -g {rg} '
# '--gateway-name {appgw} '
# '--name {appgw_private_link_for_public} '
# '--yes')
# self.cmd('network application-gateway private-link remove -g {rg} '
# '--gateway-name {appgw} '
# '--name {appgw_private_link_for_private} '
# '--yes')
self.cmd('network application-gateway private-link list -g {rg} --gateway-name {appgw} ') | Add/Remove/Show/List Private Link | test_manage_appgw_private_endpoint | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py | MIT |
def test_manage_appgw_private_endpoint_without_standard(self, resource_group):
"""
Add Private Link without standard
"""
self.kwargs.update({
'appgw': 'appgw',
'appgw_private_link_for_public': 'appgw_private_link_for_public',
'appgw_private_link_subnet_for_public': 'appgw_private_link_subnet_for_public',
'appgw_public_ip': 'public_ip',
})
        # The private link feature on Application Gateway requires a Standard-tier public IP; create a Basic one so enabling it fails
self.cmd('network public-ip create -g {rg} -n {appgw_public_ip} --sku basic')
# Create a application gateway without enable --enable-private-link
self.cmd('network application-gateway create -g {rg} -n {appgw} '
'--public-ip-address {appgw_public_ip} --priority 1001')
# Add one private link
        # This will fail because the private link feature cannot be enabled for the selected SKU
with self.assertRaises(HttpResponseError):
self.cmd('network application-gateway private-link add -g {rg} '
'--gateway-name {appgw} '
'--name {appgw_private_link_for_public} '
'--frontend-ip appGatewayFrontendIP '
'--subnet {appgw_private_link_subnet_for_public} '
                     '--subnet-prefix 10.0.4.0/24 '
                     '--no-wait')
def test_private_link_resource_deidservice(self, resource_group):
"""Test for private link resource deidservice"""
self.kwargs.update({
'serviceName': self.create_random_name('cli-test-deid-plr-', 24),
'loc': 'eastus',
'rg': resource_group
})
self.cmd('az extension add -n healthcareapis')
self.cmd(
'az healthcareapis deidservice create --name {serviceName} -g {rg} --location {loc}'
)
self.cmd(
'az network private-link-resource list --name {serviceName} --resource-group {rg} '
'--type Microsoft.HealthDataAiservices/deidservices',
checks=[
self.check('length(@)', 1),
self.check('[0].properties.groupId', 'deid')
]
) | Test for private link resource deidservice | test_private_link_resource_deidservice | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py | MIT |
def test_private_endpoint_connection_deidservice(self, resource_group):
"""Test for private endpoint connection to the deidservice"""
self.kwargs.update({
'serviceName': self.create_random_name('cli-test-deid-pe-', 24),
'loc': 'eastus',
'rg': resource_group,
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24)
})
# Prepare deidservice and network
self.cmd('az extension add -n healthcareapis')
service = self.cmd(
'az healthcareapis deidservice create --name {serviceName} -g {rg} --location {loc}'
).get_output_in_json()
self.kwargs['service_id'] = service['id']
# Create vnet and subnet
self.cmd(
'az network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1)
)
# Set private-endpoint-network-policies to disabled to allow private endpoint connection
self.cmd(
'az network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--private-endpoint-network-policies Disabled',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled')
)
# Create a private-endpoint connection to the deidservice
pe = self.cmd(
'az network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {service_id} '
'--group-id deid'
).get_output_in_json()
print(f"Private endpoint created: {pe}", flush=True) #< Does not return the full connection ID
# Show the connection at deidservice side
list_result = self.cmd(
'az network private-endpoint-connection list --name {serviceName} -g {rg} '
'--type Microsoft.HealthDataAiservices/deidservices',
checks=self.check('length(@)', 1)
).get_output_in_json()
# Find the private endpoint ID
# << Bug Workaround >>
# Workaround for obtaining the full private endpoint connection ID.
# The command 'az network private-endpoint create' does not return the full ID,
# as it lacks the unique identifier at the end. The following code remedies this issue.
pe_connection_id = None
pe_connection_name = None
for connection in list_result:
if connection["name"].startswith(self.kwargs["pe"]):
pe_connection_id = connection["id"]
pe_connection_name = connection["name"]
break
if pe_connection_id:
# Show the private endpoint connection details
show_result = self.cmd(
f'az network private-endpoint-connection show --id {pe_connection_id}',
checks=self.check(
'properties.privateLinkServiceConnectionState.status', 'Approved'
)
).get_output_in_json()
print(f"Private endpoint connection details: {show_result}", flush=True)
# Delete private endpoint connection
delete_cmd = (
f'az network private-endpoint-connection delete --name {pe_connection_name} '
f'-g {self.kwargs["rg"]} --resource-name {self.kwargs["serviceName"]} '
f'--type Microsoft.HealthDataAiservices/deidservices -y'
)
print(f"delete_cmd: {delete_cmd}", flush=True)
delete_result = self.cmd(delete_cmd)
print(f"delete_result: {delete_result}", flush=True)
            # Wait for deletion to complete; retry for up to 60 seconds
for _ in range(60):
# Verify deletion via list command
connections = self.cmd(
'az network private-endpoint-connection list --name {serviceName} -g {rg} '
'--type Microsoft.HealthDataAiservices/deidservices'
).get_output_in_json()
if len(connections) == 0:
print('Private endpoint connection deleted successfully')
break
print(f"Connections still exist: {connections}", flush=True)
time.sleep(1)
else: # This block runs if the for loop completes without breaking (i.e., if the deletion didn't complete in time)
self.fail("Private endpoint connection deletion did not complete in time")
self.assertEqual(len(connections), 0)
else:
self.fail("Created private endpoint connection not found, could not proceed with further tests") | Test for private endpoint connection to the deidservice | test_private_endpoint_connection_deidservice | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py | MIT |
def test_network_private_endpoints(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'sku': 'Standard',
'vnet': 'vnet1',
'subnet1': 'subnet1',
'subnet2': 'subnet2',
'location': 'centralus',
'ip': 'pubip1',
'lks1': 'lks1',
'lks2': 'lks2',
'pe': 'pe1',
'rg': resource_group,
'nic': 'nic1',
})
# Create PLS
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet1} -l {location}')
self.cmd('network lb create -g {rg} -l {location} -n {lb} --public-ip-address {ip} --sku {sku}')
self.cmd('network vnet subnet update -g {rg} -n {subnet1} --vnet-name {vnet} --disable-private-link-service-network-policies')
self.cmd('network vnet subnet create -g {rg} -n {subnet2} --vnet-name {vnet} --address-prefixes 10.0.2.0/24 --default-outbound false')
self.cmd('network vnet subnet update -g {rg} -n {subnet2} --vnet-name {vnet} --disable-private-endpoint-network-policies')
pls1 = self.cmd('network private-link-service create -g {rg} -n {lks1} --vnet-name {vnet} --subnet {subnet1} --lb-name {lb} --lb-frontend-ip-configs LoadBalancerFrontEnd -l {location}', checks=[
self.check('type', 'Microsoft.Network/privateLinkServices'),
self.check('provisioningState', 'Succeeded'),
self.check('name', self.kwargs['lks1'])
]).get_output_in_json()
self.kwargs['pls_id'] = pls1['id']
self.cmd('network private-endpoint list-types -l {location}')
self.cmd('network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet2} --private-connection-resource-id {pls_id} --connection-name tttt -l {location} --nic-name {nic}', checks=[
self.check('name', 'pe1'),
self.check('provisioningState', 'Succeeded'),
self.check('customNetworkInterfaceName', self.kwargs['nic']),
])
# temporarily disable the test
'''
self.cmd('network private-endpoint update -g {rg} -n {pe} --request-message "test"', checks=[
self.check('privateLinkServiceConnections[0].requestMessage', 'test')
])
'''
self.cmd('network private-endpoint list')
self.cmd('network private-endpoint list -g {rg}', checks=[
self.check('length(@)', 1)
])
pe_connection_name = self.cmd('network private-link-service show -g {rg} -n {lks1}').get_output_in_json()['privateEndpointConnections'][0]['name']
self.kwargs['pe_connect'] = pe_connection_name
self.cmd('network private-link-service connection update -g {rg} -n {pe_connect} --service-name {lks1} --connection-status Rejected --action-required "need action"')
self.cmd('network private-endpoint show -g {rg} -n {pe}', checks=[
self.check('privateLinkServiceConnections[0].privateLinkServiceConnectionState.status', 'Rejected'),
self.check('privateLinkServiceConnections[0].privateLinkServiceConnectionState.actionsRequired', "need action")
])
self.cmd('network private-link-service connection delete -g {rg} -n {pe_connect} --service-name {lks1}')
self.cmd('network private-link-service show -g {rg} -n {lks1}', checks=[
self.check('length(privateEndpointConnections)', 0)
])
self.cmd('network private-endpoint delete -g {rg} -n {pe}') | self.cmd('network private-endpoint update -g {rg} -n {pe} --request-message "test"', checks=[
self.check('privateLinkServiceConnections[0].requestMessage', 'test')
]) | test_network_private_endpoints | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/latest/test_network_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/latest/test_network_commands.py | MIT |
def _test_zone(self, zone_name, filename):
""" This tests that a zone file can be imported, exported, and re-imported without any changes to the
record sets. It does not test that the imported files meet any specific requirements. For that, run
additional checks in the individual zone file tests.
"""
self.kwargs.update({
'zone': zone_name,
'path': os.path.join(TEST_DIR, 'zone_files', filename),
'export': os.path.join(TEST_DIR, 'zone_files', filename + '_export.txt')
})
# Import from zone file
self.cmd('network dns zone import -n {zone} -g {rg} --file-name "{path}"')
records1 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()
# Export zone file and delete the zone
self.cmd('network dns zone export -g {rg} -n {zone} --file-name "{export}"')
self.cmd('network dns zone delete -g {rg} -n {zone} -y')
# Reimport zone file and verify both record sets are equivalent
self.cmd('network dns zone import -n {zone} -g {rg} --file-name "{export}"')
records2 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()
# verify that each record in the original import is unchanged after export/re-import
self._check_records(records1, records2) | This tests that a zone file can be imported, exported, and re-imported without any changes to the
record sets. It does not test that the imported files meet any specific requirements. For that, run
additional checks in the individual zone file tests. | _test_zone | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/latest/test_dns_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/latest/test_dns_commands.py | MIT |
def _test_zone(self, zone_name, filename):
""" This tests that a zone file can be imported, exported, and re-imported without any changes to the
record sets. It does not test that the imported files meet any specific requirements. For that, run
additional checks in the individual zone file tests.
"""
self.kwargs.update({
'zone': zone_name,
'path': os.path.join(TEST_DIR, 'zone_files', filename),
'export': os.path.join(TEST_DIR, 'zone_files', filename + '_export.txt')
})
# Import from zone file
self.cmd('network dns zone import -n {zone} -g {rg} --file-name "{path}"')
records1 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()
# Export zone file and delete the zone
self.cmd('network dns zone export -g {rg} -n {zone} --file-name "{export}"')
self.cmd('network dns zone delete -g {rg} -n {zone} -y')
# Reimport zone file and verify both record sets are equivalent
self.cmd('network dns zone import -n {zone} -g {rg} --file-name "{export}"')
records2 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()
# verify that each record in the original import is unchanged after export/re-import
self._check_records(records1, records2) | This tests that a zone file can be imported, exported, and re-imported without any changes to the
record sets. It does not test that the imported files meet any specific requirements. For that, run
additional checks in the individual zone file tests. | _test_zone | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/tests/hybrid_2018_03_01/test_dns_commands.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/tests/hybrid_2018_03_01/test_dns_commands.py | MIT |
def validate_ip_tags(namespace):
''' Extracts multiple space-separated tags in TYPE=VALUE format '''
if namespace.ip_tags:
ip_tags = []
for item in namespace.ip_tags:
tag_type, tag_value = item.split('=', 1)
ip_tags.append({"ip_tag_type": tag_type, "tag": tag_value})
namespace.ip_tags = ip_tags | Extracts multiple space-separated tags in TYPE=VALUE format | validate_ip_tags | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/_validators.py | MIT |
def get_public_ip_validator(has_type_field=False, allow_none=False, allow_new=False,
default_none=False):
""" Retrieves a validator for public IP address. Accepting all defaults will perform a check
for an existing name or ID with no ARM-required -type parameter. """
from azure.mgmt.core.tools import is_valid_resource_id, resource_id
def simple_validator(cmd, namespace):
if namespace.public_ip_address:
is_list = isinstance(namespace.public_ip_address, list)
def _validate_name_or_id(public_ip):
# determine if public_ip_address is name or ID
is_id = is_valid_resource_id(public_ip)
return public_ip if is_id else resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='publicIPAddresses',
name=public_ip)
if is_list:
for i, public_ip in enumerate(namespace.public_ip_address):
namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
else:
namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)
def complex_validator_with_type(cmd, namespace):
get_folded_parameter_validator(
'public_ip_address', 'Microsoft.Network/publicIPAddresses', '--public-ip-address',
allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
return complex_validator_with_type if has_type_field else simple_validator | Retrieves a validator for public IP address. Accepting all defaults will perform a check
for an existing name or ID with no ARM-required -type parameter. | get_public_ip_validator | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/_validators.py | MIT |
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
:param parent_zone: the parent zone name / FQDN of the parent zone.
if parent zone name is mentioned, assume current subscription and resource group.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr) | :param child_zone: the zone object corresponding to the child that is created.
:param parent_zone: the parent zone name / FQDN of the parent zone.
if parent zone name is mentioned, assume current subscription and resource group.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone | add_dns_delegation | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/custom.py | MIT |
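A standalone sketch of the delegation record-set name derivation used above; the zone names are invented examples.

child_zone_name = 'child.contoso.com'
parent_zone_name = 'contoso.com'
if child_zone_name.endswith(parent_zone_name):
    # strip '.<parent>' to get the NS record-set name created in the parent zone
    record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
    print(record_set_name)  # 'child'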
def _register_one_provider(provider, api_version, support_list_or_not, resource_get_api_version=None, support_connection_operation=True): # pylint: disable=line-too-long
"""
:param provider: namespace + type.
:param api_version: API version for private link scenarios.
:param support_list_or_not: support list rest call or not.
:param resource_get_api_version: API version to get the service resource.
"""
general_client_settings = {
"api_version": api_version,
"support_list_or_not": support_list_or_not,
"resource_get_api_version": resource_get_api_version,
"support_connection_operation": support_connection_operation
}
TYPE_CLIENT_MAPPING[provider] = general_client_settings | :param provider: namespace + type.
:param api_version: API version for private link scenarios.
:param support_list_or_not: support list rest call or not.
:param resource_get_api_version: API version to get the service resource. | _register_one_provider | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/private_link_resource_and_endpoint_connections/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/private_link_resource_and_endpoint_connections/custom.py | MIT |
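A hedged, self-contained sketch of the registration pattern above; the provider type and API version are illustrative placeholders, not a statement about what the CLI actually registers.

TYPE_CLIENT_MAPPING = {}

def register(provider, api_version, support_list_or_not,
             resource_get_api_version=None, support_connection_operation=True):
    # map 'namespace/type' to the client settings used for private link calls
    TYPE_CLIENT_MAPPING[provider] = {
        "api_version": api_version,
        "support_list_or_not": support_list_or_not,
        "resource_get_api_version": resource_get_api_version,
        "support_connection_operation": support_connection_operation,
    }

register('Microsoft.Storage/storageAccounts', '2021-06-01', True)
print(TYPE_CLIENT_MAPPING['Microsoft.Storage/storageAccounts']['api_version'])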
def process_soa(io, data, name, print_name=False):
"""
Replace {SOA} in template with a set of serialized SOA records
"""
indent = ' ' * len('{} {} IN SOA '.format(name, data['ttl']))
print('{} {} IN SOA {} {} ('.format(name, data['ttl'], data['mname'], data['rname']), file=io)
for item in ['serial', 'refresh', 'retry', 'expire', 'minimum']:
print('{}{} ; {}'.format(indent, data[item], item), file=io)
print('{})'.format(indent), file=io) | Replace {SOA} in template with a set of serialized SOA records | process_soa | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/record_processors.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/record_processors.py | MIT |
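An illustrative call, assuming process_soa is in scope; the SOA field values are made up.

import io

data = {'ttl': 3600, 'mname': 'ns1.example.com.', 'rname': 'hostmaster.example.com.',
        'serial': 1, 'refresh': 3600, 'retry': 300, 'expire': 2419200, 'minimum': 300}
out = io.StringIO()
process_soa(out, data, '@')
print(out.getvalue())
# '@ 3600 IN SOA ns1.example.com. hostmaster.example.com. (' followed by one
# commented line per numeric field and a closing ')'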
def _quote_field(data, field):
"""
Quote a field in a list of DNS records.
Return the new data records.
"""
if data is None:
return None
# embedded quotes require escaping - but only if not escaped already
# note that semi-colons do not need escaping here since we are putting it
# inside of a quoted string
fieldBuf = ""
escape = False
for c in data[field]:
if c == '"':
fieldBuf += '\\"'
escape = False
elif c == '\\':
if escape:
fieldBuf += '\\\\'
escape = False
else:
escape = True
else:
if escape:
fieldBuf += '\\'
fieldBuf += c
escape = False
data[field] = '"%s"' % fieldBuf
return data | Quote a field in a list of DNS records.
Return the new data records. | _quote_field | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/record_processors.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/record_processors.py | MIT |
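A demonstration of the escaping above on a hypothetical TXT value, assuming _quote_field is in scope.

rec = {'txt': 'v=spf1 "quoted" part'}
print(_quote_field(rec, 'txt')['txt'])
# "v=spf1 \"quoted\" part"  -- embedded quotes escaped, whole value wrapped in quotes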
def process_rr(io, data, record_type, record_keys, name, print_name):
""" Print out single line record entries """
if data is None:
return
if isinstance(record_keys, str):
record_keys = [record_keys]
elif not isinstance(record_keys, list):
raise ValueError('record_keys must be a string or list of strings')
in_or_azure = "IN"
if record_type == 'ALIAS':
in_or_azure = "AZURE"
name_display = name if print_name else ' ' * len(name)
print('{} {} {} {} '.format(name_display, data['ttl'], in_or_azure, record_type), end='', file=io)
for i, key in enumerate(record_keys):
print(data[key], end='\n' if i == len(record_keys) - 1 else ' ', file=io) | Print out single line record entries | process_rr | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/record_processors.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/record_processors.py | MIT |
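An illustrative single-record serialization, assuming process_rr is in scope; the 'ip' record key is an assumption based on the A-record convention used elsewhere in this module.

import io

out = io.StringIO()
process_rr(out, {'ttl': 300, 'ip': '10.0.0.4'}, 'A', 'ip', 'www', True)
print(out.getvalue())  # 'www 300 IN A 10.0.0.4'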
def make_zone_file(json_obj):
"""
Generate the DNS zonefile, given a json-encoded description of the
zone file (@json_zone_file) and the template to fill in (@template)
json_zone_file = {
"$origin": origin server,
"$ttl": default time-to-live,
"soa": [ soa records ],
"ns": [ ns records ],
"a": [ a records ],
"aaaa": [ aaaa records ]
"caa": [ caa records ]
"cname": [ cname records ]
"mx": [ mx records ]
"ptr": [ ptr records ]
"txt": [ txt records ]
"srv": [ srv records ]
"spf": [ spf records ]
"uri": [ uri records ]
}
"""
import azure.cli.command_modules.network.azure_stack.zone_file.record_processors as record_processors
from io import StringIO
zone_file = StringIO()
HEADER = """
; Exported zone file from Azure DNS\n\
; Zone name: {zone_name}\n\
; Resource Group Name: {resource_group}\n\
; Date and time (UTC): {datetime}\n\n\
$TTL {ttl}\n\
$ORIGIN {origin}\n\
"""
zone_name = json_obj.pop('zone-name')
print(HEADER.format(
zone_name=zone_name,
resource_group=json_obj.pop('resource-group'),
datetime=json_obj.pop('datetime'),
ttl=json_obj.pop('$ttl'),
origin=json_obj.pop('$origin')
), file=zone_file)
for record_set_name in json_obj.keys():
record_set = json_obj[record_set_name]
if record_set_name.endswith(zone_name):
record_set_name = record_set_name[:-(len(zone_name) + 1)]
if isinstance(record_set, str):
# These are handled above so we can skip them
continue
first_line = True
record_set_keys = list(record_set.keys())
if 'soa' in record_set_keys:
record_set_keys.remove('soa')
record_set_keys = ['soa'] + record_set_keys
for record_type in record_set_keys:
record = record_set[record_type]
if not isinstance(record, list):
record = [record]
for entry in record:
method = 'process_{}'.format(record_type.strip('$'))
getattr(record_processors, method)(zone_file, entry, record_set_name, first_line)
first_line = False
print('', file=zone_file)
result = zone_file.getvalue()
zone_file.close()
return result | Generate the DNS zonefile, given a json-encoded description of the
zone file (@json_zone_file) and the template to fill in (@template)
json_zone_file = {
"$origin": origin server,
"$ttl": default time-to-live,
"soa": [ soa records ],
"ns": [ ns records ],
"a": [ a records ],
"aaaa": [ aaaa records ]
"caa": [ caa records ]
"cname": [ cname records ]
"mx": [ mx records ]
"ptr": [ ptr records ]
"txt": [ txt records ]
"srv": [ srv records ]
"spf": [ spf records ]
"uri": [ uri records ]
} | make_zone_file | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/make_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/make_zone_file.py | MIT |
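A hypothetical minimal input showing the dict shape the writer above expects; the 'ip' key for A records is an assumption based on the record processors in this module.

json_obj = {
    'zone-name': 'example.com',
    'resource-group': 'myRG',
    'datetime': '2024-01-01T00:00:00Z',  # free-form header text
    '$ttl': 3600,
    '$origin': 'example.com.',
    'www.example.com': {'a': [{'ttl': 3600, 'ip': '10.0.0.4'}]},  # 'ip' key is assumed
}
print(make_zone_file(json_obj))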
def _tokenize_line(line, quote_strings=False, infer_name=True):
"""
Tokenize a line:
* split tokens on whitespace
* treat quoted strings as a single token
"""
ret = []
escape = False
quote = False
tokbuf = ""
firstchar = True
ll = list(line)
while len(ll) > 0:
c = ll.pop(0)
if c.isspace():
if firstchar:
# used by the _add_record_names method
tokbuf += '$NAME' if infer_name else ' '
if not quote and not escape:
# end of token
if len(tokbuf) > 0:
ret.append(tokbuf)
tokbuf = ''
elif escape:
# escaped space (can be inside or outside of quote)
tokbuf += '\\' + c
escape = False
elif quote:
# in quotes
tokbuf += c
else:
tokbuf = ''
elif c == '\\':
if escape:
# escape of an escape is valid part of the line sequence
tokbuf += '\\\\'
escape = False
else:
escape = True
elif c == '"':
if not escape:
if quote:
# end of quote
if quote_strings:
tokbuf += '"'
ret.append(tokbuf)
tokbuf = ''
quote = False
else:
# beginning of quote
if quote_strings:
tokbuf += '"'
quote = True
else:
# append the escaped quote
tokbuf += '\\"'
escape = False
else:
# normal character
if escape:
# append escape character
tokbuf += '\\'
tokbuf += c
escape = False
firstchar = False
if len(tokbuf.strip(' \r\n\t')):
ret.append(tokbuf)
if len(ret) == 1 and ret[0] == '$NAME':
return []
else:
return ret | Tokenize a line:
* split tokens on whitespace
* treat quoted strings as a single token | _tokenize_line | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
def _find_comment_index(line):
"""
Finds the index of a ; denoting a comment.
Ignores escaped semicolons and semicolons inside quotes
"""
escape = False
quote = False
for i, char in enumerate(line):
if char == '\\':
escape = True
continue
elif char == '"':
if escape:
escape = False
continue
else:
quote = not quote
elif char == ';':
if quote:
escape = False
continue
elif escape:
escape = False
continue
else:
# comment denoting semicolon found
return i
else:
escape = False
continue
# no unquoted, unescaped ; found
return -1 | Finds the index of a ; denoting a comment.
Ignores escaped semicolons and semicolons inside quotes | _find_comment_index | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
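An example of the comment detection above, assuming _find_comment_index is in scope; the zone-file line is invented.

line = 'www 3600 IN TXT "a;b" ; trailing comment'
idx = _find_comment_index(line)
print(line[:idx].rstrip())  # 'www 3600 IN TXT "a;b"' -- the quoted ';' is kept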
def _serialize(tokens):
"""
Serialize tokens:
* quote whitespace-containing tokens
"""
ret = []
for tok in tokens:
if tok is None:
continue
elif " " in tok:
tok = '"%s"' % tok
ret.append(tok)
return " ".join(ret) | Serialize tokens:
* quote whitespace-containing tokens | _serialize | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
def _remove_comments(text):
"""
Remove comments from a zonefile
"""
ret = []
lines = text.split("\n")
for line in lines:
if not line:
continue
index = _find_comment_index(line)
if index != -1:
line = line[:index]
if line:
ret.append(line)
return "\n".join(ret) | Remove comments from a zonefile | _remove_comments | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
def _flatten(text):
"""
Flatten the text:
* make sure each record is on one line.
* remove parentheses
* remove Windows line endings
"""
lines = text.split('\n')
SENTINEL = '%%%'
# tokens: sequence of non-whitespace tokens, with SENTINEL marking where each newline was
tokens = []
for line in (x for x in lines if len(x) > 0):
line = line.replace('\t', ' ')
tokens += _tokenize_line(line, quote_strings=True, infer_name=False)
tokens.append(SENTINEL)
# find (...) and turn it into a single line ("capture" it)
capturing = False
captured = []
flattened = []
while len(tokens) > 0:
tok = tokens.pop(0)
if tok == '$NAME':
tok = ' '
if not capturing and tok == SENTINEL:
# normal end-of-line
if len(captured) > 0:
flattened.append(" ".join(captured))
captured = []
continue
if tok.startswith("("):
# begin grouping
tok = tok.lstrip("(")
capturing = True
if capturing and tok.endswith(")"):
# end grouping. next end-of-line will turn this sequence into a flat line
tok = tok.rstrip(")")
capturing = False
if tok != SENTINEL:
captured.append(tok)
return "\n".join(flattened) | Flatten the text:
* make sure each record is on one line.
* remove parentheses
* remove Windows line endings | _flatten | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
def _add_record_names(text):
"""
Go through each line of the text and ensure that
a name is defined. Use previous record name if there is none.
"""
global SUPPORTED_RECORDS
lines = text.split("\n")
ret = []
previous_record_name = None
for line in lines:
tokens = _tokenize_line(line)
if not tokens:
continue
record_name = tokens[0]
if record_name == '$NAME':
tokens = [previous_record_name] + tokens[1:]
elif not record_name.startswith('$'):
previous_record_name = record_name
ret.append(_serialize(tokens))
return "\n".join(ret) | Go through each line of the text and ensure that
a name is defined. Use previous record name if there is none. | _add_record_names | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
def _convert_to_seconds(value):
""" Converts TTL strings into seconds """
try:
return int(value)
except ValueError:
# parse the BIND format
# (w)eek, (d)ay, (h)our, (m)inute, (s)econd
seconds = 0
ttl_string = value.lower()
for component in ['w', 'd', 'h', 'm', 's']:
regex = date_regex_dict[component]['regex']
match = regex.search(ttl_string)
if match:
match_string = match.group(0)
ttl_string = ttl_string.replace(match_string, '')
match_value = int(match_string.strip(component))
seconds += match_value * date_regex_dict[component]['scale']
if not ttl_string:
return seconds
# convert the last piece without any units, which must be seconds
try:
seconds += int(ttl_string)
return seconds
except ValueError:
raise InvalidArgumentValueError("Unable to convert value '{}' to seconds.".format(value)) | Converts TTL strings into seconds | _convert_to_seconds | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
def parse_zone_file(text, zone_name, ignore_invalid=False):
"""
Parse a zonefile into a dict
"""
text = _remove_comments(text)
text = _flatten(text)
text = _add_record_names(text)
zone_obj = OrderedDict()
record_lines = text.split("\n")
current_origin = zone_name.rstrip('.') + '.'
current_ttl = 3600
soa_processed = False
for record_line in record_lines:
parse_match = False
record = None
for record_type, regex in _COMPILED_REGEX.items():
match = regex.match(record_line)
if not match:
continue
parse_match = True
record = match.groupdict()
if not parse_match:
    if not ignore_invalid:
        raise InvalidArgumentValueError('Unable to parse: {}'.format(record_line))
    continue  # skip unparseable lines; 'record' is still None here
record_type = record['delim'].lower()
if record_type == '$origin':
origin_value = record['val']
if not origin_value.endswith('.'):
logger.warning("$ORIGIN '{}' should have terminating dot.".format(origin_value))
current_origin = origin_value.rstrip('.') + '.'
elif record_type == '$ttl':
current_ttl = _convert_to_seconds(record['val'])
else:
record_name = record['name']
if '@' in record_name:
record_name = record_name.replace('@', current_origin)
elif not record_name.endswith('.'):
record_name = '{}.{}'.format(record_name, current_origin)
# special record-specific fix-ups
if record_type == 'ptr':
record['fullname'] = record_name + '.' + current_origin
elif record_type == 'soa':
for key in ['refresh', 'retry', 'expire', 'minimum']:
record[key] = _convert_to_seconds(record[key])
_expand_with_origin(record, 'email', current_origin)
elif record_type == 'cname':
_expand_with_origin(record, 'alias', current_origin)
elif record_type == 'mx':
_expand_with_origin(record, 'host', current_origin)
elif record_type == 'ns':
_expand_with_origin(record, 'host', current_origin)
elif record_type == 'srv':
_expand_with_origin(record, 'target', current_origin)
elif record_type == 'spf':
record_type = 'txt'
record['ttl'] = _convert_to_seconds(record['ttl'] or current_ttl)
# handle quotes for CAA and TXT
if record_type == 'caa':
_post_process_caa_record(record)
elif record_type == 'txt':
# handle TXT concatenation and splitting separately
_post_process_txt_record(record)
if record_name not in zone_obj:
zone_obj[record_name] = OrderedDict()
if record_type == 'soa':
if soa_processed:
raise InvalidArgumentValueError('Zone file can contain only one SOA record.')
if record_name != current_origin:
raise InvalidArgumentValueError("Zone SOA record must be at the apex '@'.")
zone_obj[record_name][record_type] = record
soa_processed = True
continue
if not soa_processed:
raise InvalidArgumentValueError('First record in zone file must be SOA.')
if record_type == 'cname':
if record_type in zone_obj[record_name]:
logger.warning("CNAME record already exists for '{}'. Ignoring '{}'."
.format(record_name, record['alias']))
continue
zone_obj[record_name][record_type] = record
continue
# any other record can have multiple entries
if record_type not in zone_obj[record_name]:
zone_obj[record_name][record_type] = []
zone_obj[record_name][record_type].append(record)
_post_process_ttl(zone_obj)
_post_check_names(zone_obj)
return zone_obj | Parse a zonefile into a dict | parse_zone_file | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/azure_stack/zone_file/parse_zone_file.py | MIT |
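A minimal parsing sketch, assuming parse_zone_file is in scope; the zone content is invented but follows the rules enforced above (SOA first, at the apex).

zone_text = '''$ORIGIN example.com.
$TTL 3600
@ IN SOA ns1.example.com. hostmaster.example.com. (1 3600 300 2419200 300)
www IN A 10.0.0.4
'''
zone = parse_zone_file(zone_text, 'example.com')
print(list(zone.keys()))  # ['example.com.', 'www.example.com.']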
def _register_one_provider(provider, api_version, support_list_or_not, resource_get_api_version=None, support_connection_operation=True): # pylint: disable=line-too-long
"""
:param provider: namespace + type.
:param api_version: API version for private link scenarios.
:param support_list_or_not: support list rest call or not.
:param resource_get_api_version: API version to get the service resource.
"""
general_client_settings = {
"api_version": api_version,
"support_list_or_not": support_list_or_not,
"resource_get_api_version": resource_get_api_version,
"support_connection_operation": support_connection_operation
}
TYPE_CLIENT_MAPPING[provider] = general_client_settings | :param provider: namespace + type.
:param api_version: API version for private link scenarios.
:param support_list_or_not: support list rest call or not.
:param resource_get_api_version: API version to get the service resource. | _register_one_provider | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/private_link_resource_and_endpoint_connections/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/private_link_resource_and_endpoint_connections/custom.py | MIT |
def process_soa(io, data, name, print_name=False):
"""
Replace {SOA} in template with a set of serialized SOA records
"""
indent = ' ' * len('{} {} IN SOA '.format(name, data['ttl']))
print('{} {} IN SOA {} {} ('.format(name, data['ttl'], data['mname'], data['rname']), file=io)
for item in ['serial', 'refresh', 'retry', 'expire', 'minimum']:
print('{}{} ; {}'.format(indent, data[item], item), file=io)
print('{})'.format(indent), file=io) | Replace {SOA} in template with a set of serialized SOA records | process_soa | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/record_processors.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/record_processors.py | MIT |
def _quote_field(data, field):
"""
Quote a field in a list of DNS records.
Return the new data records.
"""
if data is None:
return None
# embedded quotes require escaping - but only if not escaped already
# note that semi-colons do not need escaping here since we are putting it
# inside of a quoted string
fieldBuf = ""
escape = False
for c in data[field]:
if c == '"':
fieldBuf += '\\"'
escape = False
elif c == '\\':
if escape:
fieldBuf += '\\\\'
escape = False
else:
escape = True
else:
if escape:
fieldBuf += '\\'
fieldBuf += c
escape = False
data[field] = '"%s"' % fieldBuf
return data | Quote a field in a list of DNS records.
Return the new data records. | _quote_field | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/record_processors.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/record_processors.py | MIT |
def process_rr(io, data, record_type, record_keys, name, print_name):
""" Print out single line record entries """
if data is None:
return
if isinstance(record_keys, str):
record_keys = [record_keys]
elif not isinstance(record_keys, list):
raise ValueError('record_keys must be a string or list of strings')
in_or_azure = "IN"
if record_type == 'ALIAS':
in_or_azure = "AZURE"
name_display = name if print_name else ' ' * len(name)
print('{} {} {} {} '.format(name_display, data['ttl'], in_or_azure, record_type), end='', file=io)
for i, key in enumerate(record_keys):
print(data[key], end='\n' if i == len(record_keys) - 1 else ' ', file=io) | Print out single line record entries | process_rr | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/record_processors.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/record_processors.py | MIT |
def make_zone_file(json_obj):
"""
Generate the DNS zonefile, given a json-encoded description of the
zone file (@json_zone_file) and the template to fill in (@template)
json_zone_file = {
"$origin": origin server,
"$ttl": default time-to-live,
"soa": [ soa records ],
"ns": [ ns records ],
"a": [ a records ],
"aaaa": [ aaaa records ]
"caa": [ caa records ]
"cname": [ cname records ]
"ds": [ ds records ]
"mx": [ mx records ]
"naptr": [ naptr records ]
"ptr": [ ptr records ]
"tlsa": [ tlsa records ]
"txt": [ txt records ]
"srv": [ srv records ]
"spf": [ spf records ]
"uri": [ uri records ]
}
"""
import azure.cli.command_modules.network.zone_file.record_processors as record_processors
from io import StringIO
zone_file = StringIO()
HEADER = """
; Exported zone file from Azure DNS\n\
; Zone name: {zone_name}\n\
; Resource Group Name: {resource_group}\n\
; Date and time (UTC): {datetime}\n\n\
$TTL {ttl}\n\
$ORIGIN {origin}\n\
"""
zone_name = json_obj.pop('zone-name')
print(HEADER.format(
zone_name=zone_name,
resource_group=json_obj.pop('resource-group'),
datetime=json_obj.pop('datetime'),
ttl=json_obj.pop('$ttl'),
origin=json_obj.pop('$origin')
), file=zone_file)
for record_set_name in json_obj.keys():
record_set = json_obj[record_set_name]
if record_set_name.endswith(zone_name):
record_set_name = record_set_name[:-(len(zone_name) + 1)]
if isinstance(record_set, str):
# These are handled above so we can skip them
continue
first_line = True
record_set_keys = list(record_set.keys())
if 'soa' in record_set_keys:
record_set_keys.remove('soa')
record_set_keys = ['soa'] + record_set_keys
for record_type in record_set_keys:
record = record_set[record_type]
if not isinstance(record, list):
record = [record]
for entry in record:
method = 'process_{}'.format(record_type.strip('$'))
getattr(record_processors, method)(zone_file, entry, record_set_name, first_line)
first_line = False
print('', file=zone_file)
result = zone_file.getvalue()
zone_file.close()
return result | Generate the DNS zonefile, given a json-encoded description of the
zone file (@json_zone_file) and the template to fill in (@template)
json_zone_file = {
"$origin": origin server,
"$ttl": default time-to-live,
"soa": [ soa records ],
"ns": [ ns records ],
"a": [ a records ],
"aaaa": [ aaaa records ]
"caa": [ caa records ]
"cname": [ cname records ]
"ds": [ ds records ]
"mx": [ mx records ]
"naptr": [ naptr records ]
"ptr": [ ptr records ]
"tlsa": [ tlsa records ]
"txt": [ txt records ]
"srv": [ srv records ]
"spf": [ spf records ]
"uri": [ uri records ]
} | make_zone_file | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/make_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/make_zone_file.py | MIT |
def _tokenize_line(line, quote_strings=False, infer_name=True):
"""
Tokenize a line:
* split tokens on whitespace
* treat quoted strings as a single token
"""
ret = []
escape = False
quote = False
tokbuf = ""
firstchar = True
ll = list(line)
while len(ll) > 0:
c = ll.pop(0)
if c.isspace():
if firstchar:
# used by the _add_record_names method
tokbuf += '$NAME' if infer_name else ' '
if not quote and not escape:
# end of token
if len(tokbuf) > 0:
ret.append(tokbuf)
tokbuf = ''
elif escape:
# escaped space (can be inside or outside of quote)
tokbuf += '\\' + c
escape = False
elif quote:
# in quotes
tokbuf += c
else:
tokbuf = ''
elif c == '\\':
if escape:
# escape of an escape is valid part of the line sequence
tokbuf += '\\\\'
escape = False
else:
escape = True
elif c == '"':
if not escape:
if quote:
# end of quote
if quote_strings:
tokbuf += '"'
ret.append(tokbuf)
tokbuf = ''
quote = False
else:
# beginning of quote
if quote_strings:
tokbuf += '"'
quote = True
else:
# append the escaped quote
tokbuf += '\\"'
escape = False
else:
# normal character
if escape:
# append escape character
tokbuf += '\\'
tokbuf += c
escape = False
firstchar = False
if len(tokbuf.strip(' \r\n\t')):
ret.append(tokbuf)
if len(ret) == 1 and ret[0] == '$NAME':
return []
else:
return ret | Tokenize a line:
* split tokens on whitespace
* treat quoted strings as a single token | _tokenize_line | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _find_comment_index(line):
"""
Finds the index of a ; denoting a comment.
Ignores escaped semicolons and semicolons inside quotes
"""
escape = False
quote = False
for i, char in enumerate(line):
if char == '\\':
escape = True
continue
elif char == '"':
if escape:
escape = False
continue
else:
quote = not quote
elif char == ';':
if quote:
escape = False
continue
elif escape:
escape = False
continue
else:
# comment denoting semicolon found
return i
else:
escape = False
continue
# no unquoted, unescaped ; found
return -1 | Finds the index of a ; denoting a comment.
Ignores escaped semicolons and semicolons inside quotes | _find_comment_index | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _serialize(tokens):
"""
Serialize tokens:
* quote whitespace-containing tokens
"""
ret = []
for tok in tokens:
if tok is None:
continue
elif tok == '':
tok = 'EMPTY'
elif " " in tok:
tok = '"%s"' % tok
ret.append(tok)
return " ".join(ret) | Serialize tokens:
* quote whitespace-containing tokens | _serialize | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _remove_comments(text):
"""
Remove comments from a zonefile
"""
ret = []
lines = text.split("\n")
for line in lines:
if not line:
continue
index = _find_comment_index(line)
if index != -1:
line = line[:index]
if line:
ret.append(line)
return "\n".join(ret) | Remove comments from a zonefile | _remove_comments | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _flatten(text):
"""
Flatten the text:
* make sure each record is on one line.
* remove parentheses
* remove Windows line endings
"""
lines = text.split('\n')
SENTINEL = '%%%'
# tokens: sequence of non-whitespace tokens, with SENTINEL marking where each newline was
tokens = []
for line in (x for x in lines if len(x) > 0):
line = line.replace('\t', ' ')
tokens += _tokenize_line(line, quote_strings=True, infer_name=False)
tokens.append(SENTINEL)
# find (...) and turn it into a single line ("capture" it)
capturing = False
captured = []
flattened = []
while len(tokens) > 0:
tok = tokens.pop(0)
if tok == '$NAME':
tok = ' '
if not capturing and tok == SENTINEL:
# normal end-of-line
if len(captured) > 0:
flattened.append(" ".join(captured))
captured = []
continue
if tok.startswith("("):
# begin grouping
tok = tok.lstrip("(")
capturing = True
if capturing and tok.endswith(")"):
# end grouping. next end-of-line will turn this sequence into a flat line
tok = tok.rstrip(")")
capturing = False
if tok != SENTINEL:
captured.append(tok)
return "\n".join(flattened) | Flatten the text:
* make sure each record is on one line.
* remove parentheses
* remove Windows line endings | _flatten | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _add_record_names(text):
"""
Go through each line of the text and ensure that
a name is defined. Use previous record name if there is none.
"""
global SUPPORTED_RECORDS
lines = text.split("\n")
ret = []
previous_record_name = None
for line in lines:
tokens = _tokenize_line(line)
if not tokens:
continue
record_name = tokens[0]
if record_name == '$NAME':
tokens = [previous_record_name] + tokens[1:]
elif not record_name.startswith('$'):
previous_record_name = record_name
ret.append(_serialize(tokens))
return "\n".join(ret) | Go through each line of the text and ensure that
a name is defined. Use previous record name if there is none. | _add_record_names | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _convert_to_seconds(value):
""" Converts TTL strings into seconds """
try:
return int(value)
except ValueError:
# parse the BIND format
# (w)eek, (d)ay, (h)our, (m)inute, (s)econd
seconds = 0
ttl_string = value.lower()
for component in ['w', 'd', 'h', 'm', 's']:
regex = date_regex_dict[component]['regex']
match = regex.search(ttl_string)
if match:
match_string = match.group(0)
ttl_string = ttl_string.replace(match_string, '')
match_value = int(match_string.strip(component))
seconds += match_value * date_regex_dict[component]['scale']
if not ttl_string:
return seconds
# convert the last piece without any units, which must be seconds
try:
seconds += int(ttl_string)
return seconds
except ValueError:
raise InvalidArgumentValueError("Unable to convert value '{}' to seconds.".format(value)) | Converts TTL strings into seconds | _convert_to_seconds | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def parse_zone_file(text, zone_name, ignore_invalid=False):
"""
Parse a zonefile into a dict
"""
text = _remove_comments(text)
text = _flatten(text)
text = _add_record_names(text)
zone_obj = OrderedDict()
record_lines = text.split("\n")
current_origin = zone_name.rstrip('.') + '.'
current_ttl = 3600
soa_processed = False
for record_line in record_lines:
parse_match = False
record = None
for record_type, regex in _COMPILED_REGEX.items():
match = regex.match(record_line)
if not match:
continue
parse_match = True
record = match.groupdict()
if not parse_match:
    if not ignore_invalid:
        raise InvalidArgumentValueError('Unable to parse: {}'.format(record_line))
    continue  # skip unparseable lines; 'record' is still None here
record_type = record['delim'].lower()
if record_type == '$origin':
origin_value = record['val']
if not origin_value.endswith('.'):
logger.warning("$ORIGIN '{}' should have terminating dot.".format(origin_value))
current_origin = origin_value.rstrip('.') + '.'
elif record_type == '$ttl':
current_ttl = _convert_to_seconds(record['val'])
else:
record_name = record['name']
if '@' in record_name:
record_name = record_name.replace('@', current_origin)
elif not record_name.endswith('.'):
record_name = '{}.{}'.format(record_name, current_origin)
# special record-specific fix-ups
if record_type == 'ptr':
record['fullname'] = record_name + '.' + current_origin
elif record_type == 'soa':
for key in ['refresh', 'retry', 'expire', 'minimum']:
record[key] = _convert_to_seconds(record[key])
_expand_with_origin(record, 'email', current_origin)
elif record_type == 'cname':
_expand_with_origin(record, 'alias', current_origin)
elif record_type == 'mx':
_expand_with_origin(record, 'host', current_origin)
elif record_type == 'ns':
_expand_with_origin(record, 'host', current_origin)
elif record_type == 'srv':
_expand_with_origin(record, 'target', current_origin)
elif record_type == 'spf':
record_type = 'txt'
record['ttl'] = _convert_to_seconds(record['ttl'] or current_ttl)
# handle quotes for CAA and TXT
if record_type == 'caa':
_post_process_caa_record(record)
elif record_type == 'txt':
# handle TXT concatenation and splitting separately
_post_process_txt_record(record)
elif record_type == 'naptr':
# handle NAPTR empty regexp separately
_post_process_naptr_record(record)
if record_name not in zone_obj:
zone_obj[record_name] = OrderedDict()
if record_type == 'soa':
if soa_processed:
raise InvalidArgumentValueError('Zone file can contain only one SOA record.')
if record_name != current_origin:
raise InvalidArgumentValueError("Zone SOA record must be at the apex '@'.")
zone_obj[record_name][record_type] = record
soa_processed = True
continue
if not soa_processed:
raise InvalidArgumentValueError('First record in zone file must be SOA.')
if record_type == 'cname':
if record_type in zone_obj[record_name]:
logger.warning("CNAME record already exists for '{}'. Ignoring '{}'."
.format(record_name, record['alias']))
continue
zone_obj[record_name][record_type] = record
continue
# any other record can have multiple entries
if record_type not in zone_obj[record_name]:
zone_obj[record_name][record_type] = []
zone_obj[record_name][record_type].append(record)
_post_process_ttl(zone_obj)
_post_check_names(zone_obj)
return zone_obj | Parse a zonefile into a dict | parse_zone_file | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/network/zone_file/parse_zone_file.py | MIT |
def _format_noise_notice(builder):
builder.append_line(
"""Note: The result may contain false positive predictions (noise).
You can help us improve the accuracy of the result by opening an issue here: https://aka.ms/WhatIfIssues"""
)
builder.append_line() | Note: The result may contain false positive predictions (noise).
You can help us improve the accuracy of the result by opening an issue here: https://aka.ms/WhatIfIssues | _format_noise_notice | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_formatters.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_formatters.py | MIT |
def split_resource_id(resource_id):
"""Splits a fully qualified resource ID into two parts.
Returns the resource scope and the relative resource ID extracted from the given resource ID.
Examples:
- split_resource_id("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG) returns
"/subscriptions/00000000-0000-0000-0000-000000000000", "resourceGroups/myRG"
- split_resource_id("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG\
/providers/Microsoft.Storage/storageAccounts/myStorageAccount") returns
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG",
"Microsoft.Storage/storageAccounts/myStorageAccount"
"""
if not resource_id:
return None, None
remaining = resource_id
management_group_match = re.match(_management_group_pattern, remaining, flags=re.IGNORECASE)
management_group_id = management_group_match.group("management_group_id") if management_group_match else ""
remaining = remaining[len(management_group_match.group(0)) if management_group_match else 0:]
# Parse subscription_id.
subscription_match = re.match(_subscription_pattern, remaining, flags=re.IGNORECASE)
subscription_id = subscription_match.group("subscription_id") if subscription_match else ""
remaining = remaining[len(subscription_match.group(0)) if subscription_match else 0:]
# Parse resource_group_name.
resource_group_match = re.match(_resource_group_pattern, remaining, flags=re.IGNORECASE)
resource_group_name = resource_group_match.group("resource_group_name") if resource_group_match else ""
remaining = remaining[len(resource_group_match.group(0)) if resource_group_match else 0:]
# Parse relative_resource_id.
relative_resource_id_match = re.match(_relative_resource_id_pattern, remaining, flags=re.IGNORECASE)
relative_resource_id = (
relative_resource_id_match.group("relative_resource_id") if relative_resource_id_match else ""
)
if management_group_match:
management_group_relative_id = f"Microsoft.Management/ManagementGroups/{management_group_id}"
return (
(f"/providers/{management_group_relative_id}", relative_resource_id)
if relative_resource_id
else ("/", management_group_relative_id)
)
if subscription_match:
subscription_scope = f"/subscriptions/{subscription_id.lower()}"
if resource_group_match:
resource_group_id = f"resourceGroups/{resource_group_name}"
return (
(f"{subscription_scope}/{resource_group_id}", relative_resource_id)
if relative_resource_id_match
else (subscription_scope, resource_group_id)
)
return subscription_scope, relative_resource_id
return "/", relative_resource_id | Splits a fully qualified resource ID into two parts.
Returns the resource scope and the relative resource ID extracted from the given resource ID.
Examples:
- split_resource_id("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG) returns
"/subscriptions/00000000-0000-0000-0000-000000000000", "resourceGroups/myRG"
- split_resource_id("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG\
/providers/Microsoft.Storage/storageAccounts/myStorageAccount") returns
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG",
"Microsoft.Storage/storageAccounts/myStorageAccount" | split_resource_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_utils.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_utils.py | MIT |
def _parse_lock_id(id_arg):
"""
Lock ids look very different from regular resource ids, this function uses a regular expression
that parses a lock's id and extracts the following parameters if available:
-lock_name: the lock's name; always present in a lock id
-resource_group: the name of the resource group; present in group/resource level locks
-resource_provider_namespace: the resource provider; present in resource level locks
-resource_type: the resource type; present in resource level locks
-resource_name: the resource name; present in resource level locks
-parent_resource_path: the resource's parent path; present in child resources such as subnets
"""
regex = re.compile(
'/subscriptions/[^/]*(/resource[gG]roups/(?P<resource_group>[^/]*)'
'(/providers/(?P<resource_provider_namespace>[^/]*)'
'(/(?P<parent_resource_path>.*))?/(?P<resource_type>[^/]*)/(?P<resource_name>[^/]*))?)?'
'/providers/Microsoft.Authorization/locks/(?P<lock_name>[^/]*)')
return regex.match(id_arg).groupdict() | Lock ids look very different from regular resource ids, this function uses a regular expression
that parses a lock's id and extracts the following parameters if available:
-lock_name: the lock's name; always present in a lock id
-resource_group: the name of the resource group; present in group/resource level locks
-resource_provider_namespace: the resource provider; present in resource level locks
-resource_type: the resource type; present in resource level locks
-resource_name: the resource name; present in resource level locks
-parent_resource_path: the resource's parent path; present in child resources such as subnets | _parse_lock_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_validators.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_validators.py | MIT |
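Parsing a hypothetical group-level lock ID, assuming _parse_lock_id is in scope.

lock_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
           '/resourceGroups/myRG/providers/Microsoft.Authorization/locks/myLock')
parts = _parse_lock_id(lock_id)
print(parts['lock_name'], parts['resource_group'])  # myLock myRG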
def json_min(js, **kwargs):
"""
returns a minified version of the json string
"""
import io
klass = io.StringIO
ins = klass(js)
outs = klass()
JsonMinify(ins, outs, **kwargs).minify()
return outs.getvalue() | returns a minified version of the json string | json_min | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/_json_handler.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/_json_handler.py | MIT |
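Not the helper above: JsonMinify also strips comments from the input. For plain, comment-free JSON the standard library gives an equivalent minified result, shown here as an illustrative stand-in.

import json

s = '{ "a": 1,  "b": [1, 2] }'
print(json.dumps(json.loads(s), separators=(',', ':')))  # {"a":1,"b":[1,2]}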
def _list_resources_odata_filter_builder(resource_group_name=None, resource_provider_namespace=None,
resource_type=None, name=None, tag=None, location=None):
"""Build up OData filter string from parameters """
if tag is not None:
if resource_group_name:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--resource-group\''
' (If the default value for resource group is set, please use \'az configure --defaults group=""\' command to clear it first)')
if resource_provider_namespace:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--namespace\'')
if resource_type:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--resource-type\'')
if name:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--name\'')
if location:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--location\''
' (If the default value for location is set, please use \'az configure --defaults location=""\' command to clear it first)')
filters = []
if resource_group_name:
filters.append("resourceGroup eq '{}'".format(resource_group_name))
if name:
filters.append("name eq '{}'".format(name))
if location:
filters.append("location eq '{}'".format(location))
if resource_type:
if resource_provider_namespace:
f = "'{}/{}'".format(resource_provider_namespace, resource_type)
else:
if not re.match('[^/]+/[^/]+', resource_type):
raise CLIError(
'Malformed resource-type: '
'--resource-type=<namespace>/<resource-type> expected.')
# assume resource_type is <namespace>/<type>. The worst is to get a server error
f = "'{}'".format(resource_type)
filters.append("resourceType eq " + f)
else:
if resource_provider_namespace:
raise CLIError('--namespace also requires --resource-type')
if tag:
tag_name = list(tag.keys())[0] if isinstance(tag, dict) else tag
tag_value = tag[tag_name] if isinstance(tag, dict) else ''
if tag_name:
if tag_name[-1] == '*':
filters.append("startswith(tagname, '%s')" % tag_name[0:-1])
else:
filters.append("tagname eq '%s'" % tag_name)
if tag_value != '':
filters.append("tagvalue eq '%s'" % tag_value)
return ' and '.join(filters) | Build up OData filter string from parameters | _list_resources_odata_filter_builder | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
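Expected filter strings for two typical invocations, assuming the builder above is in scope.

print(_list_resources_odata_filter_builder(resource_group_name='myRG',
                                           resource_type='Microsoft.Storage/storageAccounts'))
# resourceGroup eq 'myRG' and resourceType eq 'Microsoft.Storage/storageAccounts'
print(_list_resources_odata_filter_builder(tag={'env': 'prod'}))
# tagname eq 'env' and tagvalue eq 'prod'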
def list_resource_groups(cmd, tag=None): # pylint: disable=no-self-use
""" List resource groups, optionally filtered by a tag.
:param str tag:tag to filter by in 'key[=value]' format
"""
rcf = _resource_client_factory(cmd.cli_ctx)
filters = []
if tag:
key = list(tag.keys())[0]
filters.append("tagname eq '{}'".format(key))
if tag[key]:
filters.append("tagvalue eq '{}'".format(tag[key]))
filter_text = ' and '.join(filters) if filters else None
groups = rcf.resource_groups.list(filter=filter_text)
return list(groups) | List resource groups, optionally filtered by a tag.
:param str tag:tag to filter by in 'key[=value]' format | list_resource_groups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def create_resource_group(cmd, rg_name, location, tags=None, managed_by=None):
""" Create a new resource group.
:param str resource_group_name:the desired resource group name
:param str location:the resource group location
:param str tags:tags in 'a=b c' format
"""
rcf = _resource_client_factory(cmd.cli_ctx)
ResourceGroup = cmd.get_models('ResourceGroup')
parameters = ResourceGroup(
location=location,
tags=tags
)
if cmd.supported_api_version(min_api='2016-09-01'):
parameters.managed_by = managed_by
return rcf.resource_groups.create_or_update(rg_name, parameters) | Create a new resource group.
:param str resource_group_name:the desired resource group name
:param str location:the resource group location
:param str tags:tags in 'a=b c' format | create_resource_group | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def export_group_as_template(
cmd, resource_group_name, include_comments=False, include_parameter_default_value=False, resource_ids=None, skip_resource_name_params=False, skip_all_params=False):
"""Captures a resource group as a template.
:param str resource_group_name: the name of the resource group.
:param resource_ids: space-separated resource ids to filter the export by. To export all resources, do not specify this argument or supply "*".
:param bool include_comments: export template with comments.
:param bool include_parameter_default_value: export template parameter with default value.
:param bool skip_resource_name_params: export template and skip resource name parameterization.
:param bool skip_all_params: export template parameter and skip all parameterization.
"""
rcf = _resource_client_factory(cmd.cli_ctx)
export_options = []
if include_comments:
export_options.append('IncludeComments')
if include_parameter_default_value:
export_options.append('IncludeParameterDefaultValue')
if skip_resource_name_params:
export_options.append('SkipResourceNameParameterization')
if skip_all_params:
export_options.append('SkipAllParameterization')
resources = []
if resource_ids is None or resource_ids[0] == "*":
resources = ["*"]
else:
for i in resource_ids:
if is_valid_resource_id(i):
resources.append(i)
else:
raise CLIError('az resource: error: argument --resource-ids: invalid ResourceId value: \'%s\'' % i)
options = ','.join(export_options) if export_options else None
ExportTemplateRequest = cmd.get_models('ExportTemplateRequest')
export_template_request = ExportTemplateRequest(resources=resources, options=options)
# Exporting a resource group as a template is async since API version 2019-08-01.
if cmd.supported_api_version(min_api='2019-08-01'):
result_poller = rcf.resource_groups.begin_export_template(resource_group_name,
parameters=export_template_request)
result = LongRunningOperation(cmd.cli_ctx)(result_poller)
else:
result = rcf.resource_groups.begin_export_template(resource_group_name,
parameters=export_template_request)
# pylint: disable=no-member
# On error, server still returns 200, with details in the error attribute
if result.error:
error = result.error
try:
logger.warning(error.message)
except AttributeError:
logger.warning(str(error))
for detail in getattr(error, 'details', None) or []:
logger.error(detail.message)
return result.template | Captures a resource group as a template.
:param str resource_group_name: the name of the resource group.
:param resource_ids: space-separated resource ids to filter the export by. To export all resources, do not specify this argument or supply "*".
:param bool include_comments: export template with comments.
:param bool include_parameter_default_value: export template parameter with default value.
:param bool skip_resource_name_params: export template and skip resource name parameterization.
:param bool skip_all_params: export template parameter and skip all parameterization. | export_group_as_template | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def create_application(cmd, resource_group_name,
application_name, managedby_resource_group_id,
kind, managedapp_definition_id=None, location=None,
plan_name=None, plan_publisher=None, plan_product=None,
plan_version=None, tags=None, parameters=None):
""" Create a new managed application.
:param str resource_group_name:the desired resource group name
:param str application_name:the managed application name
:param str kind:the managed application kind. can be marketplace or servicecatalog
:param str plan_name:the managed application package plan name
:param str plan_publisher:the managed application package plan publisher
:param str plan_product:the managed application package plan product
:param str plan_version:the managed application package plan version
:param str tags:tags in 'a=b c' format
"""
Application, Plan = cmd.get_models('Application', 'Plan')
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
rcf = _resource_client_factory(cmd.cli_ctx)
if not location:
location = rcf.resource_groups.get(resource_group_name).location
application = Application(
location=location,
managed_resource_group_id=managedby_resource_group_id,
kind=kind,
tags=tags
)
if kind.lower() == 'servicecatalog':
if managedapp_definition_id:
application.application_definition_id = managedapp_definition_id
else:
raise CLIError('--managedapp-definition-id is required if kind is ServiceCatalog')
elif kind.lower() == 'marketplace':
if (plan_name is None and plan_product is None and
plan_publisher is None and plan_version is None):
raise CLIError('--plan-name, --plan-product, --plan-publisher and \
--plan-version are all required if kind is MarketPlace')
application.plan = Plan(name=plan_name, publisher=plan_publisher, product=plan_product, version=plan_version)
applicationParameters = None
if parameters:
if os.path.exists(parameters):
applicationParameters = get_file_json(parameters)
else:
applicationParameters = shell_safe_json_parse(parameters)
application.parameters = applicationParameters
return racf.applications.begin_create_or_update(resource_group_name, application_name, application) | Create a new managed application.
:param str resource_group_name:the desired resource group name
:param str application_name:the managed application name
:param str kind:the managed application kind. can be marketplace or servicecatalog
:param str plan_name:the managed application package plan name
:param str plan_publisher:the managed application package plan publisher
:param str plan_product:the managed application package plan product
:param str plan_version:the managed application package plan version
:param str tags:tags in 'a=b c' format | create_application | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def show_application(cmd, resource_group_name=None, application_name=None):
""" Gets a managed application.
:param str resource_group_name:the resource group name
:param str application_name:the managed application name
"""
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
return racf.applications.get(resource_group_name, application_name) | Gets a managed application.
:param str resource_group_name:the resource group name
:param str application_name:the managed application name | show_application | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def show_applicationdefinition(cmd, resource_group_name=None, application_definition_name=None):
""" Gets a managed application definition.
:param str resource_group_name:the resource group name
:param str application_definition_name:the managed application definition name
"""
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
return racf.application_definitions.get(resource_group_name, application_definition_name) | Gets a managed application definition.
:param str resource_group_name:the resource group name
:param str application_definition_name:the managed application definition name | show_applicationdefinition | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def create_or_update_applicationdefinition(cmd, resource_group_name,
application_definition_name,
lock_level, authorizations,
description, display_name,
package_file_uri=None, create_ui_definition=None,
main_template=None, location=None, deployment_mode=None, tags=None):
""" Create or update a new managed application definition.
:param str resource_group_name:the desired resource group name
:param str application_definition_name:the managed application definition name
:param str description:the managed application definition description
:param str display_name:the managed application definition display name
:param str package_file_uri:the managed application definition package file uri
:param str create_ui_definition:the managed application definition create ui definition
:param str main_template:the managed application definition main template
:param str tags:tags in 'a=b c' format
"""
ApplicationDefinition, ApplicationAuthorization, ApplicationDeploymentPolicy = \
cmd.get_models('ApplicationDefinition',
'ApplicationAuthorization',
'ApplicationDeploymentPolicy')
if not package_file_uri and not create_ui_definition and not main_template:
raise CLIError('usage error: --package-file-uri <url> | --create-ui-definition --main-template')
if package_file_uri:
if create_ui_definition or main_template:
raise CLIError('usage error: must not specify --create-ui-definition --main-template')
if not package_file_uri:
if not create_ui_definition or not main_template:
raise CLIError('usage error: must specify --create-ui-definition --main-template')
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
rcf = _resource_client_factory(cmd.cli_ctx)
if not location:
location = rcf.resource_groups.get(resource_group_name).location
authorizations = authorizations or []
applicationAuthList = []
for name_value in authorizations:
# split at the first ':'; neither principalId nor roleDefinitionId should contain a ':'
principalId, roleDefinitionId = name_value.split(':', 1)
applicationAuth = ApplicationAuthorization(
principal_id=principalId,
role_definition_id=roleDefinitionId)
applicationAuthList.append(applicationAuth)
deployment_policy = ApplicationDeploymentPolicy(deployment_mode=deployment_mode) if deployment_mode is not None else None
applicationDef = ApplicationDefinition(lock_level=lock_level,
authorizations=applicationAuthList,
package_file_uri=package_file_uri)
applicationDef.display_name = display_name
applicationDef.description = description
applicationDef.location = location
applicationDef.package_file_uri = package_file_uri
applicationDef.create_ui_definition = create_ui_definition
applicationDef.main_template = main_template
applicationDef.tags = tags
applicationDef.deployment_policy = deployment_policy
return racf.application_definitions.begin_create_or_update(resource_group_name,
application_definition_name, applicationDef) | Create or update a new managed application definition.
    :param str resource_group_name: the desired resource group name
    :param str application_definition_name: the managed application definition name
    :param str lock_level: the managed application lock restriction level
    :param str authorizations: space-separated authorization pairs in the format <principalId>:<roleDefinitionId>
    :param str description: the managed application definition description
    :param str display_name: the managed application definition display name
    :param str package_file_uri: the managed application definition package file uri
    :param str create_ui_definition: the managed application definition create ui definition
    :param str main_template: the managed application definition main template
    :param str tags: tags in 'a=b c' format | create_or_update_applicationdefinition | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
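The authorization strings are split at the first ':' only, so role definition ids that contain further colons survive intact. Below is a minimal standalone sketch of that parsing; the dataclass is a stand-in for the SDK's generated ApplicationAuthorization model, not the real class.

from dataclasses import dataclass

@dataclass
class ApplicationAuthorization:
    principal_id: str
    role_definition_id: str

def parse_authorizations(pairs):
    # split at the first ':' only, mirroring the command code above
    result = []
    for pair in pairs:
        principal_id, role_definition_id = pair.split(':', 1)
        result.append(ApplicationAuthorization(principal_id, role_definition_id))
    return result

print(parse_authorizations(
    ['11111111-2222-3333-4444-555555555555:8e3af657-a8ff-443c-a75c-2fe8c4bcb635']))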
def _get_parsed_resource_ids(resource_ids):
"""
    Returns a generator of parsed resource ids. Raises a CLIError when any resource id is invalid.
"""
if not resource_ids:
return None
for rid in resource_ids:
if not is_valid_resource_id(rid):
raise CLIError('az resource: error: argument --ids: invalid ResourceId value: \'%s\'' % rid)
    return ({'resource_id': rid} for rid in resource_ids) | Returns a generator of parsed resource ids. Raises a CLIError when any resource id is invalid. | _get_parsed_resource_ids | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
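A self-contained sketch of the validate-then-yield pattern above. The real command uses is_valid_resource_id from the Azure SDK helpers, which accepts more id shapes; the regex below is only an assumed approximation so the example runs on its own.

import re

# assumed approximation of is_valid_resource_id; the real helper is broader
_RID = re.compile(
    r'^/subscriptions/[^/]+/resourceGroups/[^/]+/providers/[^/]+(/[^/]+)+$',
    re.IGNORECASE)

def parsed_resource_ids(resource_ids):
    if not resource_ids:
        return None
    for rid in resource_ids:
        if not _RID.match(rid):
            raise ValueError("invalid ResourceId value: '%s'" % rid)
    # validation happens eagerly; parsing is deferred to the generator
    return ({'resource_id': rid} for rid in resource_ids)

ids = ['/subscriptions/sub1/resourceGroups/rg1/providers/'
       'Microsoft.Storage/storageAccounts/acct1']
print(list(parsed_resource_ids(ids)))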
def delete_resource(cmd, resource_ids=None, resource_group_name=None,
resource_provider_namespace=None, parent_resource_path=None, resource_type=None,
resource_name=None, api_version=None, latest_include_preview=False):
"""
Deletes the given resource(s).
    This function allows deletion of resources whose ids have dependencies on one another.
    This is handled by making multiple passes through the given ids.
"""
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
to_be_deleted = [(_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview), id_dict)
for id_dict in parsed_ids]
results = []
from azure.core.exceptions import HttpResponseError
while to_be_deleted:
logger.debug("Start new loop to delete resources.")
operations = []
failed_to_delete = []
for rsrc_utils, id_dict in to_be_deleted:
try:
operations.append(rsrc_utils.delete())
resource = _build_resource_id(**id_dict) or resource_name
logger.debug("deleting %s", resource)
except HttpResponseError as e:
# request to delete failed, add parsed id dict back to queue
id_dict['exception'] = str(e)
failed_to_delete.append((rsrc_utils, id_dict))
to_be_deleted = failed_to_delete
        # stop if no delete request could be issued in this pass
if not operations:
break
        # wait for all issued operations to complete before the next pass
for operation in operations:
results.append(operation.result())
if to_be_deleted:
error_msg_builder = ['Some resources failed to be deleted (run with `--verbose` for more information):']
for _, id_dict in to_be_deleted:
logger.info(id_dict['exception'])
resource_id = _build_resource_id(**id_dict) or id_dict['resource_id']
error_msg_builder.append(resource_id)
raise CLIError(os.linesep.join(error_msg_builder))
return _single_or_collection(results) | Deletes the given resource(s).
    This function allows deletion of resources whose ids have dependencies on one another.
    This is handled by making multiple passes through the given ids. | delete_resource | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
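The retry loop makes repeated passes, re-queuing ids whose delete request failed and giving up once a full pass makes no progress. A generic sketch of that strategy, assuming a hypothetical delete_fn callable that raises while dependents of its argument still exist:

def delete_with_dependencies(items, delete_fn):
    """Retry failed deletes in passes until a pass makes no progress."""
    pending = list(items)
    while pending:
        failed = []
        for item in pending:
            try:
                delete_fn(item)                 # may raise while dependents still exist
            except Exception as exc:            # the real code catches HttpResponseError
                failed.append((item, exc))
        if len(failed) == len(pending):         # full pass with no progress: give up
            raise RuntimeError('failed to delete: %r' % [i for i, _ in failed])
        pending = [item for item, _ in failed]  # re-queue only the failures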
def tag_resource(cmd, tags, resource_ids=None, resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, api_version=None,
is_incremental=None, latest_include_preview=False):
""" Updates the tags on an existing resource. To clear tags, specify the --tag option
without anything else. """
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
return _single_or_collection([LongRunningOperation(cmd.cli_ctx)(
_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview).tag(
tags, is_incremental)) for id_dict in parsed_ids]) | Updates the tags on an existing resource. To clear tags, specify the --tag option
without anything else. | tag_resource | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
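How --is-incremental changes the outcome is easiest to see in isolation. This sketch of the merge semantics is an assumption based on the flag's documented behavior: incremental overlays the new tags onto the existing set, otherwise the new set replaces the old outright.

def merge_tags(existing, new, is_incremental):
    if not is_incremental:
        return dict(new)            # replace mode: the new set wins outright
    merged = dict(existing or {})
    merged.update(new)              # incremental mode: overlay onto existing tags
    return merged

print(merge_tags({'env': 'dev', 'team': 'a'}, {'env': 'prod'}, is_incremental=True))
# -> {'env': 'prod', 'team': 'a'}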
def invoke_resource_action(cmd, action, request_body=None, resource_ids=None,
resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None,
api_version=None, latest_include_preview=False):
""" Invokes the provided action on an existing resource."""
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
return _single_or_collection(
[_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview).invoke_action(
action, request_body) for id_dict in parsed_ids]) | Invokes the provided action on an existing resource. | invoke_resource_action | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
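At the REST level an action is a POST against the resource URI. The URL layout below follows the general ARM convention and is an illustration, not code taken from the CLI:

def action_url(resource_id, action, api_version):
    # POST to this URL (with a bearer token) triggers the action
    return 'https://management.azure.com{}/{}?api-version={}'.format(
        resource_id, action, api_version)

print(action_url(
    '/subscriptions/sub1/resourceGroups/rg1/providers/'
    'Microsoft.Compute/virtualMachines/vm1',
    'restart', '2023-03-01'))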
def get_deployment_operations(client, resource_group_name, deployment_name, operation_ids):
"""get a deployment's operation."""
result = []
for op_id in operation_ids:
dep = client.get(resource_group_name, deployment_name, op_id)
result.append(dep)
    return result | Gets a deployment's operations. | get_deployment_operations | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
def move_resource(cmd, ids, destination_group, destination_subscription_id=None):
"""Moves resources from one resource group to another(can be under different subscription)
:param ids: the space-separated resource ids to be moved
:param destination_group: the destination resource group name
:param destination_subscription_id: the destination subscription identifier
"""
# verify all resource ids are valid and under the same group
resources = []
for i in ids:
if is_valid_resource_id(i):
resources.append(parse_resource_id(i))
else:
raise CLIError('Invalid id "{}", as it has no group or subscription field'.format(i))
if len({r['subscription'] for r in resources}) > 1:
raise CLIError('All resources should be under the same subscription')
if len({r['resource_group'] for r in resources}) > 1:
raise CLIError('All resources should be under the same group')
rcf = _resource_client_factory(cmd.cli_ctx)
default_subscription_id = get_subscription_id(cmd.cli_ctx)
target = _build_resource_id(subscription=(destination_subscription_id or default_subscription_id),
resource_group=destination_group)
ResourcesMoveInfo = cmd.get_models('ResourcesMoveInfo')
resources_move_info = ResourcesMoveInfo(resources=ids, target_resource_group=target)
    return rcf.resources.begin_move_resources(resources[0]['resource_group'], parameters=resources_move_info) | Moves resources from one resource group to another (can be under a different subscription)
:param ids: the space-separated resource ids to be moved
:param destination_group: the destination resource group name
:param destination_subscription_id: the destination subscription identifier | move_resource | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
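The pre-move validation boils down to parsing each id and checking that the subscription and resource group sets each have exactly one member. A standalone sketch; parse_rid is a hypothetical stand-in for parse_resource_id and assumes well-formed ids:

def parse_rid(rid):
    # hypothetical stand-in for parse_resource_id; assumes a well-formed id
    parts = rid.split('/')
    return {'subscription': parts[2], 'resource_group': parts[4]}

def validate_same_group(ids):
    resources = [parse_rid(i) for i in ids]
    if len({r['subscription'] for r in resources}) > 1:
        raise ValueError('All resources should be under the same subscription')
    if len({r['resource_group'] for r in resources}) > 1:
        raise ValueError('All resources should be under the same group')
    return resources

print(validate_same_group([
    '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Network/publicIPAddresses/ip1',
    '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/vnet1',
]))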
def create_policy_assignment(cmd, policy=None, policy_set_definition=None,
name=None, display_name=None, params=None,
resource_group_name=None, scope=None, sku=None,
not_scopes=None, location=None, assign_identity=None,
mi_system_assigned=None, mi_user_assigned=None,
identity_scope=None, identity_role='Contributor', enforcement_mode='Default',
description=None):
"""Creates a policy assignment
:param not_scopes: Space-separated scopes where the policy assignment does not apply.
"""
if bool(policy) == bool(policy_set_definition):
raise ArgumentUsageError('usage error: --policy NAME_OR_ID | '
'--policy-set-definition NAME_OR_ID')
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
subscription_id = get_subscription_id(cmd.cli_ctx)
scope = _build_policy_scope(subscription_id, resource_group_name, scope)
policy_id = _resolve_policy_id(cmd, policy, policy_set_definition, policy_client)
params = _load_file_string_or_uri(params, 'params', False)
PolicyAssignment = cmd.get_models('PolicyAssignment')
assignment = PolicyAssignment(display_name=display_name, policy_definition_id=policy_id, scope=scope, enforcement_mode=enforcement_mode, description=description)
assignment.parameters = params if params else None
if cmd.supported_api_version(min_api='2017-06-01-preview'):
if not_scopes:
kwargs_list = []
for id_arg in not_scopes.split(' '):
id_parts = parse_resource_id(id_arg)
if id_parts.get('subscription') or _is_management_group_scope(id_arg):
kwargs_list.append(id_arg)
else:
raise InvalidArgumentValueError("Invalid resource ID value in --not-scopes: '%s'" % id_arg)
assignment.not_scopes = kwargs_list
identities = None
if cmd.supported_api_version(min_api='2018-05-01'):
if location:
assignment.location = location
if mi_system_assigned is not None or assign_identity is not None:
identities = [MSI_LOCAL_ID]
elif mi_user_assigned is not None:
identities = [mi_user_assigned]
identity = None
if identities is not None:
identity = _build_identities_info(cmd, identities, resource_group_name)
assignment.identity = identity
if name is None:
name = (base64.urlsafe_b64encode(uuid.uuid4().bytes).decode())[:-2]
createdAssignment = policy_client.policy_assignments.create(scope, name, assignment)
# Create the identity's role assignment if requested
if identities is not None and identity_scope:
from azure.cli.core.commands.arm import assign_identity as _assign_identity_helper
_assign_identity_helper(cmd.cli_ctx, lambda: createdAssignment, lambda resource: createdAssignment, identity_role, identity_scope)
return createdAssignment | Creates a policy assignment
:param not_scopes: Space-separated scopes where the policy assignment does not apply. | create_policy_assignment | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
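When --name is omitted, the assignment name is derived from a random UUID exactly as in the expression above, shown standalone here. Base64 of the 16 UUID bytes is 24 characters ending in '==' padding, so stripping the last two leaves a 22-character URL-safe name:

import base64
import uuid

name = base64.urlsafe_b64encode(uuid.uuid4().bytes).decode()[:-2]
print(name, len(name))  # a 22-character URL-safe string, e.g. 'pKrS4nYpT0eC9f0Gm2nqFg 22'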
def update_policy_assignment(cmd, name=None, display_name=None, params=None,
resource_group_name=None, scope=None, sku=None,
not_scopes=None, enforcement_mode=None, description=None):
"""Updates a policy assignment
:param not_scopes: Space-separated scopes where the policy assignment does not apply.
"""
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
subscription_id = get_subscription_id(cmd.cli_ctx)
scope = _build_policy_scope(subscription_id, resource_group_name, scope)
params = _load_file_string_or_uri(params, 'params', False)
existing_assignment = policy_client.policy_assignments.get(scope, name)
PolicyAssignment = cmd.get_models('PolicyAssignment')
assignment = PolicyAssignment(
display_name=display_name if display_name is not None else existing_assignment.display_name,
policy_definition_id=existing_assignment.policy_definition_id,
scope=existing_assignment.scope,
enforcement_mode=enforcement_mode if enforcement_mode is not None else existing_assignment.enforcement_mode,
metadata=existing_assignment.metadata,
parameters=params if params is not None else existing_assignment.parameters,
description=description if description is not None else existing_assignment.description)
if cmd.supported_api_version(min_api='2017-06-01-preview'):
kwargs_list = existing_assignment.not_scopes
if not_scopes:
kwargs_list = []
for id_arg in not_scopes.split(' '):
id_parts = parse_resource_id(id_arg)
if id_parts.get('subscription') or _is_management_group_scope(id_arg):
kwargs_list.append(id_arg)
else:
raise InvalidArgumentValueError("Invalid resource ID value in --not-scopes: '%s'" % id_arg)
assignment.not_scopes = kwargs_list
if cmd.supported_api_version(min_api='2018-05-01'):
assignment.location = existing_assignment.location
assignment.identity = existing_assignment.identity
if cmd.supported_api_version(min_api='2020-09-01'):
assignment.non_compliance_messages = existing_assignment.non_compliance_messages
return policy_client.policy_assignments.create(scope, name, assignment) | Updates a policy assignment
:param not_scopes: Space-separated scopes where the policy assignment does not apply. | update_policy_assignment | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/resource/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/resource/custom.py | MIT |
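The update follows a read-modify-write pattern: fetch the existing assignment, keep every field the caller left as None, and send the merged object back. A generic sketch of that overlay; the field names are illustrative only:

def overlay(existing, updates):
    merged = dict(existing)
    for key, value in updates.items():
        if value is not None:       # None means "keep the current value"
            merged[key] = value
    return merged

current = {'display_name': 'audit-vms', 'enforcement_mode': 'Default', 'description': None}
print(overlay(current, {'enforcement_mode': 'DoNotEnforce', 'display_name': None}))
# -> {'display_name': 'audit-vms', 'enforcement_mode': 'DoNotEnforce', 'description': None}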