code
stringlengths 26
870k
| docstring
stringlengths 1
65.6k
| func_name
stringlengths 1
194
| language
stringclasses 1
value | repo
stringlengths 8
68
| path
stringlengths 5
194
| url
stringlengths 46
254
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
def _get_managed_db_resource_id(
cli_ctx,
resource_group_name,
managed_instance_name,
database_name,
subscription_id=None):
'''
Gets the Managed db resource id in this Azure environment.
'''
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.core.tools import resource_id
return resource_id(
subscription=subscription_id if subscription_id else get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Sql', type='managedInstances',
name=managed_instance_name,
child_type_1='databases',
child_name_1=database_name) | Gets the Managed db resource id in this Azure environment. | _get_managed_db_resource_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _to_filetimeutc(dateTime):
'''
Changes given datetime to filetimeutc string
'''
NET_epoch = datetime(1601, 1, 1)
UNIX_epoch = datetime(1970, 1, 1)
epoch_delta = UNIX_epoch - NET_epoch
log_time = parse(dateTime)
net_ts = calendar.timegm((log_time + epoch_delta).timetuple())
# units of seconds since NET epoch
filetime_utc_ts = net_ts * (10**7) + log_time.microsecond * 10
return filetime_utc_ts | Changes given datetime to filetimeutc string | _to_filetimeutc | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_managed_dropped_db_resource_id(
cli_ctx,
resource_group_name,
managed_instance_name,
database_name,
deleted_time,
subscription_id=None):
'''
Gets the Managed db resource id in this Azure environment.
'''
from urllib.parse import quote
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.core.tools import resource_id
return (resource_id(
subscription=subscription_id if subscription_id else get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Sql', type='managedInstances',
name=managed_instance_name,
child_type_1='restorableDroppedDatabases',
child_name_1='{},{}'.format(
quote(database_name),
_to_filetimeutc(deleted_time)))) | Gets the Managed db resource id in this Azure environment. | _get_managed_dropped_db_resource_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_managed_instance_resource_id(
cli_ctx,
resource_group_name,
managed_instance_name,
subscription_id=None):
'''
Gets managed instance resource id in this Azure environment.
'''
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.core.tools import resource_id
return (resource_id(
subscription=subscription_id if subscription_id else get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Sql', type='managedInstances',
name=managed_instance_name)) | Gets managed instance resource id in this Azure environment. | _get_managed_instance_resource_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_managed_instance_pool_resource_id(
cli_ctx,
resource_group_name,
instance_pool_name,
subscription_id=None):
'''
Gets instance pool resource id in this Azure environment.
'''
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.core.tools import resource_id
if instance_pool_name:
return (resource_id(
subscription=subscription_id if subscription_id else get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Sql', type='instancePools',
name=instance_pool_name))
return instance_pool_name | Gets instance pool resource id in this Azure environment. | _get_managed_instance_pool_resource_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_show_conn_str(
cmd,
client_provider,
database_name='<databasename>',
server_name='<servername>',
auth_type=ClientAuthenticationType.sql_password.value):
'''
Builds a SQL connection string for a specified client provider.
'''
server_suffix = _get_server_dns_suffx(cmd.cli_ctx)
conn_str_props = {
'server': server_name,
'server_fqdn': '{}{}'.format(server_name, server_suffix),
'server_suffix': server_suffix,
'db': database_name
}
formats = {
ClientType.ado_net.value: {
ClientAuthenticationType.sql_password.value:
'Server=tcp:{server_fqdn},1433;Initial Catalog={db};Persist Security Info=False;'
'User ID=<username>;Password=<password>;MultipleActiveResultSets=False;Encrypt=true;'
'TrustServerCertificate=False;Connection Timeout=30;',
ClientAuthenticationType.active_directory_password.value:
'Server=tcp:{server_fqdn},1433;Initial Catalog={db};Persist Security Info=False;'
'User ID=<username>;Password=<password>;MultipleActiveResultSets=False;Encrypt=true;'
'TrustServerCertificate=False;Authentication="Active Directory Password"',
ClientAuthenticationType.active_directory_integrated.value:
'Server=tcp:{server_fqdn},1433;Initial Catalog={db};Persist Security Info=False;'
'User ID=<username>;MultipleActiveResultSets=False;Encrypt=true;'
'TrustServerCertificate=False;Authentication="Active Directory Integrated"'
},
ClientType.sqlcmd.value: {
ClientAuthenticationType.sql_password.value:
'sqlcmd -S tcp:{server_fqdn},1433 -d {db} -U <username> -P <password> -N -l 30',
ClientAuthenticationType.active_directory_password.value:
'sqlcmd -S tcp:{server_fqdn},1433 -d {db} -U <username> -P <password> -G -N -l 30',
ClientAuthenticationType.active_directory_integrated.value:
'sqlcmd -S tcp:{server_fqdn},1433 -d {db} -G -N -l 30',
},
ClientType.jdbc.value: {
ClientAuthenticationType.sql_password.value:
'jdbc:sqlserver://{server_fqdn}:1433;database={db};user=<username>@{server};'
'password=<password>;encrypt=true;trustServerCertificate=false;'
'hostNameInCertificate=*{server_suffix};loginTimeout=30',
ClientAuthenticationType.active_directory_password.value:
'jdbc:sqlserver://{server_fqdn}:1433;database={db};user=<username>;'
'password=<password>;encrypt=true;trustServerCertificate=false;'
'hostNameInCertificate=*{server_suffix};loginTimeout=30;'
'authentication=ActiveDirectoryPassword',
ClientAuthenticationType.active_directory_integrated.value:
'jdbc:sqlserver://{server_fqdn}:1433;database={db};'
'encrypt=true;trustServerCertificate=false;'
'hostNameInCertificate=*{server_suffix};loginTimeout=30;'
'authentication=ActiveDirectoryIntegrated',
},
ClientType.php_pdo.value: {
# pylint: disable=line-too-long
ClientAuthenticationType.sql_password.value:
'$conn = new PDO("sqlsrv:server = tcp:{server_fqdn},1433; Database = {db}; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");',
ClientAuthenticationType.active_directory_password.value:
CLIError('PHP Data Object (PDO) driver only supports SQL Password authentication.'),
ClientAuthenticationType.active_directory_integrated.value:
CLIError('PHP Data Object (PDO) driver only supports SQL Password authentication.'),
},
ClientType.php.value: {
# pylint: disable=line-too-long
ClientAuthenticationType.sql_password.value:
'$connectionOptions = array("UID"=>"<username>@{server}", "PWD"=>"<password>", "Database"=>{db}, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:{server_fqdn},1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);',
ClientAuthenticationType.active_directory_password.value:
CLIError('PHP sqlsrv driver only supports SQL Password authentication.'),
ClientAuthenticationType.active_directory_integrated.value:
CLIError('PHP sqlsrv driver only supports SQL Password authentication.'),
},
ClientType.odbc.value: {
ClientAuthenticationType.sql_password.value:
'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{server_fqdn},1433;'
'Database={db};Uid=<username>@{server};Pwd=<password>;Encrypt=yes;'
'TrustServerCertificate=no;',
ClientAuthenticationType.active_directory_password.value:
'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{server_fqdn},1433;'
'Database={db};Uid=<username>@{server};Pwd=<password>;Encrypt=yes;'
'TrustServerCertificate=no;Authentication=ActiveDirectoryPassword',
ClientAuthenticationType.active_directory_integrated.value:
'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{server_fqdn},1433;'
'Database={db};Encrypt=yes;TrustServerCertificate=no;'
'Authentication=ActiveDirectoryIntegrated',
}
}
f = formats[client_provider][auth_type]
if isinstance(f, Exception):
# Error
raise f
# Success
return f.format(**conn_str_props) | Builds a SQL connection string for a specified client provider. | db_show_conn_str | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _find_db_sku_from_capabilities(cli_ctx, location, sku, allow_reset_family=False, compute_model=None):
'''
Given a requested sku which may have some properties filled in
(e.g. tier and capacity), finds the canonical matching sku
from the given location's capabilities.
'''
logger.debug('_find_db_sku_from_capabilities input: %s', sku)
if sku.name:
# User specified sku.name, so nothing else needs to be resolved.
logger.debug('_find_db_sku_from_capabilities return sku as is')
return sku
if not _any_sku_values_specified(sku):
# User did not request any properties of sku, so just wipe it out.
# Server side will pick a default.
logger.debug('_find_db_sku_from_capabilities return None')
return None
# Some properties of sku are specified, but not name. Use the requested properties
# to find a matching capability and copy the sku from there.
# Get location capability
loc_capability = _get_location_capability(cli_ctx, location, CapabilityGroup.SUPPORTED_EDITIONS)
# Get default server version capability
server_version_capability = _get_default_server_version(loc_capability)
# Find edition capability, based on requested sku properties
edition_capability = _find_edition_capability(
sku, server_version_capability.supported_editions)
# Find performance level capability, based on requested sku properties
performance_level_capability = _find_performance_level_capability(
sku, edition_capability.supported_service_level_objectives,
allow_reset_family=allow_reset_family,
compute_model=compute_model)
# Ideally, we would return the sku object from capability (`return performance_level_capability.sku`).
# However not all db create modes support using `capacity` to find slo, so instead we put
# the slo name into the sku name property.
result = Sku(name=performance_level_capability.name)
logger.debug('_find_db_sku_from_capabilities return: %s', result)
return result | Given a requested sku which may have some properties filled in
(e.g. tier and capacity), finds the canonical matching sku
from the given location's capabilities. | _find_db_sku_from_capabilities | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _validate_elastic_pool_id(
cli_ctx,
elastic_pool_id,
server_name,
resource_group_name):
'''
Validates elastic_pool_id is either None or a valid resource id.
If elastic_pool_id has a value but it is not a valid resource id,
then assume that user specified elastic pool name which we need to
convert to elastic pool id using the provided server & resource group
name.
Returns the elastic_pool_id, which may have been updated and may be None.
'''
from azure.mgmt.core.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if elastic_pool_id and not is_valid_resource_id(elastic_pool_id):
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Sql',
type='servers',
name=server_name,
child_type_1='elasticPools',
child_name_1=elastic_pool_id)
return elastic_pool_id | Validates elastic_pool_id is either None or a valid resource id.
If elastic_pool_id has a value but it is not a valid resource id,
then assume that user specified elastic pool name which we need to
convert to elastic pool id using the provided server & resource group
name.
Returns the elastic_pool_id, which may have been updated and may be None. | _validate_elastic_pool_id | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def restorable_databases_get(
client,
server_name,
resource_group_name,
restorable_dropped_database_id,
expand_keys=False,
keys_filter=None
):
'''
Gets a restorable dropped database
'''
expand = None
if expand_keys and keys_filter is not None:
expand = "keys($filter=pointInTime('%s'))" % keys_filter
elif expand_keys:
expand = 'keys'
return client.get(resource_group_name, server_name, restorable_dropped_database_id, expand) | Gets a restorable dropped database | restorable_databases_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def recoverable_databases_get(
client,
database_name,
server_name,
resource_group_name,
expand_keys=False,
keys_filter=None):
'''
Gets a recoverable database
'''
expand = None
if expand_keys and keys_filter is not None:
expand = "keys($filter=pointInTime('%s'))" % keys_filter
elif expand_keys:
expand = 'keys'
return client.get(resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
expand=expand) | Gets a recoverable database | recoverable_databases_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_get(
client,
database_name,
server_name,
resource_group_name,
expand_keys=False,
keys_filter=None):
'''
Gets a database
'''
expand = None
if expand_keys and keys_filter is not None:
expand = "keys($filter=pointInTime('%s'))" % keys_filter
elif expand_keys:
expand = 'keys'
return client.get(resource_group_name, server_name, database_name, expand) | Gets a database | db_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _db_dw_create(
cli_ctx,
client,
source_db,
dest_db,
no_wait,
sku=None,
secondary_type=None,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
encryption_protector_auto_rotation=None,
**kwargs):
'''
Creates a DB (with any create mode) or DW.
Handles common concerns such as setting location and sku properties.
'''
# This check needs to be here, because server side logic of
# finding a default sku for Serverless is not yet implemented.
if kwargs['compute_model'] == ComputeModelType.serverless:
if not sku or not sku.tier or not sku.family or not sku.capacity:
raise CLIError('When creating a severless database, please pass in edition, '
'family, and capacity parameters through -e -f -c')
# Determine server location
kwargs['location'] = _get_server_location(
cli_ctx,
server_name=dest_db.server_name,
resource_group_name=dest_db.resource_group_name)
# Set create mode properties
if source_db:
kwargs['source_database_id'] = source_db.id()
if secondary_type:
kwargs['secondary_type'] = secondary_type
# If sku.name is not specified, resolve the requested sku name
# using capabilities.
kwargs['sku'] = _find_db_sku_from_capabilities(
cli_ctx,
kwargs['location'],
sku,
compute_model=kwargs['compute_model'])
# Validate elastic pool id
kwargs['elastic_pool_id'] = _validate_elastic_pool_id(
cli_ctx,
kwargs['elastic_pool_id'],
dest_db.server_name,
dest_db.resource_group_name)
# Expand maintenance configuration id if needed
kwargs['maintenance_configuration_id'] = _complete_maintenance_configuration_id(
cli_ctx,
kwargs['maintenance_configuration_id'])
# Per DB CMK params
if assign_identity:
kwargs['identity'] = _get_database_identity(user_assigned_identity_id)
kwargs['keys'] = _get_database_keys(keys)
kwargs['encryption_protector'] = encryption_protector
kwargs['encryption_protector_auto_rotation'] = encryption_protector_auto_rotation
# Create
return sdk_no_wait(no_wait, client.begin_create_or_update,
server_name=dest_db.server_name,
resource_group_name=dest_db.resource_group_name,
database_name=dest_db.database_name,
parameters=kwargs) | Creates a DB (with any create mode) or DW.
Handles common concerns such as setting location and sku properties. | _db_dw_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_create(
cmd,
client,
database_name,
server_name,
resource_group_name,
no_wait=False,
yes=None,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
encryption_protector_auto_rotation=None,
**kwargs):
'''
Creates a DB (with 'Default' create mode.)
'''
# Check backup storage redundancy configurations
location = _get_server_location(
cmd.cli_ctx,
server_name=server_name,
resource_group_name=resource_group_name)
if not yes and _should_show_backup_storage_redundancy_warnings(location):
if not kwargs['requested_backup_storage_redundancy']:
if not _confirm_backup_storage_redundancy_take_geo_warning():
return None
if kwargs['requested_backup_storage_redundancy'] == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
return _db_dw_create(
cmd.cli_ctx,
client,
None,
DatabaseIdentity(cmd.cli_ctx, database_name, server_name, resource_group_name),
no_wait,
assign_identity=assign_identity,
user_assigned_identity_id=user_assigned_identity_id,
keys=keys,
encryption_protector=encryption_protector,
encryption_protector_auto_rotation=encryption_protector_auto_rotation,
**kwargs) | Creates a DB (with 'Default' create mode.) | db_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _use_source_db_tier(
client,
database_name,
server_name,
resource_group_name,
kwargs):
'''
Gets the specified db and copies its sku tier into kwargs.
'''
if _any_sku_values_specified(kwargs['sku']):
source = client.get(resource_group_name, server_name, database_name)
kwargs['sku'].tier = source.sku.tier | Gets the specified db and copies its sku tier into kwargs. | _use_source_db_tier | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_copy(
cmd,
client,
database_name,
server_name,
resource_group_name,
dest_name,
dest_server_name=None,
dest_resource_group_name=None,
no_wait=False,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
encryption_protector_auto_rotation=None,
**kwargs):
'''
Copies a DB (i.e. create with 'Copy' create mode.)
'''
# Determine optional values
dest_server_name = dest_server_name or server_name
dest_resource_group_name = dest_resource_group_name or resource_group_name
# Set create mode
kwargs['create_mode'] = 'Copy'
# Some sku properties may be filled in from the command line. However
# the sku tier must be the same as the source tier, so it is grabbed
# from the source db instead of from command line.
_use_source_db_tier(
client,
database_name,
server_name,
resource_group_name,
kwargs)
# Check backup storage redundancy configurations
location = _get_server_location(cmd.cli_ctx,
server_name=dest_server_name,
resource_group_name=dest_resource_group_name)
if _should_show_backup_storage_redundancy_warnings(location):
if not kwargs['requested_backup_storage_redundancy']:
_backup_storage_redundancy_take_source_warning()
if kwargs['requested_backup_storage_redundancy'] == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
return _db_dw_create(
cmd.cli_ctx,
client,
DatabaseIdentity(cmd.cli_ctx, database_name, server_name, resource_group_name),
DatabaseIdentity(cmd.cli_ctx, dest_name, dest_server_name, dest_resource_group_name),
no_wait,
assign_identity=assign_identity,
user_assigned_identity_id=user_assigned_identity_id,
keys=keys,
encryption_protector=encryption_protector,
encryption_protector_auto_rotation=encryption_protector_auto_rotation,
**kwargs) | Copies a DB (i.e. create with 'Copy' create mode.) | db_copy | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_create_replica(
cmd,
client,
database_name,
server_name,
resource_group_name,
partner_server_name,
partner_database_name=None,
partner_resource_group_name=None,
secondary_type=None,
no_wait=False,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
encryption_protector_auto_rotation=None,
**kwargs):
'''
Creates a secondary replica DB (i.e. create with 'Secondary' create mode.)
Custom function makes create mode more convenient.
'''
# Determine optional values
partner_resource_group_name = partner_resource_group_name or resource_group_name
partner_database_name = partner_database_name or database_name
# Set create mode
kwargs['create_mode'] = CreateMode.SECONDARY
# Some sku properties may be filled in from the command line. However
# the sku tier must be the same as the source tier, so it is grabbed
# from the source db instead of from command line.
_use_source_db_tier(
client,
database_name,
server_name,
resource_group_name,
kwargs)
# Check backup storage redundancy configurations
location = _get_server_location(cmd.cli_ctx,
server_name=partner_server_name,
resource_group_name=partner_resource_group_name)
if _should_show_backup_storage_redundancy_warnings(location):
if not kwargs['requested_backup_storage_redundancy']:
_backup_storage_redundancy_take_source_warning()
if kwargs['requested_backup_storage_redundancy'] == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
return _db_dw_create(
cmd.cli_ctx,
client,
DatabaseIdentity(cmd.cli_ctx, database_name, server_name, resource_group_name),
DatabaseIdentity(cmd.cli_ctx, partner_database_name, partner_server_name, partner_resource_group_name),
no_wait,
secondary_type=secondary_type,
assign_identity=assign_identity,
user_assigned_identity_id=user_assigned_identity_id,
keys=keys,
encryption_protector=encryption_protector,
encryption_protector_auto_rotation=encryption_protector_auto_rotation,
**kwargs) | Creates a secondary replica DB (i.e. create with 'Secondary' create mode.)
Custom function makes create mode more convenient. | db_create_replica | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_rename(
cmd,
client,
database_name,
server_name,
resource_group_name,
new_name,
**kwargs):
'''
Renames a DB.
'''
kwargs['id'] = DatabaseIdentity(
cmd.cli_ctx,
new_name,
server_name,
resource_group_name
).id()
client.rename(
resource_group_name,
server_name,
database_name,
parameters=kwargs)
return client.get(
resource_group_name,
server_name,
new_name) | Renames a DB. | db_rename | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_restore(
cmd,
client,
database_name,
server_name,
resource_group_name,
dest_name,
restore_point_in_time=None,
source_database_deletion_date=None,
no_wait=False,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
encryption_protector_auto_rotation=None,
**kwargs):
'''
Restores an existing or deleted DB (i.e. create with 'Restore'
or 'PointInTimeRestore' create mode.)
Custom function makes create mode more convenient.
'''
if not (restore_point_in_time or source_database_deletion_date):
raise CLIError('Either --time or --deleted-time must be specified.')
# Set create mode properties
is_deleted = source_database_deletion_date is not None
kwargs['restore_point_in_time'] = restore_point_in_time
kwargs['source_database_deletion_date'] = source_database_deletion_date
kwargs['create_mode'] = CreateMode.RESTORE if is_deleted else CreateMode.POINT_IN_TIME_RESTORE
# Check backup storage redundancy configurations
location = _get_server_location(cmd.cli_ctx, server_name=server_name, resource_group_name=resource_group_name)
if _should_show_backup_storage_redundancy_warnings(location):
if not kwargs['requested_backup_storage_redundancy']:
_backup_storage_redundancy_take_source_warning()
if kwargs['requested_backup_storage_redundancy'] == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
return _db_dw_create(
cmd.cli_ctx,
client,
DatabaseIdentity(cmd.cli_ctx, database_name, server_name, resource_group_name),
# Cross-server restore is not supported. So dest server/group must be the same as source.
DatabaseIdentity(cmd.cli_ctx, dest_name, server_name, resource_group_name),
no_wait,
assign_identity=assign_identity,
user_assigned_identity_id=user_assigned_identity_id,
keys=keys,
encryption_protector=encryption_protector,
encryption_protector_auto_rotation=encryption_protector_auto_rotation,
**kwargs) | Restores an existing or deleted DB (i.e. create with 'Restore'
or 'PointInTimeRestore' create mode.)
Custom function makes create mode more convenient. | db_restore | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_failover(
client,
database_name,
server_name,
resource_group_name,
allow_data_loss=False):
'''
Fails over a database by setting the specified database as the new primary.
Wrapper function which uses the server location so that the user doesn't
need to specify replication link id.
'''
# List replication links
links = list(client.list_by_database(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name))
if not links:
raise CLIError('The specified database has no replication links.')
# If a replica is primary, then it has 1 or more links (to its secondaries).
# If a replica is secondary, then it has exactly 1 link (to its primary).
primary_link = next((link for link in links if link.partner_role == FailoverGroupReplicationRole.PRIMARY), None)
if not primary_link:
# No link to a primary, so this must already be a primary. Do nothing.
return
# Choose which failover method to use
if allow_data_loss:
failover_func = client.begin_failover_allow_data_loss
else:
failover_func = client.begin_failover
# Execute failover from the primary to this database
return failover_func(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
link_id=primary_link.name) | Fails over a database by setting the specified database as the new primary.
Wrapper function which uses the server location so that the user doesn't
need to specify replication link id. | db_failover | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_list_capabilities(
client,
location,
edition=None,
service_objective=None,
dtu=None,
vcores=None,
show_details=None,
available=False):
'''
Gets database capabilities and optionally applies the specified filters.
'''
# Fixup parameters
if not show_details:
show_details = []
# Get capabilities tree from server
capabilities = client.list_by_location(location, CapabilityGroup.SUPPORTED_EDITIONS)
# Get subtree related to databases
editions = _get_default_server_version(capabilities).supported_editions
# Filter by edition
if edition:
editions = [e for e in editions if e.name.lower() == edition.lower()]
# Filter by service objective
if service_objective:
for e in editions:
e.supported_service_level_objectives = [
slo for slo in e.supported_service_level_objectives
if slo.name.lower() == service_objective.lower()]
# Filter by dtu
if dtu:
for e in editions:
e.supported_service_level_objectives = [
slo for slo in e.supported_service_level_objectives
if slo.performance_level.value == int(dtu) and
slo.performance_level.unit == PerformanceLevelUnit.DTU]
# Filter by vcores
if vcores:
for e in editions:
e.supported_service_level_objectives = [
slo for slo in e.supported_service_level_objectives
if slo.performance_level.value == int(vcores) and
slo.performance_level.unit == PerformanceLevelUnit.V_CORES]
# Filter by availability
if available:
editions = _filter_available(editions)
for e in editions:
e.supported_service_level_objectives = _filter_available(e.supported_service_level_objectives)
for slo in e.supported_service_level_objectives:
if slo.supported_max_sizes:
slo.supported_max_sizes = _filter_available(slo.supported_max_sizes)
# Remove editions with no service objectives (due to filters)
editions = [e for e in editions if e.supported_service_level_objectives]
# Optionally hide supported max sizes
if DatabaseCapabilitiesAdditionalDetails.max_size.value not in show_details:
for e in editions:
for slo in e.supported_service_level_objectives:
if slo.supported_max_sizes:
slo.supported_max_sizes = []
return editions | Gets database capabilities and optionally applies the specified filters. | db_list_capabilities | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_delete_replica_link(
client,
database_name,
server_name,
resource_group_name,
# Partner dbs must have the same name as one another
partner_server_name,
partner_resource_group_name=None,
# Base command code handles confirmation, but it passes '--yes' parameter to us if
# provided. We don't care about this parameter and it gets handled weirdly if we
# expliclty specify it with default value here (e.g. `yes=None` or `yes=True`), receiving
# it in kwargs seems to work.
**kwargs): # pylint: disable=unused-argument
'''
Deletes a replication link.
'''
# Determine optional values
partner_resource_group_name = partner_resource_group_name or resource_group_name
# Find the replication link
links = list(client.list_by_database(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name))
# The link doesn't tell us the partner resource group name, so we just have to count on
# partner server name being unique
link = next((link for link in links if link.partner_server == partner_server_name), None)
if not link:
# No link exists, nothing to be done
return
return client.begin_delete(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
link_id=link.name) | Deletes a replication link. | db_delete_replica_link | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_export(
client,
database_name,
server_name,
resource_group_name,
storage_key_type,
storage_key,
no_wait=False,
**kwargs):
'''
Exports a database to a bacpac file.
'''
storage_key = _pad_sas_key(storage_key_type, storage_key)
kwargs['storage_key_type'] = storage_key_type
kwargs['storage_key'] = storage_key
return sdk_no_wait(
no_wait,
client.begin_export,
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=kwargs) | Exports a database to a bacpac file. | db_export | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_import(
client,
database_name,
server_name,
resource_group_name,
storage_key_type,
storage_key,
no_wait=False,
**kwargs):
'''
Imports a bacpac file into an existing database.
'''
storage_key = _pad_sas_key(storage_key_type, storage_key)
kwargs['storage_key_type'] = storage_key_type
kwargs['storage_key'] = storage_key
return sdk_no_wait(
no_wait,
client.begin_import_method,
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=kwargs) | Imports a bacpac file into an existing database. | db_import | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _pad_sas_key(
storage_key_type,
storage_key):
'''
Import/Export API requires that "?" precede SAS key as an argument.
Adds ? prefix if it wasn't included.
'''
if storage_key_type.lower() == StorageKeyType.SHARED_ACCESS_KEY.value.lower(): # pylint: disable=no-member
if storage_key[0] != '?':
storage_key = '?' + storage_key
return storage_key | Import/Export API requires that "?" precede SAS key as an argument.
Adds ? prefix if it wasn't included. | _pad_sas_key | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_list(
client,
server_name,
resource_group_name,
elastic_pool_name=None):
'''
Lists databases in a server or elastic pool.
'''
if elastic_pool_name:
# List all databases in the elastic pool
return client.list_by_elastic_pool(
server_name=server_name,
resource_group_name=resource_group_name,
elastic_pool_name=elastic_pool_name)
# List all databases in the server
return client.list_by_server(resource_group_name=resource_group_name, server_name=server_name) | Lists databases in a server or elastic pool. | db_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_update( # pylint: disable=too-many-locals, too-many-branches
cmd,
instance,
server_name,
resource_group_name,
elastic_pool_id=None,
max_size_bytes=None,
service_objective=None,
zone_redundant=None,
tier=None,
family=None,
capacity=None,
read_scale=None,
high_availability_replica_count=None,
min_capacity=None,
auto_pause_delay=None,
compute_model=None,
requested_backup_storage_redundancy=None,
maintenance_configuration_id=None,
preferred_enclave_type=None,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
federated_client_id=None,
keys_to_remove=None,
encryption_protector_auto_rotation=None,
use_free_limit=None,
free_limit_exhaustion_behavior=None,
manual_cutover=None,
perform_cutover=None):
'''
Applies requested parameters to a db resource instance for a DB update.
'''
# Verify edition
if instance.sku.tier.lower() == DatabaseEdition.data_warehouse.value.lower(): # pylint: disable=no-member
raise CLIError('Azure SQL Data Warehouse can be updated with the command'
' `az sql dw update`.')
# Check backup storage redundancy configuration
location = _get_server_location(cmd.cli_ctx, server_name=server_name, resource_group_name=resource_group_name)
if _should_show_backup_storage_redundancy_warnings(location):
if requested_backup_storage_redundancy == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
#####
# Set sku-related properties
#####
# Verify that elastic_pool_name and requested_service_objective_name param values are not
# totally inconsistent. If elastic pool and service objective name are both specified, and
# they are inconsistent (i.e. service objective is not 'ElasticPool'), then the service
# actually ignores the value of service objective name (!!). We are trying to protect the CLI
# user from this unintuitive behavior.
if (elastic_pool_id and service_objective and
service_objective != ServiceObjectiveName.ELASTIC_POOL):
raise CLIError('If elastic pool is specified, service objective must be'
' unspecified or equal \'{}\'.'.format(
ServiceObjectiveName.ELASTIC_POOL))
# Update both elastic pool and sku. The service treats elastic pool and sku properties like PATCH,
# so if either of these properties is null then the service will keep the property unchanged -
# except if pool is null/empty and service objective is a standalone SLO value (e.g. 'S0',
# 'S1', etc), in which case the pool being null/empty is meaningful - it means remove from
# pool.
# Validate elastic pool id
instance.elastic_pool_id = _validate_elastic_pool_id(
cmd.cli_ctx,
elastic_pool_id,
server_name,
resource_group_name)
# Finding out requesting compute_model
if not compute_model:
compute_model = (
ComputeModelType.serverless if _is_serverless_slo(instance.sku.name)
else ComputeModelType.provisioned)
# Update sku
_db_elastic_pool_update_sku(
cmd,
instance,
service_objective,
tier,
family,
capacity,
find_sku_from_capabilities_func=_find_db_sku_from_capabilities,
compute_model=compute_model)
# TODO Temporary workaround for elastic pool sku name issue
if instance.elastic_pool_id:
instance.sku = None
#####
# Set other (non-sku related) properties
#####
if max_size_bytes:
instance.max_size_bytes = max_size_bytes
if zone_redundant is not None:
instance.zone_redundant = zone_redundant
if read_scale is not None:
instance.read_scale = read_scale
if high_availability_replica_count is not None:
instance.high_availability_replica_count = high_availability_replica_count
if preferred_enclave_type is not None:
instance.preferred_enclave_type = preferred_enclave_type
if manual_cutover is not None:
instance.manual_cutover = manual_cutover
if perform_cutover is not None:
instance.perform_cutover = perform_cutover
# Set storage_account_type even if storage_acount_type is None
# Otherwise, empty value defaults to current storage_account_type
# and will potentially conflict with a previously requested update
instance.requested_backup_storage_redundancy = requested_backup_storage_redundancy
instance.maintenance_configuration_id = _complete_maintenance_configuration_id(
cmd.cli_ctx,
maintenance_configuration_id)
#####
# Set other (serverless related) properties
#####
if min_capacity:
instance.min_capacity = min_capacity
if auto_pause_delay:
instance.auto_pause_delay = auto_pause_delay
#####
# Per DB CMK properties
#####
if assign_identity and (user_assigned_identity_id is not None):
instance.identity = _get_database_identity(user_assigned_identity_id)
if keys is not None or keys_to_remove is not None:
instance.keys = _get_database_keys_for_update(keys, keys_to_remove)
if encryption_protector is not None:
instance.encryption_protector = encryption_protector
if federated_client_id is not None:
instance.federated_client_id = federated_client_id
instance.availability_zone = None
if encryption_protector_auto_rotation is not None:
instance.encryption_protector_auto_rotation = encryption_protector_auto_rotation
if use_free_limit is not None:
instance.use_free_limit = use_free_limit
if free_limit_exhaustion_behavior:
instance.free_limit_exhaustion_behavior = free_limit_exhaustion_behavior
return instance | Applies requested parameters to a db resource instance for a DB update. | db_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _find_storage_account_resource_group(cli_ctx, name):
'''
Finds a storage account's resource group by querying ARM resource cache.
Why do we have to do this: so we know the resource group in order to later query the storage API
to determine the account's keys and endpoint. Why isn't this just a command line parameter:
because if it was a command line parameter then the customer would need to specify storage
resource group just to update some unrelated property, which is annoying and makes no sense to
the customer.
'''
storage_type = 'Microsoft.Storage/storageAccounts'
classic_storage_type = 'Microsoft.ClassicStorage/storageAccounts'
query = "name eq '{}' and (resourceType eq '{}' or resourceType eq '{}')".format(
name, storage_type, classic_storage_type)
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
resources = list(client.resources.list(filter=query))
if not resources:
raise CLIError("No storage account with name '{}' was found.".format(name))
if len(resources) > 1:
raise CLIError("Multiple storage accounts with name '{}' were found.".format(name))
if resources[0].type == classic_storage_type:
raise CLIError("The storage account with name '{}' is a classic storage account which is"
" not supported by this command. Use a non-classic storage account or"
" specify storage endpoint and key instead.".format(name))
# Split the uri and return just the resource group
return resources[0].id.split('/')[4] | Finds a storage account's resource group by querying ARM resource cache.
Why do we have to do this: so we know the resource group in order to later query the storage API
to determine the account's keys and endpoint. Why isn't this just a command line parameter:
because if it was a command line parameter then the customer would need to specify storage
resource group just to update some unrelated property, which is annoying and makes no sense to
the customer. | _find_storage_account_resource_group | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_storage_account_name(storage_endpoint):
'''
Determines storage account name from endpoint url string.
e.g. 'https://mystorage.blob.core.windows.net' -> 'mystorage'
'''
from urllib.parse import urlparse
return urlparse(storage_endpoint).netloc.split('.')[0] | Determines storage account name from endpoint url string.
e.g. 'https://mystorage.blob.core.windows.net' -> 'mystorage' | _get_storage_account_name | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_storage_endpoint(
cli_ctx,
storage_account,
resource_group_name):
'''
Gets storage account endpoint by querying storage ARM API.
'''
from azure.mgmt.storage import StorageManagementClient
# Get storage account
client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
account = client.storage_accounts.get_properties(
resource_group_name=resource_group_name,
account_name=storage_account)
# Get endpoint
# pylint: disable=no-member
endpoints = account.primary_endpoints
try:
return endpoints.blob
except AttributeError:
raise CLIError("The storage account with name '{}' (id '{}') has no blob endpoint. Use a"
" different storage account.".format(account.name, account.id)) | Gets storage account endpoint by querying storage ARM API. | _get_storage_endpoint | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_storage_key(
cli_ctx,
storage_account,
resource_group_name,
use_secondary_key):
'''
Gets storage account key by querying storage ARM API.
'''
from azure.mgmt.storage import StorageManagementClient
# Get storage keys
client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
keys = client.storage_accounts.list_keys(
resource_group_name=resource_group_name,
account_name=storage_account)
# Choose storage key
index = 1 if use_secondary_key else 0
return keys.keys[index].value # pylint: disable=no-member | Gets storage account key by querying storage ARM API. | _get_storage_key | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _db_security_policy_update(
cli_ctx,
instance,
enabled,
storage_account,
storage_endpoint,
storage_account_access_key,
use_secondary_key):
'''
Common code for updating audit and threat detection policy.
'''
# Validate storage endpoint arguments
if storage_endpoint and storage_account:
raise CLIError('--storage-endpoint and --storage-account cannot both be specified.')
# Set storage endpoint
if storage_endpoint:
instance.storage_endpoint = storage_endpoint
if storage_account:
storage_resource_group = _find_storage_account_resource_group(cli_ctx, storage_account)
instance.storage_endpoint = _get_storage_endpoint(cli_ctx, storage_account, storage_resource_group)
# Set storage access key
if storage_account_access_key:
# Access key is specified
instance.storage_account_access_key = storage_account_access_key
elif enabled:
# Access key is not specified, but state is Enabled.
# If state is Enabled, then access key property is required in PUT. However access key is
# readonly (GET returns empty string for access key), so we need to determine the value
# and then PUT it back. (We don't want the user to be force to specify this, because that
# would be very annoying when updating non-storage-related properties).
# This doesn't work if the user used generic update args, i.e. `--set state=Enabled`
# instead of `--state Enabled`, since the generic update args are applied after this custom
# function, but at least we tried.
if not storage_account:
storage_account = _get_storage_account_name(instance.storage_endpoint)
storage_resource_group = _find_storage_account_resource_group(cli_ctx, storage_account)
instance.storage_account_access_key = _get_storage_key(
cli_ctx,
storage_account,
storage_resource_group,
use_secondary_key) | Common code for updating audit and threat detection policy. | _db_security_policy_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_diagnostic_settings(
cmd,
resource_group_name,
server_name,
database_name=None):
'''
Common code to get server or database diagnostic settings
'''
diagnostic_settings_url = _get_diagnostic_settings_url(
cmd=cmd, resource_group_name=resource_group_name,
server_name=server_name, database_name=database_name)
azure_monitor_client = cf_monitor(cmd.cli_ctx)
return list(azure_monitor_client.diagnostic_settings.list(diagnostic_settings_url)) | Common code to get server or database diagnostic settings | _get_diagnostic_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ms_support_audit_policy_get(
client,
server_name,
resource_group_name):
'''
Get server Microsoft support operations audit policy
'''
return client.get(
resource_group_name=resource_group_name,
server_name=server_name,
dev_ops_auditing_settings_name='default') | Get server Microsoft support operations audit policy | server_ms_support_audit_policy_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ms_support_audit_policy_set(
client,
server_name,
resource_group_name,
parameters):
'''
Set server Microsoft support operations audit policy
'''
return client.begin_create_or_update(
resource_group_name=resource_group_name,
server_name=server_name,
dev_ops_auditing_settings_name='default',
parameters=parameters) | Set server Microsoft support operations audit policy | server_ms_support_audit_policy_set | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_show(
cmd,
client,
resource_group_name,
server_name,
database_name=None,
category_name=None):
'''
Common code to get server (DevOps) or database audit policy including diagnostic settings
'''
# Request audit policy
if database_name is None:
if category_name == 'DevOpsOperationsAudit':
audit_policy = server_ms_support_audit_policy_get(
client=client,
resource_group_name=resource_group_name,
server_name=server_name)
else:
audit_policy = client.get(
resource_group_name=resource_group_name,
server_name=server_name)
else:
audit_policy = client.get(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name)
audit_policy.blob_storage_target_state = BlobAuditingPolicyState.DISABLED
audit_policy.event_hub_target_state = BlobAuditingPolicyState.DISABLED
audit_policy.log_analytics_target_state = BlobAuditingPolicyState.DISABLED
# If audit policy's state is disabled there is nothing to do
if _is_audit_policy_state_disabled(audit_policy.state):
return audit_policy
if not audit_policy.storage_endpoint:
audit_policy.blob_storage_target_state = BlobAuditingPolicyState.DISABLED
else:
audit_policy.blob_storage_target_state = BlobAuditingPolicyState.ENABLED
# If 'is_azure_monitor_target_enabled' is false there is no reason to request diagnostic settings
if not audit_policy.is_azure_monitor_target_enabled:
return audit_policy
# Request diagnostic settings
diagnostic_settings = _get_diagnostic_settings(
cmd=cmd, resource_group_name=resource_group_name,
server_name=server_name, database_name=database_name)
# Sort received diagnostic settings by name and get first element to ensure consistency between command executions
diagnostic_settings.sort(key=lambda d: d.name)
audit_diagnostic_setting = _fetch_first_audit_diagnostic_setting(diagnostic_settings, category_name)
# Initialize azure monitor properties
if audit_diagnostic_setting is not None:
if audit_diagnostic_setting.workspace_id is not None:
audit_policy.log_analytics_target_state = BlobAuditingPolicyState.ENABLED
audit_policy.log_analytics_workspace_resource_id = audit_diagnostic_setting.workspace_id
if audit_diagnostic_setting.event_hub_authorization_rule_id is not None:
audit_policy.event_hub_target_state = BlobAuditingPolicyState.enabled
audit_policy.event_hub_authorization_rule_id = audit_diagnostic_setting.event_hub_authorization_rule_id
audit_policy.event_hub_name = audit_diagnostic_setting.event_hub_name
return audit_policy | Common code to get server (DevOps) or database audit policy including diagnostic settings | _audit_policy_show | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_audit_policy_show(
cmd,
client,
server_name,
resource_group_name):
'''
Show server audit policy
'''
return _audit_policy_show(
cmd=cmd,
client=client,
resource_group_name=resource_group_name,
server_name=server_name,
category_name='SQLSecurityAuditEvents') | Show server audit policy | server_audit_policy_show | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_audit_policy_show(
cmd,
client,
server_name,
resource_group_name,
database_name):
'''
Show database audit policy
'''
return _audit_policy_show(
cmd=cmd,
client=client,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
category_name='SQLSecurityAuditEvents') | Show database audit policy | db_audit_policy_show | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ms_support_audit_policy_show(
cmd,
client,
server_name,
resource_group_name):
'''
Show server Microsoft support operations audit policy
'''
return _audit_policy_show(
cmd=cmd,
client=client,
resource_group_name=resource_group_name,
server_name=server_name,
category_name='DevOpsOperationsAudit') | Show server Microsoft support operations audit policy | server_ms_support_audit_policy_show | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_validate_arguments(
state=None,
blob_storage_target_state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub_name=None):
'''
Validate input agruments
'''
blob_storage_arguments_provided = blob_storage_target_state is not None or\
storage_account is not None or storage_endpoint is not None or\
storage_account_access_key is not None or\
retention_days is not None
log_analytics_arguments_provided = log_analytics_target_state is not None or\
log_analytics_workspace_resource_id is not None
event_hub_arguments_provided = event_hub_target_state is not None or\
event_hub_authorization_rule_id is not None or\
event_hub_name is not None
if not state and not blob_storage_arguments_provided and\
not log_analytics_arguments_provided and not event_hub_arguments_provided:
raise CLIError('Either state or blob storage or log analytics or event hub arguments are missing')
if _is_audit_policy_state_enabled(state) and\
blob_storage_target_state is None and log_analytics_target_state is None and event_hub_target_state is None:
raise CLIError('One of the following arguments must be enabled:'
' blob-storage-target-state, log-analytics-target-state, event-hub-target-state')
if _is_audit_policy_state_disabled(state) and\
(blob_storage_arguments_provided or
log_analytics_arguments_provided or
event_hub_name):
raise CLIError('No additional arguments should be provided once state is disabled')
if (_is_audit_policy_state_none_or_disabled(blob_storage_target_state)) and\
(storage_account is not None or storage_endpoint is not None or
storage_account_access_key is not None):
raise CLIError('Blob storage account arguments cannot be specified'
' if blob-storage-target-state is not provided or disabled')
if _is_audit_policy_state_enabled(blob_storage_target_state):
if storage_account is not None and storage_endpoint is not None:
raise CLIError('storage-account and storage-endpoint cannot be provided at the same time')
if storage_account is None and storage_endpoint is None:
raise CLIError('Either storage-account or storage-endpoint must be provided')
# Server upper limit
max_retention_days = 3285
if retention_days is not None and\
(not retention_days.isdigit() or int(retention_days) <= 0 or int(retention_days) >= max_retention_days):
raise CLIError('retention-days must be a positive number greater than zero and lower than {}'
.format(max_retention_days))
if _is_audit_policy_state_none_or_disabled(log_analytics_target_state) and\
log_analytics_workspace_resource_id is not None:
raise CLIError('Log analytics workspace resource id cannot be specified'
' if log-analytics-target-state is not provided or disabled')
if _is_audit_policy_state_enabled(log_analytics_target_state) and\
log_analytics_workspace_resource_id is None:
raise CLIError('Log analytics workspace resource id must be specified'
' if log-analytics-target-state is enabled')
if _is_audit_policy_state_none_or_disabled(event_hub_target_state) and\
(event_hub_authorization_rule_id is not None or event_hub_name is not None):
raise CLIError('Event hub arguments cannot be specified if event-hub-target-state is not provided or disabled')
if _is_audit_policy_state_enabled(event_hub_target_state) and event_hub_authorization_rule_id is None:
raise CLIError('event-hub-authorization-rule-id must be specified if event-hub-target-state is enabled') | Validate input agruments | _audit_policy_validate_arguments | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_create_diagnostic_setting(
cmd,
resource_group_name,
server_name,
database_name=None,
category_name=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub_name=None):
'''
Create audit diagnostic setting, i.e. containing single category - SQLSecurityAuditEvents or DevOpsOperationsAudit
'''
# Generate diagnostic settings name to be created
name = category_name
import inspect
test_methods = ["test_sql_db_security_mgmt", "test_sql_server_security_mgmt", "test_sql_server_ms_support_mgmt"]
test_mode = next((e for e in inspect.stack() if e.function in test_methods), None) is not None
# For test environment the name should be constant, i.e. match the name written in recorded yaml file
if test_mode:
name += '_LogAnalytics' if log_analytics_target_state is not None else ''
name += '_EventHub' if event_hub_target_state is not None else ''
else:
import uuid
name += '_' + str(uuid.uuid4())
diagnostic_settings_url = _get_diagnostic_settings_url(
cmd=cmd,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name)
azure_monitor_client = cf_monitor(cmd.cli_ctx)
LogSettings = cmd.get_models(
'LogSettings',
resource_type=ResourceType.MGMT_MONITOR,
operation_group='diagnostic_settings')
RetentionPolicy = cmd.get_models(
'RetentionPolicy',
resource_type=ResourceType.MGMT_MONITOR,
operation_group='diagnostic_settings')
return create_diagnostics_settings(
client=azure_monitor_client.diagnostic_settings,
name=name,
resource_uri=diagnostic_settings_url,
logs=[LogSettings(category=category_name, enabled=True,
retention_policy=RetentionPolicy(enabled=False, days=0))],
metrics=None,
event_hub=event_hub_name,
event_hub_rule=event_hub_authorization_rule_id,
storage_account=None,
workspace=log_analytics_workspace_resource_id) | Create audit diagnostic setting, i.e. containing single category - SQLSecurityAuditEvents or DevOpsOperationsAudit | _audit_policy_create_diagnostic_setting | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_update_diagnostic_settings(
cmd,
server_name,
resource_group_name,
database_name=None,
diagnostic_settings=None,
category_name=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub_name=None):
'''
Update audit policy's diagnostic settings
'''
# Fetch all audit diagnostic settings
audit_diagnostic_settings = _fetch_all_audit_diagnostic_settings(diagnostic_settings, category_name)
num_of_audit_diagnostic_settings = len(audit_diagnostic_settings)
# If more than 1 audit diagnostic settings found then throw error
if num_of_audit_diagnostic_settings > 1:
raise CLIError('Multiple audit diagnostics settings are already enabled')
diagnostic_settings_url = _get_diagnostic_settings_url(
cmd=cmd,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name)
azure_monitor_client = cf_monitor(cmd.cli_ctx)
# If no audit diagnostic settings found then create one if azure monitor is enabled
if num_of_audit_diagnostic_settings == 0:
if _is_audit_policy_state_enabled(log_analytics_target_state) or\
_is_audit_policy_state_enabled(event_hub_target_state):
created_diagnostic_setting = _audit_policy_create_diagnostic_setting(
cmd=cmd,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
category_name=category_name,
log_analytics_target_state=log_analytics_target_state,
log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
event_hub_target_state=event_hub_target_state,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub_name)
# Return rollback data tuple
return [("delete", created_diagnostic_setting)]
# azure monitor is disabled - there is nothing to do
return None
# This leaves us with case when num_of_audit_diagnostic_settings is 1
audit_diagnostic_setting = audit_diagnostic_settings[0]
# Initialize actually updated azure monitor fields
if log_analytics_target_state is None:
log_analytics_workspace_resource_id = audit_diagnostic_setting.workspace_id
elif _is_audit_policy_state_disabled(log_analytics_target_state):
log_analytics_workspace_resource_id = None
if event_hub_target_state is None:
event_hub_authorization_rule_id = audit_diagnostic_setting.event_hub_authorization_rule_id
event_hub_name = audit_diagnostic_setting.event_hub_name
elif _is_audit_policy_state_disabled(event_hub_target_state):
event_hub_authorization_rule_id = None
event_hub_name = None
is_azure_monitor_target_enabled = log_analytics_workspace_resource_id is not None or\
event_hub_authorization_rule_id is not None
has_other_categories = next((log for log in audit_diagnostic_setting.logs
if log.enabled and log.category != category_name), None) is not None
# If there is no other categories except SQLSecurityAuditEvents\DevOpsOperationsAudit update or delete
# the existing single diagnostic settings
if not has_other_categories:
# If azure monitor is enabled then update existing single audit diagnostic setting
if is_azure_monitor_target_enabled:
create_diagnostics_settings(
client=azure_monitor_client.diagnostic_settings,
name=audit_diagnostic_setting.name,
resource_uri=diagnostic_settings_url,
logs=audit_diagnostic_setting.logs,
metrics=audit_diagnostic_setting.metrics,
event_hub=event_hub_name,
event_hub_rule=event_hub_authorization_rule_id,
storage_account=audit_diagnostic_setting.storage_account_id,
workspace=log_analytics_workspace_resource_id)
# Return rollback data tuple
return [("update", audit_diagnostic_setting)]
# Azure monitor is disabled, delete existing single audit diagnostic setting
azure_monitor_client.diagnostic_settings.delete(diagnostic_settings_url, audit_diagnostic_setting.name)
# Return rollback data tuple
return [("create", audit_diagnostic_setting)]
# In case there are other categories in the existing single audit diagnostic setting a "split" must be performed:
# 1. Disable SQLSecurityAuditEvents\DevOpsOperationsAudit category in found audit diagnostic setting
# 2. Create new diagnostic setting with SQLSecurityAuditEvents\DevOpsOperationsAudit category,
# i.e. audit diagnostic setting
# Build updated logs list with disabled SQLSecurityAuditEvents\DevOpsOperationsAudit category
updated_logs = []
LogSettings = cmd.get_models(
'LogSettings',
resource_type=ResourceType.MGMT_MONITOR,
operation_group='diagnostic_settings')
RetentionPolicy = cmd.get_models(
'RetentionPolicy',
resource_type=ResourceType.MGMT_MONITOR,
operation_group='diagnostic_settings')
for log in audit_diagnostic_setting.logs:
if log.category == category_name:
updated_logs.append(LogSettings(category=log.category, enabled=False,
retention_policy=RetentionPolicy(enabled=False, days=0)))
else:
updated_logs.append(log)
# Update existing diagnostic settings
create_diagnostics_settings(
client=azure_monitor_client.diagnostic_settings,
name=audit_diagnostic_setting.name,
resource_uri=diagnostic_settings_url,
logs=updated_logs,
metrics=audit_diagnostic_setting.metrics,
event_hub=audit_diagnostic_setting.event_hub_name,
event_hub_rule=audit_diagnostic_setting.event_hub_authorization_rule_id,
storage_account=audit_diagnostic_setting.storage_account_id,
workspace=audit_diagnostic_setting.workspace_id)
# Add original 'audit_diagnostic_settings' to rollback_data list
rollback_data = [("update", audit_diagnostic_setting)]
# Create new diagnostic settings with enabled SQLSecurityAuditEvents\DevOpsOperationsAudit category
# only if azure monitor is enabled
if is_azure_monitor_target_enabled:
created_diagnostic_setting = _audit_policy_create_diagnostic_setting(
cmd=cmd,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
category_name=category_name,
log_analytics_target_state=log_analytics_target_state,
log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
event_hub_target_state=event_hub_target_state,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub_name)
# Add 'created_diagnostic_settings' to rollback_data list in reverse order
rollback_data.insert(0, ("delete", created_diagnostic_setting))
return rollback_data | Update audit policy's diagnostic settings | _audit_policy_update_diagnostic_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_update_apply_blob_storage_details(
cmd,
instance,
blob_storage_target_state,
storage_account,
storage_endpoint,
storage_account_access_key,
retention_days):
'''
Apply blob storage details on policy update
'''
if hasattr(instance, 'is_storage_secondary_key_in_use'):
is_storage_secondary_key_in_use = instance.is_storage_secondary_key_in_use
else:
is_storage_secondary_key_in_use = False
if blob_storage_target_state is None:
# Original audit policy has no storage_endpoint
if not instance.storage_endpoint:
instance.storage_endpoint = None
instance.storage_account_access_key = None
else:
# Resolve storage_account_access_key based on original storage_endpoint
storage_account = _get_storage_account_name(instance.storage_endpoint)
storage_resource_group = _find_storage_account_resource_group(cmd.cli_ctx, storage_account)
instance.storage_account_access_key = _get_storage_key(
cli_ctx=cmd.cli_ctx,
storage_account=storage_account,
resource_group_name=storage_resource_group,
use_secondary_key=is_storage_secondary_key_in_use)
elif _is_audit_policy_state_enabled(blob_storage_target_state):
# Resolve storage_endpoint using provided storage_account
if storage_account is not None:
storage_resource_group = _find_storage_account_resource_group(cmd.cli_ctx, storage_account)
storage_endpoint = _get_storage_endpoint(cmd.cli_ctx, storage_account, storage_resource_group)
if storage_endpoint is not None:
instance.storage_endpoint = storage_endpoint
if storage_account_access_key is not None:
instance.storage_account_access_key = storage_account_access_key
elif storage_endpoint is not None:
# Resolve storage_account if not provided
if storage_account is None:
storage_account = _get_storage_account_name(storage_endpoint)
storage_resource_group = _find_storage_account_resource_group(cmd.cli_ctx, storage_account)
# Resolve storage_account_access_key based on storage_account
instance.storage_account_access_key = _get_storage_key(
cli_ctx=cmd.cli_ctx,
storage_account=storage_account,
resource_group_name=storage_resource_group,
use_secondary_key=is_storage_secondary_key_in_use)
# Apply retenation days
if hasattr(instance, 'retention_days') and retention_days is not None:
instance.retention_days = retention_days
else:
instance.storage_endpoint = None
instance.storage_account_access_key = None | Apply blob storage details on policy update | _audit_policy_update_apply_blob_storage_details | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_update_apply_azure_monitor_target_enabled(
instance,
diagnostic_settings,
category_name,
log_analytics_target_state,
event_hub_target_state):
'''
Apply value of is_azure_monitor_target_enabled on policy update
'''
# If log_analytics_target_state and event_hub_target_state are None there is nothing to do
if log_analytics_target_state is None and event_hub_target_state is None:
return
if _is_audit_policy_state_enabled(log_analytics_target_state) or\
_is_audit_policy_state_enabled(event_hub_target_state):
instance.is_azure_monitor_target_enabled = True
else:
# Sort received diagnostic settings by name and get first element to ensure consistency
# between command executions
diagnostic_settings.sort(key=lambda d: d.name)
audit_diagnostic_setting = _fetch_first_audit_diagnostic_setting(diagnostic_settings, category_name)
# Determine value of is_azure_monitor_target_enabled
if audit_diagnostic_setting is None:
updated_log_analytics_workspace_id = None
updated_event_hub_authorization_rule_id = None
else:
updated_log_analytics_workspace_id = audit_diagnostic_setting.workspace_id
updated_event_hub_authorization_rule_id = audit_diagnostic_setting.event_hub_authorization_rule_id
if _is_audit_policy_state_disabled(log_analytics_target_state):
updated_log_analytics_workspace_id = None
if _is_audit_policy_state_disabled(event_hub_target_state):
updated_event_hub_authorization_rule_id = None
instance.is_azure_monitor_target_enabled = updated_log_analytics_workspace_id is not None or\
updated_event_hub_authorization_rule_id is not None | Apply value of is_azure_monitor_target_enabled on policy update | _audit_policy_update_apply_azure_monitor_target_enabled | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_update_global_settings(
cmd,
instance,
diagnostic_settings=None,
category_name=None,
state=None,
blob_storage_target_state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
audit_actions_and_groups=None,
retention_days=None,
log_analytics_target_state=None,
event_hub_target_state=None):
'''
Update audit policy's global settings
'''
# Apply state
if state is not None:
instance.state = BlobAuditingPolicyState[state.lower()]
# Apply additional command line arguments only if policy's state is enabled
if _is_audit_policy_state_enabled(instance.state):
# Apply blob_storage_target_state and all storage account details
_audit_policy_update_apply_blob_storage_details(
cmd=cmd,
instance=instance,
blob_storage_target_state=blob_storage_target_state,
storage_account=storage_account,
storage_endpoint=storage_endpoint,
storage_account_access_key=storage_account_access_key,
retention_days=retention_days)
# Apply audit_actions_and_groups
if hasattr(instance, 'audit_actions_and_groups'):
if audit_actions_and_groups is not None:
instance.audit_actions_and_groups = audit_actions_and_groups
if not instance.audit_actions_and_groups or instance.audit_actions_and_groups == []:
instance.audit_actions_and_groups = [
"SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP",
"FAILED_DATABASE_AUTHENTICATION_GROUP",
"BATCH_COMPLETED_GROUP"]
# Apply is_azure_monitor_target_enabled
_audit_policy_update_apply_azure_monitor_target_enabled(
instance=instance,
diagnostic_settings=diagnostic_settings,
category_name=category_name,
log_analytics_target_state=log_analytics_target_state,
event_hub_target_state=event_hub_target_state) | Update audit policy's global settings | _audit_policy_update_global_settings | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _audit_policy_update_rollback(
cmd,
server_name,
resource_group_name,
database_name,
rollback_data):
'''
Rollback diagnostic settings change
'''
diagnostic_settings_url = _get_diagnostic_settings_url(
cmd=cmd,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name)
azure_monitor_client = cf_monitor(cmd.cli_ctx)
for rd in rollback_data:
rollback_diagnostic_setting = rd[1]
if rd[0] == "create" or rd[0] == "update":
create_diagnostics_settings(
client=azure_monitor_client.diagnostic_settings,
name=rollback_diagnostic_setting.name,
resource_uri=diagnostic_settings_url,
logs=rollback_diagnostic_setting.logs,
metrics=rollback_diagnostic_setting.metrics,
event_hub=rollback_diagnostic_setting.event_hub_name,
event_hub_rule=rollback_diagnostic_setting.event_hub_authorization_rule_id,
storage_account=rollback_diagnostic_setting.storage_account_id,
workspace=rollback_diagnostic_setting.workspace_id)
else: # delete
azure_monitor_client.diagnostic_settings.delete(diagnostic_settings_url, rollback_diagnostic_setting.name) | Rollback diagnostic settings change | _audit_policy_update_rollback | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_audit_policy_update(
cmd,
instance,
server_name,
resource_group_name,
state=None,
blob_storage_target_state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
audit_actions_and_groups=None,
retention_days=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub=None):
'''
Update server audit policy
'''
return _audit_policy_update(
cmd=cmd,
instance=instance,
server_name=server_name,
resource_group_name=resource_group_name,
database_name=None,
state=state,
blob_storage_target_state=blob_storage_target_state,
storage_account=storage_account,
storage_endpoint=storage_endpoint,
storage_account_access_key=storage_account_access_key,
audit_actions_and_groups=audit_actions_and_groups,
retention_days=retention_days,
category_name='SQLSecurityAuditEvents',
log_analytics_target_state=log_analytics_target_state,
log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
event_hub_target_state=event_hub_target_state,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub) | Update server audit policy | server_audit_policy_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_audit_policy_update(
cmd,
instance,
server_name,
resource_group_name,
database_name,
state=None,
blob_storage_target_state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
audit_actions_and_groups=None,
retention_days=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub=None):
'''
Update database audit policy
'''
return _audit_policy_update(
cmd=cmd,
instance=instance,
server_name=server_name,
resource_group_name=resource_group_name,
database_name=database_name,
state=state,
blob_storage_target_state=blob_storage_target_state,
storage_account=storage_account,
storage_endpoint=storage_endpoint,
storage_account_access_key=storage_account_access_key,
audit_actions_and_groups=audit_actions_and_groups,
retention_days=retention_days,
category_name='SQLSecurityAuditEvents',
log_analytics_target_state=log_analytics_target_state,
log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
event_hub_target_state=event_hub_target_state,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub) | Update database audit policy | db_audit_policy_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ms_support_audit_policy_update(
cmd,
instance,
server_name,
resource_group_name,
state=None,
blob_storage_target_state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub=None):
'''
Update server Microsoft support operations audit policy
'''
return _audit_policy_update(
cmd=cmd,
instance=instance,
server_name=server_name,
resource_group_name=resource_group_name,
database_name=None,
state=state,
blob_storage_target_state=blob_storage_target_state,
storage_account=storage_account,
storage_endpoint=storage_endpoint,
storage_account_access_key=storage_account_access_key,
audit_actions_and_groups=None,
retention_days=None,
category_name='DevOpsOperationsAudit',
log_analytics_target_state=log_analytics_target_state,
log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
event_hub_target_state=event_hub_target_state,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub) | Update server Microsoft support operations audit policy | server_ms_support_audit_policy_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def update_long_term_retention(
client,
database_name,
server_name,
resource_group_name,
weekly_retention=None,
monthly_retention=None,
yearly_retention=None,
week_of_year=None,
make_backups_immutable=None,
backup_storage_access_tier=None,
**kwargs):
'''
Updates long term retention for managed database
'''
if not (weekly_retention or monthly_retention or yearly_retention):
raise CLIError('Please specify retention setting(s). See \'--help\' for more details.')
if yearly_retention and not week_of_year:
raise CLIError('Please specify week of year for yearly retention.')
if make_backups_immutable:
confirmation = prompt_y_n("""Immutable LTR backups can't be changed or deleted.
You'll be charged for LTR backups for the full retention period.
Do you want to proceed?""")
if not confirmation:
return
if backup_storage_access_tier and backup_storage_access_tier.lower() not in BACKUP_STORAGE_ACCESS_TIERS:
raise CLIError('Please specify a valid backup storage access tier type for backup storage access tier.'
'See \'--help\' for more details.')
kwargs['weekly_retention'] = weekly_retention
kwargs['monthly_retention'] = monthly_retention
kwargs['yearly_retention'] = yearly_retention
kwargs['week_of_year'] = week_of_year
kwargs['make_backups_immutable'] = make_backups_immutable
kwargs['backup_storage_access_tier'] = backup_storage_access_tier
policy = client.begin_create_or_update(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
policy_name=LongTermRetentionPolicyName.DEFAULT,
parameters=kwargs)
return policy | Updates long term retention for managed database | update_long_term_retention | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def get_long_term_retention(
client,
resource_group_name,
database_name,
server_name):
'''
Gets long term retention for managed database
'''
return client.get(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
policy_name=LongTermRetentionPolicyName.DEFAULT) | Gets long term retention for managed database | get_long_term_retention | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def update_short_term_retention(
client,
database_name,
server_name,
resource_group_name,
retention_days,
diffbackup_hours=None,
no_wait=False,
**kwargs):
'''
Updates short term retention for live database
'''
kwargs['retention_days'] = retention_days
kwargs['diff_backup_interval_in_hours'] = diffbackup_hours
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
policy_name=ShortTermRetentionPolicyName.DEFAULT,
parameters=kwargs) | Updates short term retention for live database | update_short_term_retention | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def get_short_term_retention(
client,
database_name,
server_name,
resource_group_name):
'''
Gets short term retention for live database
'''
return client.get(
database_name=database_name,
server_name=server_name,
resource_group_name=resource_group_name,
policy_name=ShortTermRetentionPolicyName.DEFAULT) | Gets short term retention for live database | get_short_term_retention | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _list_by_database_long_term_retention_backups(
client,
location_name,
long_term_retention_server_name,
long_term_retention_database_name,
resource_group_name=None,
only_latest_per_database=None,
database_state=None):
'''
Gets the long term retention backups for a Managed Database
'''
if resource_group_name:
backups = client.list_by_resource_group_database(
resource_group_name=resource_group_name,
location_name=location_name,
long_term_retention_server_name=long_term_retention_server_name,
long_term_retention_database_name=long_term_retention_database_name,
only_latest_per_database=only_latest_per_database,
database_state=database_state)
else:
backups = client.list_by_database(
location_name=location_name,
long_term_retention_server_name=long_term_retention_server_name,
long_term_retention_database_name=long_term_retention_database_name,
only_latest_per_database=only_latest_per_database,
database_state=database_state)
return backups | Gets the long term retention backups for a Managed Database | _list_by_database_long_term_retention_backups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _list_by_server_long_term_retention_backups(
client,
location_name,
long_term_retention_server_name,
resource_group_name=None,
only_latest_per_database=None,
database_state=None):
'''
Gets the long term retention backups within a Managed Instance
'''
if resource_group_name:
backups = client.list_by_resource_group_server(
resource_group_name=resource_group_name,
location_name=location_name,
long_term_retention_server_name=long_term_retention_server_name,
only_latest_per_database=only_latest_per_database,
database_state=database_state)
else:
backups = client.list_by_server(
location_name=location_name,
long_term_retention_server_name=long_term_retention_server_name,
only_latest_per_database=only_latest_per_database,
database_state=database_state)
return backups | Gets the long term retention backups within a Managed Instance | _list_by_server_long_term_retention_backups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _list_by_location_long_term_retention_backups(
client,
location_name,
resource_group_name=None,
only_latest_per_database=None,
database_state=None):
'''
Gets the long term retention backups within a specified region.
'''
if resource_group_name:
backups = client.list_by_resource_group_location(
resource_group_name=resource_group_name,
location_name=location_name,
only_latest_per_database=only_latest_per_database,
database_state=database_state)
else:
backups = client.list_by_location(
location_name=location_name,
only_latest_per_database=only_latest_per_database,
database_state=database_state)
return backups | Gets the long term retention backups within a specified region. | _list_by_location_long_term_retention_backups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def list_long_term_retention_backups(
client,
location_name,
long_term_retention_server_name=None,
long_term_retention_database_name=None,
resource_group_name=None,
only_latest_per_database=None,
database_state=None):
'''
Lists the long term retention backups for a specified location, instance, or database.
'''
if long_term_retention_server_name:
if long_term_retention_database_name:
backups = _list_by_database_long_term_retention_backups(
client,
location_name,
long_term_retention_server_name,
long_term_retention_database_name,
resource_group_name,
only_latest_per_database,
database_state)
else:
backups = _list_by_server_long_term_retention_backups(
client,
location_name,
long_term_retention_server_name,
resource_group_name,
only_latest_per_database,
database_state)
else:
backups = _list_by_location_long_term_retention_backups(
client,
location_name,
resource_group_name,
only_latest_per_database,
database_state)
return backups | Lists the long term retention backups for a specified location, instance, or database. | list_long_term_retention_backups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def restore_long_term_retention_backup(
cmd,
client,
long_term_retention_backup_resource_id,
target_database_name,
target_server_name,
target_resource_group_name,
sku,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
federated_client_id=None,
**kwargs):
'''
Restores an existing database (i.e. create with 'RestoreLongTermRetentionBackup' create mode.)
'''
if not target_resource_group_name or not target_server_name or not target_database_name:
raise CLIError('Please specify target resource(s). '
'Target resource group, target server, and target database '
'are all required to restore LTR backup.')
if not long_term_retention_backup_resource_id:
raise CLIError('Please specify a long term retention backup.')
backup_name = long_term_retention_backup_resource_id.split("/")[-1].split(";")
if len(backup_name) == 3 and backup_name[-1] != "Hot":
raise CLIError('Restore operation on "' + backup_name[-1] + '" backup is not allowed. '
'Only "Hot" backups can be restored')
kwargs['location'] = _get_server_location(
cmd.cli_ctx,
server_name=target_server_name,
resource_group_name=target_resource_group_name)
# If sku.name is not specified, resolve the requested sku name
# using capabilities.
kwargs['sku'] = _find_db_sku_from_capabilities(
cmd.cli_ctx,
kwargs['location'],
sku,
compute_model=kwargs['compute_model'])
kwargs['create_mode'] = CreateMode.RESTORE_LONG_TERM_RETENTION_BACKUP
kwargs['long_term_retention_backup_resource_id'] = long_term_retention_backup_resource_id
# Check backup storage redundancy configurations
if _should_show_backup_storage_redundancy_warnings(kwargs['location']):
if not kwargs['requested_backup_storage_redundancy']:
_backup_storage_redundancy_take_source_warning()
if kwargs['requested_backup_storage_redundancy'] == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
# Per DB CMK params
if assign_identity:
kwargs['identity'] = _get_database_identity(user_assigned_identity_id)
kwargs['keys'] = _get_database_keys(keys)
kwargs['encryption_protector'] = encryption_protector
kwargs['federated_client_id'] = federated_client_id
return client.begin_create_or_update(
database_name=target_database_name,
server_name=target_server_name,
resource_group_name=target_resource_group_name,
parameters=kwargs) | Restores an existing database (i.e. create with 'RestoreLongTermRetentionBackup' create mode.) | restore_long_term_retention_backup | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def list_geo_backups(
client,
server_name,
resource_group_name):
'''
Gets the geo redundant backups for a server
'''
return client.list_by_server(
resource_group_name=resource_group_name,
server_name=server_name) | Gets the geo redundant backups for a server | list_geo_backups | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def restore_geo_backup(
cmd,
client,
geo_backup_id,
target_database_name,
target_server_name,
target_resource_group_name,
sku,
assign_identity=False,
user_assigned_identity_id=None,
keys=None,
encryption_protector=None,
federated_client_id=None,
**kwargs):
'''
Restores an existing database (i.e. create with 'recovery' create mode.)
'''
if not target_resource_group_name or not target_server_name or not target_database_name:
raise CLIError('Please specify target resource(s). '
'Target resource group, target server, and target database '
'are all required to restore Geo-redundant backup.')
if not geo_backup_id:
raise CLIError('Please specify a geo redundant backup.')
kwargs['location'] = _get_server_location(
cmd.cli_ctx,
server_name=target_server_name,
resource_group_name=target_resource_group_name)
kwargs['create_mode'] = CreateMode.RECOVERY
kwargs['recoverableDatabaseId'] = geo_backup_id
# If sku.name is not specified, resolve the requested sku name
# using capabilities.
kwargs['sku'] = _find_db_sku_from_capabilities(
cmd.cli_ctx,
kwargs['location'],
sku,
compute_model=kwargs['compute_model'])
# Check backup storage redundancy configurations
if _should_show_backup_storage_redundancy_warnings(kwargs['location']):
if not kwargs['requested_backup_storage_redundancy']:
_backup_storage_redundancy_take_source_warning()
if kwargs['requested_backup_storage_redundancy'] == 'Geo':
_backup_storage_redundancy_specify_geo_warning()
# Per DB CMK params
if assign_identity:
kwargs['identity'] = _get_database_identity(user_assigned_identity_id)
kwargs['keys'] = _get_database_keys(keys)
kwargs['encryption_protector'] = encryption_protector
kwargs['federated_client_id'] = federated_client_id
return client.begin_create_or_update(
database_name=target_database_name,
server_name=target_server_name,
resource_group_name=target_resource_group_name,
parameters=kwargs) | Restores an existing database (i.e. create with 'recovery' create mode.) | restore_geo_backup | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_threat_detection_policy_get(
client,
resource_group_name,
server_name,
database_name):
'''
Gets a threat detection policy.
'''
return client.get(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
security_alert_policy_name=SecurityAlertPolicyName.DEFAULT) | Gets a threat detection policy. | db_threat_detection_policy_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_threat_detection_policy_update(
cmd,
instance,
state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None,
email_addresses=None,
disabled_alerts=None,
email_account_admins=None):
'''
Updates a threat detection policy. Custom update function to apply parameters to instance.
'''
# Apply state
if state:
instance.state = SecurityAlertPolicyState[state.lower()]
enabled = instance.state.lower() == SecurityAlertPolicyState.ENABLED.value.lower() # pylint: disable=no-member
# Set storage-related properties
_db_security_policy_update(
cmd.cli_ctx,
instance,
enabled,
storage_account,
storage_endpoint,
storage_account_access_key,
False)
# Set other properties
if retention_days:
instance.retention_days = retention_days
if email_addresses:
instance.email_addresses = email_addresses
if disabled_alerts:
instance.disabled_alerts = disabled_alerts
if email_account_admins:
instance.email_account_admins = email_account_admins
return instance | Updates a threat detection policy. Custom update function to apply parameters to instance. | db_threat_detection_policy_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_advanced_threat_protection_setting_get(
client,
resource_group_name,
server_name,
database_name):
'''
Gets an advanced threat protection setting.
'''
return client.get(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
advanced_threat_protection_name=AdvancedThreatProtectionName.DEFAULT) | Gets an advanced threat protection setting. | db_advanced_threat_protection_setting_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_advanced_threat_protection_setting_update(
cmd,
instance,
state=None):
# pylint: disable=unused-argument
'''
Updates an advanced threat protection setting. Custom update function to apply parameters to instance.
'''
# Apply state
if state:
instance.state = AdvancedThreatProtectionState[state.lower()]
return instance | Updates an advanced threat protection setting. Custom update function to apply parameters to instance. | db_advanced_threat_protection_setting_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def db_sensitivity_label_update(
cmd,
client,
database_name,
server_name,
schema_name,
table_name,
column_name,
resource_group_name,
label_name=None,
information_type=None):
'''
Updates a sensitivity label. Custom update function to apply parameters to instance.
'''
# Get the information protection policy
from azure.mgmt.security import SecurityCenter
from azure.core.exceptions import ResourceNotFoundError
security_center_client = get_mgmt_service_client(cmd.cli_ctx, SecurityCenter, asc_location="centralus")
information_protection_policy = security_center_client.information_protection_policies.get(
scope='/providers/Microsoft.Management/managementGroups/{}'.format(_get_tenant_id()),
information_protection_policy_name="effective")
sensitivity_label = SensitivityLabel()
# Get the current label
try:
current_label = client.get(
resource_group_name,
server_name,
database_name,
schema_name,
table_name,
column_name,
SensitivityLabelSource.CURRENT)
# Initialize with existing values
sensitivity_label.label_name = current_label.label_name
sensitivity_label.label_id = current_label.label_id
sensitivity_label.information_type = current_label.information_type
sensitivity_label.information_type_id = current_label.information_type_id
except ResourceNotFoundError as ex:
if not (ex and 'SensitivityLabelsLabelNotFound' in str(ex)):
raise ex
# Find the label id and information type id in the policy by the label name provided
label_id = None
if label_name:
label_id = next((id for id in information_protection_policy.labels
if information_protection_policy.labels[id].display_name.lower() ==
label_name.lower()),
None)
if label_id is None:
raise CLIError('The provided label name was not found in the information protection policy.')
sensitivity_label.label_id = label_id
sensitivity_label.label_name = label_name
information_type_id = None
if information_type:
information_type_id = next((id for id in information_protection_policy.information_types
if information_protection_policy.information_types[id].display_name.lower() ==
information_type.lower()),
None)
if information_type_id is None:
raise CLIError('The provided information type was not found in the information protection policy.')
sensitivity_label.information_type_id = information_type_id
sensitivity_label.information_type = information_type
return client.create_or_update(
resource_group_name, server_name, database_name, schema_name, table_name, column_name, sensitivity_label) | Updates a sensitivity label. Custom update function to apply parameters to instance. | db_sensitivity_label_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def dw_create(
cmd,
client,
database_name,
server_name,
resource_group_name,
no_wait=False,
**kwargs):
'''
Creates a datawarehouse.
'''
# Set edition
kwargs['sku'].tier = DatabaseEdition.data_warehouse.value
# Create
return _db_dw_create(
cmd.cli_ctx,
client,
None,
DatabaseIdentity(cmd.cli_ctx, database_name, server_name, resource_group_name),
no_wait,
**kwargs) | Creates a datawarehouse. | dw_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def dw_list(
client,
server_name,
resource_group_name):
'''
Lists data warehouses in a server or elastic pool.
'''
dbs = client.list_by_server(
resource_group_name=resource_group_name,
server_name=server_name)
# Include only DW's
return [db for db in dbs if db.sku.tier == DatabaseEdition.data_warehouse.value] | Lists data warehouses in a server or elastic pool. | dw_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def dw_update(
instance,
max_size_bytes=None,
service_objective=None):
'''
Updates a data warehouse. Custom update function to apply parameters to instance.
'''
# Apply param values to instance
if max_size_bytes:
instance.max_size_bytes = max_size_bytes
if service_objective:
instance.sku = Sku(name=service_objective)
instance.availability_zone = None
return instance | Updates a data warehouse. Custom update function to apply parameters to instance. | dw_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def dw_pause(
client,
database_name,
server_name,
resource_group_name):
'''
Pauses a datawarehouse.
'''
# Pause, but DO NOT return the result. Long-running POST operation
# results are not returned correctly by SDK.
client.begin_pause(
server_name=server_name,
resource_group_name=resource_group_name,
database_name=database_name).wait() | Pauses a datawarehouse. | dw_pause | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def dw_resume(
client,
database_name,
server_name,
resource_group_name):
'''
Resumes a datawarehouse.
'''
# Resume, but DO NOT return the result. Long-running POST operation
# results are not returned correctly by SDK.
client.begin_resume(
server_name=server_name,
resource_group_name=resource_group_name,
database_name=database_name).wait() | Resumes a datawarehouse. | dw_resume | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _find_elastic_pool_sku_from_capabilities(cli_ctx, location, sku, allow_reset_family=False, compute_model=None):
'''
Given a requested sku which may have some properties filled in
(e.g. tier and capacity), finds the canonical matching sku
from the given location's capabilities.
'''
logger.debug('_find_elastic_pool_sku_from_capabilities input: %s', sku)
if sku.name:
# User specified sku.name, so nothing else needs to be resolved.
logger.debug('_find_elastic_pool_sku_from_capabilities return sku as is')
return sku
if not _any_sku_values_specified(sku):
# User did not request any properties of sku, so just wipe it out.
# Server side will pick a default.
logger.debug('_find_elastic_pool_sku_from_capabilities return None')
return None
# Some properties of sku are specified, but not name. Use the requested properties
# to find a matching capability and copy the sku from there.
# Get location capability
loc_capability = _get_location_capability(cli_ctx, location, CapabilityGroup.SUPPORTED_ELASTIC_POOL_EDITIONS)
# Get default server version capability
server_version_capability = _get_default_server_version(loc_capability)
# Find edition capability, based on requested sku properties
edition_capability = _find_edition_capability(sku, server_version_capability.supported_elastic_pool_editions)
# Find performance level capability, based on requested sku properties
performance_level_capability = _find_performance_level_capability(
sku, edition_capability.supported_elastic_pool_performance_levels,
allow_reset_family=allow_reset_family,
compute_model=compute_model)
# Copy sku object from capability
result = performance_level_capability.sku
logger.debug('_find_elastic_pool_sku_from_capabilities return: %s', result)
return result | Given a requested sku which may have some properties filled in
(e.g. tier and capacity), finds the canonical matching sku
from the given location's capabilities. | _find_elastic_pool_sku_from_capabilities | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def elastic_pool_create(
cmd,
client,
server_name,
resource_group_name,
elastic_pool_name,
sku=None,
maintenance_configuration_id=None,
preferred_enclave_type=None,
**kwargs):
'''
Creates an elastic pool.
'''
# Determine server location
kwargs['location'] = _get_server_location(
cmd.cli_ctx,
server_name=server_name,
resource_group_name=resource_group_name)
# If sku.name is not specified, resolve the requested sku name
# using capabilities.
kwargs['sku'] = _find_elastic_pool_sku_from_capabilities(cmd.cli_ctx, kwargs['location'], sku)
# The min_capacity property is only applicable to serverless SKUs.
#
if kwargs['sku'] is not None and not _is_serverless_slo(kwargs['sku'].name):
kwargs['min_capacity'] = None
# Expand maintenance configuration id if needed
kwargs['maintenance_configuration_id'] = _complete_maintenance_configuration_id(
cmd.cli_ctx,
maintenance_configuration_id)
# Add preferred enclave type, if requested
kwargs['preferred_enclave_type'] = preferred_enclave_type
# Create
return client.begin_create_or_update(
server_name=server_name,
resource_group_name=resource_group_name,
elastic_pool_name=elastic_pool_name,
parameters=kwargs) | Creates an elastic pool. | elastic_pool_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def elastic_pool_update(
cmd,
instance,
max_capacity=None,
min_capacity=None,
max_size_bytes=None,
zone_redundant=None,
tier=None,
family=None,
capacity=None,
maintenance_configuration_id=None,
high_availability_replica_count=None,
preferred_enclave_type=None):
'''
Updates an elastic pool. Custom update function to apply parameters to instance.
'''
#####
# Set sku-related properties
#####
# Update sku
_db_elastic_pool_update_sku(
cmd,
instance,
None, # service_objective
tier,
family,
capacity,
find_sku_from_capabilities_func=_find_elastic_pool_sku_from_capabilities)
#####
# Set other properties
#####
if max_capacity:
instance.per_database_settings.max_capacity = max_capacity
if min_capacity:
instance.per_database_settings.min_capacity = min_capacity
if max_size_bytes:
instance.max_size_bytes = max_size_bytes
if zone_redundant is not None:
instance.zone_redundant = zone_redundant
instance.maintenance_configuration_id = _complete_maintenance_configuration_id(
cmd.cli_ctx,
maintenance_configuration_id)
if high_availability_replica_count is not None:
instance.high_availability_replica_count = high_availability_replica_count
if preferred_enclave_type is not None:
instance.preferred_enclave_type = preferred_enclave_type
return instance | Updates an elastic pool. Custom update function to apply parameters to instance. | elastic_pool_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def elastic_pool_list_capabilities(
client,
location,
edition=None,
dtu=None,
vcores=None,
show_details=None,
available=False):
'''
Gets elastic pool capabilities and optionally applies the specified filters.
'''
# Fixup parameters
if not show_details:
show_details = []
if dtu:
dtu = int(dtu)
# Get capabilities tree from server
capabilities = client.list_by_location(location, CapabilityGroup.SUPPORTED_ELASTIC_POOL_EDITIONS)
# Get subtree related to elastic pools
editions = _get_default_server_version(capabilities).supported_elastic_pool_editions
# Filter by edition
if edition:
editions = [e for e in editions if e.name.lower() == edition.lower()]
# Filter by dtu
if dtu:
for e in editions:
e.supported_elastic_pool_performance_levels = [
pl for pl in e.supported_elastic_pool_performance_levels
if pl.performance_level.value == int(dtu) and
pl.performance_level.unit == PerformanceLevelUnit.DTU]
# Filter by vcores
if vcores:
for e in editions:
e.supported_elastic_pool_performance_levels = [
pl for pl in e.supported_elastic_pool_performance_levels
if pl.performance_level.value == int(vcores) and
pl.performance_level.unit == PerformanceLevelUnit.V_CORES]
# Filter by availability
if available:
editions = _filter_available(editions)
for e in editions:
e.supported_elastic_pool_performance_levels = _filter_available(e.supported_elastic_pool_performance_levels)
for slo in e.supported_service_level_objectives:
slo.supported_max_sizes = _filter_available(slo.supported_max_sizes)
# Remove editions with no service objectives (due to filters)
editions = [e for e in editions if e.supported_elastic_pool_performance_levels]
for e in editions:
for d in e.supported_elastic_pool_performance_levels:
# Optionally hide supported max sizes
if ElasticPoolCapabilitiesAdditionalDetails.max_size.value not in show_details:
d.supported_max_sizes = []
# Optionally hide per database min & max dtus. min dtus are nested inside max dtus,
# so only hide max dtus if both min and max should be hidden.
if ElasticPoolCapabilitiesAdditionalDetails.db_min_dtu.value not in show_details:
if ElasticPoolCapabilitiesAdditionalDetails.db_max_dtu.value not in show_details:
d.supported_per_database_max_performance_levels = []
for md in d.supported_per_database_max_performance_levels:
md.supported_per_database_min_performance_levels = []
# Optionally hide supported per db max sizes
if ElasticPoolCapabilitiesAdditionalDetails.db_max_size.value not in show_details:
d.supported_per_database_max_sizes = []
return editions | Gets elastic pool capabilities and optionally applies the specified filters. | elastic_pool_list_capabilities | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def instance_pool_list(
client,
resource_group_name=None):
'''
Lists servers in a resource group or subscription
'''
if resource_group_name:
# List all instance pools in the resource group
return client.list_by_resource_group(
resource_group_name=resource_group_name)
# List all instance pools in the subscription
return client.list() | Lists servers in a resource group or subscription | instance_pool_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def instance_pool_create(
cmd,
client,
instance_pool_name,
resource_group_name,
no_wait=False,
sku=None,
**kwargs):
'''
Creates a new instance pool
'''
kwargs['sku'] = _find_instance_pool_sku_from_capabilities(
cmd.cli_ctx, kwargs['location'], sku)
kwargs['maintenance_configuration_id'] = _complete_maintenance_configuration_id(
cmd.cli_ctx, kwargs['maintenance_configuration_id'])
return sdk_no_wait(no_wait, client.begin_create_or_update,
instance_pool_name=instance_pool_name,
resource_group_name=resource_group_name,
parameters=kwargs) | Creates a new instance pool | instance_pool_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def instance_pool_update(
cmd,
instance,
vcores=None,
tier=None,
family=None,
maintenance_configuration_id=None,
license_type=None,
tags=None):
'''
Updates a instance pool
'''
instance.tags = (tags or instance.tags)
instance.maintenance_configuration_id = _complete_maintenance_configuration_id(
cmd.cli_ctx, maintenance_configuration_id or instance.maintenance_configuration_id)
instance.v_cores = (vcores or instance.v_cores)
instance.license_type = (license_type or instance.license_type)
instance.sku.name = None
instance.sku.tier = (tier or instance.sku.tier)
instance.sku.family = (family or instance.sku.family)
instance.sku = _find_instance_pool_sku_from_capabilities(
cmd.cli_ctx,
instance.location,
instance.sku)
return instance | Updates a instance pool | instance_pool_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _find_instance_pool_sku_from_capabilities(cli_ctx, location, sku):
'''
Validate if the sku family and edition input by user are permissible in the region using
capabilities API and get the SKU name
'''
logger.debug('_find_instance_pool_sku_from_capabilities input: %s', sku)
# Get location capability
loc_capability = _get_location_capability(
cli_ctx, location, CapabilityGroup.SUPPORTED_MANAGED_INSTANCE_VERSIONS)
# Get default server version capability
managed_instance_version_capability = _get_default_capability(
loc_capability.supported_managed_instance_versions)
# Find edition capability, based on requested sku properties
edition_capability = _find_edition_capability(
sku, managed_instance_version_capability.supported_instance_pool_editions)
# Find family level capability, based on requested sku properties
_find_family_capability(
sku, edition_capability.supported_families)
result = Sku(
name="instance-pool",
tier=sku.tier,
family=sku.family)
logger.debug(
'_find_instance_pool_sku_from_capabilities return: %s',
result)
return result | Validate if the sku family and edition input by user are permissible in the region using
capabilities API and get the SKU name | _find_instance_pool_sku_from_capabilities | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_create(
client,
resource_group_name,
server_name,
minimal_tls_version=None,
assign_identity=False,
no_wait=False,
enable_public_network=None,
restrict_outbound_network_access=None,
key_id=None,
federated_client_id=None,
user_assigned_identity_id=None,
primary_user_assigned_identity_id=None,
identity_type=None,
enable_ad_only_auth=False,
external_admin_principal_type=None,
external_admin_sid=None,
external_admin_name=None,
**kwargs):
'''
Creates a server.
'''
if assign_identity:
kwargs['identity'] = _get_identity_object_from_type(True, identity_type, user_assigned_identity_id, None)
else:
kwargs['identity'] = _get_identity_object_from_type(False, identity_type, user_assigned_identity_id, None)
if enable_public_network is not None:
kwargs['public_network_access'] = (
ServerNetworkAccessFlag.ENABLED if enable_public_network
else ServerNetworkAccessFlag.DISABLED)
if restrict_outbound_network_access is not None:
kwargs['restrict_outbound_network_access'] = (
ServerNetworkAccessFlag.ENABLED if restrict_outbound_network_access
else ServerNetworkAccessFlag.DISABLED)
if minimal_tls_version is not None:
kwargs['minimal_tls_version'] = minimal_tls_version
else:
kwargs['minimal_tls_version'] = SqlServerMinimalTlsVersionType.tls_1_2
kwargs['key_id'] = key_id
kwargs['federated_client_id'] = federated_client_id
kwargs['primary_user_assigned_identity_id'] = primary_user_assigned_identity_id
ad_only = None
if enable_ad_only_auth:
ad_only = True
tenant_id = None
if external_admin_name is not None:
tenant_id = _get_tenant_id()
kwargs['administrators'] = ServerExternalAdministrator(
principal_type=external_admin_principal_type,
login=external_admin_name,
sid=external_admin_sid,
azure_ad_only_authentication=ad_only,
tenant_id=tenant_id)
# Create
return sdk_no_wait(no_wait, client.begin_create_or_update,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=kwargs) | Creates a server. | server_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_list(
client,
resource_group_name=None,
expand_ad_admin=False):
'''
Lists servers in a resource group or subscription
'''
expand = None
if expand_ad_admin:
expand = 'administrators/activedirectory'
if resource_group_name:
# List all servers in the resource group
return client.list_by_resource_group(resource_group_name=resource_group_name, expand=expand)
# List all servers in the subscription
return client.list(expand) | Lists servers in a resource group or subscription | server_list | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_get(
client,
resource_group_name,
server_name,
expand_ad_admin=False):
'''
Gets a server
'''
expand = None
if expand_ad_admin:
expand = 'administrators/activedirectory'
# List all servers in the subscription
return client.get(resource_group_name, server_name, expand) | Gets a server | server_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_update(
instance,
administrator_login_password=None,
assign_identity=False,
minimal_tls_version=None,
enable_public_network=None,
restrict_outbound_network_access=None,
primary_user_assigned_identity_id=None,
key_id=None,
federated_client_id=None,
identity_type=None,
user_assigned_identity_id=None):
'''
Updates a server. Custom update function to apply parameters to instance.
'''
# Once assigned, the identity cannot be removed
# if instance.identity is None and assign_identity:
# instance.identity = ResourceIdentity(type=IdentityType.system_assigned.value)
instance.identity = _get_identity_object_from_type(
assign_identity,
identity_type,
user_assigned_identity_id,
instance.identity)
# Apply params to instance
instance.administrator_login_password = (
administrator_login_password or instance.administrator_login_password)
instance.minimal_tls_version = (
minimal_tls_version or instance.minimal_tls_version)
if enable_public_network is not None:
instance.public_network_access = (
ServerNetworkAccessFlag.ENABLED if enable_public_network
else ServerNetworkAccessFlag.DISABLED)
if restrict_outbound_network_access is not None:
instance.public_network_access = (
ServerNetworkAccessFlag.ENABLED if restrict_outbound_network_access
else ServerNetworkAccessFlag.DISABLED)
instance.primary_user_assigned_identity_id = (
primary_user_assigned_identity_id or instance.primary_user_assigned_identity_id)
instance.key_id = (key_id or instance.key_id)
instance.federated_client_id = (federated_client_id or instance.federated_client_id)
return instance | Updates a server. Custom update function to apply parameters to instance. | server_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ad_admin_set(
client,
resource_group_name,
server_name,
**kwargs):
'''
Sets a server's AD admin.
'''
kwargs['tenant_id'] = _get_tenant_id()
kwargs['administrator_type'] = AdministratorType.ACTIVE_DIRECTORY
return client.begin_create_or_update(
server_name=server_name,
resource_group_name=resource_group_name,
administrator_name=AdministratorName.ACTIVE_DIRECTORY,
parameters=kwargs) | Sets a server's AD admin. | server_ad_admin_set | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ad_admin_delete(
client,
resource_group_name,
server_name):
'''
Sets a server's AD admin.
'''
return client.begin_delete(
server_name=server_name,
resource_group_name=resource_group_name,
administrator_name=AdministratorName.ACTIVE_DIRECTORY) | Sets a server's AD admin. | server_ad_admin_delete | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ad_admin_update(
instance,
login=None,
sid=None,
tenant_id=None):
'''
Updates a server' AD admin.
'''
# Apply params to instance
instance.login = login or instance.login
instance.sid = sid or instance.sid
instance.tenant_id = tenant_id or instance.tenant_id
return instance | Updates a server' AD admin. | server_ad_admin_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ad_admin_update_setter(
client,
resource_group_name,
server_name,
**kwargs):
'''
Updates a server' AD admin.
'''
return client.begin_create_or_update(
server_name=server_name,
resource_group_name=resource_group_name,
administrator_name=AdministratorName.ACTIVE_DIRECTORY,
parameters=kwargs["parameters"]) | Updates a server' AD admin. | server_ad_admin_update_setter | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_ad_admin_update_getter(
client,
resource_group_name,
server_name):
'''
Updates a server' AD admin.
'''
return client.get(
server_name=server_name,
resource_group_name=resource_group_name,
administrator_name=AdministratorName.ACTIVE_DIRECTORY) | Updates a server' AD admin. | server_ad_admin_update_getter | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def firewall_rule_allow_all_azure_ips(
client,
server_name,
resource_group_name):
'''
Creates a firewall rule with special start/end ip address value
that represents all azure ips.
'''
# Name of the rule that will be created
rule_name = 'AllowAllAzureIPs'
# Special start/end IP that represents allowing all azure ips
azure_ip_addr = '0.0.0.0'
return client.create_or_update(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=rule_name,
start_ip_address=azure_ip_addr,
end_ip_address=azure_ip_addr) | Creates a firewall rule with special start/end ip address value
that represents all azure ips. | firewall_rule_allow_all_azure_ips | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def firewall_rule_update(
client,
firewall_rule_name,
server_name,
resource_group_name,
start_ip_address=None,
end_ip_address=None):
'''
Updates a firewall rule.
'''
# Get existing instance
instance = client.get(
firewall_rule_name=firewall_rule_name,
server_name=server_name,
resource_group_name=resource_group_name)
# Send update
return client.create_or_update(
firewall_rule_name=firewall_rule_name,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=FirewallRule(start_ip_address=start_ip_address or instance.start_ip_address,
end_ip_address=end_ip_address or instance.end_ip_address)) | Updates a firewall rule. | firewall_rule_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def firewall_rule_create(
client,
firewall_rule_name,
server_name,
resource_group_name,
start_ip_address=None,
end_ip_address=None):
'''
Creates a firewall rule.
'''
return client.create_or_update(
firewall_rule_name=firewall_rule_name,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=FirewallRule(start_ip_address=start_ip_address,
end_ip_address=end_ip_address)) | Creates a firewall rule. | firewall_rule_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def ipv6_firewall_rule_update(
client,
firewall_rule_name,
server_name,
resource_group_name,
start_ipv6_address=None,
end_ipv6_address=None):
'''
Updates an ipv6 firewall rule.
'''
# Get existing instance
instance = client.get(
firewall_rule_name=firewall_rule_name,
server_name=server_name,
resource_group_name=resource_group_name)
# Send update
return client.create_or_update(
firewall_rule_name=firewall_rule_name,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=IPv6FirewallRule(start_i_pv6_address=start_ipv6_address or instance.start_ipv6_address,
end_i_pv6_address=end_ipv6_address or instance.end_ipv6_address)) | Updates an ipv6 firewall rule. | ipv6_firewall_rule_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def ipv6_firewall_rule_create(
client,
firewall_rule_name,
server_name,
resource_group_name,
start_ipv6_address=None,
end_ipv6_address=None):
'''
Creates an ipv6 firewall rule.
'''
return client.create_or_update(
firewall_rule_name=firewall_rule_name,
server_name=server_name,
resource_group_name=resource_group_name,
parameters=IPv6FirewallRule(start_i_pv6_address=start_ipv6_address,
end_i_pv6_address=end_ipv6_address)) | Creates an ipv6 firewall rule. | ipv6_firewall_rule_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def outbound_firewall_rule_create(
client,
server_name,
resource_group_name,
outbound_rule_fqdn):
'''
Creates a new outbound firewall rule.
'''
return client.begin_create_or_update(
server_name=server_name,
resource_group_name=resource_group_name,
outbound_rule_fqdn=outbound_rule_fqdn,
parameters=OutboundFirewallRule()) | Creates a new outbound firewall rule. | outbound_firewall_rule_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_key_create(
client,
resource_group_name,
server_name,
kid=None):
'''
Creates a server key.
'''
key_name = _get_server_key_name_from_uri(kid)
return client.begin_create_or_update(
resource_group_name=resource_group_name,
server_name=server_name,
key_name=key_name,
parameters=ServerKey(
server_key_type=ServerKeyType.AZURE_KEY_VAULT,
uri=kid)
) | Creates a server key. | server_key_create | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_key_get(
client,
resource_group_name,
server_name,
kid):
'''
Gets a server key.
'''
key_name = _get_server_key_name_from_uri(kid)
return client.get(
resource_group_name=resource_group_name,
server_name=server_name,
key_name=key_name) | Gets a server key. | server_key_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_key_delete(
client,
resource_group_name,
server_name,
kid):
'''
Deletes a server key.
'''
key_name = _get_server_key_name_from_uri(kid)
return client.begin_delete(
resource_group_name=resource_group_name,
server_name=server_name,
key_name=key_name) | Deletes a server key. | server_key_delete | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def _get_server_key_name_from_uri(uri):
'''
Gets the key's name to use as a SQL server key.
The SQL server key API requires that the server key has a specific name
based on the vault, key and key version.
'''
import re
match = re.match(r'https://(.)+\.(managedhsm.azure.net|managedhsm-preview.azure.net|vault.azure.net|vault-int.azure-int.net|vault.azure.cn|managedhsm.azure.cn|vault.usgovcloudapi.net|managedhsm.usgovcloudapi.net|vault.microsoftazure.de|managedhsm.microsoftazure.de|vault.cloudapi.eaglex.ic.gov|vault.cloudapi.microsoft.scloud)(:443)?\/keys/[^\/]+\/[0-9a-zA-Z]+$', uri)
if match is None:
raise CLIError('The provided uri is invalid. Please provide a valid Azure Key Vault key id. For example: '
'"https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901" '
'or "https://YourManagedHsmRegion.YourManagedHsmName.managedhsm.azure.net/keys/YourKeyName/01234567890123456789012345678901"')
vault = uri.split('.')[0].split('/')[-1]
key = uri.split('/')[-2]
version = uri.split('/')[-1]
return '{}_{}_{}'.format(vault, key, version) | Gets the key's name to use as a SQL server key.
The SQL server key API requires that the server key has a specific name
based on the vault, key and key version. | _get_server_key_name_from_uri | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def server_dns_alias_set(
cmd,
client,
resource_group_name,
server_name,
dns_alias_name,
original_server_name,
original_subscription_id=None,
original_resource_group_name=None,
**kwargs):
'''
Sets a server DNS alias.
'''
from urllib.parse import quote
from azure.cli.core.commands.client_factory import get_subscription_id
# Build the old alias id
old_alias_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Sql/servers/{}/dnsAliases/{}".format(
quote(original_subscription_id or get_subscription_id(cmd.cli_ctx)),
quote(original_resource_group_name or resource_group_name),
quote(original_server_name),
quote(dns_alias_name))
kwargs['old_server_dns_alias_id'] = old_alias_id
return client.begin_acquire(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
parameters=kwargs) | Sets a server DNS alias. | server_dns_alias_set | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def encryption_protector_get(
client,
resource_group_name,
server_name):
'''
Gets a server encryption protector.
'''
return client.get(
resource_group_name=resource_group_name,
server_name=server_name,
encryption_protector_name=EncryptionProtectorName.CURRENT) | Gets a server encryption protector. | encryption_protector_get | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def encryption_protector_update(
client,
resource_group_name,
server_name,
server_key_type,
kid=None,
auto_rotation_enabled=None):
'''
Updates a server encryption protector.
'''
if server_key_type == ServerKeyType.SERVICE_MANAGED:
key_name = 'ServiceManaged'
else:
if kid is None:
raise CLIError('A uri must be provided if the server_key_type is AzureKeyVault.')
key_name = _get_server_key_name_from_uri(kid)
return client.begin_create_or_update(
resource_group_name=resource_group_name,
server_name=server_name,
encryption_protector_name=EncryptionProtectorName.CURRENT,
parameters=EncryptionProtector(server_key_type=server_key_type,
server_key_name=key_name,
auto_rotation_enabled=auto_rotation_enabled)) | Updates a server encryption protector. | encryption_protector_update | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
def encryption_protector_revalidate(
client,
resource_group_name,
server_name):
'''
Revalidate a server encryption protector.
'''
if server_name is None:
raise CLIError('Server name cannot be null')
try:
return client.begin_revalidate(
resource_group_name=resource_group_name,
server_name=server_name,
encryption_protector_name=EncryptionProtectorName.CURRENT)
except Exception as ex:
raise ex | Revalidate a server encryption protector. | encryption_protector_revalidate | python | Azure/azure-cli | src/azure-cli/azure/cli/command_modules/sql/custom.py | https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/sql/custom.py | MIT |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.