Returns a single or double quote character, whichever appears first in the given string. None is returned if the given string doesn't have a single or double quote character.
def _find_quote_char_in_part(part): """ Returns a single or double quote character, whichever appears first in the given string. None is returned if the given string doesn't have a single or double quote character. """ quote_char = None for ch in part: if ch in ('"', "'"): quote_char = ch break return quote_char
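A minimal usage sketch (illustrative inputs, assuming _find_quote_char_in_part is in scope):

print(_find_quote_char_in_part('--filters Name="tag:Name"'))  # '"'
print(_find_quote_char_in_part("it's a 'quoted' part"))       # "'"
print(_find_quote_char_in_part('no quotes here'))             # None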
Grabs the service id and the operation name from an event name. This assumes that the event name is in the form event.service.operation.
def find_service_and_method_in_event_name(event_name): """ Grabs the service id and the operation name from an event name. This is making the assumption that the event name is in the form event.service.operation. """ split_event = event_name.split('.')[1:] service_name = None if len(split_event) > 0: service_name = split_event[0] operation_name = None if len(split_event) > 1: operation_name = split_event[1] return service_name, operation_name
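A minimal usage sketch (hypothetical event names, assuming find_service_and_method_in_event_name is in scope); the leading component is dropped and the next two components are treated as the service and operation:

print(find_service_and_method_in_event_name('building-argument-table.iot.create-keys-and-certificate'))
# ('iot', 'create-keys-and-certificate')
print(find_service_and_method_in_event_name('top-level-args-parsed'))
# (None, None)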
Check if shape is a document type
def is_document_type(shape): """Check if shape is a document type""" return getattr(shape, 'is_document_type', False)
Check if the shape is a document type or wraps document types. This is helpful for determining whether a shape purely deals with document types, i.e. whether the shape is itself a document type or is a list or map whose underlying values are document types.
def is_document_type_container(shape): """Check if the shape is a document type or wraps document types This is helpful to determine if a shape purely deals with document types whether the shape is a document type or it is lists or maps whose base values are document types. """ if not shape: return False recording_visitor = ShapeRecordingVisitor() ShapeWalker().walk(shape, recording_visitor) end_shape = recording_visitor.visited.pop() if not is_document_type(end_shape): return False for shape in recording_visitor.visited: if shape.type_name not in ['list', 'map']: return False return True
Check if the shape is a streaming blob type.
def is_streaming_blob_type(shape): """Check if the shape is a streaming blob type.""" return (shape and shape.type_name == 'blob' and shape.serialization.get('streaming', False))
Check if the shape is a tagged union structure.
def is_tagged_union_type(shape): """Check if the shape is a tagged union structure.""" return getattr(shape, 'is_tagged_union', False)
Check if document types are ever used in the operation
def operation_uses_document_types(operation_model): """Check if document types are ever used in the operation""" recording_visitor = ShapeRecordingVisitor() walker = ShapeWalker() walker.walk(operation_model.input_shape, recording_visitor) walker.walk(operation_model.output_shape, recording_visitor) for visited_shape in recording_visitor.visited: if is_document_type(visited_shape): return True return False
JSON encoder that formats datetimes in ISO 8601 format.
def json_encoder(obj): """JSON encoder that formats datetimes as ISO8601 format.""" if isinstance(obj, datetime.datetime): return obj.isoformat() else: return obj
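A minimal sketch of using json_encoder as the default hook for json.dumps (illustrative payload, assuming json_encoder is in scope):

import datetime
import json

payload = {'CreateDate': datetime.datetime(2024, 1, 2, 3, 4, 5)}
# datetime values are rendered as ISO 8601 strings; other values fall
# back to the standard JSON serialization.
print(json.dumps(payload, default=json_encoder))
# {"CreateDate": "2024-01-02T03:04:05"}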
Asserts that a path is writable and returns the expanded path
def resolve_given_outfile_path(path): """Asserts that a path is writable and returns the expanded path""" if path is None: return outfile = os.path.expanduser(os.path.expandvars(path)) if not os.access(os.path.dirname(os.path.abspath(outfile)), os.W_OK): raise ValueError('Unable to write to file: %s' % outfile) return outfile
Returns True if a parsed result is successful
def is_parsed_result_successful(parsed_result): """Returns True if a parsed result is successful""" return parsed_result['ResponseMetadata']['HTTPStatusCode'] < 300
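A minimal sketch (hand-built parsed results, assuming is_parsed_result_successful is in scope); status codes of 300 and above are treated as failures:

ok = {'ResponseMetadata': {'HTTPStatusCode': 200}}
redirect = {'ResponseMetadata': {'HTTPStatusCode': 301}}
print(is_parsed_result_successful(ok))        # True
print(is_parsed_result_successful(redirect))  # False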
Hydrate an index-field option value to construct something like:: { 'index_field': { 'DoubleOptions': { 'DefaultValue': 0.0 } } }
def index_hydrate(params, container, cli_type, key, value): """ Hydrate an index-field option value to construct something like:: { 'index_field': { 'DoubleOptions': { 'DefaultValue': 0.0 } } } """ if 'IndexField' not in params: params['IndexField'] = {} if 'IndexFieldType' not in params['IndexField']: raise RuntimeError('You must pass the --type option.') # Find the type and transform it for the type options field name # E.g: int-array => IntArray _type = params['IndexField']['IndexFieldType'] _type = ''.join([i.capitalize() for i in _type.split('-')]) # ``index_field`` of type ``latlon`` is mapped to ``Latlon``. # However, it is defined as ``LatLon`` in the model so it needs to # be changed. if _type == 'Latlon': _type = 'LatLon' # Transform string value to the correct type? if key.split(SEP)[-1] == 'DefaultValue': value = DEFAULT_VALUE_TYPE_MAP.get(_type, lambda x: x)(value) # Set the proper options field if _type + 'Options' not in params['IndexField']: params['IndexField'][_type + 'Options'] = {} params['IndexField'][_type + 'Options'][key.split(SEP)[-1]] = value
The entry point for CloudSearch customizations.
def initialize(cli): """ The entry point for CloudSearch customizations. """ flattened = FlattenArguments('cloudsearch', FLATTEN_CONFIG) flattened.register(cli)
The entry point for the credential helper
def initialize(cli): """ The entry point for the credential helper """ cli.register('building-command-table.codecommit', inject_commands)
Injects new commands into the codecommit subcommand.
def inject_commands(command_table, session, **kwargs): """ Injects new commands into the codecommit subcommand. """ command_table['credential-helper'] = CodeCommitCommand(session)
Add outfile save arguments to create-keys-and-certificate - ``--certificate-pem-outfile`` - ``--public-key-outfile`` - ``--private-key-outfile``
def register_create_keys_and_cert_arguments(session, argument_table, **kwargs): """Add outfile save arguments to create-keys-and-certificate - ``--certificate-pem-outfile`` - ``--public-key-outfile`` - ``--private-key-outfile`` """ after_event = 'after-call.iot.CreateKeysAndCertificate' argument_table['certificate-pem-outfile'] = QueryOutFileArgument( session=session, name='certificate-pem-outfile', query='certificatePem', after_call_event=after_event, perm=0o600) argument_table['public-key-outfile'] = QueryOutFileArgument( session=session, name='public-key-outfile', query='keyPair.PublicKey', after_call_event=after_event, perm=0o600) argument_table['private-key-outfile'] = QueryOutFileArgument( session=session, name='private-key-outfile', query='keyPair.PrivateKey', after_call_event=after_event, perm=0o600)
Add certificate-pem-outfile to create-certificate-from-csr
def register_create_keys_from_csr_arguments(session, argument_table, **kwargs): """Add certificate-pem-outfile to create-certificate-from-csr""" argument_table['certificate-pem-outfile'] = QueryOutFileArgument( session=session, name='certificate-pem-outfile', query='certificatePem', after_call_event='after-call.iot.CreateCertificateFromCsr', perm=0o600)
Cleans a name to fit IAM's naming requirements.
def clean_for_iam(name): """ Cleans a name to fit IAM's naming requirements. """ return re.sub(r'[^A-Za-z0-9+=,.@_-]+', '-', name)
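A minimal sketch (illustrative name, assuming clean_for_iam is in scope); each run of characters outside [A-Za-z0-9+=,.@_-] collapses to a single '-':

print(clean_for_iam('my stack / role #1'))  # 'my-stack-role-1'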
Shortens a name to the given number of characters.
def shorten_name(name, max_length): """ Shortens a name to the given number of characters. """ if len(name) <= max_length: return name q, r = divmod(max_length - 3, 2) return name[:q + r] + "..." + name[-q:]
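A minimal sketch (illustrative names, assuming shorten_name is in scope); names within the limit pass through unchanged, while longer names keep the head and tail around a '...' marker and land exactly at max_length:

print(shorten_name('short', 10))                  # 'short'
print(shorten_name('instance-profile-name', 10))  # 'inst...ame'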
Create a hidden alias for an existing argument. This will copy an existing argument object in an arg table, and add a new entry to the arg table with a different name. The new argument will also be undocumented. This is needed if you want to check an existing argument, but you still need the other one to work for backwards compatibility reasons.
def make_hidden_alias(argument_table, existing_name, alias_name): """Create a hidden alias for an existing argument. This will copy an existing argument object in an arg table, and add a new entry to the arg table with a different name. The new argument will also be undocumented. This is needed if you want to check an existing argument, but you still need the other one to work for backwards compatibility reasons. """ current = argument_table[existing_name] copy_arg = _copy_argument(argument_table, existing_name, alias_name) copy_arg._UNDOCUMENTED = True if current.required: # If the current argument is required, then # we'll mark both as not required, but # flag _DOCUMENT_AS_REQUIRED so our doc gen # knows to still document this argument as required. copy_arg.required = False current.required = False current._DOCUMENT_AS_REQUIRED = True
Moves a command to a new name, keeping the old name as a hidden alias. :type command_table: dict :param command_table: The full command table for the CLI or a service. :type existing_name: str :param existing_name: The current name of the command. :type new_name: str :param new_name: The new name for the command.
def alias_command(command_table, existing_name, new_name): """Moves an argument to a new name, keeping the old as a hidden alias. :type command_table: dict :param command_table: The full command table for the CLI or a service. :type existing_name: str :param existing_name: The current name of the command. :type new_name: str :param new_name: The new name for the command. """ current = command_table[existing_name] _copy_argument(command_table, existing_name, new_name) current._UNDOCUMENTED = True
Create a hidden alias for an existing command. This will copy an existing command object in a command table and add a new entry to the command table with a different name. The new command will be undocumented. This is needed if you want to change an existing command, but you still need the old name to work for backwards compatibility reasons. :type command_table: dict :param command_table: The full command table for the CLI or a service. :type existing_name: str :param existing_name: The current name of the command. :type alias_name: str :param alias_name: The new name for the command.
def make_hidden_command_alias(command_table, existing_name, alias_name): """Create a hidden alias for an exiting command. This will copy an existing command object in a command table and add a new entry to the command table with a different name. The new command will be undocumented. This is needed if you want to change an existing command, but you still need the old name to work for backwards compatibility reasons. :type command_table: dict :param command_table: The full command table for the CLI or a service. :type existing_name: str :param existing_name: The current name of the command. :type alias_name: str :param alias_name: The new name for the command. """ new = _copy_argument(command_table, existing_name, alias_name) new._UNDOCUMENTED = True
Validate mutually exclusive groups in the parsed args.
def validate_mutually_exclusive(parsed_args, *groups): """Validate mutually exclusive groups in the parsed args.""" args_dict = vars(parsed_args) all_args = set(arg for group in groups for arg in group) if not any(k in all_args for k in args_dict if args_dict[k] is not None): # If none of the specified args are in a mutually exclusive group # there is nothing left to validate. return current_group = None for key in [k for k in args_dict if args_dict[k] is not None]: key_group = _get_group_for_key(key, groups) if key_group is None: # If they key is not part of a mutex group, we can move on. continue if current_group is None: current_group = key_group elif not key_group == current_group: raise ValueError('The key "%s" cannot be specified when one ' 'of the following keys are also specified: ' '%s' % (key, ', '.join(current_group)))
Creates a service client, taking parsed_globals into account. Any values specified in overrides will override the returned dict. Note that this override occurs after 'region' from parsed_globals has been translated into 'region_name' in the resulting dict.
def create_client_from_parsed_globals(session, service_name, parsed_globals, overrides=None): """Creates a service client, taking parsed_globals into account Any values specified in overrides will override the returned dict. Note that this override occurs after 'region' from parsed_globals has been translated into 'region_name' in the resulting dict. """ client_args = {} if 'region' in parsed_globals: client_args['region_name'] = parsed_globals.region if 'endpoint_url' in parsed_globals: client_args['endpoint_url'] = parsed_globals.endpoint_url if 'verify_ssl' in parsed_globals: client_args['verify'] = parsed_globals.verify_ssl if overrides: client_args.update(overrides) return session.create_client(service_name, **client_args)
This function is used to properly write unicode to a file, usually stdout or stderr. It ensures that the proper encoding is used if the statement is not a string type.
def uni_print(statement, out_file=None): """ This function is used to properly write unicode to a file, usually stdout or stdderr. It ensures that the proper encoding is used if the statement is not a string type. """ if out_file is None: out_file = sys.stdout try: # Otherwise we assume that out_file is a # text writer type that accepts str/unicode instead # of bytes. out_file.write(statement) except UnicodeEncodeError: # Some file like objects like cStringIO will # try to decode as ascii on python2. # # This can also fail if our encoding associated # with the text writer cannot encode the unicode # ``statement`` we've been given. This commonly # happens on windows where we have some S3 key # previously encoded with utf-8 that can't be # encoded using whatever codepage the user has # configured in their console. # # At this point we've already failed to do what's # been requested. We now try to make a best effort # attempt at printing the statement to the outfile. # We're using 'ascii' as the default because if the # stream doesn't give us any encoding information # we want to pick an encoding that has the highest # chance of printing successfully. new_encoding = getattr(out_file, 'encoding', 'ascii') # When the output of the aws command is being piped, # ``sys.stdout.encoding`` is ``None``. if new_encoding is None: new_encoding = 'ascii' new_statement = statement.encode( new_encoding, 'replace').decode(new_encoding) out_file.write(new_statement) out_file.flush()
Method to return region value as expected by policy arn
def get_policy_arn_suffix(region): """Method to return region value as expected by policy arn""" region_string = region.lower() if region_string.startswith("cn-"): return "aws-cn" elif region_string.startswith("us-gov"): return "aws-us-gov" else: return "aws"
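A minimal sketch (assuming get_policy_arn_suffix is in scope); the partition portion of the policy ARN differs by region family:

print(get_policy_arn_suffix('cn-north-1'))     # 'aws-cn'
print(get_policy_arn_suffix('us-gov-west-1'))  # 'aws-us-gov'
print(get_policy_arn_suffix('eu-west-1'))      # 'aws'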
Upload local artifacts referenced by the property at the given resource and return the S3 URL of the uploaded object. It is the responsibility of callers to ensure the property value is a valid string. If path refers to a file, this method will upload the file. If path refers to a folder, this method will zip the folder and upload the zip to S3. If path is omitted, this method will zip the current working folder and upload. If path is already a path to an S3 object, this method does nothing. :param resource_id: Id of the CloudFormation resource :param resource_dict: Dictionary containing resource definition :param property_name: Property name of CloudFormation resource where this local path is present :param parent_dir: Resolve all relative paths with respect to this directory :param uploader: Method to upload files to S3 :return: S3 URL of the uploaded object :raise: ValueError if path is not an S3 URL or a local path
def upload_local_artifacts(resource_id, resource_dict, property_name, parent_dir, uploader): """ Upload local artifacts referenced by the property at given resource and return S3 URL of the uploaded object. It is the responsibility of callers to ensure property value is a valid string If path refers to a file, this method will upload the file. If path refers to a folder, this method will zip the folder and upload the zip to S3. If path is omitted, this method will zip the current working folder and upload. If path is already a path to S3 object, this method does nothing. :param resource_id: Id of the CloudFormation resource :param resource_dict: Dictionary containing resource definition :param property_name: Property name of CloudFormation resource where this local path is present :param parent_dir: Resolve all relative paths with respect to this directory :param uploader: Method to upload files to S3 :return: S3 URL of the uploaded object :raise: ValueError if path is not a S3 URL or a local path """ local_path = jmespath.search(property_name, resource_dict) if local_path is None: # Build the root directory and upload to S3 local_path = parent_dir if is_s3_url(local_path): # A valid CloudFormation template will specify artifacts as S3 URLs. # This check is supporting the case where your resource does not # refer to local artifacts # Nothing to do if property value is an S3 URL LOG.debug("Property {0} of {1} is already a S3 URL" .format(property_name, resource_id)) return local_path local_path = make_abs_path(parent_dir, local_path) # Or, pointing to a folder. Zip the folder and upload if is_local_folder(local_path): return zip_and_upload(local_path, uploader) # Path could be pointing to a file. Upload the file elif is_local_file(local_path): return uploader.upload_with_dedup(local_path) raise exceptions.InvalidLocalPathError( resource_id=resource_id, property_name=property_name, local_path=local_path)
Zip the entire folder and return the name of the zip file. Use this inside a "with" statement to clean up the zip file after it is used. :param folder_path: :return: Name of the zipfile
def zip_folder(folder_path): """ Zip the entire folder and return a file to the zip. Use this inside a "with" statement to cleanup the zipfile after it is used. :param folder_path: :return: Name of the zipfile """ filename = os.path.join( tempfile.gettempdir(), "data-" + uuid.uuid4().hex) zipfile_name = make_zip(filename, folder_path) try: yield zipfile_name finally: if os.path.exists(zipfile_name): os.remove(zipfile_name)
YAML constructor to parse CloudFormation intrinsics. This will return a dictionary with key being the intrinsic name
def intrinsics_multi_constructor(loader, tag_prefix, node): """ YAML constructor to parse CloudFormation intrinsics. This will return a dictionary with key being the intrinsic name """ # Get the actual tag name excluding the first exclamation tag = node.tag[1:] # Some intrinsic functions doesn't support prefix "Fn::" prefix = "Fn::" if tag in ["Ref", "Condition"]: prefix = "" cfntag = prefix + tag if tag == "GetAtt" and isinstance(node.value, six.string_types): # ShortHand notation for !GetAtt accepts Resource.Attribute format # while the standard notation is to use an array # [Resource, Attribute]. Convert shorthand to standard format value = node.value.split(".", 1) elif isinstance(node, ScalarNode): # Value of this node is scalar value = loader.construct_scalar(node) elif isinstance(node, SequenceNode): # Value of this node is an array (Ex: [1,2]) value = loader.construct_sequence(node) else: # Value of this node is an mapping (ex: {foo: bar}) value = loader.construct_mapping(node) return {cfntag: value}
Dumps the dictionary as a YAML document :param dict_to_dump: :return:
def yaml_dump(dict_to_dump): """ Dumps the dictionary as a YAML document :param dict_to_dump: :return: """ FlattenAliasDumper.add_representer(OrderedDict, _dict_representer) return yaml.dump( dict_to_dump, default_flow_style=False, Dumper=FlattenAliasDumper, )
Parse a yaml string
def yaml_parse(yamlstr): """Parse a yaml string""" try: # PyYAML doesn't support json as well as it should, so if the input # is actually just json it is better to parse it with the standard # json parser. return json.loads(yamlstr, object_pairs_hook=OrderedDict) except ValueError: loader = SafeLoaderWrapper loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor) loader.add_multi_constructor("!", intrinsics_multi_constructor) return yaml.load(yamlstr, loader)
The entry point for CloudFormation high level commands.
def initialize(cli): """ The entry point for CloudFormation high level commands. """ cli.register('building-command-table.cloudformation', inject_commands)
Called when the CloudFormation command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names.
def inject_commands(command_table, session, **kwargs): """ Called when the CloudFormation command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names. """ command_table['package'] = PackageCommand(session) command_table['deploy'] = DeployCommand(session)
Gets the account ID portion of an ARN
def get_account_id_from_arn(trail_arn): """Gets the account ID portion of an ARN""" return trail_arn.split(':')[4]
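A minimal sketch (hypothetical ARN, assuming get_account_id_from_arn is in scope); the account id is the fifth colon-delimited field:

trail_arn = 'arn:aws:cloudtrail:us-east-1:123456789012:trail/my-trail'
print(get_account_id_from_arn(trail_arn))  # '123456789012'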
Retrieve the AWS account ID for the authenticated user or role
def get_account_id(sts_client): """Retrieve the AWS account ID for the authenticated user or role""" response = sts_client.get_caller_identity() return response['Account']
Gets trail information based on the trail's ARN
def get_trail_by_arn(cloudtrail_client, trail_arn): """Gets trail information based on the trail's ARN""" trails = cloudtrail_client.describe_trails()['trailList'] for trail in trails: if trail.get('TrailARN', None) == trail_arn: return trail raise ValueError('A trail could not be found for %s' % trail_arn)
Returns a formatted date string in a CloudTrail date format
def format_date(date): """Returns a formatted date string in a CloudTrail date format""" return date.strftime(DATE_FORMAT)
Returns a formatted date string meant for CLI output
def format_display_date(date): """Returns a formatted date string meant for CLI output""" return date.strftime(DISPLAY_DATE_FORMAT)
Returns a normalized date using a UTC timezone
def normalize_date(date): """Returns a normalized date using a UTC timezone""" return date.replace(tzinfo=tz.tzutc())
Extract the timestamp portion of a manifest file. Manifest file names take the following form: AWSLogs/{account}/CloudTrail-Digest/{region}/{ymd}/{account}_CloudTrail-Digest_{region}_{name}_region_{date}.json.gz
def extract_digest_key_date(digest_s3_key): """Extract the timestamp portion of a manifest file. Manifest file names take the following form: AWSLogs/{account}/CloudTrail-Digest/{region}/{ymd}/{account}_CloudTrail \ -Digest_{region}_{name}_region_{date}.json.gz """ return digest_s3_key[-24:-8]
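A minimal sketch (hypothetical digest key, assuming extract_digest_key_date is in scope); the slice [-24:-8] drops the trailing '.json.gz' and keeps the 16-character timestamp that precedes it:

key = ('AWSLogs/123456789012/CloudTrail-Digest/us-east-1/2015/08/17/'
       '123456789012_CloudTrail-Digest_us-east-1_my-trail_'
       'us-east-1_20150817T234550Z.json.gz')
print(extract_digest_key_date(key))  # '20150817T234550Z'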
Ensures that the arn looks correct. ARNs look like: arn:aws:cloudtrail:us-east-1:123456789012:trail/foo
def assert_cloudtrail_arn_is_valid(trail_arn): """Ensures that the arn looks correct. ARNs look like: arn:aws:cloudtrail:us-east-1:123456789012:trail/foo""" pattern = re.compile('arn:.+:cloudtrail:.+:\d{12}:trail/.+') if not pattern.match(trail_arn): raise ValueError('Invalid trail ARN provided: %s' % trail_arn)
Creates a CloudTrail DigestTraverser and its object graph. :type cloudtrail_client: botocore.client.CloudTrail :param cloudtrail_client: Client used to connect to CloudTrail :type organization_client: botocore.client.organizations :param organization_client: Client used to connect to Organizations :type s3_client_provider: S3ClientProvider :param s3_client_provider: Used to create an Amazon S3 client per region. :param trail_arn: CloudTrail trail ARN :param trail_source_region: The scanned region of a trail. :param on_invalid: Callback that is invoked when validating a digest fails. :param on_gap: Callback that is invoked when a digest has no link to the previous digest, but there are more digests to validate. This can happen when a trail is disabled for a period of time. :param on_missing: Callback that is invoked when a digest file has been deleted from Amazon S3 but is supposed to be present. :param bucket: Amazon S3 bucket of the trail if it is different than the bucket that is currently associated with the trail. :param prefix: Key prefix prepended to each digest and log placed in the Amazon S3 bucket if it is different than the prefix that is currently associated with the trail. :param account_id: The account id for which the digest files are validated. For normal trails this is the caller account, for organization trails it is the member account. ``on_gap``, ``on_invalid``, and ``on_missing`` callbacks are invoked with the following named arguments: - ``bucket``: The next S3 bucket. - ``next_key``: (optional) Next digest key that was found in the bucket. - ``next_end_date``: (optional) End date of the next found digest. - ``last_key``: The last digest key that was found. - ``last_start_date``: (optional) Start date of last found digest. - ``message``: (optional) Message string about the notification.
def create_digest_traverser(cloudtrail_client, organization_client, s3_client_provider, trail_arn, trail_source_region=None, on_invalid=None, on_gap=None, on_missing=None, bucket=None, prefix=None, account_id=None): """Creates a CloudTrail DigestTraverser and its object graph. :type cloudtrail_client: botocore.client.CloudTrail :param cloudtrail_client: Client used to connect to CloudTrail :type organization_client: botocore.client.organizations :param organization_client: Client used to connect to Organizations :type s3_client_provider: S3ClientProvider :param s3_client_provider: Used to create Amazon S3 client per/region. :param trail_arn: CloudTrail trail ARN :param trail_source_region: The scanned region of a trail. :param on_invalid: Callback that is invoked when validating a digest fails. :param on_gap: Callback that is invoked when a digest has no link to the previous digest, but there are more digests to validate. This can happen when a trail is disabled for a period of time. :param on_missing: Callback that is invoked when a digest file has been deleted from Amazon S3 but is supposed to be present. :param bucket: Amazon S3 bucket of the trail if it is different than the bucket that is currently associated with the trail. :param prefix: bucket: Key prefix prepended to each digest and log placed in the Amazon S3 bucket if it is different than the prefix that is currently associated with the trail. :param account_id: The account id for which the digest files are validated. For normal trails this is the caller account, for organization trails it is the member accout. ``on_gap``, ``on_invalid``, and ``on_missing`` callbacks are invoked with the following named arguments: - ``bucket`: The next S3 bucket. - ``next_key``: (optional) Next digest key that was found in the bucket. - ``next_end_date``: (optional) End date of the next found digest. - ``last_key``: The last digest key that was found. - ``last_start_date``: (optional) Start date of last found digest. - ``message``: (optional) Message string about the notification. """ assert_cloudtrail_arn_is_valid(trail_arn) organization_id = None if bucket is None: # Determine the bucket and prefix based on the trail arn. trail_info = get_trail_by_arn(cloudtrail_client, trail_arn) LOG.debug('Loaded trail info: %s', trail_info) bucket = trail_info['S3BucketName'] prefix = trail_info.get('S3KeyPrefix', None) is_org_trail = trail_info.get('IsOrganizationTrail') if is_org_trail: if not account_id: raise ParameterRequiredError( "Missing required parameter for organization " "trail: '--account-id'") organization_id = organization_client.describe_organization()[ 'Organization']['Id'] # Determine the region from the ARN (e.g., arn:aws:cloudtrail:REGION:...) trail_region = trail_arn.split(':')[3] # Determine the name from the ARN (the last part after "/") trail_name = trail_arn.split('/')[-1] # If account id is not specified parse it from trail ARN if not account_id: account_id = get_account_id_from_arn(trail_arn) digest_provider = DigestProvider( account_id=account_id, trail_name=trail_name, s3_client_provider=s3_client_provider, trail_source_region=trail_source_region, trail_home_region=trail_region, organization_id=organization_id) return DigestTraverser( digest_provider=digest_provider, starting_bucket=bucket, starting_prefix=prefix, on_invalid=on_invalid, on_gap=on_gap, on_missing=on_missing, public_key_provider=PublicKeyProvider(cloudtrail_client))
The entry point for CloudTrail high level commands.
def initialize(cli): """ The entry point for CloudTrail high level commands. """ cli.register('building-command-table.cloudtrail', inject_commands)
Called when the CloudTrail command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names.
def inject_commands(command_table, session, **kwargs): """ Called when the CloudTrail command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names. """ command_table['create-subscription'] = CloudTrailSubscribe(session) command_table['update-subscription'] = CloudTrailUpdate(session) command_table['validate-logs'] = CloudTrailValidateLogs(session)
The entry point for CodeDeploy high level commands.
def initialize(cli): """ The entry point for CodeDeploy high level commands. """ cli.register( 'building-command-table.main', change_name ) cli.register( 'building-command-table.deploy', inject_commands ) cli.register( 'building-argument-table.deploy.get-application-revision', modify_revision_arguments ) cli.register( 'building-argument-table.deploy.register-application-revision', modify_revision_arguments ) cli.register( 'building-argument-table.deploy.create-deployment', modify_revision_arguments )
Change all existing 'aws codedeploy' commands to 'aws deploy' commands.
def change_name(command_table, session, **kwargs): """ Change all existing 'aws codedeploy' commands to 'aws deploy' commands. """ utils.rename_command(command_table, 'codedeploy', 'deploy')
Inject custom 'aws deploy' commands.
def inject_commands(command_table, session, **kwargs): """ Inject custom 'aws deploy' commands. """ command_table['push'] = Push(session) command_table['register'] = Register(session) command_table['deregister'] = Deregister(session) command_table['install'] = Install(session) command_table['uninstall'] = Uninstall(session)
Change all existing ``aws config`` commands to ``aws configservice`` commands.
def change_name(command_table, session, **kwargs): """ Change all existing ``aws config`` commands to ``aws configservice`` commands. """ utils.rename_command(command_table, 'config', 'configservice')
Gets the path of where a service-2.json file should go in ~/.aws/models :type session: botocore.session.Session :param session: A session object :type service_definition: dict :param service_definition: The json loaded service definition :type service_name: str :param service_name: The service name to use. If this is not provided, this will be determined from a combination of available services and the service definition. :returns: The path to where a model should be placed based on the service definition and the current services in botocore.
def get_model_location(session, service_definition, service_name=None): """Gets the path of where a service-2.json file should go in ~/.aws/models :type session: botocore.session.Session :param session: A session object :type service_definition: dict :param service_definition: The json loaded service definition :type service_name: str :param service_name: The service name to use. If this not provided, this will be determined from a combination of available services and the service definition. :returns: The path to where are model should be placed based on the service definition and the current services in botocore. """ # Add the ServiceModel abstraction over the service json definition to # make it easier to work with. service_model = ServiceModel(service_definition) # Determine the service_name if not provided if service_name is None: endpoint_prefix = service_model.endpoint_prefix service_name = _get_service_name(session, endpoint_prefix) api_version = service_model.api_version # For the model location we only want the custom data path (~/.aws/models # not the one set by AWS_DATA_PATH) data_path = session.get_component('data_loader').CUSTOMER_DATA_PATH # Use the version of the model to determine the file's naming convention. service_model_name = ( 'service-%d.json' % int( float(service_definition.get('version', '2.0')))) return os.path.join(data_path, service_name, api_version, service_model_name)
Converts a profile name to a section header to be used in the config.
def profile_to_section(profile_name): """Converts a profile name to a section header to be used in the config.""" if any(c in _WHITESPACE for c in profile_name): profile_name = shlex_quote(profile_name) return 'profile %s' % profile_name
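A minimal sketch (illustrative profile names, assuming profile_to_section is in scope and shlex_quote behaves like shlex.quote):

print(profile_to_section('dev'))         # 'profile dev'
# A name containing whitespace gets quoted before being embedded.
print(profile_to_section('my profile'))  # "profile 'my profile'"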
The entry point for Lifecycle high level commands.
def dlm_initialize(cli): """ The entry point for Lifecycle high level commands. """ cli.register('building-command-table.dlm', register_commands)
Called when the Lifecycle command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names.
def register_commands(command_table, session, **kwargs): """ Called when the Lifecycle command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names. """ command_table['create-default-role'] = CreateDefaultRole(session)
This handler gets called after the argument table for the operation has been created. Its job is to add the ``priv-launch-key`` parameter.
def ec2_add_priv_launch_key(argument_table, operation_model, session, **kwargs): """ This handler gets called after the argument table for the operation has been created. It's job is to add the ``priv-launch-key`` parameter. """ argument_table['priv-launch-key'] = LaunchKeyArgument( session, operation_model, 'priv-launch-key')
The entry point for ECS high level commands.
def initialize(cli): """ The entry point for ECS high level commands. """ cli.register('building-command-table.ecs', inject_commands)
Called when the ECS command table is being built. Used to inject new high level commands into the command list.
def inject_commands(command_table, session, **kwargs): """ Called when the ECS command table is being built. Used to inject new high level commands into the command list. """ command_table['deploy'] = ECSDeploy(session) command_table['execute-command'] = ECSExecuteCommand( name='execute-command', parent_name='ecs', session=session, operation_model=session.get_service_model('ecs') .operation_model('ExecuteCommand'), operation_caller=ExecuteCommandCaller(session), )
Load an OrderedDict object from a yaml stream.
def ordered_yaml_load(stream): """ Load an OrderedDict object from a yaml stream.""" return yaml.load(stream, SafeOrderedLoader)
Dump an OrderedDict object to yaml. :param to_dump: The OrderedDict to dump :type to_dump: OrderedDict :param stream: The file to dump to If not given or if None, only return the value :type stream: file
def ordered_yaml_dump(to_dump, stream=None): """ Dump an OrderedDict object to yaml. :param to_dump: The OrderedDict to dump :type to_dump: OrderedDict :param stream: The file to dump to If not given or if None, only return the value :type stream: file """ return yaml.dump(to_dump, stream, SafeOrderedDumper, default_flow_style=False)
The entry point for EKS high level commands.
def initialize(cli): """ The entry point for EKS high level commands. """ cli.register('building-command-table.eks', inject_commands)
Called when the EKS command table is being built. Used to inject new high level commands into the command list.
def inject_commands(command_table, session, **kwargs): """ Called when the EKS command table is being built. Used to inject new high level commands into the command list. """ command_table['update-kubeconfig'] = UpdateKubeconfigCommand(session) command_table['get-token'] = GetTokenCommand(session)
The entry point for EMR high level commands.
def emr_initialize(cli): """ The entry point for EMR high level commands. """ cli.register('building-command-table.emr', register_commands) cli.register('building-argument-table.emr.add-tags', modify_tags_argument) cli.register( 'building-argument-table.emr.list-clusters', modify_list_clusters_argument) cli.register('before-building-argument-table-parser.emr.*', override_args_required_option)
Called when the EMR command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names.
def register_commands(command_table, session, **kwargs): """ Called when the EMR command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names. """ command_table['terminate-clusters'] = TerminateClusters(session) command_table['describe-cluster'] = DescribeCluster(session) command_table['modify-cluster-attributes'] = ModifyClusterAttr(session) command_table['install-applications'] = InstallApplications(session) command_table['create-cluster'] = CreateCluster(session) command_table['add-steps'] = AddSteps(session) command_table['restore-from-hbase-backup'] = \ hbase.RestoreFromHBaseBackup(session) command_table['create-hbase-backup'] = hbase.CreateHBaseBackup(session) command_table['schedule-hbase-backup'] = hbase.ScheduleHBaseBackup(session) command_table['disable-hbase-backups'] = \ hbase.DisableHBaseBackups(session) command_table['create-default-roles'] = CreateDefaultRoles(session) command_table['add-instance-groups'] = AddInstanceGroups(session) command_table['ssh'] = ssh.SSH(session) command_table['socks'] = ssh.Socks(session) command_table['get'] = ssh.Get(session) command_table['put'] = ssh.Put(session)
Assumption: emrfs_args is valid i.e. all required attributes are present
def _build_emrfs_properties(emrfs_args): """ Assumption: emrfs_args is valid i.e. all required attributes are present """ emrfs_properties = OrderedDict() if _need_to_configure_consistent_view(emrfs_args): _update_properties_for_consistent_view(emrfs_properties, emrfs_args) if _need_to_configure_sse(emrfs_args): _update_properties_for_sse(emrfs_properties, emrfs_args) if _need_to_configure_cse(emrfs_args, 'KMS'): _update_properties_for_cse(emrfs_properties, emrfs_args, 'KMS') if _need_to_configure_cse(emrfs_args, 'CUSTOM'): _update_properties_for_cse(emrfs_properties, emrfs_args, 'CUSTOM') if 'Args' in emrfs_args: for arg_value in emrfs_args.get('Args'): key, value = emrutils.split_to_key_value(arg_value) emrfs_properties[key] = value return emrfs_properties
Returns the master_instance's 'PublicDnsName'.
def find_master_dns(session, parsed_globals, cluster_id): """ Returns the master_instance's 'PublicDnsName'. """ client = get_client(session, parsed_globals) data = client.describe_cluster(ClusterId=cluster_id) return data['Cluster']['MasterPublicDnsName']
Helper method to format a list of values for display: [1,2,3] -> '1, 2 and 3'
def join(values, separator=',', lastSeparator='and'): """ Helper method to print a list of values [1,2,3] -> '1, 2 and 3' """ values = [str(x) for x in values] if len(values) < 1: return "" elif len(values) == 1: return values[0] else: separator = '%s ' % separator return ' '.join([separator.join(values[:-1]), lastSeparator, values[-1]])
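A minimal sketch (illustrative values, assuming join is in scope):

print(join([1, 2, 3]))                              # '1, 2 and 3'
print(join(['read', 'write'], lastSeparator='or'))  # 'read or write'
print(join([]))                                     # ''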
Helper method that converts --instance-fleets option value in create-cluster to Amazon Elastic MapReduce InstanceFleetConfig data type.
def validate_and_build_instance_fleets(parsed_instance_fleets): """ Helper method that converts --instance-fleets option value in create-cluster to Amazon Elastic MapReduce InstanceFleetConfig data type. """ instance_fleets = [] for instance_fleet in parsed_instance_fleets: instance_fleet_config = {} keys = instance_fleet.keys() if 'Name' in keys: instance_fleet_config['Name'] = instance_fleet['Name'] else: instance_fleet_config['Name'] = instance_fleet['InstanceFleetType'] instance_fleet_config['InstanceFleetType'] = instance_fleet['InstanceFleetType'] if 'TargetOnDemandCapacity' in keys: instance_fleet_config['TargetOnDemandCapacity'] = instance_fleet['TargetOnDemandCapacity'] if 'TargetSpotCapacity' in keys: instance_fleet_config['TargetSpotCapacity'] = instance_fleet['TargetSpotCapacity'] if 'InstanceTypeConfigs' in keys: instance_fleet_config['InstanceTypeConfigs'] = instance_fleet['InstanceTypeConfigs'] if 'LaunchSpecifications' in keys: instanceFleetProvisioningSpecifications = instance_fleet['LaunchSpecifications'] instance_fleet_config['LaunchSpecifications'] = {} if 'SpotSpecification' in instanceFleetProvisioningSpecifications: instance_fleet_config['LaunchSpecifications']['SpotSpecification'] = \ instanceFleetProvisioningSpecifications['SpotSpecification'] if 'OnDemandSpecification' in instanceFleetProvisioningSpecifications: instance_fleet_config['LaunchSpecifications']['OnDemandSpecification'] = \ instanceFleetProvisioningSpecifications['OnDemandSpecification'] if 'ResizeSpecifications' in keys: instanceFleetResizeSpecifications = instance_fleet['ResizeSpecifications'] instance_fleet_config['ResizeSpecifications'] = {} if 'SpotResizeSpecification' in instanceFleetResizeSpecifications: instance_fleet_config['ResizeSpecifications']['SpotResizeSpecification'] = \ instanceFleetResizeSpecifications['SpotResizeSpecification'] if 'OnDemandResizeSpecification' in instanceFleetResizeSpecifications: instance_fleet_config['ResizeSpecifications']['OnDemandResizeSpecification'] = \ instanceFleetResizeSpecifications['OnDemandResizeSpecification'] instance_fleets.append(instance_fleet_config) return instance_fleets
Helper method that converts --instance-groups option value in create-cluster and add-instance-groups to Amazon Elastic MapReduce InstanceGroupConfig data type.
def build_instance_groups(parsed_instance_groups): """ Helper method that converts --instance-groups option value in create-cluster and add-instance-groups to Amazon Elastic MapReduce InstanceGroupConfig data type. """ instance_groups = [] for instance_group in parsed_instance_groups: ig_config = {} keys = instance_group.keys() if 'Name' in keys: ig_config['Name'] = instance_group['Name'] else: ig_config['Name'] = instance_group['InstanceGroupType'] ig_config['InstanceType'] = instance_group['InstanceType'] ig_config['InstanceCount'] = instance_group['InstanceCount'] ig_config['InstanceRole'] = instance_group['InstanceGroupType'].upper() if 'BidPrice' in keys: if instance_group['BidPrice'] != 'OnDemandPrice': ig_config['BidPrice'] = instance_group['BidPrice'] ig_config['Market'] = constants.SPOT else: ig_config['Market'] = constants.ON_DEMAND if 'EbsConfiguration' in keys: ig_config['EbsConfiguration'] = instance_group['EbsConfiguration'] if 'AutoScalingPolicy' in keys: ig_config['AutoScalingPolicy'] = instance_group['AutoScalingPolicy'] if 'Configurations' in keys: ig_config['Configurations'] = instance_group['Configurations'] if 'CustomAmiId' in keys: ig_config['CustomAmiId'] = instance_group['CustomAmiId'] instance_groups.append(ig_config) return instance_groups
Utility method for ssh, socks, put and get command. Check if the cluster to be connected to is terminated or being terminated. Check if the cluster is running. Find master instance public dns of a given cluster. Return the latest created master instance public dns name. Throw MasterDNSNotAvailableError or ClusterTerminatedError.
def validate_and_find_master_dns(session, parsed_globals, cluster_id): """ Utility method for ssh, socks, put and get command. Check if the cluster to be connected to is terminated or being terminated. Check if the cluster is running. Find master instance public dns of a given cluster. Return the latest created master instance public dns name. Throw MasterDNSNotAvailableError or ClusterTerminatedError. """ cluster_state = emrutils.get_cluster_state( session, parsed_globals, cluster_id) if cluster_state in constants.TERMINATED_STATES: raise exceptions.ClusterTerminatedError emr = emrutils.get_client(session, parsed_globals) try: cluster_running_waiter = emr.get_waiter('cluster_running') if cluster_state in constants.STARTING_STATES: print("Waiting for the cluster to start.") cluster_running_waiter.wait(ClusterId=cluster_id) except WaiterError: raise exceptions.MasterDNSNotAvailableError return emrutils.find_master_dns( session=session, cluster_id=cluster_id, parsed_globals=parsed_globals)
The entry point for EMR Containers high level commands.
def initialize(cli): """ The entry point for EMR Containers high level commands. """ cli.register('building-command-table.emr-containers', inject_commands)
Called when the EMR Containers command table is being built. Used to inject new high level commands into the command list.
def inject_commands(command_table, session, **kwargs): """ Called when the EMR Containers command table is being built. Used to inject new high level commands into the command list. """ command_table['update-role-trust-policy'] = UpdateRoleTrustPolicyCommand( session)
This function checks to see if a file is a special file. It checks if the file is a character special device, block special device, FIFO, or socket.
def is_special_file(path): """ This function checks to see if a special file. It checks if the file is a character special device, block special device, FIFO, or socket. """ mode = os.stat(path).st_mode # Character special device. if stat.S_ISCHR(mode): return True # Block special device if stat.S_ISBLK(mode): return True # FIFO. if stat.S_ISFIFO(mode): return True # Socket. if stat.S_ISSOCK(mode): return True return False
This function checks to see if a file or a directory can be read. This is tested by performing an operation that requires read access on the file or the directory.
def is_readable(path): """ This function checks to see if a file or a directory can be read. This is tested by performing an operation that requires read access on the file or the directory. """ if os.path.isdir(path): try: os.listdir(path) except (OSError, IOError): return False else: try: with _open(path, 'r') as fd: pass except (OSError, IOError): return False return True
Given the CLI parameters dict, create a Filter object.
def create_filter(parameters): """Given the CLI parameters dict, create a Filter object.""" # We need to evaluate all the filters based on the source # directory. if parameters['filters']: cli_filters = parameters['filters'] real_filters = [] for filter_type, filter_pattern in cli_filters: real_filters.append((filter_type.lstrip('-'), filter_pattern)) source_location = parameters['src'] if source_location.startswith('s3://'): # This gives us (bucket, keyname) and we want # the bucket to be the root dir. src_rootdir = _get_s3_root(source_location, parameters['dir_op']) else: src_rootdir = _get_local_root(parameters['src'], parameters['dir_op']) destination_location = parameters['dest'] if destination_location.startswith('s3://'): dst_rootdir = _get_s3_root(parameters['dest'], parameters['dir_op']) else: dst_rootdir = _get_local_root(parameters['dest'], parameters['dir_op']) return Filter(real_filters, src_rootdir, dst_rootdir) else: return Filter({}, None, None)
This function is required to use the plugin. It calls the functions required to add all necessary commands and parameters to the CLI. This function is necessary to install the plugin using a configuration file.
def awscli_initialize(cli): """ This function is require to use the plugin. It calls the functions required to add all necessary commands and parameters to the CLI. This function is necessary to install the plugin using a configuration file """ cli.register("building-command-table.main", add_s3) cli.register('building-command-table.sync', register_sync_strategies)
This is a wrapper to make the plugin built-in to the cli as opposed to specifying it in the configuration file.
def s3_plugin_initialize(event_handlers): """ This is a wrapper to make the plugin built-in to the cli as opposed to specifying it in the configuration file. """ awscli_initialize(event_handlers)
This creates a new service object for the s3 plugin. It sends the old s3 commands to the namespace ``s3api``.
def add_s3(command_table, session, **kwargs): """ This creates a new service object for the s3 plugin. It sends the old s3 commands to the namespace ``s3api``. """ utils.rename_command(command_table, 's3', 's3api') command_table['s3'] = S3(session)
Creates an equivalent s3transfer TransferConfig :type runtime_config: dict :argument runtime_config: A valid RuntimeConfig-generated dict. :returns: A TransferConfig with the same configuration as the runtime config.
def create_transfer_config_from_runtime_config(runtime_config): """ Creates an equivalent s3transfer TransferConfig :type runtime_config: dict :argument runtime_config: A valid RuntimeConfig-generated dict. :returns: A TransferConfig with the same configuration as the runtime config. """ translation_map = { 'max_concurrent_requests': 'max_request_concurrency', 'max_queue_size': 'max_request_queue_size', 'multipart_threshold': 'multipart_threshold', 'multipart_chunksize': 'multipart_chunksize', 'max_bandwidth': 'max_bandwidth', } kwargs = {} for key, value in runtime_config.items(): if key not in translation_map: continue kwargs[translation_map[key]] = value return TransferConfig(**kwargs)
Convert a size in bytes into a human readable format. For example:: >>> human_readable_size(1) '1 Byte' >>> human_readable_size(10) '10 Bytes' >>> human_readable_size(1024) '1.0 KiB' >>> human_readable_size(1024 * 1024) '1.0 MiB' :param value: The size in bytes. :return: The size in a human readable format based on base-2 units.
def human_readable_size(value): """Convert a size in bytes into a human readable format. For example:: >>> human_readable_size(1) '1 Byte' >>> human_readable_size(10) '10 Bytes' >>> human_readable_size(1024) '1.0 KiB' >>> human_readable_size(1024 * 1024) '1.0 MiB' :param value: The size in bytes. :return: The size in a human readable format based on base-2 units. """ base = 1024 bytes_int = float(value) if bytes_int == 1: return '1 Byte' elif bytes_int < base: return '%d Bytes' % bytes_int for i, suffix in enumerate(HUMANIZE_SUFFIXES): unit = base ** (i+2) if round((bytes_int / unit) * base) < base: return '%.1f %s' % ((base * bytes_int / unit), suffix)
Converts a human readable size to bytes. :param value: A string such as "10MB". If a suffix is not included, then the value is assumed to be an integer representing the size in bytes. :returns: The converted value in bytes as an integer
def human_readable_to_bytes(value): """Converts a human readable size to bytes. :param value: A string such as "10MB". If a suffix is not included, then the value is assumed to be an integer representing the size in bytes. :returns: The converted value in bytes as an integer """ value = value.lower() if value[-2:] == 'ib': # Assume IEC suffix. suffix = value[-3:].lower() else: suffix = value[-2:].lower() has_size_identifier = ( len(value) >= 2 and suffix in SIZE_SUFFIX) if not has_size_identifier: try: return int(value) except ValueError: raise ValueError("Invalid size value: %s" % value) else: multiplier = SIZE_SUFFIX[suffix] return int(value[:-len(suffix)]) * multiplier
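A minimal sketch (assuming human_readable_to_bytes is in scope); note that the exact multipliers come from the SIZE_SUFFIX mapping, which is not shown here:

print(human_readable_to_bytes('1024'))   # 1024 (a bare number passes through as an int)
size = human_readable_to_bytes('10KiB')  # 10 * SIZE_SUFFIX['kib']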
This is a helper function that, given an S3 path of the form bucket/key, returns the bucket and the key represented by the S3 path.
def find_bucket_key(s3_path): """ This is a helper function that given an s3 path such that the path is of the form: bucket/key It will return the bucket and the key represented by the s3 path """ block_unsupported_resources(s3_path) match = _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX.match(s3_path) if match: return match.group('bucket'), match.group('key') match = _S3_OUTPOST_TO_BUCKET_KEY_REGEX.match(s3_path) if match: return match.group('bucket'), match.group('key') s3_components = s3_path.split('/', 1) bucket = s3_components[0] s3_key = '' if len(s3_components) > 1: s3_key = s3_components[1] return bucket, s3_key
Split s3 path into bucket and key prefix. This will also handle the s3:// prefix. :return: Tuple of ('bucketname', 'keyname')
def split_s3_bucket_key(s3_path): """Split s3 path into bucket and key prefix. This will also handle the s3:// prefix. :return: Tuple of ('bucketname', 'keyname') """ if s3_path.startswith('s3://'): s3_path = s3_path[5:] return find_bucket_key(s3_path)
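A minimal sketch (illustrative paths, assuming split_s3_bucket_key is in scope):

print(split_s3_bucket_key('s3://my-bucket/path/to/key.txt'))
# ('my-bucket', 'path/to/key.txt')
print(split_s3_bucket_key('my-bucket'))
# ('my-bucket', '')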
This is a helper function that, given a local path, returns the size of the file in bytes and the time of last modification.
def get_file_stat(path): """ This is a helper function that given a local path return the size of the file in bytes and time of last modification. """ try: stats = os.stat(path) except IOError as e: raise ValueError('Could not retrieve file stat of "%s": %s' % ( path, e)) try: update_time = datetime.fromtimestamp(stats.st_mtime, tzlocal()) except (ValueError, OSError, OverflowError): # Python's fromtimestamp raises value errors when the timestamp is out # of range of the platform's C localtime() function. This can cause # issues when syncing from systems with a wide range of valid # timestamps to systems with a lower range. Some systems support # 64-bit timestamps, for instance, while others only support 32-bit. # We don't want to fail in these cases, so instead we pass along none. update_time = None return stats.st_size, update_time
This is a helper function that determines the destination path and compare key given parameters received from the ``FileFormat`` class.
def find_dest_path_comp_key(files, src_path=None): """ This is a helper function that determines the destination path and compare key given parameters received from the ``FileFormat`` class. """ src = files['src'] dest = files['dest'] src_type = src['type'] dest_type = dest['type'] if src_path is None: src_path = src['path'] sep_table = {'s3': '/', 'local': os.sep} if files['dir_op']: rel_path = src_path[len(src['path']):] else: rel_path = src_path.split(sep_table[src_type])[-1] compare_key = rel_path.replace(sep_table[src_type], '/') if files['use_src_name']: dest_path = dest['path'] dest_path += rel_path.replace(sep_table[src_type], sep_table[dest_type]) else: dest_path = dest['path'] return dest_path, compare_key
This creates a ``PrintTask`` for whenever a warning is to be thrown.
def create_warning(path, error_message, skip_file=True): """ This creates a ``PrintTask`` for whenever a warning is to be thrown. """ print_string = "warning: " if skip_file: print_string = print_string + "Skipping file " + path + ". " print_string = print_string + error_message warning_message = WarningResult(message=print_string, error=False, warning=True) return warning_message
Given a filename, guess its content type. If the type cannot be guessed, a value of None is returned.
def guess_content_type(filename): """Given a filename, guess it's content type. If the type cannot be guessed, a value of None is returned. """ try: return mimetypes.guess_type(filename)[0] # This catches a bug in the mimetype library where some MIME types # specifically on windows machines cause a UnicodeDecodeError # because the MIME type in the Windows registry has an encoding # that cannot be properly encoded using the default system encoding. # https://bugs.python.org/issue9291 # # So instead of hard failing, just log the issue and fall back to the # default guessed content type of None. except UnicodeDecodeError: LOGGER.debug( 'Unable to guess content type for %s due to ' 'UnicodeDecodeError: ', filename, exc_info=True )
Cross platform relative path of a filename. If no relative path can be calculated (i.e. different drives on Windows), then instead of raising a ValueError, the absolute path is returned.
def relative_path(filename, start=os.path.curdir): """Cross platform relative path of a filename. If no relative path can be calculated (i.e different drives on Windows), then instead of raising a ValueError, the absolute path is returned. """ try: dirname, basename = os.path.split(filename) relative_dir = os.path.relpath(dirname, start) return os.path.join(relative_dir, basename) except ValueError: return os.path.abspath(filename)
Set the utime of a file, and if it fails, raise a more explicit error. :param filename: the file to modify :param desired_time: the epoch timestamp to set for atime and mtime. :raises: SetFileUtimeError: if you do not have permission (errno 1) :raises: OSError: for all errors other than errno 1
def set_file_utime(filename, desired_time): """ Set the utime of a file, and if it fails, raise a more explicit error. :param filename: the file to modify :param desired_time: the epoch timestamp to set for atime and mtime. :raises: SetFileUtimeError: if you do not have permission (errno 1) :raises: OSError: for all errors other than errno 1 """ try: os.utime(filename, (desired_time, desired_time)) except OSError as e: # Only raise a more explicit exception when it is a permission issue. if e.errno != errno.EPERM: raise e raise SetFileUtimeError( ("The file was downloaded, but attempting to modify the " "utime of the file failed. Is the file owned by another user?"))
Registers a single sync strategy :param session: The session that the sync strategy is being registered to. :param strategy_cls: The class of the sync strategy to be registered. :param sync_type: A string representing when to perform the sync strategy. See ``__init__`` method of ``BaseSyncStrategy`` for possible options.
def register_sync_strategy(session, strategy_cls, sync_type='file_at_src_and_dest'): """Registers a single sync strategy :param session: The session that the sync strategy is being registered to. :param strategy_cls: The class of the sync strategy to be registered. :param sync_type: A string representing when to perform the sync strategy. See ``__init__`` method of ``BaseSyncStrategy`` for possible options. """ strategy = strategy_cls(sync_type) strategy.register_strategy(session)
Registers the different sync strategies. To register a sync strategy add ``register_sync_strategy(session, YourSyncStrategyClass, sync_type)`` to the list of registered strategies in this function.
def register_sync_strategies(command_table, session, **kwargs): """Registers the different sync strategies. To register a sync strategy add ``register_sync_strategy(session, YourSyncStrategyClass, sync_type)`` to the list of registered strategies in this function. """ # Register the size only sync strategy. register_sync_strategy(session, SizeOnlySync) # Register the exact timestamps sync strategy. register_sync_strategy(session, ExactTimestampsSync) # Register the delete sync strategy. register_sync_strategy(session, DeleteSync, 'file_not_at_src')
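A custom strategy would be registered the same way. The class names below are made up for illustration and are assumed to subclass the same base strategy class that ``SizeOnlySync`` and ``DeleteSync`` derive from:

# Hypothetical registration function for custom strategies.
def register_my_strategies(command_table, session, **kwargs):
    # Runs when a file exists at both the source and the destination.
    register_sync_strategy(session, MyChecksumSync)
    # Runs when a file exists only at the destination.
    register_sync_strategy(session, MyPruneSync, 'file_not_at_dest')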
This link describes the format of Path Style URLs http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
def make_url(region, bucket_name, obj_path, version=None): """ This link describes the format of Path Style URLs http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro """ base = "https://s3.amazonaws.com" if region and region != "us-east-1": base = "https://s3-{0}.amazonaws.com".format(region) result = "{0}/{1}/{2}".format(base, bucket_name, obj_path) if version: result = "{0}?versionId={1}".format(result, version) return result
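A couple of sample invocations, assuming the function above is in scope; the bucket and key are hypothetical. Note that us-east-1 (or an empty region) maps to the global endpoint:

print(make_url('us-west-2', 'mybucket', 'photos/cat.jpg'))
# https://s3-us-west-2.amazonaws.com/mybucket/photos/cat.jpg

print(make_url('us-east-1', 'mybucket', 'photos/cat.jpg', version='abc123'))
# https://s3.amazonaws.com/mybucket/photos/cat.jpg?versionId=abc123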
Setup connects events to the sitemap builder
def setup(app): """Setup connects events to the sitemap builder""" app.connect('html-page-context', add_html_link) app.connect('build-finished', create_sitemap) app.sitemap_links = [] app.set_translator('html', HTMLTranslator)
As each page is built, collect page names for the sitemap
def add_html_link(app, pagename, templatename, context, doctree): """As each page is built, collect page names for the sitemap""" base_url = app.config['html_theme_options'].get('base_url', '') if base_url: app.sitemap_links.append(base_url + pagename + ".html")
Generates the sitemap.xml from the collected HTML page links
def create_sitemap(app, exception): """Generates the sitemap.xml from the collected HTML page links""" if (not app.config['html_theme_options'].get('base_url', '') or exception is not None or not app.sitemap_links): return filename = app.outdir + "/sitemap.xml" print("Generating sitemap.xml in %s" % filename) root = ET.Element("urlset") root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9") for link in app.sitemap_links: url = ET.SubElement(root, "url") ET.SubElement(url, "loc").text = link ET.ElementTree(root).write(filename)
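These three callbacks form a small Sphinx extension: ``setup`` wires the event handlers, ``add_html_link`` records each built page, and ``create_sitemap`` writes the result at the end of the build. A sketch of the ``conf.py`` side, assuming the module containing them is named ``sitemap`` (a hypothetical name) and is importable:

# conf.py (sketch)
extensions = ['sitemap']  # module defining setup(), add_html_link(), create_sitemap()
html_theme_options = {
    # Hypothetical site root; it is prepended directly to each page name,
    # so it should end with a trailing slash.
    'base_url': 'https://docs.example.com/',
}
# After an html build, <outdir>/sitemap.xml lists one <url> entry per page.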
Run the given summary script on every file in the given directory. :param script: A summarization script that takes a list of csv files. :param result_dir: A directory containing csv performance result files. :param summary_dir: The directory to put the summary file in.
def summarize(script, result_dir, summary_dir): """Run the given summary script on every file in the given directory. :param script: A summarization script that takes a list of csv files. :param result_dir: A directory containing csv performance result files. :param summary_dir: The directory to put the summary file in. """ summarize_args = [script] for f in os.listdir(result_dir): path = os.path.join(result_dir, f) if os.path.isfile(path): summarize_args.append(path) with open(os.path.join(summary_dir, 'summary.txt'), 'wb') as f: subprocess.check_call(summarize_args, stdout=f) with open(os.path.join(summary_dir, 'summary.json'), 'wb') as f: summarize_args.extend(['--output-format', 'json']) subprocess.check_call(summarize_args, stdout=f)
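For instance, after a benchmark run has produced ``performance0.csv``, ``performance1.csv``, ... the summary step invokes the script twice, once for ``summary.txt`` and once for ``summary.json``. A minimal sketch with hypothetical paths, assuming the function above is in scope:

summarize(
    script='/path/to/s3transfer/scripts/performance/summarize',  # hypothetical location
    result_dir='results/performance',  # directory of per-iteration csv files
    summary_dir='results',             # summary.txt and summary.json land here
)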
Retrieves an s3transfer performance script if available.
def _get_s3transfer_performance_script(script_name): """Retrieves an s3transfer performance script if available.""" s3transfer_directory = os.path.dirname(s3transfer.__file__) s3transfer_directory = os.path.dirname(s3transfer_directory) scripts_directory = 'scripts/performance' scripts_directory = os.path.join(s3transfer_directory, scripts_directory) script = os.path.join(scripts_directory, script_name) if os.path.isfile(script): return script else: return None
Backup a given source to a temporary location. :type source: str :param source: A local path or s3 path to backup. :type recursive: bool :param recursive: if True, the source will be treated as a directory.
def backup(source, recursive): """Backup a given source to a temporary location. :type source: str :param source: A local path or s3 path to backup. :type recursive: bool :param recursive: if True, the source will be treated as a directory. """ if source[:5] == 's3://': parts = source.split('/') parts.insert(3, str(uuid.uuid4())) backup_path = '/'.join(parts) else: name = os.path.split(source)[-1] temp_dir = tempfile.mkdtemp() backup_path = os.path.join(temp_dir, name) copy(source, backup_path, recursive) return backup_path
Copy files from one location to another. The source and destination must both be s3 paths or both be local paths. :type source: str :param source: A local path or s3 path to backup. :type destination: str :param destination: A local path or s3 path to backup the source to. :type recursive: bool :param recursive: if True, the source will be treated as a directory.
def copy(source, destination, recursive): """Copy files from one location to another. The source and destination must both be s3 paths or both be local paths. :type source: str :param source: A local path or s3 path to backup. :type destination: str :param destination: A local path or s3 path to backup the source to. :type recursive: bool :param recursive: if True, the source will be treated as a directory. """ if 's3://' in [source[:5], destination[:5]]: cp_args = ['aws', 's3', 'cp', source, destination, '--quiet'] if recursive: cp_args.append('--recursive') subprocess.check_call(cp_args) return if recursive: shutil.copytree(source, destination) else: shutil.copy(source, destination)
Delete a file or directory either locally or on S3.
def clean(destination, recursive): """Delete a file or directory either locally or on S3.""" if destination[:5] == 's3://': rm_args = ['aws', 's3', 'rm', '--quiet', destination] if recursive: rm_args.append('--recursive') subprocess.check_call(rm_args) else: if recursive: shutil.rmtree(destination) else: os.remove(destination)
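Together, ``backup``, ``copy`` and ``clean`` give a benchmark run a simple restore/teardown loop. The sketch below shows one way to combine them, assuming the functions above are in scope, the AWS CLI is configured, and the S3 prefix is hypothetical:

source = 's3://my-benchmark-bucket/dataset/'    # hypothetical prefix to protect
backup_path = backup(source, recursive=True)    # -> s3://my-benchmark-bucket/<uuid>/dataset/

def upkeep():
    # Restore the source between iterations, e.g. after benchmarking `aws s3 rm`.
    copy(backup_path, source, recursive=True)

def cleanup():
    # Drop the temporary backup once every iteration has finished.
    clean(backup_path, recursive=True)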
Create a random subdirectory in a given directory.
def create_random_subfolder(destination): """Create a random subdirectory in a given directory.""" folder_name = str(uuid.uuid4()) if destination.startswith('s3://'): parts = destination.split('/') parts.append(folder_name) return '/'.join(parts) else: parts = list(os.path.split(destination)) parts.append(folder_name) path = os.path.join(*parts) os.makedirs(path) return path
Get a full cli transfer command. Performs common transformations, e.g. adding --quiet
def get_transfer_command(command, recursive, quiet): """Get a full cli transfer command. Performs common transformations, e.g. adding --quiet """ cli_command = 'aws s3 ' + command if recursive: cli_command += ' --recursive' if quiet: cli_command += ' --quiet' else: print(cli_command) return cli_command
Benchmark several runs of a long-running command.

:type command: str
:param command: The full aws cli command to benchmark

:type benchmark_script: str
:param benchmark_script: A benchmark script that takes a command to run and outputs performance data to a file. This should be from s3transfer.

:type summarize_script: str
:param summarize_script: A summarization script that summarizes the output of the benchmark script. This should be from s3transfer.

:type output_dir: str
:param output_dir: The directory to output performance results to.

:type num_iterations: int
:param num_iterations: The number of times to run the benchmark on the command.

:type dry_run: bool
:param dry_run: Whether or not to actually run the benchmarks.

:type upkeep: function that takes no arguments
:param upkeep: A function that is run after every iteration of the benchmark process. This should be used for upkeep, such as restoring files that were deleted as part of the command executing.

:type cleanup: function that takes no arguments
:param cleanup: A function that is run at the end of the benchmark process or if there are any problems during the benchmark process. It should be used for the final cleanup, such as deleting files that were created at some destination.
def benchmark_command(command, benchmark_script, summarize_script,
                      output_dir, num_iterations, dry_run, upkeep=None,
                      cleanup=None):
    """Benchmark several runs of a long-running command.

    :type command: str
    :param command: The full aws cli command to benchmark

    :type benchmark_script: str
    :param benchmark_script: A benchmark script that takes a command to run
        and outputs performance data to a file. This should be from
        s3transfer.

    :type summarize_script: str
    :param summarize_script: A summarization script that summarizes the
        output of the benchmark script. This should be from s3transfer.

    :type output_dir: str
    :param output_dir: The directory to output performance results to.

    :type num_iterations: int
    :param num_iterations: The number of times to run the benchmark on the
        command.

    :type dry_run: bool
    :param dry_run: Whether or not to actually run the benchmarks.

    :type upkeep: function that takes no arguments
    :param upkeep: A function that is run after every iteration of the
        benchmark process. This should be used for upkeep, such as restoring
        files that were deleted as part of the command executing.

    :type cleanup: function that takes no arguments
    :param cleanup: A function that is run at the end of the benchmark
        process or if there are any problems during the benchmark process.
        It should be used for the final cleanup, such as deleting files that
        were created at some destination.
    """
    performance_dir = os.path.join(output_dir, 'performance')
    if os.path.exists(performance_dir):
        shutil.rmtree(performance_dir)
    os.makedirs(performance_dir)

    try:
        for i in range(num_iterations):
            out_file = 'performance%s.csv' % i
            out_file = os.path.join(performance_dir, out_file)
            benchmark_args = [
                benchmark_script, command, '--output-file', out_file
            ]
            if not dry_run:
                subprocess.check_call(benchmark_args)
                if upkeep is not None:
                    upkeep()

        if not dry_run:
            summarize(summarize_script, performance_dir, output_dir)
    finally:
        if not dry_run and cleanup is not None:
            cleanup()
Get an ArgumentParser with all the base benchmark arguments added in.
def get_default_argparser(): """Get an ArgumentParser with all the base benchmark arguments added in.""" parser = argparse.ArgumentParser() parser.add_argument( '--no-cleanup', action='store_true', default=False, help='Do not remove the destination after the tests complete.' ) parser.add_argument( '--recursive', action='store_true', default=False, help='Indicates that this is a recursive transfer.' ) benchmark_script = get_benchmark_script() parser.add_argument( '--benchmark-script', default=benchmark_script, required=benchmark_script is None, help=('The benchmark script to run the commands with. This should be ' 'from s3transfer.') ) summarize_script = get_summarize_script() parser.add_argument( '--summarize-script', default=summarize_script, required=summarize_script is None, help=('The summarize script to run the commands with. This should be ' 'from s3transfer.') ) parser.add_argument( '-o', '--result-dir', default='results', help='The directory to output performance results to. Existing ' 'results will be deleted.' ) parser.add_argument( '--dry-run', default=False, action='store_true', help='If set, commands will only be printed out, not executed.' ) parser.add_argument( '--quiet', default=False, action='store_true', help='If set, output is suppressed.' ) parser.add_argument( '-n', '--num-iterations', default=1, type=int, help='The number of times to run the test.' ) return parser
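Putting the helpers together, a driver for a download benchmark could look roughly like the sketch below. The bucket, prefix and local directory are hypothetical, and it assumes the functions above (including the ``get_benchmark_script`` and ``get_summarize_script`` helpers used by the parser) are importable:

def main():
    parser = get_default_argparser()
    args = parser.parse_args()

    # Download into a fresh, randomly named local directory.
    destination = create_random_subfolder('/tmp/benchmark-downloads')  # hypothetical
    command = 'cp s3://my-benchmark-bucket/dataset/ %s' % destination  # hypothetical
    command = get_transfer_command(command, args.recursive, args.quiet)

    def cleanup():
        if not args.no_cleanup:
            clean(destination, args.recursive)

    benchmark_command(
        command, args.benchmark_script, args.summarize_script,
        args.result_dir, args.num_iterations, args.dry_run,
        cleanup=cleanup,
    )


if __name__ == '__main__':
    main()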