Adds tags to the stack if this resource is using the Serverless Application Repository.
def _get_application_tags(self):
    """Adds tags to the stack if this resource is using the Serverless Application Repository."""
    application_tags = {}
    if isinstance(self.Location, dict):
        if (self.APPLICATION_ID_KEY in self.Location.keys() and
                self.Location[self.APPLICATION_ID_KEY] is not None):
            application_tags[self._SAR_APP_KEY] = self.Location[self.APPLICATION_ID_KEY]
        if (self.SEMANTIC_VERSION_KEY in self.Location.keys() and
                self.Location[self.SEMANTIC_VERSION_KEY] is not None):
            application_tags[self._SAR_SEMVER_KEY] = self.Location[self.SEMANTIC_VERSION_KEY]
    return application_tags
Returns the Lambda layer to which this SAM Layer corresponds. :param dict kwargs: already-converted resources that may need to be modified when converting this macro to pure CloudFormation :returns: a list of vanilla CloudFormation Resources, to which this LayerVersion expands :rtype: list
def to_cloudformation(self, **kwargs):
    """Returns the Lambda layer to which this SAM Layer corresponds.

    :param dict kwargs: already-converted resources that may need to be modified when converting this
        macro to pure CloudFormation
    :returns: a list of vanilla CloudFormation Resources, to which this LayerVersion expands
    :rtype: list
    """
    resources = []

    # Append any CFN resources:
    intrinsics_resolver = kwargs["intrinsics_resolver"]
    resources.append(self._construct_lambda_layer(intrinsics_resolver))

    return resources
Constructs and returns the Lambda layer version. :returns: the LambdaLayerVersion resource :rtype: model.lambda_.LambdaLayerVersion
def _construct_lambda_layer(self, intrinsics_resolver): """Constructs and returns the Lambda function. :returns: a list containing the Lambda function and execution role resources :rtype: list """ # Resolve intrinsics if applicable: self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName') self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo') self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description') self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy, 'RetentionPolicy') retention_policy_value = self._get_retention_policy_value() attributes = self.get_passthrough_resource_attributes() if attributes is None: attributes = {} attributes['DeletionPolicy'] = retention_policy_value old_logical_id = self.logical_id new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen() self.logical_id = new_logical_id lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes) # Changing the LayerName property: when a layer is published, it is given an Arn # example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1 # where MyLayer is the LayerName property if it exists; otherwise, it is the # LogicalId of this resource. Since a LayerVersion is an immutable resource, when # CloudFormation updates this resource, it will ALWAYS create a new version then # delete the old version if the logical ids match. What this does is change the # logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the # LayerName property of the layer so that the Arn will still always be the same # with the exception of an incrementing version number. if not self.LayerName: self.LayerName = old_logical_id lambda_layer.LayerName = self.LayerName lambda_layer.Description = self.Description lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri') lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes lambda_layer.LicenseInfo = self.LicenseInfo return lambda_layer
Returns the value to use for this resource's DeletionPolicy attribute, derived from RetentionPolicy. The default is 'Retain'. :return: value for the DeletionPolicy attribute.
def _get_retention_policy_value(self): """ Sets the deletion policy on this resource. The default is 'Retain'. :return: value for the DeletionPolicy attribute. """ if self.RetentionPolicy is None or self.RetentionPolicy.lower() == self.RETAIN.lower(): return self.RETAIN elif self.RetentionPolicy.lower() == self.DELETE.lower(): return self.DELETE elif self.RetentionPolicy.lower() not in self.retention_policy_options: raise InvalidResourceException(self.logical_id, "'{}' must be one of the following options: {}." .format('RetentionPolicy', [self.RETAIN, self.DELETE]))
Performs dialog management and fulfillment for ordering flowers. Beyond fulfillment, the implementation of this intent demonstrates the use of the elicitSlot dialog action in slot validation and re-prompting.
def order_flowers(intent_request): """ Performs dialog management and fulfillment for ordering flowers. Beyond fulfillment, the implementation of this intent demonstrates the use of the elicitSlot dialog action in slot validation and re-prompting. """ flower_type = get_slots(intent_request)["FlowerType"] date = get_slots(intent_request)["PickupDate"] time = get_slots(intent_request)["PickupTime"] source = intent_request['invocationSource'] if source == 'DialogCodeHook': # Perform basic validation on the supplied input slots. # Use the elicitSlot dialog action to re-prompt for the first violation detected. slots = get_slots(intent_request) validation_result = validate_order_flowers(flower_type, date, time) if not validation_result['isValid']: slots[validation_result['violatedSlot']] = None return elicit_slot(intent_request['sessionAttributes'], intent_request['currentIntent']['name'], slots, validation_result['violatedSlot'], validation_result['message']) # Pass the price of the flowers back through session attributes to be used in various prompts defined # on the bot model. output_session_attributes = intent_request['sessionAttributes'] if flower_type is not None: output_session_attributes['Price'] = len(flower_type) * 5 # Elegant pricing model return delegate(output_session_attributes, get_slots(intent_request)) # Order the flowers, and rely on the goodbye message of the bot to define the message to the end user. # In a real bot, this would likely involve a call to a backend service. return close(intent_request['sessionAttributes'], 'Fulfilled', {'contentType': 'PlainText', 'content': 'Thanks, your order for {} has been placed and will be ready for pickup by {} on {}'.format(flower_type, time, date)})
Called when the user specifies an intent for this bot.
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot.
    """
    logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'],
                                                            intent_request['currentIntent']['name']))
    intent_name = intent_request['currentIntent']['name']

    # Dispatch to your bot's intent handlers
    if intent_name == 'OrderFlowers':
        return order_flowers(intent_request)

    raise Exception('Intent with name ' + intent_name + ' not supported')
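For context, a code-hook Lambda for this bot would typically expose a handler that forwards the incoming Lex event to dispatch. A minimal sketch, assuming the standard Lex V1 event shape; the logger setup here is illustrative, not taken from the source:

import logging

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

def lambda_handler(event, context):
    # Entry point invoked by AWS Lambda; routes the Lex event to the intent dispatcher above.
    logger.debug('event.bot.name={}'.format(event['bot']['name']))
    return dispatch(event)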
Constructs the Lambda Permission resource allowing the source service to invoke the function this event source triggers. :returns: the permission resource :rtype: model.lambda_.LambdaPermission
def _construct_permission(self, function, source_arn=None, source_account=None, suffix="", event_source_token=None): """Constructs the Lambda Permission resource allowing the source service to invoke the function this event source triggers. :returns: the permission resource :rtype: model.lambda_.LambdaPermission """ lambda_permission = LambdaPermission(self.logical_id + 'Permission' + suffix, attributes=function.get_passthrough_resource_attributes()) try: # Name will not be available for Alias resources function_name_or_arn = function.get_runtime_attr("name") except NotImplementedError: function_name_or_arn = function.get_runtime_attr("arn") lambda_permission.Action = 'lambda:invokeFunction' lambda_permission.FunctionName = function_name_or_arn lambda_permission.Principal = self.principal lambda_permission.SourceArn = source_arn lambda_permission.SourceAccount = source_account lambda_permission.EventSourceToken = event_source_token return lambda_permission
Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this pull event expands :rtype: list
def to_cloudformation(self, **kwargs): """Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this pull event expands :rtype: list """ function = kwargs.get('function') if not function: raise TypeError("Missing required keyword argument: function") resources = [] events_rule = EventsRule(self.logical_id) resources.append(events_rule) events_rule.ScheduleExpression = self.Schedule events_rule.Targets = [self._construct_target(function)] source_arn = events_rule.get_runtime_attr("arn") if CONDITION in function.resource_attributes: events_rule.set_resource_attribute(CONDITION, function.resource_attributes[CONDITION]) resources.append(self._construct_permission(function, source_arn=source_arn)) return resources
Constructs the Target property for the CloudWatch Events Rule. :returns: the Target property :rtype: dict
def _construct_target(self, function):
    """Constructs the Target property for the CloudWatch Events Rule.

    :returns: the Target property
    :rtype: dict
    """
    target = {
        'Arn': function.get_runtime_attr("arn"),
        'Id': self.logical_id + 'LambdaTarget'
    }
    if self.Input is not None:
        target['Input'] = self.Input

    if self.InputPath is not None:
        target['InputPath'] = self.InputPath
    return target
Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers. :param dict kwargs: S3 bucket resource :returns: a list of vanilla CloudFormation Resources, to which this S3 event expands :rtype: list
def to_cloudformation(self, **kwargs): """Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers. :param dict kwargs: S3 bucket resource :returns: a list of vanilla CloudFormation Resources, to which this S3 event expands :rtype: list """ function = kwargs.get('function') if not function: raise TypeError("Missing required keyword argument: function") if 'bucket' not in kwargs or kwargs['bucket'] is None: raise TypeError("Missing required keyword argument: bucket") if 'bucket_id' not in kwargs or kwargs['bucket_id'] is None: raise TypeError("Missing required keyword argument: bucket_id") bucket = kwargs['bucket'] bucket_id = kwargs['bucket_id'] resources = [] source_account = ref('AWS::AccountId') permission = self._construct_permission(function, source_account=source_account) if CONDITION in permission.resource_attributes: self._depend_on_lambda_permissions_using_tag(bucket, permission) else: self._depend_on_lambda_permissions(bucket, permission) resources.append(permission) # NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are # multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification # configuration for each event. This is the reason why we continue to use existing bucket dict and append onto # it. # # NOTE: There is some fragile logic here where we will append multiple resources to output # SAM template but de-dupe them when merging into output CFN template. This is scary because the order of # merging is literally "last one wins", which works fine because we linearly loop through the template once. # The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of # to_cloudformation() to output template. self._inject_notification_configuration(function, bucket) resources.append(S3Bucket.from_dict(bucket_id, bucket)) return resources
Make the S3 bucket depend on the Lambda Permission resource, because when S3 adds a Notification Configuration it will check whether it has permission to access Lambda. This check fails if the Lambda::Permission is not already applied for this bucket to invoke the Lambda. :param dict bucket: Dictionary representing the bucket in the SAM template. This is a raw dictionary and not a "resource" object. :param model.lambda_.LambdaPermission permission: Lambda Permission resource that needs to be created before the bucket. :return: Modified bucket dictionary
def _depend_on_lambda_permissions(self, bucket, permission):
    """
    Make the S3 bucket depend on the Lambda Permission resource, because when S3 adds a Notification
    Configuration it will check whether it has permission to access Lambda. This check fails if the
    Lambda::Permission is not already applied for this bucket to invoke the Lambda.

    :param dict bucket: Dictionary representing the bucket in the SAM template. This is a raw dictionary
        and not a "resource" object.
    :param model.lambda_.LambdaPermission permission: Lambda Permission resource that needs to be created
        before the bucket.
    :return: Modified bucket dictionary
    """
    depends_on = bucket.get("DependsOn", [])

    # DependsOn can be either a list of strings or a scalar string
    if isinstance(depends_on, string_types):
        depends_on = [depends_on]

    depends_on_set = set(depends_on)
    depends_on_set.add(permission.logical_id)
    bucket["DependsOn"] = list(depends_on_set)

    return bucket
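The DependsOn normalization above can be shown in isolation. A minimal sketch; the bucket dictionary and logical ids below are made up for illustration:

# Hypothetical bucket fragment with an existing scalar DependsOn.
bucket = {"Type": "AWS::S3::Bucket", "DependsOn": "SomeOtherResource"}

depends_on = bucket.get("DependsOn", [])
if isinstance(depends_on, str):               # scalar string -> wrap in a list
    depends_on = [depends_on]

depends_on_set = set(depends_on)              # de-duplicate before appending
depends_on_set.add("MyFunctionS3Permission")  # hypothetical permission logical id
bucket["DependsOn"] = list(depends_on_set)

print(sorted(bucket["DependsOn"]))            # ['MyFunctionS3Permission', 'SomeOtherResource']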
Since a conditional DependsOn is not supported, this undocumented way of implicitly creating a dependency through tags is used. See https://stackoverflow.com/questions/34607476/cloudformation-apply-condition-on-dependson It works by using a Ref wrapped in a conditional Fn::If. Using Ref implies a dependency, so CloudFormation will automatically wait once it reaches that function, the same as if you were using a DependsOn.
def _depend_on_lambda_permissions_using_tag(self, bucket, permission): """ Since conditional DependsOn is not supported this undocumented way of implicitely making dependency through tags is used. See https://stackoverflow.com/questions/34607476/cloudformation-apply-condition-on-dependson It is done by using Ref wrapped in a conditional Fn::If. Using Ref implies a dependency, so CloudFormation will automatically wait once it reaches that function, the same as if you were using a DependsOn. """ properties = bucket.get('Properties', None) if properties is None: properties = {} bucket['Properties'] = properties tags = properties.get('Tags', None) if tags is None: tags = [] properties['Tags'] = tags dep_tag = { 'sam:ConditionalDependsOn:' + permission.logical_id: { 'Fn::If': [ permission.resource_attributes[CONDITION], ref(permission.logical_id), 'no dependency' ] } } properties['Tags'] = tags + get_tag_list(dep_tag) return bucket
Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands :rtype: list
def to_cloudformation(self, **kwargs):
    """Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers.

    :param dict kwargs: no existing resources need to be modified
    :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
    :rtype: list
    """
    function = kwargs.get('function')

    if not function:
        raise TypeError("Missing required keyword argument: function")

    return [self._construct_permission(function, source_arn=self.Topic),
            self._inject_subscription(function, self.Topic, self.FilterPolicy)]
If this API Event Source refers to an explicit API resource, resolve the reference and grab necessary data from the explicit API
def resources_to_link(self, resources): """ If this API Event Source refers to an explicit API resource, resolve the reference and grab necessary data from the explicit API """ rest_api_id = self.RestApiId if isinstance(rest_api_id, dict) and "Ref" in rest_api_id: rest_api_id = rest_api_id["Ref"] # If RestApiId is a resource in the same template, then we try find the StageName by following the reference # Otherwise we default to a wildcard. This stage name is solely used to construct the permission to # allow this stage to invoke the Lambda function. If we are unable to resolve the stage name, we will # simply permit all stages to invoke this Lambda function # This hack is necessary because customers could use !ImportValue, !Ref or other intrinsic functions which # can be sometimes impossible to resolve (ie. when it has cross-stack references) permitted_stage = "*" stage_suffix = "AllStages" explicit_api = None if isinstance(rest_api_id, string_types): if rest_api_id in resources \ and "Properties" in resources[rest_api_id] \ and "StageName" in resources[rest_api_id]["Properties"]: explicit_api = resources[rest_api_id]["Properties"] permitted_stage = explicit_api["StageName"] # Stage could be a intrinsic, in which case leave the suffix to default value if isinstance(permitted_stage, string_types): if not permitted_stage: raise InvalidResourceException(rest_api_id, 'StageName cannot be empty.') stage_suffix = permitted_stage else: stage_suffix = "Stage" else: # RestApiId is a string, not an intrinsic, but we did not find a valid API resource for this ID raise InvalidEventException(self.relative_id, "RestApiId property of Api event must reference a valid " "resource in the same template.") return { 'explicit_api': explicit_api, 'explicit_api_stage': { 'permitted_stage': permitted_stage, 'suffix': stage_suffix } }
If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the x-amazon-apigateway-integration into the Swagger body for a provided implicit API. :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi be provided. :returns: a list of vanilla CloudFormation Resources, to which this Api event expands :rtype: list
def to_cloudformation(self, **kwargs): """If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the x-amazon-apigateway-integration into the Swagger body for a provided implicit API. :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi \ be provided. :returns: a list of vanilla CloudFormation Resources, to which this Api event expands :rtype: list """ resources = [] function = kwargs.get('function') if not function: raise TypeError("Missing required keyword argument: function") if self.Method is not None: # Convert to lower case so that user can specify either GET or get self.Method = self.Method.lower() resources.extend(self._get_permissions(kwargs)) explicit_api = kwargs['explicit_api'] if explicit_api.get("__MANAGE_SWAGGER"): self._add_swagger_integration(explicit_api, function) return resources
Adds the path and method for this Api event source to the Swagger body for the provided RestApi. :param dict api: the RestApi definition to which the path and method should be added.
def _add_swagger_integration(self, api, function): """Adds the path and method for this Api event source to the Swagger body for the provided RestApi. :param model.apigateway.ApiGatewayRestApi rest_api: the RestApi to which the path and method should be added. """ swagger_body = api.get("DefinitionBody") if swagger_body is None: return function_arn = function.get_runtime_attr('arn') partition = ArnGenerator.get_partition_name() uri = fnSub('arn:' + partition + ':apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/' + make_shorthand(function_arn) + '/invocations') editor = SwaggerEditor(swagger_body) if editor.has_integration(self.Path, self.Method): # Cannot add the Lambda Integration, if it is already present raise InvalidEventException( self.relative_id, 'API method "{method}" defined multiple times for path "{path}".'.format( method=self.Method, path=self.Path)) condition = None if CONDITION in function.resource_attributes: condition = function.resource_attributes[CONDITION] editor.add_lambda_integration(self.Path, self.Method, uri, self.Auth, api.get('Auth'), condition=condition) if self.Auth: method_authorizer = self.Auth.get('Authorizer') if method_authorizer: api_auth = api.get('Auth') api_authorizers = api_auth and api_auth.get('Authorizers') if method_authorizer != 'AWS_IAM': if not api_authorizers: raise InvalidEventException( self.relative_id, 'Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] ' 'because the related API does not define any Authorizers.'.format( authorizer=method_authorizer, method=self.Method, path=self.Path)) if method_authorizer != 'NONE' and not api_authorizers.get(method_authorizer): raise InvalidEventException( self.relative_id, 'Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] ' 'because it wasn\'t defined in the API\'s Authorizers.'.format( authorizer=method_authorizer, method=self.Method, path=self.Path)) if method_authorizer == 'NONE' and not api_auth.get('DefaultAuthorizer'): raise InvalidEventException( self.relative_id, 'Unable to set Authorizer on API method [{method}] for path [{path}] because \'NONE\' ' 'is only a valid value when a DefaultAuthorizer on the API is specified.'.format( method=self.Method, path=self.Path)) editor.add_auth_to_method(api=api, path=self.Path, method_name=self.Method, auth=self.Auth) api["DefinitionBody"] = editor.swagger
Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as !GetAtt, !Sub or !Ref to non-parameters will be left untouched. Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into transform's output because it changes the template structure by inlining parameter values. :param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions :return: A copy of a dictionary with parameter references replaced by actual value.
def resolve_parameter_refs(self, input): """ Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as !GetAtt, !Sub or !Ref to non-parameters will be left untouched. Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into transform's output because it changes the template structure by inlining parameter values. :param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions :return: A copy of a dictionary with parameter references replaced by actual value. """ return self._traverse(input, self.parameters, self._try_resolve_parameter_refs)
Customers can provide a reference to a "derived" SAM resource such as Alias of a Function or Stage of an API resource. This method recursively walks the tree, converting all derived references to the real resource name, if it is present. Example: {"Ref": "MyFunction.Alias"} -> {"Ref": "MyFunctionAliasLive"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For the first call, it is recommended that the input is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly, resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param SupportedResourceReferences supported_resource_refs: Object that contains information about the resource references supported in this SAM template, along with the value they should resolve to. :return: Modified input with resource references resolved
def resolve_sam_resource_refs(self, input, supported_resource_refs): """ Customers can provide a reference to a "derived" SAM resource such as Alias of a Function or Stage of an API resource. This method recursively walks the tree, converting all derived references to the real resource name, if it is present. Example: {"Ref": "MyFunction.Alias"} -> {"Ref": "MyFunctionAliasLive"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For first call, it is recommended that `template` is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param SupportedResourceReferences supported_resource_refs: Object that contains information about the resource references supported in this SAM template, along with the value they should resolve to. :return list errors: List of dictionary containing information about invalid reference. Empty list otherwise """ return self._traverse(input, supported_resource_refs, self._try_resolve_sam_resource_refs)
Some SAM resources have their logical ids mutated from the original id that the customer writes in the template. This method recursively walks the tree and updates these logical ids from the old value to the new value that is generated by SAM. Example: {"Ref": "MyLayer"} -> {"Ref": "MyLayerABC123"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For the first call, it is recommended that the input is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly, resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return: Modified input with logical id references resolved
def resolve_sam_resource_id_refs(self, input, supported_resource_id_refs): """ Some SAM resources have their logical ids mutated from the original id that the customer writes in the template. This method recursively walks the tree and updates these logical ids from the old value to the new value that is generated by SAM. Example: {"Ref": "MyLayer"} -> {"Ref": "MyLayerABC123"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For first call, it is recommended that `template` is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return list errors: List of dictionary containing information about invalid reference. Empty list otherwise """ return self._traverse(input, supported_resource_id_refs, self._try_resolve_sam_resource_id_refs)
Driver method that performs the actual traversal of the input and calls the appropriate `resolver_method` to perform the resolution. :param input: Any primitive type (dict, array, string etc) whose value might contain an intrinsic function :param resolution_data: Data that will help with resolution. For example, when resolving parameter references, this object will contain a dictionary of parameter names and their values. :param resolver_method: Method that will be called to actually resolve an intrinsic function. This method is called with the parameters `(input, resolution_data)`. :return: Modified `input` with intrinsics resolved
def _traverse(self, input, resolution_data, resolver_method):
    """
    Driver method that performs the actual traversal of the input and calls the appropriate
    `resolver_method` to perform the resolution.

    :param input: Any primitive type (dict, array, string etc) whose value might contain an intrinsic function
    :param resolution_data: Data that will help with resolution. For example, when resolving parameter
        references, this object will contain a dictionary of parameter names and their values.
    :param resolver_method: Method that will be called to actually resolve an intrinsic function. This method
        is called with the parameters `(input, resolution_data)`.
    :return: Modified `input` with intrinsics resolved
    """

    # There is no data to help with resolution. Skip the traversal altogether
    if len(resolution_data) == 0:
        return input

    #
    # Traversal Algorithm:
    #
    # Imagine the input dictionary/list as a tree. We are doing a Pre-Order tree traversal here where we first
    # process the root node before going to its children. Dict and Lists are the only two iterable nodes.
    # Everything else is a leaf node.
    #
    # We do a Pre-Order traversal to handle the case where `input` contains an intrinsic function as its only
    # child, ie. input = {"Ref": "foo"}.
    #
    # We will try to resolve the intrinsics if we can, otherwise return the original input. In some cases,
    # resolving an intrinsic will result in a terminal state ie. {"Ref": "foo"} could resolve to a string
    # "bar". In other cases, resolving intrinsics is only partial and we might need to continue traversing the
    # tree (ex: Fn::Sub) to handle nested intrinsics. All of these cases lend well towards a Pre-Order
    # traversal where we try and process the intrinsic, which results in a modified sub-tree to traverse.
    #
    input = resolver_method(input, resolution_data)
    if isinstance(input, dict):
        return self._traverse_dict(input, resolution_data, resolver_method)
    elif isinstance(input, list):
        return self._traverse_list(input, resolution_data, resolver_method)
    else:
        # We can iterate only over dict or list types. Primitive types are terminals
        return input
Traverse a dictionary to resolve intrinsic functions on every value :param input_dict: Input dictionary to traverse :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified dictionary with values resolved
def _traverse_dict(self, input_dict, resolution_data, resolver_method): """ Traverse a dictionary to resolve intrinsic functions on every value :param input_dict: Input dictionary to traverse :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified dictionary with values resolved """ for key, value in input_dict.items(): input_dict[key] = self._traverse(value, resolution_data, resolver_method) return input_dict
Traverse a list to resolve intrinsic functions on every element :param input_list: List of input :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified list with intrinsic functions resolved
def _traverse_list(self, input_list, resolution_data, resolver_method): """ Traverse a list to resolve intrinsic functions on every element :param input_list: List of input :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified list with intrinsic functions resolved """ for index, value in enumerate(input_list): input_list[index] = self._traverse(value, resolution_data, resolver_method) return input_list
Try to resolve parameter references on the given input object. The object could be of any type. If the input is not in the format used by intrinsics (ie. dictionary with one key), input is returned unmodified. If the single key in the dictionary is one of the supported intrinsic function types, go ahead and try to resolve it. :param input: Input object to resolve :param parameters: Parameter values used for ref substitution :return: Resolved input if it was a parameter reference; the original input otherwise
def _try_resolve_parameter_refs(self, input, parameters): """ Try to resolve parameter references on the given input object. The object could be of any type. If the input is not in the format used by intrinsics (ie. dictionary with one key), input is returned unmodified. If the single key in dictionary is one of the supported intrinsic function types, go ahead and try to resolve it. :param input: Input object to resolve :param parameters: Parameter values used to for ref substitution :return: """ if not self._is_intrinsic_dict(input): return input function_type = list(input.keys())[0] return self.supported_intrinsics[function_type].resolve_parameter_refs(input, parameters)
Try to resolve SAM resource references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param SupportedResourceReferences supported_resource_refs: Object containing information about available resource references and the values they resolve to. :return: Modified input dictionary with references resolved
def _try_resolve_sam_resource_refs(self, input, supported_resource_refs): """ Try to resolve SAM resource references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param SupportedResourceReferences supported_resource_refs: Object containing information about available resource references and the values they resolve to. :return: Modified input dictionary with references resolved """ if not self._is_intrinsic_dict(input): return input function_type = list(input.keys())[0] return self.supported_intrinsics[function_type].resolve_resource_refs(input, supported_resource_refs)
Try to resolve SAM resource id references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return: Modified input dictionary with id references resolved
def _try_resolve_sam_resource_id_refs(self, input, supported_resource_id_refs): """ Try to resolve SAM resource id references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return: Modified input dictionary with id references resolved """ if not self._is_intrinsic_dict(input): return input function_type = list(input.keys())[0] return self.supported_intrinsics[function_type].resolve_resource_id_refs(input, supported_resource_id_refs)
Can the given input represent an intrinsic function? :param input: Object to be checked :return: True if the input contains a supported intrinsic function, False otherwise
def _is_intrinsic_dict(self, input):
    """
    Can the given input represent an intrinsic function?

    :param input: Object to be checked
    :return: True if the input contains a supported intrinsic function, False otherwise
    """
    # All intrinsic functions are dictionaries with just one key
    return isinstance(input, dict) \
        and len(input) == 1 \
        and list(input.keys())[0] in self.supported_intrinsics
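To illustrate the one-key convention the resolver relies on, here is a self-contained sketch; the supported_intrinsics set is a stand-in for the real handler map, not taken from the source:

supported_intrinsics = {"Ref", "Fn::Sub", "Fn::GetAtt"}  # stand-in, not the real handler table

def is_intrinsic_dict(value):
    # Intrinsic functions are dictionaries with exactly one key, and that key must be supported.
    return isinstance(value, dict) and len(value) == 1 and list(value.keys())[0] in supported_intrinsics

print(is_intrinsic_dict({"Ref": "MyParam"}))                  # True
print(is_intrinsic_dict({"Ref": "MyParam", "Extra": 1}))      # False: more than one key
print(is_intrinsic_dict({"Fn::ImportValue": "SharedValue"}))  # False: not in the supported set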
Returns the CloudWatch Logs Subscription Filter and Lambda Permission to which this CloudWatch Logs event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this push event expands :rtype: list
def to_cloudformation(self, **kwargs): """Returns the CloudWatch Logs Subscription Filter and Lambda Permission to which this CloudWatch Logs event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this push event expands :rtype: list """ function = kwargs.get('function') if not function: raise TypeError("Missing required keyword argument: function") source_arn = self.get_source_arn() permission = self._construct_permission(function, source_arn=source_arn) subscription_filter = self.get_subscription_filter(function, permission) resources = [permission, subscription_filter] return resources
Converts the given template to an IAM-ready policy statement by substituting template parameters with the given values. :param template_name: Name of the template :param parameter_values: Values for all parameters of the template :return dict: Dictionary containing the policy statement :raises ValueError: If the given inputs don't represent a valid template :raises InsufficientParameterValues: If the parameter values don't have values for all required parameters
def convert(self, template_name, parameter_values): """ Converts the given template to IAM-ready policy statement by substituting template parameters with the given values. :param template_name: Name of the template :param parameter_values: Values for all parameters of the template :return dict: Dictionary containing policy statement :raises ValueError: If the given inputs don't represent valid template :raises InsufficientParameterValues: If the parameter values don't have values for all required parameters """ if not self.has(template_name): raise TemplateNotFoundException(template_name) template = self.get(template_name) return template.to_statement(parameter_values)
Is this a valid policy template dictionary :param dict policy_templates_dict: Data to be validated :param dict schema: Optional, dictionary containing JSON Schema representing policy template :return: True, if it is valid. :raises ValueError: If the template dictionary doesn't match up with the schema
def _is_valid_templates_dict(policy_templates_dict, schema=None): """ Is this a valid policy template dictionary :param dict policy_templates_dict: Data to be validated :param dict schema: Optional, dictionary containing JSON Schema representing policy template :return: True, if it is valid. :raises ValueError: If the template dictionary doesn't match up with the schema """ if not schema: schema = PolicyTemplatesProcessor._read_schema() try: jsonschema.validate(policy_templates_dict, schema) except ValidationError as ex: # Stringifying the exception will give us useful error message raise ValueError(str(ex)) return True
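The jsonschema call at the heart of this check can be exercised on its own; the toy schema and document below are illustrative and not the real policy template schema:

import jsonschema
from jsonschema.exceptions import ValidationError

schema = {"type": "object", "required": ["Templates"]}   # toy schema for illustration
document = {"Templates": {}}

try:
    jsonschema.validate(document, schema)
    print("valid")
except ValidationError as ex:
    # Stringifying the exception gives a useful error message, as the method above does
    raise ValueError(str(ex))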
Render a chart or page to a local HTML file. :param chart: A Chart or Page object :param path: The destination file to which the HTML code is written :param template_name: The name of the template file.
def render_chart_to_file(self, template_name: str, chart: Any, path: str): """ Render a chart or page to local html files. :param chart: A Chart or Page object :param path: The destination file which the html code write to :param template_name: The name of template file. """ tpl = self.env.get_template(template_name) html = tpl.render(chart=self.generate_js_link(chart)) write_utf8_html_file(path, self._reg_replace(html))
Decode base64 data, with padding being optional. :param data: Base64 data as an ASCII string :returns: The decoded byte string.
def decode_base64(data: str) -> bytes:
    """Decode base64 data, with padding being optional.

    :param data: Base64 data as an ASCII string
    :returns: The decoded byte string.
    """
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += "=" * (4 - missing_padding)
    return base64.decodebytes(data.encode("utf-8"))
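A quick usage example of the padding fix; standard base64 of b'hi' is 'aGk=', shown here with and without its padding:

print(decode_base64("aGk"))   # b'hi' -- missing '=' padding is restored automatically
print(decode_base64("aGk="))  # b'hi' -- already-padded input is left unchanged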
Collapse nodes at a given interval; when there are too many nodes this reduces visual clutter. :param data: node data :param interval: the collapse interval
def _set_collapse_interval(data, interval):
    """
    Collapse nodes at a given interval; when there are too many nodes this reduces visual clutter.

    :param data: node data
    :param interval: the collapse interval
    """
    if interval <= 0:
        return data
    if data and isinstance(data, list):
        for d in data:
            children = d.get("children", None)
            if children and interval > 0:
                for index, value in enumerate(children):
                    if index % interval == 0:
                        value.update(collapsed="false")
    return data
Parses a string and returns a pin-num.
def parse_pin(name_str):
    """Parses a string and returns a pin-num."""
    if len(name_str) < 1:
        raise ValueError("Expecting pin name to be at least 1 character.")
    if name_str[0] != 'P':
        raise ValueError("Expecting pin name to start with P")
    pin_str = name_str[1:].split('/')[0]
    if not pin_str.isdigit():
        raise ValueError("Expecting numeric pin number.")
    return int(pin_str)
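Example calls to parse_pin; the pin names below are illustrative:

print(parse_pin("P12"))      # 12
print(parse_pin("P7/ADC1"))  # 7 -- anything after '/' is ignored
parse_pin("A3")              # raises ValueError: name must start with 'P'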
Returns the numbered function (e.g. USART6) for this AF.
def ptr(self): """Returns the numbered function (i.e. USART6) for this AF.""" if self.fn_num is None: return self.func return '{:s}{:d}'.format(self.func, self.fn_num)
Prints the C representation of this AF.
def print(self): """Prints the C representation of this AF.""" if self.supported: print(' AF', end='') else: print(' //', end='') fn_num = self.fn_num if fn_num is None: fn_num = 0 print('({:2d}, {:8s}, {:2d}, {:10s}, {:8s}), // {:s}'.format(self.idx, self.func, fn_num, self.pin_type, self.ptr(), self.af_str))
Start the loop. :param `leds`: Which LEDs to light up upon switch press. :type `leds`: sequence of LED objects
def run_loop(leds=all_leds): """ Start the loop. :param `leds`: Which LEDs to light up upon switch press. :type `leds`: sequence of LED objects """ print('Loop started.\nPress Ctrl+C to break out of the loop.') while 1: try: if switch(): [led.on() for led in leds] else: [led.off() for led in leds] except OSError: # VCPInterrupt # Ctrl+C in interpreter mode. break
Search vpaths for the c file that matches the provided object_file. :param str obj_file: object file to find the matching c file for :param List[str] vpath: List of base paths, similar to gcc vpath :return: str path to c file or None
def find_c_file(obj_file, vpath): """ Search vpaths for the c file that matches the provided object_file. :param str obj_file: object file to find the matching c file for :param List[str] vpath: List of base paths, similar to gcc vpath :return: str path to c file or None """ c_file = None relative_c_file = os.path.splitext(obj_file)[0] + ".c" relative_c_file = relative_c_file.lstrip('/\\') for p in vpath: possible_c_file = os.path.join(p, relative_c_file) if os.path.exists(possible_c_file): c_file = possible_c_file break return c_file
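A hypothetical call, assuming a build layout where object files mirror the source tree; the paths below are made up for illustration:

# find_c_file returns None if no candidate exists on disk.
c_file = find_c_file("build/modfoo.o", ["./", "../common/"])
if c_file is None:
    print("no matching C file found")
else:
    print("found", c_file)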
Find any MP_REGISTER_MODULE definitions in the provided c file. :param str c_file: path to c file to check :return: List[(module_name, obj_module, enabled_define)]
def find_module_registrations(c_file): """ Find any MP_REGISTER_MODULE definitions in the provided c file. :param str c_file: path to c file to check :return: List[(module_name, obj_module, enabled_define)] """ global pattern if c_file is None: # No c file to match the object file, skip return set() with io.open(c_file, encoding='utf-8') as c_file_obj: return set(re.findall(pattern, c_file_obj.read()))
Generate header with module table entries for builtin modules. :param List[(module_name, obj_module, enabled_define)] modules: module defs :return: None
def generate_module_table_header(modules): """ Generate header with module table entries for builtin modules. :param List[(module_name, obj_module, enabled_define)] modules: module defs :return: None """ # Print header file for all external modules. mod_defs = [] print("// Automatically generated by makemoduledefs.py.\n") for module_name, obj_module, enabled_define in modules: mod_def = "MODULE_DEF_{}".format(module_name.upper()) mod_defs.append(mod_def) print(( "#if ({enabled_define})\n" " extern const struct _mp_obj_module_t {obj_module};\n" " #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n" "#else\n" " #define {mod_def}\n" "#endif\n" ).format(module_name=module_name, obj_module=obj_module, enabled_define=enabled_define, mod_def=mod_def) ) print("\n#define MICROPY_REGISTERED_MODULES \\") for mod_def in mod_defs: print(" {mod_def} \\".format(mod_def=mod_def)) print("// MICROPY_REGISTERED_MODULES")
Reads test files
def readfiles(): """ Reads test files """ tests = list(filter(lambda x: x.endswith('.py'), os.listdir(TESTPATH))) tests.sort() files = [] for test in tests: text = open(TESTPATH + test, 'r').read() try: class_, desc, cause, workaround, code = [x.rstrip() for x in \ list(filter(None, re.split(SPLIT, text)))] output = Output(test, class_, desc, cause, workaround, code, '', '', '') files.append(output) except IndexError: print('Incorrect format in file ' + TESTPATH + test) return files
converts CPython module names into MicroPython equivalents
def uimports(code): """ converts CPython module names into MicroPython equivalents """ for uimport in UIMPORTLIST: uimport = bytes(uimport, 'utf8') code = code.replace(uimport, b'u' + uimport) return code
indents paragraphs of text for rst formatting
def indent(block, spaces):
    """ indents paragraphs of text for rst formatting """
    new_block = ''
    for line in block.split('\n'):
        new_block += spaces + line + '\n'
    return new_block
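For example, indenting a two-line code sample by a four-space TAB:

TAB = '    '
sample = 'x = 1\nprint(x)'
print(indent(sample, TAB))
# Output:
#     x = 1
#     print(x)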
creates a table given any set of columns
def gen_table(contents): """ creates a table given any set of columns """ xlengths = [] ylengths = [] for column in contents: col_len = 0 for entry in column: lines = entry.split('\n') for line in lines: col_len = max(len(line) + 2, col_len) xlengths.append(col_len) for i in range(len(contents[0])): ymax = 0 for j in range(len(contents)): ymax = max(ymax, len(contents[j][i].split('\n'))) ylengths.append(ymax) table_divider = '+' + ''.join(['-' * i + '+' for i in xlengths]) + '\n' table = table_divider for i in range(len(ylengths)): row = [column[i] for column in contents] row = [entry + '\n' * (ylengths[i]-len(entry.split('\n'))) for entry in row] row = [entry.split('\n') for entry in row] for j in range(ylengths[i]): k = 0 for entry in row: width = xlengths[k] table += ''.join(['| {:{}}'.format(entry[j], width - 1)]) k += 1 table += '|\n' table += table_divider return table + '\n'
creates restructured text documents to display tests
def gen_rst(results): """ creates restructured text documents to display tests """ # make sure the destination directory exists try: os.mkdir(DOCPATH) except OSError as e: if e.args[0] != errno.EEXIST and e.args[0] != errno.EISDIR: raise toctree = [] class_ = [] for output in results: section = output.class_.split(',') for i in range(len(section)): section[i] = section[i].rstrip() if section[i] in CLASSMAP: section[i] = CLASSMAP[section[i]] if i >= len(class_) or section[i] != class_[i]: if i == 0: filename = section[i].replace(' ', '_').lower() rst = open(DOCPATH + filename + '.rst', 'w') rst.write(HEADER) rst.write(section[i] + '\n') rst.write(RSTCHARS[0] * len(section[i])) rst.write(time.strftime("\nGenerated %a %d %b %Y %X UTC\n\n", time.gmtime())) toctree.append(filename) else: rst.write(section[i] + '\n') rst.write(RSTCHARS[min(i, len(RSTCHARS)-1)] * len(section[i])) rst.write('\n\n') class_ = section rst.write('.. _cpydiff_%s:\n\n' % output.name.rsplit('.', 1)[0]) rst.write(output.desc + '\n') rst.write('~' * len(output.desc) + '\n\n') if output.cause != 'Unknown': rst.write('**Cause:** ' + output.cause + '\n\n') if output.workaround != 'Unknown': rst.write('**Workaround:** ' + output.workaround + '\n\n') rst.write('Sample code::\n\n' + indent(output.code, TAB) + '\n') output_cpy = indent(''.join(output.output_cpy[0:2]), TAB).rstrip() output_cpy = ('::\n\n' if output_cpy != '' else '') + output_cpy output_upy = indent(''.join(output.output_upy[0:2]), TAB).rstrip() output_upy = ('::\n\n' if output_upy != '' else '') + output_upy table = gen_table([['CPy output:', output_cpy], ['uPy output:', output_upy]]) rst.write(table) template = open(INDEXTEMPLATE, 'r') index = open(DOCPATH + INDEX, 'w') index.write(HEADER) index.write(template.read()) for section in INDEXPRIORITY: if section in toctree: index.write(indent(section + '.rst', TAB)) toctree.remove(section) for section in toctree: index.write(indent(section + '.rst', TAB))
Main function
def main(): """ Main function """ # set search path so that test scripts find the test modules (and no other ones) os.environ['PYTHONPATH'] = TESTPATH os.environ['MICROPYPATH'] = TESTPATH files = readfiles() results = run_tests(files) gen_rst(results)
Initializes the found DFU device so that we can program it.
def init(): """Initializes the found DFU device so that we can program it.""" global __dev, __cfg_descr devices = get_dfu_devices(idVendor=__VID, idProduct=__PID) if not devices: raise ValueError('No DFU device found') if len(devices) > 1: raise ValueError("Multiple DFU devices found") __dev = devices[0] __dev.set_configuration() # Claim DFU interface usb.util.claim_interface(__dev, __DFU_INTERFACE) # Find the DFU configuration descriptor, either in the device or interfaces __cfg_descr = None for cfg in __dev.configurations(): __cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors) if __cfg_descr: break for itf in cfg.interfaces(): __cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors) if __cfg_descr: break # Get device into idle state for attempt in range(4): status = get_status() if status == __DFU_STATE_DFU_IDLE: break elif (status == __DFU_STATE_DFU_DOWNLOAD_IDLE or status == __DFU_STATE_DFU_UPLOAD_IDLE): abort_request() else: clr_status()
Performs a mass erase (i.e. erases the entire device).
def mass_erase():
    """Performs a mass erase (i.e. erases the entire device)."""
    # Send DNLOAD with first byte=0x41
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE,
                        "\x41", __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: erase failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: erase failed")
Erases a single page.
def page_erase(addr): """Erases a single page.""" if __verbose: print("Erasing page: 0x%x..." % (addr)) # Send DNLOAD with first byte=0x41 and page address buf = struct.pack("<BI", 0x41, addr) __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: erase failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: erase failed")
Sets the address for the next operation.
def set_address(addr): """Sets the address for the next operation.""" # Send DNLOAD with first byte=0x21 and page address buf = struct.pack("<BI", 0x21, addr) __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: set address failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: set address failed")
Writes a single page. This routine assumes that memory has already been erased.
def write_page(buf, xfer_offset): """Writes a single page. This routine assumes that memory has already been erased. """ xfer_base = 0x08000000 # Set mem write address set_address(xfer_base+xfer_offset) # Send DNLOAD with fw data __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: write memory failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: write memory failed") if __verbose: print ("Write: 0x%x " % (xfer_base + xfer_offset))
Exit DFU mode, and start running the program.
def exit_dfu(): """Exit DFU mode, and start running the program.""" # set jump address set_address(0x08000000) # Send DNLOAD with 0 length to exit DFU __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, None, __TIMEOUT) try: # Execute last command if get_status() != __DFU_STATE_DFU_MANIFEST: print("Failed to reset device") # Release device usb.util.dispose_resources(__dev) except: pass
Writes a buffer into memory. This routine assumes that memory has already been erased.
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0): """Writes a buffer into memory. This routine assumes that memory has already been erased. """ xfer_count = 0 xfer_bytes = 0 xfer_total = len(buf) xfer_base = addr while xfer_bytes < xfer_total: if __verbose and xfer_count % 512 == 0: print ("Addr 0x%x %dKBs/%dKBs..." % (xfer_base + xfer_bytes, xfer_bytes // 1024, xfer_total // 1024)) if progress and xfer_count % 2 == 0: progress(progress_addr, xfer_base + xfer_bytes - progress_addr, progress_size) # Set mem write address set_address(xfer_base+xfer_bytes) # Send DNLOAD with fw data chunk = min(__cfg_descr.wTransferSize, xfer_total-xfer_bytes) __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: write memory failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: write memory failed") xfer_count += 1 xfer_bytes += chunk
Parses the struct defined by `fmt` from `data`, stores the parsed fields into a named tuple using `names`. Returns the named tuple, and the data with the struct stripped off.
def consume(fmt, data, names):
    """Parses the struct defined by `fmt` from `data`, stores the parsed fields
    into a named tuple using `names`. Returns the named tuple, and the data
    with the struct stripped off."""
    size = struct.calcsize(fmt)
    return named(struct.unpack(fmt, data[:size]), names), data[size:]
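A small demonstration of consume. The named helper shown here is a stand-in for the one defined elsewhere in this script; the dict container it returns is assumed for illustration:

import struct

def named(values, names):
    # Stand-in for the helper defined elsewhere in this script.
    return dict(zip(names.split(), values))

data = struct.pack('<BI', 0x41, 0x08000000) + b'rest'
header, remaining = consume('<BI', data, 'command addr')
print(header['command'], hex(header['addr']))  # 65 0x8000000
print(remaining)                               # b'rest'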
Reads a DFU file, and parses the individual elements from the file. Returns an array of elements. Each element is a dictionary with the following keys: num - The element index. addr - The address that the element data should be written to. size - The size of the element data. data - The element data. If an error occurs while parsing the file, then None is returned.
def read_dfu_file(filename): """Reads a DFU file, and parses the individual elements from the file. Returns an array of elements. Each element is a dictionary with the following keys: num - The element index address - The address that the element data should be written to. size - The size of the element ddata. data - The element data. If an error occurs while parsing the file, then None is returned. """ print("File: {}".format(filename)) with open(filename, 'rb') as fin: data = fin.read() crc = compute_crc(data[:-4]) elements = [] # Decode the DFU Prefix # # <5sBIB # < little endian # 5s char[5] signature "DfuSe" # B uint8_t version 1 # I uint32_t size Size of the DFU file (not including suffix) # B uint8_t targets Number of targets dfu_prefix, data = consume('<5sBIB', data, 'signature version size targets') print (" %(signature)s v%(version)d, image size: %(size)d, " "targets: %(targets)d" % dfu_prefix) for target_idx in range(dfu_prefix['targets']): # Decode the Image Prefix # # <6sBI255s2I # < little endian # 6s char[6] signature "Target" # B uint8_t altsetting # I uint32_t named bool indicating if a name was used # 255s char[255] name name of the target # I uint32_t size size of image (not incl prefix) # I uint32_t elements Number of elements in the image img_prefix, data = consume('<6sBI255s2I', data, 'signature altsetting named name ' 'size elements') img_prefix['num'] = target_idx if img_prefix['named']: img_prefix['name'] = cstring(img_prefix['name']) else: img_prefix['name'] = '' print(' %(signature)s %(num)d, alt setting: %(altsetting)s, ' 'name: "%(name)s", size: %(size)d, elements: %(elements)d' % img_prefix) target_size = img_prefix['size'] target_data, data = data[:target_size], data[target_size:] for elem_idx in range(img_prefix['elements']): # Decode target prefix # < little endian # I uint32_t element address # I uint32_t element size elem_prefix, target_data = consume('<2I', target_data, 'addr size') elem_prefix['num'] = elem_idx print(' %(num)d, address: 0x%(addr)08x, size: %(size)d' % elem_prefix) elem_size = elem_prefix['size'] elem_data = target_data[:elem_size] target_data = target_data[elem_size:] elem_prefix['data'] = elem_data elements.append(elem_prefix) if len(target_data): print("target %d PARSE ERROR" % target_idx) # Decode DFU Suffix # < little endian # H uint16_t device Firmware version # H uint16_t product # H uint16_t vendor # H uint16_t dfu 0x11a (DFU file format version) # 3s char[3] ufd 'UFD' # B uint8_t len 16 # I uint32_t crc32 dfu_suffix = named(struct.unpack('<4H3sBI', data[:16]), 'device product vendor dfu ufd len crc') print (' usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, ' 'dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x' % dfu_suffix) if crc != dfu_suffix['crc']: print("CRC ERROR: computed crc32 is 0x%08x" % crc) return data = data[16:] if data: print("PARSE ERROR") return return elements
Returns a list of USB devices which are currently in DFU mode. Additional filters (like idProduct and idVendor) can be passed in to refine the search.
def get_dfu_devices(*args, **kwargs):
    """Returns a list of USB devices which are currently in DFU mode.
    Additional filters (like idProduct and idVendor) can be passed in
    to refine the search.
    """
    # convert to list for compatibility with newer pyusb
    return list(usb.core.find(*args, find_all=True,
                              custom_match=FilterDFU(), **kwargs))
Returns an array which identifies the memory layout. Each entry of the array will contain a dictionary with the following keys: addr - Address of this memory segment last_addr - Last address contained within the memory segment. size - size of the segment, in bytes num_pages - number of pages in the segment page_size - size of each page, in bytes
def get_memory_layout(device): """Returns an array which identifies the memory layout. Each entry of the array will contain a dictionary with the following keys: addr - Address of this memory segment last_addr - Last address contained within the memory segment. size - size of the segment, in bytes num_pages - number of pages in the segment page_size - size of each page, in bytes """ cfg = device[0] intf = cfg[(0, 0)] mem_layout_str = get_string(device, intf.iInterface) mem_layout = mem_layout_str.split('/') result = [] for mem_layout_index in range(1, len(mem_layout), 2): addr = int(mem_layout[mem_layout_index], 0) segments = mem_layout[mem_layout_index + 1].split(',') seg_re = re.compile(r'(\d+)\*(\d+)(.)(.)') for segment in segments: seg_match = seg_re.match(segment) num_pages = int(seg_match.groups()[0], 10) page_size = int(seg_match.groups()[1], 10) multiplier = seg_match.groups()[2] if multiplier == 'K': page_size *= 1024 if multiplier == 'M': page_size *= 1024 * 1024 size = num_pages * page_size last_addr = addr + size - 1 result.append(named((addr, last_addr, size, num_pages, page_size), "addr last_addr size num_pages page_size")) addr += size return result
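A standalone sketch of the layout-string parsing done above, using a typical STM32 DfuSe descriptor string; the exact string is device-specific and is normally read from iInterface:

import re

mem_layout_str = '@Internal Flash  /0x08000000/04*016Kg,01*064Kg,07*128Kg'
mem_layout = mem_layout_str.split('/')
addr = int(mem_layout[1], 0)                      # 0x08000000
seg_re = re.compile(r'(\d+)\*(\d+)(.)(.)')
for segment in mem_layout[2].split(','):
    pages, size, multiplier, _ = seg_re.match(segment).groups()
    pages, size = int(pages), int(size)
    size *= {'K': 1024, 'M': 1024 * 1024}.get(multiplier, 1)
    print(hex(addr), pages, size)                 # e.g. 0x8000000 4 16384
    addr += pages * size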
Prints a list of devices detected in DFU mode.
def list_dfu_devices(*args, **kwargs):
    """Prints a list of devices detected in DFU mode."""
    devices = get_dfu_devices(*args, **kwargs)
    if not devices:
        print("No DFU capable devices found")
        return
    for device in devices:
        print("Bus {} Device {:03d}: ID {:04x}:{:04x}"
              .format(device.bus, device.address,
                      device.idVendor, device.idProduct))
        layout = get_memory_layout(device)
        print("Memory Layout")
        for entry in layout:
            print(" 0x{:x} {:2d} pages of {:3d}K bytes"
                  .format(entry['addr'], entry['num_pages'],
                          entry['page_size'] // 1024))
Writes the indicated elements into the target memory, erasing as needed.
def write_elements(elements, mass_erase_used, progress=None): """Writes the indicated elements into the target memory, erasing as needed. """ mem_layout = get_memory_layout(__dev) for elem in elements: addr = elem['addr'] size = elem['size'] data = elem['data'] elem_size = size elem_addr = addr if progress: progress(elem_addr, 0, elem_size) while size > 0: write_size = size if not mass_erase_used: for segment in mem_layout: if addr >= segment['addr'] and \ addr <= segment['last_addr']: # We found the page containing the address we want to # write, erase it page_size = segment['page_size'] page_addr = addr & ~(page_size - 1) if addr + write_size > page_addr + page_size: write_size = page_addr + page_size - addr page_erase(page_addr) break write_memory(addr, data[:write_size], progress, elem_addr, elem_size) data = data[write_size:] addr += write_size size -= write_size if progress: progress(elem_addr, addr - elem_addr, elem_size)
Prints a progress report suitable for use on the command line.
def cli_progress(addr, offset, size): """Prints a progress report suitable for use on the command line.""" width = 25 done = offset * width // size print("\r0x{:08x} {:7d} [{}{}] {:3d}% " .format(addr, size, '=' * done, ' ' * (width - done), offset * 100 // size), end="") try: sys.stdout.flush() except OSError: pass # Ignore Windows CLI "WinError 87" on Python 3.6 if offset == size: print("")
Test program for verifying this file's functionality.
def main(): """Test program for verifying this files functionality.""" global __verbose # Parse CMD args parser = argparse.ArgumentParser(description='DFU Python Util') #parser.add_argument("path", help="file path") parser.add_argument( "-l", "--list", help="list available DFU devices", action="store_true", default=False ) parser.add_argument( "-m", "--mass-erase", help="mass erase device", action="store_true", default=False ) parser.add_argument( "-u", "--upload", help="read file from DFU device", dest="path", default=False ) parser.add_argument( "-v", "--verbose", help="increase output verbosity", action="store_true", default=False ) args = parser.parse_args() __verbose = args.verbose if args.list: list_dfu_devices(idVendor=__VID, idProduct=__PID) return init() if args.mass_erase: print ("Mass erase...") mass_erase() if args.path: elements = read_dfu_file(args.path) if not elements: return print("Writing memory...") write_elements(elements, args.mass_erase, progress=cli_progress) print("Exiting DFU...") exit_dfu() return print("No command specified")
Parses a string and returns a (port-num, pin-num) tuple.
def parse_port_pin(name_str):
    """Parses a string and returns a (port-num, pin-num) tuple."""
    if len(name_str) < 3:
        raise ValueError("Expecting pin name to be at least 3 characters.")
    if name_str[0] != 'P':
        raise ValueError("Expecting pin name to start with P")
    if name_str[1] < 'A' or name_str[1] > 'K':
        raise ValueError("Expecting pin port to be between A and K")
    port = ord(name_str[1]) - ord('A')
    pin_str = name_str[2:]
    if not pin_str.isdigit():
        raise ValueError("Expecting numeric pin number.")
    return (port, int(pin_str))
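A quick usage sketch for parse_port_pin() as defined above:

print(parse_port_pin('PA0'))    # -> (0, 0)
print(parse_port_pin('PB15'))   # -> (1, 15)
print(parse_port_pin('PK7'))    # -> (10, 7)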
Prints the C representation of this AF.
def print(self): """Prints the C representation of this AF.""" cond_var = None if self.supported: cond_var = conditional_var('{}{}'.format(self.func, self.fn_num)) print_conditional_if(cond_var) print(' AF', end='') else: print(' //', end='') fn_num = self.fn_num if fn_num is None: fn_num = 0 print('({:2d}, {:8s}, {:2d}, {:10s}, {:8s}), // {:s}'.format(self.idx, self.func, fn_num, self.pin_type, self.ptr(), self.af_str)) print_conditional_endif(cond_var)
Parses a string and returns a (port, gpio_bit) tuple.
def parse_port_pin(name_str): """Parses a string and returns a (port, gpio_bit) tuple.""" if len(name_str) < 3: raise ValueError("Expecting pin name to be at least 3 characters") if name_str[:2] != 'GP': raise ValueError("Expecting pin name to start with GP") if not name_str[2:].isdigit(): raise ValueError("Expecting numeric GPIO number") port = int(int(name_str[2:]) / 8) gpio_bit = 1 << int(int(name_str[2:]) % 8) return (port, gpio_bit)
Run a single operator and return the results. Args: outputs_info: a list of tuples, which contains the element type and shape of each output. The first element of the tuple is the dtype, and the second element is the shape. More use cases can be found in https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py
def run_node(cls, node, # type: NodeProto inputs, # type: Any device='CPU', # type: Text outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]] **kwargs # type: Dict[Text, Any] ): # type: (...) -> Optional[Tuple[Any, ...]] '''Simple run one operator and return the results. Args: outputs_info: a list of tuples, which contains the element type and shape of each output. First element of the tuple is the dtype, and the second element is the shape. More use case can be found in https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py ''' # TODO Remove Optional from return type if 'opset_version' in kwargs: special_context = c_checker.CheckerContext() special_context.ir_version = IR_VERSION special_context.opset_imports = {'': kwargs['opset_version']} # type: ignore onnx.checker.check_node(node, special_context) else: onnx.checker.check_node(node) return None
Load data from an external file for tensor. @params tensor: a TensorProto object. base_dir: directory that contains the external data.
def load_external_data_for_tensor(tensor, base_dir): # type: (TensorProto, Text) -> None """ Load data from an external file for tensor. @params tensor: a TensorProto object. base_dir: directory that contains the external data. """ if tensor.HasField("raw_data"): # already loaded return info = ExternalDataInfo(tensor) file_location = _sanitize_path(info.location) external_data_file_path = os.path.join(base_dir, file_location) with open(external_data_file_path, 'rb') as data_file: if info.offset: data_file.seek(info.offset) if info.length: tensor.raw_data = data_file.read(info.length) else: tensor.raw_data = data_file.read()
Loads external tensors into model @params model: ModelProto to load external data to base_dir: directory that contains external data
def load_external_data_for_model(model, base_dir): # type: (ModelProto, Text) -> None """ Loads external tensors into model @params model: ModelProto to load external data to base_dir: directory that contains external data """ for tensor in _get_all_tensors(model): if uses_external_data(tensor): load_external_data_for_tensor(tensor, base_dir)
Call to set all tensors as external data. save_model saves all the tensor data as external data after calling this function. @params model: ModelProto to be converted. all_tensors_to_one_file: If true, save all tensors to one external file specified by location. If false, save each tensor to a file named with the tensor name. location: specify the external file that all tensors are saved to. If not specified, the model name is used.
def convert_model_to_external_data(model, all_tensors_to_one_file=True, location=None): # type: (ModelProto, bool, Optional[Text]) -> None """ call to set all tensors as external data. save_model saves all the tensors data as external data after calling this function. @params model: ModelProto to be converted. all_tensors_to_one_file: If true, save all tensors to one external file specified by location. If false, save each tensor to a file named with the tensor name. location: specify the external file that all tensors to save to. If not specified, will use the model name. """ if all_tensors_to_one_file: file_name = Text(uuid.uuid1()) if location: file_name = location for tensor in _get_all_tensors(model): set_external_data(tensor, file_name) else: for tensor in _get_all_tensors(model): set_external_data(tensor, tensor.name)
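A minimal sketch of the intended workflow, assuming the function is used via onnx.external_data_helper and that 'model.onnx' / 'model_data.bin' are placeholder file names:

import onnx
from onnx.external_data_helper import convert_model_to_external_data

model = onnx.load('model.onnx')
# Point every tensor at a single side-car file; the bytes are written on save.
convert_model_to_external_data(model, all_tensors_to_one_file=True,
                               location='model_data.bin')
onnx.save(model, 'model_external.onnx')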
Call to set all tensor data as embedded data. save_model saves all the tensor data as embedded data after calling this function. @params model: ModelProto to be converted.
def convert_model_from_external_data(model):  # type: (ModelProto) -> None
    """
    Call to set all tensor data as embedded data. save_model saves all the
    tensor data as embedded data after calling this function.
    @params
    model: ModelProto to be converted.
    """
    for tensor in _get_all_tensors(model):
        if uses_external_data(tensor):
            if not tensor.HasField("raw_data"):
                raise ValueError("raw_data field doesn't exist.")
            del tensor.external_data[:]
            tensor.data_location = TensorProto.DEFAULT
Write tensor data to an external file according to information in the `external_data` field. @params tensor: Tensor object to be serialized base_path: System path of a folder where tensor data is to be stored
def save_external_data(tensor, base_path): # type: (TensorProto, Text) -> None """ Write tensor data to an external file according to information in the `external_data` field. @params tensor: Tensor object to be serialized base_path: System path of a folder where tensor data is to be stored """ info = ExternalDataInfo(tensor) external_data_file_path = os.path.join(base_path, info.location) # Retrieve the tensor's data from raw_data or load external file if not tensor.HasField("raw_data"): raise ValueError("raw_data field doesn't exist.") # Create file if it doesn't exist if not os.path.isfile(external_data_file_path): open(external_data_file_path, 'ab').close() # Open file for reading and writing at random locations ('r+b') with open(external_data_file_path, 'r+b') as data_file: data_file.seek(0, 2) if info.offset is not None: # Pad file to required offset if needed file_size = data_file.tell() if info.offset > file_size: data_file.write(b"\0" * (info.offset - file_size)) data_file.seek(info.offset) offset = data_file.tell() data_file.write(tensor.raw_data) set_external_data(tensor, info.location, offset, data_file.tell() - offset)
Create an iterator of tensors from node attributes of an ONNX model.
def _get_attribute_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto] """Create an iterator of tensors from node attributes of an ONNX model.""" for node in onnx_model_proto.graph.node: for attribute in node.attribute: if attribute.HasField("t"): yield attribute.t for tensor in attribute.tensors: yield tensor
Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed
def remove_external_data_field(tensor, field_key): # type: (TensorProto, Text) -> None """ Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed """ for (i, field) in enumerate(tensor.external_data): if field.key == field_key: del tensor.external_data[i]
Write external data of all tensors to files on disk. Note: This function also strips basepath information from all tensors' external_data fields. @params model: Model object which is the source of tensors to serialize. filepath: System path to the directory which should be treated as base path for external data. @return The modified model object.
def write_external_data_tensors(model, filepath): # type: (ModelProto, Text) -> ModelProto """ Write external data of all tensors to files on disk. Note: This function also strips basepath information from all tensors' external_data fields. @params model: Model object which is the source of tensors to serialize. filepath: System path to the directory which should be treated as base path for external data. @return The modified model object. """ for tensor in _get_all_tensors(model): if uses_external_data(tensor): save_external_data(tensor, filepath) tensor.ClearField(str('raw_data')) return model
Imports a stdlib path and returns a handle to it, e.g. self._import("typing", "Optional") -> "Optional"
def _import(self, path, name):
    # type: (Text, Text) -> Text
    """Imports a stdlib path and returns a handle to it,
    e.g. self._import("typing", "Optional") -> "Optional"
    """
    imp = path.replace('/', '.')
    self.imports[imp].add(name)
    return name
Import a referenced message and return a handle
def _import_message(self, type_name): # type: (d.FieldDescriptorProto) -> Text """Import a referenced message and return a handle""" name = cast(Text, type_name) if name[0] == '.' and name[1].isupper() and name[2].islower(): # Message defined in this file return name[1:] message_fd = self.descriptors.message_to_fd[name] if message_fd.name == self.fd.name: # message defined in this package split = name.split('.') for i, segment in enumerate(split): if segment and segment[0].isupper() and segment[1].islower(): return ".".join(split[i:]) # Not in package. Must import split = name.split(".") for i, segment in enumerate(split): if segment and segment[0].isupper() and segment[1].islower(): assert message_fd.name.endswith('.proto') import_name = self._import(message_fd.name[:-6].replace('-', '_') + "_pb2", segment) remains = ".".join(split[i + 1:]) if not remains: return import_name raise AssertionError("Don't support nested imports yet") # return new_nested_import(import_name, remains) raise AssertionError("Could not parse local name " + name)
Run command.
def run(self): """Run command.""" onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py")) returncode = subprocess.call([sys.executable, onnx_script]) sys.exit(returncode)
Construct a NodeProto. Arguments: op_type (string): The name of the operator to construct inputs (list of string): list of input names outputs (list of string): list of output names name (string, default None): optional unique identifier for NodeProto doc_string (string, default None): optional documentation string for NodeProto domain (string, default None): optional domain for NodeProto. If it's None, we will just use default domain (which is empty) **kwargs (dict): the attributes of the node. The acceptable values are documented in :func:`make_attribute`.
def make_node( op_type, # type: Text inputs, # type: Sequence[Text] outputs, # type: Sequence[Text] name=None, # type: Optional[Text] doc_string=None, # type: Optional[Text] domain=None, # type: Optional[Text] **kwargs # type: Any ): # type: (...) -> NodeProto """Construct a NodeProto. Arguments: op_type (string): The name of the operator to construct inputs (list of string): list of input names outputs (list of string): list of output names name (string, default None): optional unique identifier for NodeProto doc_string (string, default None): optional documentation string for NodeProto domain (string, default None): optional domain for NodeProto. If it's None, we will just use default domain (which is empty) **kwargs (dict): the attributes of the node. The acceptable values are documented in :func:`make_attribute`. """ node = NodeProto() node.op_type = op_type node.input.extend(inputs) node.output.extend(outputs) if name: node.name = name if doc_string: node.doc_string = doc_string if domain is not None: node.domain = domain if kwargs: node.attribute.extend( make_attribute(key, value) for key, value in sorted(kwargs.items())) return node
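A small usage sketch for make_node(); attribute kwargs are converted with make_attribute() under the hood:

from onnx.helper import make_node

node = make_node(
    'Conv',                      # op_type
    ['X', 'W', 'B'],             # inputs
    ['Y'],                       # outputs
    name='conv1',
    kernel_shape=[3, 3],
    pads=[1, 1, 1, 1],
)
print(node.op_type, list(node.input), len(node.attribute))  # Conv ['X', 'W', 'B'] 2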
Construct an OperatorSetIdProto. Arguments: domain (string): The domain of the operator set id version (integer): Version of operator set id
def make_operatorsetid( domain, # type: Text version, # type: int ): # type: (...) -> OperatorSetIdProto """Construct an OperatorSetIdProto. Arguments: domain (string): The domain of the operator set id version (integer): Version of operator set id """ operatorsetid = OperatorSetIdProto() operatorsetid.domain = domain operatorsetid.version = version return operatorsetid
An internal helper to convert the input to bytes or to False. The criteria for conversion are as follows and should be python 2 and 3 compatible: - If val is py2 str or py3 bytes: return val unchanged - If val is py2 unicode or py3 str: return val.encode('utf-8') - Otherwise, return False
def _to_bytes_or_false(val):  # type: (Union[Text, bytes]) -> Union[bytes, bool]
    """An internal helper to convert the input to bytes or to False.

    The criteria for conversion are as follows and should be python 2 and 3
    compatible:
    - If val is py2 str or py3 bytes: return val unchanged
    - If val is py2 unicode or py3 str: return val.encode('utf-8')
    - Otherwise, return False
    """
    if isinstance(val, bytes):
        return val
    else:
        try:
            return val.encode('utf-8')
        except AttributeError:
            return False
Makes an AttributeProto based on the value type.
def make_attribute( key, # type: Text value, # type: Any doc_string=None # type: Optional[Text] ): # type: (...) -> AttributeProto """Makes an AttributeProto based on the value type.""" attr = AttributeProto() attr.name = key if doc_string: attr.doc_string = doc_string is_iterable = isinstance(value, collections.Iterable) bytes_or_false = _to_bytes_or_false(value) # First, singular cases # float if isinstance(value, float): attr.f = value attr.type = AttributeProto.FLOAT # integer elif isinstance(value, numbers.Integral): attr.i = cast(int, value) attr.type = AttributeProto.INT # string elif bytes_or_false: assert isinstance(bytes_or_false, bytes) attr.s = bytes_or_false attr.type = AttributeProto.STRING elif isinstance(value, TensorProto): attr.t.CopyFrom(value) attr.type = AttributeProto.TENSOR elif isinstance(value, GraphProto): attr.g.CopyFrom(value) attr.type = AttributeProto.GRAPH # third, iterable cases elif is_iterable: byte_array = [_to_bytes_or_false(v) for v in value] if all(isinstance(v, float) for v in value): attr.floats.extend(value) attr.type = AttributeProto.FLOATS elif all(isinstance(v, numbers.Integral) for v in value): # Turn np.int32/64 into Python built-in int. attr.ints.extend(int(v) for v in value) attr.type = AttributeProto.INTS elif all(byte_array): attr.strings.extend(cast(List[bytes], byte_array)) attr.type = AttributeProto.STRINGS elif all(isinstance(v, TensorProto) for v in value): attr.tensors.extend(value) attr.type = AttributeProto.TENSORS elif all(isinstance(v, GraphProto) for v in value): attr.graphs.extend(value) attr.type = AttributeProto.GRAPHS else: raise ValueError( "You passed in an iterable attribute but I cannot figure out " "its applicable type.") else: raise ValueError( 'Value "{}" is not valid attribute data type.'.format(value)) return attr
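A sketch of how make_attribute() infers the AttributeProto type from the Python value it is given:

from onnx import AttributeProto
from onnx.helper import make_attribute

print(make_attribute('alpha', 0.5).type == AttributeProto.FLOAT)         # True
print(make_attribute('axis', 1).type == AttributeProto.INT)              # True
print(make_attribute('mode', 'constant').type == AttributeProto.STRING)  # True
print(make_attribute('strides', [1, 2]).type == AttributeProto.INTS)     # True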
Makes a ValueInfoProto based on the data type and shape.
def make_tensor_value_info( name, # type: Text elem_type, # type: int shape, # type: Optional[Sequence[Union[Text, int]]] doc_string="", # type: Text shape_denotation=None, # type: Optional[List[Text]] ): # type: (...) -> ValueInfoProto """Makes a ValueInfoProto based on the data type and shape.""" value_info_proto = ValueInfoProto() value_info_proto.name = name if doc_string: value_info_proto.doc_string = doc_string tensor_type_proto = value_info_proto.type.tensor_type tensor_type_proto.elem_type = elem_type tensor_shape_proto = tensor_type_proto.shape if shape is not None: # You might think this is a no-op (extending a normal Python # list by [] certainly is), but protobuf lists work a little # differently; if a field is never set, it is omitted from the # resulting protobuf; a list that is explicitly set to be # empty will get an (empty) entry in the protobuf. This # difference is visible to our consumers, so make sure we emit # an empty shape! tensor_shape_proto.dim.extend([]) if shape_denotation: if len(shape_denotation) != len(shape): raise ValueError( 'Invalid shape_denotation. ' 'Must be of the same length as shape.') for i, d in enumerate(shape): dim = tensor_shape_proto.dim.add() if d is None: pass elif isinstance(d, integer_types): dim.dim_value = d elif isinstance(d, text_type): dim.dim_param = d else: raise ValueError( 'Invalid item in shape: {}. ' 'Needs to of integer_types or text_type.'.format(d)) if shape_denotation: dim.denotation = shape_denotation[i] return value_info_proto
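A sketch showing how the shape entries are encoded: integers become dim_value, strings become symbolic dim_param, and None leaves the dimension unset:

from onnx import TensorProto
from onnx.helper import make_tensor_value_info

x = make_tensor_value_info('X', TensorProto.FLOAT, ['batch', 3, 224, 224])
dims = x.type.tensor_type.shape.dim
print(dims[0].dim_param, dims[1].dim_value)   # batch 3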
Empties `doc_string` field on any nested protobuf messages
def strip_doc_string(proto): # type: (google.protobuf.message.Message) -> None """ Empties `doc_string` field on any nested protobuf messages """ assert isinstance(proto, google.protobuf.message.Message) for descriptor in proto.DESCRIPTOR.fields: if descriptor.name == 'doc_string': proto.ClearField(descriptor.name) elif descriptor.type == descriptor.TYPE_MESSAGE: if descriptor.label == descriptor.LABEL_REPEATED: for x in getattr(proto, descriptor.name): strip_doc_string(x) elif proto.HasField(descriptor.name): strip_doc_string(getattr(proto, descriptor.name))
Converts a tensor def object to a numpy array. Inputs: tensor: a TensorProto object. Returns: arr: the converted array.
def to_array(tensor): # type: (TensorProto) -> np.ndarray[Any] """Converts a tensor def object to a numpy array. Inputs: tensor: a TensorProto object. Returns: arr: the converted array. """ if tensor.HasField("segment"): raise ValueError( "Currently not supporting loading segments.") if tensor.data_type == TensorProto.UNDEFINED: raise ValueError("The data type is not defined.") tensor_dtype = tensor.data_type np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype] storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype] storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type] storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type] dims = tensor.dims if tensor.data_type == TensorProto.STRING: utf8_strings = getattr(tensor, storage_field) ss = list(s.decode('utf-8') for s in utf8_strings) return np.asarray(ss).astype(np_dtype).reshape(dims) if tensor.HasField("raw_data"): # Raw_bytes support: using frombuffer. return np.frombuffer( tensor.raw_data, dtype=np_dtype).reshape(dims) else: data = getattr(tensor, storage_field), # type: Sequence[np.complex64] if (tensor_dtype == TensorProto.COMPLEX64 or tensor_dtype == TensorProto.COMPLEX128): data = combine_pairs_to_complex(data) return ( np.asarray( data, dtype=storage_np_dtype) .astype(np_dtype) .reshape(dims) )
Converts a numpy array to a tensor def. Inputs: arr: a numpy array. name: (optional) the name of the tensor. Returns: tensor_def: the converted tensor def.
def from_array(arr, name=None): # type: (np.ndarray[Any], Optional[Text]) -> TensorProto """Converts a numpy array to a tensor def. Inputs: arr: a numpy array. name: (optional) the name of the tensor. Returns: tensor_def: the converted tensor def. """ tensor = TensorProto() tensor.dims.extend(arr.shape) if name: tensor.name = name if arr.dtype == np.object: # Special care for strings. tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype] # TODO: Introduce full string support. # We flatten the array in case there are 2-D arrays are specified # We throw the error below if we have a 3-D array or some kind of other # object. If you want more complex shapes then follow the below instructions. # Unlike other types where the shape is automatically inferred from # nested arrays of values, the only reliable way now to feed strings # is to put them into a flat array then specify type astype(np.object) # (otherwise all strings may have different types depending on their length) # and then specify shape .reshape([x, y, z]) flat_array = arr.flatten() for e in flat_array: if isinstance(e, text_type): tensor.string_data.append(e.encode('utf-8')) elif isinstance(e, np.ndarray): for s in e: if isinstance(s, text_type): tensor.string_data.append(s.encode('utf-8')) else: raise NotImplementedError( "Unrecognized object in the object array, expect a string, or array of bytes: ", str(type(e))) return tensor # For numerical types, directly use numpy raw bytes. try: dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype] except KeyError: raise RuntimeError( "Numpy data type not understood yet: {}".format(str(arr.dtype))) tensor.data_type = dtype tensor.raw_data = arr.tobytes() # note: tobytes() is only after 1.9. return tensor
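A round-trip sketch (numpy array -> TensorProto -> numpy array) using the two functions above, which the package exposes as onnx.numpy_helper:

import numpy as np
from onnx import numpy_helper

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
tensor = numpy_helper.from_array(arr, name='const0')
back = numpy_helper.to_array(tensor)
print(tensor.name, tensor.dims, np.array_equal(arr, back))   # const0 [2, 3] True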
Serialize an in-memory proto to bytes @params proto is an in-memory proto, such as a ModelProto, TensorProto, etc @return Serialized proto in bytes
def _serialize(proto):  # type: (Union[bytes, google.protobuf.message.Message]) -> bytes
    '''
    Serialize an in-memory proto to bytes
    @params
    proto is an in-memory proto, such as a ModelProto, TensorProto, etc
    @return
    Serialized proto in bytes
    '''
    if isinstance(proto, bytes):
        return proto
    elif hasattr(proto, 'SerializeToString') and callable(proto.SerializeToString):
        result = proto.SerializeToString()
        return result
    else:
        raise ValueError('No SerializeToString method is detected and '
                         'proto is not bytes.\ntype is {}'.format(type(proto)))
Parse bytes into an in-memory proto @params s is bytes containing serialized proto proto is an in-memory proto object @return The proto instance filled in by s
def _deserialize(s, proto): # type: (bytes, _Proto) -> _Proto ''' Parse bytes into a in-memory proto @params s is bytes containing serialized proto proto is a in-memory proto object @return The proto instance filled in by s ''' if not isinstance(s, bytes): raise ValueError('Parameter s must be bytes, but got type: {}'.format(type(s))) if not (hasattr(proto, 'ParseFromString') and callable(proto.ParseFromString)): raise ValueError('No ParseFromString method is detected. ' '\ntype is {}'.format(type(proto))) decoded = cast(Optional[int], proto.ParseFromString(s)) if decoded is not None and decoded != len(s): raise google.protobuf.message.DecodeError( "Protobuf decoding consumed too few bytes: {} out of {}".format( decoded, len(s))) return proto
Loads a serialized ModelProto into memory @params f can be a file-like object (has "read" function) or a string containing a file name format is for future use @return Loaded in-memory ModelProto
def load_model(f, format=None, load_external_data=True): # type: (Union[IO[bytes], Text], Optional[Any], bool) -> ModelProto ''' Loads a serialized ModelProto into memory @params f can be a file-like object (has "read" function) or a string containing a file name format is for future use @return Loaded in-memory ModelProto ''' s = _load_bytes(f) model = load_model_from_string(s, format=format) if load_external_data: model_filepath = _get_file_path(f) if model_filepath: base_dir = os.path.dirname(model_filepath) load_external_data_for_model(model, base_dir) return model
Loads a serialized TensorProto into memory @params f can be a file-like object (has "read" function) or a string containing a file name format is for future use @return Loaded in-memory TensorProto
def load_tensor(f, format=None): # type: (Union[IO[bytes], Text], Optional[Any]) -> TensorProto ''' Loads a serialized TensorProto into memory @params f can be a file-like object (has "read" function) or a string containing a file name format is for future use @return Loaded in-memory TensorProto ''' s = _load_bytes(f) return load_tensor_from_string(s, format=format)
Saves the ModelProto to the specified path. @params proto should be a in-memory ModelProto f can be a file-like object (has "write" function) or a string containing a file name format is for future use
def save_model(proto, f, format=None): # type: (Union[ModelProto, bytes], Union[IO[bytes], Text], Optional[Any]) -> None ''' Saves the ModelProto to the specified path. @params proto should be a in-memory ModelProto f can be a file-like object (has "write" function) or a string containing a file name format is for future use ''' if isinstance(proto, bytes): proto = _deserialize(proto, ModelProto()) model_filepath = _get_file_path(f) if model_filepath: basepath = os.path.dirname(model_filepath) proto = write_external_data_tensors(proto, basepath) s = _serialize(proto) _save_bytes(s, f)
This function combines several useful utility functions together.
def polish_model(model): # type: (ModelProto) -> ModelProto ''' This function combines several useful utility functions together. ''' onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) model = onnx.optimizer.optimize(model) onnx.checker.check_model(model) return model
Unrolls an RNN cell across time steps. Currently, 'TNC' is a preferred layout. unroll on the input of this layout runs much faster. Parameters ---------- cell : an object whose base class is RNNCell. The RNN cell to run on the input sequence. inputs : Symbol It should have shape (batch_size, length, ...) if `layout` is 'NTC', or (length, batch_size, ...) if `layout` is 'TNC'. begin_state : nested list of Symbol The initial states of the RNN sequence. drop_inputs : float, default 0. The dropout rate for inputs. Won't apply dropout if it equals 0. drop_outputs : float, default 0. The dropout rate for outputs. Won't apply dropout if it equals 0. layout : str, optional `layout` of input symbol. Only used if inputs is a single Symbol. valid_length : Symbol, NDArray or None `valid_length` specifies the length of the sequences in the batch without padding. This option is especially useful for building sequence-to-sequence models where the input and output sequences would potentially be padded. If `valid_length` is None, all sequences are assumed to have the same length. If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,). The ith element will be the length of the ith sequence in the batch. The last valid state will be return and the padded outputs will be masked with 0. Note that `valid_length` must be smaller or equal to `length`. Returns ------- outputs : Symbol the output of the RNN from this unrolling. states : list of Symbol The new state of this RNN after this unrolling. The type of this symbol is same as the output of `begin_state`. Examples -------- >>> seq_len = 3 >>> batch_size = 2 >>> input_size = 5 >>> cell = mx.gluon.rnn.LSTMCell(input_size, prefix='rnn_') >>> cell.initialize(ctx=mx.cpu()) >>> rnn_data = mx.nd.normal(loc=0, scale=1, shape=(seq_len, batch_size, input_size)) >>> state_shape = (batch_size, input_size) >>> states = [mx.nd.normal(loc=0, scale=1, shape=state_shape) for i in range(2)] >>> valid_length = mx.nd.array([2, 3]) >>> output, states = mx.gluon.contrib.rnn.rnn_cell.dynamic_unroll(cell, rnn_data, states, valid_length=valid_length, layout='TNC') >>> print(output) [[[ 0.00767238 0.00023103 0.03973929 -0.00925503 -0.05660512] [ 0.00881535 0.05428379 -0.02493718 -0.01834097 0.02189514]] [[-0.00676967 0.01447039 0.01287002 -0.00574152 -0.05734247] [ 0.01568508 0.02650866 -0.04270559 -0.04328435 0.00904011]] [[ 0. 0. 0. 0. 0. ] [ 0.01055336 0.02734251 -0.03153727 -0.03742751 -0.01378113]]] <NDArray 3x2x5 @cpu(0)>
def dynamic_unroll(cell, inputs, begin_state, drop_inputs=0, drop_outputs=0, layout='TNC', valid_length=None): """Unrolls an RNN cell across time steps. Currently, 'TNC' is a preferred layout. unroll on the input of this layout runs much faster. Parameters ---------- cell : an object whose base class is RNNCell. The RNN cell to run on the input sequence. inputs : Symbol It should have shape (batch_size, length, ...) if `layout` is 'NTC', or (length, batch_size, ...) if `layout` is 'TNC'. begin_state : nested list of Symbol The initial states of the RNN sequence. drop_inputs : float, default 0. The dropout rate for inputs. Won't apply dropout if it equals 0. drop_outputs : float, default 0. The dropout rate for outputs. Won't apply dropout if it equals 0. layout : str, optional `layout` of input symbol. Only used if inputs is a single Symbol. valid_length : Symbol, NDArray or None `valid_length` specifies the length of the sequences in the batch without padding. This option is especially useful for building sequence-to-sequence models where the input and output sequences would potentially be padded. If `valid_length` is None, all sequences are assumed to have the same length. If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,). The ith element will be the length of the ith sequence in the batch. The last valid state will be return and the padded outputs will be masked with 0. Note that `valid_length` must be smaller or equal to `length`. Returns ------- outputs : Symbol the output of the RNN from this unrolling. states : list of Symbol The new state of this RNN after this unrolling. The type of this symbol is same as the output of `begin_state`. Examples -------- >>> seq_len = 3 >>> batch_size = 2 >>> input_size = 5 >>> cell = mx.gluon.rnn.LSTMCell(input_size, prefix='rnn_') >>> cell.initialize(ctx=mx.cpu()) >>> rnn_data = mx.nd.normal(loc=0, scale=1, shape=(seq_len, batch_size, input_size)) >>> state_shape = (batch_size, input_size) >>> states = [mx.nd.normal(loc=0, scale=1, shape=state_shape) for i in range(2)] >>> valid_length = mx.nd.array([2, 3]) >>> output, states = mx.gluon.contrib.rnn.rnn_cell.dynamic_unroll(cell, rnn_data, states, valid_length=valid_length, layout='TNC') >>> print(output) [[[ 0.00767238 0.00023103 0.03973929 -0.00925503 -0.05660512] [ 0.00881535 0.05428379 -0.02493718 -0.01834097 0.02189514]] [[-0.00676967 0.01447039 0.01287002 -0.00574152 -0.05734247] [ 0.01568508 0.02650866 -0.04270559 -0.04328435 0.00904011]] [[ 0. 0. 0. 0. 0. ] [ 0.01055336 0.02734251 -0.03153727 -0.03742751 -0.01378113]]] <NDArray 3x2x5 @cpu(0)> """ # Merge is always True, so we don't need length. 
inputs, axis, F, _ = _format_sequence(0, inputs, layout, True) if axis != 0: axes = list(range(len(layout))) tmp = axes[0] axes[0] = axes[axis] axes[axis] = tmp inputs = F.transpose(inputs, axes=axes) states = begin_state if drop_inputs: inputs = F.Dropout(inputs, p=drop_inputs, axes=(axis,)) if valid_length is None: def loop_body(inputs, states): return cell(inputs, states) else: zeros = [] for s in states: zeros.append(F.zeros_like(s)) states = list(_as_list(states)) states.append(F.zeros((1))) def loop_body(inputs, states): cell_states = states[:-1] iter_no = states[-1] out, new_states = cell(inputs, cell_states) for i, state in enumerate(cell_states): new_states[i] = F.where(F.broadcast_greater(valid_length, iter_no), new_states[i], state) new_states.append(iter_no + 1) return out, new_states outputs, states = F.contrib.foreach(loop_body, inputs, states) if drop_outputs: outputs = F.Dropout(outputs, p=drop_outputs, axes=(axis,)) if valid_length is not None: if axis != 0: outputs = F.transpose(outputs, axes) outputs = F.SequenceMask(outputs, sequence_length=valid_length, use_sequence_length=True, axis=axis) # the last state is the iteration number. We don't need it. return outputs, states[:-1] else: if axis != 0: outputs = F.transpose(outputs, axes) return outputs, states
Unrolls an RNN cell across time steps. Parameters ---------- length : int Number of steps to unroll. inputs : Symbol, list of Symbol, or None If `inputs` is a single Symbol (usually the output of Embedding symbol), it should have shape (batch_size, length, ...) if `layout` is 'NTC', or (length, batch_size, ...) if `layout` is 'TNC'. If `inputs` is a list of symbols (usually output of previous unroll), they should all have shape (batch_size, ...). begin_state : nested list of Symbol, optional Input states created by `begin_state()` or output state of another cell. Created from `begin_state()` if `None`. layout : str, optional `layout` of input symbol. Only used if inputs is a single Symbol. merge_outputs : bool, optional If `False`, returns outputs as a list of Symbols. If `True`, concatenates output across time steps and returns a single symbol with shape (batch_size, length, ...) if layout is 'NTC', or (length, batch_size, ...) if layout is 'TNC'. If `None`, output whatever is faster. valid_length : Symbol, NDArray or None `valid_length` specifies the length of the sequences in the batch without padding. This option is especially useful for building sequence-to-sequence models where the input and output sequences would potentially be padded. If `valid_length` is None, all sequences are assumed to have the same length. If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,). The ith element will be the length of the ith sequence in the batch. The last valid state will be return and the padded outputs will be masked with 0. Note that `valid_length` must be smaller or equal to `length`. Returns ------- outputs : list of Symbol or Symbol Symbol (if `merge_outputs` is True) or list of Symbols (if `merge_outputs` is False) corresponding to the output from the RNN from this unrolling. states : list of Symbol The new state of this RNN after this unrolling. The type of this symbol is same as the output of `begin_state()`.
def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None, valid_length=None): """Unrolls an RNN cell across time steps. Parameters ---------- length : int Number of steps to unroll. inputs : Symbol, list of Symbol, or None If `inputs` is a single Symbol (usually the output of Embedding symbol), it should have shape (batch_size, length, ...) if `layout` is 'NTC', or (length, batch_size, ...) if `layout` is 'TNC'. If `inputs` is a list of symbols (usually output of previous unroll), they should all have shape (batch_size, ...). begin_state : nested list of Symbol, optional Input states created by `begin_state()` or output state of another cell. Created from `begin_state()` if `None`. layout : str, optional `layout` of input symbol. Only used if inputs is a single Symbol. merge_outputs : bool, optional If `False`, returns outputs as a list of Symbols. If `True`, concatenates output across time steps and returns a single symbol with shape (batch_size, length, ...) if layout is 'NTC', or (length, batch_size, ...) if layout is 'TNC'. If `None`, output whatever is faster. valid_length : Symbol, NDArray or None `valid_length` specifies the length of the sequences in the batch without padding. This option is especially useful for building sequence-to-sequence models where the input and output sequences would potentially be padded. If `valid_length` is None, all sequences are assumed to have the same length. If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,). The ith element will be the length of the ith sequence in the batch. The last valid state will be return and the padded outputs will be masked with 0. Note that `valid_length` must be smaller or equal to `length`. Returns ------- outputs : list of Symbol or Symbol Symbol (if `merge_outputs` is True) or list of Symbols (if `merge_outputs` is False) corresponding to the output from the RNN from this unrolling. states : list of Symbol The new state of this RNN after this unrolling. The type of this symbol is same as the output of `begin_state()`. """ # Dropout on inputs and outputs can be performed on the whole sequence # only when state dropout is not present. if self.drop_states: return super(VariationalDropoutCell, self).unroll(length, inputs, begin_state, layout, merge_outputs, valid_length=valid_length) self.reset() inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, True) states = _get_begin_state(self, F, begin_state, inputs, batch_size) if self.drop_inputs: inputs = F.Dropout(inputs, p=self.drop_inputs, axes=(axis,)) outputs, states = self.base_cell.unroll(length, inputs, states, layout, merge_outputs=True, valid_length=valid_length) if self.drop_outputs: outputs = F.Dropout(outputs, p=self.drop_outputs, axes=(axis,)) merge_outputs = isinstance(outputs, tensor_types) if merge_outputs is None else \ merge_outputs outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs) if valid_length is not None: outputs = _mask_sequence_variable_length(F, outputs, length, valid_length, axis, merge_outputs) return outputs, states
Change attribute names as per values in change_map dictionary. Parameters ---------- :param attrs : dict Dict of operator attributes :param change_map : dict Dict of onnx attribute name to mxnet attribute names. Returns ------- :return new_attr : dict Converted dict of operator attributes.
def _fix_attribute_names(attrs, change_map): """ Change attribute names as per values in change_map dictionary. Parameters ---------- :param attrs : dict Dict of operator attributes :param change_map : dict Dict of onnx attribute name to mxnet attribute names. Returns ------- :return new_attr : dict Converted dict of operator attributes. """ new_attr = {} for k in attrs.keys(): if k in change_map: new_attr[change_map[k]] = attrs[k] else: new_attr[k] = attrs[k] return new_attr
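A tiny sketch of the renaming behaviour: keys found in change_map are renamed, everything else passes through untouched:

attrs = {'axis': 1, 'keepdims': 0}
change_map = {'axis': 'dim'}
print(_fix_attribute_names(attrs, change_map))   # {'dim': 1, 'keepdims': 0}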
Removes attributes in the remove list from the input attribute dict :param attrs : Dict of operator attributes :param remove_list : list of attributes to be removed :return new_attr : Dict of operator attributes without the listed attributes.
def _remove_attributes(attrs, remove_list): """ Removes attributes in the remove list from the input attribute dict :param attrs : Dict of operator attributes :param remove_list : list of attributes to be removed :return new_attr : Dict of operator attributes without the listed attributes. """ new_attrs = {} for attr in attrs.keys(): if attr not in remove_list: new_attrs[attr] = attrs[attr] return new_attrs
:param attrs: Current attribute dict :param extra_attr_map: Additional attributes to be added :return: new_attr
def _add_extra_attributes(attrs, extra_attr_map):
    """
    :param attrs: Current attribute dict
    :param extra_attr_map: Additional attributes to be added
    :return: new_attr
    """
    for attr in extra_attr_map:
        if attr not in attrs:
            attrs[attr] = extra_attr_map[attr]
    return attrs
Changing onnx's pads sequence to match with mxnet's pad_width mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end) onnx: (x1_begin, x2_begin, ... , xn_begin, x1_end, ... , xn_end)
def _pad_sequence_fix(attr, kernel_dim=None):
    """Changing onnx's pads sequence to match with mxnet's pad_width
    mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end)
    onnx: (x1_begin, x2_begin, ... , xn_begin, x1_end, ... , xn_end)"""
    new_attr = ()
    if len(attr) % 2 == 0:
        for index in range(int(len(attr) / 2)):
            new_attr = new_attr + attr[index::int(len(attr) / 2)]
        # Making sure pad values are in the attr for all axes.
        if kernel_dim is not None:
            while len(new_attr) < kernel_dim*2:
                new_attr = new_attr + (0, 0)
    return new_attr
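A sketch of the reordering: ONNX lists all the begin pads and then all the end pads, while MXNet's pad_width interleaves begin/end per axis; kernel_dim pads the result out with zeros for missing axes:

print(_pad_sequence_fix((1, 2, 3, 4)))                 # -> (1, 3, 2, 4)
print(_pad_sequence_fix((1, 2, 3, 4), kernel_dim=4))   # -> (1, 3, 2, 4, 0, 0, 0, 0)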
The onnx pooling operator supports asymmetrical padding. Adds a pad operator before pooling in mxnet to work with onnx.
def _fix_pooling(pool_type, inputs, new_attr): """onnx pooling operator supports asymmetrical padding Adding pad operator before pooling in mxnet to work with onnx""" stride = new_attr.get('stride') kernel = new_attr.get('kernel') padding = new_attr.get('pad') p_value = new_attr.get('p_value') # Adding default stride. if stride is None: stride = (1,) * len(kernel) # Add padding attr if not provided. if padding is None: padding = (0,) * len(kernel) * 2 # Mxnet Pad operator supports only 4D/5D tensors. # For 1D case, these are the steps: # Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2 # Step 2. Apply padding to this changed tensor # Step 3. Remove the extra dimension added in step 1. if len(kernel) == 1: dummy_axis = 2 # setting 0 padding to the new dim to be added. padding = (0, padding[0], 0, padding[1]) pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=2) # Step 1. curr_sym = symbol.expand_dims(inputs[0], axis=dummy_axis) # Step 2. Common for all tensor sizes new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width) # Step 3: Removing extra dim added. new_pad_op = symbol.split(new_pad_op, axis=dummy_axis, num_outputs=1, squeeze_axis=1) else: # For 2D/3D cases: # Apply padding pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=len(kernel)) curr_sym = inputs[0] if pool_type == 'max': # For max pool : mode = 'edge', we should replicate the # edge values to pad, so that we only include input data values # for calculating 'max' new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width) else: # For avg pool, we should add 'zeros' for padding so mode='constant' new_pad_op = symbol.pad(curr_sym, mode='constant', pad_width=pad_width) # Apply pooling without pads. if pool_type == 'lp': new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel, p_value=p_value) else: new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel) return new_pooling_op
A workaround for the 'use_bias' attribute: since onnx doesn't provide this attribute, we have to check the number of inputs to decide it.
def _fix_bias(op_name, attrs, num_inputs):
    """A workaround for the 'use_bias' attribute: since onnx doesn't provide
    this attribute, we have to check the number of inputs to decide it."""
    if num_inputs == 3:
        attrs['no_bias'] = False
    elif num_inputs == 2:
        attrs['no_bias'] = True
    else:
        raise ValueError("Unexpected number of inputs for: {}".format(op_name))
    return attrs
A workaround to reshape bias term to (1, num_channel).
def _fix_broadcast(op_name, inputs, broadcast_axis, proto_obj): """A workaround to reshape bias term to (1, num_channel).""" if int(len(proto_obj._params)) > 0: assert len(list(inputs)) == 2 input0_shape = get_input_shape(inputs[0], proto_obj) #creating reshape shape reshape_shape = list(len(input0_shape) * (1,)) reshape_shape[broadcast_axis] = -1 reshape_shape = tuple(reshape_shape) reshape_op_sym = symbol.reshape(inputs[1], shape=reshape_shape) op_sym = getattr(symbol, op_name)(inputs[0], reshape_op_sym) else: op_sym = op_name return op_sym
A workaround for getting 'channels' or 'units' since onnx doesn't provide these attributes. We check the shape of the weights provided to get the number.
def _fix_channels(op_name, attrs, inputs, proto_obj): """A workaround for getting 'channels' or 'units' since onnx don't provide these attributes. We check the shape of weights provided to get the number. """ weight_name = inputs[1].name if not weight_name in proto_obj._params: raise ValueError("Unable to get channels/units attr from onnx graph.") else: wshape = proto_obj._params[weight_name].shape assert len(wshape) >= 2, "Weights shape is invalid: {}".format(wshape) if op_name == 'FullyConnected': attrs['num_hidden'] = wshape[0] else: if op_name == 'Convolution': # Weight shape for Conv and FC: (M x C x kH x kW) : M is number of # feature maps/hidden and C is number of channels attrs['num_filter'] = wshape[0] elif op_name == 'Deconvolution': # Weight shape for DeConv : (C x M x kH x kW) : M is number of # feature maps/filters and C is number of channels attrs['num_filter'] = wshape[1] return attrs
Using the FullyConnected operator in place of linalg_gemm to perform the same operation.
def _fix_gemm(op_name, inputs, old_attr, proto_obj): """Using FullyConnected operator in place of linalg_gemm to perform same operation""" op_sym = getattr(symbol, op_name, None) alpha = float(old_attr.get('alpha', 1.0)) beta = float(old_attr.get('beta', 1.0)) trans_a = int(old_attr.get('transA', 0)) trans_b = int(old_attr.get('transB', 0)) if trans_a: inputs[0] = symbol.transpose(inputs[0], axes=(1, 0)) if not trans_b: inputs[1] = symbol.transpose(inputs[1], axes=(1, 0)) new_inputs = [alpha*inputs[0], inputs[1], beta*inputs[2]] new_attr = {'num_hidden' : proto_obj._params[inputs[2].name].shape[0]} return op_sym, new_attr, new_inputs
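A numpy sketch checking the algebra behind the rewrite above, assuming MXNet's FullyConnected(X, W, b) computes X.dot(W.T) + b (names and shapes below are illustrative only):

import numpy as np

alpha, beta, trans_a, trans_b = 2.0, 0.5, 0, 0
A, B, C = np.random.rand(4, 3), np.random.rand(3, 5), np.random.rand(5)

gemm = alpha * A.dot(B) + beta * C               # ONNX Gemm with transA=transB=0

X = A.T if trans_a else A
W = B if trans_b else B.T                        # mirror the transposes applied above
fc = (alpha * X).dot(W.T) + beta * C             # FullyConnected semantics
print(np.allclose(gemm, fc))                     # True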