<SYSTEM_TASK:> Constructs the Lambda Permission resource allowing the source service to invoke the function this event <END_TASK> <USER_TASK:> Description: def _construct_permission(self, function, source_arn=None, source_account=None, suffix="", event_source_token=None): """Constructs the Lambda Permission resource allowing the source service to invoke the function this event source triggers. :returns: the permission resource :rtype: model.lambda_.LambdaPermission """
    lambda_permission = LambdaPermission(self.logical_id + 'Permission' + suffix,
                                         attributes=function.get_passthrough_resource_attributes())
    try:
        # Name will not be available for Alias resources
        function_name_or_arn = function.get_runtime_attr("name")
    except NotImplementedError:
        function_name_or_arn = function.get_runtime_attr("arn")

    lambda_permission.Action = 'lambda:invokeFunction'
    lambda_permission.FunctionName = function_name_or_arn
    lambda_permission.Principal = self.principal
    lambda_permission.SourceArn = source_arn
    lambda_permission.SourceAccount = source_account
    lambda_permission.EventSourceToken = event_source_token

    return lambda_permission
<SYSTEM_TASK:> Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds. <END_TASK> <USER_TASK:> Description: def to_cloudformation(self, **kwargs): """Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this pull event expands :rtype: list """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")

    resources = []

    events_rule = EventsRule(self.logical_id)
    resources.append(events_rule)

    events_rule.ScheduleExpression = self.Schedule
    events_rule.Targets = [self._construct_target(function)]

    source_arn = events_rule.get_runtime_attr("arn")
    if CONDITION in function.resource_attributes:
        events_rule.set_resource_attribute(CONDITION, function.resource_attributes[CONDITION])

    resources.append(self._construct_permission(function, source_arn=source_arn))

    return resources
<SYSTEM_TASK:> Constructs the Target property for the CloudWatch Events Rule. <END_TASK> <USER_TASK:> Description: def _construct_target(self, function): """Constructs the Target property for the CloudWatch Events Rule. :returns: the Target property :rtype: dict """
    target = {
        'Arn': function.get_runtime_attr("arn"),
        'Id': self.logical_id + 'LambdaTarget'
    }
    if self.Input is not None:
        target['Input'] = self.Input

    if self.InputPath is not None:
        target['InputPath'] = self.InputPath

    return target
<SYSTEM_TASK:> Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers. <END_TASK> <USER_TASK:> Description: def to_cloudformation(self, **kwargs): """Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers. :param dict kwargs: S3 bucket resource :returns: a list of vanilla CloudFormation Resources, to which this S3 event expands :rtype: list """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")

    if 'bucket' not in kwargs or kwargs['bucket'] is None:
        raise TypeError("Missing required keyword argument: bucket")

    if 'bucket_id' not in kwargs or kwargs['bucket_id'] is None:
        raise TypeError("Missing required keyword argument: bucket_id")

    bucket = kwargs['bucket']
    bucket_id = kwargs['bucket_id']

    resources = []

    source_account = ref('AWS::AccountId')
    permission = self._construct_permission(function, source_account=source_account)
    if CONDITION in permission.resource_attributes:
        self._depend_on_lambda_permissions_using_tag(bucket, permission)
    else:
        self._depend_on_lambda_permissions(bucket, permission)
    resources.append(permission)

    # NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are
    # multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification
    # configuration for each event. This is the reason why we continue to use existing bucket dict and append onto
    # it.
    #
    # NOTE: There is some fragile logic here where we will append multiple resources to output
    # SAM template but de-dupe them when merging into output CFN template. This is scary because the order of
    # merging is literally "last one wins", which works fine because we linearly loop through the template once.
    # The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of
    # to_cloudformation() to output template.
    self._inject_notification_configuration(function, bucket)
    resources.append(S3Bucket.from_dict(bucket_id, bucket))

    return resources
<SYSTEM_TASK:> Since conditional DependsOn is not supported, this undocumented way of <END_TASK> <USER_TASK:> Description: def _depend_on_lambda_permissions_using_tag(self, bucket, permission): """ Since conditional DependsOn is not supported, this undocumented way of implicitly making a dependency through tags is used. See https://stackoverflow.com/questions/34607476/cloudformation-apply-condition-on-dependson It is done by using Ref wrapped in a conditional Fn::If. Using Ref implies a dependency, so CloudFormation will automatically wait once it reaches that function, the same as if you were using a DependsOn. """
    properties = bucket.get('Properties', None)
    if properties is None:
        properties = {}
        bucket['Properties'] = properties

    tags = properties.get('Tags', None)
    if tags is None:
        tags = []
        properties['Tags'] = tags

    dep_tag = {
        'sam:ConditionalDependsOn:' + permission.logical_id: {
            'Fn::If': [
                permission.resource_attributes[CONDITION],
                ref(permission.logical_id),
                'no dependency'
            ]
        }
    }

    properties['Tags'] = tags + get_tag_list(dep_tag)
    return bucket
<SYSTEM_TASK:> Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers. <END_TASK> <USER_TASK:> Description: def to_cloudformation(self, **kwargs): """Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands :rtype: list """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")

    return [self._construct_permission(function, source_arn=self.Topic),
            self._inject_subscription(function, self.Topic, self.FilterPolicy)]
<SYSTEM_TASK:> If this API Event Source refers to an explicit API resource, resolve the reference and grab <END_TASK> <USER_TASK:> Description: def resources_to_link(self, resources): """ If this API Event Source refers to an explicit API resource, resolve the reference and grab necessary data from the explicit API """
    rest_api_id = self.RestApiId
    if isinstance(rest_api_id, dict) and "Ref" in rest_api_id:
        rest_api_id = rest_api_id["Ref"]

    # If RestApiId is a resource in the same template, then we try to find the StageName by following the reference
    # Otherwise we default to a wildcard. This stage name is solely used to construct the permission to
    # allow this stage to invoke the Lambda function. If we are unable to resolve the stage name, we will
    # simply permit all stages to invoke this Lambda function
    # This hack is necessary because customers could use !ImportValue, !Ref or other intrinsic functions which
    # can be sometimes impossible to resolve (ie. when it has cross-stack references)
    permitted_stage = "*"
    stage_suffix = "AllStages"
    explicit_api = None
    if isinstance(rest_api_id, string_types):

        if rest_api_id in resources \
                and "Properties" in resources[rest_api_id] \
                and "StageName" in resources[rest_api_id]["Properties"]:

            explicit_api = resources[rest_api_id]["Properties"]
            permitted_stage = explicit_api["StageName"]

            # Stage could be an intrinsic, in which case leave the suffix to default value
            if isinstance(permitted_stage, string_types):
                if not permitted_stage:
                    raise InvalidResourceException(rest_api_id, 'StageName cannot be empty.')
                stage_suffix = permitted_stage
            else:
                stage_suffix = "Stage"

        else:
            # RestApiId is a string, not an intrinsic, but we did not find a valid API resource for this ID
            raise InvalidEventException(self.relative_id,
                                        "RestApiId property of Api event must reference a valid "
                                        "resource in the same template.")

    return {
        'explicit_api': explicit_api,
        'explicit_api_stage': {
            'permitted_stage': permitted_stage,
            'suffix': stage_suffix
        }
    }
<SYSTEM_TASK:> If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing <END_TASK> <USER_TASK:> Description: def to_cloudformation(self, **kwargs): """If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the x-amazon-apigateway-integration into the Swagger body for a provided implicit API. :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi \ be provided. :returns: a list of vanilla CloudFormation Resources, to which this Api event expands :rtype: list """
    resources = []

    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")

    if self.Method is not None:
        # Convert to lower case so that user can specify either GET or get
        self.Method = self.Method.lower()

    resources.extend(self._get_permissions(kwargs))

    explicit_api = kwargs['explicit_api']
    if explicit_api.get("__MANAGE_SWAGGER"):
        self._add_swagger_integration(explicit_api, function)

    return resources
<SYSTEM_TASK:> Adds the path and method for this Api event source to the Swagger body for the provided RestApi. <END_TASK> <USER_TASK:> Description: def _add_swagger_integration(self, api, function): """Adds the path and method for this Api event source to the Swagger body for the provided RestApi. :param model.apigateway.ApiGatewayRestApi rest_api: the RestApi to which the path and method should be added. """
    swagger_body = api.get("DefinitionBody")
    if swagger_body is None:
        return

    function_arn = function.get_runtime_attr('arn')
    partition = ArnGenerator.get_partition_name()
    uri = fnSub('arn:' + partition + ':apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/' +
                make_shorthand(function_arn) + '/invocations')

    editor = SwaggerEditor(swagger_body)

    if editor.has_integration(self.Path, self.Method):
        # Cannot add the Lambda Integration, if it is already present
        raise InvalidEventException(
            self.relative_id,
            'API method "{method}" defined multiple times for path "{path}".'.format(
                method=self.Method, path=self.Path))

    condition = None
    if CONDITION in function.resource_attributes:
        condition = function.resource_attributes[CONDITION]
    editor.add_lambda_integration(self.Path, self.Method, uri, self.Auth, api.get('Auth'), condition=condition)

    if self.Auth:
        method_authorizer = self.Auth.get('Authorizer')

        if method_authorizer:
            api_auth = api.get('Auth')
            api_authorizers = api_auth and api_auth.get('Authorizers')

            if method_authorizer != 'AWS_IAM':
                if not api_authorizers:
                    raise InvalidEventException(
                        self.relative_id,
                        'Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] '
                        'because the related API does not define any Authorizers.'.format(
                            authorizer=method_authorizer, method=self.Method, path=self.Path))

                if method_authorizer != 'NONE' and not api_authorizers.get(method_authorizer):
                    raise InvalidEventException(
                        self.relative_id,
                        'Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] '
                        'because it wasn\'t defined in the API\'s Authorizers.'.format(
                            authorizer=method_authorizer, method=self.Method, path=self.Path))

                if method_authorizer == 'NONE' and not api_auth.get('DefaultAuthorizer'):
                    raise InvalidEventException(
                        self.relative_id,
                        'Unable to set Authorizer on API method [{method}] for path [{path}] because \'NONE\' '
                        'is only a valid value when a DefaultAuthorizer on the API is specified.'.format(
                            method=self.Method, path=self.Path))

        editor.add_auth_to_method(api=api, path=self.Path, method_name=self.Method, auth=self.Auth)

    api["DefinitionBody"] = editor.swagger
<SYSTEM_TASK:> Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as <END_TASK> <USER_TASK:> Description: def resolve_parameter_refs(self, input): """ Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as !GetAtt, !Sub or !Ref to non-parameters will be left untouched. Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into transform's output because it changes the template structure by inlining parameter values. :param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions :return: A copy of a dictionary with parameter references replaced by actual value. """
return self._traverse(input, self.parameters, self._try_resolve_parameter_refs)
<SYSTEM_TASK:> Customers can provide a reference to a "derived" SAM resource such as Alias of a Function or Stage of an API <END_TASK> <USER_TASK:> Description: def resolve_sam_resource_refs(self, input, supported_resource_refs): """ Customers can provide a reference to a "derived" SAM resource such as Alias of a Function or Stage of an API resource. This method recursively walks the tree, converting all derived references to the real resource name, if it is present. Example: {"Ref": "MyFunction.Alias"} -> {"Ref": "MyFunctionAliasLive"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For first call, it is recommended that `template` is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param SupportedResourceReferences supported_resource_refs: Object that contains information about the resource references supported in this SAM template, along with the value they should resolve to. :return list errors: List of dictionary containing information about invalid reference. Empty list otherwise """
return self._traverse(input, supported_resource_refs, self._try_resolve_sam_resource_refs)
<SYSTEM_TASK:> Some SAM resources have their logical ids mutated from the original id that the customer writes in the <END_TASK> <USER_TASK:> Description: def resolve_sam_resource_id_refs(self, input, supported_resource_id_refs): """ Some SAM resources have their logical ids mutated from the original id that the customer writes in the template. This method recursively walks the tree and updates these logical ids from the old value to the new value that is generated by SAM. Example: {"Ref": "MyLayer"} -> {"Ref": "MyLayerABC123"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For first call, it is recommended that `template` is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return list errors: List of dictionary containing information about invalid reference. Empty list otherwise """
return self._traverse(input, supported_resource_id_refs, self._try_resolve_sam_resource_id_refs)
<SYSTEM_TASK:> Driver method that performs the actual traversal of input and calls the appropriate `resolver_method` <END_TASK> <USER_TASK:> Description: def _traverse(self, input, resolution_data, resolver_method): """ Driver method that performs the actual traversal of input and calls the appropriate `resolver_method` to perform the resolution. :param input: Any primitive type (dict, array, string etc) whose value might contain an intrinsic function :param resolution_data: Data that will help with resolution. For example, when resolving parameter references, this object will contain a dictionary of parameter names and their values. :param resolver_method: Method that will be called to actually resolve an intrinsic function. This method is called with the parameters `(input, resolution_data)`. :return: Modified `input` with intrinsics resolved """
    # There is no data to help with resolution. Skip the traversal altogether
    if len(resolution_data) == 0:
        return input

    #
    # Traversal Algorithm:
    #
    # Imagine the input dictionary/list as a tree. We are doing a Pre-Order tree traversal here where we first
    # process the root node before going to its children. Dict and Lists are the only two iterable nodes.
    # Everything else is a leaf node.
    #
    # We do a Pre-Order traversal to handle the case where `input` contains intrinsic function as its only child
    # ie. input = {"Ref": "foo"}.
    #
    # We will try to resolve the intrinsics if we can, otherwise return the original input. In some cases, resolving
    # an intrinsic will result in a terminal state ie. {"Ref": "foo"} could resolve to a string "bar". In other
    # cases, resolving intrinsics is only partial and we might need to continue traversing the tree (ex: Fn::Sub)
    # to handle nested intrinsics. All of these cases lend well towards a Pre-Order traversal where we try and
    # process the intrinsic, which results in a modified sub-tree to traverse.
    #
    input = resolver_method(input, resolution_data)

    if isinstance(input, dict):
        return self._traverse_dict(input, resolution_data, resolver_method)
    elif isinstance(input, list):
        return self._traverse_list(input, resolution_data, resolver_method)
    else:
        # We can iterate only over dict or list types. Primitive types are terminals
        return input
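For illustration, here is a minimal, self-contained sketch of the pre-order resolution pattern described above, assuming a resolver that only handles {"Ref": <parameter>} lookups (the function and names here are illustrative, not the SAM translator's actual classes):

    def resolve_refs(node, parameters):
        # Pre-order: try to resolve the current node first ...
        if isinstance(node, dict) and len(node) == 1 and "Ref" in node:
            name = node["Ref"]
            if name in parameters:
                return parameters[name]
        # ... then recurse into the children of whatever remains.
        if isinstance(node, dict):
            return {k: resolve_refs(v, parameters) for k, v in node.items()}
        if isinstance(node, list):
            return [resolve_refs(v, parameters) for v in node]
        return node

    template = {"BucketName": {"Ref": "Stage"}, "Tags": [{"Ref": "Owner"}]}
    print(resolve_refs(template, {"Stage": "prod", "Owner": "team-a"}))
    # {'BucketName': 'prod', 'Tags': ['team-a']}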
<SYSTEM_TASK:> Traverse a dictionary to resolve intrinsic functions on every value <END_TASK> <USER_TASK:> Description: def _traverse_dict(self, input_dict, resolution_data, resolver_method): """ Traverse a dictionary to resolve intrinsic functions on every value :param input_dict: Input dictionary to traverse :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified dictionary with values resolved """
    for key, value in input_dict.items():
        input_dict[key] = self._traverse(value, resolution_data, resolver_method)

    return input_dict
<SYSTEM_TASK:> Traverse a list to resolve intrinsic functions on every element <END_TASK> <USER_TASK:> Description: def _traverse_list(self, input_list, resolution_data, resolver_method): """ Traverse a list to resolve intrinsic functions on every element :param input_list: List of input :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified list with intrinsic functions resolved """
    for index, value in enumerate(input_list):
        input_list[index] = self._traverse(value, resolution_data, resolver_method)

    return input_list
<SYSTEM_TASK:> Try to resolve SAM resource references on the given template. If the given object looks like one of the <END_TASK> <USER_TASK:> Description: def _try_resolve_sam_resource_refs(self, input, supported_resource_refs): """ Try to resolve SAM resource references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param SupportedResourceReferences supported_resource_refs: Object containing information about available resource references and the values they resolve to. :return: Modified input dictionary with references resolved """
    if not self._is_intrinsic_dict(input):
        return input

    function_type = list(input.keys())[0]
    return self.supported_intrinsics[function_type].resolve_resource_refs(input, supported_resource_refs)
<SYSTEM_TASK:> Try to resolve SAM resource id references on the given template. If the given object looks like one of the <END_TASK> <USER_TASK:> Description: def _try_resolve_sam_resource_id_refs(self, input, supported_resource_id_refs): """ Try to resolve SAM resource id references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return: Modified input dictionary with id references resolved """
    if not self._is_intrinsic_dict(input):
        return input

    function_type = list(input.keys())[0]
    return self.supported_intrinsics[function_type].resolve_resource_id_refs(input, supported_resource_id_refs)
<SYSTEM_TASK:> Could the input contain an intrinsic function? <END_TASK> <USER_TASK:> Description: def _is_intrinsic_dict(self, input): """ Could the input contain an intrinsic function? :param input: Object to be checked :return: True, if the input contains a supported intrinsic function. False otherwise """
    # All intrinsic functions are dictionaries with just one key
    return isinstance(input, dict) \
        and len(input) == 1 \
        and list(input.keys())[0] in self.supported_intrinsics
<SYSTEM_TASK:> Returns the CloudWatch Logs Subscription Filter and Lambda Permission to which this CloudWatch Logs event source <END_TASK> <USER_TASK:> Description: def to_cloudformation(self, **kwargs): """Returns the CloudWatch Logs Subscription Filter and Lambda Permission to which this CloudWatch Logs event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this push event expands :rtype: list """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")

    source_arn = self.get_source_arn()
    permission = self._construct_permission(function, source_arn=source_arn)
    subscription_filter = self.get_subscription_filter(function, permission)
    resources = [permission, subscription_filter]

    return resources
<SYSTEM_TASK:> Converts the given template to IAM-ready policy statement by substituting template parameters with the given <END_TASK> <USER_TASK:> Description: def convert(self, template_name, parameter_values): """ Converts the given template to IAM-ready policy statement by substituting template parameters with the given values. :param template_name: Name of the template :param parameter_values: Values for all parameters of the template :return dict: Dictionary containing policy statement :raises ValueError: If the given inputs don't represent valid template :raises InsufficientParameterValues: If the parameter values don't have values for all required parameters """
    if not self.has(template_name):
        raise TemplateNotFoundException(template_name)

    template = self.get(template_name)
    return template.to_statement(parameter_values)
<SYSTEM_TASK:> Is this a valid policy template dictionary <END_TASK> <USER_TASK:> Description: def _is_valid_templates_dict(policy_templates_dict, schema=None): """ Is this a valid policy template dictionary :param dict policy_templates_dict: Data to be validated :param dict schema: Optional, dictionary containing JSON Schema representing policy template :return: True, if it is valid. :raises ValueError: If the template dictionary doesn't match up with the schema """
    if not schema:
        schema = PolicyTemplatesProcessor._read_schema()

    try:
        jsonschema.validate(policy_templates_dict, schema)
    except ValidationError as ex:
        # Stringifying the exception will give us useful error message
        raise ValueError(str(ex))

    return True
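For illustration, a standalone sketch of the jsonschema.validate call used above, with a hypothetical schema and documents rather than the real policy-template schema:

    import jsonschema
    from jsonschema import ValidationError

    schema = {
        "type": "object",
        "required": ["Templates"],
        "properties": {"Templates": {"type": "object"}},
    }

    try:
        jsonschema.validate({"Templates": {}}, schema)  # passes silently
        jsonschema.validate({"Policies": {}}, schema)   # raises ValidationError
    except ValidationError as ex:
        # Stringifying the exception gives a readable error message
        print(str(ex))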
<SYSTEM_TASK:> Render a chart or page to local html files. <END_TASK> <USER_TASK:> Description: def render_chart_to_file(self, template_name: str, chart: Any, path: str): """ Render a chart or page to local html files. :param chart: A Chart or Page object :param path: The destination file which the html code write to :param template_name: The name of template file. """
    tpl = self.env.get_template(template_name)
    html = tpl.render(chart=self.generate_js_link(chart))
    write_utf8_html_file(path, self._reg_replace(html))
<SYSTEM_TASK:> Decode base64, padding being optional. <END_TASK> <USER_TASK:> Description: def decode_base64(data: str) -> bytes: """Decode base64, padding being optional. :param data: Base64 data as an ASCII byte string :returns: The decoded byte string. """
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += "=" * (4 - missing_padding)
    return base64.decodebytes(data.encode("utf-8"))
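A quick usage check of the padding fix, assuming the decode_base64 function above is in scope ("aGk" is "hi" base64-encoded with its trailing "=" stripped):

    print(decode_base64("aGk"))   # b'hi' -- works even though the trailing '=' was stripped
    print(decode_base64("aGk="))  # b'hi' -- already-padded input is left unchanged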
<SYSTEM_TASK:> Parses a string and returns a pin-num. <END_TASK> <USER_TASK:> Description: def parse_pin(name_str): """Parses a string and returns a pin-num."""
    if len(name_str) < 1:
        raise ValueError("Expecting pin name to be at least 4 characters.")
    if name_str[0] != 'P':
        raise ValueError("Expecting pin name to start with P")
    pin_str = name_str[1:].split('/')[0]
    if not pin_str.isdigit():
        raise ValueError("Expecting numeric pin number.")
    return int(pin_str)
<SYSTEM_TASK:> Start the loop. <END_TASK> <USER_TASK:> Description: def run_loop(leds=all_leds): """ Start the loop. :param `leds`: Which LEDs to light up upon switch press. :type `leds`: sequence of LED objects """
    print('Loop started.\nPress Ctrl+C to break out of the loop.')
    while 1:
        try:
            if switch():
                [led.on() for led in leds]
            else:
                [led.off() for led in leds]
        except OSError:  # VCPInterrupt # Ctrl+C in interpreter mode.
            break
<SYSTEM_TASK:> Search vpaths for the c file that matches the provided object_file. <END_TASK> <USER_TASK:> Description: def find_c_file(obj_file, vpath): """ Search vpaths for the c file that matches the provided object_file. :param str obj_file: object file to find the matching c file for :param List[str] vpath: List of base paths, similar to gcc vpath :return: str path to c file or None """
    c_file = None
    relative_c_file = os.path.splitext(obj_file)[0] + ".c"
    relative_c_file = relative_c_file.lstrip('/\\')
    for p in vpath:
        possible_c_file = os.path.join(p, relative_c_file)
        if os.path.exists(possible_c_file):
            c_file = possible_c_file
            break

    return c_file
<SYSTEM_TASK:> Generate header with module table entries for builtin modules. <END_TASK> <USER_TASK:> Description: def generate_module_table_header(modules): """ Generate header with module table entries for builtin modules. :param List[(module_name, obj_module, enabled_define)] modules: module defs :return: None """
    # Print header file for all external modules.
    mod_defs = []
    print("// Automatically generated by makemoduledefs.py.\n")
    for module_name, obj_module, enabled_define in modules:
        mod_def = "MODULE_DEF_{}".format(module_name.upper())
        mod_defs.append(mod_def)
        print((
            "#if ({enabled_define})\n"
            "    extern const struct _mp_obj_module_t {obj_module};\n"
            "    #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n"
            "#else\n"
            "    #define {mod_def}\n"
            "#endif\n"
        ).format(module_name=module_name, obj_module=obj_module,
                 enabled_define=enabled_define, mod_def=mod_def))

    print("\n#define MICROPY_REGISTERED_MODULES \\")

    for mod_def in mod_defs:
        print("    {mod_def} \\".format(mod_def=mod_def))

    print("// MICROPY_REGISTERED_MODULES")
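A small usage sketch, assuming the generate_module_table_header function above and hypothetical module entries in the (module_name, obj_module, enabled_define) form it expects:

    modules = [
        ("MP_QSTR_ubinascii", "mp_module_ubinascii", "MICROPY_PY_UBINASCII"),
        ("MP_QSTR_ujson", "mp_module_ujson", "MICROPY_PY_UJSON"),
    ]
    generate_module_table_header(modules)  # prints the #if/#define table to stdout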
<SYSTEM_TASK:> converts CPython module names into MicroPython equivalents <END_TASK> <USER_TASK:> Description: def uimports(code): """ converts CPython module names into MicroPython equivalents """
    for uimport in UIMPORTLIST:
        uimport = bytes(uimport, 'utf8')
        code = code.replace(uimport, b'u' + uimport)
    return code
<SYSTEM_TASK:> indents paragraphs of text for rst formatting <END_TASK> <USER_TASK:> Description: def indent(block, spaces): """ indents paragraphs of text for rst formatting """
    new_block = ''
    for line in block.split('\n'):
        new_block += spaces + line + '\n'
    return new_block
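A short usage example of the indent helper above (every line of the block gets the given prefix):

    print(indent("line one\nline two", "    "))
    #     line one
    #     line two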
<SYSTEM_TASK:> creates a table given any set of columns <END_TASK> <USER_TASK:> Description: def gen_table(contents): """ creates a table given any set of columns """
    xlengths = []
    ylengths = []
    for column in contents:
        col_len = 0
        for entry in column:
            lines = entry.split('\n')
            for line in lines:
                col_len = max(len(line) + 2, col_len)
        xlengths.append(col_len)
    for i in range(len(contents[0])):
        ymax = 0
        for j in range(len(contents)):
            ymax = max(ymax, len(contents[j][i].split('\n')))
        ylengths.append(ymax)

    table_divider = '+' + ''.join(['-' * i + '+' for i in xlengths]) + '\n'
    table = table_divider
    for i in range(len(ylengths)):
        row = [column[i] for column in contents]
        row = [entry + '\n' * (ylengths[i] - len(entry.split('\n'))) for entry in row]
        row = [entry.split('\n') for entry in row]
        for j in range(ylengths[i]):
            k = 0
            for entry in row:
                width = xlengths[k]
                table += ''.join(['| {:{}}'.format(entry[j], width - 1)])
                k += 1
            table += '|\n'
        table += table_divider
    return table + '\n'
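An illustrative call to gen_table above; each argument list is one column, and the entries line up row by row (sample data here is made up):

    columns = [
        ["Code", "print('hi')"],
        ["Output", "hi"],
    ]
    print(gen_table(columns))  # prints an RST-style grid table with two columns and two rows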
<SYSTEM_TASK:> Initializes the found DFU device so that we can program it. <END_TASK> <USER_TASK:> Description: def init(): """Initializes the found DFU device so that we can program it."""
    global __dev, __cfg_descr
    devices = get_dfu_devices(idVendor=__VID, idProduct=__PID)
    if not devices:
        raise ValueError('No DFU device found')
    if len(devices) > 1:
        raise ValueError("Multiple DFU devices found")
    __dev = devices[0]
    __dev.set_configuration()

    # Claim DFU interface
    usb.util.claim_interface(__dev, __DFU_INTERFACE)

    # Find the DFU configuration descriptor, either in the device or interfaces
    __cfg_descr = None
    for cfg in __dev.configurations():
        __cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors)
        if __cfg_descr:
            break
        for itf in cfg.interfaces():
            __cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors)
            if __cfg_descr:
                break

    # Get device into idle state
    for attempt in range(4):
        status = get_status()
        if status == __DFU_STATE_DFU_IDLE:
            break
        elif (status == __DFU_STATE_DFU_DOWNLOAD_IDLE
              or status == __DFU_STATE_DFU_UPLOAD_IDLE):
            abort_request()
        else:
            clr_status()
<SYSTEM_TASK:> Performs a MASS erase (i.e. erases the entire device). <END_TASK> <USER_TASK:> Description: def mass_erase(): """Performs a MASS erase (i.e. erases the entire device)."""
    # Send DNLOAD with first byte=0x41
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, "\x41", __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: erase failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: erase failed")
<SYSTEM_TASK:> Sets the address for the next operation. <END_TASK> <USER_TASK:> Description: def set_address(addr): """Sets the address for the next operation."""
    # Send DNLOAD with first byte=0x21 and page address
    buf = struct.pack("<BI", 0x21, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: set address failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: set address failed")
<SYSTEM_TASK:> Writes a buffer into memory. This routine assumes that memory has <END_TASK> <USER_TASK:> Description: def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0): """Writes a buffer into memory. This routine assumes that memory has already been erased. """
    xfer_count = 0
    xfer_bytes = 0
    xfer_total = len(buf)
    xfer_base = addr

    while xfer_bytes < xfer_total:
        if __verbose and xfer_count % 512 == 0:
            print("Addr 0x%x %dKBs/%dKBs..." % (xfer_base + xfer_bytes,
                                                xfer_bytes // 1024,
                                                xfer_total // 1024))
        if progress and xfer_count % 2 == 0:
            progress(progress_addr, xfer_base + xfer_bytes - progress_addr,
                     progress_size)

        # Set mem write address
        set_address(xfer_base + xfer_bytes)

        # Send DNLOAD with fw data
        chunk = min(__cfg_descr.wTransferSize, xfer_total - xfer_bytes)
        __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE,
                            buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT)

        # Execute last command
        if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
            raise Exception("DFU: write memory failed")

        # Check command state
        if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
            raise Exception("DFU: write memory failed")

        xfer_count += 1
        xfer_bytes += chunk
<SYSTEM_TASK:> Writes a single page. This routine assumes that memory has already <END_TASK> <USER_TASK:> Description: def write_page(buf, xfer_offset): """Writes a single page. This routine assumes that memory has already been erased. """
    xfer_base = 0x08000000

    # Set mem write address
    set_address(xfer_base + xfer_offset)

    # Send DNLOAD with fw data
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: write memory failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: write memory failed")

    if __verbose:
        print("Write: 0x%x " % (xfer_base + xfer_offset))
<SYSTEM_TASK:> Exit DFU mode, and start running the program. <END_TASK> <USER_TASK:> Description: def exit_dfu(): """Exit DFU mode, and start running the program."""
    # set jump address
    set_address(0x08000000)

    # Send DNLOAD with 0 length to exit DFU
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, None, __TIMEOUT)

    try:
        # Execute last command
        if get_status() != __DFU_STATE_DFU_MANIFEST:
            print("Failed to reset device")

        # Release device
        usb.util.dispose_resources(__dev)
    except:
        pass
<SYSTEM_TASK:> Parses the struct defined by `fmt` from `data`, stores the parsed fields <END_TASK> <USER_TASK:> Description: def consume(fmt, data, names): """Parses the struct defined by `fmt` from `data`, stores the parsed fields into a named tuple using `names`. Returns the named tuple, and the data with the struct stripped off."""
    size = struct.calcsize(fmt)
    return named(struct.unpack(fmt, data[:size]), names), data[size:]
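An illustrative use of consume, with a minimal stand-in for the `named` helper it relies on (the stand-in and the sample data are assumptions for this sketch, not part of the original module):

    import collections
    import struct

    def named(values, names):
        # Minimal stand-in: build a namedtuple from space-separated field names.
        return collections.namedtuple("Struct", names)(*values)

    data = struct.pack("<2H", 0x0483, 0xDF11) + b"rest"
    fields, data = consume("<2H", data, "vid pid")
    print(hex(fields.vid), hex(fields.pid), data)  # 0x483 0xdf11 b'rest'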
<SYSTEM_TASK:> Prints a list of devices detected in DFU mode. <END_TASK> <USER_TASK:> Description: def list_dfu_devices(*args, **kwargs): """Prints a list of devices detected in DFU mode."""
    devices = get_dfu_devices(*args, **kwargs)
    if not devices:
        print("No DFU capable devices found")
        return
    for device in devices:
        print("Bus {} Device {:03d}: ID {:04x}:{:04x}"
              .format(device.bus, device.address,
                      device.idVendor, device.idProduct))
        layout = get_memory_layout(device)
        print("Memory Layout")
        for entry in layout:
            print("    0x{:x} {:2d} pages of {:3d}K bytes"
                  .format(entry['addr'], entry['num_pages'],
                          entry['page_size'] // 1024))
<SYSTEM_TASK:> Writes the indicated elements into the target memory, <END_TASK> <USER_TASK:> Description: def write_elements(elements, mass_erase_used, progress=None): """Writes the indicated elements into the target memory, erasing as needed. """
    mem_layout = get_memory_layout(__dev)
    for elem in elements:
        addr = elem['addr']
        size = elem['size']
        data = elem['data']
        elem_size = size
        elem_addr = addr
        if progress:
            progress(elem_addr, 0, elem_size)
        while size > 0:
            write_size = size
            if not mass_erase_used:
                for segment in mem_layout:
                    if addr >= segment['addr'] and \
                       addr <= segment['last_addr']:
                        # We found the page containing the address we want to
                        # write, erase it
                        page_size = segment['page_size']
                        page_addr = addr & ~(page_size - 1)
                        if addr + write_size > page_addr + page_size:
                            write_size = page_addr + page_size - addr
                        page_erase(page_addr)
                        break
            write_memory(addr, data[:write_size], progress,
                         elem_addr, elem_size)
            data = data[write_size:]
            addr += write_size
            size -= write_size
            if progress:
                progress(elem_addr, addr - elem_addr, elem_size)
<SYSTEM_TASK:> Prints a progress report suitable for use on the command line. <END_TASK> <USER_TASK:> Description: def cli_progress(addr, offset, size): """Prints a progress report suitable for use on the command line."""
    width = 25
    done = offset * width // size
    print("\r0x{:08x} {:7d} [{}{}] {:3d}% "
          .format(addr, size, '=' * done, ' ' * (width - done),
                  offset * 100 // size), end="")
    try:
        sys.stdout.flush()
    except OSError:
        pass  # Ignore Windows CLI "WinError 87" on Python 3.6
    if offset == size:
        print("")
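A small usage sketch for cli_progress above, assuming `sys` is imported as in the original module; the loop simulates a write that advances in 256-byte steps and finishes with a newline when offset equals size:

    for offset in range(0, 1024 + 1, 256):
        cli_progress(0x08000000, offset, 1024)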
<SYSTEM_TASK:> Test program for verifying this file's functionality. <END_TASK> <USER_TASK:> Description: def main(): """Test program for verifying this file's functionality."""
    global __verbose
    # Parse CMD args
    parser = argparse.ArgumentParser(description='DFU Python Util')
    #parser.add_argument("path", help="file path")
    parser.add_argument(
        "-l", "--list",
        help="list available DFU devices",
        action="store_true",
        default=False
    )
    parser.add_argument(
        "-m", "--mass-erase",
        help="mass erase device",
        action="store_true",
        default=False
    )
    parser.add_argument(
        "-u", "--upload",
        help="read file from DFU device",
        dest="path",
        default=False
    )
    parser.add_argument(
        "-v", "--verbose",
        help="increase output verbosity",
        action="store_true",
        default=False
    )
    args = parser.parse_args()

    __verbose = args.verbose

    if args.list:
        list_dfu_devices(idVendor=__VID, idProduct=__PID)
        return

    init()

    if args.mass_erase:
        print("Mass erase...")
        mass_erase()

    if args.path:
        elements = read_dfu_file(args.path)
        if not elements:
            return
        print("Writing memory...")
        write_elements(elements, args.mass_erase, progress=cli_progress)

        print("Exiting DFU...")
        exit_dfu()
        return

    print("No command specified")
<SYSTEM_TASK:> Load data from an external file for tensor. <END_TASK> <USER_TASK:> Description: def load_external_data_for_tensor(tensor, base_dir): # type: (TensorProto, Text) -> None """ Load data from an external file for tensor. @params tensor: a TensorProto object. base_dir: directory that contains the external data. """
if tensor.HasField("raw_data"): # already loaded return info = ExternalDataInfo(tensor) file_location = _sanitize_path(info.location) external_data_file_path = os.path.join(base_dir, file_location) with open(external_data_file_path, 'rb') as data_file: if info.offset: data_file.seek(info.offset) if info.length: tensor.raw_data = data_file.read(info.length) else: tensor.raw_data = data_file.read()
<SYSTEM_TASK:> Loads external tensors into model <END_TASK> <USER_TASK:> Description: def load_external_data_for_model(model, base_dir): # type: (ModelProto, Text) -> None """ Loads external tensors into model @params model: ModelProto to load external data to base_dir: directory that contains external data """
    for tensor in _get_all_tensors(model):
        if uses_external_data(tensor):
            load_external_data_for_tensor(tensor, base_dir)
<SYSTEM_TASK:> call to set all tensors as external data. save_model saves all the tensors data as external data after calling this function. <END_TASK> <USER_TASK:> Description: def convert_model_to_external_data(model, all_tensors_to_one_file=True, location=None): # type: (ModelProto, bool, Optional[Text]) -> None """ call to set all tensors as external data. save_model saves all the tensors data as external data after calling this function. @params model: ModelProto to be converted. all_tensors_to_one_file: If true, save all tensors to one external file specified by location. If false, save each tensor to a file named with the tensor name. location: specify the external file that all tensors to save to. If not specified, will use the model name. """
    if all_tensors_to_one_file:
        file_name = Text(uuid.uuid1())
        if location:
            file_name = location
        for tensor in _get_all_tensors(model):
            set_external_data(tensor, file_name)
    else:
        for tensor in _get_all_tensors(model):
            set_external_data(tensor, tensor.name)
<SYSTEM_TASK:> call to set all tensors data as embedded data. save_model saves all the tensors data as embedded data after calling this function. <END_TASK> <USER_TASK:> Description: def convert_model_from_external_data(model): # type: (ModelProto) -> None """ call to set all tensors data as embedded data. save_model saves all the tensors data as embedded data after calling this function. @params model: ModelProto to be converted. """
    for tensor in _get_all_tensors(model):
        if uses_external_data(tensor):
            if not tensor.HasField("raw_data"):
                raise ValueError("raw_data field doesn't exist.")
            del tensor.external_data[:]
            tensor.data_location = TensorProto.DEFAULT
<SYSTEM_TASK:> Write tensor data to an external file according to information in the `external_data` field. <END_TASK> <USER_TASK:> Description: def save_external_data(tensor, base_path): # type: (TensorProto, Text) -> None """ Write tensor data to an external file according to information in the `external_data` field. @params tensor: Tensor object to be serialized base_path: System path of a folder where tensor data is to be stored """
    info = ExternalDataInfo(tensor)
    external_data_file_path = os.path.join(base_path, info.location)

    # Retrieve the tensor's data from raw_data or load external file
    if not tensor.HasField("raw_data"):
        raise ValueError("raw_data field doesn't exist.")

    # Create file if it doesn't exist
    if not os.path.isfile(external_data_file_path):
        open(external_data_file_path, 'ab').close()

    # Open file for reading and writing at random locations ('r+b')
    with open(external_data_file_path, 'r+b') as data_file:
        data_file.seek(0, 2)
        if info.offset is not None:
            # Pad file to required offset if needed
            file_size = data_file.tell()
            if info.offset > file_size:
                data_file.write(b"\0" * (info.offset - file_size))
            data_file.seek(info.offset)
        offset = data_file.tell()
        data_file.write(tensor.raw_data)
        set_external_data(tensor, info.location, offset, data_file.tell() - offset)
<SYSTEM_TASK:> Create an iterator of tensors from node attributes of an ONNX model. <END_TASK> <USER_TASK:> Description: def _get_attribute_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto] """Create an iterator of tensors from node attributes of an ONNX model."""
    for node in onnx_model_proto.graph.node:
        for attribute in node.attribute:
            if attribute.HasField("t"):
                yield attribute.t
            for tensor in attribute.tensors:
                yield tensor
<SYSTEM_TASK:> Remove a field from a Tensor's external_data key-value store. <END_TASK> <USER_TASK:> Description: def remove_external_data_field(tensor, field_key): # type: (TensorProto, Text) -> None """ Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed """
    for (i, field) in enumerate(tensor.external_data):
        if field.key == field_key:
            del tensor.external_data[i]
<SYSTEM_TASK:> Write external data of all tensors to files on disk. <END_TASK> <USER_TASK:> Description: def write_external_data_tensors(model, filepath): # type: (ModelProto, Text) -> ModelProto """ Write external data of all tensors to files on disk. Note: This function also strips basepath information from all tensors' external_data fields. @params model: Model object which is the source of tensors to serialize. filepath: System path to the directory which should be treated as base path for external data. @return The modified model object. """
    for tensor in _get_all_tensors(model):
        if uses_external_data(tensor):
            save_external_data(tensor, filepath)
            tensor.ClearField(str('raw_data'))

    return model
<SYSTEM_TASK:> Import a referenced message and return a handle <END_TASK> <USER_TASK:> Description: def _import_message(self, type_name): # type: (d.FieldDescriptorProto) -> Text """Import a referenced message and return a handle"""
    name = cast(Text, type_name)

    if name[0] == '.' and name[1].isupper() and name[2].islower():
        # Message defined in this file
        return name[1:]

    message_fd = self.descriptors.message_to_fd[name]
    if message_fd.name == self.fd.name:
        # message defined in this package
        split = name.split('.')
        for i, segment in enumerate(split):
            if segment and segment[0].isupper() and segment[1].islower():
                return ".".join(split[i:])

    # Not in package. Must import
    split = name.split(".")
    for i, segment in enumerate(split):
        if segment and segment[0].isupper() and segment[1].islower():
            assert message_fd.name.endswith('.proto')
            import_name = self._import(message_fd.name[:-6].replace('-', '_') + "_pb2", segment)
            remains = ".".join(split[i + 1:])

            if not remains:
                return import_name
            raise AssertionError("Don't support nested imports yet")
            # return new_nested_import(import_name, remains)

    raise AssertionError("Could not parse local name " + name)
<SYSTEM_TASK:> Construct a NodeProto. <END_TASK> <USER_TASK:> Description: def make_node( op_type, # type: Text inputs, # type: Sequence[Text] outputs, # type: Sequence[Text] name=None, # type: Optional[Text] doc_string=None, # type: Optional[Text] domain=None, # type: Optional[Text] **kwargs # type: Any ): # type: (...) -> NodeProto """Construct a NodeProto. Arguments: op_type (string): The name of the operator to construct inputs (list of string): list of input names outputs (list of string): list of output names name (string, default None): optional unique identifier for NodeProto doc_string (string, default None): optional documentation string for NodeProto domain (string, default None): optional domain for NodeProto. If it's None, we will just use default domain (which is empty) **kwargs (dict): the attributes of the node. The acceptable values are documented in :func:`make_attribute`. """
    node = NodeProto()
    node.op_type = op_type
    node.input.extend(inputs)
    node.output.extend(outputs)
    if name:
        node.name = name
    if doc_string:
        node.doc_string = doc_string
    if domain is not None:
        node.domain = domain
    if kwargs:
        node.attribute.extend(
            make_attribute(key, value)
            for key, value in sorted(kwargs.items()))
    return node
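A short usage sketch using the public onnx.helper API that these builders correspond to, assuming the onnx package is installed (the graph contents here are made up):

    from onnx import TensorProto, helper

    node = helper.make_node('Relu', inputs=['x'], outputs=['y'], name='relu0')
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
    graph = helper.make_graph([node], 'relu_graph', [x], [y])
    model = helper.make_model(graph)
    print(model.graph.node[0].op_type)  # Relu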
<SYSTEM_TASK:> An internal helper to convert the input to bytes or to False. <END_TASK> <USER_TASK:> Description: def _to_bytes_or_false(val): # type: (Union[Text, bytes]) -> Union[bytes, bool] """An internal helper to convert the input to bytes or to False. The criteria for conversion is as follows and should be python 2 and 3 compatible: - If val is py2 str or py3 bytes: return bytes - If val is py2 unicode or py3 str: return val.encode('utf-8') - Otherwise, return False """
    if isinstance(val, bytes):
        return val
    else:
        try:
            return val.encode('utf-8')
        except AttributeError:
            return False
<SYSTEM_TASK:> Makes a ValueInfoProto based on the data type and shape. <END_TASK> <USER_TASK:> Description: def make_tensor_value_info( name, # type: Text elem_type, # type: int shape, # type: Optional[Sequence[Union[Text, int]]] doc_string="", # type: Text shape_denotation=None, # type: Optional[List[Text]] ): # type: (...) -> ValueInfoProto """Makes a ValueInfoProto based on the data type and shape."""
    value_info_proto = ValueInfoProto()
    value_info_proto.name = name
    if doc_string:
        value_info_proto.doc_string = doc_string

    tensor_type_proto = value_info_proto.type.tensor_type
    tensor_type_proto.elem_type = elem_type

    tensor_shape_proto = tensor_type_proto.shape

    if shape is not None:
        # You might think this is a no-op (extending a normal Python
        # list by [] certainly is), but protobuf lists work a little
        # differently; if a field is never set, it is omitted from the
        # resulting protobuf; a list that is explicitly set to be
        # empty will get an (empty) entry in the protobuf. This
        # difference is visible to our consumers, so make sure we emit
        # an empty shape!
        tensor_shape_proto.dim.extend([])

        if shape_denotation:
            if len(shape_denotation) != len(shape):
                raise ValueError(
                    'Invalid shape_denotation. '
                    'Must be of the same length as shape.')

        for i, d in enumerate(shape):
            dim = tensor_shape_proto.dim.add()
            if d is None:
                pass
            elif isinstance(d, integer_types):
                dim.dim_value = d
            elif isinstance(d, text_type):
                dim.dim_param = d
            else:
                raise ValueError(
                    'Invalid item in shape: {}. '
                    'Needs to be of integer_types or text_type.'.format(d))

            if shape_denotation:
                dim.denotation = shape_denotation[i]

    return value_info_proto
<SYSTEM_TASK:> Empties `doc_string` field on any nested protobuf messages <END_TASK> <USER_TASK:> Description: def strip_doc_string(proto): # type: (google.protobuf.message.Message) -> None """ Empties `doc_string` field on any nested protobuf messages """
    assert isinstance(proto, google.protobuf.message.Message)
    for descriptor in proto.DESCRIPTOR.fields:
        if descriptor.name == 'doc_string':
            proto.ClearField(descriptor.name)
        elif descriptor.type == descriptor.TYPE_MESSAGE:
            if descriptor.label == descriptor.LABEL_REPEATED:
                for x in getattr(proto, descriptor.name):
                    strip_doc_string(x)
            elif proto.HasField(descriptor.name):
                strip_doc_string(getattr(proto, descriptor.name))
<SYSTEM_TASK:> Converts a tensor def object to a numpy array. <END_TASK> <USER_TASK:> Description: def to_array(tensor): # type: (TensorProto) -> np.ndarray[Any] """Converts a tensor def object to a numpy array. Inputs: tensor: a TensorProto object. Returns: arr: the converted array. """
if tensor.HasField("segment"): raise ValueError( "Currently not supporting loading segments.") if tensor.data_type == TensorProto.UNDEFINED: raise ValueError("The data type is not defined.") tensor_dtype = tensor.data_type np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype] storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype] storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type] storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type] dims = tensor.dims if tensor.data_type == TensorProto.STRING: utf8_strings = getattr(tensor, storage_field) ss = list(s.decode('utf-8') for s in utf8_strings) return np.asarray(ss).astype(np_dtype).reshape(dims) if tensor.HasField("raw_data"): # Raw_bytes support: using frombuffer. return np.frombuffer( tensor.raw_data, dtype=np_dtype).reshape(dims) else: data = getattr(tensor, storage_field), # type: Sequence[np.complex64] if (tensor_dtype == TensorProto.COMPLEX64 or tensor_dtype == TensorProto.COMPLEX128): data = combine_pairs_to_complex(data) return ( np.asarray( data, dtype=storage_np_dtype) .astype(np_dtype) .reshape(dims) )
<SYSTEM_TASK:> Converts a numpy array to a tensor def. <END_TASK> <USER_TASK:> Description: def from_array(arr, name=None): # type: (np.ndarray[Any], Optional[Text]) -> TensorProto """Converts a numpy array to a tensor def. Inputs: arr: a numpy array. name: (optional) the name of the tensor. Returns: tensor_def: the converted tensor def. """
    tensor = TensorProto()
    tensor.dims.extend(arr.shape)
    if name:
        tensor.name = name

    if arr.dtype == np.object:
        # Special care for strings.
        tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
        # TODO: Introduce full string support.
        # We flatten the array in case 2-D arrays are specified.
        # We throw the error below if we have a 3-D array or some kind of other
        # object. If you want more complex shapes then follow the below instructions.
        # Unlike other types where the shape is automatically inferred from
        # nested arrays of values, the only reliable way now to feed strings
        # is to put them into a flat array then specify type astype(np.object)
        # (otherwise all strings may have different types depending on their length)
        # and then specify shape .reshape([x, y, z])
        flat_array = arr.flatten()
        for e in flat_array:
            if isinstance(e, text_type):
                tensor.string_data.append(e.encode('utf-8'))
            elif isinstance(e, np.ndarray):
                for s in e:
                    if isinstance(s, text_type):
                        tensor.string_data.append(s.encode('utf-8'))
            else:
                raise NotImplementedError(
                    "Unrecognized object in the object array, expect a string, or array of bytes: ",
                    str(type(e)))
        return tensor

    # For numerical types, directly use numpy raw bytes.
    try:
        dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
    except KeyError:
        raise RuntimeError(
            "Numpy data type not understood yet: {}".format(str(arr.dtype)))
    tensor.data_type = dtype
    tensor.raw_data = arr.tobytes()  # note: tobytes() is only after 1.9.

    return tensor
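A round-trip sketch using the public onnx.numpy_helper module that from_array and to_array above belong to, assuming onnx and numpy are installed:

    import numpy as np
    from onnx import numpy_helper

    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    tensor = numpy_helper.from_array(arr, name='weights')
    back = numpy_helper.to_array(tensor)
    assert np.array_equal(arr, back)
    print(list(tensor.dims), back.dtype)  # [2, 3] float32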
<SYSTEM_TASK:> Helper function to obtain the shape of an array <END_TASK> <USER_TASK:> Description: def get_input_shape(sym, proto_obj): """Helper function to obtain the shape of an array"""
    arg_params = proto_obj.arg_dict
    aux_params = proto_obj.aux_dict

    model_input_shape = [data[1] for data in proto_obj.model_metadata.get('input_tensor_data')]
    data_names = [data[0] for data in proto_obj.model_metadata.get('input_tensor_data')]

    # creating dummy inputs
    inputs = []
    for in_shape in model_input_shape:
        inputs.append(nd.ones(shape=in_shape))

    data_shapes = []
    for idx, input_name in enumerate(data_names):
        data_shapes.append((input_name, inputs[idx].shape))

    ctx = context.cpu()

    # create a module
    mod = module.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None)
    mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
    mod.set_params(arg_params=arg_params, aux_params=aux_params)

    data_forward = []
    for idx, input_name in enumerate(data_names):
        val = inputs[idx]
        data_forward.append(val)

    mod.forward(io.DataBatch(data_forward))
    result = mod.get_outputs()[0].asnumpy()

    return result.shape
<SYSTEM_TASK:> r"""Resize image with OpenCV. <END_TASK> <USER_TASK:> Description: def imresize(src, w, h, *args, **kwargs): r"""Resize image with OpenCV. .. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built with USE_OPENCV=1 for `imresize` to work. Parameters ---------- src : NDArray source image w : int, required Width of resized image. h : int, required Height of resized image. interp : int, optional, default=1 Interpolation method (default=cv2.INTER_LINEAR). Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method metioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. out : NDArray, optional The output NDArray to hold the result. Returns ------- out : NDArray or list of NDArrays The output of this function. Example ------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> new_image = mx.img.resize(image, 240, 360) >>> new_image <NDArray 240x360x3 @cpu(0)> """
return _internal._cvimresize(src, w, h, *args, **kwargs)
<SYSTEM_TASK:> Decode an image to an NDArray. <END_TASK> <USER_TASK:> Description: def imdecode(buf, *args, **kwargs): """Decode an image to an NDArray. .. note:: `imdecode` uses OpenCV (not the CV2 Python library). MXNet must have been built with USE_OPENCV=1 for `imdecode` to work. Parameters ---------- buf : str/bytes/bytearray or numpy.ndarray Binary image data as string or numpy ndarray. flag : int, optional, default=1 1 for three channel color output. 0 for grayscale output. to_rgb : int, optional, default=1 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default). out : NDArray, optional Output buffer. Use `None` for automatic allocation. Returns ------- NDArray An `NDArray` containing the image. Example ------- >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 224x224x3 @cpu(0)> Set `flag` parameter to 0 to get grayscale output >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image, flag=0) >>> image <NDArray 224x224x1 @cpu(0)> Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR) >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image, to_rgb=0) >>> image <NDArray 224x224x3 @cpu(0)> """
    if not isinstance(buf, nd.NDArray):
        if sys.version_info[0] == 3 and not isinstance(buf, (bytes, bytearray, np.ndarray)):
            raise ValueError('buf must be of type bytes, bytearray or numpy.ndarray,'
                             'if you would like to input type str, please convert to bytes')
        buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)

    return _internal._cvimdecode(buf, *args, **kwargs)
<SYSTEM_TASK:> Scales down crop size if it's larger than image size. <END_TASK> <USER_TASK:> Description: def scale_down(src_size, size): """Scales down crop size if it's larger than image size. If width/height of the crop is larger than the width/height of the image, sets the width/height to the width/height of the image. Parameters ---------- src_size : tuple of int Size of the image in (width, height) format. size : tuple of int Size of the crop in (width, height) format. Returns ------- tuple of int A tuple containing the scaled crop size in (width, height) format. Example -------- >>> src_size = (640,480) >>> size = (720,120) >>> new_size = mx.img.scale_down(src_size, size) >>> new_size (640,106) """
w, h = size sw, sh = src_size if sh < h: w, h = float(w * sh) / h, sh if sw < w: w, h = sw, float(h * sw) / w return int(w), int(h)
<SYSTEM_TASK:> Pad image border with OpenCV. <END_TASK> <USER_TASK:> Description: def copyMakeBorder(src, top, bot, left, right, *args, **kwargs): """Pad image border with OpenCV. Parameters ---------- src : NDArray source image top : int, required Top margin. bot : int, required Bottom margin. left : int, required Left margin. right : int, required Right margin. type : int, optional, default='0' Filling type (default=cv2.BORDER_CONSTANT). 0 - cv2.BORDER_CONSTANT - Adds a constant colored border. 1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the border elements, like this : fedcba|abcdefgh|hgfedcb 2 - cv2.BORDER_REFLECT_101 or cv2.BORDER_DEFAULT - Same as above, but with a slight change, like this : gfedcb|abcdefgh|gfedcba 3 - cv2.BORDER_REPLICATE - Last element is replicated throughout, like this: aaaaaa|abcdefgh|hhhhhhh 4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg value : double, optional, default=0 (Deprecated! Use ``values`` instead.) Fill with single value. values : tuple of <double>, optional, default=[] Fill with value(RGB[A] or gray), up to 4 channels. out : NDArray, optional The output NDArray to hold the result. Returns ------- out : NDArray or list of NDArrays The output of this function. Example -------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> new_image = mx.image.copyMakeBorder(image, 1, 2, 3, 4, type=0) >>> new_image <NDArray 2324x3489x3 @cpu(0)> """
return _internal._cvcopyMakeBorder(src, top, bot, left, right, *args, **kwargs)
<SYSTEM_TASK:> Get the interpolation method for resize functions. <END_TASK> <USER_TASK:> Description: def _get_interp_method(interp, sizes=()): """Get the interpolation method for resize functions. The major purpose of this function is to wrap a random interp method selection and an auto-estimation method. Parameters ---------- interp : int interpolation method for all resizing operations Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method mentioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. sizes : tuple of int (old_height, old_width, new_height, new_width), if None provided, auto(9) will return Area(2) anyway. Returns ------- int interp method from 0 to 4 """
if interp == 9: if sizes: assert len(sizes) == 4 oh, ow, nh, nw = sizes if nh > oh and nw > ow: return 2 elif nh < oh and nw < ow: return 3 else: return 1 else: return 2 if interp == 10: return random.randint(0, 4) if interp not in (0, 1, 2, 3, 4): raise ValueError('Unknown interp method %d' % interp) return interp
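A quick illustration of the selection logic above (values follow the implementation: with `sizes` given as (old_h, old_w, new_h, new_w), auto mode 9 returns 2 when both dimensions grow, 3 when both shrink, and 1 otherwise; without `sizes` it falls back to 2):

>>> _get_interp_method(9, (100, 100, 50, 50))    # shrinking both dimensions
3
>>> _get_interp_method(9, (100, 100, 200, 200))  # enlarging both dimensions
2
>>> _get_interp_method(9)                        # no sizes given
2
>>> _get_interp_method(10) in (0, 1, 2, 3, 4)    # random selection
True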
<SYSTEM_TASK:> Resizes shorter edge to size. <END_TASK> <USER_TASK:> Description: def resize_short(src, size, interp=2): """Resizes shorter edge to size. .. note:: `resize_short` uses OpenCV (not the CV2 Python library). MXNet must have been built with OpenCV for `resize_short` to work. Resizes the original image by setting the shorter edge to size and setting the longer edge accordingly. Resizing function is called from OpenCV. Parameters ---------- src : NDArray The original image. size : int The length to be set for the shorter edge. interp : int, optional, default=2 Interpolation method used for resizing the image. Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method mentioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. Returns ------- NDArray An 'NDArray' containing the resized image. Example ------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> size = 640 >>> new_image = mx.img.resize_short(image, size) >>> new_image <NDArray 640x960x3 @cpu(0)> """
h, w, _ = src.shape if h > w: new_h, new_w = size * h // w, size else: new_h, new_w = size, size * w // h return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
<SYSTEM_TASK:> Crops the image `src` to the given `size` by trimming on all four <END_TASK> <USER_TASK:> Description: def center_crop(src, size, interp=2): """Crops the image `src` to the given `size` by trimming on all four sides and preserving the center of the image. Upsamples if `src` is smaller than `size`. .. note:: This requires MXNet to be compiled with USE_OPENCV. Parameters ---------- src : NDArray Binary source image data. size : list or tuple of int The desired output image size. interp : int, optional, default=2 Interpolation method. See resize_short for details. Returns ------- NDArray The cropped image. Tuple (x, y, width, height) where x, y are the positions of the crop in the original image and width, height the dimensions of the crop. Example ------- >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.image.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500)) >>> cropped_image <NDArray 500x1000x3 @cpu(0)> >>> x, y, width, height (1241, 910, 1000, 500) """
h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = int((w - new_w) / 2) y0 = int((h - new_h) / 2) out = fixed_crop(src, x0, y0, new_w, new_h, size, interp) return out, (x0, y0, new_w, new_h)
<SYSTEM_TASK:> Normalize src with mean and std. <END_TASK> <USER_TASK:> Description: def color_normalize(src, mean, std=None): """Normalize src with mean and std. Parameters ---------- src : NDArray Input image mean : NDArray RGB mean to be subtracted std : NDArray RGB standard deviation to divide by Returns ------- NDArray An `NDArray` containing the normalized image. """
if mean is not None: src -= mean if std is not None: src /= std return src
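A minimal usage sketch (hedged: assumes a float image in HWC layout so the per-channel mean/std broadcast over the last axis; note the function modifies `src` in place and also returns it):

>>> img = mx.nd.ones((2, 2, 3)) * 128.0
>>> mean = mx.nd.array([123.68, 116.28, 103.53])
>>> std = mx.nd.array([58.395, 57.12, 57.375])
>>> out = mx.img.color_normalize(img, mean, std)
>>> out.shape
(2, 2, 3)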
<SYSTEM_TASK:> Randomly crop src with size. Randomize area and aspect ratio. <END_TASK> <USER_TASK:> Description: def random_size_crop(src, size, area, ratio, interp=2, **kwargs): """Randomly crop src with size. Randomize area and aspect ratio. Parameters ---------- src : NDArray Input image size : tuple of (int, int) Size of the crop formatted as (width, height). area : float in (0, 1] or tuple of (float, float) If tuple, minimum area and maximum area to be maintained after cropping If float, minimum area to be maintained after cropping, maximum area is set to 1.0 ratio : tuple of (float, float) Aspect ratio range as (min_aspect_ratio, max_aspect_ratio) interp: int, optional, default=2 Interpolation method. See resize_short for details. Returns ------- NDArray An `NDArray` containing the cropped image. Tuple A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the original image and (width, height) are the dimensions of the cropped image. """
h, w, _ = src.shape src_area = h * w if 'min_area' in kwargs: warnings.warn('`min_area` is deprecated. Please use `area` instead.', DeprecationWarning) area = kwargs.pop('min_area') assert not kwargs, "unexpected keyword arguments for `random_size_crop`." if isinstance(area, numeric_types): area = (area, 1.0) for _ in range(10): target_area = random.uniform(area[0], area[1]) * src_area log_ratio = (np.log(ratio[0]), np.log(ratio[1])) new_ratio = np.exp(random.uniform(*log_ratio)) new_w = int(round(np.sqrt(target_area * new_ratio))) new_h = int(round(np.sqrt(target_area / new_ratio))) if new_w <= w and new_h <= h: x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size, interp) return out, (x0, y0, new_w, new_h) # fall back to center_crop return center_crop(src, size, interp)
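A hedged usage sketch (the crop location and size are random so results vary between calls; the image is assumed to be a decoded HWC NDArray, e.g. from mx.img.imdecode, and the output is resized to the requested (width, height)):

>>> with open("flower.jpg", 'rb') as fp:
...     img = mx.img.imdecode(fp.read())
...
>>> cropped, (x, y, w, h) = mx.img.random_size_crop(img, (224, 224), area=0.3, ratio=(3.0/4.0, 4.0/3.0))
>>> cropped.shape
(224, 224, 3)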
<SYSTEM_TASK:> Creates an augmenter list. <END_TASK> <USER_TASK:> Description: def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False, mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0, pca_noise=0, rand_gray=0, inter_method=2): """Creates an augmenter list. Parameters ---------- data_shape : tuple of int Shape for output data resize : int Resize shorter edge if larger than 0 at the beginning rand_crop : bool Whether to enable random cropping other than center crop rand_resize : bool Whether to enable random sized cropping, require rand_crop to be enabled rand_gray : float [0, 1], probability to convert to grayscale for all channels, the number of channels will not be reduced to 1 rand_mirror : bool Whether to apply horizontal flip to image with probability 0.5 mean : np.ndarray or None Mean pixel values for [r, g, b] std : np.ndarray or None Standard deviations for [r, g, b] brightness : float Brightness jittering range (percent) contrast : float Contrast jittering range (percent) saturation : float Saturation jittering range (percent) hue : float Hue jittering range (percent) pca_noise : float PCA noise level (percent) inter_method : int, default=2(Area-based) Interpolation method for all resizing operations Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method mentioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). Examples -------- >>> # An example of creating multiple augmenters >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True, ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05, ... saturation=0.125, pca_noise=0.05, inter_method=10) >>> # dump the details >>> for aug in augs: ... aug.dumps() """
auglist = [] if resize > 0: auglist.append(ResizeAug(resize, inter_method)) crop_size = (data_shape[2], data_shape[1]) if rand_resize: assert rand_crop auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method)) elif rand_crop: auglist.append(RandomCropAug(crop_size, inter_method)) else: auglist.append(CenterCropAug(crop_size, inter_method)) if rand_mirror: auglist.append(HorizontalFlipAug(0.5)) auglist.append(CastAug()) if brightness or contrast or saturation: auglist.append(ColorJitterAug(brightness, contrast, saturation)) if hue: auglist.append(HueJitterAug(hue)) if pca_noise > 0: eigval = np.array([55.46, 4.794, 1.148]) eigvec = np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]) auglist.append(LightingAug(pca_noise, eigval, eigvec)) if rand_gray > 0: auglist.append(RandomGrayAug(rand_gray)) if mean is True: mean = nd.array([123.68, 116.28, 103.53]) elif mean is not None: assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3] if std is True: std = nd.array([58.395, 57.12, 57.375]) elif std is not None: assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3] if mean is not None or std is not None: auglist.append(ColorNormalizeAug(mean, std)) return auglist
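The returned augmenters are callables that can be applied in order to a decoded image; a hedged sketch (the crop size comes from data_shape, and the image stays in HWC layout here because the transpose to CHW normally happens later in the iterator's postprocessing):

>>> augs = mx.image.CreateAugmenter(data_shape=(3, 224, 224), rand_mirror=True, mean=True, std=True)
>>> with open("flower.jpg", 'rb') as fp:
...     img = mx.image.imdecode(fp.read())
...
>>> for aug in augs:
...     img = aug(img)
...
>>> img.shape
(224, 224, 3)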
<SYSTEM_TASK:> Saves the Augmenter to string <END_TASK> <USER_TASK:> Description: def dumps(self): """Saves the Augmenter to string Returns ------- str JSON formatted string that describes the Augmenter. """
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
<SYSTEM_TASK:> Override the default to avoid duplicate dump. <END_TASK> <USER_TASK:> Description: def dumps(self): """Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
<SYSTEM_TASK:> Resets the iterator and ignore roll over data <END_TASK> <USER_TASK:> Description: def hard_reset(self): """Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle: random.shuffle(self.seq) if self.imgrec is not None: self.imgrec.reset() self.cur = 0 self._allow_read = True self._cache_data = None self._cache_label = None self._cache_idx = None
<SYSTEM_TASK:> Helper function for reading in next sample. <END_TASK> <USER_TASK:> Description: def next_sample(self): """Helper function for reading in next sample."""
if self._allow_read is False: raise StopIteration if self.seq is not None: if self.cur < self.num_image: idx = self.seq[self.cur] else: if self.last_batch_handle != 'discard': self.cur = 0 raise StopIteration self.cur += 1 if self.imgrec is not None: s = self.imgrec.read_idx(idx) header, img = recordio.unpack(s) if self.imglist is None: return header.label, img else: return self.imglist[idx][0], img else: label, fname = self.imglist[idx] return label, self.read_image(fname) else: s = self.imgrec.read() if s is None: if self.last_batch_handle != 'discard': self.imgrec.reset() raise StopIteration header, img = recordio.unpack(s) return header.label, img
<SYSTEM_TASK:> Helper function for batchifying data <END_TASK> <USER_TASK:> Description: def _batchify(self, batch_data, batch_label, start=0): """Helper function for batchifying data"""
i = start batch_size = self.batch_size try: while i < batch_size: label, s = self.next_sample() data = self.imdecode(s) try: self.check_valid_image(data) except RuntimeError as e: logging.debug('Invalid image, skipping: %s', str(e)) continue data = self.augmentation_transform(data) assert i < batch_size, 'Batch size must be multiples of augmenter output length' batch_data[i] = self.postprocess_data(data) batch_label[i] = label i += 1 except StopIteration: if not i: raise StopIteration return i
<SYSTEM_TASK:> Decodes a string or byte string to an NDArray. <END_TASK> <USER_TASK:> Description: def imdecode(self, s): """Decodes a string or byte string to an NDArray. See mx.img.imdecode for more details."""
def locate(): """Locate the image file/index if decode fails.""" if self.seq is not None: idx = self.seq[(self.cur % self.num_image) - 1] else: idx = (self.cur % self.num_image) - 1 if self.imglist is not None: _, fname = self.imglist[idx] msg = "filename: {}".format(fname) else: msg = "index: {}".format(idx) return "Broken image " + msg try: img = imdecode(s) except Exception as e: raise RuntimeError("{}, {}".format(locate(), e)) return img
<SYSTEM_TASK:> Convert character vectors to integer vectors. <END_TASK> <USER_TASK:> Description: def word_to_vector(word): """ Convert character vectors to integer vectors. """
vector = [] for char in list(word): vector.append(char2int(char)) return vector
<SYSTEM_TASK:> Convert integer vectors to character vectors. <END_TASK> <USER_TASK:> Description: def vector_to_word(vector): """ Convert integer vectors to character vectors. """
word = "" for vec in vector: word = word + int2char(vec) return word
<SYSTEM_TASK:> Convert integer vectors to character vectors for batch. <END_TASK> <USER_TASK:> Description: def char_conv(out): """ Convert integer vectors to character vectors for batch. """
out_conv = list() for i in range(out.shape[0]): tmp_str = '' for j in range(out.shape[1]): if int(out[i][j]) >= 0: tmp_char = int2char(int(out[i][j])) if int(out[i][j]) == 27: tmp_char = '' tmp_str = tmp_str + tmp_char out_conv.append(tmp_str) return out_conv
<SYSTEM_TASK:> Get paths to all frames in SAX-view folders that contain a complete set of frames <END_TASK> <USER_TASK:> Description: def get_frames(root_path): """Get paths to all frames in SAX-view folders that contain a complete set of 30 frames"""
ret = []
for root, _, files in os.walk(root_path):
    root = root.replace('\\', '/')
    files = [s for s in files if ".dcm" in s]
    if len(files) == 0 or not files[0].endswith(".dcm") or root.find("sax") == -1:
        continue
    prefix = files[0].rsplit('-', 1)[0]
    fileset = set(files)
    expected = ["%s-%04d.dcm" % (prefix, i + 1) for i in range(30)]
    if all(x in fileset for x in expected):
        ret.append([root + "/" + x for x in expected])
# sort for reproducibility
return sorted(ret, key=lambda x: x[0])
<SYSTEM_TASK:> crop center and resize <END_TASK> <USER_TASK:> Description: def crop_resize(img, size): """crop center and resize"""
if img.shape[0] < img.shape[1]:
    img = img.T
# we crop image from center
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
# resize the square crop to size x size
resized_img = transform.resize(crop_img, (size, size))
resized_img *= 255
return resized_img.astype("uint8")
<SYSTEM_TASK:> construct and return descriptor <END_TASK> <USER_TASK:> Description: def get_descriptor(ctx): """ construct and return descriptor """
d_net = gluon.nn.Sequential() with d_net.name_scope(): d_net.add(SNConv2D(num_filter=64, kernel_size=4, strides=2, padding=1, in_channels=3, ctx=ctx)) d_net.add(gluon.nn.LeakyReLU(0.2)) d_net.add(SNConv2D(num_filter=128, kernel_size=4, strides=2, padding=1, in_channels=64, ctx=ctx)) d_net.add(gluon.nn.LeakyReLU(0.2)) d_net.add(SNConv2D(num_filter=256, kernel_size=4, strides=2, padding=1, in_channels=128, ctx=ctx)) d_net.add(gluon.nn.LeakyReLU(0.2)) d_net.add(SNConv2D(num_filter=512, kernel_size=4, strides=2, padding=1, in_channels=256, ctx=ctx)) d_net.add(gluon.nn.LeakyReLU(0.2)) d_net.add(SNConv2D(num_filter=1, kernel_size=4, strides=1, padding=0, in_channels=512, ctx=ctx)) return d_net
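A hedged usage sketch for the discriminator above (assuming 64x64 RGB inputs, so the four stride-2 convolutions reduce the spatial size to 4x4 and the final 4x4 convolution yields one score per image; also assuming SNConv2D exposes standard Gluon parameters so initialize() works as usual):

import mxnet as mx
from mxnet import nd

ctx = mx.cpu()
d_net = get_descriptor(ctx)
d_net.initialize(mx.init.Normal(0.02), ctx=ctx)
# a batch of 8 fake 64x64 RGB images in NCHW layout
fake = nd.random.uniform(shape=(8, 3, 64, 64), ctx=ctx)
scores = d_net(fake)
print(scores.shape)   # expected: (8, 1, 1, 1)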
<SYSTEM_TASK:> generate random cropping boxes according to parameters <END_TASK> <USER_TASK:> Description: def sample(self, label): """ generate random cropping boxes according to parameters if satisfactory crops are generated, apply to ground-truth as well Parameters: ---------- label : numpy.array (n x 5 matrix) ground-truths Returns: ---------- list of (crop_box, label) tuples, if failed, return empty list [] """
samples = [] count = 0 for trial in range(self.max_trials): if count >= self.max_sample: return samples scale = np.random.uniform(self.min_scale, self.max_scale) min_ratio = max(self.min_aspect_ratio, scale * scale) max_ratio = min(self.max_aspect_ratio, 1. / scale / scale) ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio)) width = scale * ratio height = scale / ratio left = np.random.uniform(0., 1 - width) top = np.random.uniform(0., 1 - height) rand_box = (left, top, left + width, top + height) valid_mask = np.where(label[:, 0] > -1)[0] gt = label[valid_mask, :] ious = self._check_satisfy(rand_box, gt) if ious is not None: # transform gt labels after crop, discard bad ones l, t, r, b = rand_box new_gt_boxes = [] new_width = r - l new_height = b - t for i in range(valid_mask.size): if ious[i] > 0: xmin = max(0., (gt[i, 1] - l) / new_width) ymin = max(0., (gt[i, 2] - t) / new_height) xmax = min(1., (gt[i, 3] - l) / new_width) ymax = min(1., (gt[i, 4] - t) / new_height) new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax]) if not new_gt_boxes: continue new_gt_boxes = np.array(new_gt_boxes) label = np.lib.pad(new_gt_boxes, ((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \ 'constant', constant_values=(-1, -1)) samples.append((rand_box, label)) count += 1 return samples
<SYSTEM_TASK:> check if overlap with any gt box is larger than threshold <END_TASK> <USER_TASK:> Description: def _check_satisfy(self, rand_box, gt_boxes): """ check if overlap with any gt box is larger than threshold """
l, t, r, b = rand_box num_gt = gt_boxes.shape[0] ls = np.ones(num_gt) * l ts = np.ones(num_gt) * t rs = np.ones(num_gt) * r bs = np.ones(num_gt) * b mask = np.where(ls < gt_boxes[:, 1])[0] ls[mask] = gt_boxes[mask, 1] mask = np.where(ts < gt_boxes[:, 2])[0] ts[mask] = gt_boxes[mask, 2] mask = np.where(rs > gt_boxes[:, 3])[0] rs[mask] = gt_boxes[mask, 3] mask = np.where(bs > gt_boxes[:, 4])[0] bs[mask] = gt_boxes[mask, 4] w = rs - ls w[w < 0] = 0 h = bs - ts h[h < 0] = 0 inter_area = h * w union_area = np.ones(num_gt) * max(0, r - l) * max(0, b - t) union_area += (gt_boxes[:, 3] - gt_boxes[:, 1]) * (gt_boxes[:, 4] - gt_boxes[:, 2]) union_area -= inter_area ious = inter_area / union_area ious[union_area <= 0] = 0 max_iou = np.amax(ious) if max_iou < self.min_overlap: return None # check ground-truth constraint if self.config['gt_constraint'] == 'center': for i in range(ious.shape[0]): if ious[i] > 0: gt_x = (gt_boxes[i, 1] + gt_boxes[i, 3]) / 2.0 gt_y = (gt_boxes[i, 2] + gt_boxes[i, 4]) / 2.0 if gt_x < l or gt_x > r or gt_y < t or gt_y > b: return None elif self.config['gt_constraint'] == 'corner': for i in range(ious.shape[0]): if ious[i] > 0: if gt_boxes[i, 1] < l or gt_boxes[i, 3] > r \ or gt_boxes[i, 2] < t or gt_boxes[i, 4] > b: return None return ious
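To make the vectorized overlap computation above concrete, here is a small standalone NumPy illustration (not part of the sampler; boxes are normalized [xmin, ymin, xmax, ymax] without the leading class-id column used by `gt_boxes`):

import numpy as np

rand_box = (0.1, 0.1, 0.6, 0.6)                # candidate crop
gt = np.array([[0.2, 0.2, 0.5, 0.5],           # fully inside the crop
               [0.7, 0.7, 0.9, 0.9]])          # disjoint from the crop
l, t, r, b = rand_box
iw = np.maximum(0.0, np.minimum(r, gt[:, 2]) - np.maximum(l, gt[:, 0]))
ih = np.maximum(0.0, np.minimum(b, gt[:, 3]) - np.maximum(t, gt[:, 1]))
inter = iw * ih
union = (r - l) * (b - t) + (gt[:, 2] - gt[:, 0]) * (gt[:, 3] - gt[:, 1]) - inter
print(inter / union)                           # approximately [0.36, 0.0]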
<SYSTEM_TASK:> generate random padding boxes according to parameters <END_TASK> <USER_TASK:> Description: def sample(self, label): """ generate random padding boxes according to parameters if satisfactory padding is generated, apply to ground-truth as well Parameters: ---------- label : numpy.array (n x 5 matrix) ground-truths Returns: ---------- list of (crop_box, label) tuples, if failed, return empty list [] """
samples = [] count = 0 for trial in range(self.max_trials): if count >= self.max_sample: return samples scale = np.random.uniform(self.min_scale, self.max_scale) min_ratio = max(self.min_aspect_ratio, scale * scale) max_ratio = min(self.max_aspect_ratio, 1. / scale / scale) ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio)) width = scale * ratio if width < 1: continue height = scale / ratio if height < 1: continue left = np.random.uniform(0., 1 - width) top = np.random.uniform(0., 1 - height) right = left + width bot = top + height rand_box = (left, top, right, bot) valid_mask = np.where(label[:, 0] > -1)[0] gt = label[valid_mask, :] new_gt_boxes = [] for i in range(gt.shape[0]): xmin = (gt[i, 1] - left) / width ymin = (gt[i, 2] - top) / height xmax = (gt[i, 3] - left) / width ymax = (gt[i, 4] - top) / height new_size = min(xmax - xmin, ymax - ymin) if new_size < self.min_gt_scale: new_gt_boxes = [] break new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax]) if not new_gt_boxes: continue new_gt_boxes = np.array(new_gt_boxes) label = np.lib.pad(new_gt_boxes, ((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \ 'constant', constant_values=(-1, -1)) samples.append((rand_box, label)) count += 1 return samples
<SYSTEM_TASK:> Measure time cost of running a function <END_TASK> <USER_TASK:> Description: def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs): """Measure time cost of running a function """
mx.nd.waitall() args_list = [] for arg in args: args_list.append(arg) start = time.time() if scipy_trans_lhs: args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0]) for _ in range(repeat): func_name(*args_list, **kwargs) mx.nd.waitall() end = time.time() diff = end - start return diff / repeat
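A hedged usage sketch (timing a dense NumPy matrix product; the two scipy_* flags are left False so no transpose is applied to the first argument, and the mx.nd.waitall() calls inside the helper simply flush any pending MXNet work around the timed region):

import numpy as np

a = np.random.rand(1000, 1000)
b = np.random.rand(1000, 128)
avg_sec = measure_cost(10, False, False, np.dot, a, b)
print('%.6f sec per call' % avg_sec)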
<SYSTEM_TASK:> Collate data into batch. <END_TASK> <USER_TASK:> Description: def default_batchify_fn(data): """Collate data into batch."""
if isinstance(data[0], nd.NDArray): return nd.stack(*data) elif isinstance(data[0], tuple): data = zip(*data) return [default_batchify_fn(i) for i in data] else: data = np.asarray(data) return nd.array(data, dtype=data.dtype)
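A quick sketch of the collation behavior (a list of (data, label) samples comes back as a list of two batched NDArrays; `nd` is mxnet.ndarray as used in this module):

>>> samples = [(nd.ones((3, 4)), 0), (nd.zeros((3, 4)), 1)]
>>> batch = default_batchify_fn(samples)
>>> batch[0].shape, batch[1].shape
((2, 3, 4), (2,))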
<SYSTEM_TASK:> Collate data into batch. Use shared memory for stacking. <END_TASK> <USER_TASK:> Description: def default_mp_batchify_fn(data): """Collate data into batch. Use shared memory for stacking."""
if isinstance(data[0], nd.NDArray): out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0)) return nd.stack(*data, out=out) elif isinstance(data[0], tuple): data = zip(*data) return [default_mp_batchify_fn(i) for i in data] else: data = np.asarray(data) return nd.array(data, dtype=data.dtype, ctx=context.Context('cpu_shared', 0))
<SYSTEM_TASK:> Fetcher loop for fetching data from queue and put in reorder dict. <END_TASK> <USER_TASK:> Description: def fetcher_loop_v1(data_queue, data_buffer, pin_memory=False, pin_device_id=0, data_buffer_lock=None): """Fetcher loop for fetching data from queue and put in reorder dict."""
while True: idx, batch = data_queue.get() if idx is None: break if pin_memory: batch = _as_in_context(batch, context.cpu_pinned(pin_device_id)) else: batch = _as_in_context(batch, context.cpu()) if data_buffer_lock is not None: with data_buffer_lock: data_buffer[idx] = batch else: data_buffer[idx] = batch
<SYSTEM_TASK:> Shutdown internal workers by pushing terminate signals. <END_TASK> <USER_TASK:> Description: def shutdown(self): """Shutdown internal workers by pushing terminate signals."""
if not self._shutdown:
    # send shutdown signal to the fetcher and join data queue first
    # Remark: loop_fetcher needs to be joined prior to the workers,
    # otherwise the fetcher may fail at getting data
    self._data_queue.put((None, None))
    self._fetcher.join()
    # send shutdown signal to all worker processes
    for _ in range(self._num_workers):
        self._key_queue.put((None, None))
    # force shut down any alive worker processes
    for w in self._workers:
        if w.is_alive():
            w.terminate()
    self._shutdown = True
<SYSTEM_TASK:> Returns ctype arrays for the key-value args, and the whether string keys are used. <END_TASK> <USER_TASK:> Description: def _ctype_key_value(keys, vals): """ Returns ctype arrays for the key-value args, and the whether string keys are used. For internal use only. """
if isinstance(keys, (tuple, list)): assert(len(keys) == len(vals)) c_keys = [] c_vals = [] use_str_keys = None for key, val in zip(keys, vals): c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val) c_keys += c_key_i c_vals += c_val_i use_str_keys = str_keys_i if use_str_keys is None else use_str_keys assert(use_str_keys == str_keys_i), "inconsistent types of keys detected." c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \ else c_array(ctypes.c_int, c_keys) c_vals_arr = c_array(ctypes.c_void_p, c_vals) return (c_keys_arr, c_vals_arr, use_str_keys) assert(isinstance(keys, (int,) + string_types)), \ "unexpected type for keys: " + str(type(keys)) use_str_keys = isinstance(keys, string_types) if isinstance(vals, NDArray): c_keys = c_str_array([keys]) if use_str_keys \ else c_array_buf(ctypes.c_int, array('i', [keys])) return (c_keys, c_handle_array([vals]), use_str_keys) else: for value in vals: assert(isinstance(value, NDArray)) c_keys = c_str_array([keys] * len(vals)) if use_str_keys \ else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals))) return (c_keys, c_handle_array(vals), use_str_keys)
<SYSTEM_TASK:> Creates a new KVStore. <END_TASK> <USER_TASK:> Description: def create(name='local'): """Creates a new KVStore. For single machine training, there are two commonly used types: ``local``: Copies all gradients to CPU memory and updates weights there. ``device``: Aggregates gradients and updates weights on GPUs. With this setting, the KVStore also attempts to use GPU peer-to-peer communication, potentially accelerating the communication. For distributed training, KVStore also supports a number of types: ``dist_sync``: Behaves similarly to ``local`` but with one major difference. With ``dist_sync``, batch-size now means the batch size used on each machine. So if there are ``n`` machines and we use batch size ``b``, then ``dist_sync`` behaves like ``local`` with batch size ``n * b``. ``dist_device_sync``: Identical to ``dist_sync`` with the difference similar to ``device`` vs ``local``. ``dist_async``: Performs asynchronous updates. The weights are updated whenever gradients are received from any machine. No two updates happen on the same weight at the same time. However, the order is not guaranteed. Parameters ---------- name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async'} The type of KVStore. Returns ------- kv : KVStore The created KVStore. """
if not isinstance(name, string_types): raise TypeError('name must be a string') handle = KVStoreHandle() check_call(_LIB.MXKVStoreCreate(c_str(name), ctypes.byref(handle))) kv = KVStore(handle) set_kvstore_handle(kv.handle) return kv
<SYSTEM_TASK:> Initializes a single or a sequence of key-value pairs into the store. <END_TASK> <USER_TASK:> Description: def init(self, key, value): """ Initializes a single or a sequence of key-value pairs into the store. For each key, one must `init` it before calling `push` or `pull`. When multiple workers invoke `init` for the same key, only the value supplied by worker with rank `0` is used. This function returns after data has been initialized successfully. Parameters ---------- key : str, int, or sequence of str or int The keys. value : NDArray, RowSparseNDArray or sequence of NDArray or RowSparseNDArray Values corresponding to the keys. Examples -------- >>> # init a single key-value pair >>> shape = (2,3) >>> kv = mx.kv.create('local') >>> kv.init('3', mx.nd.ones(shape)*2) >>> a = mx.nd.zeros(shape) >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 2. 2. 2.] [ 2. 2. 2.]] >>> # init a list of key-value pairs >>> keys = ['5', '7', '9'] >>> kv.init(keys, [mx.nd.ones(shape)]*len(keys)) >>> # init a row_sparse value >>> kv.init('4', mx.nd.ones(shape).tostype('row_sparse')) >>> b = mx.nd.sparse.zeros('row_sparse', shape) >>> kv.row_sparse_pull('4', row_ids=mx.nd.array([0, 1]), out=b) >>> print b <RowSparseNDArray 2x3 @cpu(0)> """
ckeys, cvals, use_str_keys = _ctype_key_value(key, value) if use_str_keys: check_call(_LIB.MXKVStoreInitEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals)) else: check_call(_LIB.MXKVStoreInit(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
<SYSTEM_TASK:> Pushes a single or a sequence of key-value pairs into the store. <END_TASK> <USER_TASK:> Description: def push(self, key, value, priority=0): """ Pushes a single or a sequence of key-value pairs into the store. This function returns immediately after adding an operator to the engine. The actual operation is executed asynchronously. If there are consecutive pushes to the same key, there is no guarantee on the serialization of pushes. The execution of a push does not guarantee that all previous pushes are finished. There is no synchronization between workers. One can use ``_barrier()`` to sync all workers. Parameters ---------- key : str, int, or sequence of str or int Keys. value : NDArray, RowSparseNDArray, list of NDArray or RowSparseNDArray, or list of list of NDArray or RowSparseNDArray Values corresponding to the keys. priority : int, optional The priority of the push operation. Higher priority push operations are likely to be executed before other push actions. Examples -------- >>> # push a single key-value pair >>> kv.push('3', mx.nd.ones(shape)*8) >>> kv.pull('3', out=a) # pull out the value >>> print a.asnumpy() [[ 8. 8. 8.] [ 8. 8. 8.]] >>> # aggregate the value and the push >>> gpus = [mx.gpu(i) for i in range(4)] >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus] >>> kv.push('3', b) >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 4. 4. 4.] [ 4. 4. 4.]] >>> # push a list of keys. >>> # single device >>> keys = ['4', '5', '6'] >>> kv.push(keys, [mx.nd.ones(shape)]*len(keys)) >>> b = [mx.nd.zeros(shape)]*len(keys) >>> kv.pull(keys, out=b) >>> print b[1].asnumpy() [[ 1. 1. 1.] [ 1. 1. 1.]] >>> # multiple devices: >>> keys = ['7', '8', '9'] >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys) >>> kv.push(keys, b) >>> kv.pull(keys, out=b) >>> print b[1][1].asnumpy() [[ 4. 4. 4.] [ 4. 4. 4.]] >>> # push a row_sparse value >>> b = mx.nd.sparse.zeros('row_sparse', shape) >>> kv.init('10', mx.nd.sparse.zeros('row_sparse', shape)) >>> kv.push('10', mx.nd.ones(shape).tostype('row_sparse')) >>> # pull out the value >>> kv.row_sparse_pull('10', row_ids=mx.nd.array([0, 1]), out=b) >>> print b <RowSparseNDArray 2x3 @cpu(0)> """
ckeys, cvals, use_str_keys = _ctype_key_value(key, value) if use_str_keys: check_call(_LIB.MXKVStorePushEx( self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority))) else: check_call(_LIB.MXKVStorePush( self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
<SYSTEM_TASK:> Pulls a single value or a sequence of values from the store. <END_TASK> <USER_TASK:> Description: def pull(self, key, out=None, priority=0, ignore_sparse=True): """ Pulls a single value or a sequence of values from the store. This function returns immediately after adding an operator to the engine. Subsequent attempts to read from the `out` variable will be blocked until the pull operation completes. `pull` is executed asynchronously after all previous `pull` calls and only the last `push` call for the same input key(s) are finished. The returned values are guaranteed to be the latest values in the store. pull with `RowSparseNDArray` is not supported for dist kvstore. Please use ``row_sparse_pull`` instead. Parameters ---------- key : str, int, or sequence of str or int Keys. out: NDArray or list of NDArray or list of list of NDArray Values corresponding to the keys. priority : int, optional The priority of the pull operation. Higher priority pull operations are likely to be executed before other pull actions. ignore_sparse: bool, optional, default True Whether to ignore sparse arrays in the request. Examples -------- >>> # pull a single key-value pair >>> a = mx.nd.zeros(shape) >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 2. 2. 2.] [ 2. 2. 2.]] >>> # pull into multiple devices >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus] >>> kv.pull('3', out=b) >>> print b[1].asnumpy() [[ 2. 2. 2.] [ 2. 2. 2.]] >>> # pull a list of key-value pairs. >>> # On single device >>> keys = ['5', '7', '9'] >>> b = [mx.nd.zeros(shape)]*len(keys) >>> kv.pull(keys, out=b) >>> print b[1].asnumpy() [[ 2. 2. 2.] [ 2. 2. 2.]] >>> # On multiple devices >>> keys = ['6', '8', '10'] >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys) >>> kv.pull(keys, out=b) >>> print b[1][1].asnumpy() [[ 2. 2. 2.] [ 2. 2. 2.]] """
assert(out is not None) ckeys, cvals, use_str_keys = _ctype_key_value(key, out) if use_str_keys: check_call(_LIB.MXKVStorePullWithSparseEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority), ctypes.c_bool(ignore_sparse))) else: check_call(_LIB.MXKVStorePullWithSparse(self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority), ctypes.c_bool(ignore_sparse)))
<SYSTEM_TASK:> Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \ <END_TASK> <USER_TASK:> Description: def row_sparse_pull(self, key, out=None, priority=0, row_ids=None): """ Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \ from the store with specified row_ids. When there is only one row_id, KVStoreRowSparsePull \ is invoked just once and the result is broadcast to all the rest of outputs. `row_sparse_pull` is executed asynchronously after all previous `pull`/`row_sparse_pull` calls and the last `push` call for the same input key(s) are finished. The returned values are guaranteed to be the latest values in the store. Parameters ---------- key : str, int, or sequence of str or int Keys. out: RowSparseNDArray or list of RowSparseNDArray or list of list of RowSparseNDArray Values corresponding to the keys. The stype is expected to be row_sparse priority : int, optional The priority of the pull operation. Higher priority pull operations are likely to be executed before other pull actions. row_ids : NDArray or list of NDArray The row_ids for which to pull for each value. Each row_id is an 1-D NDArray \ whose values don't have to be unique nor sorted. Examples -------- >>> shape = (3, 3) >>> kv.init('3', mx.nd.ones(shape).tostype('row_sparse')) >>> a = mx.nd.sparse.zeros('row_sparse', shape) >>> row_ids = mx.nd.array([0, 2], dtype='int64') >>> kv.row_sparse_pull('3', out=a, row_ids=row_ids) >>> print a.asnumpy() [[ 1. 1. 1.] [ 0. 0. 0.] [ 1. 1. 1.]] >>> duplicate_row_ids = mx.nd.array([2, 2], dtype='int64') >>> kv.row_sparse_pull('3', out=a, row_ids=duplicate_row_ids) >>> print a.asnumpy() [[ 0. 0. 0.] [ 0. 0. 0.] [ 1. 1. 1.]] >>> unsorted_row_ids = mx.nd.array([1, 0], dtype='int64') >>> kv.row_sparse_pull('3', out=a, row_ids=unsorted_row_ids) >>> print a.asnumpy() [[ 1. 1. 1.] [ 1. 1. 1.] [ 0. 0. 0.]] """
assert(out is not None) assert(row_ids is not None) if isinstance(row_ids, NDArray): row_ids = [row_ids] assert(isinstance(row_ids, list)), \ "row_ids should be NDArray or list of NDArray" first_out = out # whether row_ids are the same single_rowid = False if len(row_ids) == 1 and isinstance(out, list): single_rowid = True first_out = [out[0]] ckeys, cvals, use_str_keys = _ctype_key_value(key, first_out) _, crow_ids, _ = _ctype_key_value(key, row_ids) assert(len(crow_ids) == len(cvals)), \ "the number of row_ids doesn't match the number of values" if use_str_keys: check_call(_LIB.MXKVStorePullRowSparseEx( self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority))) else: check_call(_LIB.MXKVStorePullRowSparse( self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority))) # the result can be copied to other devices without invoking row_sparse_pull # if the indices are the same if single_rowid: for out_i in out[1:]: out[0].copyto(out_i)
<SYSTEM_TASK:> Specifies type of low-bit quantization for gradient compression \ <END_TASK> <USER_TASK:> Description: def set_gradient_compression(self, compression_params): """ Specifies type of low-bit quantization for gradient compression \ and additional arguments depending on the type of compression being used. 2bit Gradient Compression takes a positive float `threshold`. The technique works by thresholding values such that positive values in the gradient above threshold will be set to threshold. Negative values whose absolute values are higher than threshold will be set to the negative of threshold. Values whose absolute values are less than threshold will be set to 0. By doing so, each value in the gradient is in one of three states. 2bits are used to represent these states, and every 16 float values in the original gradient can be represented using one float. This compressed representation can reduce communication costs. The difference between these thresholded values and original values is stored at the sender's end as residual and added to the gradient in the next iteration. When kvstore is 'local', gradient compression is used to reduce communication between multiple devices (gpus). Gradient is quantized on each GPU which computed the gradients, then sent to the GPU which merges the gradients. This receiving GPU dequantizes the gradients and merges them. Note that this increases memory usage on each GPU because of the residual array stored. When kvstore is 'dist', gradient compression is used to reduce communication from worker to server. Gradient is quantized on each worker which computed the gradients, then sent to the server which dequantizes this data and merges the gradients from each worker. Note that this increases CPU memory usage on each worker because of the residual array stored. Only worker to server communication is compressed in this setting. If each machine has multiple GPUs, currently this GPU to GPU or GPU to CPU communication is not compressed. Server to worker communication (in the case of pull) is also not compressed. To use 2bit compression, we need to specify `type` as `2bit`. Only specifying `type` would use default value for the threshold. To completely specify the arguments for 2bit compression, we would need to pass a dictionary which includes `threshold` like: {'type': '2bit', 'threshold': 0.5} Parameters ---------- compression_params : dict A dictionary specifying the type and parameters for gradient compression. The key `type` in this dictionary is a required string argument and specifies the type of gradient compression. Currently `type` can only be `2bit`. Other keys in this dictionary are optional and specific to the type of gradient compression. """
if ('device' in self.type) or ('dist' in self.type): # pylint: disable=unsupported-membership-test ckeys, cvals = _ctype_dict(compression_params) check_call(_LIB.MXKVStoreSetGradientCompression(self.handle, mx_uint(len(compression_params)), ckeys, cvals)) else: raise Exception('Gradient compression is not supported for this type of kvstore')
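A hedged usage sketch (2bit compression with an explicit threshold; per the check above this only applies to 'device' or 'dist' kvstore types, so a plain 'local' store would raise):

>>> kv = mx.kv.create('device')
>>> kv.set_gradient_compression({'type': '2bit', 'threshold': 0.5})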
<SYSTEM_TASK:> Registers an optimizer with the kvstore. <END_TASK> <USER_TASK:> Description: def set_optimizer(self, optimizer): """ Registers an optimizer with the kvstore. When using a single machine, this function updates the local optimizer. If using multiple machines and this operation is invoked from a worker node, it will serialize the optimizer with pickle and send it to all servers. The function returns after all servers have been updated. Parameters ---------- optimizer : Optimizer The new optimizer for the store Examples -------- >>> kv = mx.kv.create() >>> shape = (2, 2) >>> weight = mx.nd.zeros(shape) >>> kv.init(3, weight) >>> # set the optimizer for kvstore as the default SGD optimizer >>> kv.set_optimizer(mx.optimizer.SGD()) >>> grad = mx.nd.ones(shape) >>> kv.push(3, grad) >>> kv.pull(3, out = weight) >>> # weight is updated via gradient descent >>> weight.asnumpy() array([[-0.01, -0.01], [-0.01, -0.01]], dtype=float32) """
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))  # pylint: disable=invalid-name
if 'dist' in self.type and is_worker.value:  # pylint: disable=unsupported-membership-test
    # send the optimizer to server
    try:
        # use ASCII protocol 0, might be slower, but not a big deal
        optim_str = py_str(pickle.dumps(optimizer, 0))
    except:
        raise
    cmd = _get_kvstore_server_command_type('kController')
    self._send_command_to_servers(cmd, optim_str)
    if optimizer.multi_precision:
        cmd = _get_kvstore_server_command_type('kSetMultiPrecision')
        self._send_command_to_servers(cmd, '')
else:
    self._set_updater(opt.get_updater(optimizer))
<SYSTEM_TASK:> Returns the type of this kvstore. <END_TASK> <USER_TASK:> Description: def type(self): """ Returns the type of this kvstore. Returns ------- type : str the string type """
kv_type = ctypes.c_char_p() check_call(_LIB.MXKVStoreGetType(self.handle, ctypes.byref(kv_type))) return py_str(kv_type.value)
<SYSTEM_TASK:> Returns the rank of this worker node. <END_TASK> <USER_TASK:> Description: def rank(self): """ Returns the rank of this worker node. Returns ------- rank : int The rank of this node, which is in range [0, num_workers()) """
rank = ctypes.c_int() check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank))) return rank.value
<SYSTEM_TASK:> Returns the number of worker nodes. <END_TASK> <USER_TASK:> Description: def num_workers(self): """Returns the number of worker nodes. Returns ------- size :int The number of worker nodes. """
size = ctypes.c_int() check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size))) return size.value
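A common pattern built on `rank` and `num_workers` is sharding the training data so each worker reads a distinct slice; a hedged sketch (assumes a distributed launch and a hypothetical 'train.rec' record file):

>>> kv = mx.kv.create('dist_sync')
>>> train_iter = mx.io.ImageRecordIter(path_imgrec='train.rec', data_shape=(3, 224, 224),
...                                    batch_size=128, num_parts=kv.num_workers, part_index=kv.rank)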
<SYSTEM_TASK:> Sends a command to all server nodes. <END_TASK> <USER_TASK:> Description: def _send_command_to_servers(self, head, body): """Sends a command to all server nodes. Sending command to a server node will cause that server node to invoke ``KVStoreServer.controller`` to execute the command. This function returns after the command has been executed on all server nodes. Parameters ---------- head : int the head of the command. body : str the body of the command. """
check_call(_LIB.MXKVStoreSendCommmandToServers( self.handle, mx_uint(head), c_str(body)))
<SYSTEM_TASK:> Add a module to the chain. <END_TASK> <USER_TASK:> Description: def add(self, module, **kwargs): """Add a module to the chain. Parameters ---------- module : BaseModule The new module to add. kwargs : ``**keywords`` All the keyword arguments are saved as meta information for the added module. The currently known meta includes - `take_labels`: indicating whether the module expects to take labels when doing computation. Note any module in the chain can take labels (not necessarily only the top most one), and they all take the same labels passed from the original data batch for the `SequentialModule`. Returns ------- self This function returns `self` to allow us to easily chain a series of `add` calls. Examples -------- >>> # An example of adding two modules to a chain. >>> seq_mod = mx.mod.SequentialModule() >>> seq_mod.add(mod1) >>> seq_mod.add(mod2) """
self._modules.append(module) # a sanity check to avoid typo for key in kwargs: assert key in self._meta_keys, ('Unknown meta "%s", a typo?' % key) self._metas.append(kwargs) # after adding new modules, we are reset back to raw states, needs # to bind, init_params, etc. self.binded = False self.params_initialized = False self.optimizer_initialized = False return self