<SYSTEM_TASK:> convert raw configuration to unified dictionary <END_TASK> <USER_TASK:> Description: def config_as_dict(cfg): """ convert raw configuration to unified dictionary """
ret = cfg.__dict__.copy()
# random cropping params
del ret['rand_crop_samplers']
assert isinstance(cfg.rand_crop_samplers, list)
ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers))
num_crop_sampler = len(cfg.rand_crop_samplers)
ret['num_crop_sampler'] = num_crop_sampler  # must specify the #
ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler
# random padding params
del ret['rand_pad']
ret = merge_dict(ret, cfg.rand_pad._asdict())
# color jitter
del ret['color_jitter']
ret = merge_dict(ret, cfg.color_jitter._asdict())
return ret
<SYSTEM_TASK:> Returns the name and shape information of input and output tensors of the given ONNX model file. <END_TASK> <USER_TASK:> Description: def get_model_metadata(model_file): """ Returns the name and shape information of input and output tensors of the given ONNX model file. Notes ----- This method is available when you ``import mxnet.contrib.onnx`` Parameters ---------- model_file : str ONNX model file name Returns ------- model_metadata : dict A dictionary object mapping various metadata to its corresponding value. The dictionary will have the following template:: 'input_tensor_data' : list of tuples representing the shape of the input parameters 'output_tensor_data' : list of tuples representing the shape of the output of the model """
graph = GraphProto()
try:
    import onnx
except ImportError:
    raise ImportError("Onnx and protobuf need to be installed. "
                      "Instructions to install - https://github.com/onnx/onnx")
model_proto = onnx.load_model(model_file)
metadata = graph.get_graph_metadata(model_proto.graph)
return metadata
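A minimal usage sketch (the file name is illustrative; requires the onnx package):

from mxnet.contrib import onnx as onnx_mxnet
metadata = onnx_mxnet.get_model_metadata('resnet18.onnx')  # hypothetical model file
print(metadata['input_tensor_data'])  # e.g. [('data', (1, 3, 224, 224))]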
<SYSTEM_TASK:> Wrapper function to extract features from base network, attaching extra <END_TASK> <USER_TASK:> Description: def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128): """Wrapper function to extract features from base network, attaching extra layers and SSD specific layers Parameters ---------- from_layers : list of str feature extraction layers, use '' to add extra layers For example: from_layers = ['relu4_3', 'fc7', '', '', '', ''] which means extracting features from relu4_3 and fc7, adding 4 extra layers on top of fc7 num_filters : list of int number of filters for extra layers, you can use -1 for extracted features, however, if normalization or scaling is applied, the number of filters for that layer must be provided. For example: num_filters = [512, -1, 512, 256, 256, 256] strides : list of int strides for the 3x3 convolution appended, -1 can be used for extracted feature layers pads : list of int paddings for the 3x3 convolution, -1 can be used for extracted layers min_filter : int minimum number of filters used in 1x1 convolution Returns ------- list of mx.Symbols """
# arguments check
assert len(from_layers) > 0
assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
internals = body.get_internals()
layers = []
for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
    from_layer, num_filter, s, p = params
    if from_layer.strip():
        # extract from base network
        layer = internals[from_layer.strip() + '_output']
        layers.append(layer)
    else:
        # attach from last feature layer
        assert len(layers) > 0
        assert num_filter > 0
        layer = layers[-1]
        num_1x1 = max(min_filter, num_filter // 2)
        conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
                                  num_1x1, kernel=(1, 1), pad=(0, 0),
                                  stride=(1, 1), act_type='relu')
        conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
                                  num_filter, kernel=(3, 3), pad=(p, p),
                                  stride=(s, s), act_type='relu')
        layers.append(conv_3x3)
return layers
<SYSTEM_TASK:> Apply weighting to loss. <END_TASK> <USER_TASK:> Description: def _apply_weighting(F, loss, weight=None, sample_weight=None): """Apply weighting to loss. Parameters ---------- loss : Symbol The loss to be weighted. weight : float or None Global scalar weight for loss. sample_weight : Symbol or None Per sample weighting. Must be broadcastable to the same shape as loss. For example, if loss has shape (64, 10) and you want to weight each sample in the batch separately, `sample_weight` should have shape (64, 1). Returns ------- loss : Symbol Weighted loss """
if sample_weight is not None:
    loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
    assert isinstance(weight, numeric_types), "weight must be a number"
    loss = loss * weight
return loss
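A small illustration of the weighting semantics, assuming mxnet is installed and `numeric_types` is in scope as in the surrounding module:

from mxnet import ndarray as nd
loss = nd.ones((2, 3))
sample_weight = nd.array([[0.5], [2.0]])  # (2, 1), broadcastable to (2, 3)
weighted = _apply_weighting(nd, loss, weight=0.1, sample_weight=sample_weight)
print(weighted.asnumpy())  # first row becomes 0.05, second row 0.2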
<SYSTEM_TASK:> Reshapes x to the same shape as y. <END_TASK> <USER_TASK:> Description: def _reshape_like(F, x, y): """Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)
<SYSTEM_TASK:> create TV gradient executor with input bound on img <END_TASK> <USER_TASK:> Description: def get_tv_grad_executor(img, ctx, tv_weight): """create TV gradient executor with input bound on img """
if tv_weight <= 0.0:
    return None
nchannel = img.shape[1]
simg = mx.sym.Variable("img")
skernel = mx.sym.Variable("kernel")
channels = mx.sym.SliceChannel(simg, num_outputs=nchannel)
out = mx.sym.Concat(*[
    mx.sym.Convolution(data=channels[i], weight=skernel,
                       num_filter=1,
                       kernel=(3, 3), pad=(1, 1),
                       no_bias=True, stride=(1, 1))
    for i in range(nchannel)])
kernel = mx.nd.array(np.array([[0, -1, 0],
                               [-1, 4, -1],
                               [0, -1, 0]])
                     .reshape((1, 1, 3, 3)), ctx) / 8.0
out = out * tv_weight
return out.bind(ctx, args={"img": img, "kernel": kernel})
<SYSTEM_TASK:> Get input slice from the input shape. <END_TASK> <USER_TASK:> Description: def _split_input_slice(batch_size, work_load_list): """Get input slice from the input shape. Parameters ---------- batch_size : int The number of samples in a mini-batch. work_load_list : list of float or int, optional The list of work load for different devices, in the same order as `ctx`. Returns ------- slices : list of slice The split slices to get a specific slice. Raises ------ ValueError In case of too many splits, leading to some empty slices. """
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
                  for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
    batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
    begin = int(min((end, batch_size)))
    end = int(min((begin + batch_num, batch_size)))
    if begin >= end:
        raise ValueError('Too many slices. Some splits are empty.')
    slices.append(slice(begin, end))
return slices
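A quick sanity check in pure Python (no mxnet needed), splitting a batch of 10 samples across three devices with relative work loads 1:2:2:

print(_split_input_slice(10, [1, 2, 2]))
# [slice(0, 2, None), slice(2, 6, None), slice(6, 10, None)]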
<SYSTEM_TASK:> Check the argument names of symbol. <END_TASK> <USER_TASK:> Description: def _check_arguments(symbol): """Check the argument names of symbol. This function checks the duplication of arguments in Symbol. The check is done for feedforward net for now. Parameters ---------- symbol : Symbol The network configuration. """
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
    if name in arg_set:
        raise ValueError(('Find duplicated argument name "%s", '
                          'please make the weight name non-duplicated (using name arguments), '
                          'arguments are %s') % (name, str(arg_names)))
    arg_set.add(name)

aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
    if name in aux_set:
        raise ValueError(
            ('Find duplicated auxiliary param name "%s", '
             'please make the weight name non-duplicated (using name arguments), '
             'arguments are %s, auxiliary params are %s'
            ) % (name, str(arg_names), str(aux_names)))
    aux_set.add(name)
<SYSTEM_TASK:> Perform a forward pass on each executor. <END_TASK> <USER_TASK:> Description: def forward(self, is_train=False): """Perform a forward pass on each executor."""
for texec in self.train_execs:
    texec.forward(is_train=is_train)
<SYSTEM_TASK:> Update evaluation metric with label and current outputs. <END_TASK> <USER_TASK:> Description: def update_metric(self, metric, labels, pre_sliced=False): """Update evaluation metric with label and current outputs."""
for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
    if not pre_sliced:
        labels_slice = [label[islice] for label in labels]
    else:
        labels_slice = labels[current_exec]
    metric.update(labels_slice, texec.outputs)
<SYSTEM_TASK:> Update metric with the current executor. <END_TASK> <USER_TASK:> Description: def update_metric(self, metric, labels, pre_sliced=False): """Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels, pre_sliced)
<SYSTEM_TASK:> Clear all contents in the replay memory <END_TASK> <USER_TASK:> Description: def clear(self): """ Clear all contents in the replay memory """
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0
<SYSTEM_TASK:> Return the server controller. <END_TASK> <USER_TASK:> Description: def _controller(self): """Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
    """Server controller."""
    if not self.init_logginig:
        # the reason we put this code here is because we cannot get
        # kvstore.rank earlier
        head = '%(asctime)-15s Server[' + str(
            self.kvstore.rank) + '] %(message)s'
        logging.basicConfig(level=logging.DEBUG, format=head)
        self.init_logginig = True

    if cmd_id == 0:
        try:
            optimizer = pickle.loads(cmd_body)
        except:
            raise
        self.kvstore.set_optimizer(optimizer)
    else:
        print("server %d, unknown command (%d, %s)" % (
            self.kvstore.rank, cmd_id, cmd_body))
return server_controller
<SYSTEM_TASK:> Run the server, whose behavior is like the following. <END_TASK> <USER_TASK:> Description: def run(self): """Run the server, whose behavior is like:: >>> while receive(x): ... if is_command x: controller(x) ... else if is_key_value x: updater(x) """
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
<SYSTEM_TASK:> Counts tokens in the specified string. <END_TASK> <USER_TASK:> Description: def count_tokens_from_str(source_str, token_delim=' ', seq_delim='\n', to_lower=False, counter_to_update=None): """Counts tokens in the specified string. For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of tokens may look like:: <td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd> <td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as delimiters. The list of special characters can be found at https://docs.python.org/3/library/re.html. Parameters ---------- source_str : str A source string of tokens. token_delim : str, default ' ' A token delimiter. seq_delim : str, default '\\\\n' A sequence delimiter. to_lower : bool, default False Whether to convert `source_str` to lower case. counter_to_update : collections.Counter or None, default None The collections.Counter instance to be updated with the token counts of `source_str`. If None, return a new collections.Counter instance counting tokens from `source_str`. Returns ------- collections.Counter The `counter_to_update` collections.Counter instance after being updated with the token counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter instance counting tokens from `source_str`. Examples -------- >>> source_str = ' Life is great ! \\n life is good . \\n' >>> count_tokens_from_str(source_str, ' ', '\\n', True) Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2}) >>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n' >>> count_tokens_from_str(source_str, '\\*', '\\n', True) Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1}) """
source_str = filter(None, re.split(token_delim + '|' + seq_delim, source_str))
if to_lower:
    source_str = [t.lower() for t in source_str]

if counter_to_update is None:
    return collections.Counter(source_str)
else:
    counter_to_update.update(source_str)
    return counter_to_update
<SYSTEM_TASK:> Loads an array from file. <END_TASK> <USER_TASK:> Description: def load(fname): """Loads an array from file. See more details in ``save``. Parameters ---------- fname : str The filename. Returns ------- list of NDArray, RowSparseNDArray or CSRNDArray, or \ dict of str to NDArray, RowSparseNDArray or CSRNDArray Loaded data. """
if not isinstance(fname, string_types):
    raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
                              ctypes.byref(out_size),
                              ctypes.byref(handles),
                              ctypes.byref(out_name_size),
                              ctypes.byref(names)))
if out_name_size.value == 0:
    return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
    assert out_name_size.value == out_size.value
    return dict(
        (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
        for i in range(out_size.value))
<SYSTEM_TASK:> Loads an array dictionary or list from a buffer <END_TASK> <USER_TASK:> Description: def load_frombuffer(buf): """Loads an array dictionary or list from a buffer See more details in ``save``. Parameters ---------- buf : str Buffer containing contents of a file as a string or bytes. Returns ------- list of NDArray, RowSparseNDArray or CSRNDArray, or \ dict of str to NDArray, RowSparseNDArray or CSRNDArray Loaded data. """
if not isinstance(buf, string_types + tuple([bytes])):
    raise TypeError('buf required to be a string or bytes')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoadFromBuffer(buf,
                                        mx_uint(len(buf)),
                                        ctypes.byref(out_size),
                                        ctypes.byref(handles),
                                        ctypes.byref(out_name_size),
                                        ctypes.byref(names)))
if out_name_size.value == 0:
    return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
    assert out_name_size.value == out_size.value
    return dict(
        (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
        for i in range(out_size.value))
<SYSTEM_TASK:> Saves a list of arrays or a dict of str->array to file. <END_TASK> <USER_TASK:> Description: def save(fname, data): """Saves a list of arrays or a dict of str->array to file. Examples of filenames: - ``/path/to/file`` - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports) - ``hdfs://path/to/file`` (if compiled with HDFS supports) Parameters ---------- fname : str The filename. data : NDArray, RowSparseNDArray or CSRNDArray, \ or list of NDArray, RowSparseNDArray or CSRNDArray, \ or dict of str to NDArray, RowSparseNDArray or CSRNDArray The data to save. Examples -------- >>> x = mx.nd.zeros((2,3)) >>> y = mx.nd.ones((1,4)) >>> mx.nd.save('my_list', [x,y]) >>> mx.nd.save('my_dict', {'x':x, 'y':y}) >>> mx.nd.load('my_list') [<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>] >>> mx.nd.load('my_dict') {'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>} """
if isinstance(data, NDArray):
    data = [data]
    handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
    str_keys = data.keys()
    nd_vals = data.values()
    if any(not isinstance(k, string_types) for k in str_keys) or \
       any(not isinstance(v, NDArray) for v in nd_vals):
        raise TypeError('save only accept dict str->NDArray or list of NDArray')
    keys = c_str_array(str_keys)
    handles = c_handle_array(nd_vals)
elif isinstance(data, list):
    if any(not isinstance(v, NDArray) for v in data):
        raise TypeError('save only accept dict str->NDArray or list of NDArray')
    keys = None
    handles = c_handle_array(data)
else:
    raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
                     "or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
                              mx_uint(len(handles)),
                              handles,
                              keys))
<SYSTEM_TASK:> Get the common prefix for all names <END_TASK> <USER_TASK:> Description: def _common_prefix(names): """Get the common prefix for all names"""
if not names:
    return ''
prefix = names[0]
for name in names:
    i = 0
    while i < len(prefix) and i < len(name) and prefix[i] == name[i]:
        i += 1
    prefix = prefix[:i]
return prefix
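For example, with typical parameter names (pure Python):

print(_common_prefix(['conv0_weight', 'conv0_bias', 'conv0_running_mean']))  # 'conv0_'
print(_common_prefix([]))                                                    # ''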
<SYSTEM_TASK:> Utility function that helps in inferring the dtypes of arg and aux params <END_TASK> <USER_TASK:> Description: def _infer_param_types(in_params, out_params, arg_params, aux_params, default_dtype=mx_real_t): """Utility function that helps in inferring the dtypes of arg and aux params from the given input params. Parameters ---------- in_params: List of Symbol List of input symbol variables. out_params: Symbol Output symbol variable. arg_params: List of Str List of names of argument parameters. aux_params: List of Str List of names of auxiliary parameters. default_dtype: numpy.dtype or str, default 'float32' Default data type for arg_params and aux_params, if unable to infer the type. Returns ------- arg_types: List of numpy.dtype List of arg_params type. Order is same as arg_params. Defaults to 'float32', if unable to infer type. aux_types: List of numpy.dtype List of aux_params type. Order is same as aux_params. Defaults to 'float32', if unable to infer type. """
arg_types = None
aux_types = None

# Get input symbol details. This will be used to infer types of
# other parameters.
input_sym_names = [in_param.name for in_param in in_params]

# Try to infer input types. If not successful, we will set default dtype.
# If successful, we will try to infer other params in the graph.
input_sym_arg_types = []
can_infer_input_type = True
for in_param in in_params:
    input_sym_arg_type = in_param.infer_type()[0]
    if not input_sym_arg_type or len(input_sym_arg_type) < 1:
        can_infer_input_type = False
        break
    else:
        input_sym_arg_types.append(in_param.infer_type()[0][0])

# Try to infer types of other parameters.
if can_infer_input_type:
    params = {k: v for k, v in zip(input_sym_names, input_sym_arg_types)}
    arg_types, _, aux_types = out_params.infer_type(**params)

if arg_types is None or len(arg_types) != len(arg_params):
    arg_types = []
    for _ in arg_params:
        arg_types.append(default_dtype)

if aux_types is None or len(aux_types) != len(aux_params):
    aux_types = []
    for _ in aux_params:
        aux_types.append(default_dtype)

return (arg_types, aux_types)
<SYSTEM_TASK:> Load parameters from file previously saved by `save_parameters`. <END_TASK> <USER_TASK:> Description: def load_parameters(self, filename, ctx=None, allow_missing=False, ignore_extra=False): """Load parameters from file previously saved by `save_parameters`. Parameters ---------- filename : str Path to parameter file. ctx : Context or list of Context, default cpu() Context(s) to initialize loaded parameters on. allow_missing : bool, default False Whether to silently skip loading parameters not represented in the file. ignore_extra : bool, default False Whether to silently ignore parameters from the file that are not present in this Block. References ---------- `Saving and Loading Gluon Models \ <https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_ """
loaded = ndarray.load(filename)
params = self._collect_params_with_prefix()
if not loaded and not params:
    return
if not any('.' in i for i in loaded.keys()):
    # legacy loading
    del loaded
    self.collect_params().load(
        filename, ctx, allow_missing, ignore_extra, self.prefix)
    return

if not allow_missing:
    for name in params.keys():
        assert name in loaded, \
            "Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
            "Set allow_missing=True to ignore missing parameters." % (
                name, filename, _brief_print_list(loaded.keys()))
for name in loaded:
    if not ignore_extra and name not in params:
        raise ValueError(
            "Parameter '%s' loaded from file '%s' is not present in ParameterDict, "
            "which contains parameters %s. Set ignore_extra=True to ignore. " % (
                name, filename, _brief_print_list(self._params.keys())))
    if name in params:
        params[name]._load_init(loaded[name], ctx)
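A minimal save/load round trip as a hedged sketch (layer size, input shape, and file name are illustrative):

import mxnet as mx
from mxnet.gluon import nn

net = nn.Dense(10)
net.initialize()
net(mx.nd.ones((1, 4)))              # run once so parameter shapes are known
net.save_parameters('dense.params')  # counterpart of load_parameters

net2 = nn.Dense(10)
net2.load_parameters('dense.params', ctx=mx.cpu())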
<SYSTEM_TASK:> r"""Registers a forward pre-hook on the block. <END_TASK> <USER_TASK:> Description: def register_forward_pre_hook(self, hook): r"""Registers a forward pre-hook on the block. The hook function is called immediately before :func:`forward`. It should not modify the input or output. Parameters ---------- hook : callable The forward hook function of form `hook(block, input) -> None`. Returns ------- :class:`mxnet.gluon.utils.HookHandle` """
handle = HookHandle()
handle.attach(self._forward_pre_hooks, hook)
return handle
<SYSTEM_TASK:> r"""Registers a forward hook on the block. <END_TASK> <USER_TASK:> Description: def register_forward_hook(self, hook): r"""Registers a forward hook on the block. The hook function is called immediately after :func:`forward`. It should not modify the input or output. Parameters ---------- hook : callable The forward hook function of form `hook(block, input, output) -> None`. Returns ------- :class:`mxnet.gluon.utils.HookHandle` """
handle = HookHandle()
handle.attach(self._forward_hooks, hook)
return handle
<SYSTEM_TASK:> r"""Applies ``fn`` recursively to every child block as well as self. <END_TASK> <USER_TASK:> Description: def apply(self, fn): r"""Applies ``fn`` recursively to every child block as well as self. Parameters ---------- fn : callable Function to be applied to each submodule, of form `fn(block)`. Returns ------- this block """
for cld in self._children.values():
    cld.apply(fn)
fn(self)
return self
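Hedged usage sketch: `apply` visits children before the block itself, so a traversal over a small Sequential prints the leaves first (assumes mxnet.gluon is available):

from mxnet.gluon import nn

def show(block):
    print(type(block).__name__)

net = nn.Sequential()
net.add(nn.Dense(4), nn.Dense(2))
net.apply(show)  # prints: Dense, Dense, Sequential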
<SYSTEM_TASK:> Cast this Block to use another data type. <END_TASK> <USER_TASK:> Description: def cast(self, dtype): """Cast this Block to use another data type. Parameters ---------- dtype : str or numpy.dtype The new data type. """
for child in self._children.values():
    child.cast(dtype)
for _, param in self.params.items():
    param.cast(dtype)
<SYSTEM_TASK:> Export HybridBlock to json format that can be loaded by <END_TASK> <USER_TASK:> Description: def export(self, path, epoch=0): """Export HybridBlock to json format that can be loaded by `SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface. .. note:: When there is only one input, it will have name `data`. When there are multiple inputs, they will be named as `data0`, `data1`, etc. Parameters ---------- path : str Path to save model. Two files `path-symbol.json` and `path-xxxx.params` will be created, where xxxx is the 4-digit epoch number. epoch : int Epoch number of saved model. """
if not self._cached_graph:
    raise RuntimeError(
        "Please first call block.hybridize() and then run forward with "
        "this block at least once before calling export.")
sym = self._cached_graph[1]
sym.save('%s-symbol.json' % path)

arg_names = set(sym.list_arguments())
aux_names = set(sym.list_auxiliary_states())
arg_dict = {}
for name, param in self.collect_params().items():
    if name in arg_names:
        arg_dict['arg:%s' % name] = param._reduce()
    else:
        assert name in aux_names
        arg_dict['aux:%s' % name] = param._reduce()
ndarray.save('%s-%04d.params' % (path, epoch), arg_dict)
<SYSTEM_TASK:> Import model previously saved by `HybridBlock.export` or <END_TASK> <USER_TASK:> Description: def imports(symbol_file, input_names, param_file=None, ctx=None): """Import model previously saved by `HybridBlock.export` or `Module.save_checkpoint` as a SymbolBlock for use in Gluon. Parameters ---------- symbol_file : str Path to symbol file. input_names : list of str List of input variable names param_file : str, optional Path to parameter file. ctx : Context, default None The context to initialize SymbolBlock on. Returns ------- SymbolBlock SymbolBlock loaded from symbol and parameter files. Examples -------- >>> net1 = gluon.model_zoo.vision.resnet18_v1( ... prefix='resnet', pretrained=True) >>> net1.hybridize() >>> x = mx.nd.random.normal(shape=(1, 3, 32, 32)) >>> out1 = net1(x) >>> net1.export('net1', epoch=1) >>> >>> net2 = gluon.SymbolBlock.imports( ... 'net1-symbol.json', ['data'], 'net1-0001.params') >>> out2 = net2(x) """
sym = symbol.load(symbol_file)
if isinstance(input_names, str):
    input_names = [input_names]
inputs = [symbol.var(i) for i in input_names]
ret = SymbolBlock(sym, inputs)
if param_file is not None:
    ret.collect_params().load(param_file, ctx=ctx)
return ret
<SYSTEM_TASK:> Calculates the expectation of the gradients per epoch for each parameter w.r.t. the number of batches <END_TASK> <USER_TASK:> Description: def calc_expectation(grad_dict, num_batches): """Calculates the expectation of the gradients per epoch for each parameter w.r.t. the number of batches Parameters ---------- grad_dict: dict dictionary that maps parameter name to gradients in the mod executor group num_batches: int number of batches Returns ---------- grad_dict: dict dictionary with new keys mapping to gradients expectations """
# Snapshot the keys first: adding the "_expectation" entries while iterating
# over the live dict view would raise a RuntimeError in Python 3.
for key in list(grad_dict.keys()):
    grad_dict[key + "_expectation"] = mx.ndarray.sum(grad_dict[key], axis=0) / num_batches
return grad_dict
<SYSTEM_TASK:> Calculates the variance of the gradients per epoch for each parameter w.r.t. the number of batches <END_TASK> <USER_TASK:> Description: def calc_variance(grad_dict, num_batches, param_names): """Calculates the variance of the gradients per epoch for each parameter w.r.t. the number of batches Parameters ---------- grad_dict: dict dictionary that maps parameter name to gradients in the mod executor group num_batches: int number of batches param_names: list of str parameter names in the module Returns ---------- grad_dict: dict dictionary with new keys mapping to gradients variance """
for i in range(len(param_names)):
    diff_sqr = mx.ndarray.square(mx.nd.subtract(grad_dict[param_names[i]],
                                                grad_dict[param_names[i] + "_expectation"]))
    grad_dict[param_names[i] + "_variance"] = mx.ndarray.sum(diff_sqr, axis=0) / num_batches
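The two helpers compose; a small check with stacked per-batch gradients, where axis 0 is the batch dimension (values illustrative, assumes mxnet is installed):

import mxnet as mx

grads = {'fc_weight': mx.nd.array([[1., 2.], [3., 4.]])}  # 2 batches of gradients
grads = calc_expectation(grads, num_batches=2)
calc_variance(grads, num_batches=2, param_names=['fc_weight'])
print(grads['fc_weight_expectation'].asnumpy())  # [2. 3.]
print(grads['fc_weight_variance'].asnumpy())     # [1. 1.]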
<SYSTEM_TASK:> computes f1, precision and recall on the entity class <END_TASK> <USER_TASK:> Description: def classifer_metrics(label, pred): """ computes f1, precision and recall on the entity class """
prediction = np.argmax(pred, axis=1)
label = label.astype(int)
pred_is_entity = prediction != not_entity_index
label_is_entity = label != not_entity_index

corr_pred = (prediction == label) == pred_is_entity

# how many entities are there?
num_entities = np.sum(label_is_entity)
entity_preds = np.sum(pred_is_entity)

# how many times did we correctly predict an entity?
correct_entities = np.sum(corr_pred[pred_is_entity])

# precision: when we predict entity, how often are we right?
# (guard against division by zero before dividing)
precision = np.nan if entity_preds == 0 else correct_entities / entity_preds

# recall: of the things that were an entity, how many did we catch?
recall = np.nan if num_entities == 0 else correct_entities / num_entities

f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
<SYSTEM_TASK:> Construct data iter <END_TASK> <USER_TASK:> Description: def data_iter(batch_size, num_embed, pre_trained_word2vec=False): """Construct data iter Parameters ---------- batch_size: int num_embed: int pre_trained_word2vec: boolean whether to use pre-trained word2vec embeddings Returns ---------- train_set: DataIter Train DataIter valid: DataIter Valid DataIter sentences_size: int maximum sentence length embedded_size: int word embedding size vocab_size: int vocabulary size """
print('Loading data...')
if pre_trained_word2vec:
    word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec')
    x, y = data_helpers.load_data_with_word2vec(word2vec)
    # reshape for convolution input
    x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2]))
    embedded_size = x.shape[-1]
    sentences_size = x.shape[2]
    vocabulary_size = -1
else:
    x, y, vocab, vocab_inv = data_helpers.load_data()
    embedded_size = num_embed
    sentences_size = x.shape[1]
    vocabulary_size = len(vocab)

# randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

# split train/valid set
x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
print('Train/Valid split: %d/%d' % (len(y_train), len(y_dev)))
print('train shape:', x_train.shape)
print('valid shape:', x_dev.shape)
print('sentence max words', sentences_size)
print('embedding size', embedded_size)
print('vocab size', vocabulary_size)

train_set = mx.io.NDArrayIter(
    x_train, y_train, batch_size, shuffle=True)
valid = mx.io.NDArrayIter(
    x_dev, y_dev, batch_size)
return train_set, valid, sentences_size, embedded_size, vocabulary_size
<SYSTEM_TASK:> Generate network symbol <END_TASK> <USER_TASK:> Description: def sym_gen(batch_size, sentences_size, num_embed, vocabulary_size, num_label=2, filter_list=None, num_filter=100, dropout=0.0, pre_trained_word2vec=False): """Generate network symbol Parameters ---------- batch_size: int sentences_size: int num_embed: int vocabulary_size: int num_label: int filter_list: list num_filter: int dropout: float pre_trained_word2vec: boolean whether to use pre-trained word2vec embeddings Returns ---------- sm: symbol data: list of str data names softmax_label: list of str label names """
input_x = mx.sym.Variable('data')
input_y = mx.sym.Variable('softmax_label')

# embedding layer
if not pre_trained_word2vec:
    embed_layer = mx.sym.Embedding(data=input_x,
                                   input_dim=vocabulary_size,
                                   output_dim=num_embed,
                                   name='vocab_embed')
    conv_input = mx.sym.Reshape(data=embed_layer,
                                target_shape=(batch_size, 1, sentences_size, num_embed))
else:
    conv_input = input_x

# create convolution + (max) pooling layer for each filter operation
pooled_outputs = []
for i, filter_size in enumerate(filter_list):
    convi = mx.sym.Convolution(data=conv_input, kernel=(filter_size, num_embed),
                               num_filter=num_filter)
    relui = mx.sym.Activation(data=convi, act_type='relu')
    pooli = mx.sym.Pooling(data=relui, pool_type='max',
                           kernel=(sentences_size - filter_size + 1, 1),
                           stride=(1, 1))
    pooled_outputs.append(pooli)

# combine all pooled outputs
total_filters = num_filter * len(filter_list)
concat = mx.sym.Concat(*pooled_outputs, dim=1)
h_pool = mx.sym.Reshape(data=concat, target_shape=(batch_size, total_filters))

# dropout layer
if dropout > 0.0:
    h_drop = mx.sym.Dropout(data=h_pool, p=dropout)
else:
    h_drop = h_pool

# fully connected
cls_weight = mx.sym.Variable('cls_weight')
cls_bias = mx.sym.Variable('cls_bias')
fc = mx.sym.FullyConnected(data=h_drop, weight=cls_weight, bias=cls_bias,
                           num_hidden=num_label)

# softmax output
sm = mx.sym.SoftmaxOutput(data=fc, label=input_y, name='softmax')
return sm, ('data',), ('softmax_label',)
<SYSTEM_TASK:> Train cnn model <END_TASK> <USER_TASK:> Description: def train(symbol_data, train_iterator, valid_iterator, data_column_names, target_names): """Train cnn model Parameters ---------- symbol_data: symbol train_iterator: DataIter Train DataIter valid_iterator: DataIter Valid DataIter data_column_names: list of str Defaults to ('data') for a typical model used in image classification target_names: list of str Defaults to ('softmax_label') for a typical model used in image classification """
devs = mx.cpu()  # default setting
if args.gpus is not None:
    # build one context per requested GPU, not just gpu(0)
    devs = [mx.gpu(int(i)) for i in args.gpus.split(',')]
module = mx.mod.Module(symbol_data, data_names=data_column_names,
                       label_names=target_names, context=devs)
module.fit(train_data=train_iterator,
           eval_data=valid_iterator,
           eval_metric='acc',
           kvstore=args.kv_store,
           optimizer=args.optimizer,
           optimizer_params={'learning_rate': args.lr},
           initializer=mx.initializer.Uniform(0.1),
           num_epoch=args.num_epochs,
           batch_end_callback=mx.callback.Speedometer(args.batch_size, args.disp_batches),
           epoch_end_callback=save_model())
<SYSTEM_TASK:> Helper function to parse operator attributes in required format. <END_TASK> <USER_TASK:> Description: def parse_helper(attrs, attrs_name, alt_value=None): """Helper function to parse operator attributes in required format."""
tuple_re = re.compile(r'\([0-9L|,| ]+\)')  # raw string avoids an invalid escape warning
if not attrs:
    return alt_value
attrs_str = None if attrs.get(attrs_name) is None else str(attrs.get(attrs_name))
if attrs_str is None:
    return alt_value
attrs_match = tuple_re.search(attrs_str)
if attrs_match is not None:
    if attrs_match.span() == (0, len(attrs_str)):
        dims = eval(attrs_str)
        return dims
    else:
        raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
return alt_value
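Illustration in pure Python (given the function above and `import re`):

print(parse_helper({'kernel': '(3, 3)'}, 'kernel'))                 # (3, 3)
print(parse_helper({'kernel': '(3, 3)'}, 'pad', alt_value=(0, 0)))  # (0, 0)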
<SYSTEM_TASK:> Helper function to convert padding format for pad operator. <END_TASK> <USER_TASK:> Description: def transform_padding(pad_width): """Helper function to convert padding format for pad operator. """
num_pad_values = len(pad_width)
onnx_pad_width = [0] * num_pad_values

start_index = 0
# num_pad_values will always be a multiple of 2
end_index = int(num_pad_values / 2)
for idx in range(0, num_pad_values):
    if idx % 2 == 0:
        onnx_pad_width[start_index] = pad_width[idx]
        start_index += 1
    else:
        onnx_pad_width[end_index] = pad_width[idx]
        end_index += 1

return onnx_pad_width
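Example (pure Python): MXNet interleaves pads per axis as (begin0, end0, begin1, end1, ...), while ONNX expects all begins followed by all ends:

print(transform_padding((0, 0, 0, 0, 1, 2, 3, 4)))
# [0, 0, 1, 3, 0, 0, 2, 4]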
<SYSTEM_TASK:> Helper function to convert string to list. <END_TASK> <USER_TASK:> Description: def convert_string_to_list(string_val): """Helper function to convert string to list. Used to convert shape attribute string to list format. """
result_list = []

list_string = string_val.split(',')
for val in list_string:
    val = str(val.strip())
    val = val.replace("(", "")
    val = val.replace(")", "")
    val = val.replace("L", "")
    val = val.replace("[", "")
    val = val.replace("]", "")
    if val not in ("", "None"):
        result_list.append(int(val))

return result_list
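Example (pure Python), covering both tuple-style and list-style shape strings:

print(convert_string_to_list('(1, 4L, 256)'))  # [1, 4, 256]
print(convert_string_to_list('[1, None, 3]'))  # [1, 3] -- "None" entries are dropped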
<SYSTEM_TASK:> Helper function to get inputs <END_TASK> <USER_TASK:> Description: def get_inputs(node, kwargs): """Helper function to get inputs"""
name = node["name"] proc_nodes = kwargs["proc_nodes"] index_lookup = kwargs["index_lookup"] inputs = node["inputs"] attrs = node.get("attrs", {}) input_nodes = [] for ip in inputs: input_node_id = index_lookup[ip[0]] input_nodes.append(proc_nodes[input_node_id].name) return name, input_nodes, attrs
<SYSTEM_TASK:> Helper function to create a basic operator <END_TASK> <USER_TASK:> Description: def create_basic_op_node(op_name, node, kwargs): """Helper function to create a basic operator node that doesn't contain op specific attrs"""
name, input_nodes, _ = get_inputs(node, kwargs)

node = onnx.helper.make_node(
    op_name,
    input_nodes,
    [name],
    name=name
)
return [node]
<SYSTEM_TASK:> Helper function to convert weights and inputs. <END_TASK> <USER_TASK:> Description: def convert_weights_and_inputs(node, **kwargs): """Helper function to convert weights and inputs. """
name, _, _ = get_inputs(node, kwargs)

if kwargs["is_input"] is False:
    weights = kwargs["weights"]
    initializer = kwargs["initializer"]
    np_arr = weights[name]
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
    dims = np.shape(np_arr)

    tensor_node = onnx.helper.make_tensor_value_info(name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=name,
            data_type=data_type,
            dims=dims,
            vals=np_arr.flatten().tolist(),
            raw=False,
        )
    )

    return [tensor_node]
else:
    tval_node = onnx.helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
    return [tval_node]
<SYSTEM_TASK:> Map MXNet's convolution operator attributes to onnx's Conv operator <END_TASK> <USER_TASK:> Description: def convert_convolution(node, **kwargs): """Map MXNet's convolution operator attributes to onnx's Conv operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

kernel_dims = list(parse_helper(attrs, "kernel"))
stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
num_group = int(attrs.get("num_group", 1))
dilations = list(parse_helper(attrs, "dilate", [1, 1]))

pad_dims = pad_dims + pad_dims

conv_node = onnx.helper.make_node(
    "Conv",
    inputs=input_nodes,
    outputs=[name],
    kernel_shape=kernel_dims,
    strides=stride_dims,
    dilations=dilations,
    pads=pad_dims,
    group=num_group,
    name=name
)

return [conv_node]
<SYSTEM_TASK:> Map MXNet's deconvolution operator attributes to onnx's ConvTranspose operator <END_TASK> <USER_TASK:> Description: def convert_deconvolution(node, **kwargs): """Map MXNet's deconvolution operator attributes to onnx's ConvTranspose operator and return the created node. """
name, inputs, attrs = get_inputs(node, kwargs)

kernel_dims = list(parse_helper(attrs, "kernel"))
stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
num_group = int(attrs.get("num_group", 1))
dilations = list(parse_helper(attrs, "dilate", [1, 1]))
adj_dims = list(parse_helper(attrs, "adj", [0, 0]))

pad_dims = pad_dims + pad_dims

deconv_node = onnx.helper.make_node(
    "ConvTranspose",
    inputs=inputs,
    outputs=[name],
    kernel_shape=kernel_dims,
    strides=stride_dims,
    dilations=dilations,
    output_padding=adj_dims,
    pads=pad_dims,
    group=num_group,
    name=name
)

return [deconv_node]
<SYSTEM_TASK:> Map MXNet's crop operator attributes to onnx's Crop operator <END_TASK> <USER_TASK:> Description: def convert_crop(node, **kwargs): """Map MXNet's crop operator attributes to onnx's Crop operator and return the created node. """
name, inputs, attrs = get_inputs(node, kwargs)

num_inputs = len(inputs)

y, x = list(parse_helper(attrs, "offset", [0, 0]))
h, w = list(parse_helper(attrs, "h_w", [0, 0]))
if num_inputs > 1:
    h, w = kwargs["out_shape"][-2:]
border = [x, y, x + w, y + h]

crop_node = onnx.helper.make_node(
    "Crop",
    inputs=[inputs[0]],
    outputs=[name],
    border=border,
    scale=[1, 1],
    name=name
)

logging.warning(
    "Using an experimental ONNX operator: Crop. "
    "Its definition can change.")
return [crop_node]
<SYSTEM_TASK:> Map MXNet's FullyConnected operator attributes to onnx's Gemm operator <END_TASK> <USER_TASK:> Description: def convert_fully_connected(node, **kwargs): """Map MXNet's FullyConnected operator attributes to onnx's Gemm operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

initializer = kwargs["initializer"]
no_bias = get_boolean_attribute_value(attrs, "no_bias")

fcnode = []

op_name = "flatten_" + str(kwargs["idx"])
flatten_node = onnx.helper.make_node(
    'Flatten',
    inputs=[input_nodes[0]],
    outputs=[op_name],
    name=op_name
)

input_nodes[0] = op_name
fcnode.append(flatten_node)

if no_bias:
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
    bias_name = "bias" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
    initializer.append(
        onnx.helper.make_tensor(
            name=bias_name,
            data_type=data_type,
            dims=(1,),
            vals=[0],
            raw=False,
        )
    )
    input_nodes.append(bias_name)
    fcnode.append(tensor_node)

node = onnx.helper.make_node(
    "Gemm",
    input_nodes,  # input (A, B, C) - C can be in place
    [name],       # output
    alpha=1.0,
    beta=1.0,
    transA=False,
    transB=True,
    name=name
)

fcnode.append(node)

return fcnode
<SYSTEM_TASK:> Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator <END_TASK> <USER_TASK:> Description: def convert_batchnorm(node, **kwargs): """Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

momentum = float(attrs.get("momentum", 0.9))
eps = float(attrs.get("eps", 0.001))

bn_node = onnx.helper.make_node(
    "BatchNormalization",
    input_nodes,
    [name],
    name=name,
    epsilon=eps,
    momentum=momentum,
    # MXNet computes mean and variance per feature for batchnorm.
    # Default for ONNX is across all spatial features, so disable the
    # spatial parameter.
    spatial=0
)
return [bn_node]
<SYSTEM_TASK:> Map MXNet's pad operator attributes to onnx's Pad operator <END_TASK> <USER_TASK:> Description: def convert_pad(node, **kwargs): """Map MXNet's pad operator attributes to onnx's Pad operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

mxnet_pad_width = convert_string_to_list(attrs.get("pad_width"))
onnx_pad_width = transform_padding(mxnet_pad_width)

pad_mode = attrs.get("mode")

if pad_mode == "constant":
    pad_value = float(attrs.get("constant_value")) \
        if "constant_value" in attrs else 0.0
    node = onnx.helper.make_node(
        'Pad',
        inputs=input_nodes,
        outputs=[name],
        mode='constant',
        value=pad_value,
        pads=onnx_pad_width,
        name=name
    )
else:
    node = onnx.helper.make_node(
        'Pad',
        inputs=input_nodes,
        outputs=[name],
        mode=pad_mode,
        pads=onnx_pad_width,
        name=name
    )

return [node]
<SYSTEM_TASK:> create extra transpose node for dot operator <END_TASK> <USER_TASK:> Description: def create_helper_trans_node(op_name, input_node, node_name): """create extra transpose node for dot operator"""
node_name = op_name + "_" + node_name
trans_node = onnx.helper.make_node(
    'Transpose',
    inputs=[input_node],
    outputs=[node_name],
    name=node_name
)
return trans_node
<SYSTEM_TASK:> Map MXNet's dot operator attributes to onnx's <END_TASK> <USER_TASK:> Description: def convert_dot(node, **kwargs): """Map MXNet's dot operator attributes to onnx's MatMul and Transpose operators based on the values set for transpose_a, transpose_b attributes."""
name, input_nodes, attrs = get_inputs(node, kwargs)
input_node_a = input_nodes[0]
input_node_b = input_nodes[1]

trans_a_node = None
trans_b_node = None

trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")

op_name = "transpose" + str(kwargs["idx"])

if trans_a:
    trans_a_node = create_helper_trans_node(op_name, input_nodes[0], 'a')
    input_node_a = op_name + "_a"

if trans_b:
    trans_b_node = create_helper_trans_node(op_name, input_nodes[1], 'b')
    input_node_b = op_name + "_b"

matmul_node = onnx.helper.make_node(
    'MatMul',
    inputs=[input_node_a, input_node_b],
    outputs=[name],
    name=name
)

if not trans_a and not trans_b:
    return [matmul_node]
elif trans_a and not trans_b:
    return [trans_a_node, matmul_node]
elif trans_b and not trans_a:
    return [trans_b_node, matmul_node]
else:
    return [trans_a_node, trans_b_node, matmul_node]
<SYSTEM_TASK:> Map MXNet's _linalg_gemm2 operator attributes to onnx's <END_TASK> <USER_TASK:> Description: def convert_linalg_gemm2(node, **kwargs): """Map MXNet's _linalg_gemm2 operator attributes to onnx's MatMul and Transpose operators based on the values set for transpose_a, transpose_b attributes. Return multiple nodes created. """
name, input_nodes, attrs = get_inputs(node, kwargs)

# Getting the attributes and assigning default values.
alpha = float(attrs.get("alpha", 1.0))
trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")

op_name = "transpose" + str(kwargs["idx"])

if alpha == 1.0 and trans_a == 0 and trans_b == 0:
    matmul_node = onnx.helper.make_node(
        'MatMul',
        inputs=input_nodes,
        outputs=[name],
        name=name
    )
    return [matmul_node]
elif trans_a == 1 and trans_b == 0:
    node_name = op_name + "_a"
    trans_a_node = onnx.helper.make_node(
        'Transpose',
        inputs=[input_nodes[0]],
        outputs=[op_name + "_a"],
        name=node_name
    )
    matmul_node = onnx.helper.make_node(
        'MatMul',
        inputs=[node_name, input_nodes[1]],
        outputs=[name],
        name=name
    )
    return [trans_a_node, matmul_node]
elif trans_a == 0 and trans_b == 1:
    node_name = op_name + "_b"
    trans_b_node = onnx.helper.make_node(
        'Transpose',
        inputs=[input_nodes[1]],
        outputs=[op_name + "_b"],
        name=node_name
    )
    matmul_node = onnx.helper.make_node(
        'MatMul',
        inputs=[input_nodes[0], node_name],
        outputs=[name],
        name=name
    )
    return [trans_b_node, matmul_node]
else:
    node_name_a = op_name + "_a"
    trans_a_node = onnx.helper.make_node(
        'Transpose',
        inputs=[input_nodes[0]],
        outputs=[op_name + "_a"],
        name=node_name_a
    )
    node_name_b = op_name + "_b"
    trans_b_node = onnx.helper.make_node(
        'Transpose',
        inputs=[input_nodes[1]],
        outputs=[op_name + "_b"],
        name=node_name_b
    )
    matmul_node = onnx.helper.make_node(
        'MatMul',
        inputs=input_nodes,
        outputs=[name],
        name=name
    )
    return [trans_a_node, trans_b_node, matmul_node]
<SYSTEM_TASK:> Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator <END_TASK> <USER_TASK:> Description: def convert_instancenorm(node, **kwargs): """Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator based on the input node's attributes and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

eps = float(attrs.get("eps", 0.001))

node = onnx.helper.make_node(
    'InstanceNormalization',
    inputs=input_nodes,
    outputs=[name],
    name=name,
    epsilon=eps)

return [node]
<SYSTEM_TASK:> Map MXNet's softmax operator attributes to onnx's Softmax operator <END_TASK> <USER_TASK:> Description: def convert_softmax(node, **kwargs): """Map MXNet's softmax operator attributes to onnx's Softmax operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis", -1))

softmax_node = onnx.helper.make_node(
    "Softmax",
    input_nodes,
    [name],
    axis=axis,
    name=name
)

return [softmax_node]
<SYSTEM_TASK:> Map MXNet's Concat operator attributes to onnx's Concat operator <END_TASK> <USER_TASK:> Description: def convert_concat(node, **kwargs): """Map MXNet's Concat operator attributes to onnx's Concat operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("dim", 1))
concat_node = onnx.helper.make_node(
    "Concat",
    input_nodes,
    [name],
    axis=axis,
    name=name
)
return [concat_node]
<SYSTEM_TASK:> Map MXNet's transpose operator attributes to onnx's Transpose operator <END_TASK> <USER_TASK:> Description: def convert_transpose(node, **kwargs): """Map MXNet's transpose operator attributes to onnx's Transpose operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
axes = attrs.get("axes", ())

if axes:
    axes = tuple(map(int, re.findall(r'\d+', axes)))

    transpose_node = onnx.helper.make_node(
        "Transpose",
        input_nodes,
        [name],
        perm=axes,
        name=name
    )
else:
    transpose_node = onnx.helper.make_node(
        "Transpose",
        input_nodes,
        [name],
        name=name
    )

return [transpose_node]
<SYSTEM_TASK:> Map MXNet's LRN operator attributes to onnx's LRN operator <END_TASK> <USER_TASK:> Description: def convert_lrn(node, **kwargs): """Map MXNet's LRN operator attributes to onnx's LRN operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

alpha = float(attrs.get("alpha", 0.0001))
beta = float(attrs.get("beta", 0.75))
bias = float(attrs.get("knorm", 1.0))
size = int(attrs.get("nsize"))

lrn_node = onnx.helper.make_node(
    "LRN",
    inputs=input_nodes,
    outputs=[name],
    name=name,
    alpha=alpha,
    beta=beta,
    bias=bias,
    size=size
)

return [lrn_node]
<SYSTEM_TASK:> Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator <END_TASK> <USER_TASK:> Description: def convert_l2normalization(node, **kwargs): """Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
mode = attrs.get("mode", "instance")

if mode != "channel":
    raise AttributeError("L2Normalization: ONNX currently supports channel mode only")

l2norm_node = onnx.helper.make_node(
    "LpNormalization",
    input_nodes,
    [name],
    axis=1,  # channel only
    name=name
)
return [l2norm_node]
<SYSTEM_TASK:> Map MXNet's Dropout operator attributes to onnx's Dropout operator <END_TASK> <USER_TASK:> Description: def convert_dropout(node, **kwargs): """Map MXNet's Dropout operator attributes to onnx's Dropout operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
probability = float(attrs.get("p", 0.5))

dropout_node = onnx.helper.make_node(
    "Dropout",
    input_nodes,
    [name],
    ratio=probability,
    name=name
)
return [dropout_node]
<SYSTEM_TASK:> Helper function for scalar arithmetic operations <END_TASK> <USER_TASK:> Description: def scalar_op_helper(node, op_name, **kwargs): """Helper function for scalar arithmetic operations"""
name, input_nodes, attrs = get_inputs(node, kwargs)
from onnx import numpy_helper
input_type = kwargs["in_type"]
scalar_value = np.array([attrs.get("scalar", 1)],
                        dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])

initializer = kwargs["initializer"]
flag = True
# If the input value is in initializer, just apply the scalar operation
# to it and create a new initializer
for i in initializer:
    if i.name == input_nodes[0]:
        if op_name == 'Mul':
            new_initializer = numpy_helper.to_array(i) * scalar_value[0]
        elif op_name == 'Sub':
            if name.startswith("_rminusscalar"):
                new_initializer = scalar_value[0] - numpy_helper.to_array(i)
            else:
                new_initializer = numpy_helper.to_array(i) - scalar_value[0]
        elif op_name == 'Add':
            new_initializer = numpy_helper.to_array(i) + scalar_value[0]
        elif op_name == 'Div':
            if name.startswith("_rdivscalar"):
                new_initializer = scalar_value[0] / numpy_helper.to_array(i)
            else:
                new_initializer = numpy_helper.to_array(i) / scalar_value[0]
        elif op_name == 'Pow':
            new_initializer = numpy_helper.to_array(i) ** scalar_value[0]
        flag = False
        break

# else create a new tensor of the scalar value, add it in initializer
if flag is True:
    dims = np.shape(scalar_value)

    scalar_op_name = "scalar_op" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=scalar_op_name,
            data_type=input_type,
            dims=dims,
            vals=scalar_value,
            raw=False,
        )
    )

    mul_node = onnx.helper.make_node(
        op_name,
        [input_nodes[0], scalar_op_name],
        [name],
        name=name
    )

    return [tensor_node, mul_node]
else:
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]
    dims = np.shape(new_initializer)

    new_a_node = input_nodes[0] + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=new_a_node,
            data_type=data_type,
            dims=dims,
            vals=new_initializer,
            raw=False,
        )
    )
    return [tensor_node]
<SYSTEM_TASK:> Map MXNet's argmax operator attributes to onnx's ArgMax operator <END_TASK> <USER_TASK:> Description: def convert_argmax(node, **kwargs): """Map MXNet's argmax operator attributes to onnx's ArgMax operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

axis = int(attrs.get("axis"))
keepdims = get_boolean_attribute_value(attrs, "keepdims")

node = onnx.helper.make_node(
    'ArgMax',
    inputs=input_nodes,
    axis=axis,
    keepdims=keepdims,
    outputs=[name],
    name=name
)
return [node]
<SYSTEM_TASK:> Map MXNet's Reshape operator attributes to onnx's Reshape operator. <END_TASK> <USER_TASK:> Description: def convert_reshape(node, **kwargs): """Map MXNet's Reshape operator attributes to onnx's Reshape operator. Converts output shape attribute to output shape tensor and return multiple created nodes. """
name, input_nodes, attrs = get_inputs(node, kwargs)
output_shape_list = convert_string_to_list(attrs["shape"])

initializer = kwargs["initializer"]
output_shape_np = np.array(output_shape_list, dtype='int64')
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
dims = np.shape(output_shape_np)

output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

initializer.append(
    onnx.helper.make_tensor(
        name=output_shape_name,
        data_type=data_type,
        dims=dims,
        vals=output_shape_list,
        raw=False,
    )
)

input_nodes.append(output_shape_name)

not_supported_shape = [-2, -3, -4]

for val in output_shape_list:
    if val in not_supported_shape:
        raise AttributeError("Reshape: Shape value not supported in ONNX", val)

reshape_node = onnx.helper.make_node(
    "Reshape",
    input_nodes,
    [name],
    name=name
)

return [tensor_node, reshape_node]
<SYSTEM_TASK:> Map MXNet's Cast operator attributes to onnx's Cast operator <END_TASK> <USER_TASK:> Description: def convert_cast(node, **kwargs): """Map MXNet's Cast operator attributes to onnx's Cast operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
dtype = attrs["dtype"]

# dtype can be mapped only with types from TensorProto
# float32 is mapped to float and float64 to double in onnx
# following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py
if dtype == 'float32':
    dtype = 'float'
elif dtype == 'float64':
    dtype = 'double'

node = onnx.helper.make_node(
    "Cast",
    input_nodes,
    [name],
    to=getattr(onnx.TensorProto, dtype.upper()),
    name=name,
)
return [node]
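The upper-cased dtype string indexes onnx.TensorProto directly; for instance (assuming the onnx package is installed):

import onnx
print(getattr(onnx.TensorProto, 'float'.upper()))    # 1  == onnx.TensorProto.FLOAT
print(getattr(onnx.TensorProto, 'float16'.upper()))  # 10 == onnx.TensorProto.FLOAT16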
<SYSTEM_TASK:> Map MXNet's slice_axis operator attributes to onnx's Slice operator <END_TASK> <USER_TASK:> Description: def convert_slice_axis(node, **kwargs): """Map MXNet's slice_axis operator attributes to onnx's Slice operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

axes = int(attrs.get("axis"))
starts = int(attrs.get("begin"))
ends = attrs.get("end", None)
if not ends:
    raise ValueError("Slice: ONNX doesn't support 'None' in 'end' attribute")
ends = int(ends)  # convert only after the None check; int(None) would raise TypeError

node = onnx.helper.make_node(
    "Slice",
    input_nodes,
    [name],
    axes=[axes],
    starts=[starts],
    ends=[ends],
    name=name,
)
return [node]
<SYSTEM_TASK:> Map MXNet's SliceChannel operator attributes to onnx's Squeeze or Split <END_TASK> <USER_TASK:> Description: def convert_slice_channel(node, **kwargs): """Map MXNet's SliceChannel operator attributes to onnx's Squeeze or Split operator based on squeeze_axis attribute and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

num_outputs = int(attrs.get("num_outputs"))
axis = int(attrs.get("axis", 1))
squeeze_axis = int(attrs.get("squeeze_axis", 0))

if squeeze_axis == 1 and num_outputs == 1:
    node = onnx.helper.make_node(
        "Squeeze",
        input_nodes,
        [name],
        axes=[axis],
        name=name,
    )
    return [node]
elif squeeze_axis == 0 and num_outputs > 1:
    in_shape = kwargs.get('in_shape')[0]
    split = in_shape[axis] // num_outputs
    node = onnx.helper.make_node(
        "Split",
        input_nodes,
        [name + '_output' + str(i) for i in range(num_outputs)],
        axis=axis,
        split=[split for _ in range(num_outputs)],
        name=name,
    )
    return [node]
else:
    raise NotImplementedError("SliceChannel operator with num_outputs>1 and "
                              "squeeze_axis true is not implemented.")
<SYSTEM_TASK:> Map MXNet's expand_dims operator attributes to onnx's Unsqueeze operator <END_TASK> <USER_TASK:> Description: def convert_expand_dims(node, **kwargs): """Map MXNet's expand_dims operator attributes to onnx's Unsqueeze operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))

node = onnx.helper.make_node(
    "Unsqueeze",
    input_nodes,
    [name],
    axes=[axis],
    name=name,
)
return [node]
<SYSTEM_TASK:> Map MXNet's squeeze operator attributes to onnx's squeeze operator <END_TASK> <USER_TASK:> Description: def convert_squeeze(node, **kwargs): """Map MXNet's squeeze operator attributes to onnx's squeeze operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = attrs.get("axis", None)
if not axis:
    raise AttributeError("Squeeze: Missing axis attribute: ONNX currently requires axis to "
                         "be specified for squeeze operator")
axis = convert_string_to_list(axis)

node = onnx.helper.make_node(
    "Squeeze",
    input_nodes,
    [name],
    axes=axis,
    name=name,
)
return [node]
<SYSTEM_TASK:> Map MXNet's depth_to_space operator attributes to onnx's <END_TASK> <USER_TASK:> Description: def convert_depthtospace(node, **kwargs): """Map MXNet's depth_to_space operator attributes to onnx's DepthToSpace operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)
blksize = int(attrs.get("block_size", 0))

node = onnx.helper.make_node(
    "DepthToSpace",
    input_nodes,
    [name],
    blocksize=blksize,
    name=name,
)
return [node]
<SYSTEM_TASK:> Map MXNet's square operator attributes to onnx's Pow operator <END_TASK> <USER_TASK:> Description: def convert_square(node, **kwargs): """Map MXNet's square operator attributes to onnx's Pow operator and return the created node. """
name, input_nodes, _ = get_inputs(node, kwargs)

initializer = kwargs["initializer"]
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]

power2_name = "square_tensor" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,))
initializer.append(
    onnx.helper.make_tensor(
        name=power2_name,
        data_type=data_type,
        dims=(1,),
        vals=[2],
        raw=False,
    )
)

input_nodes.append(power2_name)

node = onnx.helper.make_node(
    "Pow",
    input_nodes,
    [name],
    name=name
)
return [tensor_node, node]
<SYSTEM_TASK:> Map MXNet's sum operator attributes to onnx's ReduceSum operator <END_TASK> <USER_TASK:> Description: def convert_sum(node, **kwargs): """Map MXNet's sum operator attributes to onnx's ReduceSum operator and return the created node. """
name, input_nodes, attrs = get_inputs(node, kwargs)

mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None

keepdims = get_boolean_attribute_value(attrs, "keepdims")

if axes:
    node = onnx.helper.make_node(
        'ReduceSum',
        inputs=input_nodes,
        outputs=[name],
        axes=axes,
        keepdims=keepdims,
        name=name
    )
else:
    node = onnx.helper.make_node(
        'ReduceSum',
        inputs=input_nodes,
        outputs=[name],
        keepdims=keepdims,
        name=name
    )
return [node]
<SYSTEM_TASK:>
Map MXNet's hard_sigmoid operator attributes to onnx's HardSigmoid operator
<END_TASK>
<USER_TASK:>
Description:
def convert_hardsigmoid(node, **kwargs):
    """Map MXNet's hard_sigmoid operator attributes to onnx's HardSigmoid operator
    and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    alpha = float(attrs.get("alpha", 0.2))
    beta = float(attrs.get("beta", 0.5))

    node = onnx.helper.make_node(
        'HardSigmoid',
        input_nodes,
        [name],
        alpha=alpha,
        beta=beta,
        name=name
    )
    return [node]
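
HardSigmoid computes max(0, min(1, alpha * x + beta)); a reference implementation with the defaults above (sketch, for intuition only):

import numpy as np

def hard_sigmoid(x, alpha=0.2, beta=0.5):
    # Piecewise-linear approximation of the sigmoid.
    return np.clip(alpha * x + beta, 0.0, 1.0)

assert hard_sigmoid(np.float32(0.0)) == 0.5
assert hard_sigmoid(np.float32(10.0)) == 1.0
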
<SYSTEM_TASK:>
Map MXNet's log_softmax operator attributes to onnx's LogSoftMax operator
<END_TASK>
<USER_TASK:>
Description:
def convert_logsoftmax(node, **kwargs):
    """Map MXNet's log_softmax operator attributes to onnx's LogSoftMax operator
    and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to int
    axis = int(attrs.get("axis", -1))
    temp = attrs.get("temperature", 'None')
    if temp != 'None':
        raise AttributeError("LogSoftMax: ONNX supports only temperature=None")

    node = onnx.helper.make_node(
        'LogSoftmax',
        input_nodes,
        [name],
        axis=axis,
        name=name
    )
    return [node]
<SYSTEM_TASK:>
Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators
<END_TASK>
<USER_TASK:>
Description:
def convert_norm(node, **kwargs):
    """Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators
    and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)

    mx_axis = attrs.get("axis", None)
    axes = convert_string_to_list(str(mx_axis)) if mx_axis else None

    keepdims = get_boolean_attribute_value(attrs, "keepdims")
    ord = int(attrs.get("ord", 2))

    onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2"

    if axes:
        reduce_node = onnx.helper.make_node(
            onnx_op_name,
            input_nodes,
            [name],
            axes=axes,
            keepdims=keepdims,
            name=name
        )
        return [reduce_node]
    else:
        reduce_node = onnx.helper.make_node(
            onnx_op_name,
            input_nodes,
            [name],
            keepdims=keepdims,
            name=name
        )
        return [reduce_node]
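
ReduceL1 and ReduceL2 correspond to the ord=1 and ord=2 norms; in NumPy terms (sketch):

import numpy as np

x = np.array([[3.0, -4.0]])
assert np.allclose(np.abs(x).sum(axis=1), [7.0])          # ReduceL1 (ord=1)
assert np.allclose(np.sqrt((x ** 2).sum(axis=1)), [5.0])  # ReduceL2 (ord=2)
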
<SYSTEM_TASK:>
Map MXNet's multinomial operator attributes to onnx's
<END_TASK>
<USER_TASK:>
Description:
def convert_multinomial(node, **kwargs):
    """Map MXNet's multinomial operator attributes to onnx's
    Multinomial operator and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get("dtype", 'int32'))]
    sample_size = convert_string_to_list(attrs.get("shape", '1'))
    if len(sample_size) < 2:
        sample_size = sample_size[-1]
    else:
        raise AttributeError("ONNX currently supports integer sample_size only")
    node = onnx.helper.make_node(
        "Multinomial",
        input_nodes,
        [name],
        dtype=dtype,
        sample_size=sample_size,
        name=name,
    )
    return [node]
<SYSTEM_TASK:>
Map MXNet's random_uniform operator attributes to onnx's RandomUniform
<END_TASK>
<USER_TASK:>
Description:
def convert_random_uniform(node, **kwargs):
    """Map MXNet's random_uniform operator attributes to onnx's RandomUniform
    operator and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    low = float(attrs.get("low", 0))
    high = float(attrs.get("high", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomUniform',
        input_nodes,
        [name],
        low=low,
        high=high,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node]
<SYSTEM_TASK:>
Map MXNet's random_normal operator attributes to onnx's RandomNormal
<END_TASK>
<USER_TASK:>
Description:
def convert_random_normal(node, **kwargs):
    """Map MXNet's random_normal operator attributes to onnx's RandomNormal
    operator and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    mean = float(attrs.get("loc", 0))
    scale = float(attrs.get("scale", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomNormal',
        input_nodes,
        [name],
        mean=mean,
        scale=scale,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node]
<SYSTEM_TASK:>
Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool
<END_TASK>
<USER_TASK:>
Description:
def convert_roipooling(node, **kwargs):
    """Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool
    operator and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)

    pooled_shape = convert_string_to_list(attrs.get('pooled_size'))
    scale = float(attrs.get("spatial_scale"))

    node = onnx.helper.make_node(
        'MaxRoiPool',
        input_nodes,
        [name],
        pooled_shape=pooled_shape,
        spatial_scale=scale,
        name=name
    )
    return [node]
<SYSTEM_TASK:>
Map MXNet's Tile operator attributes to onnx's Tile
<END_TASK>
<USER_TASK:>
Description:
def convert_tile(node, **kwargs):
    """Map MXNet's Tile operator attributes to onnx's Tile
    operator and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)
    reps_list = convert_string_to_list(attrs["reps"])

    initializer = kwargs["initializer"]
    reps_shape_np = np.array(reps_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[reps_shape_np.dtype]
    dims = np.shape(reps_shape_np)

    output_shape_name = "reps_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=reps_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)
    tile_node = onnx.helper.make_node(
        "Tile",
        input_nodes,
        [name],
        name=name
    )

    return [tensor_node, tile_node]
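
ONNX Tile takes the repetition counts as a tensor input rather than an attribute, hence the initializer tensor above. The result matches NumPy's tile (sketch):

import numpy as np

x = np.array([[1, 2], [3, 4]])
# reps=(2, 3): the output repeats x twice along axis 0 and three times along axis 1.
assert np.tile(x, (2, 3)).shape == (4, 6)
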
<SYSTEM_TASK:>
Map MXNet's broadcast_to operator attributes to onnx's Expand
<END_TASK>
<USER_TASK:>
Description:
def convert_broadcast_to(node, **kwargs):
    """Map MXNet's broadcast_to operator attributes to onnx's Expand
    operator and return the created node.
    """

name, input_nodes, attrs = get_inputs(node, kwargs)
    shape_list = convert_string_to_list(attrs["shape"])

    initializer = kwargs["initializer"]
    output_shape_np = np.array(shape_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
    dims = np.shape(output_shape_np)

    output_shape_name = "expand_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=shape_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)
    expand_node = onnx.helper.make_node(
        "Expand",
        input_nodes,
        [name],
        name=name
    )

    return [tensor_node, expand_node]
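
Like Tile, ONNX Expand reads its target shape from a tensor input, and it follows NumPy broadcasting rules (sketch):

import numpy as np

x = np.array([[1.0], [2.0]])        # shape (2, 1)
y = np.broadcast_to(x, (2, 3))      # what Expand produces for shape=(2, 3)
assert y.shape == (2, 3)
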
<SYSTEM_TASK:>
Get the current executor
<END_TASK>
<USER_TASK:>
Description:
def exe(self):
    """Get the current executor

    Returns
    -------
    exe : mxnet.executor.Executor
    """

return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
<SYSTEM_TASK:>
View the internal symbols using the forward function.
<END_TASK>
<USER_TASK:>
Description:
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
    """ View the internal symbols using the forward function.

    :param sym_name:
    :param bucket_kwargs:
    :param arg_dict:
    :return:
    """

data_shapes = {k: v.shape for k, v in arg_dict.items()}
    self.switch_bucket(bucket_kwargs=bucket_kwargs,
                       data_shapes=data_shapes)
    internal_sym = self.sym.get_internals()[sym_name]
    data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
                   for k, v in self.data_shapes.items()
                   if k in internal_sym.list_arguments()}
    params = {k: v for k, v in self.params.items()
              if k in internal_sym.list_arguments()}
    aux_states = {k: v for k, v in self.aux_states.items()
                  if k in internal_sym.list_auxiliary_states()}
    exe = internal_sym.bind(ctx=self.ctx,
                            args=dict(params, **data_inputs),
                            args_grad=None,
                            grad_req='null',
                            aux_states=aux_states,
                            shared_exec=self.exe)
    for k, v in arg_dict.items():
        exe.arg_dict[k][:] = v
    exe.forward(is_train=False)
    assert 1 == len(exe.outputs)
    for output in exe.outputs:
        output.wait_to_read()
    return exe.outputs[0]
<SYSTEM_TASK:>
use zero initialization for better convergence, because it tends to output 0,
<END_TASK>
<USER_TASK:>
Description:
def init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from):
    """ use zero initialization for better convergence, because it tends to output 0,
    and the label 0 stands for background, which may occupy most of the area in
    one image.
    """

fcnxs_args = fcnxs_args_from.copy()
    fcnxs_auxs = fcnxs_auxs_from.copy()
    for k, v in fcnxs_args.items():
        if v.context != ctx:
            fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)
            v.copyto(fcnxs_args[k])
    for k, v in fcnxs_auxs.items():
        if v.context != ctx:
            fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)
            v.copyto(fcnxs_auxs[k])
    data_shape = (1, 3, 500, 500)
    arg_names = fcnxs_symbol.list_arguments()
    arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)
    rest_params = {}
    deconv_params = {}
    # this is fcn8s init from fcn16s
    if 'score_pool3_weight' in arg_names:
        rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
                            if x[0] in ['score_pool3_bias', 'score_pool3_weight']])
        deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes)
                              if x[0] in ["bigscore_weight", 'score4_weight']])
    # this is fcn16s init from fcn32s
    elif 'score_pool4_weight' in arg_names:
        rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
                            if x[0] in ['score_pool4_weight', 'score_pool4_bias']])
        deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes)
                              if x[0] in ["bigscore_weight", 'score2_weight']])
    # this is fcn32s init
    else:
        logging.error("you are initializing the fcn32s model, so you should use init_from_vgg16()")
        sys.exit()
    fcnxs_args.update(rest_params)
    for k, v in deconv_params.items():
        filt = upsample_filt(v[3])
        initw = np.zeros(v)
        # be careful: this is the slice assignment
        initw[range(v[0]), range(v[1]), :, :] = filt
        fcnxs_args[k] = mx.nd.array(initw, ctx)
    return fcnxs_args, fcnxs_auxs
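
The deconvolution branch above relies on an upsample_filt helper that is not shown here; a common bilinear-kernel implementation (an assumption — this is the usual FCN-style initialization, not necessarily the exact helper used) looks like:

import numpy as np

def upsample_filt(size):
    """Bilinear upsampling kernel of shape (size, size)."""
    factor = (size + 1) // 2
    center = factor - 1.0 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    return ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))
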
<SYSTEM_TASK:>
Creates a symbolic variable with specified name.
<END_TASK>
<USER_TASK:>
Description:
def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,
        init=None, stype=None, **kwargs):
    """Creates a symbolic variable with specified name.

    Example
    -------
    >>> data = mx.sym.Variable('data', attr={'a': 'b'})
    >>> data
    <Symbol data>
    >>> csr_data = mx.sym.Variable('csr_data', stype='csr')
    >>> csr_data
    <Symbol csr_data>
    >>> row_sparse_weight = mx.sym.Variable('weight', stype='row_sparse')
    >>> row_sparse_weight
    <Symbol weight>

    Parameters
    ----------
    name : str
        Variable name.
    attr : Dict of strings
        Additional attributes to set on the variable. Format {string : string}.
    shape : tuple
        The shape of a variable. If specified, this will be used during the shape inference.
        If one has specified a different shape for this variable using
        a keyword argument when calling shape inference, this shape information will be ignored.
    lr_mult : float
        The learning rate multiplier for input variable.
    wd_mult : float
        Weight decay multiplier for input variable.
    dtype : str or numpy.dtype
        The dtype for input variable. If not specified, this value will be inferred.
    init : initializer (mxnet.init.*)
        Initializer for this variable to (optionally) override the default initializer.
    stype : str
        The storage type of the variable, such as 'row_sparse', 'csr', 'default', etc
    kwargs : Additional attribute variables
        Additional attributes must start and end with double underscores.

    Returns
    -------
    variable : Symbol
        A symbol corresponding to an input to the computation graph.
    """

if not isinstance(name, string_types):
        raise TypeError('Expect a string for variable `name`')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))
    ret = Symbol(handle)
    if not hasattr(AttrScope._current, "value"):
        AttrScope._current.value = AttrScope()
    attr = AttrScope._current.value.get(attr)
    attr = {} if attr is None else attr
    if shape is not None:
        attr['__shape__'] = str(shape)
    if lr_mult is not None:
        attr['__lr_mult__'] = str(lr_mult)
    if wd_mult is not None:
        attr['__wd_mult__'] = str(wd_mult)
    if dtype is not None:
        attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])
    if init is not None:
        if not isinstance(init, string_types):
            init = init.dumps()
        attr['__init__'] = init
    if stype is not None:
        attr['__storage_type__'] = str(_STORAGE_TYPE_STR_TO_ID[stype])
    for k, v in kwargs.items():
        if k.startswith('__') and k.endswith('__'):
            attr[k] = str(v)
        else:
            raise ValueError('Attribute name=%s is not supported.'
                             ' Additional attributes must start and end with double underscores,'
                             ' e.g, __yourattr__' % k)
    ret._set_attr(**attr)
    return ret
<SYSTEM_TASK:>
Creates a symbol that contains a collection of other symbols, grouped together.
<END_TASK>
<USER_TASK:>
Description:
def Group(symbols):
    """Creates a symbol that contains a collection of other symbols, grouped together.

    Example
    -------
    >>> a = mx.sym.Variable('a')
    >>> b = mx.sym.Variable('b')
    >>> mx.sym.Group([a,b])
    <Symbol Grouped>

    Parameters
    ----------
    symbols : list
        List of symbols to be grouped.

    Returns
    -------
    sym : Symbol
        A group symbol.
    """

if not symbols or any(not isinstance(sym, Symbol) for sym in symbols):
        raise TypeError('Expected a list of symbols as input')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateGroup(
        mx_uint(len(symbols)),
        c_handle_array(symbols), ctypes.byref(handle)))
    return Symbol(handle)
<SYSTEM_TASK:>
Loads symbol from a JSON file.
<END_TASK>
<USER_TASK:>
Description:
def load(fname):
    """Loads symbol from a JSON file.

    You can also use pickle to do the job if you only work on python.
    The advantage of load/save is that the file is language agnostic.
    This means the file saved using save can be loaded by other language bindings of mxnet.
    You also get the benefit of being able to directly load/save from cloud storage(S3, HDFS).

    Parameters
    ----------
    fname : str
        The name of the file, examples:

        - `s3://my-bucket/path/my-s3-symbol`
        - `hdfs://my-bucket/path/my-hdfs-symbol`
        - `/path-to/my-local-symbol`

    Returns
    -------
    sym : Symbol
        The loaded symbol.

    See Also
    --------
    Symbol.save : Used to save symbol into file.
    """

if not isinstance(fname, string_types):
        raise TypeError('fname needs to be a string')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
    return Symbol(handle)
<SYSTEM_TASK:>
Returns element-wise maximum of the input elements.
<END_TASK>
<USER_TASK:>
Description:
def maximum(left, right):
    """Returns element-wise maximum of the input elements.

    Both inputs can be Symbol or scalar number.
    Broadcasting is not supported.

    Parameters
    ----------
    left : Symbol or scalar
        First symbol to be compared.
    right : Symbol or scalar
        Second symbol to be compared.

    Returns
    -------
    Symbol or scalar
        The element-wise maximum of the input symbols.

    Examples
    --------
    >>> mx.sym.maximum(2, 3.5)
    3.5
    >>> x = mx.sym.Variable('x')
    >>> y = mx.sym.Variable('y')
    >>> z = mx.sym.maximum(x, 4)
    >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
    array([  4.,   5.,   4.,  10.], dtype=float32)
    >>> z = mx.sym.maximum(x, y)
    >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
    array([ 10.,   4.], dtype=float32)
    """

if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Maximum(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._MaximumScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        return _internal._MaximumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left > right else right
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
<SYSTEM_TASK:>
Returns element-wise minimum of the input elements.
<END_TASK>
<USER_TASK:>
Description:
def minimum(left, right):
    """Returns element-wise minimum of the input elements.

    Both inputs can be Symbol or scalar number.
    Broadcasting is not supported.

    Parameters
    ----------
    left : Symbol or scalar
        First symbol to be compared.
    right : Symbol or scalar
        Second symbol to be compared.

    Returns
    -------
    Symbol or scalar
        The element-wise minimum of the input symbols.

    Examples
    --------
    >>> mx.sym.minimum(2, 3.5)
    2
    >>> x = mx.sym.Variable('x')
    >>> y = mx.sym.Variable('y')
    >>> z = mx.sym.minimum(x, 4)
    >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
    array([ 3.,  4.,  2.,  4.], dtype=float32)
    >>> z = mx.sym.minimum(x, y)
    >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
    array([ 3.,  2.], dtype=float32)
    """

if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Minimum(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._MinimumScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        return _internal._MinimumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left < right else right
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
<SYSTEM_TASK:>
Given the "legs" of a right triangle, returns its hypotenuse.
<END_TASK>
<USER_TASK:>
Description:
def hypot(left, right):
    """Given the "legs" of a right triangle, returns its hypotenuse.

    Equivalent to :math:`\\sqrt{left^2 + right^2}`, element-wise.
    Both inputs can be Symbol or scalar number.
    Broadcasting is not supported.

    Parameters
    ----------
    left : Symbol or scalar
        First leg of the triangle(s).
    right : Symbol or scalar
        Second leg of the triangle(s).

    Returns
    -------
    Symbol or scalar
        The hypotenuse of the triangle(s)

    Examples
    --------
    >>> mx.sym.hypot(3, 4)
    5.0
    >>> x = mx.sym.Variable('x')
    >>> y = mx.sym.Variable('y')
    >>> z = mx.sym.hypot(x, 4)
    >>> z.eval(x=mx.nd.array([3,5,2]))[0].asnumpy()
    array([ 5.,  6.40312433,  4.47213602], dtype=float32)
    >>> z = mx.sym.hypot(x, y)
    >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
    array([ 10.44030666,   4.47213602], dtype=float32)
    """

if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Hypot(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._HypotScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        return _internal._HypotScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return _numpy.hypot(left, right)
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
<SYSTEM_TASK:>
Returns a new symbol of 2-D shape, filled with ones on the diagonal and zeros elsewhere.
<END_TASK>
<USER_TASK:>
Description:
def eye(N, M=0, k=0, dtype=None, **kwargs):
    """Returns a new symbol of 2-D shape, filled with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N: int
        Number of rows in the output.
    M: int, optional
        Number of columns in the output. If 0, defaults to N.
    k: int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal,
        and a negative value to a lower diagonal.
    dtype : str or numpy.dtype, optional
        The value type of the inner value, default to ``np.float32``.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """

if dtype is None:
        dtype = _numpy.float32
    return _internal._eye(N, M, k, dtype=dtype, **kwargs)
<SYSTEM_TASK:>
Returns a new symbol of given shape and type, filled with zeros.
<END_TASK>
<USER_TASK:>
Description:
def zeros(shape, dtype=None, **kwargs):
    """Returns a new symbol of given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        The value type of the inner value, default to ``np.float32``.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """

if dtype is None:
        dtype = _numpy.float32
    return _internal._zeros(shape=shape, dtype=dtype, **kwargs)
<SYSTEM_TASK:>
Returns a new symbol of given shape and type, filled with ones.
<END_TASK>
<USER_TASK:>
Description:
def ones(shape, dtype=None, **kwargs):
    """Returns a new symbol of given shape and type, filled with ones.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        The value type of the inner value, default to ``np.float32``.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """

if dtype is None:
        dtype = _numpy.float32
    return _internal._ones(shape=shape, dtype=dtype, **kwargs)
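
A quick usage sketch for the three constructors above (assumes a standard mxnet install imported as mx; illustrative only):

import mxnet as mx

print(mx.sym.eye(3, k=1).eval(ctx=mx.cpu())[0].asnumpy())    # ones on the first superdiagonal
print(mx.sym.zeros((2, 2)).eval(ctx=mx.cpu())[0].asnumpy())  # 2x2 of zeros
print(mx.sym.ones((2, 2)).eval(ctx=mx.cpu())[0].asnumpy())   # 2x2 of ones
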
<SYSTEM_TASK:>
Gets name string from the symbol, this function only works for non-grouped symbol.
<END_TASK>
<USER_TASK:>
Description:
def name(self):
    """Gets name string from the symbol, this function only works for non-grouped symbol.

    Returns
    -------
    value : str
        The name of this symbol, returns ``None`` for grouped symbol.
    """

ret = ctypes.c_char_p()
    success = ctypes.c_int()
    check_call(_LIB.MXSymbolGetName(
        self.handle, ctypes.byref(ret), ctypes.byref(success)))
    if success.value != 0:
        return py_str(ret.value)
    else:
        return None
<SYSTEM_TASK:>
Returns the attribute string for corresponding input key from the symbol.
<END_TASK>
<USER_TASK:>
Description:
def attr(self, key):
    """Returns the attribute string for corresponding input key from the symbol.

    This function only works for non-grouped symbols.

    Example
    -------
    >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
    >>> data.attr('mood')
    'angry'

    Parameters
    ----------
    key : str
        The key corresponding to the desired attribute.

    Returns
    -------
    value : str
        The desired attribute value, returns ``None`` if the attribute does not exist.
    """

ret = ctypes.c_char_p()
    success = ctypes.c_int()
    check_call(_LIB.MXSymbolGetAttr(
        self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
    if success.value != 0:
        return py_str(ret.value)
    else:
        return None
<SYSTEM_TASK:>
Gets all attributes from the symbol.
<END_TASK>
<USER_TASK:>
Description:
def list_attr(self, recursive=False):
    """Gets all attributes from the symbol.

    Example
    -------
    >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
    >>> data.list_attr()
    {'mood': 'angry'}

    Returns
    -------
    ret : Dict of str to str
        A dictionary mapping attribute keys to values.
    """

if recursive:
        raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. "
                                 "Please use attr_dict instead.")
    size = mx_uint()
    pairs = ctypes.POINTER(ctypes.c_char_p)()
    f_handle = _LIB.MXSymbolListAttrShallow
    check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
    return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}
<SYSTEM_TASK:>
Recursively gets all attributes from the symbol and its children.
<END_TASK>
<USER_TASK:>
Description:
def attr_dict(self):
    """Recursively gets all attributes from the symbol and its children.

    Example
    -------
    >>> a = mx.sym.Variable('a', attr={'a1':'a2'})
    >>> b = mx.sym.Variable('b', attr={'b1':'b2'})
    >>> c = a+b
    >>> c.attr_dict()
    {'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}}

    Returns
    -------
    ret : Dict of str to dict
        There is a key in the returned dict for every child with non-empty attribute set.
        For each symbol, the name of the symbol is its key in the dict
        and the corresponding value is that symbol's attribute list (itself a dictionary).
    """

size = mx_uint()
    pairs = ctypes.POINTER(ctypes.c_char_p)()
    f_handle = _LIB.MXSymbolListAttr
    check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
    ret = {}
    for i in range(size.value):
        name, key = py_str(pairs[i * 2]).split('$')
        val = py_str(pairs[i * 2 + 1])
        if name not in ret:
            ret[name] = {}
        ret[name][key] = val
    return ret
<SYSTEM_TASK:>
Sets an attribute of the symbol.
<END_TASK>
<USER_TASK:>
Description:
def _set_attr(self, **kwargs):
    """Sets an attribute of the symbol.

    For example, ``A._set_attr(foo="bar")`` adds the mapping ``"{foo: bar}"``
    to the symbol's attribute dictionary.

    Parameters
    ----------
    **kwargs
        The attributes to set
    """

for key, value in kwargs.items():
        if not isinstance(value, string_types):
            raise ValueError("Set Attr only accepts string values")
        check_call(_LIB.MXSymbolSetAttr(
            self.handle, c_str(key), c_str(str(value))))
<SYSTEM_TASK:>
Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of
<END_TASK>
<USER_TASK:>
Description:
def get_internals(self):
    """Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of
    outputs of all of the internal nodes.

    Consider the following code:

    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> d = c.get_internals()
    >>> d
    <Symbol Grouped>
    >>> d.list_outputs()
    ['a', 'b', '_plus4_output']

    Returns
    -------
    sgroup : Symbol
        A symbol group containing all internal and leaf nodes of the computation graph
        used to compute the symbol.
    """

handle = SymbolHandle()
    check_call(_LIB.MXSymbolGetInternals(
        self.handle, ctypes.byref(handle)))
    return Symbol(handle=handle)
<SYSTEM_TASK:>
Gets a new grouped symbol whose output contains
<END_TASK>
<USER_TASK:>
Description:
def get_children(self):
    """Gets a new grouped symbol whose output contains
    inputs to output nodes of the original symbol.

    Example
    -------
    >>> x = mx.sym.Variable('x')
    >>> y = mx.sym.Variable('y')
    >>> z = mx.sym.Variable('z')
    >>> a = y+z
    >>> b = x+a
    >>> b.get_children()
    <Symbol Grouped>
    >>> b.get_children().list_outputs()
    ['x', '_plus10_output']
    >>> b.get_children().get_children().list_outputs()
    ['y', 'z']

    Returns
    -------
    sgroup : Symbol or None
        The children of the head node. If the symbol has no
        inputs then ``None`` will be returned.
    """

handle = SymbolHandle()
    check_call(_LIB.MXSymbolGetChildren(
        self.handle, ctypes.byref(handle)))
    ret = Symbol(handle=handle)
    if len(ret.list_outputs()) == 0:
        return None
    return ret
<SYSTEM_TASK:>
Lists all the arguments in the symbol.
<END_TASK>
<USER_TASK:>
Description:
def list_arguments(self):
    """Lists all the arguments in the symbol.

    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> c.list_arguments()
    ['a', 'b']

    Returns
    -------
    args : list of string
        List containing the names of all the arguments required to compute the symbol.
    """

size = ctypes.c_uint()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXSymbolListArguments(
        self.handle, ctypes.byref(size), ctypes.byref(sarr)))
    return [py_str(sarr[i]) for i in range(size.value)]
<SYSTEM_TASK:>
Lists all the outputs in the symbol.
<END_TASK>
<USER_TASK:>
Description:
def list_outputs(self):
    """Lists all the outputs in the symbol.

    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> c.list_outputs()
    ['_plus12_output']

    Returns
    -------
    list of str
        List of all the outputs.
        For most symbols, this list contains only the name of this symbol.
        For symbol groups, this is a list with the names of all symbols in the group.
    """

size = ctypes.c_uint()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXSymbolListOutputs(
        self.handle, ctypes.byref(size), ctypes.byref(sarr)))
    return [py_str(sarr[i]) for i in range(size.value)]
<SYSTEM_TASK:>
Lists all the auxiliary states in the symbol.
<END_TASK>
<USER_TASK:>
Description:
def list_auxiliary_states(self):
    """Lists all the auxiliary states in the symbol.

    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> c.list_auxiliary_states()
    []

    Example of auxiliary states in `BatchNorm`.

    >>> data = mx.symbol.Variable('data')
    >>> weight = mx.sym.Variable(name='fc1_weight')
    >>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
    >>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
    >>> fc2.list_auxiliary_states()
    ['batchnorm0_moving_mean', 'batchnorm0_moving_var']

    Returns
    -------
    aux_states : list of str
        List of the auxiliary states in input symbol.

    Notes
    -----
    Auxiliary states are special states of symbols that do not correspond to an argument,
    and are not updated by gradient descent. Common examples of auxiliary states
    include the `moving_mean` and `moving_variance` in `BatchNorm`.
    Most operators do not have auxiliary states.
    """

size = ctypes.c_uint()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXSymbolListAuxiliaryStates(
        self.handle, ctypes.byref(size), ctypes.byref(sarr)))
    return [py_str(sarr[i]) for i in range(size.value)]
<SYSTEM_TASK:>
Lists all arguments and auxiliary states of this Symbol.
<END_TASK>
<USER_TASK:>
Description:
def list_inputs(self):
    """Lists all arguments and auxiliary states of this Symbol.

    Returns
    -------
    inputs : list of str
        List of all inputs.

    Examples
    --------
    >>> bn = mx.sym.BatchNorm(name='bn')
    >>> bn.list_arguments()
    ['bn_data', 'bn_gamma', 'bn_beta']
    >>> bn.list_auxiliary_states()
    ['bn_moving_mean', 'bn_moving_var']
    >>> bn.list_inputs()
    ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']
    """

size = ctypes.c_uint()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.NNSymbolListInputNames(
        self.handle, 0, ctypes.byref(size), ctypes.byref(sarr)))
    return [py_str(sarr[i]) for i in range(size.value)]
<SYSTEM_TASK:>
Infers the type of all arguments and all outputs, given the known types
<END_TASK>
<USER_TASK:>
Description:
def infer_type(self, *args, **kwargs):
    """Infers the type of all arguments and all outputs, given the known types
    for some arguments.

    This function takes the known types of some arguments in either positional way
    or keyword argument way as input. It returns a tuple of `None` values
    if there is not enough information to deduce the missing types.

    Inconsistencies in the known types will cause an error to be raised.

    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> arg_types, out_types, aux_types = c.infer_type(a='float32')
    >>> arg_types
    [<type 'numpy.float32'>, <type 'numpy.float32'>]
    >>> out_types
    [<type 'numpy.float32'>]
    >>> aux_types
    []

    Parameters
    ----------
    *args :
        Type of known arguments in a positional way.
        Unknown type can be marked as None.
    **kwargs :
        Keyword arguments of known types.

    Returns
    -------
    arg_types : list of numpy.dtype or None
        List of argument types.
        The order is same as the order of list_arguments().
    out_types : list of numpy.dtype or None
        List of output types.
        The order is same as the order of list_outputs().
    aux_types : list of numpy.dtype or None
        List of auxiliary state types.
        The order is same as the order of list_auxiliary_states().
    """

try:
        res = self._infer_type_impl(False, *args, **kwargs)
        if res[1] is None:
            arg_types, _, _ = self._infer_type_impl(True, *args, **kwargs)
            arg_names = self.list_arguments()
            unknowns = []
            for name, dtype in zip(arg_names, arg_types):
                if not dtype:
                    if len(unknowns) >= 10:
                        unknowns.append('...')
                        break
                    unknowns.append('%s: %s' % (name, str(dtype)))
            warnings.warn(
                "Cannot decide type for the following arguments. " +
                "Consider providing them as input:\n\t" +
                "\n\t".join(unknowns), stacklevel=2)
        return res
    except MXNetError:
        print("infer_type error. Arguments:")
        for i, arg in enumerate(args):
            print("  #%d: %s" % (i, arg))
        for k, v in kwargs.items():
            print("  %s: %s" % (k, v))
        raise
<SYSTEM_TASK:>
Infers the shapes of all arguments and all outputs given the known shapes of
<END_TASK>
<USER_TASK:>
Description:
def infer_shape(self, *args, **kwargs):
    """Infers the shapes of all arguments and all outputs given the known shapes of
    some arguments.

    This function takes the known shapes of some arguments in either positional way
    or keyword argument way as input. It returns a tuple of `None` values
    if there is not enough information to deduce the missing shapes.

    Example
    -------
    >>> a = mx.sym.var('a')
    >>> b = mx.sym.var('b')
    >>> c = a + b
    >>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
    >>> arg_shapes
    [(3L, 3L), (3L, 3L)]
    >>> out_shapes
    [(3L, 3L)]
    >>> aux_shapes
    []
    >>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
    (None, None, None)

    Inconsistencies in the known shapes will cause an error to be raised.
    See the following example:

    >>> data = mx.sym.Variable('data')
    >>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
    >>> out = mx.sym.Activation(data=out, act_type='relu')
    >>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
    >>> weight_shape = (1, 100)
    >>> data_shape = (100, 100)
    >>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
    Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)

    Parameters
    ----------
    *args :
        Shape of arguments in a positional way.
        Unknown shape can be marked as None.
    **kwargs :
        Keyword arguments of the known shapes.

    Returns
    -------
    arg_shapes : list of tuple or None
        List of argument shapes.
        The order is same as the order of list_arguments().
    out_shapes : list of tuple or None
        List of output shapes.
        The order is same as the order of list_outputs().
    aux_shapes : list of tuple or None
        List of auxiliary state shapes.
        The order is same as the order of list_auxiliary_states().
    """

try:
        res = self._infer_shape_impl(False, *args, **kwargs)
        if res[1] is None:
            arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)
            arg_names = self.list_arguments()
            unknowns = []
            for name, shape in zip(arg_names, arg_shapes):
                if is_np_compat():
                    shape_is_none = not shape or -1 in shape
                else:
                    shape_is_none = not shape or 0 in shape
                if shape_is_none:
                    if len(unknowns) >= 10:
                        unknowns.append('...')
                        break
                    unknowns.append('%s: %s' % (name, str(shape)))
            warnings.warn(
                "Cannot decide shape for the following arguments " +
                "(0s in shape means unknown dimensions). " +
                "Consider providing them as input:\n\t" +
                "\n\t".join(unknowns), stacklevel=2)
        return res
    except MXNetError:
        print("infer_shape error. Arguments:")
        for i, arg in enumerate(args):
            print("  #%d: %s" % (i, arg))
        for k, v in kwargs.items():
            print("  %s: %s" % (k, v))
        raise