Please provide a description of the function:

def empty(stype, shape, ctx=None, dtype=None):
    if isinstance(shape, int):
        shape = (shape, )
    if ctx is None:
        ctx = current_context()
    if dtype is None:
        dtype = mx_real_t
    assert(stype is not None)
    if stype in ('csr', 'row_sparse'):
        return zeros(stype, shape, ctx=ctx, dtype=dtype)
    else:
        raise Exception("unknown stype : " + str(stype))

"""Returns a new array of given shape and type, without initializing entries.

Parameters
----------
stype : string
    The storage type of the empty array, such as 'row_sparse', 'csr', etc.
shape : int or tuple of int
    The shape of the empty array.
ctx : Context, optional
    An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
    An optional value type (default is `float32`).

Returns
-------
CSRNDArray or RowSparseNDArray
    A created array.
"""
Please provide a description of the function:def array(source_array, ctx=None, dtype=None): ctx = current_context() if ctx is None else ctx if isinstance(source_array, NDArray): assert(source_array.stype != 'default'), \ "Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray" # prepare dtype and ctx based on source_array, if not provided dtype = _prepare_default_dtype(source_array, dtype) # if both dtype and ctx are different from source_array, we cannot copy directly if source_array.dtype != dtype and source_array.context != ctx: arr = empty(source_array.stype, source_array.shape, dtype=dtype) arr[:] = source_array arr = arr.as_in_context(ctx) else: arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx) arr[:] = source_array return arr elif spsp and isinstance(source_array, spsp.csr.csr_matrix): # TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy # preprocess scipy csr to canonical form csr = source_array.sorted_indices() csr.sum_duplicates() dtype = _prepare_default_dtype(source_array, dtype) return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \ dtype=dtype, ctx=ctx) elif isinstance(source_array, (np.ndarray, np.generic)): raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ", type(source_array)) else: raise ValueError("Unexpected source_array type: ", type(source_array))
[ "Creates a sparse array from any object exposing the array interface.\n\n Parameters\n ----------\n source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix\n The source sparse array\n ctx : Context, optional\n The default context is ``source_array.context`` if ``source_array`` is an NDArray. \\\n The current default context otherwise.\n dtype : str or numpy.dtype, optional\n The data type of the output array. The default dtype is ``source_array.dtype``\n if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \\\n `float32` otherwise.\n\n Returns\n -------\n RowSparseNDArray or CSRNDArray\n An array with the same contents as the `source_array`.\n\n Examples\n --------\n >>> import scipy.sparse as spsp\n >>> csr = spsp.csr_matrix((2, 100))\n >>> mx.nd.sparse.array(csr)\n <CSRNDArray 2x100 @cpu(0)>\n >>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))\n <CSRNDArray 3x2 @cpu(0)>\n >>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))\n <RowSparseNDArray 3x2 @cpu(0)>\n " ]
Please provide a description of the function:

def _aux_type(self, i):
    aux_type = ctypes.c_int()
    check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
    return _DTYPE_MX_TO_NP[aux_type.value]

"""Data-type of the array's ith aux data.

Returns
-------
numpy.dtype
    This BaseSparseNDArray's aux data type.
"""
Please provide a description of the function:

def _aux_types(self):
    aux_types = []
    num_aux = self._num_aux
    for i in range(num_aux):
        aux_types.append(self._aux_type(i))
    return aux_types

"""The data types of the aux data for the BaseSparseNDArray."""
Please provide a description of the function:

def astype(self, dtype, copy=True):
    if not copy and np.dtype(dtype) == self.dtype:
        return self

    res = zeros(shape=self.shape, ctx=self.context, dtype=dtype, stype=self.stype)
    self.copyto(res)
    return res

"""Return a copy of the array after casting to a specified type.

Parameters
----------
dtype : numpy.dtype or str
    The type of the returned array.
copy : bool
    Default `True`. By default, astype always returns a newly
    allocated ndarray on the same context. If this is set to
    `False`, and the dtype requested is the same as the ndarray's
    dtype, the ndarray is returned instead of a copy.

Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
Please provide a description of the function:

def check_format(self, full_check=True):
    check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))

"""Check whether the NDArray format is valid.

Parameters
----------
full_check : bool, optional
    If `True`, rigorous check, O(N) operations. Otherwise
    basic check, O(1) operations (default True).
"""
Please provide a description of the function:

def _data(self):
    self.wait_to_read()
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
    return NDArray(hdl)

"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.

This function blocks. Do not use it in performance critical code.
"""
Please provide a description of the function:

def _aux_data(self, i):
    self.wait_to_read()
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
    return NDArray(hdl)

"""Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.

This function blocks. Do not use it in performance critical code.
"""
Please provide a description of the function:

def asscipy(self):
    data = self.data.asnumpy()
    indices = self.indices.asnumpy()
    indptr = self.indptr.asnumpy()
    if not spsp:
        raise ImportError("scipy is not available. "
                          "Please check if the scipy python bindings are installed.")
    return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)

"""Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array.

Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
"""
Please provide a description of the function:

def tostype(self, stype):
    # pylint: disable= no-member, protected-access
    if stype == 'csr':
        raise ValueError("cast_storage from row_sparse to csr is not supported")
    return op.cast_storage(self, stype=stype)

"""Return a copy of the array with chosen storage type.

Returns
-------
NDArray or RowSparseNDArray
    A copy of the array with the chosen storage stype.
"""
Please provide a description of the function:def copyto(self, other): if isinstance(other, Context): return super(RowSparseNDArray, self).copyto(other) elif isinstance(other, NDArray): stype = other.stype if stype in ('default', 'row_sparse'): return super(RowSparseNDArray, self).copyto(other) else: raise TypeError('copyto does not support destination NDArray stype ' + str(stype)) else: raise TypeError('copyto does not support type ' + str(type(other)))
[ "Copies the value of this array to another array.\n\n If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``\n and ``self.shape`` should be the same. This function copies the value from\n ``self`` to ``other``.\n\n If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on\n the target context, and the value of ``self`` is copied.\n\n Parameters\n ----------\n other : NDArray or RowSparseNDArray or Context\n The destination array or context.\n\n Returns\n -------\n NDArray or RowSparseNDArray\n The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the\n return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.\n " ]
Please provide a description of the function:def export_model(sym, params, input_shape, input_type=np.float32, onnx_file_path='model.onnx', verbose=False): try: from onnx import helper, mapping except ImportError: raise ImportError("Onnx and protobuf need to be installed. " + "Instructions to install - https://github.com/onnx/onnx") converter = MXNetGraph() data_format = np.dtype(input_type) # if input parameters are strings(file paths), load files and create symbol parameter objects if isinstance(sym, string_types) and isinstance(params, string_types): logging.info("Converting json and weight file to sym and params") sym_obj, params_obj = load_module(sym, params) onnx_graph = converter.create_onnx_graph_proto(sym_obj, params_obj, input_shape, mapping.NP_TYPE_TO_TENSOR_TYPE[data_format], verbose=verbose) elif isinstance(sym, symbol.Symbol) and isinstance(params, dict): onnx_graph = converter.create_onnx_graph_proto(sym, params, input_shape, mapping.NP_TYPE_TO_TENSOR_TYPE[data_format], verbose=verbose) else: raise ValueError("Input sym and params should either be files or objects") # Create the model (ModelProto) onnx_model = helper.make_model(onnx_graph) # Save model on disk with open(onnx_file_path, "wb") as file_handle: serialized = onnx_model.SerializeToString() file_handle.write(serialized) logging.info("Input shape of the model %s ", input_shape) logging.info("Exported ONNX file %s saved to disk", onnx_file_path) return onnx_file_path
[ "Exports the MXNet model file, passed as a parameter, into ONNX model.\n Accepts both symbol,parameter objects as well as json and params filepaths as input.\n Operator support and coverage -\n https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration\n\n Parameters\n ----------\n sym : str or symbol object\n Path to the json file or Symbol object\n params : str or symbol object\n Path to the params file or params dictionary. (Including both arg_params and aux_params)\n input_shape : List of tuple\n Input shape of the model e.g [(1,3,224,224)]\n input_type : data type\n Input data type e.g. np.float32\n onnx_file_path : str\n Path where to save the generated onnx file\n verbose : Boolean\n If true will print logs of the model conversion\n\n Returns\n -------\n onnx_file_path : str\n Onnx file path\n\n Notes\n -----\n This method is available when you ``import mxnet.contrib.onnx``\n\n " ]
Please provide a description of the function:

def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
              rhs_density, dot_func, trans_lhs, lhs_stype,
              rhs_stype, only_storage, distribution="uniform"):
    lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density,
                          distribution=distribution)
    if not only_storage:
        rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype,
                              density=rhs_density, distribution=distribution)
        out = dot_func(lhs_nd, rhs_nd, trans_lhs)
    mx.nd.waitall()

"""Benchmarking both storage and dot."""
Please provide a description of the function:def convert_mean(binaryproto_fname, output=None): mean_blob = caffe_parser.caffe_pb2.BlobProto() with open(binaryproto_fname, 'rb') as f: mean_blob.ParseFromString(f.read()) img_mean_np = np.array(mean_blob.data) img_mean_np = img_mean_np.reshape( mean_blob.channels, mean_blob.height, mean_blob.width ) # swap channels from Caffe BGR to RGB img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :] nd = mx.nd.array(img_mean_np) if output is not None: mx.nd.save(output, {"mean_image": nd}) return nd
[ "Convert caffe mean\n\n Parameters\n ----------\n binaryproto_fname : str\n Filename of the mean\n output : str, optional\n Save the mean into mxnet's format\n\n Returns\n -------\n NDArray\n Mean in ndarray\n " ]
Please provide a description of the function:

def get_densenet(num_layers, pretrained=False, ctx=cpu(),
                 root=os.path.join(base.data_dir(), 'models'), **kwargs):
    num_init_features, growth_rate, block_config = densenet_spec[num_layers]
    net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
    if pretrained:
        from ..model_store import get_model_file
        net.load_parameters(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
    return net

"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

Parameters
----------
num_layers : int
    Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
    Whether to load the pretrained weights for model.
ctx : Context, default CPU
    The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
    Location for keeping the model parameters.
"""
Please provide a description of the function:def load_module(sym_filepath, params_filepath): if not (os.path.isfile(sym_filepath) and os.path.isfile(params_filepath)): raise ValueError("Symbol and params files provided are invalid") else: try: # reads symbol.json file from given path and # retrieves model prefix and number of epochs model_name = sym_filepath.rsplit('.', 1)[0].rsplit('-', 1)[0] params_file_list = params_filepath.rsplit('.', 1)[0].rsplit('-', 1) # Setting num_epochs to 0 if not present in filename num_epochs = 0 if len(params_file_list) == 1 else int(params_file_list[1]) except IndexError: logging.info("Model and params name should be in format: " "prefix-symbol.json, prefix-epoch.params") raise sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, num_epochs) # Merging arg and aux parameters params = {} params.update(arg_params) params.update(aux_params) return sym, params
[ "Loads the MXNet model file and\n returns MXNet symbol and params (weights).\n\n Parameters\n ----------\n json_path : str\n Path to the json file\n params_path : str\n Path to the params file\n\n Returns\n -------\n sym : MXNet symbol\n Model symbol object\n\n params : params object\n Model weights including both arg and aux params.\n " ]
Please provide a description of the function:

def import_module(module_name):
    import sys, os
    import importlib
    sys.path.append(os.path.dirname(__file__))
    return importlib.import_module(module_name)

"""Helper function to import module."""
Please provide a description of the function:def get_symbol_train(network, num_classes, from_layers, num_filters, strides, pads, sizes, ratios, normalizations=-1, steps=[], min_filter=128, nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs): label = mx.sym.Variable('label') body = import_module(network).get_symbol(num_classes, **kwargs) layers = multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=min_filter) loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \ num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \ num_channels=num_filters, clip=False, interm_layer=0, steps=steps) tmp = mx.symbol.contrib.MultiBoxTarget( *[anchor_boxes, label, cls_preds], overlap_threshold=.5, \ ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \ negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2), name="multibox_target") loc_target = tmp[0] loc_target_mask = tmp[1] cls_target = tmp[2] cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \ ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \ normalization='valid', name="cls_prob") loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \ data=loc_target_mask * (loc_preds - loc_target), scalar=1.0) loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \ normalization='valid', name="loc_loss") # monitoring training status cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label") det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \ name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk) det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out") # group output out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det]) return out
[ "Build network symbol for training SSD\n\n Parameters\n ----------\n network : str\n base network symbol name\n num_classes : int\n number of object classes not including background\n from_layers : list of str\n feature extraction layers, use '' for add extra layers\n For example:\n from_layers = ['relu4_3', 'fc7', '', '', '', '']\n which means extract feature from relu4_3 and fc7, adding 4 extra layers\n on top of fc7\n num_filters : list of int\n number of filters for extra layers, you can use -1 for extracted features,\n however, if normalization and scale is applied, the number of filter for\n that layer must be provided.\n For example:\n num_filters = [512, -1, 512, 256, 256, 256]\n strides : list of int\n strides for the 3x3 convolution appended, -1 can be used for extracted\n feature layers\n pads : list of int\n paddings for the 3x3 convolution, -1 can be used for extracted layers\n sizes : list or list of list\n [min_size, max_size] for all layers or [[], [], []...] for specific layers\n ratios : list or list of list\n [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers\n normalizations : int or list of int\n use normalizations value for all layers or [...] for specific layers,\n -1 indicate no normalizations and scales\n steps : list\n specify steps for each MultiBoxPrior layer, leave empty, it will calculate\n according to layer dimensions\n min_filter : int\n minimum number of filters used in 1x1 convolution\n nms_thresh : float\n non-maximum suppression threshold\n force_suppress : boolean\n whether suppress different class objects\n nms_topk : int\n apply NMS to top K detections\n\n Returns\n -------\n mx.Symbol\n\n " ]
Please provide a description of the function:def get_symbol(network, num_classes, from_layers, num_filters, sizes, ratios, strides, pads, normalizations=-1, steps=[], min_filter=128, nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs): body = import_module(network).get_symbol(num_classes, **kwargs) layers = multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=min_filter) loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \ num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \ num_channels=num_filters, clip=False, interm_layer=0, steps=steps) cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob') out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \ name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk) return out
[ "Build network for testing SSD\n\n Parameters\n ----------\n network : str\n base network symbol name\n num_classes : int\n number of object classes not including background\n from_layers : list of str\n feature extraction layers, use '' for add extra layers\n For example:\n from_layers = ['relu4_3', 'fc7', '', '', '', '']\n which means extract feature from relu4_3 and fc7, adding 4 extra layers\n on top of fc7\n num_filters : list of int\n number of filters for extra layers, you can use -1 for extracted features,\n however, if normalization and scale is applied, the number of filter for\n that layer must be provided.\n For example:\n num_filters = [512, -1, 512, 256, 256, 256]\n strides : list of int\n strides for the 3x3 convolution appended, -1 can be used for extracted\n feature layers\n pads : list of int\n paddings for the 3x3 convolution, -1 can be used for extracted layers\n sizes : list or list of list\n [min_size, max_size] for all layers or [[], [], []...] for specific layers\n ratios : list or list of list\n [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers\n normalizations : int or list of int\n use normalizations value for all layers or [...] for specific layers,\n -1 indicate no normalizations and scales\n steps : list\n specify steps for each MultiBoxPrior layer, leave empty, it will calculate\n according to layer dimensions\n min_filter : int\n minimum number of filters used in 1x1 convolution\n nms_thresh : float\n non-maximum suppression threshold\n force_suppress : boolean\n whether suppress different class objects\n nms_topk : int\n apply NMS to top K detections\n\n Returns\n -------\n mx.Symbol\n\n " ]
Please provide a description of the function:def _get_grad(net, image, class_id=None, conv_layer_name=None, image_grad=False): if image_grad: image.attach_grad() Conv2D.capture_layer_name = None Activation.set_guided_backprop(True) else: # Tell convviz.Conv2D which layer's output and gradient needs to be recorded Conv2D.capture_layer_name = conv_layer_name Activation.set_guided_backprop(False) # Run the network with autograd.record(train_mode=False): out = net(image) # If user didn't provide a class id, we'll use the class that the network predicted if class_id == None: model_output = out.asnumpy() class_id = np.argmax(model_output) # Create a one-hot target with class_id and backprop with the created target one_hot_target = mx.nd.one_hot(mx.nd.array([class_id]), 1000) out.backward(one_hot_target, train_mode=False) if image_grad: return image.grad[0].asnumpy() else: # Return the recorded convolution output and gradient conv_out = Conv2D.conv_output return conv_out[0].asnumpy(), conv_out.grad[0].asnumpy()
[ "This is an internal helper function that can be used for either of these\n but not both at the same time:\n 1. Record the output and gradient of output of an intermediate convolutional layer.\n 2. Record the gradients of the image.\n\n Parameters\n ----------\n image : NDArray\n Image to visuaize. This is an NDArray with the preprocessed image.\n class_id : int\n Category ID this image belongs to. If not provided,\n network's prediction will be used.\n conv_layer_name: str\n Name of the convolutional layer whose output and output's gradients need to be acptured.\n image_grad: bool\n Whether to capture gradients of the image." ]
Please provide a description of the function:

def get_conv_out_grad(net, image, class_id=None, conv_layer_name=None):
    return _get_grad(net, image, class_id, conv_layer_name, image_grad=False)

"""Get the output and gradients of output of a convolutional layer.

Parameters
----------
net : Block
    Network to use for visualization.
image : NDArray
    Preprocessed image to use for visualization.
class_id : int
    Category ID this image belongs to. If not provided,
    network's prediction will be used.
conv_layer_name : str
    Name of the convolutional layer whose output and output's gradients need to be captured.
"""
Please provide a description of the function:

def get_image_grad(net, image, class_id=None):
    return _get_grad(net, image, class_id, image_grad=True)

"""Get the gradients of the image.

Parameters
----------
net : Block
    Network to use for visualization.
image : NDArray
    Preprocessed image to use for visualization.
class_id : int
    Category ID this image belongs to. If not provided,
    network's prediction will be used.
"""
Please provide a description of the function:

def grad_to_image(gradient):
    gradient = gradient - gradient.min()
    gradient /= gradient.max()
    gradient = np.uint8(gradient * 255).transpose(1, 2, 0)
    gradient = gradient[..., ::-1]
    return gradient

"""Convert gradients of an image obtained using `get_image_grad`
into an image. This shows the parts of the image that most strongly activate
the output neurons."""
Please provide a description of the function:

def get_cam(imggrad, conv_out):
    weights = np.mean(imggrad, axis=(1, 2))
    cam = np.ones(conv_out.shape[1:], dtype=np.float32)
    for i, w in enumerate(weights):
        cam += w * conv_out[i, :, :]
    cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2]))
    cam = np.maximum(cam, 0)
    cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))
    cam = np.uint8(cam * 255)
    return cam

"""Compute CAM. Refer to section 3 of https://arxiv.org/abs/1610.02391 for details."""
Please provide a description of the function:

def get_img_heatmap(orig_img, activation_map):
    heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
    img_heatmap = np.float32(heatmap) + np.float32(orig_img)
    img_heatmap = img_heatmap / np.max(img_heatmap)
    img_heatmap *= 255
    return img_heatmap.astype(int)

"""Draw a heatmap on top of the original image using intensities from activation_map."""
Please provide a description of the function:

def to_grayscale(cv2im):
    # How strongly does each position activate the output
    grayscale_im = np.sum(np.abs(cv2im), axis=0)
    # Normalize between min and 99th percentile
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)
    grayscale_im = np.expand_dims(grayscale_im, axis=0)
    return grayscale_im

"""Convert gradients to grayscale. This gives a saliency map."""
Please provide a description of the function:

def check_label_shapes(labels, preds, wrap=False, shape=False):
    if not shape:
        label_shape, pred_shape = len(labels), len(preds)
    else:
        label_shape, pred_shape = labels.shape, preds.shape

    if label_shape != pred_shape:
        raise ValueError("Shape of labels {} does not match shape of "
                         "predictions {}".format(label_shape, pred_shape))

    if wrap:
        if isinstance(labels, ndarray.ndarray.NDArray):
            labels = [labels]
        if isinstance(preds, ndarray.ndarray.NDArray):
            preds = [preds]

    return labels, preds

"""Helper function for checking shape of label and prediction.

Parameters
----------
labels : list of `NDArray`
    The labels of the data.
preds : list of `NDArray`
    Predicted values.
wrap : boolean
    If True, wrap labels/preds in a list if they are single NDArray.
shape : boolean
    If True, check the shape of labels and preds;
    Otherwise only check their length.
"""
Please provide a description of the function:def create(metric, *args, **kwargs): if callable(metric): return CustomMetric(metric, *args, **kwargs) elif isinstance(metric, list): composite_metric = CompositeEvalMetric() for child_metric in metric: composite_metric.add(create(child_metric, *args, **kwargs)) return composite_metric return _create(metric, *args, **kwargs)
[ "Creates evaluation metric from metric names or instances of EvalMetric\n or a custom metric function.\n\n Parameters\n ----------\n metric : str or callable\n Specifies the metric to create.\n This argument must be one of the below:\n\n - Name of a metric.\n - An instance of `EvalMetric`.\n - A list, each element of which is a metric or a metric name.\n - An evaluation function that computes custom metric for a given batch of\n labels and predictions.\n *args : list\n Additional arguments to metric constructor.\n Only used when metric is str.\n **kwargs : dict\n Additional arguments to metric constructor.\n Only used when metric is str\n\n Examples\n --------\n >>> def custom_metric(label, pred):\n ... return np.mean(np.abs(label - pred))\n ...\n >>> metric1 = mx.metric.create('acc')\n >>> metric2 = mx.metric.create(custom_metric)\n >>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])\n " ]
Please provide a description of the function:def np(numpy_feval, name=None, allow_extra_outputs=False): def feval(label, pred): return numpy_feval(label, pred) feval.__name__ = numpy_feval.__name__ return CustomMetric(feval, name, allow_extra_outputs)
[ "Creates a custom evaluation metric that receives its inputs as numpy arrays.\n\n Parameters\n ----------\n numpy_feval : callable(label, pred)\n Custom evaluation function that receives labels and predictions for a minibatch\n as numpy arrays and returns the corresponding custom metric as a floating point number.\n name : str, optional\n Name of the custom metric.\n allow_extra_outputs : bool, optional\n Whether prediction output is allowed to have extra outputs. This is useful in cases\n like RNN where states are also part of output which can then be fed back to the RNN\n in the next step. By default, extra outputs are not allowed.\n\n Returns\n -------\n float\n Custom metric corresponding to the provided labels and predictions.\n\n Example\n -------\n >>> def custom_metric(label, pred):\n ... return np.mean(np.abs(label-pred))\n ...\n >>> metric = mx.metric.np(custom_metric)\n ", "Internal eval function." ]
Please provide a description of the function:

def get_config(self):
    config = self._kwargs.copy()
    config.update({
        'metric': self.__class__.__name__,
        'name': self.name,
        'output_names': self.output_names,
        'label_names': self.label_names})
    return config

"""Save configurations of metric. Can be recreated
from configs with metric.create(``**config``).
"""
Please provide a description of the function:

def update_dict(self, label, pred):
    if self.output_names is not None:
        pred = [pred[name] for name in self.output_names]
    else:
        pred = list(pred.values())

    if self.label_names is not None:
        label = [label[name] for name in self.label_names]
    else:
        label = list(label.values())

    self.update(label, pred)

"""Update the internal evaluation with named label and pred.

Parameters
----------
labels : OrderedDict of str -> NDArray
    name to array mapping for labels.
preds : OrderedDict of str -> NDArray
    name to array mapping of predicted outputs.
"""
Please provide a description of the function:

def reset(self):
    self.num_inst = 0
    self.sum_metric = 0.0
    self.global_num_inst = 0
    self.global_sum_metric = 0.0

"""Resets the internal evaluation result to initial state."""
Please provide a description of the function:

def get(self):
    if self.num_inst == 0:
        return (self.name, float('nan'))
    else:
        return (self.name, self.sum_metric / self.num_inst)

"""Gets the current evaluation result.

Returns
-------
names : list of str
    Name of the metrics.
values : list of float
    Value of the evaluations.
"""
Please provide a description of the function:def get_global(self): if self._has_global_stats: if self.global_num_inst == 0: return (self.name, float('nan')) else: return (self.name, self.global_sum_metric / self.global_num_inst) else: return self.get()
[ "Gets the current global evaluation result.\n\n Returns\n -------\n names : list of str\n Name of the metrics.\n values : list of float\n Value of the evaluations.\n " ]
Please provide a description of the function:

def get_name_value(self):
    name, value = self.get()
    if not isinstance(name, list):
        name = [name]
    if not isinstance(value, list):
        value = [value]
    return list(zip(name, value))

"""Returns zipped name and value pairs.

Returns
-------
list of tuples
    A (name, value) tuple list.
"""
Please provide a description of the function:def get_global_name_value(self): if self._has_global_stats: name, value = self.get_global() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) else: return self.get_name_value()
[ "Returns zipped name and value pairs for global results.\n\n Returns\n -------\n list of tuples\n A (name, value) tuple list.\n " ]
Please provide a description of the function:def update_binary_stats(self, label, pred): pred = pred.asnumpy() label = label.asnumpy().astype('int32') pred_label = numpy.argmax(pred, axis=1) check_label_shapes(label, pred) if len(numpy.unique(label)) > 2: raise ValueError("%s currently only supports binary classification." % self.__class__.__name__) pred_true = (pred_label == 1) pred_false = 1 - pred_true label_true = (label == 1) label_false = 1 - label_true true_pos = (pred_true * label_true).sum() false_pos = (pred_true * label_false).sum() false_neg = (pred_false * label_true).sum() true_neg = (pred_false * label_false).sum() self.true_positives += true_pos self.global_true_positives += true_pos self.false_positives += false_pos self.global_false_positives += false_pos self.false_negatives += false_neg self.global_false_negatives += false_neg self.true_negatives += true_neg self.global_true_negatives += true_neg
[ "\n Update various binary classification counts for a single (label, pred)\n pair.\n\n Parameters\n ----------\n label : `NDArray`\n The labels of the data.\n\n pred : `NDArray`\n Predicted values.\n " ]
Please provide a description of the function:

def matthewscc(self, use_global=False):
    if use_global:
        if not self.global_total_examples:
            return 0.

        true_pos = float(self.global_true_positives)
        false_pos = float(self.global_false_positives)
        false_neg = float(self.global_false_negatives)
        true_neg = float(self.global_true_negatives)
    else:
        if not self.total_examples:
            return 0.

        true_pos = float(self.true_positives)
        false_pos = float(self.false_positives)
        false_neg = float(self.false_negatives)
        true_neg = float(self.true_negatives)

    terms = [(true_pos + false_pos), (true_pos + false_neg),
             (true_neg + false_pos), (true_neg + false_neg)]
    denom = 1.
    for t in filter(lambda t: t != 0., terms):
        denom *= t
    return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)

"""Calculate the Matthews Correlation Coefficient."""
Please provide a description of the function:def transform(self, fn, lazy=True): trans = _LazyTransformDataset(self, fn) if lazy: return trans return SimpleDataset([i for i in trans])
[ "Returns a new dataset with each sample transformed by the\n transformer function `fn`.\n\n Parameters\n ----------\n fn : callable\n A transformer function that takes a sample as input and\n returns the transformed sample.\n lazy : bool, default True\n If False, transforms all samples at once. Otherwise,\n transforms each sample on demand. Note that if `fn`\n is stochastic, you must set lazy to True or you will\n get the same result on all epochs.\n\n Returns\n -------\n Dataset\n The transformed dataset.\n " ]
Please provide a description of the function:

def transform_first(self, fn, lazy=True):
    return self.transform(_TransformFirstClosure(fn), lazy)

"""Returns a new dataset with the first element of each sample
transformed by the transformer function `fn`.

This is useful, for example, when you only want to transform data
while keeping label as is.

Parameters
----------
fn : callable
    A transformer function that takes the first element of a sample
    as input and returns the transformed element.
lazy : bool, default True
    If False, transforms all samples at once. Otherwise,
    transforms each sample on demand. Note that if `fn`
    is stochastic, you must set lazy to True or you will
    get the same result on all epochs.

Returns
-------
Dataset
    The transformed dataset.
"""
Please provide a description of the function:def forward_ocr(self, img_): img_ = cv2.resize(img_, (80, 30)) img_ = img_.transpose(1, 0) print(img_.shape) img_ = img_.reshape((1, 80, 30)) print(img_.shape) # img_ = img_.reshape((80 * 30)) img_ = np.multiply(img_, 1 / 255.0) self.predictor.forward(data=img_, **self.init_state_dict) prob = self.predictor.get_output(0) label_list = [] for p in prob: print(np.argsort(p)) max_index = np.argsort(p)[::-1][0] label_list.append(max_index) return self.__get_string(label_list)
[ "Forward the image through the LSTM network model\n\n Parameters\n ----------\n img_: int of array\n\n Returns\n ----------\n label_list: string of list\n " ]
Please provide a description of the function:

def read_prototxt(fname):
    proto = caffe_pb2.NetParameter()
    with open(fname, 'r') as f:
        text_format.Merge(str(f.read()), proto)
    return proto

"""Return a caffe_pb2.NetParameter object that is defined in a prototxt file."""
Please provide a description of the function:

def get_layers(proto):
    if len(proto.layer):
        return proto.layer
    elif len(proto.layers):
        return proto.layers
    else:
        raise ValueError('Invalid proto file.')

"""Returns layers in a caffe_pb2.NetParameter object."""
Please provide a description of the function:def read_caffemodel(prototxt_fname, caffemodel_fname): if use_caffe: caffe.set_mode_cpu() net = caffe.Net(prototxt_fname, caffemodel_fname, caffe.TEST) layer_names = net._layer_names layers = net.layers return (layers, layer_names) else: proto = caffe_pb2.NetParameter() with open(caffemodel_fname, 'rb') as f: proto.ParseFromString(f.read()) return (get_layers(proto), None)
[ "Return a caffe_pb2.NetParameter object that defined in a binary\n caffemodel file\n " ]
Please provide a description of the function:def layer_iter(layers, layer_names): if use_caffe: for layer_idx, layer in enumerate(layers): layer_name = re.sub('[-/]', '_', layer_names[layer_idx]) layer_type = layer.type layer_blobs = layer.blobs yield (layer_name, layer_type, layer_blobs) else: for layer in layers: layer_name = re.sub('[-/]', '_', layer.name) layer_type = layer.type layer_blobs = layer.blobs yield (layer_name, layer_type, layer_blobs)
[ "Iterate over all layers" ]
Please provide a description of the function:def set_config(**kwargs): kk = kwargs.keys() vv = kwargs.values() check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs), c_str_array([key for key in kk]), c_str_array([str(val) for val in vv]), profiler_kvstore_handle))
[ "Set up the configure of profiler (only accepts keyword arguments).\n\n Parameters\n ----------\n filename : string,\n output file for profile data\n profile_all : boolean,\n all profile types enabled\n profile_symbolic : boolean,\n whether to profile symbolic operators\n profile_imperative : boolean,\n whether to profile imperative operators\n profile_memory : boolean,\n whether to profile memory usage\n profile_api : boolean,\n whether to profile the C API\n contiguous_dump : boolean,\n whether to periodically dump profiling data to file\n dump_period : float,\n seconds between profile data dumps\n aggregate_stats : boolean,\n whether to maintain aggregate stats in memory for console\n dump. Has some negative performance impact.\n profile_process : string\n whether to profile kvstore `server` or `worker`.\n server can only be profiled when kvstore is of type dist.\n if this is not passed, defaults to `worker`\n " ]
Please provide a description of the function:def profiler_set_config(mode='symbolic', filename='profile.json'): warnings.warn('profiler.profiler_set_config() is deprecated. ' 'Please use profiler.set_config() instead') keys = c_str_array([key for key in ["profile_" + mode, "filename"]]) values = c_str_array([str(val) for val in [True, filename]]) assert len(keys) == len(values) check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle))
[ "Set up the configure of profiler (Deprecated).\n\n Parameters\n ----------\n mode : string, optional\n Indicates whether to enable the profiler, can\n be 'symbolic', or 'all'. Defaults to `symbolic`.\n filename : string, optional\n The name of output trace file. Defaults to 'profile.json'.\n " ]
Please provide a description of the function:

def set_state(state='stop', profile_process='worker'):
    state2int = {'stop': 0, 'run': 1}
    profile_process2int = {'worker': 0, 'server': 1}
    check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]),
                                              profile_process2int[profile_process],
                                              profiler_kvstore_handle))

"""Set up the profiler state to 'run' or 'stop'.

Parameters
----------
state : string, optional
    Indicates whether to run the profiler, can
    be 'stop' or 'run'. Default is `stop`.
profile_process : string
    whether to profile kvstore `server` or `worker`.
    server can only be profiled when kvstore is of type dist.
    if this is not passed, defaults to `worker`
"""
Please provide a description of the function:def dump(finished=True, profile_process='worker'): fin = 1 if finished is True else 0 profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXDumpProcessProfile(fin, profile_process2int[profile_process], profiler_kvstore_handle))
[ "Dump profile and stop profiler. Use this to save profile\n in advance in case your program cannot exit normally.\n\n Parameters\n ----------\n finished : boolean\n Indicates whether to stop statistic output (dumping) after this dump.\n Default is True\n profile_process : string\n whether to profile kvstore `server` or `worker`.\n server can only be profiled when kvstore is of type dist.\n if this is not passed, defaults to `worker`\n " ]
Please provide a description of the function:

def dumps(reset=False):
    debug_str = ctypes.c_char_p()
    do_reset = 1 if reset is True else 0
    check_call(_LIB.MXAggregateProfileStatsPrint(ctypes.byref(debug_str), int(do_reset)))
    return py_str(debug_str.value)

"""Return a printable string of aggregate profile stats.

Parameters
----------
reset : boolean
    Indicates whether to clear the aggregate statistical data collected up to this point.
"""
Please provide a description of the function:def pause(profile_process='worker'): profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXProcessProfilePause(int(1), profile_process2int[profile_process], profiler_kvstore_handle))
[ "Pause profiling.\n\n Parameters\n ----------\n profile_process : string\n whether to profile kvstore `server` or `worker`.\n server can only be profiled when kvstore is of type dist.\n if this is not passed, defaults to `worker`\n " ]
Please provide a description of the function:def resume(profile_process='worker'): profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXProcessProfilePause(int(0), profile_process2int[profile_process], profiler_kvstore_handle))
[ "\n Resume paused profiling.\n\n Parameters\n ----------\n profile_process : string\n whether to profile kvstore `server` or `worker`.\n server can only be profiled when kvstore is of type dist.\n if this is not passed, defaults to `worker`\n " ]
Please provide a description of the function:

def set_value(self, value):
    check_call(_LIB.MXProfileSetCounter(self.handle, int(value)))

"""Set counter value.

Parameters
----------
value : int
    Value for the counter
"""
Please provide a description of the function:

def increment(self, delta=1):
    check_call(_LIB.MXProfileAdjustCounter(self.handle, int(delta)))

"""Increment counter value.

Parameters
----------
delta : int
    Amount by which to add to the counter
"""
Please provide a description of the function:

def decrement(self, delta=1):
    check_call(_LIB.MXProfileAdjustCounter(self.handle, -int(delta)))

"""Decrement counter value.

Parameters
----------
delta : int
    Amount by which to subtract from the counter
"""
Please provide a description of the function:def mark(self, scope='process'): check_call(_LIB.MXProfileSetMarker(self.domain.handle, c_str(self.name), c_str(scope)))
[ "Set up the profiler state to record operator.\n\n Parameters\n ----------\n scope : string, optional\n Indicates what scope the marker should refer to.\n Can be 'global', 'process', thread', task', and 'marker'\n Default is `process`.\n " ]
Please provide a description of the function:

def get_kernel(self, name, signature):
    hdl = CudaKernelHandle()
    is_ndarray = []
    is_const = []
    dtypes = []
    pattern = re.compile(r"^\s*(const)?\s*([\w_]+)\s*(\*)?\s*([\w_]+)?\s*$")
    args = re.sub(r"\s+", " ", signature).split(",")
    for arg in args:
        match = pattern.match(arg)
        if not match or match.groups()[1] == 'const':
            raise ValueError(
                'Invalid function prototype "%s". Must be in the '
                'form of "(const) type (*) (name)"'%arg)
        is_const.append(bool(match.groups()[0]))
        dtype = match.groups()[1]
        is_ndarray.append(bool(match.groups()[2]))
        if dtype not in _DTYPE_CPP_TO_NP:
            raise TypeError(
                "Unsupported kernel argument type %s. Supported types are: %s."%(
                    arg, ','.join(_DTYPE_CPP_TO_NP.keys())))
        dtypes.append(_DTYPE_NP_TO_MX[_DTYPE_CPP_TO_NP[dtype]])

    check_call(_LIB.MXRtcCudaKernelCreate(
        self.handle, c_str(name), len(dtypes),
        c_array_buf(ctypes.c_int, array('i', is_ndarray)),
        c_array_buf(ctypes.c_int, array('i', is_const)),
        c_array_buf(ctypes.c_int, array('i', dtypes)),
        ctypes.byref(hdl)))

    return CudaKernel(hdl, name, is_ndarray, dtypes)

"""Get CUDA kernel from compiled module.

Parameters
----------
name : str
    String name of the kernel.
signature : str
    Function signature for the kernel. For example, if a kernel is
    declared as::

        extern "C" __global__ void axpy(const float *x, double *y, int alpha)

    Then its signature should be::

        const float *x, double *y, int alpha

    or::

        const float *, double *, int

    Note that `*` in signature marks an argument as array and
    `const` marks an argument as constant (input) array.

Returns
-------
CudaKernel
    CUDA kernels that can be launched on GPUs.
"""
Please provide a description of the function:def launch(self, args, ctx, grid_dims, block_dims, shared_mem=0): assert ctx.device_type == 'gpu', "Cuda kernel can only be launched on GPU" assert len(grid_dims) == 3, "grid_dims must be a tuple of 3 integers" assert len(block_dims) == 3, "grid_dims must be a tuple of 3 integers" assert len(args) == len(self._dtypes), \ "CudaKernel(%s) expects %d arguments but got %d"%( self._name, len(self._dtypes), len(args)) void_args = [] ref_holder = [] for i, (arg, is_nd, dtype) in enumerate(zip(args, self._is_ndarray, self._dtypes)): if is_nd: assert isinstance(arg, NDArray), \ "The %d-th argument is expected to be a NDArray but got %s"%( i, type(arg)) void_args.append(arg.handle) else: assert isinstance(arg, numeric_types), \ "The %d-th argument is expected to be a number, but got %s"%( i, type(arg)) ref_holder.append(np.array(arg, dtype=dtype)) void_args.append(ref_holder[-1].ctypes.data_as(ctypes.c_void_p)) check_call(_LIB.MXRtcCudaKernelCall( self.handle, ctx.device_id, c_array(ctypes.c_void_p, void_args), mx_uint(grid_dims[0]), mx_uint(grid_dims[1]), mx_uint(grid_dims[2]), mx_uint(block_dims[0]), mx_uint(block_dims[1]), mx_uint(block_dims[2]), mx_uint(shared_mem)))
[ "Launch cuda kernel.\n\n Parameters\n ----------\n args : tuple of NDArray or numbers\n List of arguments for kernel. NDArrays are expected for pointer\n types (e.g. `float*`, `double*`) while numbers are expected for\n non-pointer types (e.g. `int`, `float`).\n ctx : Context\n The context to launch kernel on. Must be GPU context.\n grid_dims : tuple of 3 integers\n Grid dimensions for CUDA kernel.\n block_dims : tuple of 3 integers\n Block dimensions for CUDA kernel.\n shared_mem : integer, optional\n Size of dynamically allocated shared memory. Defaults to 0.\n " ]
Please provide a description of the function:

def reset(self):
    if getattr(self, 'num', None) is None:
        self.num_inst = 0
        self.sum_metric = 0.0
    else:
        self.num_inst = [0] * self.num
        self.sum_metric = [0.0] * self.num
    self.records = dict()
    self.counts = dict()

"""Clear the internal statistics to initial state."""
Please provide a description of the function:def update(self, labels, preds): def iou(x, ys): ixmin = np.maximum(ys[:, 0], x[0]) iymin = np.maximum(ys[:, 1], x[1]) ixmax = np.minimum(ys[:, 2], x[2]) iymax = np.minimum(ys[:, 3], x[3]) iw = np.maximum(ixmax - ixmin, 0.) ih = np.maximum(iymax - iymin, 0.) inters = iw * ih uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \ (ys[:, 3] - ys[:, 1]) - inters ious = inters / uni ious[uni < 1e-12] = 0 # in case bad boxes return ious # independant execution for each image for i in range(labels[0].shape[0]): # get as numpy arrays label = labels[0][i].asnumpy() if np.sum(label[:, 0] >= 0) < 1: continue pred = preds[self.pred_idx][i].asnumpy() # calculate for each class while (pred.shape[0] > 0): cid = int(pred[0, 0]) indices = np.where(pred[:, 0].astype(int) == cid)[0] if cid < 0: pred = np.delete(pred, indices, axis=0) continue dets = pred[indices] pred = np.delete(pred, indices, axis=0) # sort by score, desceding dets = dets[dets[:,1].argsort()[::-1]] records = np.hstack((dets[:, 1][:, np.newaxis], np.zeros((dets.shape[0], 1)))) # ground-truths label_indices = np.where(label[:, 0].astype(int) == cid)[0] gts = label[label_indices, :] label = np.delete(label, label_indices, axis=0) if gts.size > 0: found = [False] * gts.shape[0] for j in range(dets.shape[0]): # compute overlaps ious = iou(dets[j, 2:], gts[:, 1:5]) ovargmax = np.argmax(ious) ovmax = ious[ovargmax] if ovmax > self.ovp_thresh: if (not self.use_difficult and gts.shape[1] >= 6 and gts[ovargmax, 5] > 0): pass else: if not found[ovargmax]: records[j, -1] = 1 # tp found[ovargmax] = True else: # duplicate records[j, -1] = 2 # fp else: records[j, -1] = 2 # fp else: # no gt, mark all fp records[:, -1] = 2 # ground truth count if (not self.use_difficult and gts.shape[1] >= 6): gt_count = np.sum(gts[:, 5] < 1) else: gt_count = gts.shape[0] # now we push records to buffer # first column: score, second column: tp/fp # 0: not set(matched to difficult or something), 1: tp, 2: fp records = records[np.where(records[:, -1] > 0)[0], :] if records.size > 0: self._insert(cid, records, gt_count) # add missing class if not present in prediction while (label.shape[0] > 0): cid = int(label[0, 0]) label_indices = np.where(label[:, 0].astype(int) == cid)[0] label = np.delete(label, label_indices, axis=0) if cid < 0: continue gt_count = label_indices.size self._insert(cid, np.array([[0, 0]]), gt_count)
[ "\n Update internal records. This function now only update internal buffer,\n sum_metric and num_inst are updated in _update() function instead when\n get() is called to return results.\n\n Params:\n ----------\n labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional\n 2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])\n preds: mx.nd.array (m * 6)\n 2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)\n ", "\n Calculate intersection-over-union overlap\n Params:\n ----------\n x : numpy.array\n single box [xmin, ymin ,xmax, ymax]\n ys : numpy.array\n multiple box [[xmin, ymin, xmax, ymax], [...], ]\n Returns:\n -----------\n numpy.array\n [iou1, iou2, ...], size == ys.shape[0]\n " ]
Please provide a description of the function:def _update(self): aps = [] for k, v in self.records.items(): recall, prec = self._recall_prec(v, self.counts[k]) ap = self._average_precision(recall, prec) aps.append(ap) if self.num is not None and k < (self.num - 1): self.sum_metric[k] = ap self.num_inst[k] = 1 if self.num is None: self.num_inst = 1 self.sum_metric = np.mean(aps) else: self.num_inst[-1] = 1 self.sum_metric[-1] = np.mean(aps)
[ " update num_inst and sum_metric " ]
Please provide a description of the function:

def _recall_prec(self, record, count):
    record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
    sorted_records = record[record[:, 0].argsort()[::-1]]
    tp = np.cumsum(sorted_records[:, 1].astype(int) == 1)
    fp = np.cumsum(sorted_records[:, 1].astype(int) == 2)
    if count <= 0:
        recall = tp * 0.0
    else:
        recall = tp / float(count)
    prec = tp.astype(float) / (tp + fp)
    return recall, prec

"""Get recall and precision from internal records."""
Please provide a description of the function:

def _average_precision(self, rec, prec):
    # append sentinel values at both ends
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute precision integration ladder
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # look for recall value changes
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # sum (\delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap

"""Calculate average precision.

Params
------
rec : numpy.array
    cumulated recall
prec : numpy.array
    cumulated precision

Returns
-------
ap as float
"""
Please provide a description of the function:

def _insert(self, key, records, count):
    if key not in self.records:
        assert key not in self.counts
        self.records[key] = records
        self.counts[key] = count
    else:
        self.records[key] = np.vstack((self.records[key], records))
        assert key in self.counts
        self.counts[key] += count

"""Insert records according to key."""
Please provide a description of the function:

def _average_precision(self, rec, prec):
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
        if np.sum(rec >= t) == 0:
            p = 0
        else:
            p = np.max(prec[rec >= t])
        ap += p / 11.
    return ap

"""Calculate average precision, overriding the default one with the
special 11-point metric.

Params
------
rec : numpy.array
    cumulated recall
prec : numpy.array
    cumulated precision

Returns
-------
ap as float
"""
Please provide a description of the function:

def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='float32'):
    all_layers = symbol.get_internals()
    net = all_layers[layer_name+'_output']
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
    if dtype == 'float16':
        net = mx.sym.Cast(data=net, dtype=np.float32)
    net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
    new_args = dict({k: arg_params[k] for k in arg_params if 'fc' not in k})
    return (net, new_args)

"""symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
"""
Please provide a description of the function:def _list_images(self, root): self.labels = [] self.items = [] valid_unseen_sub_idx = [1, 2, 20, 22] skip_sub_idx = [21] if self._mode == 'train': sub_idx = ['s' + str(i) for i in range(1, 35) \ if i not in valid_unseen_sub_idx + skip_sub_idx] elif self._mode == 'valid': sub_idx = ['s' + str(i) for i in valid_unseen_sub_idx] folder_path = [] for i in sub_idx: folder_path.extend(glob.glob(os.path.join(root, i, "*"))) for folder in folder_path: filename = glob.glob(os.path.join(folder, "*")) if len(filename) != self._seq_len: continue filename.sort() label = os.path.split(folder)[-1] self.items.append((filename, label))
[ "\n Description : generate list for lip images\n " ]
Please provide a description of the function:

def align_generation(self, file_nm, padding=75):
    align = Align(self._align_root + '/' + file_nm + '.align')
    return nd.array(align.sentence(padding))

"""Description : Align to lip position."""
Please provide a description of the function:def set_verbosity(self, verbose=False, print_func=None): self._verbose = verbose if print_func is None: def asum_stat(x): return str((ndarray.norm(x)/sqrt(x.size)).asscalar()) print_func = asum_stat self._print_func = print_func return self
[ "Switch on/off verbose mode\n\n Parameters\n ----------\n verbose : bool\n switch on/off verbose mode\n print_func : function\n A function that computes statistics of initialized arrays.\n Takes an `NDArray` and returns an `str`. Defaults to mean\n absolute value str((abs(x)/size(x)).asscalar()).\n ", "returns |x|/size(x), async execution." ]
Please provide a description of the function:

def _verbose_print(self, desc, init, arr):
    if self._verbose and self._print_func:
        logging.info('Initialized %s as %s: %s',
                     desc, init, self._print_func(arr))

"""Internal verbose print function.

Parameters
----------
desc : InitDesc or str
    name of the array
init : str
    initializer pattern
arr : NDArray
    initialized array
"""
Please provide a description of the function:def _legacy_init(self, name, arr): warnings.warn( "\033[91mCalling initializer with init(str, NDArray) has been deprecated." \ "please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m", DeprecationWarning, stacklevel=3) if not isinstance(name, string_types): raise TypeError('name must be string') if not isinstance(arr, NDArray): raise TypeError('arr must be NDArray') if name.startswith('upsampling'): self._init_bilinear(name, arr) elif name.startswith('stn_loc') and name.endswith('weight'): self._init_zero(name, arr) elif name.startswith('stn_loc') and name.endswith('bias'): self._init_loc_bias(name, arr) elif name.endswith('bias'): self._init_bias(name, arr) elif name.endswith('gamma'): self._init_gamma(name, arr) elif name.endswith('beta'): self._init_beta(name, arr) elif name.endswith('weight'): self._init_weight(name, arr) elif name.endswith("moving_mean"): self._init_zero(name, arr) elif name.endswith("moving_var"): self._init_one(name, arr) elif name.endswith("moving_inv_var"): self._init_zero(name, arr) elif name.endswith("moving_avg"): self._init_zero(name, arr) elif name.endswith('min'): self._init_zero(name, arr) elif name.endswith('max'): self._init_one(name, arr) else: self._init_default(name, arr)
[ "Legacy initialization method.\n\n Parameters\n ----------\n name : str\n Name of corresponding NDArray.\n\n arr : NDArray\n NDArray to be initialized.\n " ]
Please provide a description of the function:def save_imglist(self, fname=None, root=None, shuffle=False): def progress_bar(count, total, suffix=''): import sys bar_len = 24 filled_len = int(round(bar_len * count / float(total))) percents = round(100.0 * count / float(total), 1) bar = '=' * filled_len + '-' * (bar_len - filled_len) sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix)) sys.stdout.flush() str_list = [] for index in range(self.num_images): progress_bar(index, self.num_images) label = self.label_from_index(index) if label.size < 1: continue path = self.image_path_from_index(index) if root: path = osp.relpath(path, root) str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \ + ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n') if str_list: if shuffle: import random random.shuffle(str_list) if not fname: fname = self.name + '.lst' with open(fname, 'w') as f: for line in str_list: f.write(line) else: raise RuntimeError("No image in imdb")
[ "\n save imglist to disk\n\n Parameters:\n ----------\n fname : str\n saved filename\n " ]
Please provide a description of the function:def _load_class_names(self, filename, dirname): full_path = osp.join(dirname, filename) classes = [] with open(full_path, 'r') as f: classes = [l.strip() for l in f.readlines()] return classes
[ "\n load class names from text file\n\n Parameters:\n ----------\n filename: str\n file stores class names\n dirname: str\n file directory\n " ]
Please provide a description of the function:def read_data(label, image): base_url = 'http://yann.lecun.com/exdb/mnist/' with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl: magic, num = struct.unpack(">II", flbl.read(8)) label = np.fromstring(flbl.read(), dtype=np.int8) with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg: magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16)) image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols) return (label, image)
[ "\n download and read data into numpy\n " ]
Please provide a description of the function:def get_mnist_iter(args, kv): (train_lbl, train_img) = read_data( 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz') (val_lbl, val_img) = read_data( 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz') train = mx.io.NDArrayIter( to4d(train_img), train_lbl, args.batch_size, shuffle=True) val = mx.io.NDArrayIter( to4d(val_img), val_lbl, args.batch_size) return (train, val)
[ "\n create data iterator with NDArrayIter\n " ]
Please provide a description of the function:def make_file_extension_assertion(extension): def file_extension_assertion(file_path): base, ext = os.path.splitext(file_path) if ext.lower() != extension: raise argparse.ArgumentTypeError('File must have ' + extension + ' extension') return file_path return file_extension_assertion
[ "Function factory for file extension argparse assertion\n Args:\n extension (string): the file extension to assert\n\n Returns:\n string: the supplied extension, if assertion is successful.\n\n " ]
Please provide a description of the function:def get_palette(num_colors=256): pallete = [0]*(num_colors*3) for j in range(0, num_colors): lab = j pallete[j*3+0] = 0 pallete[j*3+1] = 0 pallete[j*3+2] = 0 i = 0 while (lab > 0): pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i)) pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i)) pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i)) i = i + 1 lab >>= 3 return pallete
[ "generates the colormap for visualizing the segmentation mask\n Args:\n num_colors (int): the number of colors to generate in the output palette\n\n Returns:\n string: the supplied extension, if assertion is successful.\n\n " ]
Please provide a description of the function:def get_data(img_path): mean = np.array([123.68, 116.779, 103.939]) # (R,G,B) img = Image.open(img_path) img = np.array(img, dtype=np.float32) reshaped_mean = mean.reshape(1, 1, 3) img = img - reshaped_mean img = np.swapaxes(img, 0, 2) img = np.swapaxes(img, 1, 2) img = np.expand_dims(img, axis=0) return img
[ "get the (1, 3, h, w) np.array data for the supplied image\n Args:\n img_path (string): the input image path\n\n Returns:\n np.array: image data in a (1, 3, h, w) shape\n\n " ]
Please provide a description of the function:def main(): # Initialization variables - update to change your model and execution context model_prefix = "FCN8s_VGG16" epoch = 19 # By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU. ctx = mx.cpu() fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch) fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx) data_shape = fcnxs_args["data"].shape label_shape = (1, data_shape[2]*data_shape[3]) fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx) exector = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_args) exector.forward(is_train=False) output = exector.outputs[0] out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1))) out_img = Image.fromarray(out_img) out_img.putpalette(get_palette()) out_img.save(args.output)
[ "Module main execution" ]
Please provide a description of the function:def _check_classes(self): try: self.classes = self.imdbs[0].classes self.num_classes = len(self.classes) except AttributeError: # fine, if no classes is provided pass if self.num_classes > 0: for db in self.imdbs: assert self.classes == db.classes, "Multiple imdb must have same classes"
[ "\n check input imdbs, make sure they have same classes\n " ]
Please provide a description of the function:def _load_image_set_index(self, shuffle): self.num_images = 0 for db in self.imdbs: self.num_images += db.num_images indices = list(range(self.num_images)) if shuffle: random.shuffle(indices) return indices
[ "\n get total number of images, init indices\n\n Parameters\n ----------\n shuffle : bool\n whether to shuffle the initial indices\n " ]
Please provide a description of the function:def _locate_index(self, index): assert index >= 0 and index < self.num_images, "index out of range" pos = self.image_set_index[index] for k, v in enumerate(self.imdbs): if pos >= v.num_images: pos -= v.num_images else: return (k, pos)
[ "\n given index, find out sub-db and sub-index\n\n Parameters\n ----------\n index : int\n index of a specific image\n\n Returns\n ----------\n a tuple (sub-db, sub-index)\n " ]
Please provide a description of the function:def image_path_from_index(self, index): assert self.image_set_index is not None, "Dataset not initialized" pos = self.image_set_index[index] n_db, n_index = self._locate_index(index) return self.imdbs[n_db].image_path_from_index(n_index)
[ "\n given image index, find out full path\n\n Parameters\n ----------\n index: int\n index of a specific image\n\n Returns\n ----------\n full path of this image\n " ]
Please provide a description of the function:def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False): period = int(max(1, period)) # pylint: disable=unused-argument def _callback(iter_no, sym=None, arg=None, aux=None): if (iter_no + 1) % period == 0: mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states) return _callback
[ "Callback to checkpoint Module to prefix every epoch.\n\n Parameters\n ----------\n mod : subclass of BaseModule\n The module to checkpoint.\n prefix : str\n The file prefix for this checkpoint.\n period : int\n How many epochs to wait before checkpointing. Defaults to 1.\n save_optimizer_states : bool\n Indicates whether or not to save optimizer states for continued training.\n\n Returns\n -------\n callback : function\n The callback function that can be passed as iter_end_callback to fit.\n ", "The checkpoint function." ]
Please provide a description of the function:def do_checkpoint(prefix, period=1): period = int(max(1, period)) def _callback(iter_no, sym, arg, aux): if (iter_no + 1) % period == 0: save_checkpoint(prefix, iter_no + 1, sym, arg, aux) return _callback
[ "A callback that saves a model checkpoint every few epochs.\n Each checkpoint is made up of a couple of binary files: a model description file and a\n parameters (weights and biases) file. The model description file is named\n `prefix`--symbol.json and the parameters file is named `prefix`-`epoch_number`.params\n\n Parameters\n ----------\n prefix : str\n Prefix for the checkpoint filenames.\n period : int, optional\n Interval (number of epochs) between checkpoints. Default `period` is 1.\n\n Returns\n -------\n callback : function\n A callback function that can be passed as `epoch_end_callback` to fit.\n\n Example\n -------\n >>> module.fit(iterator, num_epoch=n_epoch,\n ... epoch_end_callback = mx.callback.do_checkpoint(\"mymodel\", 1))\n Start training with [cpu(0)]\n Epoch[0] Resetting Data Iterator\n Epoch[0] Time cost=0.100\n Saved checkpoint to \"mymodel-0001.params\"\n Epoch[1] Resetting Data Iterator\n Epoch[1] Time cost=0.060\n Saved checkpoint to \"mymodel-0002.params\"\n ", "The checkpoint function." ]
Please provide a description of the function:def log_train_metric(period, auto_reset=False): def _callback(param): if param.nbatch % period == 0 and param.eval_metric is not None: name_value = param.eval_metric.get_name_value() for name, value in name_value: logging.info('Iter[%d] Batch[%d] Train-%s=%f', param.epoch, param.nbatch, name, value) if auto_reset: param.eval_metric.reset_local() return _callback
[ "Callback to log the training evaluation result every period.\n\n Parameters\n ----------\n period : int\n The number of batch to log the training evaluation metric.\n auto_reset : bool\n Reset the metric after each log.\n\n Returns\n -------\n callback : function\n The callback function that can be passed as iter_epoch_callback to fit.\n ", "The checkpoint function." ]
Please provide a description of the function:def install(self, exe): exe.set_monitor_callback(self.stat_helper, self.monitor_all) self.exes.append(exe)
[ "install callback to executor.\n Supports installing to multiple exes.\n\n Parameters\n ----------\n exe : mx.executor.Executor\n The Executor (returned by symbol.bind) to install to.\n " ]
Please provide a description of the function:def tic(self): if self.step % self.interval == 0: for exe in self.exes: for array in exe.arg_arrays: array.wait_to_read() for array in exe.aux_arrays: array.wait_to_read() self.queue = [] self.activated = True self.step += 1
[ "Start collecting stats for current batch.\n Call before calling forward." ]
Please provide a description of the function:def toc(self): if not self.activated: return [] for exe in self.exes: for array in exe.arg_arrays: array.wait_to_read() for array in exe.aux_arrays: array.wait_to_read() for exe in self.exes: for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays): if self.re_prog.match(name): self.queue.append((self.step, name, self.stat_func(array))) for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays): if self.re_prog.match(name): self.queue.append((self.step, name, self.stat_func(array))) self.activated = False res = [] if self.sort: self.queue.sort(key=lambda x: x[1]) for n, k, v_list in self.queue: if isinstance(v_list, NDArray): v_list = [v_list] assert isinstance(v_list, list) s = '' for v in v_list: assert isinstance(v, NDArray) if v.shape == (1,): s += str(v.asscalar()) + '\t' else: s += str(v.asnumpy()) + '\t' res.append((n, k, s)) self.queue = [] return res
[ "End collecting for current batch and return results.\n Call after computation of current batch.\n\n Returns\n -------\n res : list of " ]
Please provide a description of the function:def toc_print(self): res = self.toc() for n, k, v in res: logging.info('Batch: {:7d} {:30s} {:s}'.format(n, k, v))
[ "End collecting and print results." ]
Please provide a description of the function:def make_data_iter_plan(self): "make a random data iteration plan" # truncate each bucket into multiple of batch-size bucket_n_batches = [] for i in range(len(self.data)): bucket_n_batches.append(np.floor(len(self.data[i]) / self.batch_size)) self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)] bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)]) np.random.shuffle(bucket_plan) bucket_idx_all = [np.random.permutation(len(x)) for x in self.data] self.bucket_plan = bucket_plan self.bucket_idx_all = bucket_idx_all self.bucket_curr_idx = [0 for x in self.data] self.data_buffer = [] self.label_buffer = [] for i_bucket in range(len(self.data)): if not self.model_parallel: data = np.zeros((self.batch_size, self.buckets[i_bucket])) label = np.zeros((self.batch_size, self.buckets[i_bucket])) self.data_buffer.append(data) self.label_buffer.append(label) else: data = np.zeros((self.buckets[i_bucket], self.batch_size)) self.data_buffer.append(data) if self.model_parallel: # Transpose data if model parallel for i in range(len(self.data)): bucket_data = self.data[i] self.data[i] = np.transpose(bucket_data)
[]
Please provide a description of the function:def expand(x, pending, stage): if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes return if x in pending: #print('loop found: {} in {}'.format(x, pending)) return whtspace = ' ' * expand.treeDepth expand.fileCount += 1 comment = u"//=====[{:3d}] STAGE:{:>4} {}EXPANDING: {} =====\n\n".format(expand.fileCount, stage, whtspace, x) out.write(comment.encode('ascii')) print(comment) with open(x, 'rb') as x_h: for line in x_h.readlines(): uline = line.decode('utf-8') if '#define DMLC_LOG_STACK_TRACE 1' in uline.strip(): # Do not enable stacktrace logging continue if uline.find('#include') < 0: out.write(line) continue if uline.strip().find('#include') > 0: print(uline) continue m = re1.search(uline) if not m: m = re2.search(uline) if m: path = m.groups()[0] else: m = re3.search(uline) if m: path = 'execinfo.h' else: print(uline + ' not found') continue h = path.strip('./') if "../3rdparty/" not in path else path if h.endswith('complex.h') and x.endswith('openblas_config.h'): source = '' elif h.startswith('ps/'): source = '../3rdparty/ps-lite/include/' + h else: source = find_source(h, x, stage) if not source: if (h not in blacklist and h not in sysheaders and 'mkl' not in h and 'nnpack' not in h and 'tensorrt' not in h and not h.endswith('.cuh')): sysheaders.append(h) else: expand.treeDepth += 1 expand(source, pending + [x], stage) expand.treeDepth -= 1 out.write(u"//===== EXPANDED : {} =====\n\n".format(x).encode('ascii')) history.add(x)
[ "\n Expand the pending files in the current stage.\n\n Parameters\n ----------\n x: str\n The file to expand.\n pending : str\n The list of pending files to expand.\n stage: str\n The current stage for file expansion, used for matching the prefix of files.\n " ]
Please provide a description of the function:def get_imagenet_iterator(root, batch_size, num_workers, data_shape=224, dtype='float32'): train_dir = os.path.join(root, 'train') train_transform, val_transform = get_imagenet_transforms(data_shape, dtype) logging.info("Loading image folder %s, this may take a bit long...", train_dir) train_dataset = ImageFolderDataset(train_dir, transform=train_transform) train_data = DataLoader(train_dataset, batch_size, shuffle=True, last_batch='discard', num_workers=num_workers) val_dir = os.path.join(root, 'val') if not os.path.isdir(os.path.expanduser(os.path.join(root, 'val', 'n01440764'))): user_warning = 'Make sure validation images are stored in one subdir per category, a helper script is available at https://git.io/vNQv1' raise ValueError(user_warning) logging.info("Loading image folder %s, this may take a bit long...", val_dir) val_dataset = ImageFolderDataset(val_dir, transform=val_transform) val_data = DataLoader(val_dataset, batch_size, last_batch='keep', num_workers=num_workers) return DataLoaderIter(train_data, dtype), DataLoaderIter(val_data, dtype)
[ "Dataset loader with preprocessing." ]
Please provide a description of the function:def create(embedding_name, **kwargs): create_text_embedding = registry.get_create_func(_TokenEmbedding, 'token embedding') return create_text_embedding(embedding_name, **kwargs)
[ "Creates an instance of token embedding.\n\n\n Creates a token embedding instance by loading embedding vectors from an externally hosted\n pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid\n `embedding_name` and `pretrained_file_name`, use\n `mxnet.contrib.text.embedding.get_pretrained_file_names()`.\n\n\n Parameters\n ----------\n embedding_name : str\n The token embedding name (case-insensitive).\n\n\n Returns\n -------\n An instance of `mxnet.contrib.text.glossary._TokenEmbedding`:\n A token embedding instance that loads embedding vectors from an externally hosted\n pre-trained token embedding file.\n " ]
Please provide a description of the function:def get_pretrained_file_names(embedding_name=None): text_embedding_reg = registry.get_registry(_TokenEmbedding) if embedding_name is not None: if embedding_name not in text_embedding_reg: raise KeyError('Cannot find `embedding_name` %s. Use ' '`get_pretrained_file_names(' 'embedding_name=None).keys()` to get all the valid embedding ' 'names.' % embedding_name) return list(text_embedding_reg[embedding_name].pretrained_file_name_sha1.keys()) else: return {embedding_name: list(embedding_cls.pretrained_file_name_sha1.keys()) for embedding_name, embedding_cls in registry.get_registry(_TokenEmbedding).items()}
[ "Get valid token embedding names and their pre-trained file names.\n\n\n To load token embedding vectors from an externally hosted pre-trained token embedding file,\n such as those of GloVe and FastText, one should use\n `mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`.\n This method returns all the valid names of `pretrained_file_name` for the specified\n `embedding_name`. If `embedding_name` is set to None, this method returns all the valid\n names of `embedding_name` with their associated `pretrained_file_name`.\n\n\n Parameters\n ----------\n embedding_name : str or None, default None\n The pre-trained token embedding name.\n\n\n Returns\n -------\n dict or list:\n A list of all the valid pre-trained token embedding file names (`pretrained_file_name`)\n for the specified token embedding name (`embedding_name`). If the text embeding name is\n set to None, returns a dict mapping each valid token embedding name to a list of valid\n pre-trained files (`pretrained_file_name`). They can be plugged into\n `mxnet.contrib.text.embedding.create(embedding_name,\n pretrained_file_name)`.\n " ]
Please provide a description of the function:def _load_embedding(self, pretrained_file_path, elem_delim, init_unknown_vec, encoding='utf8'): pretrained_file_path = os.path.expanduser(pretrained_file_path) if not os.path.isfile(pretrained_file_path): raise ValueError('`pretrained_file_path` must be a valid path to ' 'the pre-trained token embedding file.') logging.info('Loading pre-trained token embedding vectors from %s', pretrained_file_path) vec_len = None all_elems = [] tokens = set() loaded_unknown_vec = None line_num = 0 with io.open(pretrained_file_path, 'r', encoding=encoding) as f: for line in f: line_num += 1 elems = line.rstrip().split(elem_delim) assert len(elems) > 1, 'At line %d of the pre-trained text embedding file: the ' \ 'data format of the pre-trained token embedding file %s ' \ 'is unexpected.' % (line_num, pretrained_file_path) token, elems = elems[0], [float(i) for i in elems[1:]] if token == self.unknown_token and loaded_unknown_vec is None: loaded_unknown_vec = elems tokens.add(self.unknown_token) elif token in tokens: warnings.warn('At line %d of the pre-trained token embedding file: the ' 'embedding vector for token %s has been loaded and a duplicate ' 'embedding for the same token is seen and skipped.' % (line_num, token)) elif len(elems) == 1: warnings.warn('At line %d of the pre-trained text embedding file: token %s ' 'with 1-dimensional vector %s is likely a header and is ' 'skipped.' % (line_num, token, elems)) else: if vec_len is None: vec_len = len(elems) # Reserve a vector slot for the unknown token at the very beggining because # the unknown index is 0. all_elems.extend([0] * vec_len) else: assert len(elems) == vec_len, \ 'At line %d of the pre-trained token embedding file: the dimension ' \ 'of token %s is %d but the dimension of previous tokens is %d. ' \ 'Dimensions of all the tokens must be the same.' \ % (line_num, token, len(elems), vec_len) all_elems.extend(elems) self._idx_to_token.append(token) self._token_to_idx[token] = len(self._idx_to_token) - 1 tokens.add(token) self._vec_len = vec_len self._idx_to_vec = nd.array(all_elems).reshape((-1, self.vec_len)) if loaded_unknown_vec is None: self._idx_to_vec[C.UNKNOWN_IDX] = init_unknown_vec(shape=self.vec_len) else: self._idx_to_vec[C.UNKNOWN_IDX] = nd.array(loaded_unknown_vec)
[ "Load embedding vectors from the pre-trained token embedding file.\n\n\n For every unknown token, if its representation `self.unknown_token` is encountered in the\n pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token\n embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the\n text embedding vector initialized by `init_unknown_vec`.\n\n If a token is encountered multiple times in the pre-trained text embedding file, only the\n first-encountered token embedding vector will be loaded and the rest will be skipped.\n " ]
Please provide a description of the function:def _set_idx_to_vec_by_embeddings(self, token_embeddings, vocab_len, vocab_idx_to_token): new_vec_len = sum(embed.vec_len for embed in token_embeddings) new_idx_to_vec = nd.zeros(shape=(vocab_len, new_vec_len)) col_start = 0 # Concatenate all the embedding vectors in token_embeddings. for embed in token_embeddings: col_end = col_start + embed.vec_len # Concatenate vectors of the unknown token. new_idx_to_vec[0, col_start:col_end] = embed.idx_to_vec[0] new_idx_to_vec[1:, col_start:col_end] = embed.get_vecs_by_tokens(vocab_idx_to_token[1:]) col_start = col_end self._vec_len = new_vec_len self._idx_to_vec = new_idx_to_vec
[ "Sets the mapping between token indices and token embedding vectors.\n\n\n Parameters\n ----------\n token_embeddings : instance or list `mxnet.contrib.text.embedding._TokenEmbedding`\n One or multiple pre-trained token embeddings to load. If it is a list of multiple\n embeddings, these embedding vectors will be concatenated for each token.\n vocab_len : int\n Length of vocabulary whose tokens are indexed in the token embedding.\n vocab_idx_to_token: list of str\n A list of indexed tokens in the vocabulary. These tokens are indexed in the token\n embedding.\n " ]
Please provide a description of the function:def get_vecs_by_tokens(self, tokens, lower_case_backup=False): to_reduce = False if not isinstance(tokens, list): tokens = [tokens] to_reduce = True if not lower_case_backup: indices = [self.token_to_idx.get(token, C.UNKNOWN_IDX) for token in tokens] else: indices = [self.token_to_idx[token] if token in self.token_to_idx else self.token_to_idx.get(token.lower(), C.UNKNOWN_IDX) for token in tokens] vecs = nd.Embedding(nd.array(indices), self.idx_to_vec, self.idx_to_vec.shape[0], self.idx_to_vec.shape[1]) return vecs[0] if to_reduce else vecs
[ "Look up embedding vectors of tokens.\n\n\n Parameters\n ----------\n tokens : str or list of strs\n A token or a list of tokens.\n lower_case_backup : bool, default False\n If False, each token in the original case will be looked up; if True, each token in the\n original case will be looked up first, if not found in the keys of the property\n `token_to_idx`, the token in the lower case will be looked up.\n\n\n Returns\n -------\n mxnet.ndarray.NDArray:\n The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is\n a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of\n strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len).\n " ]
Please provide a description of the function:def update_token_vectors(self, tokens, new_vectors): assert self.idx_to_vec is not None, 'The property `idx_to_vec` has not been properly set.' if not isinstance(tokens, list) or len(tokens) == 1: assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) in [1, 2], \ '`new_vectors` must be a 1-D or 2-D NDArray if `tokens` is a singleton.' if not isinstance(tokens, list): tokens = [tokens] if len(new_vectors.shape) == 1: new_vectors = new_vectors.expand_dims(0) else: assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) == 2, \ '`new_vectors` must be a 2-D NDArray if `tokens` is a list of multiple strings.' assert new_vectors.shape == (len(tokens), self.vec_len), \ 'The length of new_vectors must be equal to the number of tokens and the width of' \ 'new_vectors must be equal to the dimension of embeddings of the glossary.' indices = [] for token in tokens: if token in self.token_to_idx: indices.append(self.token_to_idx[token]) else: raise ValueError('Token %s is unknown. To update the embedding vector for an ' 'unknown token, please specify it explicitly as the ' '`unknown_token` %s in `tokens`. This is to avoid unintended ' 'updates.' % (token, self.idx_to_token[C.UNKNOWN_IDX])) self._idx_to_vec[nd.array(indices)] = new_vectors
[ "Updates embedding vectors for tokens.\n\n\n Parameters\n ----------\n tokens : str or a list of strs\n A token or a list of tokens whose embedding vector are to be updated.\n new_vectors : mxnet.ndarray.NDArray\n An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal\n to the number of `tokens` and its width must be equal to the dimension of embeddings of\n the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list\n of multiple strings, it must be 2-D.\n " ]
Please provide a description of the function:def _check_pretrained_file_names(cls, pretrained_file_name): embedding_name = cls.__name__.lower() if pretrained_file_name not in cls.pretrained_file_name_sha1: raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid ' 'pretrained files for embedding %s: %s' % (pretrained_file_name, embedding_name, embedding_name, ', '.join(cls.pretrained_file_name_sha1.keys())))
[ "Checks if a pre-trained token embedding file name is valid.\n\n\n Parameters\n ----------\n pretrained_file_name : str\n The pre-trained token embedding file.\n " ]