Please provide a description of the function:
def to_array(tensor):  # type: (TensorProto) -> np.ndarray[Any]
    if tensor.HasField("segment"):
        raise ValueError(
            "Currently not supporting loading segments.")
    if tensor.data_type == TensorProto.UNDEFINED:
        raise ValueError("The data type is not defined.")

    tensor_dtype = tensor.data_type
    np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]
    storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]
    storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]
    storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]
    dims = tensor.dims

    if tensor.data_type == TensorProto.STRING:
        utf8_strings = getattr(tensor, storage_field)
        ss = list(s.decode('utf-8') for s in utf8_strings)
        return np.asarray(ss).astype(np_dtype).reshape(dims)

    if tensor.HasField("raw_data"):
        # Raw_bytes support: using frombuffer.
        return np.frombuffer(
            tensor.raw_data, dtype=np_dtype).reshape(dims)
    else:
        data = getattr(tensor, storage_field)  # type: Sequence[np.complex64]
        if (tensor_dtype == TensorProto.COMPLEX64 or
                tensor_dtype == TensorProto.COMPLEX128):
            data = combine_pairs_to_complex(data)
        return (
            np.asarray(data, dtype=storage_np_dtype)
            .astype(np_dtype)
            .reshape(dims)
        )
[ "Converts a tensor def object to a numpy array.\n\n Inputs:\n tensor: a TensorProto object.\n Returns:\n arr: the converted array.\n " ]
Please provide a description of the function:def from_array(arr, name=None): # type: (np.ndarray[Any], Optional[Text]) -> TensorProto tensor = TensorProto() tensor.dims.extend(arr.shape) if name: tensor.name = name if arr.dtype == np.object: # Special care for strings. tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype] # TODO: Introduce full string support. # We flatten the array in case there are 2-D arrays are specified # We throw the error below if we have a 3-D array or some kind of other # object. If you want more complex shapes then follow the below instructions. # Unlike other types where the shape is automatically inferred from # nested arrays of values, the only reliable way now to feed strings # is to put them into a flat array then specify type astype(np.object) # (otherwise all strings may have different types depending on their length) # and then specify shape .reshape([x, y, z]) flat_array = arr.flatten() for e in flat_array: if isinstance(e, text_type): tensor.string_data.append(e.encode('utf-8')) elif isinstance(e, np.ndarray): for s in e: if isinstance(s, text_type): tensor.string_data.append(s.encode('utf-8')) else: raise NotImplementedError( "Unrecognized object in the object array, expect a string, or array of bytes: ", str(type(e))) return tensor # For numerical types, directly use numpy raw bytes. try: dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype] except KeyError: raise RuntimeError( "Numpy data type not understood yet: {}".format(str(arr.dtype))) tensor.data_type = dtype tensor.raw_data = arr.tobytes() # note: tobytes() is only after 1.9. return tensor
[ "Converts a numpy array to a tensor def.\n\n Inputs:\n arr: a numpy array.\n name: (optional) the name of the tensor.\n Returns:\n tensor_def: the converted tensor def.\n " ]
Please provide a description of the function:
def _serialize(proto):  # type: (Union[bytes, google.protobuf.message.Message]) -> bytes
    '''
    Serialize a in-memory proto to bytes
    @params
    proto is a in-memory proto, such as a ModelProto, TensorProto, etc
    @return
    Serialized proto in bytes
    '''
    if isinstance(proto, bytes):
        return proto
    elif hasattr(proto, 'SerializeToString') and callable(proto.SerializeToString):
        result = proto.SerializeToString()
        return result
    else:
        raise ValueError('No SerializeToString method is detected. '
                         'neither proto is a str.\ntype is {}'.format(type(proto)))
[]
Please provide a description of the function:
def _deserialize(s, proto):  # type: (bytes, _Proto) -> _Proto
    '''
    Parse bytes into a in-memory proto
    @params
    s is bytes containing serialized proto
    proto is a in-memory proto object
    @return
    The proto instance filled in by s
    '''
    if not isinstance(s, bytes):
        raise ValueError('Parameter s must be bytes, but got type: {}'.format(type(s)))

    if not (hasattr(proto, 'ParseFromString') and callable(proto.ParseFromString)):
        raise ValueError('No ParseFromString method is detected. '
                         '\ntype is {}'.format(type(proto)))

    decoded = cast(Optional[int], proto.ParseFromString(s))
    if decoded is not None and decoded != len(s):
        raise google.protobuf.message.DecodeError(
            "Protobuf decoding consumed too few bytes: {} out of {}".format(
                decoded, len(s)))
    return proto
[]
Please provide a description of the function:
def load_model(f, format=None, load_external_data=True):  # type: (Union[IO[bytes], Text], Optional[Any], bool) -> ModelProto
    '''
    Loads a serialized ModelProto into memory
    @params
    f can be a file-like object (has "read" function) or a string containing a file name
    format is for future use
    @return
    Loaded in-memory ModelProto
    '''
    s = _load_bytes(f)
    model = load_model_from_string(s, format=format)

    if load_external_data:
        model_filepath = _get_file_path(f)
        if model_filepath:
            base_dir = os.path.dirname(model_filepath)
            load_external_data_for_model(model, base_dir)

    return model
[]
Please provide a description of the function:
def load_tensor(f, format=None):  # type: (Union[IO[bytes], Text], Optional[Any]) -> TensorProto
    '''
    Loads a serialized TensorProto into memory
    @params
    f can be a file-like object (has "read" function) or a string containing a file name
    format is for future use
    @return
    Loaded in-memory TensorProto
    '''
    s = _load_bytes(f)
    return load_tensor_from_string(s, format=format)
[]
Please provide a description of the function:
def save_model(proto, f, format=None):  # type: (Union[ModelProto, bytes], Union[IO[bytes], Text], Optional[Any]) -> None
    '''
    Saves the ModelProto to the specified path.
    @params
    proto should be a in-memory ModelProto
    f can be a file-like object (has "write" function) or a string containing a file name
    format is for future use
    '''
    if isinstance(proto, bytes):
        proto = _deserialize(proto, ModelProto())

    model_filepath = _get_file_path(f)
    if model_filepath:
        basepath = os.path.dirname(model_filepath)
        proto = write_external_data_tensors(proto, basepath)

    s = _serialize(proto)
    _save_bytes(s, f)
[]
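A short usage sketch of the load/save helpers above through the public `onnx` API; the file name "model.onnx" is only illustrative and assumes a valid ONNX model already exists on disk:

import onnx

model = onnx.load_model("model.onnx")       # file -> in-memory ModelProto
print(model.ir_version, len(model.graph.node))
onnx.save_model(model, "model_copy.onnx")   # ModelProto -> file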
Please provide a description of the function:
def polish_model(model):  # type: (ModelProto) -> ModelProto
    '''
    This function combines several useful utility functions together.
    '''
    onnx.checker.check_model(model)
    onnx.helper.strip_doc_string(model)
    model = onnx.shape_inference.infer_shapes(model)
    model = onnx.optimizer.optimize(model)
    onnx.checker.check_model(model)
    return model
[]
Please provide a description of the function:def dynamic_unroll(cell, inputs, begin_state, drop_inputs=0, drop_outputs=0, layout='TNC', valid_length=None): # Merge is always True, so we don't need length. inputs, axis, F, _ = _format_sequence(0, inputs, layout, True) if axis != 0: axes = list(range(len(layout))) tmp = axes[0] axes[0] = axes[axis] axes[axis] = tmp inputs = F.transpose(inputs, axes=axes) states = begin_state if drop_inputs: inputs = F.Dropout(inputs, p=drop_inputs, axes=(axis,)) if valid_length is None: def loop_body(inputs, states): return cell(inputs, states) else: zeros = [] for s in states: zeros.append(F.zeros_like(s)) states = list(_as_list(states)) states.append(F.zeros((1))) def loop_body(inputs, states): cell_states = states[:-1] iter_no = states[-1] out, new_states = cell(inputs, cell_states) for i, state in enumerate(cell_states): new_states[i] = F.where(F.broadcast_greater(valid_length, iter_no), new_states[i], state) new_states.append(iter_no + 1) return out, new_states outputs, states = F.contrib.foreach(loop_body, inputs, states) if drop_outputs: outputs = F.Dropout(outputs, p=drop_outputs, axes=(axis,)) if valid_length is not None: if axis != 0: outputs = F.transpose(outputs, axes) outputs = F.SequenceMask(outputs, sequence_length=valid_length, use_sequence_length=True, axis=axis) # the last state is the iteration number. We don't need it. return outputs, states[:-1] else: if axis != 0: outputs = F.transpose(outputs, axes) return outputs, states
[ "Unrolls an RNN cell across time steps.\n\n Currently, 'TNC' is a preferred layout. unroll on the input of this layout\n runs much faster.\n\n Parameters\n ----------\n cell : an object whose base class is RNNCell.\n The RNN cell to run on the input sequence.\n inputs : Symbol\n It should have shape (batch_size, length, ...) if `layout` is 'NTC',\n or (length, batch_size, ...) if `layout` is 'TNC'.\n begin_state : nested list of Symbol\n The initial states of the RNN sequence.\n drop_inputs : float, default 0.\n The dropout rate for inputs. Won't apply dropout if it equals 0.\n drop_outputs : float, default 0.\n The dropout rate for outputs. Won't apply dropout if it equals 0.\n layout : str, optional\n `layout` of input symbol. Only used if inputs\n is a single Symbol.\n valid_length : Symbol, NDArray or None\n `valid_length` specifies the length of the sequences in the batch without padding.\n This option is especially useful for building sequence-to-sequence models where\n the input and output sequences would potentially be padded.\n If `valid_length` is None, all sequences are assumed to have the same length.\n If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).\n The ith element will be the length of the ith sequence in the batch.\n The last valid state will be return and the padded outputs will be masked with 0.\n Note that `valid_length` must be smaller or equal to `length`.\n\n Returns\n -------\n outputs : Symbol\n the output of the RNN from this unrolling.\n\n states : list of Symbol\n The new state of this RNN after this unrolling.\n The type of this symbol is same as the output of `begin_state`.\n\n Examples\n --------\n >>> seq_len = 3\n >>> batch_size = 2\n >>> input_size = 5\n >>> cell = mx.gluon.rnn.LSTMCell(input_size, prefix='rnn_')\n >>> cell.initialize(ctx=mx.cpu())\n >>> rnn_data = mx.nd.normal(loc=0, scale=1, shape=(seq_len, batch_size, input_size))\n >>> state_shape = (batch_size, input_size)\n >>> states = [mx.nd.normal(loc=0, scale=1, shape=state_shape) for i in range(2)]\n >>> valid_length = mx.nd.array([2, 3])\n >>> output, states = mx.gluon.contrib.rnn.rnn_cell.dynamic_unroll(cell, rnn_data, states,\n valid_length=valid_length,\n layout='TNC')\n >>> print(output)\n [[[ 0.00767238 0.00023103 0.03973929 -0.00925503 -0.05660512]\n [ 0.00881535 0.05428379 -0.02493718 -0.01834097 0.02189514]]\n [[-0.00676967 0.01447039 0.01287002 -0.00574152 -0.05734247]\n [ 0.01568508 0.02650866 -0.04270559 -0.04328435 0.00904011]]\n [[ 0. 0. 0. 0. 0. ]\n [ 0.01055336 0.02734251 -0.03153727 -0.03742751 -0.01378113]]]\n <NDArray 3x2x5 @cpu(0)>\n " ]
Please provide a description of the function:def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None, valid_length=None): # Dropout on inputs and outputs can be performed on the whole sequence # only when state dropout is not present. if self.drop_states: return super(VariationalDropoutCell, self).unroll(length, inputs, begin_state, layout, merge_outputs, valid_length=valid_length) self.reset() inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, True) states = _get_begin_state(self, F, begin_state, inputs, batch_size) if self.drop_inputs: inputs = F.Dropout(inputs, p=self.drop_inputs, axes=(axis,)) outputs, states = self.base_cell.unroll(length, inputs, states, layout, merge_outputs=True, valid_length=valid_length) if self.drop_outputs: outputs = F.Dropout(outputs, p=self.drop_outputs, axes=(axis,)) merge_outputs = isinstance(outputs, tensor_types) if merge_outputs is None else \ merge_outputs outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs) if valid_length is not None: outputs = _mask_sequence_variable_length(F, outputs, length, valid_length, axis, merge_outputs) return outputs, states
[ "Unrolls an RNN cell across time steps.\n\n Parameters\n ----------\n length : int\n Number of steps to unroll.\n inputs : Symbol, list of Symbol, or None\n If `inputs` is a single Symbol (usually the output\n of Embedding symbol), it should have shape\n (batch_size, length, ...) if `layout` is 'NTC',\n or (length, batch_size, ...) if `layout` is 'TNC'.\n\n If `inputs` is a list of symbols (usually output of\n previous unroll), they should all have shape\n (batch_size, ...).\n begin_state : nested list of Symbol, optional\n Input states created by `begin_state()`\n or output state of another cell.\n Created from `begin_state()` if `None`.\n layout : str, optional\n `layout` of input symbol. Only used if inputs\n is a single Symbol.\n merge_outputs : bool, optional\n If `False`, returns outputs as a list of Symbols.\n If `True`, concatenates output across time steps\n and returns a single symbol with shape\n (batch_size, length, ...) if layout is 'NTC',\n or (length, batch_size, ...) if layout is 'TNC'.\n If `None`, output whatever is faster.\n valid_length : Symbol, NDArray or None\n `valid_length` specifies the length of the sequences in the batch without padding.\n This option is especially useful for building sequence-to-sequence models where\n the input and output sequences would potentially be padded.\n If `valid_length` is None, all sequences are assumed to have the same length.\n If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).\n The ith element will be the length of the ith sequence in the batch.\n The last valid state will be return and the padded outputs will be masked with 0.\n Note that `valid_length` must be smaller or equal to `length`.\n\n Returns\n -------\n outputs : list of Symbol or Symbol\n Symbol (if `merge_outputs` is True) or list of Symbols\n (if `merge_outputs` is False) corresponding to the output from\n the RNN from this unrolling.\n\n states : list of Symbol\n The new state of this RNN after this unrolling.\n The type of this symbol is same as the output of `begin_state()`.\n " ]
Please provide a description of the function:
def _fix_attribute_names(attrs, change_map):
    new_attr = {}
    for k in attrs.keys():
        if k in change_map:
            new_attr[change_map[k]] = attrs[k]
        else:
            new_attr[k] = attrs[k]
    return new_attr
[ "\n Change attribute names as per values in change_map dictionary.\n Parameters\n ----------\n :param attrs : dict Dict of operator attributes\n :param change_map : dict Dict of onnx attribute name to mxnet attribute names.\n\n Returns\n -------\n :return new_attr : dict Converted dict of operator attributes.\n " ]
Please provide a description of the function:
def _remove_attributes(attrs, remove_list):
    new_attrs = {}
    for attr in attrs.keys():
        if attr not in remove_list:
            new_attrs[attr] = attrs[attr]
    return new_attrs
[ "\n Removes attributes in the remove list from the input attribute dict\n :param attrs : Dict of operator attributes\n :param remove_list : list of attributes to be removed\n\n :return new_attr : Dict of operator attributes without the listed attributes.\n " ]
Please provide a description of the function:
def _add_extra_attributes(attrs, extra_attr_map):
    for attr in extra_attr_map:
        if attr not in attrs:
            attrs[attr] = extra_attr_map[attr]
    return attrs
[ "\n :param attrs: Current Attribute list\n :param extraAttrMap: Additional attributes to be added\n :return: new_attr\n " ]
Please provide a description of the function:
def _pad_sequence_fix(attr, kernel_dim=None):
    new_attr = ()
    if len(attr) % 2 == 0:
        for index in range(int(len(attr) / 2)):
            new_attr = new_attr + attr[index::int(len(attr) / 2)]
        # Making sure pad values are in the attr for all axes.
        if kernel_dim is not None:
            while len(new_attr) < kernel_dim * 2:
                new_attr = new_attr + (0, 0)
    return new_attr
[ "Changing onnx's pads sequence to match with mxnet's pad_width\n mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end)\n onnx: (x1_begin, x2_begin, ... , xn_end, xn_end)" ]
Please provide a description of the function:def _fix_pooling(pool_type, inputs, new_attr): stride = new_attr.get('stride') kernel = new_attr.get('kernel') padding = new_attr.get('pad') p_value = new_attr.get('p_value') # Adding default stride. if stride is None: stride = (1,) * len(kernel) # Add padding attr if not provided. if padding is None: padding = (0,) * len(kernel) * 2 # Mxnet Pad operator supports only 4D/5D tensors. # For 1D case, these are the steps: # Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2 # Step 2. Apply padding to this changed tensor # Step 3. Remove the extra dimension added in step 1. if len(kernel) == 1: dummy_axis = 2 # setting 0 padding to the new dim to be added. padding = (0, padding[0], 0, padding[1]) pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=2) # Step 1. curr_sym = symbol.expand_dims(inputs[0], axis=dummy_axis) # Step 2. Common for all tensor sizes new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width) # Step 3: Removing extra dim added. new_pad_op = symbol.split(new_pad_op, axis=dummy_axis, num_outputs=1, squeeze_axis=1) else: # For 2D/3D cases: # Apply padding pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=len(kernel)) curr_sym = inputs[0] if pool_type == 'max': # For max pool : mode = 'edge', we should replicate the # edge values to pad, so that we only include input data values # for calculating 'max' new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width) else: # For avg pool, we should add 'zeros' for padding so mode='constant' new_pad_op = symbol.pad(curr_sym, mode='constant', pad_width=pad_width) # Apply pooling without pads. if pool_type == 'lp': new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel, p_value=p_value) else: new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel) return new_pooling_op
[ "onnx pooling operator supports asymmetrical padding\n Adding pad operator before pooling in mxnet to work with onnx" ]
Please provide a description of the function:
def _fix_bias(op_name, attrs, num_inputs):
    if num_inputs == 3:
        attrs['no_bias'] = False
    elif num_inputs == 2:
        attrs['no_bias'] = True
    else:
        raise ValueError("Unexpected number of inputs for: {}".format(op_name))
    return attrs
[ "A workaround for 'use_bias' attribute since onnx don't provide this attribute,\n we have to check the number of inputs to decide it." ]
Please provide a description of the function:
def _fix_broadcast(op_name, inputs, broadcast_axis, proto_obj):
    if int(len(proto_obj._params)) > 0:
        assert len(list(inputs)) == 2

        input0_shape = get_input_shape(inputs[0], proto_obj)
        # creating reshape shape
        reshape_shape = list(len(input0_shape) * (1,))
        reshape_shape[broadcast_axis] = -1
        reshape_shape = tuple(reshape_shape)
        reshape_op_sym = symbol.reshape(inputs[1], shape=reshape_shape)
        op_sym = getattr(symbol, op_name)(inputs[0], reshape_op_sym)
    else:
        op_sym = op_name
    return op_sym
[ "A workaround to reshape bias term to (1, num_channel)." ]
Please provide a description of the function:
def _fix_channels(op_name, attrs, inputs, proto_obj):
    weight_name = inputs[1].name
    if not weight_name in proto_obj._params:
        raise ValueError("Unable to get channels/units attr from onnx graph.")
    else:
        wshape = proto_obj._params[weight_name].shape
        assert len(wshape) >= 2, "Weights shape is invalid: {}".format(wshape)

        if op_name == 'FullyConnected':
            attrs['num_hidden'] = wshape[0]
        else:
            if op_name == 'Convolution':
                # Weight shape for Conv and FC: (M x C x kH x kW) : M is number of
                # feature maps/hidden and C is number of channels
                attrs['num_filter'] = wshape[0]
            elif op_name == 'Deconvolution':
                # Weight shape for DeConv : (C x M x kH x kW) : M is number of
                # feature maps/filters and C is number of channels
                attrs['num_filter'] = wshape[1]
    return attrs
[ "A workaround for getting 'channels' or 'units' since onnx don't provide\n these attributes. We check the shape of weights provided to get the number.\n " ]
Please provide a description of the function:
def _fix_gemm(op_name, inputs, old_attr, proto_obj):
    op_sym = getattr(symbol, op_name, None)
    alpha = float(old_attr.get('alpha', 1.0))
    beta = float(old_attr.get('beta', 1.0))
    trans_a = int(old_attr.get('transA', 0))
    trans_b = int(old_attr.get('transB', 0))
    if trans_a:
        inputs[0] = symbol.transpose(inputs[0], axes=(1, 0))
    if not trans_b:
        inputs[1] = symbol.transpose(inputs[1], axes=(1, 0))
    new_inputs = [alpha * inputs[0], inputs[1], beta * inputs[2]]
    new_attr = {'num_hidden': proto_obj._params[inputs[2].name].shape[0]}
    return op_sym, new_attr, new_inputs
[ "Using FullyConnected operator in place of linalg_gemm to perform same operation" ]
Please provide a description of the function:def get_input_shape(sym, proto_obj): arg_params = proto_obj.arg_dict aux_params = proto_obj.aux_dict model_input_shape = [data[1] for data in proto_obj.model_metadata.get('input_tensor_data')] data_names = [data[0] for data in proto_obj.model_metadata.get('input_tensor_data')] # creating dummy inputs inputs = [] for in_shape in model_input_shape: inputs.append(nd.ones(shape=in_shape)) data_shapes = [] for idx, input_name in enumerate(data_names): data_shapes.append((input_name, inputs[idx].shape)) ctx = context.cpu() # create a module mod = module.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None) mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None) mod.set_params(arg_params=arg_params, aux_params=aux_params) data_forward = [] for idx, input_name in enumerate(data_names): val = inputs[idx] data_forward.append(val) mod.forward(io.DataBatch(data_forward)) result = mod.get_outputs()[0].asnumpy() return result.shape
[ "Helper function to obtain the shape of an array" ]
Please provide a description of the function:
def imresize(src, w, h, *args, **kwargs):
    return _internal._cvimresize(src, w, h, *args, **kwargs)
[ "Resize image with OpenCV.\n\n .. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built\n with USE_OPENCV=1 for `imresize` to work.\n\n Parameters\n ----------\n src : NDArray\n source image\n w : int, required\n Width of resized image.\n h : int, required\n Height of resized image.\n interp : int, optional, default=1\n Interpolation method (default=cv2.INTER_LINEAR).\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n out : NDArray, optional\n The output NDArray to hold the result.\n\n Returns\n -------\n out : NDArray or list of NDArrays\n The output of this function.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> new_image = mx.img.resize(image, 240, 360)\n >>> new_image\n <NDArray 240x360x3 @cpu(0)>\n " ]
Please provide a description of the function:
def imdecode(buf, *args, **kwargs):
    if not isinstance(buf, nd.NDArray):
        if sys.version_info[0] == 3 and not isinstance(buf, (bytes, bytearray, np.ndarray)):
            raise ValueError('buf must be of type bytes, bytearray or numpy.ndarray,'
                             'if you would like to input type str, please convert to bytes')
        buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)

    return _internal._cvimdecode(buf, *args, **kwargs)
[ "Decode an image to an NDArray.\n\n .. note:: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes/bytearray or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n " ]
Please provide a description of the function:
def scale_down(src_size, size):
    w, h = size
    sw, sh = src_size
    if sh < h:
        w, h = float(w * sh) / h, sh
    if sw < w:
        w, h = sw, float(h * sw) / w
    return int(w), int(h)
[ "Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n " ]
Please provide a description of the function:
def copyMakeBorder(src, top, bot, left, right, *args, **kwargs):
    return _internal._cvcopyMakeBorder(src, top, bot, left, right, *args, **kwargs)
[ "Pad image border with OpenCV.\n\n Parameters\n ----------\n src : NDArray\n source image\n top : int, required\n Top margin.\n bot : int, required\n Bottom margin.\n left : int, required\n Left margin.\n right : int, required\n Right margin.\n type : int, optional, default='0'\n Filling type (default=cv2.BORDER_CONSTANT).\n 0 - cv2.BORDER_CONSTANT - Adds a constant colored border.\n 1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the\n border elements, like this : fedcba|abcdefgh|hgfedcb\n 2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above,\n but with a slight change, like this : gfedcb|abcdefgh|gfedcba\n 3 - cv2.BORDER_REPLICATE - Last element is replicated throughout,\n like this: aaaaaa|abcdefgh|hhhhhhh\n 4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg\n value : double, optional, default=0\n (Deprecated! Use ``values`` instead.) Fill with single value.\n values : tuple of <double>, optional, default=[]\n Fill with value(RGB[A] or gray), up to 4 channels.\n\n out : NDArray, optional\n The output NDArray to hold the result.\n\n Returns\n -------\n out : NDArray or list of NDArrays\n The output of this function.\n\n Example\n --------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> new_image = mx_border = mx.image.copyMakeBorder(mx_img, 1, 2, 3, 4, type=0)\n >>> new_image\n <NDArray 2324x3489x3 @cpu(0)>\n " ]
Please provide a description of the function:
def _get_interp_method(interp, sizes=()):
    if interp == 9:
        if sizes:
            assert len(sizes) == 4
            oh, ow, nh, nw = sizes
            if nh > oh and nw > ow:
                return 2
            elif nh < oh and nw < ow:
                return 3
            else:
                return 1
        else:
            return 2
    if interp == 10:
        return random.randint(0, 4)
    if interp not in (0, 1, 2, 3, 4):
        raise ValueError('Unknown interp method %d' % interp)
    return interp
[ "Get the interpolation method for resize functions.\n The major purpose of this function is to wrap a random interp method selection\n and a auto-estimation method.\n\n Parameters\n ----------\n interp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n sizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\n Returns\n -------\n int\n interp method from 0 to 4\n " ]
Please provide a description of the function:
def resize_short(src, size, interp=2):
    h, w, _ = src.shape
    if h > w:
        new_h, new_w = size * h // w, size
    else:
        new_h, new_w = size, size * w // h
    return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
[ "Resizes shorter edge to size.\n\n .. note:: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An 'NDArray' containing the resized image.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n <NDArray 2321x3482x3 @cpu(0)>\n " ]
Please provide a description of the function:
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    out = nd.slice(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    if size is not None and (w, h) != size:
        sizes = (h, w, size[1], size[0])
        out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
    return out
[ "Crop src at fixed location, and (optionally) resize it to size.\n\n Parameters\n ----------\n src : NDArray\n Input image\n x0 : int\n Left boundary of the cropping area\n y0 : int\n Top boundary of the cropping area\n w : int\n Width of the cropping area\n h : int\n Height of the cropping area\n size : tuple of (w, h)\n Optional, resize to new size after cropping\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n " ]
Please provide a description of the function:
def center_crop(src, size, interp=2):
    h, w, _ = src.shape
    new_w, new_h = scale_down((w, h), size)

    x0 = int((w - new_w) / 2)
    y0 = int((h - new_h) / 2)

    out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
    return out, (x0, y0, new_w, new_h)
[ "Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. Upsamples if `src` is smaller\n than `size`.\n\n .. note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n <NDArray 500x1000x3 @cpu(0)>\n >>> x, y, width, height\n (1241, 910, 1000, 500)\n " ]
Please provide a description of the function:
def color_normalize(src, mean, std=None):
    if mean is not None:
        src -= mean
    if std is not None:
        src /= std
    return src
[ "Normalize src with mean and std.\n\n Parameters\n ----------\n src : NDArray\n Input image\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n\n Returns\n -------\n NDArray\n An `NDArray` containing the normalized image.\n " ]
Please provide a description of the function:def random_size_crop(src, size, area, ratio, interp=2, **kwargs): h, w, _ = src.shape src_area = h * w if 'min_area' in kwargs: warnings.warn('`min_area` is deprecated. Please use `area` instead.', DeprecationWarning) area = kwargs.pop('min_area') assert not kwargs, "unexpected keyword arguments for `random_size_crop`." if isinstance(area, numeric_types): area = (area, 1.0) for _ in range(10): target_area = random.uniform(area[0], area[1]) * src_area log_ratio = (np.log(ratio[0]), np.log(ratio[1])) new_ratio = np.exp(random.uniform(*log_ratio)) new_w = int(round(np.sqrt(target_area * new_ratio))) new_h = int(round(np.sqrt(target_area / new_ratio))) if new_w <= w and new_h <= h: x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size, interp) return out, (x0, y0, new_w, new_h) # fall back to center_crop return center_crop(src, size, interp)
[ "Randomly crop src with size. Randomize area and aspect ratio.\n\n Parameters\n ----------\n src : NDArray\n Input image\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n " ]
Please provide a description of the function:def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False, mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0, pca_noise=0, rand_gray=0, inter_method=2): auglist = [] if resize > 0: auglist.append(ResizeAug(resize, inter_method)) crop_size = (data_shape[2], data_shape[1]) if rand_resize: assert rand_crop auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method)) elif rand_crop: auglist.append(RandomCropAug(crop_size, inter_method)) else: auglist.append(CenterCropAug(crop_size, inter_method)) if rand_mirror: auglist.append(HorizontalFlipAug(0.5)) auglist.append(CastAug()) if brightness or contrast or saturation: auglist.append(ColorJitterAug(brightness, contrast, saturation)) if hue: auglist.append(HueJitterAug(hue)) if pca_noise > 0: eigval = np.array([55.46, 4.794, 1.148]) eigvec = np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]) auglist.append(LightingAug(pca_noise, eigval, eigvec)) if rand_gray > 0: auglist.append(RandomGrayAug(rand_gray)) if mean is True: mean = nd.array([123.68, 116.28, 103.53]) elif mean is not None: assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3] if std is True: std = nd.array([58.395, 57.12, 57.375]) elif std is not None: assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3] if mean is not None or std is not None: auglist.append(ColorNormalizeAug(mean, std)) return auglist
[ "Creates an augmenter list.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : bool\n Whether to enable random cropping other than center crop\n rand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels will not be reduced to 1\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n ... saturation=0.125, pca_noise=0.05, inter_method=10)\n >>> # dump the details\n >>> for aug in augs:\n ... aug.dumps()\n " ]
Please provide a description of the function:
def dumps(self):
    return json.dumps([self.__class__.__name__.lower(), self._kwargs])
[ "Saves the Augmenter to string\n\n Returns\n -------\n str\n JSON formatted string that describes the Augmenter.\n " ]
Please provide a description of the function:
def dumps(self):
    return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
[ "Override the default to avoid duplicate dump." ]
Please provide a description of the function:
def reset(self):
    if self.seq is not None and self.shuffle:
        random.shuffle(self.seq)
    if self.last_batch_handle != 'roll_over' or \
            self._cache_data is None:
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        if self._allow_read is False:
            self._allow_read = True
[ "Resets the iterator to the beginning of the data." ]
Please provide a description of the function:
def hard_reset(self):
    if self.seq is not None and self.shuffle:
        random.shuffle(self.seq)
    if self.imgrec is not None:
        self.imgrec.reset()
    self.cur = 0
    self._allow_read = True
    self._cache_data = None
    self._cache_label = None
    self._cache_idx = None
[ "Resets the iterator and ignore roll over data" ]
Please provide a description of the function:def next_sample(self): if self._allow_read is False: raise StopIteration if self.seq is not None: if self.cur < self.num_image: idx = self.seq[self.cur] else: if self.last_batch_handle != 'discard': self.cur = 0 raise StopIteration self.cur += 1 if self.imgrec is not None: s = self.imgrec.read_idx(idx) header, img = recordio.unpack(s) if self.imglist is None: return header.label, img else: return self.imglist[idx][0], img else: label, fname = self.imglist[idx] return label, self.read_image(fname) else: s = self.imgrec.read() if s is None: if self.last_batch_handle != 'discard': self.imgrec.reset() raise StopIteration header, img = recordio.unpack(s) return header.label, img
[ "Helper function for reading in next sample." ]
Please provide a description of the function:
def _batchify(self, batch_data, batch_label, start=0):
    i = start
    batch_size = self.batch_size
    try:
        while i < batch_size:
            label, s = self.next_sample()
            data = self.imdecode(s)
            try:
                self.check_valid_image(data)
            except RuntimeError as e:
                logging.debug('Invalid image, skipping: %s', str(e))
                continue
            data = self.augmentation_transform(data)
            assert i < batch_size, 'Batch size must be multiples of augmenter output length'
            batch_data[i] = self.postprocess_data(data)
            batch_label[i] = label
            i += 1
    except StopIteration:
        if not i:
            raise StopIteration
    return i
[ "Helper function for batchifying data" ]
Please provide a description of the function:
def imdecode(self, s):
    def locate():
        if self.seq is not None:
            idx = self.seq[(self.cur % self.num_image) - 1]
        else:
            idx = (self.cur % self.num_image) - 1
        if self.imglist is not None:
            _, fname = self.imglist[idx]
            msg = "filename: {}".format(fname)
        else:
            msg = "index: {}".format(idx)
        return "Broken image " + msg
    try:
        img = imdecode(s)
    except Exception as e:
        raise RuntimeError("{}, {}".format(locate(), e))
    return img
[ "Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.", "Locate the image file/index if decode fails." ]
Please provide a description of the function:
def read_image(self, fname):
    with open(os.path.join(self.path_root, fname), 'rb') as fin:
        img = fin.read()
    return img
[ "Reads an input image `fname` and returns the decoded raw bytes.\n Examples\n --------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n " ]
Please provide a description of the function:
def facc(label, pred):
    pred = pred.ravel()
    label = label.ravel()
    return ((pred > 0.5) == label).mean()
[ " evaluate accuracy " ]
Please provide a description of the function:
def word_to_vector(word):
    vector = []
    for char in list(word):
        vector.append(char2int(char))
    return vector
[ "\n Convert character vectors to integer vectors.\n " ]
Please provide a description of the function:
def vector_to_word(vector):
    word = ""
    for vec in vector:
        word = word + int2char(vec)
    return word
[ "\n Convert integer vectors to character vectors.\n " ]
Please provide a description of the function:
def char_conv(out):
    out_conv = list()
    for i in range(out.shape[0]):
        tmp_str = ''
        for j in range(out.shape[1]):
            if int(out[i][j]) >= 0:
                tmp_char = int2char(int(out[i][j]))
                if int(out[i][j]) == 27:
                    tmp_char = ''
                tmp_str = tmp_str + tmp_char
        out_conv.append(tmp_str)
    return out_conv
[ "\n Convert integer vectors to character vectors for batch.\n " ]
Please provide a description of the function:def add_pooling_with_padding_types(builder, name, height, width, stride_height, stride_width, layer_type, padding_type, input_name, output_name, padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0, same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY', exclude_pad_area = True, is_global = False): spec = builder.spec nn_spec = builder.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.pooling # Set the parameters spec_layer_params.type = \ _NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Value(layer_type) if padding_type == 'VALID': height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add() height_border.startEdgeSize = padding_top height_border.endEdgeSize = padding_bottom width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add() width_border.startEdgeSize = padding_left width_border.endEdgeSize = padding_right elif padding_type == 'SAME': if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'): raise ValueError("Invalid value %d of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode) spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode) elif padding_type == 'INCLUDE_LAST_PIXEL': if padding_top != padding_bottom or padding_left != padding_right: raise ValueError("Only symmetric padding is supported with the INCLUDE_LAST_PIXEL padding type") spec_layer_params.includeLastPixel.paddingAmounts.append(padding_top) spec_layer_params.includeLastPixel.paddingAmounts.append(padding_left) spec_layer_params.kernelSize.append(height) spec_layer_params.kernelSize.append(width) spec_layer_params.stride.append(stride_height) spec_layer_params.stride.append(stride_width) spec_layer_params.avgPoolExcludePadding = exclude_pad_area spec_layer_params.globalPooling = is_global
[ "\r\n Add a pooling layer to the model.\r\n\r\n This is our own implementation of add_pooling since current CoreML's version (0.5.0) of builder\r\n doesn't provide support for padding types apart from valid. This support will be added in the\r\n next release of coremltools. When that happens, this can be removed.\r\n\r\n Parameters\r\n\r\n ----------\r\n builder: NeuralNetworkBuilder\r\n A neural network builder object.\r\n name: str\r\n The name of this layer.\r\n height: int\r\n Height of pooling region.\r\n width: int\r\n Number of elements to be padded on the right side of the input blob.\r\n stride_height: int\r\n Stride along the height direction.\r\n stride_width: int\r\n Stride along the height direction.\r\n layer_type: str\r\n Type of pooling performed. Can either be 'MAX', 'AVERAGE' or 'L2'.\r\n padding_type: str\r\n Option for the output blob shape. Can be either 'VALID' , 'SAME' or 'INCLUDE_LAST_PIXEL'. Kindly look at NeuralNetwork.proto for details.\r\n input_name: str\r\n The input blob name of this layer.\r\n output_name: str\r\n The output blob name of this layer.\r\n\r\n padding_top, padding_bottom, padding_left, padding_right: int\r\n values of height (top, bottom) and width (left, right) padding to be used if padding type is \"VALID\" or \"INCLUDE_LAST_PIXEL\"\r\n\r\n same_padding_asymmetry_mode : str.\r\n Type of asymmetric padding to be used when padding_type = 'SAME'. Kindly look at NeuralNetwork.proto for details. Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.\r\n\r\n exclude_pad_area: boolean\r\n Whether to exclude padded area in the pooling operation. Defaults to True.\r\n\r\n - If True, the value of the padded area will be excluded.\r\n - If False, the padded area will be included.\r\n This flag is only used with average pooling.\r\n is_global: boolean\r\n Whether the pooling operation is global. Defaults to False.\r\n\r\n - If True, the pooling operation is global -- the pooling region is of the same size of the input blob.\r\n Parameters height, width, stride_height, stride_width will be ignored.\r\n\r\n - If False, the pooling operation is not global.\r\n\r\n See Also\r\n --------\r\n add_convolution, add_pooling, add_activation\r\n " ]
Please provide a description of the function:
def get_frames(root_path):
    ret = []
    for root, _, files in os.walk(root_path):
        root = root.replace('\\', '/')
        files = [s for s in files if ".dcm" in s]
        if len(files) == 0 or not files[0].endswith(".dcm") or root.find("sax") == -1:
            continue
        prefix = files[0].rsplit('-', 1)[0]
        fileset = set(files)
        expected = ["%s-%04d.dcm" % (prefix, i + 1) for i in range(30)]
        if all(x in fileset for x in expected):
            ret.append([root + "/" + x for x in expected])
    # sort for reproducibility
    return sorted(ret, key=lambda x: x[0])
[ "Get path to all the frame in view SAX and contain complete frames" ]
Please provide a description of the function:
def write_data_csv(fname, frames, preproc):
    fdata = open(fname, "w")
    dr = Parallel()(delayed(get_data)(lst, preproc) for lst in frames)
    data, result = zip(*dr)
    for entry in data:
        fdata.write(','.join(entry) + '\r\n')
    print("All finished, %d slices in total" % len(data))
    fdata.close()
    result = np.ravel(result)
    return result
[ "Write data to csv file" ]
Please provide a description of the function:
def crop_resize(img, size):
    if img.shape[0] < img.shape[1]:
        img = img.T
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    # resize to (size, size)
    resized_img = transform.resize(crop_img, (size, size))
    resized_img *= 255
    return resized_img.astype("uint8")
[ "crop center and resize" ]
Please provide a description of the function:
def get_generator():
    g_net = gluon.nn.Sequential()
    with g_net.name_scope():
        g_net.add(gluon.nn.Conv2DTranspose(
            channels=512, kernel_size=4, strides=1, padding=0, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(
            channels=256, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(
            channels=128, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(
            channels=64, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(channels=3, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.Activation('tanh'))

    return g_net
[ " construct and return generator " ]
Please provide a description of the function:
def get_descriptor(ctx):
    d_net = gluon.nn.Sequential()
    with d_net.name_scope():
        d_net.add(SNConv2D(num_filter=64, kernel_size=4, strides=2, padding=1, in_channels=3, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))

        d_net.add(SNConv2D(num_filter=128, kernel_size=4, strides=2, padding=1, in_channels=64, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))

        d_net.add(SNConv2D(num_filter=256, kernel_size=4, strides=2, padding=1, in_channels=128, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))

        d_net.add(SNConv2D(num_filter=512, kernel_size=4, strides=2, padding=1, in_channels=256, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))

        d_net.add(SNConv2D(num_filter=1, kernel_size=4, strides=1, padding=0, in_channels=512, ctx=ctx))

    return d_net
[ " construct and return descriptor " ]
Please provide a description of the function:
def _spectral_norm(self):
    w = self.params.get('weight').data(self.ctx)
    w_mat = nd.reshape(w, [w.shape[0], -1])

    _u = self.u.data(self.ctx)
    _v = None

    for _ in range(POWER_ITERATION):
        _v = nd.L2Normalization(nd.dot(_u, w_mat))
        _u = nd.L2Normalization(nd.dot(_v, w_mat.T))

    sigma = nd.sum(nd.dot(_u, w_mat) * _v)
    if sigma == 0.:
        sigma = EPSILON

    with autograd.pause():
        self.u.set_data(_u)

    return w / sigma
[ " spectral normalization " ]
Please provide a description of the function:
def conv_output_length(input_length, filter_size, border_mode, stride, dilation=1):
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid'}
    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if border_mode == 'same':
        output_length = input_length
    elif border_mode == 'valid':
        output_length = input_length - dilated_filter_size + 1
    return (output_length + stride - 1) // stride
[ " Compute the length of the output sequence after 1D convolution along\n time. Note that this function is in line with the function used in\n Convolution1D class from Keras.\n Params:\n input_length (int): Length of the input sequence.\n filter_size (int): Width of the convolution kernel.\n border_mode (str): Only support `same` or `valid`.\n stride (int): Stride size used in 1D convolution.\n dilation (int)\n " ]
Please provide a description of the function:def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128): assert not np.iscomplexobj(samples), "Must not pass in complex numbers" window = np.hanning(fft_length)[:, None] window_norm = np.sum(window ** 2) # The scaling below follows the convention of # matplotlib.mlab.specgram which is the same as # matlabs specgram. scale = window_norm * sample_rate trunc = (len(samples) - fft_length) % hop_length x = samples[:len(samples) - trunc] # "stride trick" reshape to include overlap nshape = (fft_length, (len(x) - fft_length) // hop_length + 1) nstrides = (x.strides[0], x.strides[0] * hop_length) x = as_strided(x, shape=nshape, strides=nstrides) # window stride sanity check assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)]) # broadcast window, compute fft over columns and square mod # This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). x = np.fft.rfft(x * window, axis=0) x = np.absolute(x) ** 2 # scale, 2.0 for everything except dc and fft_length/2 x[1:-1, :] *= (2.0 / scale) x[(0, -1), :] /= scale freqs = float(sample_rate) / fft_length * np.arange(x.shape[0]) return x, freqs
[ "\n Compute the spectrogram for a real signal.\n The parameters follow the naming convention of\n matplotlib.mlab.specgram\n Args:\n samples (1D array): input audio signal\n fft_length (int): number of elements in fft window\n sample_rate (scalar): sample rate\n hop_length (int): hop length (relative offset between neighboring\n fft windows).\n Returns:\n x (2D array): spectrogram [frequency x time]\n freq (1D array): frequency of each row in x\n Note:\n This is a truncating computation e.g. if fft_length=10,\n hop_length=5 and the signal has 23 elements, then the\n last 3 elements will be truncated.\n " ]
Please provide a description of the function:def spectrogram_from_file(filename, step=10, window=20, max_freq=None, eps=1e-14, overwrite=False, save_feature_as_csvfile=False): csvfilename = filename.replace(".wav", ".csv") if (os.path.isfile(csvfilename) is False) or overwrite: with soundfile.SoundFile(filename) as sound_file: audio = sound_file.read(dtype='float32') sample_rate = sound_file.samplerate if audio.ndim >= 2: audio = np.mean(audio, 1) if max_freq is None: max_freq = sample_rate / 2 if max_freq > sample_rate / 2: raise ValueError("max_freq must not be greater than half of " " sample rate") if step > window: raise ValueError("step size must not be greater than window size") hop_length = int(0.001 * step * sample_rate) fft_length = int(0.001 * window * sample_rate) pxx, freqs = spectrogram( audio, fft_length=fft_length, sample_rate=sample_rate, hop_length=hop_length) ind = np.where(freqs <= max_freq)[0][-1] + 1 res = np.transpose(np.log(pxx[:ind, :] + eps)) if save_feature_as_csvfile: np.savetxt(csvfilename, res) return res else: return np.loadtxt(csvfilename)
[ " Calculate the log of linear spectrogram from FFT energy\n Params:\n filename (str): Path to the audio file\n step (int): Step size in milliseconds between windows\n window (int): FFT window size in milliseconds\n max_freq (int): Only FFT bins corresponding to frequencies between\n [0, max_freq] are returned\n eps (float): Small value to ensure numerical stability (for ln(x))\n " ]
Please provide a description of the function:def sample(self, label): samples = [] count = 0 for trial in range(self.max_trials): if count >= self.max_sample: return samples scale = np.random.uniform(self.min_scale, self.max_scale) min_ratio = max(self.min_aspect_ratio, scale * scale) max_ratio = min(self.max_aspect_ratio, 1. / scale / scale) ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio)) width = scale * ratio height = scale / ratio left = np.random.uniform(0., 1 - width) top = np.random.uniform(0., 1 - height) rand_box = (left, top, left + width, top + height) valid_mask = np.where(label[:, 0] > -1)[0] gt = label[valid_mask, :] ious = self._check_satisfy(rand_box, gt) if ious is not None: # transform gt labels after crop, discard bad ones l, t, r, b = rand_box new_gt_boxes = [] new_width = r - l new_height = b - t for i in range(valid_mask.size): if ious[i] > 0: xmin = max(0., (gt[i, 1] - l) / new_width) ymin = max(0., (gt[i, 2] - t) / new_height) xmax = min(1., (gt[i, 3] - l) / new_width) ymax = min(1., (gt[i, 4] - t) / new_height) new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax]) if not new_gt_boxes: continue new_gt_boxes = np.array(new_gt_boxes) label = np.lib.pad(new_gt_boxes, ((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \ 'constant', constant_values=(-1, -1)) samples.append((rand_box, label)) count += 1 return samples
[ "\n generate random cropping boxes according to parameters\n if satifactory crops generated, apply to ground-truth as well\n\n Parameters:\n ----------\n label : numpy.array (n x 5 matrix)\n ground-truths\n\n Returns:\n ----------\n list of (crop_box, label) tuples, if failed, return empty list []\n " ]
Please provide a description of the function:def _check_satisfy(self, rand_box, gt_boxes): l, t, r, b = rand_box num_gt = gt_boxes.shape[0] ls = np.ones(num_gt) * l ts = np.ones(num_gt) * t rs = np.ones(num_gt) * r bs = np.ones(num_gt) * b mask = np.where(ls < gt_boxes[:, 1])[0] ls[mask] = gt_boxes[mask, 1] mask = np.where(ts < gt_boxes[:, 2])[0] ts[mask] = gt_boxes[mask, 2] mask = np.where(rs > gt_boxes[:, 3])[0] rs[mask] = gt_boxes[mask, 3] mask = np.where(bs > gt_boxes[:, 4])[0] bs[mask] = gt_boxes[mask, 4] w = rs - ls w[w < 0] = 0 h = bs - ts h[h < 0] = 0 inter_area = h * w union_area = np.ones(num_gt) * max(0, r - l) * max(0, b - t) union_area += (gt_boxes[:, 3] - gt_boxes[:, 1]) * (gt_boxes[:, 4] - gt_boxes[:, 2]) union_area -= inter_area ious = inter_area / union_area ious[union_area <= 0] = 0 max_iou = np.amax(ious) if max_iou < self.min_overlap: return None # check ground-truth constraint if self.config['gt_constraint'] == 'center': for i in range(ious.shape[0]): if ious[i] > 0: gt_x = (gt_boxes[i, 1] + gt_boxes[i, 3]) / 2.0 gt_y = (gt_boxes[i, 2] + gt_boxes[i, 4]) / 2.0 if gt_x < l or gt_x > r or gt_y < t or gt_y > b: return None elif self.config['gt_constraint'] == 'corner': for i in range(ious.shape[0]): if ious[i] > 0: if gt_boxes[i, 1] < l or gt_boxes[i, 3] > r \ or gt_boxes[i, 2] < t or gt_boxes[i, 4] > b: return None return ious
[ "\n check if overlap with any gt box is larger than threshold\n " ]
Please provide a description of the function:def sample(self, label): samples = [] count = 0 for trial in range(self.max_trials): if count >= self.max_sample: return samples scale = np.random.uniform(self.min_scale, self.max_scale) min_ratio = max(self.min_aspect_ratio, scale * scale) max_ratio = min(self.max_aspect_ratio, 1. / scale / scale) ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio)) width = scale * ratio if width < 1: continue height = scale / ratio if height < 1: continue left = np.random.uniform(0., 1 - width) top = np.random.uniform(0., 1 - height) right = left + width bot = top + height rand_box = (left, top, right, bot) valid_mask = np.where(label[:, 0] > -1)[0] gt = label[valid_mask, :] new_gt_boxes = [] for i in range(gt.shape[0]): xmin = (gt[i, 1] - left) / width ymin = (gt[i, 2] - top) / height xmax = (gt[i, 3] - left) / width ymax = (gt[i, 4] - top) / height new_size = min(xmax - xmin, ymax - ymin) if new_size < self.min_gt_scale: new_gt_boxes = [] break new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax]) if not new_gt_boxes: continue new_gt_boxes = np.array(new_gt_boxes) label = np.lib.pad(new_gt_boxes, ((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \ 'constant', constant_values=(-1, -1)) samples.append((rand_box, label)) count += 1 return samples
[ "\n generate random padding boxes according to parameters\n if satifactory padding generated, apply to ground-truth as well\n\n Parameters:\n ----------\n label : numpy.array (n x 5 matrix)\n ground-truths\n\n Returns:\n ----------\n list of (crop_box, label) tuples, if failed, return empty list []\n " ]
Please provide a description of the function:def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs): mx.nd.waitall() args_list = [] for arg in args: args_list.append(arg) start = time.time() if scipy_trans_lhs: args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0]) for _ in range(repeat): func_name(*args_list, **kwargs) mx.nd.waitall() end = time.time() diff = end - start return diff / repeat
[ "Measure time cost of running a function\n " ]
Please provide a description of the function:def info(self): for key, value in self.dataset['info'].items(): print('{}: {}'.format(key, value))
[ "\n Print information about the annotation file.\n :return:\n " ]
Please provide a description of the function:def getCatIds(self, catNms=[], supNms=[], catIds=[]): catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids
[ "\n filtering parameters. default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n " ]
Please provide a description of the function:def loadAnns(self, ids=[]): if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]]
[ "\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n " ]
Please provide a description of the function:def loadCats(self, ids=[]): if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]]
[ "\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n " ]
Please provide a description of the function:def loadImgs(self, ids=[]): if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]]
[ "\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying img\n :return: imgs (object array) : loaded img objects\n " ]
Please provide a description of the function:def showAnns(self, anns): if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception('datasetType not supported') if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((int(len(seg)/2), 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask raise NotImplementedError("maskUtils disabled!") if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print(ann['caption'])
[ "\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n " ]
Please provide a description of the function:def download(self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. :param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print('Please specify target directory') return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urlretrieve(img['coco_url'], fname) print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
[]
Please provide a description of the function:def loadNumpyAnnotations(self, data): print('Converting ndarray to lists...') assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print('{}/{}'.format(i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann
[ "\n Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}\n :param data (numpy.ndarray)\n :return: annotations (python nested list)\n " ]
Please provide a description of the function:def annToRLE(self, ann): t = self.imgs[ann['image_id']] h, w = t['height'], t['width'] segm = ann['segmentation'] if type(segm) == list: # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code # rles = maskUtils.frPyObjects(segm, h, w) # rle = maskUtils.merge(rles) raise NotImplementedError("maskUtils disabled!") elif type(segm['counts']) == list: # uncompressed RLE # rle = maskUtils.frPyObjects(segm, h, w) raise NotImplementedError("maskUtils disabled!") else: # rle rle = ann['segmentation'] return rle
[ "\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n " ]
Please provide a description of the function:def save_model(): if not os.path.exists("checkpoint"): os.mkdir("checkpoint") return mx.callback.do_checkpoint("checkpoint/checkpoint", args.save_period)
[ "Save cnn model\n Returns\n ----------\n callback: A callback function that can be passed as epoch_end_callback to fit\n " ]
Please provide a description of the function:def highway(data): _data = data high_weight = mx.sym.Variable('high_weight') high_bias = mx.sym.Variable('high_bias') high_fc = mx.sym.FullyConnected(data=data, weight=high_weight, bias=high_bias, num_hidden=300, name='high_fc') high_relu = mx.sym.Activation(high_fc, act_type='relu') high_trans_weight = mx.sym.Variable('high_trans_weight') high_trans_bias = mx.sym.Variable('high_trans_bias') high_trans_fc = mx.sym.FullyConnected(data=_data, weight=high_trans_weight, bias=high_trans_bias, num_hidden=300, name='high_trans_sigmoid') high_trans_sigmoid = mx.sym.Activation(high_trans_fc, act_type='sigmoid') return high_relu * high_trans_sigmoid + _data * (1 - high_trans_sigmoid)
[ "Construct highway net\n Parameters\n ----------\n data:\n Returns\n ----------\n Highway Networks\n " ]
Please provide a description of the function:def train(symbol_data, train_iterator, valid_iterator, data_column_names, target_names): devs = mx.cpu() # default setting if args.gpus is not None: devs = [mx.gpu(int(i)) for i in args.gpus.split(',')] module = mx.mod.Module(symbol_data, data_names=data_column_names, label_names=target_names, context=devs) init_params = { 'vocab_embed_weight': {'uniform': 0.1}, 'convolution0_weight': {'uniform': 0.1}, 'convolution0_bias': {'costant': 0}, 'convolution1_weight': {'uniform': 0.1}, 'convolution1_bias': {'costant': 0}, 'convolution2_weight': {'uniform': 0.1}, 'convolution2_bias': {'costant': 0}, 'high_weight': {'uniform': 0.1}, 'high_bias': {'costant': 0}, 'high_trans_weight': {'uniform': 0.1}, 'high_trans_bias': {'costant': -2}, 'cls_weight': {'uniform': 0.1}, 'cls_bias': {'costant': 0}, } # custom init_params module.bind(data_shapes=train_iterator.provide_data, label_shapes=train_iterator.provide_label) module.init_params(CustomInit(init_params)) lr_sch = mx.lr_scheduler.FactorScheduler(step=25000, factor=0.999) module.init_optimizer( optimizer='rmsprop', optimizer_params={'learning_rate': 0.0005, 'lr_scheduler': lr_sch}) def norm_stat(d): return mx.nd.norm(d) / np.sqrt(d.size) mon = mx.mon.Monitor(25000, norm_stat) module.fit(train_data=train_iterator, eval_data=valid_iterator, eval_metric='acc', kvstore=args.kv_store, monitor=mon, num_epoch=args.num_epochs, batch_end_callback=mx.callback.Speedometer(args.batch_size, args.disp_batches), epoch_end_callback=save_model())
[ "Train cnn model\n Parameters\n ----------\n symbol_data: symbol\n train_iterator: DataIter\n Train DataIter\n valid_iterator: DataIter\n Valid DataIter\n data_column_names: list of str\n Defaults to ('data') for a typical model used in image classification\n target_names: list of str\n Defaults to ('softmax_label') for a typical model used in image classification\n " ]
Please provide a description of the function:def default_batchify_fn(data): if isinstance(data[0], nd.NDArray): return nd.stack(*data) elif isinstance(data[0], tuple): data = zip(*data) return [default_batchify_fn(i) for i in data] else: data = np.asarray(data) return nd.array(data, dtype=data.dtype)
[ "Collate data into batch." ]
Please provide a description of the function:def default_mp_batchify_fn(data): if isinstance(data[0], nd.NDArray): out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0)) return nd.stack(*data, out=out) elif isinstance(data[0], tuple): data = zip(*data) return [default_mp_batchify_fn(i) for i in data] else: data = np.asarray(data) return nd.array(data, dtype=data.dtype, ctx=context.Context('cpu_shared', 0))
[ "Collate data into batch. Use shared memory for stacking." ]
Please provide a description of the function:def _as_in_context(data, ctx): if isinstance(data, nd.NDArray): return data.as_in_context(ctx) elif isinstance(data, (list, tuple)): return [_as_in_context(d, ctx) for d in data] return data
[ "Move data into new context." ]
Please provide a description of the function:def worker_loop_v1(dataset, key_queue, data_queue, batchify_fn): while True: idx, samples = key_queue.get() if idx is None: break batch = batchify_fn([dataset[i] for i in samples]) data_queue.put((idx, batch))
[ "Worker loop for multiprocessing DataLoader." ]
Please provide a description of the function:def fetcher_loop_v1(data_queue, data_buffer, pin_memory=False, pin_device_id=0, data_buffer_lock=None): while True: idx, batch = data_queue.get() if idx is None: break if pin_memory: batch = _as_in_context(batch, context.cpu_pinned(pin_device_id)) else: batch = _as_in_context(batch, context.cpu()) if data_buffer_lock is not None: with data_buffer_lock: data_buffer[idx] = batch else: data_buffer[idx] = batch
[ "Fetcher loop for fetching data from queue and put in reorder dict." ]
Please provide a description of the function:def _worker_fn(samples, batchify_fn, dataset=None): # pylint: disable=unused-argument # it is required that each worker process has to fork a new MXIndexedRecordIO handle # preserving dataset as global variable can save tons of overhead and is safe in new process global _worker_dataset batch = batchify_fn([_worker_dataset[i] for i in samples]) buf = io.BytesIO() ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch) return buf.getvalue()
[ "Function for processing data in worker process." ]
Please provide a description of the function:def send(self, obj): buf = io.BytesIO() ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj) self.send_bytes(buf.getvalue())
[ "Send object" ]
Please provide a description of the function:def _push_next(self): r = next(self._iter, None) if r is None: return self._key_queue.put((self._sent_idx, r)) self._sent_idx += 1
[ "Assign next batch workload to workers." ]
Please provide a description of the function:def shutdown(self): if not self._shutdown: # send shutdown signal to the fetcher and join data queue first # Remark: loop_fetcher needs to be joined prior to the workers. # otherwise, the fetcher may fail at getting data self._data_queue.put((None, None)) self._fetcher.join() # send shutdown signal to all worker processes for _ in range(self._num_workers): self._key_queue.put((None, None)) # force shut down any alive worker processes for w in self._workers: if w.is_alive(): w.terminate() self._shutdown = True
[ "Shutdown internal workers by pushing terminate signals." ]
Please provide a description of the function:def _push_next(self): r = next(self._iter, None) if r is None: return async_ret = self._worker_pool.apply_async( self._worker_fn, (r, self._batchify_fn, self._dataset)) self._data_buffer[self._sent_idx] = async_ret self._sent_idx += 1
[ "Assign next batch workload to workers." ]
Please provide a description of the function:def _ctype_key_value(keys, vals): if isinstance(keys, (tuple, list)): assert(len(keys) == len(vals)) c_keys = [] c_vals = [] use_str_keys = None for key, val in zip(keys, vals): c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val) c_keys += c_key_i c_vals += c_val_i use_str_keys = str_keys_i if use_str_keys is None else use_str_keys assert(use_str_keys == str_keys_i), "inconsistent types of keys detected." c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \ else c_array(ctypes.c_int, c_keys) c_vals_arr = c_array(ctypes.c_void_p, c_vals) return (c_keys_arr, c_vals_arr, use_str_keys) assert(isinstance(keys, (int,) + string_types)), \ "unexpected type for keys: " + str(type(keys)) use_str_keys = isinstance(keys, string_types) if isinstance(vals, NDArray): c_keys = c_str_array([keys]) if use_str_keys \ else c_array_buf(ctypes.c_int, array('i', [keys])) return (c_keys, c_handle_array([vals]), use_str_keys) else: for value in vals: assert(isinstance(value, NDArray)) c_keys = c_str_array([keys] * len(vals)) if use_str_keys \ else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals))) return (c_keys, c_handle_array(vals), use_str_keys)
[ "\n Returns ctype arrays for the key-value args, and the whether string keys are used.\n For internal use only.\n " ]
Please provide a description of the function:def _ctype_dict(param_dict): assert(isinstance(param_dict, dict)), \ "unexpected type for param_dict: " + str(type(param_dict)) c_keys = c_array(ctypes.c_char_p, [c_str(k) for k in param_dict.keys()]) c_vals = c_array(ctypes.c_char_p, [c_str(str(v)) for v in param_dict.values()]) return (c_keys, c_vals)
[ "\n Returns ctype arrays for keys and values(converted to strings) in a dictionary\n " ]
Please provide a description of the function:def _updater_wrapper(updater): def updater_handle(key, lhs_handle, rhs_handle, _): lhs = _ndarray_cls(NDArrayHandle(lhs_handle)) rhs = _ndarray_cls(NDArrayHandle(rhs_handle)) updater(key, lhs, rhs) return updater_handle
[ "A wrapper for the user-defined handle.", " ctypes function " ]
Please provide a description of the function:def create(name='local'): if not isinstance(name, string_types): raise TypeError('name must be a string') handle = KVStoreHandle() check_call(_LIB.MXKVStoreCreate(c_str(name), ctypes.byref(handle))) kv = KVStore(handle) set_kvstore_handle(kv.handle) return kv
[ "Creates a new KVStore.\n\n For single machine training, there are two commonly used types:\n\n ``local``: Copies all gradients to CPU memory and updates weights there.\n\n ``device``: Aggregates gradients and updates weights on GPUs. With this setting,\n the KVStore also attempts to use GPU peer-to-peer communication,\n potentially accelerating the communication.\n\n For distributed training, KVStore also supports a number of types:\n\n ``dist_sync``: Behaves similarly to ``local`` but with one major difference.\n With ``dist_sync``, batch-size now means the batch size used on each machine.\n So if there are ``n`` machines and we use batch size ``b``,\n then ``dist_sync`` behaves like ``local`` with batch size ``n * b``.\n\n ``dist_device_sync``: Identical to ``dist_sync`` with the difference similar\n to ``device`` vs ``local``.\n\n ``dist_async``: Performs asynchronous updates.\n The weights are updated whenever gradients are received from any machine.\n No two updates happen on the same weight at the same time. However, the order is not\n guaranteed.\n\n Parameters\n ----------\n name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async'}\n The type of KVStore.\n Returns\n -------\n kv : KVStore\n The created KVStore.\n " ]
Please provide a description of the function:def init(self, key, value): ckeys, cvals, use_str_keys = _ctype_key_value(key, value) if use_str_keys: check_call(_LIB.MXKVStoreInitEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals)) else: check_call(_LIB.MXKVStoreInit(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
[ " Initializes a single or a sequence of key-value pairs into the store.\n\n For each key, one must `init` it before calling `push` or `pull`.\n When multiple workers invoke `init` for the same key, only\n the value supplied by worker with rank `0` is used. This function returns\n after data has been initialized successfully.\n\n Parameters\n ----------\n key : str, int, or sequence of str or int\n The keys.\n value : NDArray, RowSparseNDArray or sequence of NDArray or RowSparseNDArray\n Values corresponding to the keys.\n\n Examples\n --------\n >>> # init a single key-value pair\n >>> shape = (2,3)\n >>> kv = mx.kv.create('local')\n >>> kv.init('3', mx.nd.ones(shape)*2)\n >>> a = mx.nd.zeros(shape)\n >>> kv.pull('3', out=a)\n >>> print a.asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n\n >>> # init a list of key-value pairs\n >>> keys = ['5', '7', '9']\n >>> kv.init(keys, [mx.nd.ones(shape)]*len(keys))\n\n >>> # init a row_sparse value\n >>> kv.init('4', mx.nd.ones(shape).tostype('row_sparse'))\n >>> b = mx.nd.sparse.zeros('row_sparse', shape)\n >>> kv.row_sparse_pull('4', row_ids=mx.nd.array([0, 1]), out=b)\n >>> print b\n <RowSparseNDArray 2x3 @cpu(0)>\n " ]
Please provide a description of the function:def push(self, key, value, priority=0): ckeys, cvals, use_str_keys = _ctype_key_value(key, value) if use_str_keys: check_call(_LIB.MXKVStorePushEx( self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority))) else: check_call(_LIB.MXKVStorePush( self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
[ " Pushes a single or a sequence of key-value pairs into the store.\n\n This function returns immediately after adding an operator to the engine.\n The actual operation is executed asynchronously. If there are consecutive\n pushes to the same key, there is no guarantee on the serialization of pushes.\n The execution of a push does not guarantee that all previous pushes are\n finished.\n There is no synchronization between workers.\n One can use ``_barrier()`` to sync all workers.\n\n Parameters\n ----------\n key : str, int, or sequence of str or int\n Keys.\n\n value : NDArray, RowSparseNDArray, list of NDArray or RowSparseNDArray,\n or list of list of NDArray or RowSparseNDArray\n Values corresponding to the keys.\n\n priority : int, optional\n The priority of the push operation.\n Higher priority push operations are likely to be executed before\n other push actions.\n\n Examples\n --------\n >>> # push a single key-value pair\n >>> kv.push('3', mx.nd.ones(shape)*8)\n >>> kv.pull('3', out=a) # pull out the value\n >>> print a.asnumpy()\n [[ 8. 8. 8.]\n [ 8. 8. 8.]]\n\n >>> # aggregate the value and the push\n >>> gpus = [mx.gpu(i) for i in range(4)]\n >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]\n >>> kv.push('3', b)\n >>> kv.pull('3', out=a)\n >>> print a.asnumpy()\n [[ 4. 4. 4.]\n [ 4. 4. 4.]]\n\n >>> # push a list of keys.\n >>> # single device\n >>> keys = ['4', '5', '6']\n >>> kv.push(keys, [mx.nd.ones(shape)]*len(keys))\n >>> b = [mx.nd.zeros(shape)]*len(keys)\n >>> kv.pull(keys, out=b)\n >>> print b[1].asnumpy()\n [[ 1. 1. 1.]\n [ 1. 1. 1.]]\n\n >>> # multiple devices:\n >>> keys = ['7', '8', '9']\n >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)\n >>> kv.push(keys, b)\n >>> kv.pull(keys, out=b)\n >>> print b[1][1].asnumpy()\n [[ 4. 4. 4.]\n [ 4. 4. 4.]]\n\n >>> # push a row_sparse value\n >>> b = mx.nd.sparse.zeros('row_sparse', shape)\n >>> kv.init('10', mx.nd.sparse.zeros('row_sparse', shape))\n >>> kv.push('10', mx.nd.ones(shape).tostype('row_sparse'))\n >>> # pull out the value\n >>> kv.row_sparse_pull('10', row_ids=mx.nd.array([0, 1]), out=b)\n >>> print b\n <RowSparseNDArray 2x3 @cpu(0)>\n " ]
Please provide a description of the function:def pull(self, key, out=None, priority=0, ignore_sparse=True): assert(out is not None) ckeys, cvals, use_str_keys = _ctype_key_value(key, out) if use_str_keys: check_call(_LIB.MXKVStorePullWithSparseEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority), ctypes.c_bool(ignore_sparse))) else: check_call(_LIB.MXKVStorePullWithSparse(self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority), ctypes.c_bool(ignore_sparse)))
[ " Pulls a single value or a sequence of values from the store.\n\n This function returns immediately after adding an operator to the engine.\n Subsequent attempts to read from the `out` variable will be blocked until the\n pull operation completes.\n\n `pull` is executed asynchronously after all previous `pull` calls and only\n the last `push` call for the same input key(s) are finished.\n\n The returned values are guaranteed to be the latest values in the store.\n\n pull with `RowSparseNDArray` is not supported for dist kvstore.\n Please use ``row_sparse_pull`` instead.\n\n Parameters\n ----------\n key : str, int, or sequence of str or int\n Keys.\n\n out: NDArray or list of NDArray or list of list of NDArray\n Values corresponding to the keys.\n\n priority : int, optional\n The priority of the pull operation.\n Higher priority pull operations are likely to be executed before\n other pull actions.\n\n ignore_sparse: bool, optional, default True\n Whether to ignore sparse arrays in the request.\n\n Examples\n --------\n >>> # pull a single key-value pair\n >>> a = mx.nd.zeros(shape)\n >>> kv.pull('3', out=a)\n >>> print a.asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n\n >>> # pull into multiple devices\n >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]\n >>> kv.pull('3', out=b)\n >>> print b[1].asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n\n >>> # pull a list of key-value pairs.\n >>> # On single device\n >>> keys = ['5', '7', '9']\n >>> b = [mx.nd.zeros(shape)]*len(keys)\n >>> kv.pull(keys, out=b)\n >>> print b[1].asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n >>> # On multiple devices\n >>> keys = ['6', '8', '10']\n >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)\n >>> kv.pull(keys, out=b)\n >>> print b[1][1].asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n " ]
Please provide a description of the function:def row_sparse_pull(self, key, out=None, priority=0, row_ids=None): assert(out is not None) assert(row_ids is not None) if isinstance(row_ids, NDArray): row_ids = [row_ids] assert(isinstance(row_ids, list)), \ "row_ids should be NDArray or list of NDArray" first_out = out # whether row_ids are the same single_rowid = False if len(row_ids) == 1 and isinstance(out, list): single_rowid = True first_out = [out[0]] ckeys, cvals, use_str_keys = _ctype_key_value(key, first_out) _, crow_ids, _ = _ctype_key_value(key, row_ids) assert(len(crow_ids) == len(cvals)), \ "the number of row_ids doesn't match the number of values" if use_str_keys: check_call(_LIB.MXKVStorePullRowSparseEx( self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority))) else: check_call(_LIB.MXKVStorePullRowSparse( self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority))) # the result can be copied to other devices without invoking row_sparse_pull # if the indices are the same if single_rowid: for out_i in out[1:]: out[0].copyto(out_i)
[ " Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \\\n from the store with specified row_ids. When there is only one row_id, KVStoreRowSparsePull \\\n is invoked just once and the result is broadcast to all the rest of outputs.\n\n `row_sparse_pull` is executed asynchronously after all previous\n `pull`/`row_sparse_pull` calls and the last `push` call for the\n same input key(s) are finished.\n\n The returned values are guaranteed to be the latest values in the store.\n\n Parameters\n ----------\n key : str, int, or sequence of str or int\n Keys.\n\n out: RowSparseNDArray or list of RowSparseNDArray or list of list of RowSparseNDArray\n Values corresponding to the keys. The stype is expected to be row_sparse\n\n priority : int, optional\n The priority of the pull operation.\n Higher priority pull operations are likely to be executed before\n other pull actions.\n\n row_ids : NDArray or list of NDArray\n The row_ids for which to pull for each value. Each row_id is an 1-D NDArray \\\n whose values don't have to be unique nor sorted.\n\n Examples\n --------\n >>> shape = (3, 3)\n >>> kv.init('3', mx.nd.ones(shape).tostype('row_sparse'))\n >>> a = mx.nd.sparse.zeros('row_sparse', shape)\n >>> row_ids = mx.nd.array([0, 2], dtype='int64')\n >>> kv.row_sparse_pull('3', out=a, row_ids=row_ids)\n >>> print a.asnumpy()\n [[ 1. 1. 1.]\n [ 0. 0. 0.]\n [ 1. 1. 1.]]\n >>> duplicate_row_ids = mx.nd.array([2, 2], dtype='int64')\n >>> kv.row_sparse_pull('3', out=a, row_ids=duplicate_row_ids)\n >>> print a.asnumpy()\n [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 1. 1. 1.]]\n >>> unsorted_row_ids = mx.nd.array([1, 0], dtype='int64')\n >>> kv.row_sparse_pull('3', out=a, row_ids=unsorted_row_ids)\n >>> print a.asnumpy()\n [[ 1. 1. 1.]\n [ 1. 1. 1.]\n [ 0. 0. 0.]]\n " ]
Please provide a description of the function:def set_gradient_compression(self, compression_params): if ('device' in self.type) or ('dist' in self.type): # pylint: disable=unsupported-membership-test ckeys, cvals = _ctype_dict(compression_params) check_call(_LIB.MXKVStoreSetGradientCompression(self.handle, mx_uint(len(compression_params)), ckeys, cvals)) else: raise Exception('Gradient compression is not supported for this type of kvstore')
[ " Specifies type of low-bit quantization for gradient compression \\\n and additional arguments depending on the type of compression being used.\n\n 2bit Gradient Compression takes a positive float `threshold`.\n The technique works by thresholding values such that positive values in the\n gradient above threshold will be set to threshold. Negative values whose absolute\n values are higher than threshold, will be set to the negative of threshold.\n Values whose absolute values are less than threshold will be set to 0.\n By doing so, each value in the gradient is in one of three states. 2bits are\n used to represent these states, and every 16 float values in the original\n gradient can be represented using one float. This compressed representation\n can reduce communication costs. The difference between these thresholded values and\n original values is stored at the sender's end as residual and added to the\n gradient in the next iteration.\n\n When kvstore is 'local', gradient compression is used to reduce communication\n between multiple devices (gpus). Gradient is quantized on each GPU which\n computed the gradients, then sent to the GPU which merges the gradients. This\n receiving GPU dequantizes the gradients and merges them. Note that this\n increases memory usage on each GPU because of the residual array stored.\n\n When kvstore is 'dist', gradient compression is used to reduce communication\n from worker to sender. Gradient is quantized on each worker which\n computed the gradients, then sent to the server which dequantizes\n this data and merges the gradients from each worker. Note that this\n increases CPU memory usage on each worker because of the residual array stored.\n Only worker to server communication is compressed in this setting.\n If each machine has multiple GPUs, currently this GPU to GPU or GPU to CPU communication\n is not compressed. Server to worker communication (in the case of pull)\n is also not compressed.\n\n To use 2bit compression, we need to specify `type` as `2bit`.\n Only specifying `type` would use default value for the threshold.\n To completely specify the arguments for 2bit compression, we would need to pass\n a dictionary which includes `threshold` like:\n {'type': '2bit', 'threshold': 0.5}\n\n Parameters\n ----------\n compression_params : dict\n A dictionary specifying the type and parameters for gradient compression.\n The key `type` in this dictionary is a\n required string argument and specifies the type of gradient compression.\n Currently `type` can be only `2bit`\n Other keys in this dictionary are optional and specific to the type\n of gradient compression.\n " ]
Please provide a description of the function:def set_optimizer(self, optimizer): is_worker = ctypes.c_int() check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker))) # pylint: disable=invalid-name if 'dist' in self.type and is_worker.value: # pylint: disable=unsupported-membership-test # send the optimizer to server try: # use ASCII protocol 0, might be slower, but not a big deal optim_str = py_str(pickle.dumps(optimizer, 0)) except: raise cmd = _get_kvstore_server_command_type('kController') self._send_command_to_servers(cmd, optim_str) if optimizer.multi_precision: cmd = _get_kvstore_server_command_type('kSetMultiPrecision') self._send_command_to_servers(cmd, '') else: self._set_updater(opt.get_updater(optimizer))
[ " Registers an optimizer with the kvstore.\n\n When using a single machine, this function updates the local optimizer.\n If using multiple machines and this operation is invoked from a worker node,\n it will serialized the optimizer with pickle and send it to all servers.\n The function returns after all servers have been updated.\n\n Parameters\n ----------\n optimizer : Optimizer\n The new optimizer for the store\n\n Examples\n --------\n\n >>> kv = mx.kv.create()\n >>> shape = (2, 2)\n >>> weight = mx.nd.zeros(shape)\n >>> kv.init(3, weight)\n >>> # set the optimizer for kvstore as the default SGD optimizer\n >>> kv.set_optimizer(mx.optimizer.SGD())\n >>> grad = mx.nd.ones(shape)\n >>> kv.push(3, grad)\n >>> kv.pull(3, out = weight)\n >>> # weight is updated via gradient descent\n >>> weight.asnumpy()\n array([[-0.01, -0.01],\n [-0.01, -0.01]], dtype=float32)\n " ]
Please provide a description of the function:def type(self): kv_type = ctypes.c_char_p() check_call(_LIB.MXKVStoreGetType(self.handle, ctypes.byref(kv_type))) return py_str(kv_type.value)
[ " Returns the type of this kvstore.\n\n Returns\n -------\n type : str\n the string type\n " ]
Please provide a description of the function:def rank(self): rank = ctypes.c_int() check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank))) return rank.value
[ " Returns the rank of this worker node.\n\n Returns\n -------\n rank : int\n The rank of this node, which is in range [0, num_workers())\n " ]
Please provide a description of the function:def num_workers(self): size = ctypes.c_int() check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size))) return size.value
[ "Returns the number of worker nodes.\n\n Returns\n -------\n size :int\n The number of worker nodes.\n " ]
Please provide a description of the function:def save_optimizer_states(self, fname, dump_optimizer=False): assert self._updater is not None, "Cannot save states for distributed training" with open(fname, 'wb') as fout: fout.write(self._updater.get_states(dump_optimizer))
[ "Saves the optimizer (updater) state to a file. This is often used when checkpointing\n the model during training.\n\n Parameters\n ----------\n fname : str\n Path to the output states file.\n dump_optimizer : bool, default False\n Whether to also save the optimizer itself. This would also save optimizer\n information such as learning rate and weight decay schedules.\n " ]
Please provide a description of the function:def load_optimizer_states(self, fname): assert self._updater is not None, "Cannot load states for distributed training" self._updater.set_states(open(fname, 'rb').read())
[ "Loads the optimizer (updater) state from the file.\n\n Parameters\n ----------\n fname : str\n Path to input states file.\n " ]
Please provide a description of the function:def _set_updater(self, updater): self._updater = updater # set updater with int keys _updater_proto = ctypes.CFUNCTYPE( None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p) self._updater_func = _updater_proto(_updater_wrapper(updater)) # set updater with str keys _str_updater_proto = ctypes.CFUNCTYPE( None, ctypes.c_char_p, NDArrayHandle, NDArrayHandle, ctypes.c_void_p) self._str_updater_func = _str_updater_proto(_updater_wrapper(updater)) check_call(_LIB.MXKVStoreSetUpdaterEx(self.handle, self._updater_func, self._str_updater_func, None))
[ "Sets a push updater into the store.\n\n This function only changes the local store. When running on multiple machines one must\n use `set_optimizer`.\n\n Parameters\n ----------\n updater : function\n The updater function.\n\n Examples\n --------\n >>> def update(key, input, stored):\n ... print \"update on key: %d\" % key\n ... stored += input * 2\n >>> kv._set_updater(update)\n >>> kv.pull('3', out=a)\n >>> print a.asnumpy()\n [[ 4. 4. 4.]\n [ 4. 4. 4.]]\n >>> kv.push('3', mx.nd.ones(shape))\n update on key: 3\n >>> kv.pull('3', out=a)\n >>> print a.asnumpy()\n [[ 6. 6. 6.]\n [ 6. 6. 6.]]\n " ]
Please provide a description of the function:def _send_command_to_servers(self, head, body): check_call(_LIB.MXKVStoreSendCommmandToServers( self.handle, mx_uint(head), c_str(body)))
[ "Sends a command to all server nodes.\n\n Sending command to a server node will cause that server node to invoke\n ``KVStoreServer.controller`` to execute the command.\n\n This function returns after the command has been executed on all server\n nodes.\n\n Parameters\n ----------\n head : int\n the head of the command.\n body : str\n the body of the command.\n " ]
Please provide a description of the function:def add(self, module, **kwargs): self._modules.append(module) # a sanity check to avoid typo for key in kwargs: assert key in self._meta_keys, ('Unknown meta "%s", a typo?' % key) self._metas.append(kwargs) # after adding new modules, we are reset back to raw states, needs # to bind, init_params, etc. self.binded = False self.params_initialized = False self.optimizer_initialized = False return self
[ "Add a module to the chain.\n\n Parameters\n ----------\n module : BaseModule\n The new module to add.\n kwargs : ``**keywords``\n All the keyword arguments are saved as meta information\n for the added module. The currently known meta includes\n\n - `take_labels`: indicating whether the module expect to\n take labels when doing computation. Note any module in\n the chain can take labels (not necessarily only the top\n most one), and they all take the same labels passed\n from the original data batch for the `SequentialModule`.\n\n\n Returns\n -------\n self\n This function returns `self` to allow us to easily chain a\n series of `add` calls.\n Examples\n --------\n >>> # An example of addinging two modules to a chain.\n >>> seq_mod = mx.mod.SequentialModule()\n >>> seq_mod.add(mod1)\n >>> seq_mod.add(mod2)\n\n " ]
Please provide a description of the function:def get_params(self): assert self.binded and self.params_initialized arg_params = dict() aux_params = dict() for module in self._modules: arg, aux = module.get_params() arg_params.update(arg) aux_params.update(aux) return (arg_params, aux_params)
[ "Gets current parameters.\n\n Returns\n -------\n (arg_params, aux_params)\n A pair of dictionaries each mapping parameter names to NDArray values. This\n is a merged dictionary of all the parameters in the modules.\n " ]
Please provide a description of the function:def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None, allow_missing=False, force_init=False, allow_extra=False): if self.params_initialized and not force_init: return assert self.binded, 'call bind before initializing the parameters' for module in self._modules: module.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init, allow_extra=allow_extra) # make sure we do not have duplicated parameter names def _check_name(known_names, new_names, modules, i): for name in new_names: assert not name in known_names, "Duplicated parameter names: " + \ ('name "%s" in layer %d (%s) is already ' % (name, i, type(modules[i]))) + \ ('used in layer %d (%s).' % (known_names[name], type(modules[known_names[name]]))) known_names[name] = i arg_names = dict() aux_names = dict() for i_layer, module in enumerate(self._modules): arg_params, aux_params = module.get_params() _check_name(arg_names, arg_params.keys(), self._modules, i_layer) _check_name(aux_names, aux_params.keys(), self._modules, i_layer) self.params_initialized = True
[ "Initializes parameters.\n\n Parameters\n ----------\n initializer : Initializer\n arg_params : dict\n Default ``None``. Existing parameters. This has higher priority\n than `initializer`.\n aux_params : dict\n Default ``None``. Existing auxiliary states. This has higher priority\n than `initializer`.\n allow_missing : bool\n Allow missing values in `arg_params` and `aux_params` (if not ``None``).\n In this case, missing values will be filled with `initializer`.\n force_init : bool\n Default ``False``.\n allow_extra : boolean, optional\n Whether allow extra parameters that are not needed by symbol.\n If this is True, no error will be thrown when arg_params or aux_params\n contain extra parameters that is not needed by the executor.\n ", "Internal function to help checking duplicated names." ]
Please provide a description of the function:def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'): if self.binded and not force_rebind: self.logger.warning('Already bound, ignoring bind()') return if inputs_need_grad: assert for_training is True assert shared_module is None, 'Shared module is not supported' assert len(self._modules) > 0, 'Attempting to bind an empty SequentialModule' self.binded = True # the same label shapes are used for all chained modules self._label_shapes = label_shapes my_data_shapes = data_shapes anybody_ever_needs_label = False for i_layer, module in enumerate(self._modules): meta = self._metas[i_layer] if SequentialModule.META_TAKE_LABELS in meta and \ meta[SequentialModule.META_TAKE_LABELS]: my_label_shapes = label_shapes anybody_ever_needs_label = True else: my_label_shapes = None my_inputs_need_grad = bool(inputs_need_grad or (for_training and i_layer > 0)) if meta.get(SequentialModule.META_AUTO_WIRING, False): data_names = module.data_names assert len(data_names) == len(my_data_shapes) my_data_shapes = [(new_name, shape) for (new_name, (_, shape)) in zip(data_names, my_data_shapes)] module.bind(data_shapes=my_data_shapes, label_shapes=my_label_shapes, for_training=for_training, inputs_need_grad=my_inputs_need_grad, force_rebind=force_rebind, shared_module=None, grad_req=grad_req) # the output of the previous module is the data of the next module my_data_shapes = module.output_shapes if not anybody_ever_needs_label: # then I do not need label either self._label_shapes = None
[ "Binds the symbols to construct executors. This is necessary before one\n can perform computation with the module.\n\n Parameters\n ----------\n data_shapes : list of (str, tuple)\n Typically is `data_iter.provide_data`.\n label_shapes : list of (str, tuple)\n Typically is `data_iter.provide_label`.\n for_training : bool\n Default is ``True``. Whether the executors should be bind for training.\n inputs_need_grad : bool\n Default is ``False``. Whether the gradients to the input data need to be computed.\n Typically this is not needed. But this might be needed when implementing composition\n of modules.\n force_rebind : bool\n Default is ``False``. This function does nothing if the executors are already\n bound. But with this ``True``, the executors will be forced to rebind.\n shared_module : Module\n Default is ``None``. Currently shared module is not supported for `SequentialModule`.\n grad_req : str, list of str, dict of str to str\n Requirement for gradient accumulation. Can be 'write', 'add', or 'null'\n (default to 'write').\n Can be specified globally (str) or for each argument (list, dict).\n " ]