<SYSTEM_TASK:> DEPRECATED, use mx.img instead <END_TASK> <USER_TASK:> Description:
def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
    """DEPRECATED, use mx.img instead

    Parameters
    ----------
    str_img : str
        Binary image data
    clip_rect : iterable of 4 int
        Clip decoded image to rectangle (x0, y0, x1, y1).
    out : NDArray
        Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
    index : int
        Output decoded image to i-th slice of 4 dimensional buffer.
    channels : int
        Number of channels to output. Decode to grey scale when channels = 1.
    mean : NDArray
        Subtract mean from decoded image before outputting.
    """
    # pylint: disable= no-member, protected-access, too-many-arguments
    if mean is None:
        mean = NDArray(_new_empty_handle())
    if out is None:
        return _internal._imdecode(mean, index,
                                   clip_rect[0], clip_rect[1], clip_rect[2], clip_rect[3],
                                   channels, len(str_img), str_img=str_img)
    else:
        return _internal._imdecode(mean, index,
                                   clip_rect[0], clip_rect[1], clip_rect[2], clip_rect[3],
                                   channels, len(str_img), str_img=str_img, out=out)
<SYSTEM_TASK:> Returns a new array filled with all zeros, with the given shape and type. <END_TASK> <USER_TASK:> Description:
def zeros(shape, ctx=None, dtype=None, **kwargs):
    """Returns a new array filled with all zeros, with the given shape and type.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A created array

    Examples
    --------
    >>> mx.nd.zeros(1).asnumpy()
    array([ 0.], dtype=float32)
    >>> mx.nd.zeros((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
    array([[ 0.,  0.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    if ctx is None:
        ctx = current_context()
    dtype = mx_real_t if dtype is None else dtype
    # pylint: disable= no-member, protected-access
    return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
<SYSTEM_TASK:> Return a 2-D array with ones on the diagonal and zeros elsewhere. <END_TASK> <USER_TASK:> Description:
def eye(N, M=0, k=0, ctx=None, dtype=None, **kwargs):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If 0, defaults to N.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal,
        and a negative value to a lower diagonal.
    ctx : Context, optional
        An optional device context (default is the current default context)
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`)

    Returns
    -------
    NDArray
        A created array

    Examples
    --------
    >>> mx.nd.eye(2)
    [[ 1.  0.]
     [ 0.  1.]]
    <NDArray 2x2 @cpu(0)>
    >>> mx.nd.eye(2, 3, 1)
    [[ 0.  1.  0.]
     [ 0.  0.  1.]]
    <NDArray 2x3 @cpu(0)>
    """
    # pylint: disable= unused-argument
    if ctx is None:
        ctx = current_context()
    dtype = mx_real_t if dtype is None else dtype
    # pylint: disable= no-member, protected-access
    return _internal._eye(N=N, M=M, k=k, ctx=ctx, dtype=dtype, **kwargs)
<SYSTEM_TASK:> Returns a reference view of NDArray that represents as DLManagedTensor until <END_TASK> <USER_TASK:> Description:
def to_dlpack_for_read(data):
    """Returns a reference view of NDArray that represents as DLManagedTensor until
    all previous write operations on the current array are finished.

    Parameters
    ----------
    data : NDArray
        input data.

    Returns
    -------
    PyCapsule (the pointer of DLManagedTensor)
        a reference view of NDArray that represents as DLManagedTensor.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.to_dlpack_for_read(x)
    >>> type(y)
    <class 'PyCapsule'>
    >>> z = mx.nd.from_dlpack(y)
    >>> z
    [[1. 1. 1.]
     [1. 1. 1.]]
    <NDArray 2x3 @cpu(0)>
    """
    data.wait_to_read()
    dlpack = DLPackHandle()
    check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack)))
    return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter)
<SYSTEM_TASK:> Returns an MXNet's NDArray backed by Numpy's ndarray. <END_TASK> <USER_TASK:> Description:
def from_numpy(ndarray, zero_copy=True):
    """Returns an MXNet's NDArray backed by Numpy's ndarray.

    Parameters
    ----------
    ndarray : numpy.ndarray
        input data
    zero_copy : bool
        Whether we use DLPack's zero-copy conversion to convert to MXNet's NDArray.
        This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True.

    Returns
    -------
    NDArray
        a NDArray backed by a dlpack tensor
    """
    def _make_manager_ctx(obj):
        pyobj = ctypes.py_object(obj)
        void_p = ctypes.c_void_p.from_buffer(pyobj)
        ctypes.pythonapi.Py_IncRef(pyobj)
        return void_p

    def _make_dl_tensor(array):
        if str(array.dtype) not in DLDataType.TYPE_MAP:
            raise ValueError(str(array.dtype) + " is not supported.")
        dl_tensor = DLTensor()
        dl_tensor.data = array.ctypes.data_as(ctypes.c_void_p)
        dl_tensor.ctx = DLContext(1, 0)
        dl_tensor.ndim = array.ndim
        dl_tensor.dtype = DLDataType.TYPE_MAP[str(array.dtype)]
        dl_tensor.shape = array.ctypes.shape_as(ctypes.c_int64)
        dl_tensor.strides = None
        dl_tensor.byte_offset = 0
        return dl_tensor

    def _make_dl_managed_tensor(array):
        c_obj = DLManagedTensor()
        c_obj.dl_tensor = _make_dl_tensor(array)
        c_obj.manager_ctx = _make_manager_ctx(array)
        c_obj.deleter = dl_managed_tensor_deleter
        return c_obj

    if not zero_copy:
        return array(ndarray, dtype=ndarray.dtype)

    if not ndarray.flags['C_CONTIGUOUS']:
        raise ValueError("Only c-contiguous arrays are supported for zero-copy")
    c_obj = _make_dl_managed_tensor(ndarray)
    address = ctypes.addressof(c_obj)
    address = ctypes.cast(address, ctypes.c_void_p)
    handle = NDArrayHandle()
    check_call(_LIB.MXNDArrayFromDLPack(address, ctypes.byref(handle)))
    return NDArray(handle=handle)
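A minimal usage sketch of the conversion above (standard `mxnet`/`numpy` imports assumed; illustrative rather than canonical):

import numpy as np
import mxnet as mx

a = np.arange(6, dtype='float32').reshape(2, 3)  # C-contiguous, so zero-copy is allowed
b = mx.nd.from_numpy(a)                          # NDArray backed by a's buffer via DLPack
print(b.shape)                                   # (2, 3)

t = a.T                                          # transposed view is not C-contiguous
c = mx.nd.from_numpy(t, zero_copy=False)         # falls back to an explicit copy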
<SYSTEM_TASK:> Given value and vshape, create an `NDArray` from value with the same <END_TASK> <USER_TASK:> Description:
def _prepare_value_nd(self, value, vshape):
    """Given value and vshape, create an `NDArray` from value with the same
    context and dtype as the current one and broadcast it to vshape."""
    if isinstance(value, numeric_types):
        value_nd = full(shape=vshape, val=value, ctx=self.context, dtype=self.dtype)
    elif isinstance(value, NDArray):
        value_nd = value.as_in_context(self.context)
        if value_nd.dtype != self.dtype:
            value_nd = value_nd.astype(self.dtype)
    else:
        try:
            value_nd = array(value, ctx=self.context, dtype=self.dtype)
        except:
            raise TypeError('NDArray does not support assignment with non-array-like'
                            ' object %s of type %s' % (str(value), str(type(value))))
    if value_nd.shape != vshape:
        value_nd = value_nd.broadcast_to(vshape)
    return value_nd
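For context, this helper backs sliced assignment; a hypothetical illustration of the three branches (scalar, NDArray, generic array-like):

import mxnet as mx

x = mx.nd.zeros((2, 3))
x[0, :] = 5.0              # numeric type -> full(...) then broadcast to the slice shape
x[1, :] = mx.nd.ones(3)    # NDArray -> moved to x's context/dtype, then broadcast
x[:, 0] = [7, 8]           # list -> array(...) with x's context/dtype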
<SYSTEM_TASK:> Broadcasts the input array to a new shape. <END_TASK> <USER_TASK:> Description:
def broadcast_to(self, shape):
    """Broadcasts the input array to a new shape.

    Broadcasting is only allowed on axes with size 1. The new shape cannot change
    the number of dimensions.
    For example, you could broadcast from shape (2, 1) to (2, 3), but not from
    shape (2, 3) to (2, 3, 3).

    Parameters
    ----------
    shape : tuple of int
        The shape of the desired array.

    Returns
    -------
    NDArray
        A NDArray with the desired shape that is not sharing data with this
        array, even if the new shape is the same as ``self.shape``.

    Examples
    --------
    >>> x = mx.nd.arange(0,3).reshape((1,3,1))
    >>> x.asnumpy()
    array([[[ 0.],
            [ 1.],
            [ 2.]]], dtype=float32)
    >>> y = x.broadcast_to((2,3,3))
    >>> y.asnumpy()
    array([[[ 0.,  0.,  0.],
            [ 1.,  1.,  1.],
            [ 2.,  2.,  2.]],
    <BLANKLINE>
           [[ 0.,  0.,  0.],
            [ 1.,  1.,  1.],
            [ 2.,  2.,  2.]]], dtype=float32)
    """
    cur_shape = self.shape
    err_str = 'operands could not be broadcast together with remapped shapes' \
              '[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
    if len(shape) < len(cur_shape):
        raise ValueError(err_str)
    cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
    cur_shape_arr = np.array(cur_shape)
    broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
    if (cur_shape_arr[broadcasting_axes] != 1).any():
        raise ValueError(err_str)
    if cur_shape != self.shape:
        return op.broadcast_to(self.reshape(cur_shape), shape=shape)
    else:
        return op.broadcast_to(self, shape=tuple(shape))
<SYSTEM_TASK:> Returns a ``numpy.ndarray`` object with value copied from this array. <END_TASK> <USER_TASK:> Description:
def asnumpy(self):
    """Returns a ``numpy.ndarray`` object with value copied from this array.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = x.asnumpy()
    >>> type(y)
    <type 'numpy.ndarray'>
    >>> y
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> z = mx.nd.ones((2,3), dtype='int32')
    >>> z.asnumpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    """
    data = np.empty(self.shape, dtype=self.dtype)
    check_call(_LIB.MXNDArraySyncCopyToCPU(
        self.handle,
        data.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_size_t(data.size)))
    return data
<SYSTEM_TASK:> Returns a copy of the array after casting to a specified type. <END_TASK> <USER_TASK:> Description:
def astype(self, dtype, copy=True):
    """Returns a copy of the array after casting to a specified type.

    Parameters
    ----------
    dtype : numpy.dtype or str
        The type of the returned array.
    copy : bool
        Default `True`. By default, astype always returns a newly
        allocated ndarray on the same context. If this is set to
        `False`, and the dtype requested is the same as the ndarray's
        dtype, the ndarray is returned instead of a copy.

    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        The copied array after casting to the specified type, or
        the same array if copy=False and dtype is the same as the input
        array.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3), dtype='float32')
    >>> y = x.astype('int32')
    >>> y.dtype
    <type 'numpy.int32'>
    """
    if not copy and np.dtype(dtype) == self.dtype:
        return self

    res = empty(self.shape, ctx=self.context, dtype=dtype)
    self.copyto(res)
    return res
<SYSTEM_TASK:> Returns an array on the target device with the same value as this array. <END_TASK> <USER_TASK:> Description:
def as_in_context(self, context):
    """Returns an array on the target device with the same value as this array.

    If the target context is the same as ``self.context``, then ``self`` is
    returned.  Otherwise, a copy is made.

    Parameters
    ----------
    context : Context
        The target context.

    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        The target array.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = x.as_in_context(mx.cpu())
    >>> y is x
    True
    >>> z = x.as_in_context(mx.gpu(0))
    >>> z is x
    False
    """
    if self.context == context:
        return self
    return self.copyto(context)
<SYSTEM_TASK:> Attach a gradient buffer to this NDArray, so that `backward` <END_TASK> <USER_TASK:> Description:
def attach_grad(self, grad_req='write', stype=None):
    """Attach a gradient buffer to this NDArray, so that `backward`
    can compute gradient with respect to it.

    Parameters
    ----------
    grad_req : {'write', 'add', 'null'}
        How gradient will be accumulated.
        - 'write': gradient will be overwritten on every backward.
        - 'add': gradient will be added to existing value on every backward.
        - 'null': do not compute gradient for this NDArray.
    stype : str, optional
        The storage type of the gradient array. Defaults to the same stype of this NDArray.
    """
    from . import zeros as _zeros
    if stype is not None:
        grad = _zeros(self.shape, stype=stype)
    else:
        grad = op.zeros_like(self)  # pylint: disable=undefined-variable
    grad_req = _GRAD_REQ_MAP[grad_req]
    check_call(_LIB.MXAutogradMarkVariables(
        1, ctypes.pointer(self.handle),
        ctypes.pointer(mx_uint(grad_req)),
        ctypes.pointer(grad.handle)))
<SYSTEM_TASK:> Returns gradient buffer attached to this NDArray. <END_TASK> <USER_TASK:> Description:
def grad(self):
    """Returns gradient buffer attached to this NDArray."""
    from . import _ndarray_cls
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
    if hdl.value is None:
        return None
    return _ndarray_cls(hdl)
<SYSTEM_TASK:> Returns a new NDArray, detached from the current graph. <END_TASK> <USER_TASK:> Description:
def detach(self):
    """Returns a new NDArray, detached from the current graph."""
    from . import _ndarray_cls
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
    return _ndarray_cls(hdl)
<SYSTEM_TASK:> Compute the gradients of this NDArray w.r.t variables. <END_TASK> <USER_TASK:> Description:
def backward(self, out_grad=None, retain_graph=False, train_mode=True):
    """Compute the gradients of this NDArray w.r.t variables.

    Parameters
    ----------
    out_grad : NDArray, optional
        Gradient with respect to head.
    retain_graph : bool, optional
        Whether to retain the computation graph for another backward
        pass on the same graph. By default the computation history
        is cleared.
    train_mode : bool, optional
        Whether to compute gradient for training or inference.
    """
    if out_grad is None:
        ograd_handles = [NDArrayHandle(0)]
    else:
        ograd_handles = [out_grad.handle]

    check_call(_LIB.MXAutogradBackwardEx(
        1, c_handle_array([self]),
        c_array(NDArrayHandle, ograd_handles),
        0,
        ctypes.c_void_p(0),
        ctypes.c_int(retain_graph),
        ctypes.c_int(0),
        ctypes.c_int(train_mode),
        ctypes.c_void_p(0),
        ctypes.c_void_p(0)))
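A short end-to-end sketch of the attach_grad/record/backward flow described above, using a simple quadratic:

import mxnet as mx
from mxnet import autograd

x = mx.nd.array([1.0, 2.0, 3.0])
x.attach_grad()                  # allocate a 'write' gradient buffer for x
with autograd.record():          # build the computation graph
    y = (x * x).sum()
y.backward()                     # d(sum(x^2))/dx = 2x
print(x.grad)                    # [2. 4. 6.]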
<SYSTEM_TASK:> Get the position of words <END_TASK> <USER_TASK:> Description:
def word_frame_pos(self, _id):
    """ Get the position of words """
    left = int(self.words[_id][0] / 1000)
    right = max(left + 1, int(self.words[_id][1] / 1000))
    return (left, right)
<SYSTEM_TASK:> Rescale the gradient of provided parameters by a certain scale <END_TASK> <USER_TASK:> Description:
def rescale_grad(self, scale=None, param_name=None):
    """ Rescale the gradient of provided parameters by a certain scale """
    if scale is None or param_name is None:
        return

    param_idx = self._exec_group.param_names.index(param_name)
    grad_vals = self._exec_group.grad_arrays[param_idx]
    for grad in grad_vals:
        grad[:] *= scale
<SYSTEM_TASK:> Get user friendly information of the output shapes. <END_TASK> <USER_TASK:> Description:
def get_output_shape(sym, **input_shapes):
    """Get user friendly information of the output shapes."""
    _, s_outputs, _ = sym.infer_shape(**input_shapes)
    return dict(zip(sym.list_outputs(), s_outputs))
<SYSTEM_TASK:> Query CUDA for the number of GPUs present. <END_TASK> <USER_TASK:> Description:
def num_gpus():
    """Query CUDA for the number of GPUs present.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    count : int
        The number of GPUs.
    """
    count = ctypes.c_int()
    check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))
    return count.value
<SYSTEM_TASK:> Query CUDA for the free and total bytes of GPU global memory. <END_TASK> <USER_TASK:> Description:
def gpu_memory_info(device_id=0):
    """Query CUDA for the free and total bytes of GPU global memory.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU device.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    (free, total) : (int, int)
        The free and total amount of GPU global memory, in bytes.
    """
    free = ctypes.c_uint64()
    total = ctypes.c_uint64()
    dev_id = ctypes.c_int(device_id)
    check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free),
                                                ctypes.byref(total)))
    return (free.value, total.value)
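A hedged sketch combining the two queries above (assuming the usual `mx.context` re-exports; guarding with num_gpus() so the call does not raise on CPU-only builds):

import mxnet as mx

if mx.context.num_gpus() > 0:
    free, total = mx.context.gpu_memory_info(0)
    print('GPU 0: %.2f GiB free of %.2f GiB' % (free / 1024**3, total / 1024**3))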
<SYSTEM_TASK:> Returns the current context. <END_TASK> <USER_TASK:> Description:
def current_context():
    """Returns the current context.

    By default, `mx.cpu()` is used for all the computations
    and it can be overridden by using `with mx.Context(x)` statement where
    x can be cpu(device_id) or gpu(device_id).

    Examples
    -------
    >>> mx.current_context()
    cpu(0)
    >>> with mx.Context('gpu', 1):  # Context changed in `with` block.
    ...    mx.current_context()  # Computation done here will be on gpu(1).
    ...
    gpu(1)
    >>> mx.current_context() # Back to default context.
    cpu(0)

    Returns
    -------
    default_ctx : Context
    """
    if not hasattr(Context._default_ctx, "value"):
        Context._default_ctx.value = Context('cpu', 0)
    return Context._default_ctx.value
<SYSTEM_TASK:> Try to configure cython and return cython configuration <END_TASK> <USER_TASK:> Description:
def config_cython():
    """Try to configure cython and return cython configuration"""
    if not with_cython:
        return []
    # pylint: disable=unreachable
    if os.name == 'nt':
        print("WARNING: Cython is not supported on Windows, will compile without cython module")
        return []

    try:
        from Cython.Build import cythonize
        # from setuptools.extension import Extension
        if sys.version_info >= (3, 0):
            subdir = "_cy3"
        else:
            subdir = "_cy2"
        ret = []
        path = "mxnet/cython"
        if os.name == 'nt':
            library_dirs = ['mxnet', '../build/Release', '../build']
            libraries = ['libmxnet']
        else:
            library_dirs = None
            libraries = None

        for fn in os.listdir(path):
            if not fn.endswith(".pyx"):
                continue
            ret.append(Extension(
                "mxnet/%s/.%s" % (subdir, fn[:-4]),
                ["mxnet/cython/%s" % fn],
                include_dirs=["../include/", "../3rdparty/tvm/nnvm/include"],
                library_dirs=library_dirs,
                libraries=libraries,
                language="c++"))
        return cythonize(ret)
    except ImportError:
        print("WARNING: Cython is not installed, will compile without cython module")
        return []
<SYSTEM_TASK:> Compose symbol on inputs. <END_TASK> <USER_TASK:> Description:
def _compose(self, *args, **kwargs):
    """Compose symbol on inputs.

    This call mutates the current symbol.

    Parameters
    ----------
    args :
        provide positional arguments
    kwargs :
        provide keyword arguments

    Returns
    -------
    the resulting symbol
    """
    name = kwargs.pop('name', None)

    if name:
        name = c_str(name)
    if len(args) != 0 and len(kwargs) != 0:
        raise TypeError('compose only accept input Symbols '
                        'either as positional or keyword arguments, not both')

    for arg in args:
        if not isinstance(arg, SymbolBase):
            raise TypeError('Compose expect `Symbol` as arguments')
    for val in kwargs.values():
        if not isinstance(val, SymbolBase):
            raise TypeError('Compose expect `Symbol` as arguments')

    num_args = len(args) + len(kwargs)
    if len(kwargs) != 0:
        keys = c_str_array(kwargs.keys())
        args = c_handle_array(kwargs.values())
    else:
        keys = None
        args = c_handle_array(args)  # positional inputs, not kwargs.values()
    check_call(_LIB.NNSymbolCompose(
        self.handle, name, num_args, keys, args))
<SYSTEM_TASK:> Set the attribute of the symbol. <END_TASK> <USER_TASK:> Description:
def _set_attr(self, **kwargs):
    """Set the attribute of the symbol.

    Parameters
    ----------
    **kwargs
        The attributes to set
    """
    keys = c_str_array(kwargs.keys())
    vals = c_str_array([str(s) for s in kwargs.values()])
    num_args = mx_uint(len(kwargs))
    check_call(_LIB.MXSymbolSetAttrs(
        self.handle, num_args, keys, vals))
<SYSTEM_TASK:> Wrapper for get symbol for train <END_TASK> <USER_TASK:> Description:
def get_symbol_train(network, data_shape, **kwargs):
    """Wrapper for get symbol for train

    Parameters
    ----------
    network : str
        name for the base network symbol
    data_shape : int
        input shape
    kwargs : dict
        see symbol_builder.get_symbol_train for more details
    """
    if network.startswith('legacy'):
        logging.warn('Using legacy model.')
        return symbol_builder.import_module(network).get_symbol_train(**kwargs)
    config = get_config(network, data_shape, **kwargs).copy()
    config.update(kwargs)
    return symbol_builder.get_symbol_train(**config)
<SYSTEM_TASK:> Set the trainer this parameter is associated with. <END_TASK> <USER_TASK:> Description:
def _set_trainer(self, trainer):
    """ Set the trainer this parameter is associated with. """
    # trainer cannot be replaced for sparse params
    if self._stype != 'default' and self._trainer and trainer and self._trainer is not trainer:
        raise RuntimeError(
            "Failed to set the trainer for Parameter '%s' because it was already set. "
            "More than one trainers for a %s Parameter is not supported."
            % (self.name, self._stype))
    self._trainer = trainer
<SYSTEM_TASK:> Get row_sparse data from row_sparse parameters based on row_id. <END_TASK> <USER_TASK:> Description:
def _get_row_sparse(self, arr_list, ctx, row_id):
    """ Get row_sparse data from row_sparse parameters based on row_id. """
    # get row sparse params based on row ids
    if not isinstance(row_id, ndarray.NDArray):
        raise TypeError("row_id must have NDArray type, but %s is given" % (type(row_id)))
    if not self._trainer:
        raise RuntimeError("Cannot get row_sparse data for Parameter '%s' when no "
                           "Trainer is created with it." % self.name)
    results = self._check_and_get(arr_list, ctx)

    # fetch row sparse params from the trainer
    self._trainer._row_sparse_pull(self, results, row_id)
    return results
<SYSTEM_TASK:> Finishes deferred initialization. <END_TASK> <USER_TASK:> Description:
def _finish_deferred_init(self):
    """Finishes deferred initialization."""
    if not self._deferred_init:
        return
    init, ctx, default_init, data = self._deferred_init
    self._deferred_init = ()
    assert self.shape is not None and np.prod(self.shape) > 0, \
        "Cannot initialize Parameter '%s' because it has " \
        "invalid shape: %s. Please specify in_units, " \
        "in_channels, etc for `Block`s." % (self.name, str(self.shape))

    with autograd.pause():
        if data is None:
            data = ndarray.zeros(shape=self.shape, dtype=self.dtype,
                                 ctx=context.cpu(), stype=self._stype)
            initializer.create(default_init)(
                initializer.InitDesc(self.name, {'__init__': init}), data)
        self._init_impl(data, ctx)
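Deferred initialization is easiest to see from the Gluon side; a minimal sketch (standard Gluon API, shapes chosen arbitrarily):

from mxnet import nd
from mxnet.gluon import nn

dense = nn.Dense(4)           # in_units unknown, so the weight shape is deferred
dense.initialize()            # only records (init, ctx, ...) in _deferred_init
dense(nd.ones((2, 8)))        # first forward infers in_units=8 and
                              # triggers _finish_deferred_init()
print(dense.weight.shape)     # (4, 8)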
<SYSTEM_TASK:> Initialize grad buffers. <END_TASK> <USER_TASK:> Description:
def _init_grad(self):
    """Initialize grad buffers."""
    if self.grad_req == 'null':
        self._grad = None
        return

    self._grad = [ndarray.zeros(shape=i.shape, dtype=i.dtype, ctx=i.context,
                                stype=self._grad_stype) for i in self._data]

    autograd.mark_variables(self._check_and_get(self._data, list),
                            self._grad, self.grad_req)
<SYSTEM_TASK:> Reduce data from multiple context to cpu. <END_TASK> <USER_TASK:> Description:
def _reduce(self):
    """Reduce data from multiple context to cpu."""
    ctx = context.cpu()
    if self._stype == 'default':
        block = self.list_data()
        data = ndarray.add_n(*(w.copyto(ctx) for w in block)) / len(block)
    else:
        # fetch all rows for 'row_sparse' param
        all_row_ids = ndarray.arange(0, self.shape[0], dtype='int64', ctx=ctx)
        data = ndarray.zeros(self.shape, stype='row_sparse', ctx=ctx)
        self._trainer._row_sparse_pull(self, data, all_row_ids, full_idx=True)
    return data
<SYSTEM_TASK:> Re-assign Parameter to other contexts. <END_TASK> <USER_TASK:> Description:
def reset_ctx(self, ctx):
    """Re-assign Parameter to other contexts.

    Parameters
    ----------
    ctx : Context or list of Context, default ``context.current_context()``.
        Assign Parameter to given context. If ctx is a list of Context, a
        copy will be made for each context.
    """
    if ctx is None:
        ctx = [context.current_context()]
    if isinstance(ctx, Context):
        ctx = [ctx]
    if self._data:
        data = self._reduce()
        with autograd.pause():
            self._init_impl(data, ctx)
    elif self._deferred_init:
        init, _, default_init, data = self._deferred_init
        self._deferred_init = (init, ctx, default_init, data)
    else:
        raise ValueError("Cannot reset context for Parameter '%s' because it "
                         "has not been initialized." % self.name)
<SYSTEM_TASK:> Returns a copy of the 'row_sparse' parameter on the same context as row_id's. <END_TASK> <USER_TASK:> Description:
def row_sparse_data(self, row_id):
    """Returns a copy of the 'row_sparse' parameter on the same context as row_id's.
    The copy only retains rows whose ids occur in provided row ids.
    The parameter must have been initialized on this context before.

    Parameters
    ----------
    row_id : NDArray
        Row ids to retain for the 'row_sparse' parameter.

    Returns
    -------
    NDArray on row_id's context
    """
    if self._stype != 'row_sparse':
        raise RuntimeError("Cannot return a copy of Parameter %s via row_sparse_data() "
                           "because its storage type is %s. Please use data() instead."
                           % (self.name, self._stype))
    return self._get_row_sparse(self._data, row_id.context, row_id)
<SYSTEM_TASK:> Returns copies of the 'row_sparse' parameter on all contexts, in the same order <END_TASK> <USER_TASK:> Description:
def list_row_sparse_data(self, row_id):
    """Returns copies of the 'row_sparse' parameter on all contexts, in the same order
    as creation. The copy only retains rows whose ids occur in provided row ids.
    The parameter must have been initialized before.

    Parameters
    ----------
    row_id : NDArray
        Row ids to retain for the 'row_sparse' parameter.

    Returns
    -------
    list of NDArrays
    """
    if self._stype != 'row_sparse':
        raise RuntimeError("Cannot return copies of Parameter '%s' on all contexts via "
                           "list_row_sparse_data() because its storage type is %s. Please "
                           "use data() instead." % (self.name, self._stype))
    return self._get_row_sparse(self._data, list, row_id)
<SYSTEM_TASK:> Returns a gradient buffer for this parameter on one context. <END_TASK> <USER_TASK:> Description:
def grad(self, ctx=None):
    """Returns a gradient buffer for this parameter on one context.

    Parameters
    ----------
    ctx : Context
        Desired context.
    """
    if self._data is not None and self._grad is None:
        raise RuntimeError(
            "Cannot get gradient array for Parameter '%s' "
            "because grad_req='null'" % (self.name))
    return self._check_and_get(self._grad, ctx)
<SYSTEM_TASK:> Returns a list of contexts this parameter is initialized on. <END_TASK> <USER_TASK:> Description:
def list_ctx(self):
    """Returns a list of contexts this parameter is initialized on."""
    if self._data is None:
        if self._deferred_init:
            return self._deferred_init[1]
        raise RuntimeError("Parameter '%s' has not been initialized" % self.name)
    return self._ctx_list
<SYSTEM_TASK:> Sets gradient buffer on all contexts to 0. No action is taken if <END_TASK> <USER_TASK:> Description:
def zero_grad(self):
    """Sets gradient buffer on all contexts to 0. No action is taken if
    parameter is uninitialized or doesn't require gradient."""
    if self._grad is None:
        return
    for i in self._grad:
        ndarray.zeros_like(i, out=i)
<SYSTEM_TASK:> Returns a symbol representing this parameter. <END_TASK> <USER_TASK:> Description:
def var(self):
    """Returns a symbol representing this parameter."""
    if self._var is None:
        self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
                               lr_mult=self.lr_mult, wd_mult=self.wd_mult,
                               init=self.init, stype=self._stype)
    return self._var
<SYSTEM_TASK:> Cast data and gradient of this Parameter to a new data type. <END_TASK> <USER_TASK:> Description:
def cast(self, dtype):
    """Cast data and gradient of this Parameter to a new data type.

    Parameters
    ----------
    dtype : str or numpy.dtype
        The new data type.
    """
    self.dtype = dtype
    if self._data is None:
        return
    with autograd.pause():
        self._data = [i.astype(dtype) for i in self._data]
        if self._grad is None:
            return
        self._grad = [i.astype(dtype) for i in self._grad]
        autograd.mark_variables(self._data, self._grad, self.grad_req)
<SYSTEM_TASK:> Set an attribute to a new value for all Parameters. <END_TASK> <USER_TASK:> Description:
def setattr(self, name, value):
    """Set an attribute to a new value for all Parameters.

    For example, set grad_req to null if you don't need gradient w.r.t a
    model's Parameters::

        model.collect_params().setattr('grad_req', 'null')

    or change the learning rate multiplier::

        model.collect_params().setattr('lr_mult', 0.5)

    Parameters
    ----------
    name : str
        Name of the attribute.
    value : valid type for attribute name
        The new value for the attribute.
    """
    for i in self.values():
        setattr(i, name, value)
<SYSTEM_TASK:> Save parameters to file. <END_TASK> <USER_TASK:> Description:
def save(self, filename, strip_prefix=''):
    """Save parameters to file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    strip_prefix : str, default ''
        Strip prefix from parameter names before saving.
    """
    arg_dict = {}
    for param in self.values():
        weight = param._reduce()
        if not param.name.startswith(strip_prefix):
            raise ValueError(
                "Prefix '%s' is to be stripped before saving, but Parameter's "
                "name '%s' does not start with '%s'. "
                "this may be due to your Block shares parameters from other "
                "Blocks or you forgot to use 'with name_scope()' when creating "
                "child blocks. For more info on naming, please see "
                "http://mxnet.incubator.apache.org/tutorials/basic/naming.html" % (
                    strip_prefix, param.name, strip_prefix))
        arg_dict[param.name[len(strip_prefix):]] = weight
    ndarray.save(filename, arg_dict)
<SYSTEM_TASK:> List and add all the torch backed ndarray functions to current module. <END_TASK> <USER_TASK:> Description:
def _init_torch_module():
    """List and add all the torch backed ndarray functions to current module."""
    plist = ctypes.POINTER(FunctionHandle)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListFunctions(ctypes.byref(size), ctypes.byref(plist)))

    module_obj = sys.modules[__name__]
    for i in range(size.value):
        hdl = FunctionHandle(plist[i])
        function = _make_torch_function(hdl)
        # if function name starts with underscore, register as static method of NDArray
        if function is not None:
            setattr(module_obj, function.__name__, function)
<SYSTEM_TASK:> Pack a string into MXImageRecord. <END_TASK> <USER_TASK:> Description:
def pack(header, s):
    """Pack a string into MXImageRecord.

    Parameters
    ----------
    header : IRHeader
        Header of the image record.
        ``header.label`` can be a number or an array. See more detail in ``IRHeader``.
    s : str
        Raw image string to be packed.

    Returns
    -------
    s : str
        The packed string.

    Examples
    --------
    >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
    >>> id = 2574
    >>> header = mx.recordio.IRHeader(0, label, id, 0)
    >>> with open(path, 'r') as file:
    ...     s = file.read()
    >>> packed_s = mx.recordio.pack(header, s)
    """
    header = IRHeader(*header)
    if isinstance(header.label, numbers.Number):
        header = header._replace(flag=0)
    else:
        label = np.asarray(header.label, dtype=np.float32)
        header = header._replace(flag=label.size, label=0)
        s = label.tostring() + s
    s = struct.pack(_IR_FORMAT, *header) + s
    return s
<SYSTEM_TASK:> Unpack a MXImageRecord to string. <END_TASK> <USER_TASK:> Description:
def unpack(s):
    """Unpack a MXImageRecord to string.

    Parameters
    ----------
    s : str
        String buffer from ``MXRecordIO.read``.

    Returns
    -------
    header : IRHeader
        Header of the image record.
    s : str
        Unpacked string.

    Examples
    --------
    >>> record = mx.recordio.MXRecordIO('test.rec', 'r')
    >>> item = record.read()
    >>> header, s = mx.recordio.unpack(item)
    >>> header
    HEADER(flag=0, label=14.0, id=20129312, id2=0)
    """
    header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE]))
    s = s[_IR_SIZE:]
    if header.flag > 0:
        header = header._replace(label=np.frombuffer(s, np.float32, header.flag))
        s = s[header.flag*4:]
    return header, s
<SYSTEM_TASK:> Unpack a MXImageRecord to image. <END_TASK> <USER_TASK:> Description:
def unpack_img(s, iscolor=-1):
    """Unpack a MXImageRecord to image.

    Parameters
    ----------
    s : str
        String buffer from ``MXRecordIO.read``.
    iscolor : int
        Image format option for ``cv2.imdecode``.

    Returns
    -------
    header : IRHeader
        Header of the image record.
    img : numpy.ndarray
        Unpacked image.

    Examples
    --------
    >>> record = mx.recordio.MXRecordIO('test.rec', 'r')
    >>> item = record.read()
    >>> header, img = mx.recordio.unpack_img(item)
    >>> header
    HEADER(flag=0, label=14.0, id=20129312, id2=0)
    >>> img
    array([[[ 23,  27,  45],
            [ 28,  32,  50],
            ...,
            [ 36,  40,  59],
            [ 35,  39,  58]],
           ...,
           [[ 91,  92, 113],
            [ 97,  98, 119],
            ...,
            [168, 169, 167],
            [166, 167, 165]]], dtype=uint8)
    """
    header, s = unpack(s)
    img = np.frombuffer(s, dtype=np.uint8)
    assert cv2 is not None
    img = cv2.imdecode(img, iscolor)
    return header, img
<SYSTEM_TASK:> Pack an image into ``MXImageRecord``. <END_TASK> <USER_TASK:> Description:
def pack_img(header, img, quality=95, img_fmt='.jpg'):
    """Pack an image into ``MXImageRecord``.

    Parameters
    ----------
    header : IRHeader
        Header of the image record.
        ``header.label`` can be a number or an array. See more detail in ``IRHeader``.
    img : numpy.ndarray
        Image to be packed.
    quality : int
        Quality for JPEG encoding in range 1-100, or compression for PNG encoding
        in range 1-9.
    img_fmt : str
        Encoding of the image (.jpg for JPEG, .png for PNG).

    Returns
    -------
    s : str
        The packed string.

    Examples
    --------
    >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
    >>> id = 2574
    >>> header = mx.recordio.IRHeader(0, label, id, 0)
    >>> img = cv2.imread('test.jpg')
    >>> packed_s = mx.recordio.pack_img(header, img)
    """
    assert cv2 is not None
    jpg_formats = ['.JPG', '.JPEG']
    png_formats = ['.PNG']
    encode_params = None
    if img_fmt.upper() in jpg_formats:
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
    elif img_fmt.upper() in png_formats:
        encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]

    ret, buf = cv2.imencode(img_fmt, img, encode_params)
    assert ret, 'failed to encode image'
    return pack(header, buf.tostring())
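A round-trip sketch tying pack_img and unpack_img together (requires the optional cv2 dependency; the header values are arbitrary):

import numpy as np
import mxnet as mx

header = mx.recordio.IRHeader(flag=0, label=4, id=2574, id2=0)
img = np.random.randint(0, 255, size=(32, 32, 3)).astype('uint8')
s = mx.recordio.pack_img(header, img, quality=95, img_fmt='.jpg')
header2, img2 = mx.recordio.unpack_img(s)
assert header2.label == 4 and img2.shape == (32, 32, 3)  # pixel values differ (lossy JPEG)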
<SYSTEM_TASK:> Opens the record file. <END_TASK> <USER_TASK:> Description:
def open(self):
    """Opens the record file."""
    if self.flag == "w":
        check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
        self.writable = True
    elif self.flag == "r":
        check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
        self.writable = False
    else:
        raise ValueError("Invalid flag %s" % self.flag)
    self.pid = current_process().pid
    self.is_open = True
<SYSTEM_TASK:> Check process id to ensure integrity, reset if in new process. <END_TASK> <USER_TASK:> Description:
def _check_pid(self, allow_reset=False):
    """Check process id to ensure integrity, reset if in new process."""
    if not self.pid == current_process().pid:
        if allow_reset:
            self.reset()
        else:
            raise RuntimeError("Forbidden operation in multiple processes")
<SYSTEM_TASK:> Inserts a string buffer as a record. <END_TASK> <USER_TASK:> Description:
def write(self, buf):
    """Inserts a string buffer as a record.

    Examples
    ---------
    >>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
    >>> for i in range(5):
    ...    record.write('record_%d'%i)
    >>> record.close()

    Parameters
    ----------
    buf : string (python2), bytes (python3)
        Buffer to write.
    """
    assert self.writable
    self._check_pid(allow_reset=False)
    check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
                                                ctypes.c_char_p(buf),
                                                ctypes.c_size_t(len(buf))))
<SYSTEM_TASK:> Returns record as a string. <END_TASK> <USER_TASK:> Description:
def read(self):
    """Returns record as a string.

    Examples
    ---------
    >>> record = mx.recordio.MXRecordIO('tmp.rec', 'r')
    >>> for i in range(5):
    ...    item = record.read()
    ...    print(item)
    record_0
    record_1
    record_2
    record_3
    record_4
    >>> record.close()

    Returns
    ----------
    buf : string
        Buffer read.
    """
    assert not self.writable
    # trying to implicitly read from multiple processes is forbidden,
    # there's no elegant way to handle unless lock is introduced
    self._check_pid(allow_reset=False)
    buf = ctypes.c_char_p()
    size = ctypes.c_size_t()
    check_call(_LIB.MXRecordIOReaderReadRecord(self.handle,
                                               ctypes.byref(buf),
                                               ctypes.byref(size)))
    if buf:
        buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char * size.value))
        return buf.contents.raw
    else:
        return None
<SYSTEM_TASK:> Sets the current read pointer position. <END_TASK> <USER_TASK:> Description:
def seek(self, idx):
    """Sets the current read pointer position.

    This function is internally called by `read_idx(idx)` to find the current
    reader pointer position. It doesn't return anything."""
    assert not self.writable
    self._check_pid(allow_reset=True)
    pos = ctypes.c_size_t(self.idx[idx])
    check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos))
<SYSTEM_TASK:> Returns the current position of write head. <END_TASK> <USER_TASK:> Description:
def tell(self):
    """Returns the current position of write head.

    Examples
    ---------
    >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')
    >>> print(record.tell())
    0
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d'%i)
    ...     print(record.tell())
    16
    32
    48
    64
    80
    """
    assert self.writable
    pos = ctypes.c_size_t()
    check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos)))
    return pos.value
<SYSTEM_TASK:> Inserts input record at given index. <END_TASK> <USER_TASK:> Description:
def write_idx(self, idx, buf):
    """Inserts input record at given index.

    Examples
    ---------
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d'%i)
    >>> record.close()

    Parameters
    ----------
    idx : int
        Index of a file.
    buf :
        Record to write.
    """
    key = self.key_type(idx)
    pos = self.tell()
    self.write(buf)
    self.fidx.write('%s\t%d\n' % (str(key), pos))
    self.idx[key] = pos
    self.keys.append(key)
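A small sketch of the indexed write/read cycle (file names are placeholders):

import mxnet as mx

record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')
for i in range(5):
    record.write_idx(i, b'record_%d' % i)
record.close()

record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'r')
print(record.read_idx(3))   # b'record_3' -- seek(3) followed by read()
record.close()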
<SYSTEM_TASK:> Add new metrics as new columns to selected pandas dataframe. <END_TASK> <USER_TASK:> Description:
def _add_new_columns(dataframe, metrics):
    """Add new metrics as new columns to selected pandas dataframe.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Selected dataframe needs to be modified.
    metrics : metric.EvalMetric
        New metrics to be added.
    """
    # TODO(leodirac): we don't really need to do this on every update.  Optimize.
    new_columns = set(metrics.keys()) - set(dataframe.columns)
    for col in new_columns:
        dataframe[col] = None
<SYSTEM_TASK:> Append new metrics to selected dataframes. <END_TASK> <USER_TASK:> Description:
def append_metrics(self, metrics, df_name):
    """Append new metrics to selected dataframes.

    Parameters
    ----------
    metrics : metric.EvalMetric
        New metrics to be added.
    df_name : str
        Name of the dataframe to be modified.
    """
    dataframe = self._dataframes[df_name]
    _add_new_columns(dataframe, metrics)
    dataframe.loc[len(dataframe)] = metrics
<SYSTEM_TASK:> Callback function after each epoch. Now it records each epoch time <END_TASK> <USER_TASK:> Description:
def epoch_cb(self):
    """Callback function after each epoch. Now it records each epoch time
    and appends it to the epoch dataframe.
    """
    metrics = {}
    metrics['elapsed'] = self.elapsed()
    now = datetime.datetime.now()
    metrics['epoch_time'] = now - self.last_epoch_time
    self.append_metrics(metrics, 'epoch')
    self.last_epoch_time = now
<SYSTEM_TASK:> Render the plot with bokeh.io and push to notebook. <END_TASK> <USER_TASK:> Description:
def _push_render(self):
    """Render the plot with bokeh.io and push to notebook."""
    bokeh.io.push_notebook(handle=self.handle)
    self.last_update = time.time()
<SYSTEM_TASK:> Converts tokens to indices according to the vocabulary. <END_TASK> <USER_TASK:> Description:
def to_indices(self, tokens):
    """Converts tokens to indices according to the vocabulary.

    Parameters
    ----------
    tokens : str or list of strs
        A source token or tokens to be converted.

    Returns
    -------
    int or list of ints
        A token index or a list of token indices according to the vocabulary.
    """
    to_reduce = False
    if not isinstance(tokens, list):
        tokens = [tokens]
        to_reduce = True

    indices = [self.token_to_idx[token] if token in self.token_to_idx
               else C.UNKNOWN_IDX for token in tokens]

    return indices[0] if to_reduce else indices
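A hypothetical illustration (assuming `vocab` is an instance of this class whose token_to_idx is {'<unk>': 0, 'hello': 1, 'world': 2} and C.UNKNOWN_IDX == 0):

vocab.to_indices('hello')             # -> 1 (scalar in, scalar out)
vocab.to_indices(['hello', 'there'])  # -> [1, 0]; 'there' falls back to C.UNKNOWN_IDX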
<SYSTEM_TASK:> Create an io iterator by handle. <END_TASK> <USER_TASK:> Description:
def _make_io_iterator(handle):
    """Create an io iterator by handle."""
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()

    check_call(_LIB.MXDataIterGetIterInfo(
        handle, ctypes.byref(name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs)))
    iter_name = py_str(name.value)

    narg = int(num_args.value)
    param_str = _build_param_doc(
        [py_str(arg_names[i]) for i in range(narg)],
        [py_str(arg_types[i]) for i in range(narg)],
        [py_str(arg_descs[i]) for i in range(narg)])

    doc_str = ('%s\n\n' +
               '%s\n' +
               'Returns\n' +
               '-------\n' +
               'MXDataIter\n' +
               '    The result iterator.')
    doc_str = doc_str % (desc.value, param_str)

    def creator(*args, **kwargs):
        """Create an iterator.
        The parameters listed below can be passed in as keyword arguments.

        Parameters
        ----------
        name : string, required.
            Name of the resulting data iterator.

        Returns
        -------
        dataiter: Dataiter
            The resulting data iterator.
        """
        param_keys = []
        param_vals = []

        for k, val in kwargs.items():
            param_keys.append(k)
            param_vals.append(str(val))
        # create atomic symbol
        param_keys = c_str_array(param_keys)
        param_vals = c_str_array(param_vals)
        iter_handle = DataIterHandle()
        check_call(_LIB.MXDataIterCreateIter(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(iter_handle)))

        if len(args):
            raise TypeError('%s can only accept keyword arguments' % iter_name)

        return MXDataIter(iter_handle, **kwargs)

    creator.__name__ = iter_name
    creator.__doc__ = doc_str
    return creator
<SYSTEM_TASK:> List and add all the data iterators to current module. <END_TASK> <USER_TASK:> Description:
def _init_io_module():
    """List and add all the data iterators to current module."""
    plist = ctypes.POINTER(ctypes.c_void_p)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist)))
    module_obj = sys.modules[__name__]
    for i in range(size.value):
        hdl = ctypes.c_void_p(plist[i])
        dataiter = _make_io_iterator(hdl)
        setattr(module_obj, dataiter.__name__, dataiter)
<SYSTEM_TASK:> Get DataDesc list from attribute lists. <END_TASK> <USER_TASK:> Description:
def get_list(shapes, types):
    """Get DataDesc list from attribute lists.

    Parameters
    ----------
    shapes : a tuple of (name_, shape_)
    types : a tuple of (name_, np.dtype)
    """
    if types is not None:
        type_dict = dict(types)
        return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
    else:
        return [DataDesc(x[0], x[1]) for x in shapes]
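A small sketch of the helper (DataDesc is the descriptor type from mxnet.io; the shapes below are arbitrary):

import numpy as np

shapes = [('data', (32, 3, 224, 224)), ('softmax_label', (32,))]
types = [('data', np.float32), ('softmax_label', np.int32)]
descs = get_list(shapes, types)
# -> [DataDesc('data', (32, 3, 224, 224), np.float32),
#     DataDesc('softmax_label', (32,), np.int32)]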
<SYSTEM_TASK:> Get next data batch from iterator. <END_TASK> <USER_TASK:> Description:
def next(self):
    """Get next data batch from iterator.

    Returns
    -------
    DataBatch
        The data of next batch.

    Raises
    ------
    StopIteration
        If the end of the data is reached.
    """
    if self.iter_next():
        return DataBatch(data=self.getdata(), label=self.getlabel(),
                         pad=self.getpad(), index=self.getindex())
    else:
        raise StopIteration
<SYSTEM_TASK:> Ignore roll over data and set to start. <END_TASK> <USER_TASK:> Description:
def hard_reset(self):
    """Ignore roll over data and set to start."""
    if self.shuffle:
        self._shuffle_data()
    self.cursor = -self.batch_size
    self._cache_data = None
    self._cache_label = None
<SYSTEM_TASK:> Increments the cursor by batch_size for next batch <END_TASK> <USER_TASK:> Description:
def iter_next(self):
    """Increments the cursor by batch_size for the next batch and checks
    whether the cursor has exceeded the number of data points."""
    self.cursor += self.batch_size
    return self.cursor < self.num_data
<SYSTEM_TASK:> Load data from underlying arrays. <END_TASK> <USER_TASK:> Description:
def _getdata(self, data_source, start=None, end=None):
    """Load data from underlying arrays."""
    assert start is not None or end is not None, 'should at least specify start or end'
    start = start if start is not None else 0
    if end is None:
        end = data_source[0][1].shape[0] if data_source else 0
    s = slice(start, end)
    return [
        x[1][s]
        if isinstance(x[1], (np.ndarray, NDArray)) else
        # h5py (only supports indices in increasing order)
        array(x[1][sorted(self.idx[s])][[
            list(self.idx[s]).index(i)
            for i in sorted(self.idx[s])
        ]]) for x in data_source
    ]
<SYSTEM_TASK:> Helper function to concat two NDArrays. <END_TASK> <USER_TASK:> Description:
def _concat(self, first_data, second_data):
    """Helper function to concat two NDArrays."""
    assert len(first_data) == len(second_data), \
        'data source should contain the same size'
    if first_data and second_data:
        return [
            concat(first_data[x], second_data[x], dim=0)
            for x in range(len(first_data))
        ]
    elif (not first_data) and (not second_data):
        return []
    else:
        return [
            first_data[0] if first_data else second_data[0]
            for x in range(len(first_data))
        ]
<SYSTEM_TASK:> Load data from underlying arrays, internal use only. <END_TASK> <USER_TASK:> Description:
def _batchify(self, data_source):
    """Load data from underlying arrays, internal use only."""
    assert self.cursor < self.num_data, 'DataIter needs reset.'
    # first batch of next epoch with 'roll_over'
    if self.last_batch_handle == 'roll_over' and \
            -self.batch_size < self.cursor < 0:
        assert self._cache_data is not None or self._cache_label is not None, \
            'next epoch should have cached data'
        cache_data = self._cache_data if self._cache_data is not None else self._cache_label
        second_data = self._getdata(
            data_source, end=self.cursor + self.batch_size)
        if self._cache_data is not None:
            self._cache_data = None
        else:
            self._cache_label = None
        return self._concat(cache_data, second_data)
    # last batch with 'pad'
    elif self.last_batch_handle == 'pad' and \
            self.cursor + self.batch_size > self.num_data:
        pad = self.batch_size - self.num_data + self.cursor
        first_data = self._getdata(data_source, start=self.cursor)
        second_data = self._getdata(data_source, end=pad)
        return self._concat(first_data, second_data)
    # normal case
    else:
        if self.cursor + self.batch_size < self.num_data:
            end_idx = self.cursor + self.batch_size
        # get incomplete last batch
        else:
            end_idx = self.num_data
        return self._getdata(data_source, self.cursor, end_idx)
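The last_batch_handle branches above are easiest to see from the public NDArrayIter; a minimal sketch:

import numpy as np
import mxnet as mx

data = np.arange(10).reshape(10, 1)
it = mx.io.NDArrayIter(data, batch_size=4, last_batch_handle='pad')
print([(b.data[0].shape[0], b.pad) for b in it])
# [(4, 0), (4, 0), (4, 2)] -- the final batch wraps around and reports pad=2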
<SYSTEM_TASK:> Given a quantized symbol and a dict of params that have not been quantized, <END_TASK> <USER_TASK:> Description:
def _quantize_params(qsym, params, th_dict):
    """Given a quantized symbol and a dict of params that have not been quantized,
    generate quantized params. Currently only supports quantizing the arg_params
    with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols
    that are excluded from being quantized, their corresponding params will
    not be quantized, but saved together with quantized params of the symbols that
    have been quantized.

    Parameters
    ----------
    qsym : Symbol
        Quantized symbol from FP32 symbol.
    params : dict of str->NDArray
    th_dict : dict of min/max pairs of layers' output
    """
    inputs_name = qsym.list_arguments()
    quantized_params = {}
    for name in inputs_name:
        if name.endswith(('weight_quantize', 'bias_quantize')):
            original_name = name[:-len('_quantize')]
            param = params[original_name]
            val, vmin, vmax = ndarray.contrib.quantize(data=param,
                                                       min_range=ndarray.min(param),
                                                       max_range=ndarray.max(param),
                                                       out_type='int8')
            quantized_params[name] = val
            quantized_params[name + '_min'] = vmin
            quantized_params[name + '_max'] = vmax
        elif name in params:
            quantized_params[name] = params[name]
        elif name.endswith('_min'):
            output = name[:-len('_min')]
            if output in th_dict:
                quantized_params[name] = ndarray.array([th_dict[output][0]])
        elif name.endswith('_max'):
            output = name[:-len('_max')]
            if output in th_dict:
                quantized_params[name] = ndarray.array([th_dict[output][1]])
    return quantized_params
<SYSTEM_TASK:> Given a symbol object representing a neural network of data type FP32, <END_TASK> <USER_TASK:> Description:
def _quantize_symbol(sym, excluded_symbols=None, offline_params=None, quantized_dtype='int8'):
    """Given a symbol object representing a neural network of data type FP32,
    quantize it into an INT8 network.

    Parameters
    ----------
    sym : Symbol
        FP32 neural network symbol.
    excluded_symbols : list of strings
        A list of strings representing the names of the symbols that users want to
        exclude from being quantized.
    offline_params : list of strs
        Names of the parameters that users want to quantize offline. It's always recommended to
        quantize parameters offline so that quantizing parameters during the inference can be
        avoided.
    quantized_dtype : str
        The quantized destination type for input data.
    """
    num_excluded_symbols = 0
    if excluded_symbols is not None:
        assert isinstance(excluded_symbols, list)
        num_excluded_symbols = len(excluded_symbols)
    else:
        excluded_symbols = []

    num_offline = 0
    offline = []
    if offline_params is not None:
        num_offline = len(offline_params)
        for k in offline_params:
            offline.append(c_str(k))

    out = SymbolHandle()
    check_call(_LIB.MXQuantizeSymbol(sym.handle,
                                     ctypes.byref(out),
                                     mx_uint(num_excluded_symbols),
                                     c_str_array(excluded_symbols),
                                     mx_uint(num_offline),
                                     c_array(ctypes.c_char_p, offline),
                                     c_str(quantized_dtype),
                                     ctypes.c_bool(True)))
    return Symbol(out)
<SYSTEM_TASK:> Given a dictionary containing the thresholds for quantizing the layers, <END_TASK> <USER_TASK:> Description:
def _calibrate_quantized_sym(qsym, th_dict):
    """Given a dictionary containing the thresholds for quantizing the layers,
    set the thresholds into the quantized symbol as the params of requantize operators.
    """
    if th_dict is None or len(th_dict) == 0:
        return qsym
    num_layer_outputs = len(th_dict)
    layer_output_names = []
    min_vals = []
    max_vals = []
    for k, v in th_dict.items():
        layer_output_names.append(k)
        min_vals.append(v[0])
        max_vals.append(v[1])

    calibrated_sym = SymbolHandle()
    check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle,
                                                     mx_uint(num_layer_outputs),
                                                     c_str_array(layer_output_names),
                                                     c_array(ctypes.c_float, min_vals),
                                                     c_array(ctypes.c_float, max_vals),
                                                     ctypes.byref(calibrated_sym)))
    return Symbol(calibrated_sym)
<SYSTEM_TASK:> Collect min and max values from layer outputs and save them in <END_TASK> <USER_TASK:> Description:
def _collect_layer_output_min_max(mod, data, include_layer=None,
                                  max_num_examples=None, logger=None):
    """Collect min and max values from layer outputs and save them in
    a dictionary mapped by layer names.
    """
    collector = _LayerOutputMinMaxCollector(include_layer=include_layer, logger=logger)
    num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
    return collector.min_max_dict, num_examples
<SYSTEM_TASK:> Collect layer outputs and save them in a dictionary mapped by layer names. <END_TASK> <USER_TASK:> Description:
def _collect_layer_outputs(mod, data, include_layer=None, max_num_examples=None, logger=None):
    """Collect layer outputs and save them in a dictionary mapped by layer names."""
    collector = _LayerOutputCollector(include_layer=include_layer, logger=logger)
    num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
    return collector.nd_dict, num_examples
<SYSTEM_TASK:> Given a ndarray dict, find the optimal threshold for quantizing each value of the key. <END_TASK> <USER_TASK:> Description:
def _get_optimal_thresholds(nd_dict, quantized_dtype, num_bins=8001,
                            num_quantized_bins=255, logger=None):
    """Given a ndarray dict, find the optimal threshold for quantizing each value of the key."""
    if stats is None:
        raise ImportError('scipy.stats is required for running entropy mode of calculating'
                          ' the optimal thresholds for quantizing FP32 ndarrays into int8.'
                          ' Please check if the scipy python bindings are installed.')
    assert isinstance(nd_dict, dict)
    if logger is not None:
        logger.info('Calculating optimal thresholds for quantization using KL divergence'
                    ' with num_bins=%d and num_quantized_bins=%d'
                    % (num_bins, num_quantized_bins))
    th_dict = {}
    # copy nd_dict keys since the keys() only returns a view in python3
    layer_names = list(nd_dict.keys())
    for name in layer_names:
        assert name in nd_dict
        min_val, max_val, min_divergence, opt_th = \
            _get_optimal_threshold(nd_dict[name], quantized_dtype,
                                   num_bins=num_bins,
                                   num_quantized_bins=num_quantized_bins)
        del nd_dict[name]  # release the memory of ndarray
        if min_val < 0:
            th_dict[name] = (-opt_th, opt_th)
        else:
            th_dict[name] = (0, opt_th)
        if logger is not None:
            logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, '
                        'optimal_threshold=%f'
                        % (name, min_val, max_val, min_divergence, opt_th))
    return th_dict
<SYSTEM_TASK:> Given a str as a path the symbol .json file or a symbol, returns a Symbol object. <END_TASK> <USER_TASK:> Description:
def _load_sym(sym, logger=logging):
    """Given a str as a path the symbol .json file or a symbol, returns a Symbol object."""
    if isinstance(sym, str):  # sym is a symbol file path
        cur_path = os.path.dirname(os.path.realpath(__file__))
        symbol_file_path = os.path.join(cur_path, sym)
        logger.info('Loading symbol from file %s' % symbol_file_path)
        return sym_load(symbol_file_path)
    elif isinstance(sym, Symbol):
        return sym
    else:
        raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
                         ' while received type %s' % str(type(sym)))
<SYSTEM_TASK:> Given a str as a path to the .params file or a pair of params, <END_TASK> <USER_TASK:> Description:
def _load_params(params, logger=logging):
    """Given a str as a path to the .params file or a pair of params,
    returns two dictionaries representing arg_params and aux_params.
    """
    if isinstance(params, str):
        cur_path = os.path.dirname(os.path.realpath(__file__))
        param_file_path = os.path.join(cur_path, params)
        logger.info('Loading params from file %s' % param_file_path)
        save_dict = nd_load(param_file_path)
        arg_params = {}
        aux_params = {}
        for k, v in save_dict.items():
            tp, name = k.split(':', 1)
            if tp == 'arg':
                arg_params[name] = v
            if tp == 'aux':
                aux_params[name] = v
        return arg_params, aux_params
    elif isinstance(params, (tuple, list)) and len(params) == 2:
        return params[0], params[1]
    else:
        raise ValueError('Unsupported params provided. Must be either a path to the param file or'
                         ' a pair of dictionaries representing arg_params and aux_params')
<SYSTEM_TASK:> Callback function for collecting min and max values from an NDArray. <END_TASK> <USER_TASK:> Description:
def collect(self, name, arr):
    """Callback function for collecting min and max values from an NDArray."""
    name = py_str(name)
    if self.include_layer is not None and not self.include_layer(name):
        return
    handle = ctypes.cast(arr, NDArrayHandle)
    arr = NDArray(handle, writable=False)
    min_range = ndarray.min(arr).asscalar()
    max_range = ndarray.max(arr).asscalar()
    if name in self.min_max_dict:
        cur_min_max = self.min_max_dict[name]
        self.min_max_dict[name] = (min(cur_min_max[0], min_range),
                                   max(cur_min_max[1], max_range))
    else:
        self.min_max_dict[name] = (min_range, max_range)
    if self.logger is not None:
        self.logger.info("Collecting layer %s min_range=%f, max_range=%f"
                         % (name, min_range, max_range))
<SYSTEM_TASK:> Gets root mse between the logarithms of the prediction and the truth. <END_TASK> <USER_TASK:> Description:
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(nd.log(clipped_preds),
                                          nd.log(y_train))).asscalar() / num_train)
<SYSTEM_TASK:> Gets a neural network. Better results are obtained with modifications. <END_TASK> <USER_TASK:> Description:
def get_net():
    """Gets a neural network. Better results are obtained with modifications."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(50, activation="relu"))
        net.add(gluon.nn.Dense(1))
    net.initialize()
    return net
<SYSTEM_TASK:> Trains the model. <END_TASK> <USER_TASK:> Description:
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    """Trains the model."""
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate, 'wd': weight_decay})
    net.initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        avg_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss
<SYSTEM_TASK:> Conducts k-fold cross validation for the model. <END_TASK> <USER_TASK:> Description:
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k

    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) * fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
                           learning_rate, weight_decay, batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k
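A hypothetical driver for the routine above (the hyper-parameter values are placeholders; X_train/y_train are assumed to be the NDArrays prepared earlier in the script):

k, epochs, verbose_epoch = 5, 100, 95
learning_rate, weight_decay, batch_size = 5, 0.0, 64
train_l, test_l = k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                                     learning_rate, weight_decay, batch_size)
print("%d-fold validation: avg train loss %f, avg test loss %f" % (k, train_l, test_l))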
<SYSTEM_TASK:> Trains the model and predicts on the test data set. <END_TASK> <USER_TASK:> Description:
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set."""
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    # `X_test` is not a parameter here; it is the feature matrix built from
    # `test` at module scope in the original script.
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
<SYSTEM_TASK:> Get the attribute dict given the attribute set by the symbol. <END_TASK> <USER_TASK:> Description: def get(self, attr): """ Get the attribute dict given the attribute set by the symbol. Parameters ---------- attr : dict of string to string The attribute passed in by user during symbol creation. Returns ------- attr : dict of string to string Updated attributes to add other scope related attributes. """
if self._attr:
    ret = self._attr.copy()
    if attr:
        ret.update(attr)
    return ret
else:
    return attr if attr else {}
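A sketch of the merge semantics through the public `AttrScope` API (attribute names here are illustrative): the scope's stored attributes are copied first, then the per-symbol dict overrides them on key clashes.

import mxnet as mx

with mx.AttrScope(ctx_group='dev1'):
    a = mx.sym.Variable('a', attr={'lr_mult': '2'})
print(a.list_attr())  # merged dict containing both ctx_group and lr_mult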
<SYSTEM_TASK:>
Create kvstore assuming some parameters' storage types are row_sparse.
<END_TASK>
<USER_TASK:>
Description:
def _create_sparse_kvstore(kvstore):
    """Create kvstore assuming some parameters' storage types are row_sparse.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.

    Returns
    -------
    kvstore : KVStore
    update_on_kvstore : bool. Always True.
    """
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
    kv = kvstore
elif isinstance(kvstore, str):
    kv = kvs.create(kvstore)
else:
    raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
                    "The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
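An illustrative call (the function is an internal helper; 'device' is simply one valid kvstore type string):

kv, update_on_kvstore = _create_sparse_kvstore('device')
assert update_on_kvstore             # always True for row_sparse parameters
kv2, _ = _create_sparse_kvstore(kv)  # passing an existing KVStore returns it unchanged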
<SYSTEM_TASK:>
Create kvstore
<END_TASK>
<USER_TASK:>
Description:
def _create_kvstore(kvstore, num_device, arg_params):
    """Create kvstore

    This function selects and creates a proper kvstore when given a kvstore type.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.
    num_device : int
        The number of devices.
    arg_params : dict of str to `NDArray`
        Model parameter, dict of name to `NDArray` of net's weights.
    """
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
    kv = None
elif isinstance(kvstore, kvs.KVStore):
    kv = kvstore
elif isinstance(kvstore, str):
    # create kvstore using the string type
    if num_device == 1 and 'dist' not in kvstore:
        # no need to use kv for single device and single machine
        kv = None
    else:
        kv = kvs.create(kvstore)
        if kvstore == 'local':
            # automatically decide whether to update on the local kvstore:
            # very large parameters are cheaper to update on the devices
            max_size = max(np.prod(param.shape) for param in
                           arg_params.values())
            if max_size > 1024 * 1024 * 16:
                update_on_kvstore = False
else:
    raise TypeError('kvstore must be KVStore, str or None')

if kv is None:
    update_on_kvstore = False

return (kv, update_on_kvstore)
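A hedged sketch of the selection logic (the parameter shape below is made up):

import mxnet as mx

arg_params = {'fc_weight': mx.nd.zeros((512, 1024))}
kv, update_on_kvstore = _create_kvstore('local', num_device=2,
                                        arg_params=arg_params)
# with num_device=1 and a non-distributed store, kv would be None
# and update_on_kvstore would be forced to False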
<SYSTEM_TASK:>
Perform update of param_arrays from grad_arrays not on kvstore.
<END_TASK>
<USER_TASK:>
Description:
def _update_params(param_arrays, grad_arrays, updater, num_device,
                   kvstore=None, param_names=None):
    """Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
    arg_list, grad_list = pair
    if grad_list[0] is None:
        continue
    index = i
    if kvstore:
        name = param_names[index]
        # push gradient, priority is negative index
        kvstore.push(name, grad_list, priority=-index)
        # pull back the sum gradients, to the same locations.
        kvstore.pull(name, grad_list, priority=-index)
    for k, p in enumerate(zip(arg_list, grad_list)):
        # faked an index here, to make optimizer create diff
        # state for the same index but on diff devs, TODO(mli)
        # use a better solution later
        w, g = p
        updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
    # update params if param_arrays and grad_arrays are not empty
    if dev_updates:
        # each entry is (index, grad, weight), matching the updater signature
        i, g, w = zip(*dev_updates)
        updater(i, g, w)
<SYSTEM_TASK:>
Sends args and kwargs to any configured callbacks.
<END_TASK>
<USER_TASK:>
Description:
def _multiple_callbacks(callbacks, *args, **kwargs):
    """Sends args and kwargs to any configured callbacks.

    This handles the cases where the 'callbacks' variable
    is ``None``, a single function, or a list.
    """
if isinstance(callbacks, list):
    for cb in callbacks:
        cb(*args, **kwargs)
    return
if callbacks:
    callbacks(*args, **kwargs)
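The three accepted shapes of `callbacks` in one sketch (callback names are made up):

def cb_a(msg):
    print('a saw', msg)

def cb_b(msg):
    print('b saw', msg)

_multiple_callbacks([cb_a, cb_b], 'batch 7')  # list: every callback fires
_multiple_callbacks(cb_a, 'batch 7')          # single callable: fires once
_multiple_callbacks(None, 'batch 7')          # None: silently ignored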
<SYSTEM_TASK:>
Checkpoint the model data into file.
<END_TASK>
<USER_TASK:>
Description:
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
    """Checkpoint the model data into file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input Symbol.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
if symbol is not None:
    symbol.save('%s-symbol.json' % prefix)

save_dict = {('arg:%s' % k) : v.as_in_context(cpu())
             for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu())
                  for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
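A minimal end-to-end sketch (the tiny network and parameter values are made up):

import mxnet as mx

sym = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=1, name='fc')
arg_params = {'fc_weight': mx.nd.zeros((1, 4)), 'fc_bias': mx.nd.zeros((1,))}
save_checkpoint('mymodel', 0, sym, arg_params, aux_params={})
# writes mymodel-symbol.json and mymodel-0000.params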
<SYSTEM_TASK:>
Verify the arguments of the default symbol and user-provided parameters.
<END_TASK>
<USER_TASK:>
Description:
def _check_arguments(self):
    """Verify the arguments of the default symbol and user-provided parameters."""
if self.argument_checked:
    return

assert(self.symbol is not None)
self.argument_checked = True

# check if symbol contains duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
    if self.arg_params:
        arg_names = set(self.symbol.list_arguments())
        self.arg_params = {k : v for k, v in self.arg_params.items()
                           if k in arg_names}
    if self.aux_params:
        aux_names = set(self.symbol.list_auxiliary_states())
        self.aux_params = {k : v for k, v in self.aux_params.items()
                           if k in aux_names}
<SYSTEM_TASK:>
Initialize weight parameters and auxiliary states.
<END_TASK>
<USER_TASK:>
Description:
def _init_params(self, inputs, overwrite=False):
    """Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None

input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None

arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()

param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
                    if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
              for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
                  if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
              for k, s, t in aux_name_attrs}

for k, v in arg_params.items():
    if self.arg_params and k in self.arg_params and (not overwrite):
        arg_params[k][:] = self.arg_params[k][:]
    else:
        self.initializer(k, v)

for k, v in aux_params.items():
    if self.aux_params and k in self.aux_params and (not overwrite):
        aux_params[k][:] = self.aux_params[k][:]
    else:
        self.initializer(k, v)

self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
<SYSTEM_TASK:>
Initialize the predictor module for running prediction.
<END_TASK>
<USER_TASK:>
Description:
def _init_predictor(self, input_shapes, type_dict=None):
    """Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
    arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
    assert arg_shapes is not None, "Incomplete input shapes"
    pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
    if arg_shapes == pred_shapes:
        return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
    self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)

_check_arguments(self.symbol)
self._pred_exec = pred_exec
<SYSTEM_TASK:>
Initialize the iterator given input.
<END_TASK>
<USER_TASK:>
Description:
def _init_iter(self, X, y, is_train):
    """Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
    if y is None:
        if is_train:
            raise ValueError('y must be specified when X is numpy.ndarray')
        else:
            y = np.zeros(X.shape[0])
    if not isinstance(y, (np.ndarray, nd.NDArray)):
        raise TypeError('y must be ndarray when X is numpy.ndarray')
    if X.shape[0] != y.shape[0]:
        raise ValueError("The numbers of data points and labels are not equal")
    if y.ndim == 2 and y.shape[1] == 1:
        y = y.flatten()
    if y.ndim != 1:
        raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
    if is_train:
        return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                              shuffle=is_train, last_batch_handle='roll_over')
    else:
        return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                              shuffle=False)
if not isinstance(X, io.DataIter):
    raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
<SYSTEM_TASK:>
Initialize the iterator given eval_data.
<END_TASK>
<USER_TASK:>
Description:
def _init_eval_iter(self, eval_data):
    """Initialize the iterator given eval_data."""
if eval_data is None:
    return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
    if eval_data[0] is not None:
        if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
            return eval_data[0]
        input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
                      else eval_data[0])
        input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
                       else eval_data[1])
        return self._init_iter(input_data, input_label, is_train=True)
    else:
        raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
    raise TypeError('Eval data must be DataIter, or '
                    'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
<SYSTEM_TASK:>
Run the prediction, always only use one device.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X, num_batch=None, return_data=False, reset=True):
    """Run the prediction, always only use one device.

    Parameters
    ----------
    X : mxnet.DataIter
    num_batch : int or None
        The number of batches to run. Goes through all batches if ``None``.

    Returns
    -------
    y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
        The predicted value of the output.
    """
X = self._init_iter(X, None, is_train=False)

if reset:
    X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
    if isinstance(x, DataDesc):
        type_dict[x.name] = x.dtype
    else:
        type_dict[x[0]] = mx_real_t

self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
    data_list = [[] for _ in X.provide_data]
    label_list = [[] for _ in X.provide_label]

i = 0
for batch in X:
    _load_data(batch, data_arrays)
    self._pred_exec.forward(is_train=False)
    padded = batch.pad
    real_size = batch_size - padded

    for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
        o_list.append(o_nd[0:real_size].asnumpy())

    if return_data:
        for j, x in enumerate(batch.data):
            data_list[j].append(x[0:real_size].asnumpy())
        for j, x in enumerate(batch.label):
            label_list[j].append(x[0:real_size].asnumpy())
    i += 1
    if num_batch is not None and i == num_batch:
        break

outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
    outputs = outputs[0]

if return_data:
    data = [np.concatenate(x) for x in data_list]
    label = [np.concatenate(x) for x in label_list]

    if len(data) == 1:
        data = data[0]
    if len(label) == 1:
        label = label[0]

    return outputs, data, label
else:
    return outputs
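A hedged usage sketch; `model` stands for an already-trained `FeedForward` instance and the input shape is illustrative:

import numpy as np

X_new = np.random.uniform(size=(32, 10)).astype('float32')
preds = model.predict(X_new)  # numpy in, numpy out; dummy labels are filled internally
print(preds.shape)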
<SYSTEM_TASK:>
Run the model given an input and calculate the score
<END_TASK>
<USER_TASK:>
Description:
def score(self, X, eval_metric='acc', num_batch=None,
          batch_end_callback=None, reset=True):
    """Run the model given an input and calculate the score
    as assessed by an evaluation metric.

    Parameters
    ----------
    X : mxnet.DataIter
    eval_metric : metric.EvalMetric or str
        The metric for calculating score.
    num_batch : int or None
        The number of batches to run. Goes through all batches if ``None``.

    Returns
    -------
    s : float
        The final score.
    """
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
    eval_metric = metric.create(eval_metric)

X = self._init_iter(X, None, is_train=False)
if reset:
    X.reset()

data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
    if isinstance(x, DataDesc):
        type_dict[x.name] = x.dtype
    else:
        type_dict[x[0]] = mx_real_t

self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]

for i, batch in enumerate(X):
    if num_batch is not None and i == num_batch:
        break
    _load_data(batch, data_arrays)
    self._pred_exec.forward(is_train=False)
    eval_metric.update(batch.label, self._pred_exec.outputs)

    if batch_end_callback is not None:
        batch_end_params = BatchEndParam(epoch=0,
                                         nbatch=i,
                                         eval_metric=eval_metric,
                                         locals=locals())
        _multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
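A hedged sketch of scoring an already-trained `FeedForward` instance `model` on a hypothetical validation iterator `val_iter`:

accuracy = model.score(val_iter, eval_metric='acc')
print('validation accuracy: %f' % accuracy)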
<SYSTEM_TASK:>
Functional style to create a model.
<END_TASK>
<USER_TASK:>
Description:
def create(symbol, X, y=None, ctx=None,
           num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
           eval_data=None, eval_metric='acc',
           epoch_end_callback=None, batch_end_callback=None,
           kvstore='local', logger=None, work_load_list=None,
           eval_end_callback=LogValidationMetricsCallback(),
           eval_batch_end_callback=None, **kwargs):
    """Functional style to create a model.

    This function is more consistent with functional
    languages such as R, where mutation is not allowed.

    Parameters
    ----------
    symbol : Symbol
        The symbol configuration of a computation network.
    X : DataIter
        Training data.
    y : numpy.ndarray, optional
        If `X` is a ``numpy.ndarray``, `y` must be set.
    ctx : Context or list of Context, optional
        The device context of training and prediction.
        To use multi-GPU training, pass in a list of GPU contexts.
    num_epoch : int, optional
        The number of training epochs.
    epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
        ``ceil(num_train_examples / batch_size)``.
    optimizer : str or Optimizer, optional
        The name of the chosen optimizer, or an optimizer object, used for training.
    initializer : initializer function, optional
        The initialization scheme used.
    eval_data : DataIter or numpy.ndarray pair
        If `eval_set` is ``numpy.ndarray`` pair, it should
        be (`valid_data`, `valid_label`).
    eval_metric : metric.EvalMetric or str or callable
        The evaluation metric. Can be the name of an evaluation metric
        or a custom evaluation function that returns statistics
        based on a minibatch.
    epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
        A callback that is invoked at end of each epoch.
        This can be used to checkpoint model each epoch.
    batch_end_callback: callable(epoch)
        A callback that is invoked at end of each batch for print purposes.
    kvstore: KVStore or str, optional
        The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
        Defaults to 'local', often no need to change for single machine.
    logger : logging logger, optional
        When not specified, default logger will be used.
    work_load_list : list of float or int, optional
        The list of work load for different devices, in the same order as `ctx`.
    """
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
                    epoch_size=epoch_size,
                    optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
          epoch_end_callback=epoch_end_callback,
          batch_end_callback=batch_end_callback,
          kvstore=kvstore,
          logger=logger,
          work_load_list=work_load_list,
          eval_end_callback=eval_end_callback,
          eval_batch_end_callback=eval_batch_end_callback)
return model
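An illustrative invocation; `train_iter` is a hypothetical training `DataIter`, and the assumption here is that extra keyword arguments such as `learning_rate` are forwarded to the optimizer through `**kwargs`:

import mxnet as mx

data = mx.sym.Variable('data')
fc = mx.sym.FullyConnected(data, num_hidden=10)
net = mx.sym.SoftmaxOutput(fc, name='softmax')
model = mx.model.FeedForward.create(net, X=train_iter, num_epoch=2,
                                    learning_rate=0.1)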
<SYSTEM_TASK:>
Get the current evaluation result.
<END_TASK>
<USER_TASK:>
Description:
def get(self):
    """Get the current evaluation result.

    Overrides the default behavior.

    Returns
    -------
    name : str
        Name of the metric.
    value : float
        Value of the evaluation.
    """
if self.num is None:
    if self.num_inst == 0:
        return (self.name, float('nan'))
    else:
        return (self.name, self.sum_metric / self.num_inst)
else:
    names = ['%s'%(self.name[i]) for i in range(self.num)]
    values = [x / y if y != 0 else float('nan')
              for x, y in zip(self.sum_metric, self.num_inst)]
    return (names, values)
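For the single-metric case (`self.num is None`) the result reduces to a (name, value) pair; a quick sketch with a built-in metric that follows the same convention:

import mxnet as mx

acc = mx.metric.Accuracy()
acc.update(labels=[mx.nd.array([1, 1])],
           preds=[mx.nd.array([[0.3, 0.7], [0.8, 0.2]])])
print(acc.get())  # ('accuracy', 0.5): argmax picks [1, 0] against labels [1, 1]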
<SYSTEM_TASK:>
Get the dictionary given name and ndarray pairs.
<END_TASK>
<USER_TASK:>
Description:
def _get_dict(names, ndarrays):
    """Get the dictionary given name and ndarray pairs."""
nset = set()
for nm in names:
    if nm in nset:
        raise ValueError('Duplicate names detected, %s' % str(names))
    nset.add(nm)
return dict(zip(names, ndarrays))
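A behavior sketch (the names below are made up):

import mxnet as mx

d = _get_dict(['w', 'b'], [mx.nd.zeros((2, 2)), mx.nd.zeros((2,))])
print(sorted(d.keys()))  # ['b', 'w']
# _get_dict(['w', 'w'], [x, y]) would raise ValueError: Duplicate names detected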
<SYSTEM_TASK:>
List all the output NDArrays.
<END_TASK>
<USER_TASK:>
Description:
def _get_outputs(self):
    """List all the output NDArrays.

    Returns
    -------
    A list of ndarrays bound to the heads of executor.
    """
out_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
check_call(_LIB.MXExecutorOutputs(self.handle,
                                  ctypes.byref(out_size), ctypes.byref(handles)))
num_output = out_size.value
outputs = [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(num_output)]
return outputs
<SYSTEM_TASK:>
Calculate the outputs specified by the bound symbol.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, is_train=False, **kwargs):
    """Calculate the outputs specified by the bound symbol.

    Parameters
    ----------
    is_train: bool, optional
        Whether this forward pass is for training. If True, a
        backward call is expected to follow.
    **kwargs
        Additional specification of input arguments.

    Examples
    --------
    >>> # doing forward by specifying data
    >>> texec.forward(is_train=True, data=mydata)
    >>> # doing forward by not specifying things, but copy to the executor before hand
    >>> mydata.copyto(texec.arg_dict['data'])
    >>> texec.forward(is_train=True)
    >>> # doing forward by specifying data and get outputs
    >>> outputs = texec.forward(is_train=True, data=mydata)
    >>> print(outputs[0].asnumpy())
    """
if len(kwargs) != 0:
    arg_dict = self.arg_dict
    for name, array in kwargs.items():
        if not isinstance(array, (NDArray, np.ndarray)):
            raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray')
        if name not in arg_dict:
            raise TypeError('Unknown argument %s' % name)
        if arg_dict[name].shape != array.shape:
            raise ValueError('Shape not match! Argument %s, need: %s, received: %s'
                             %(name, str(arg_dict[name].shape), str(array.shape)))
        arg_dict[name][:] = array

check_call(_LIB.MXExecutorForward(
    self.handle,
    ctypes.c_int(int(is_train))))
return self.outputs
<SYSTEM_TASK:>
Do backward pass to get the gradient of arguments.
<END_TASK>
<USER_TASK:>
Description:
def backward(self, out_grads=None, is_train=True):
    """Do backward pass to get the gradient of arguments.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
        Gradient on the outputs to be propagated back.
        This parameter is only needed when bind is called
        on outputs that are not a loss function.
    is_train : bool, default True
        Whether this backward is for training or inference. Note that in rare
        cases you want to call backward with is_train=False to get gradient
        during inference.

    Examples
    --------
    >>> # Example for binding on loss function symbol, which gives the loss value of the model.
    >>> # Equivalently it gives the head gradient for backward pass.
    >>> # In this example the built-in SoftmaxOutput is used as loss function.
    >>> # MakeLoss can be used to define customized loss function symbol.
    >>> net = mx.sym.Variable('data')
    >>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
    >>> net = mx.sym.Activation(net, name='relu', act_type="relu")
    >>> net = mx.sym.SoftmaxOutput(net, name='softmax')
    >>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
    >>>         'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
    >>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
    >>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
    >>> out = texec.forward(is_train=True)[0].copy()
    >>> print(out.asnumpy())
    [[ 0.00378404  0.07600445  0.07600445  0.07600445  0.20660152  0.5616011 ]]
    >>> texec.backward()
    >>> print(texec.grad_arrays[1].asnumpy())
    [[ 0.00378404  0.00378404  0.00378404  0.00378404]
     [-0.92399555 -0.92399555 -0.92399555 -0.92399555]
     [ 0.07600445  0.07600445  0.07600445  0.07600445]
     [ 0.07600445  0.07600445  0.07600445  0.07600445]
     [ 0.20660152  0.20660152  0.20660152  0.20660152]
     [ 0.5616011   0.5616011   0.5616011   0.5616011 ]]
    >>>
    >>> # Example for binding on non-loss function symbol.
    >>> # Here the binding symbol is neither built-in loss function
    >>> # nor customized loss created by MakeLoss.
    >>> # As a result the head gradient is not automatically provided.
    >>> a = mx.sym.Variable('a')
    >>> b = mx.sym.Variable('b')
    >>> # c is not a loss function symbol
    >>> c = 2 * a + b
    >>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
    >>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
    >>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
    >>> out = texec.forward(is_train=True)[0].copy()
    >>> print(out.asnumpy())
    [ 4.  7.]
    >>> # out_grads is the head gradient in backward pass.
    >>> # Here we define 'c' as loss function.
    >>> # Then 'out' is passed as head gradient of backward pass.
    >>> texec.backward(out)
    >>> print(texec.grad_arrays[0].asnumpy())
    [ 8.  14.]
    >>> print(texec.grad_arrays[1].asnumpy())
    [ 4.  7.]
    """
if out_grads is None:
    out_grads = []
elif isinstance(out_grads, NDArray):
    out_grads = [out_grads]
elif isinstance(out_grads, dict):
    out_grads = [out_grads[k] for k in self._symbol.list_outputs()]

for obj in out_grads:
    if not isinstance(obj, NDArray):
        raise TypeError("inputs must be NDArray")
ndarray = c_handle_array(out_grads)
check_call(_LIB.MXExecutorBackwardEx(
    self.handle,
    mx_uint(len(out_grads)),
    ndarray,
    ctypes.c_int(is_train)))
<SYSTEM_TASK:>
Install callback for monitor.
<END_TASK>
<USER_TASK:>
Description:
def set_monitor_callback(self, callback, monitor_all=False):
    """Install callback for monitor.

    Parameters
    ----------
    callback : function
        Takes a string and an NDArrayHandle.
    monitor_all : bool, default False
        If true, monitor both input and output, otherwise monitor output only.

    Examples
    --------
    >>> def mon_callback(*args, **kwargs):
    >>>     print("Do your stuff here.")
    >>>
    >>> texe.set_monitor_callback(mon_callback)
    """
cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
check_call(_LIB.MXExecutorSetMonitorCallbackEX(
    self.handle,
    self._monitor_callback,
    None,
    ctypes.c_int(monitor_all)))
<SYSTEM_TASK:>
Get dictionary representation of argument arrays.
<END_TASK>
<USER_TASK:>
Description:
def arg_dict(self):
    """Get dictionary representation of argument arrays.

    Returns
    -------
    arg_dict : dict of str to NDArray
        The dictionary that maps the names of arguments to NDArrays.

    Raises
    ------
    ValueError : if there are duplicated names in the arguments.
    """
if self._arg_dict is None:
    self._arg_dict = Executor._get_dict(
        self._symbol.list_arguments(), self.arg_arrays)
return self._arg_dict
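A sketch of the typical pattern: bind once, then feed inputs through the cached dict view (the symbol and shapes here are illustrative):

import mxnet as mx

sym = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=2, name='fc')
texec = sym.simple_bind(mx.cpu(), data=(1, 4))
texec.arg_dict['data'][:] = 1  # write the input in place through the dict view
out = texec.forward()[0]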