Dataset columns: text_prompt (string, 100 to 17.7k chars), code_prompt (string, 7 to 9.86k chars)
<SYSTEM_TASK:> Creates an unrolled LSTM symbol for inference if loss_type is not specified, and for training <END_TASK> <USER_TASK:> Description: def lstm_unroll(num_lstm_layer, seq_len, num_hidden, num_label, loss_type=None): """ Creates an unrolled LSTM symbol for inference if loss_type is not specified, and for training if loss_type is specified. loss_type must be one of 'ctc' or 'warpctc' Parameters ---------- num_lstm_layer: int seq_len: int num_hidden: int num_label: int loss_type: str 'ctc' or 'warpctc' Returns ------- mxnet.symbol.symbol.Symbol """
# Create the base (shared between training and inference) and add loss to the end pred = _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden) if loss_type: # Training mode, add loss return _add_ctc_loss(pred, seq_len, num_label, loss_type) else: # Inference mode, add softmax return mx.sym.softmax(data=pred, name='softmax')
<SYSTEM_TASK:> Returns name and shape of init states of LSTM network <END_TASK> <USER_TASK:> Description: def init_states(batch_size, num_lstm_layer, num_hidden): """ Returns name and shape of init states of LSTM network Parameters ---------- batch_size: int num_lstm_layer: int num_hidden: int Returns ------- list of tuple of str and tuple of int and int """
init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)] init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)] return init_c + init_h
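A brief usage sketch with illustrative values, assuming the init_states function above is in scope:
states = init_states(batch_size=32, num_lstm_layer=2, num_hidden=256)
# states == [('l0_init_c', (32, 256)), ('l1_init_c', (32, 256)),
#            ('l0_init_h', (32, 256)), ('l1_init_h', (32, 256))]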
<SYSTEM_TASK:> ctypes implementation of imperative invoke wrapper <END_TASK> <USER_TASK:> Description: def _imperative_invoke(handle, ndargs, keys, vals, out): """ctypes implementation of imperative invoke wrapper"""
if out is not None: original_output = out if isinstance(out, NDArrayBase): out = (out,) num_output = ctypes.c_int(len(out)) output_vars = c_handle_array(out) output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle)) else: original_output = None output_vars = ctypes.POINTER(NDArrayHandle)() num_output = ctypes.c_int(0) # return output stypes to avoid the c_api call for checking # a handle's stype in _ndarray_cls out_stypes = ctypes.POINTER(ctypes.c_int)() check_call(_LIB.MXImperativeInvokeEx( ctypes.c_void_p(handle), ctypes.c_int(len(ndargs)), c_handle_array(ndargs), ctypes.byref(num_output), ctypes.byref(output_vars), ctypes.c_int(len(keys)), c_str_array(keys), c_str_array([str(s) for s in vals]), ctypes.byref(out_stypes))) if original_output is not None: return original_output if num_output.value == 1: return _ndarray_cls(ctypes.cast(output_vars[0], NDArrayHandle), stype=out_stypes[0]) else: return [_ndarray_cls(ctypes.cast(output_vars[i], NDArrayHandle), stype=out_stypes[i]) for i in range(num_output.value)]
<SYSTEM_TASK:> Compute the gradients of outputs w.r.t variables. <END_TASK> <USER_TASK:> Description: def backward(outputs, out_grads=None, retain_graph=False): """Compute the gradients of outputs w.r.t variables. Parameters ---------- outputs: list of NDArray out_grads: list of NDArray or None """
assert isinstance(outputs, (list, tuple)), \ "outputs must be a list or tuple of NDArrays" if out_grads is None: check_call(_LIB.MXAutogradBackward( len(outputs), c_handle_array(outputs), ctypes.c_void_p(0), ctypes.c_int(retain_graph))) return ograd_handles = [] for arr in out_grads: if arr is not None: ograd_handles.append(arr.handle) else: ograd_handles.append(NDArrayHandle(0)) assert len(ograd_handles) == len(outputs), \ "outputs and out_grads must have the same length" check_call(_LIB.MXAutogradBackward( len(outputs), c_handle_array(outputs), c_array(NDArrayHandle, ograd_handles), ctypes.c_int(retain_graph)))
<SYSTEM_TASK:> Return function that computes both gradient of arguments and loss value. <END_TASK> <USER_TASK:> Description: def grad_and_loss(func, argnum=None): """Return function that computes both gradient of arguments and loss value. Parameters ---------- func: a python function The forward (loss) function. argnum: an int or a list of int The index of argument to calculate gradient for. Returns ------- grad_and_loss_func: a python function A function that would compute both the gradient of arguments and loss value. """
@functools.wraps(func) def wrapped(*args): """Wrapped function.""" variables = args if argnum is not None: argnum_ = argnum if isinstance(argnum, list) else [argnum] variables = [args[i] for i in argnum_] for x in variables: assert isinstance(x, NDArray), "type of autograd input should be NDArray." grads = [zeros_like(x) for x in variables] mark_variables(variables, grads) with train_section(): outputs = func(*args) compute_gradient([outputs] if isinstance(outputs, NDArray) else outputs) return grads, outputs return wrapped
<SYSTEM_TASK:> Return function that computes gradient of arguments. <END_TASK> <USER_TASK:> Description: def grad(func, argnum=None): """Return function that computes gradient of arguments. Parameters ---------- func: a python function The forward (loss) function. argnum: an int or a list of int The index of argument to calculate gradient for. Returns ------- grad_func: a python function A function that would compute the gradient of arguments. Examples -------- >>> # autograd supports dynamic graph which is changed >>> # every instance >>> def func(x): >>> r = random.randint(0, 1) >>> if r % 2: >>> return x**2 >>> else: >>> return x/3 >>> # use `grad(func)` to get the gradient function >>> for x in range(10): >>> grad_func = grad(func) >>> inputs = nd.array([[1, 2, 3], [4, 5, 6]]) >>> grad_vals = grad_func(inputs) """
grad_with_loss_func = grad_and_loss(func, argnum) @functools.wraps(grad_with_loss_func) def wrapped(*args): return grad_with_loss_func(*args)[0] return wrapped
<SYSTEM_TASK:> Rescales NDArrays so that their global 2-norm is smaller than `max_norm`. <END_TASK> <USER_TASK:> Description: def clip_global_norm(arrays, max_norm, check_isfinite=True): """Rescales NDArrays so that their global 2-norm is smaller than `max_norm`. Parameters ---------- arrays : list of NDArray max_norm : float check_isfinite : bool, default True If True, check that the total_norm is finite (not nan or inf). This requires a blocking .asscalar() call. Returns ------- NDArray or float Total norm. Return type is NDArray of shape (1,) if check_isfinite is False. Otherwise a float is returned. """
def _norm(array): if array.stype == 'default': x = array.reshape((-1,)) return ndarray.dot(x, x) return array.norm().square() assert len(arrays) > 0 ctx = arrays[0].context total_norm = ndarray.add_n(*[_norm(arr).as_in_context(ctx) for arr in arrays]) total_norm = ndarray.sqrt(total_norm) if check_isfinite: if not np.isfinite(total_norm.asscalar()): warnings.warn( UserWarning('nan or inf is detected. ' 'Clipping results will be undefined.'), stacklevel=2) scale = max_norm / (total_norm + 1e-8) scale = ndarray.min(ndarray.concat(scale, ndarray.ones(1, ctx=ctx), dim=0)) for arr in arrays: arr *= scale.as_in_context(arr.context) if check_isfinite: return total_norm.asscalar() else: return total_norm
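A small usage sketch, assuming MXNet is installed and the clip_global_norm helper above is in scope; the numbers in the comments are approximate:
import mxnet as mx
# Two gradients with 2-norms 5 and 10, so the global norm is sqrt(25 + 100) ~= 11.18.
grads = [mx.nd.array([3.0, 4.0]), mx.nd.array([6.0, 8.0])]
total = clip_global_norm(grads, max_norm=5.0)
# total ~= 11.18; both arrays were rescaled in place by roughly 5 / 11.18.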
<SYSTEM_TASK:> Check whether the sha1 hash of the file content matches the expected hash. <END_TASK> <USER_TASK:> Description: def check_sha1(filename, sha1_hash): """Check whether the sha1 hash of the file content matches the expected hash. Parameters ---------- filename : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. Returns ------- bool Whether the file content matches the expected hash. """
sha1 = hashlib.sha1() with open(filename, 'rb') as f: while True: data = f.read(1048576) if not data: break sha1.update(data) return sha1.hexdigest() == sha1_hash
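A self-contained usage sketch, assuming the check_sha1 helper above is in scope:
import hashlib, os, tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
    fname = tmp.name
expected = hashlib.sha1(b'hello world').hexdigest()
assert check_sha1(fname, expected)      # content matches the expected hash
assert not check_sha1(fname, '0' * 40)  # a wrong hash is rejected
os.remove(fname)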
<SYSTEM_TASK:> Download a given URL <END_TASK> <USER_TASK:> Description: def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True): """Download a given URL Parameters ---------- url : str URL to download path : str, optional Destination path to store downloaded file. By default stores to the current directory with the same name as in url. overwrite : bool, optional Whether to overwrite destination file if already exists. sha1_hash : str, optional Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified but doesn't match. retries : integer, default 5 The number of times to attempt the download in case of failure or non-200 return codes verify_ssl : bool, default True Verify SSL certificates. Returns ------- str The file path of the downloaded file. """
if path is None: fname = url.split('/')[-1] # Empty filenames are invalid assert fname, 'Can\'t construct file-name from this URL. ' \ 'Please set the `path` option manually.' else: path = os.path.expanduser(path) if os.path.isdir(path): fname = os.path.join(path, url.split('/')[-1]) else: fname = path assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format( retries) if not verify_ssl: warnings.warn( 'Unverified HTTPS request is being made (verify_ssl=False). ' 'Adding certificate verification is strongly advised.') if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)): dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) if not os.path.exists(dirname): os.makedirs(dirname) while retries + 1 > 0: # Disable pyling too broad Exception # pylint: disable=W0703 try: print('Downloading {} from {}...'.format(fname, url)) r = requests.get(url, stream=True, verify=verify_ssl) if r.status_code != 200: raise RuntimeError('Failed downloading url {}'.format(url)) # create uuid for temporary files random_uuid = str(uuid.uuid4()) with open('{}.{}'.format(fname, random_uuid), 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) # if the target file exists(created by other processes) # and have the same hash with target file # delete the temporary file if not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)): # atmoic operation in the same file system _replace_atomic('{}.{}'.format(fname, random_uuid), fname) else: try: os.remove('{}.{}'.format(fname, random_uuid)) except OSError: pass finally: warnings.warn( 'File {} exists in file system so the downloaded file is deleted'.format(fname)) if sha1_hash and not check_sha1(fname, sha1_hash): raise UserWarning( 'File {} is downloaded but the content hash does not match.' ' The repo may be outdated or download may be incomplete. ' 'If the "repo_url" is overridden, consider switching to ' 'the default repo.'.format(fname)) break except Exception as e: retries -= 1 if retries <= 0: raise e else: print('download failed due to {}, retrying, {} attempt{} left' .format(repr(e), retries, 's' if retries > 1 else '')) return fname
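A hedged usage sketch of the download helper above; the URL below is a placeholder, not a real artifact, and the file lands in the current directory:
fname = download('https://example.com/data/sample.zip',  # placeholder URL
                 sha1_hash=None,   # skip hash verification
                 retries=3)
print('saved to', fname)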
<SYSTEM_TASK:> Return the URL for hosted file in Gluon repository. <END_TASK> <USER_TASK:> Description: def _get_repo_file_url(namespace, filename): """Return the URL for hosted file in Gluon repository. Parameters ---------- namespace : str Namespace of the file. filename : str Name of the file """
return '{base_url}{namespace}/{filename}'.format(base_url=_get_repo_url(), namespace=namespace, filename=filename)
<SYSTEM_TASK:> Print at most `limit` elements of list. <END_TASK> <USER_TASK:> Description: def _brief_print_list(lst, limit=7): """Print at most `limit` elements of list."""
lst = list(lst) if len(lst) > limit: return _brief_print_list(lst[:limit//2], limit) + ', ..., ' + \ _brief_print_list(lst[-limit//2:], limit) return ', '.join(["'%s'"%str(i) for i in lst])
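For example, using the helper above, a 10-element list is elided in the middle:
print(_brief_print_list(['w%d' % i for i in range(10)], limit=7))
# 'w0', 'w1', 'w2', ..., 'w6', 'w7', 'w8', 'w9'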
<SYSTEM_TASK:> Create a symbol function by handle and function name. <END_TASK> <USER_TASK:> Description: def _make_symbol_function(handle, name, func_name): """Create a symbol function by handle and function name."""
code, doc_str = _generate_symbol_function_code(handle, name, func_name) local = {} exec(code, None, local) # pylint: disable=exec-used symbol_function = local[func_name] symbol_function.__name__ = func_name symbol_function.__doc__ = doc_str symbol_function.__module__ = 'mxnet.symbol' return symbol_function
<SYSTEM_TASK:> Generate row ids based on the current mini-batch <END_TASK> <USER_TASK:> Description: def batch_row_ids(data_batch): """ Generate row ids based on the current mini-batch """
item = data_batch.data[0] user = data_batch.data[1] return {'user_weight': user.astype(np.int64), 'item_weight': item.astype(np.int64)}
<SYSTEM_TASK:> Generate row ids for all rows <END_TASK> <USER_TASK:> Description: def all_row_ids(data_batch): """ Generate row ids for all rows """
all_users = mx.nd.arange(0, MOVIELENS['max_user'], dtype='int64') all_movies = mx.nd.arange(0, MOVIELENS['max_movie'], dtype='int64') return {'user_weight': all_users, 'item_weight': all_movies}
<SYSTEM_TASK:> check function consistency with uniform random numbers <END_TASK> <USER_TASK:> Description: def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]): """check function consistency with uniform random numbers"""
if isinstance(arg_shapes, int): assert dim shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim)) arg_shapes = [shape] * arg_shapes for dtype in type_list: ndarray_arg = [] numpy_arg = [] for s in arg_shapes: npy = np.random.uniform(rmin, 10, s).astype(dtype) narr = mx.nd.array(npy, dtype=dtype) ndarray_arg.append(narr) numpy_arg.append(npy) out1 = uf(*ndarray_arg) if npuf is None: out2 = uf(*numpy_arg).astype(dtype) else: out2 = npuf(*numpy_arg).astype(dtype) assert out1.shape == out2.shape if isinstance(out1, mx.nd.NDArray): out1 = out1.asnumpy() if dtype == np.float16: assert reldiff(out1, out2) < 2e-3 else: assert reldiff(out1, out2) < 1e-6
<SYSTEM_TASK:> Only flip boxes coordinates, images will be flipped when loading into network <END_TASK> <USER_TASK:> Description: def append_flipped_images(self): """Only flip boxes coordinates, images will be flipped when loading into network"""
logger.info('%s append flipped images to roidb' % self._name) roidb_flipped = [] for roi_rec in self._roidb: boxes = roi_rec['boxes'].copy() oldx1 = boxes[:, 0].copy() oldx2 = boxes[:, 2].copy() boxes[:, 0] = roi_rec['width'] - oldx2 - 1 boxes[:, 2] = roi_rec['width'] - oldx1 - 1 assert (boxes[:, 2] >= boxes[:, 0]).all() roi_rec_flipped = roi_rec.copy() roi_rec_flipped['boxes'] = boxes roi_rec_flipped['flipped'] = True roidb_flipped.append(roi_rec_flipped) self._roidb.extend(roidb_flipped)
<SYSTEM_TASK:> r"""Return location for the pretrained model on the local file system. <END_TASK> <USER_TASK:> Description: def get_model_file(name, root=os.path.join(base.data_dir(), 'models')): r"""Return location for the pretrained model on the local file system. This function will download from the online model zoo when the model cannot be found or has mismatch. The root directory will be created if it doesn't exist. Parameters ---------- name : str Name of the model. root : str, default $MXNET_HOME/models Location for keeping the model parameters. Returns ------- file_path Path to the requested pretrained model file. """
file_name = '{name}-{short_hash}'.format(name=name, short_hash=short_hash(name)) root = os.path.expanduser(root) file_path = os.path.join(root, file_name+'.params') sha1_hash = _model_sha1[name] if os.path.exists(file_path): if check_sha1(file_path, sha1_hash): return file_path else: logging.warning('Mismatch in the content of model file detected. Downloading again.') else: logging.info('Model file not found. Downloading to %s.', file_path) util.makedirs(root) zip_file_path = os.path.join(root, file_name+'.zip') repo_url = os.environ.get('MXNET_GLUON_REPO', apache_repo_url) if repo_url[-1] != '/': repo_url = repo_url + '/' download(_url_format.format(repo_url=repo_url, file_name=file_name), path=zip_file_path, overwrite=True) with zipfile.ZipFile(zip_file_path) as zf: zf.extractall(root) os.remove(zip_file_path) if check_sha1(file_path, sha1_hash): return file_path else: raise ValueError('Downloaded file has different hash. Please try again.')
<SYSTEM_TASK:> r"""Purge all pretrained model files in local file store. <END_TASK> <USER_TASK:> Description: def purge(root=os.path.join(base.data_dir(), 'models')): r"""Purge all pretrained model files in local file store. Parameters ---------- root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. """
root = os.path.expanduser(root) files = os.listdir(root) for f in files: if f.endswith(".params"): os.remove(os.path.join(root, f))
<SYSTEM_TASK:> initialize all entries given annotation json file <END_TASK> <USER_TASK:> Description: def _load_all(self, anno_file, shuffle): """ initialize all entries given annotation json file Parameters: ---------- anno_file: str annotation json file shuffle: bool whether to shuffle image list """
image_set_index = [] labels = [] coco = COCO(anno_file) img_ids = coco.getImgIds() # deal with class names cats = [cat['name'] for cat in coco.loadCats(coco.getCatIds())] class_to_coco_ind = dict(zip(cats, coco.getCatIds())) class_to_ind = dict(zip(self.classes, range(len(self.classes)))) coco_ind_to_class_ind = dict([(class_to_coco_ind[cls], class_to_ind[cls]) for cls in self.classes[0:]]) for img_id in img_ids: # filename image_info = coco.loadImgs(img_id)[0] filename = image_info["file_name"] subdir = filename.split('_')[1] height = image_info["height"] width = image_info["width"] # label anno_ids = coco.getAnnIds(imgIds=img_id) annos = coco.loadAnns(anno_ids) label = [] for anno in annos: cat_id = coco_ind_to_class_ind[anno['category_id']] bbox = anno["bbox"] assert len(bbox) == 4 xmin = float(bbox[0]) / width ymin = float(bbox[1]) / height xmax = xmin + float(bbox[2]) / width ymax = ymin + float(bbox[3]) / height label.append([cat_id, xmin, ymin, xmax, ymax, 0]) if label: labels.append(np.array(label)) image_set_index.append(os.path.join(subdir, filename)) if shuffle: import random indices = list(range(len(image_set_index))) random.shuffle(indices) image_set_index = [image_set_index[i] for i in indices] labels = [labels[i] for i in indices] # store the results self.image_set_index = image_set_index self.labels = labels
<SYSTEM_TASK:> Forward computation. States from previous forward computation are carried <END_TASK> <USER_TASK:> Description: def forward(self, data_batch, is_train=None, carry_state=True): """Forward computation. States from previous forward computation are carried to the current iteration if `carry_state` is set to `True`. """
# propagate states from the previous iteration if carry_state: if isinstance(self._next_states, (int, float)): self._module.set_states(value=self._next_states) else: self._module.set_states(states=self._next_states) self._module.forward(data_batch, is_train=is_train) outputs = self._module.get_outputs(merge_multi_context=False) self._next_states = outputs[:-1]
<SYSTEM_TASK:> Updates parameters according to the installed optimizer and the gradients computed <END_TASK> <USER_TASK:> Description: def update(self, max_norm=None): """Updates parameters according to the installed optimizer and the gradients computed in the previous forward-backward batch. Gradients are clipped by their global norm if `max_norm` is set. Parameters ---------- max_norm: float, optional If set, clip gradients so that their global norm does not exceed `max_norm`. """
if max_norm is not None: self._clip_by_global_norm(max_norm) self._module.update()
<SYSTEM_TASK:> Resize, transpose and normalize input images <END_TASK> <USER_TASK:> Description: def transformer(data, label): """Resize, transpose and normalize input images"""
# resize to 64x64 data = mx.image.imresize(data, 64, 64) # transpose from (64, 64, 3) to (3, 64, 64) data = mx.nd.transpose(data, (2, 0, 1)) # normalize to [-1, 1] data = data.astype(np.float32)/128 - 1 # if image is greyscale, repeat 3 times to get RGB image. if data.shape[0] == 1: data = mx.nd.tile(data, (3, 1, 1)) return data, label
<SYSTEM_TASK:> helper function to get dataloader <END_TASK> <USER_TASK:> Description: def get_training_data(batch_size): """ helper function to get dataloader"""
return gluon.data.DataLoader( CIFAR10(train=True, transform=transformer), batch_size=batch_size, shuffle=True, last_batch='discard')
<SYSTEM_TASK:> Draw random samples from a Poisson distribution. <END_TASK> <USER_TASK:> Description: def poisson(lam=1, shape=_Null, dtype=_Null, **kwargs): """Draw random samples from a Poisson distribution. Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). Samples will always be returned as a floating point data type. Parameters ---------- lam : float or Symbol, optional Expectation of interval, should be >= 0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is a scalar, output shape will be `(m, n)`. If `lam` is a Symbol with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' Returns ------- Symbol If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is a scalar, output shape will be `(m, n)`. If `lam` is a Symbol with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`. """
return _random_helper(_internal._random_poisson, _internal._sample_poisson, [lam], shape, dtype, kwargs)
<SYSTEM_TASK:> Draw random samples from a generalized negative binomial distribution. <END_TASK> <USER_TASK:> Description: def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, **kwargs): """Draw random samples from a generalized negative binomial distribution. Samples are distributed according to a generalized negative binomial distribution parametrized by *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the number of unsuccessful experiments (generalized to real numbers). Samples will always be returned as a floating point data type. Parameters ---------- mu : float or Symbol, optional Mean of the negative binomial distribution. alpha : float or Symbol, optional Alpha (dispersion) parameter of the negative binomial distribution. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and `alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha` are Symbols with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' Returns ------- Symbol If input `shape` has dimensions, e.g., `(m, n)`, and `mu` and `alpha` are scalars, returned Symbol will resolve to shape `(m, n)`. If `mu` and `alpha` are Symbols with shape, e.g., `(x, y)`, returned Symbol will resolve to shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair. """
return _random_helper(_internal._random_generalized_negative_binomial, _internal._sample_generalized_negative_binomial, [mu, alpha], shape, dtype, kwargs)
<SYSTEM_TASK:> Creates a model from previously saved checkpoint. <END_TASK> <USER_TASK:> Description: def load(prefix, epoch, load_optimizer_states=False, **kwargs): """Creates a model from previously saved checkpoint. Parameters ---------- prefix : str path prefix of saved model files. You should have "prefix-symbol.json", "prefix-xxxx.params", and optionally "prefix-xxxx.states", where xxxx is the epoch number. epoch : int epoch to load. load_optimizer_states : bool whether to load optimizer states. Checkpoint needs to have been made with save_optimizer_states=True. data_names : list of str Default is `('data')` for a typical model used in image classification. label_names : list of str Default is `('softmax_label')` for a typical model used in image classification. logger : Logger Default is `logging`. context : Context or list of Context Default is ``cpu()``. work_load_list : list of number Default ``None``, indicating uniform workload. fixed_param_names: list of str Default ``None``, indicating no network parameters are fixed. """
sym, args, auxs = load_checkpoint(prefix, epoch) mod = Module(symbol=sym, **kwargs) mod._arg_params = args mod._aux_params = auxs mod.params_initialized = True if load_optimizer_states: mod._preload_opt_states = '%s-%04d.states'%(prefix, epoch) return mod
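A hypothetical call for the loader above; the 'mymodel' prefix is a placeholder, and 'mymodel-symbol.json' plus 'mymodel-0010.params' must already exist on disk:
import mxnet as mx
mod = mx.mod.Module.load('mymodel', 10,
                         data_names=('data',),
                         label_names=('softmax_label',),
                         context=mx.cpu())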
<SYSTEM_TASK:> Saves current progress to checkpoint. <END_TASK> <USER_TASK:> Description: def save_checkpoint(self, prefix, epoch, save_optimizer_states=False): """Saves current progress to checkpoint. Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training. Parameters ---------- prefix : str The file prefix to checkpoint to. epoch : int The current epoch number. save_optimizer_states : bool Whether to save optimizer states to continue training. """
self._symbol.save('%s-symbol.json'%prefix) param_name = '%s-%04d.params' % (prefix, epoch) self.save_params(param_name) logging.info('Saved checkpoint to \"%s\"', param_name) if save_optimizer_states: state_name = '%s-%04d.states' % (prefix, epoch) self.save_optimizer_states(state_name) logging.info('Saved optimizer state to \"%s\"', state_name)
<SYSTEM_TASK:> Reshapes the module for new input shapes. <END_TASK> <USER_TASK:> Description: def reshape(self, data_shapes, label_shapes=None): """Reshapes the module for new input shapes. Parameters ---------- data_shapes : list of (str, tuple) Typically is ``data_iter.provide_data``. label_shapes : list of (str, tuple) Typically is ``data_iter.provide_label``. """
assert self.binded self._data_shapes, self._label_shapes = _parse_data_desc( self.data_names, self.label_names, data_shapes, label_shapes) self._exec_group.reshape(self._data_shapes, self._label_shapes)
<SYSTEM_TASK:> Forward computation. It supports data batches with different shapes, such as <END_TASK> <USER_TASK:> Description: def forward(self, data_batch, is_train=None): """Forward computation. It supports data batches with different shapes, such as different batch sizes or different image sizes. If reshaping of data batch relates to modification of symbol or module, such as changing image layout ordering or switching from training to predicting, module rebinding is required. See Also ---------- :meth:`BaseModule.forward`. Parameters ---------- data_batch : DataBatch Could be anything with similar API implemented. is_train : bool Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``. """
assert self.binded and self.params_initialized curr_data_shapes = tuple(i.shape for i in self._data_shapes) if isinstance(data_batch, list): assert data_batch is not None, "Encountered empty data batch" new_data_shapes = [] for i in range(len(data_batch[0].data)): shape = data_batch[0].data[i].shape for db in data_batch: assert shape == db.data[i].shape, \ "All data batches in a list need to have the same shape" new_batch_size = len(data_batch) * shape[0] new_data_shapes.append((new_batch_size,) + shape[1:]) new_data_shapes = tuple(new_data_shapes) else: new_data_shapes = tuple(i.shape for i in data_batch.data) if curr_data_shapes != new_data_shapes: if hasattr(data_batch, "provide_data") and data_batch.provide_data: new_dshape = data_batch.provide_data else: new_dshape = [DataDesc(i.name, shape, i.dtype, i.layout) \ for i, shape in zip(self._data_shapes, new_data_shapes)] if hasattr(data_batch, "provide_label") and data_batch.provide_label: new_lshape = data_batch.provide_label elif hasattr(data_batch, "label") and data_batch.label: new_lshape = [DataDesc(i.name, j.shape, i.dtype, i.layout) \ for i, j in zip(self._label_shapes, data_batch.label)] else: new_lshape = None self.reshape(new_dshape, new_lshape) self._exec_group.forward(data_batch, is_train)
<SYSTEM_TASK:> Updates parameters according to the installed optimizer and the gradients computed <END_TASK> <USER_TASK:> Description: def update(self): """Updates parameters according to the installed optimizer and the gradients computed in the previous forward-backward batch. When KVStore is used to update parameters for multi-device or multi-machine training, a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters, this function does update the copy of parameters in KVStore, but doesn't broadcast the updated parameters to all devices / machines. Please call `prepare` to broadcast `row_sparse` parameters with the next batch of data. See Also ---------- :meth:`BaseModule.update`. """
assert self.binded and self.params_initialized and self.optimizer_initialized self._params_dirty = True if self._update_on_kvstore: _update_params_on_kvstore(self._exec_group.param_arrays, self._exec_group.grad_arrays, self._kvstore, self._exec_group.param_names) else: _update_params(self._exec_group.param_arrays, self._exec_group.grad_arrays, updater=self._updater, num_device=len(self._context), kvstore=self._kvstore, param_names=self._exec_group.param_names)
<SYSTEM_TASK:> Gets outputs of the previous forward computation. <END_TASK> <USER_TASK:> Description: def get_outputs(self, merge_multi_context=True): """Gets outputs of the previous forward computation. If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray` might live on different devices. Parameters ---------- merge_multi_context : bool Default is ``True``. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A ``True`` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- list of NDArray or list of list of NDArray Output. """
assert self.binded and self.params_initialized return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
<SYSTEM_TASK:> Synchronizes parameters from devices to CPU. This function should be called after <END_TASK> <USER_TASK:> Description: def _sync_params_from_devices(self): """Synchronizes parameters from devices to CPU. This function should be called after calling `update`, which updates the parameters on the devices, before one can read the latest parameters from ``self._arg_params`` and ``self._aux_params``. For row_sparse parameters on devices, they are pulled from KVStore with all row ids. """
self._exec_group.get_params(self._arg_params, self._aux_params) if self._kvstore and self._update_on_kvstore: for param_name, param_val in sorted(self._arg_params.items()): if param_val.stype == 'row_sparse': row_ids = nd.arange(0, param_val.shape[0], dtype='int64') self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_ids) self._params_dirty = False
<SYSTEM_TASK:> Draw random samples from a uniform distribution. <END_TASK> <USER_TASK:> Description: def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : float or NDArray, optional Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. high : float or NDArray, optional Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the return NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed samples are drawn for each `[low, high)` pair. Examples -------- >>> mx.nd.random.uniform(0, 1) [ 0.54881352] <NDArray 1 @cpu(0) >>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0)) [ 0.92514056] <NDArray 1 @gpu(0)> >>> mx.nd.random.uniform(-1, 1, shape=(2,)) [ 0.71589124 0.08976638] <NDArray 2 @cpu(0)> >>> low = mx.nd.array([1,2,3]) >>> high = mx.nd.array([2,3,4]) >>> mx.nd.random.uniform(low, high, shape=2) [[ 1.78653979 1.93707538] [ 2.01311183 2.37081361] [ 3.30491424 3.69977832]] <NDArray 3x2 @cpu(0)> """
return _random_helper(_internal._random_uniform, _internal._sample_uniform, [low, high], shape, dtype, ctx, out, kwargs)
<SYSTEM_TASK:> r"""Draw samples from an exponential distribution. <END_TASK> <USER_TASK:> Description: def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): r"""Draw samples from an exponential distribution. Its probability density function is .. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}), for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the inverse of the rate parameter \lambda = 1/\beta. Parameters ---------- scale : float or NDArray, optional The scale parameter, \beta = 1/\lambda. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `scale.context` when `scale` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output` will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale. Examples -------- >>> mx.nd.random.exponential(1) [ 0.79587454] <NDArray 1 @cpu(0)> >>> mx.nd.random.exponential(1, shape=(2,)) [ 0.89856035 1.25593066] <NDArray 2 @cpu(0)> >>> scale = mx.nd.array([1,2,3]) >>> mx.nd.random.exponential(scale, shape=2) [[ 0.41063145 0.42140478] [ 2.59407091 10.12439728] [ 2.42544937 1.14260709]] <NDArray 3x2 @cpu(0)> """
return _random_helper(_internal._random_exponential, _internal._sample_exponential, [1.0/scale], shape, dtype, ctx, out, kwargs)
<SYSTEM_TASK:> Draw random samples from a gamma distribution. <END_TASK> <USER_TASK:> Description: def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Parameters ---------- alpha : float or NDArray, optional The shape of the gamma distribution. Should be greater than zero. beta : float or NDArray, optional The scale of the gamma distribution. Should be greater than zero. Default is equal to 1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `alpha.context` when `alpha` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. Examples -------- >>> mx.nd.random.gamma(1, 1) [ 1.93308783] <NDArray 1 @cpu(0)> >>> mx.nd.random.gamma(1, 1, shape=(2,)) [ 0.48216391 2.09890771] <NDArray 2 @cpu(0)> >>> alpha = mx.nd.array([1,2,3]) >>> beta = mx.nd.array([2,3,4]) >>> mx.nd.random.gamma(alpha, beta, shape=2) [[ 3.24343276 0.94137681] [ 3.52734375 0.45568955] [ 14.26264095 14.0170126 ]] <NDArray 3x2 @cpu(0)> """
return _random_helper(_internal._random_gamma, _internal._sample_gamma, [alpha, beta], shape, dtype, ctx, out, kwargs)
<SYSTEM_TASK:> Draw random samples from a negative binomial distribution. <END_TASK> <USER_TASK:> Description: def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a negative binomial distribution. Samples are distributed according to a negative binomial distribution parametrized by *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). Samples will always be returned as a floating point data type. Parameters ---------- k : float or NDArray, optional Limit of unsuccessful experiments, > 0. p : float or NDArray, optional Failure probability in each experiment, >= 0 and <=1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `k.context` when `k` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. Examples -------- >>> mx.nd.random.negative_binomial(10, 0.5) [ 4.] <NDArray 1 @cpu(0)> >>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,)) [ 3. 4.] <NDArray 2 @cpu(0)> >>> k = mx.nd.array([1,2,3]) >>> p = mx.nd.array([0.2,0.4,0.6]) >>> mx.nd.random.negative_binomial(k, p, shape=2) [[ 3. 2.] [ 4. 4.] [ 0. 5.]] <NDArray 3x2 @cpu(0)> """
return _random_helper(_internal._random_negative_binomial, _internal._sample_negative_binomial, [k, p], shape, dtype, ctx, out, kwargs)
<SYSTEM_TASK:> Draw random samples from a discrete uniform distribution. <END_TASK> <USER_TASK:> Description: def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a discrete uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : int, required Lower boundary of the output interval. All values generated will be greater than or equal to low. high : int, required Upper boundary of the output interval. All values generated will be less than high. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. dtype : {'int32', 'int64'}, optional Data type of output samples. Default is 'int32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has dimensions, e.g., `(m, n)`, the returned NDArray will have shape `(m, n)`. Contents of the returned NDArray will be samples from the interval `[low, high)`. Examples -------- >>> mx.nd.random.randint(5, 100) [ 90] <NDArray 1 @cpu(0)> >>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0)) [ -8] <NDArray 1 @gpu(0)> >>> mx.nd.random.randint(-10, 10, shape=(2,)) [ -5 4] <NDArray 2 @cpu(0)> """
return _random_helper(_internal._random_randint, None, [low, high], shape, dtype, ctx, out, kwargs)
<SYSTEM_TASK:> Initialize parameters in the KVStore. <END_TASK> <USER_TASK:> Description: def _init_params(self): """Initialize parameters in the KVStore. Parameters with incomplete initialization are ignored. """
assert self._kv_initialized, "Cannot initialize parameters in KVStore " \ "when KVStore is not initialized." params_to_init = [] if self._kvstore: for param in self._params_to_init: if param._deferred_init: params_to_init.append(param) else: param_arrays = param._check_and_get(param._data, list) idx = self._param2idx[param.name] self._kvstore.init(idx, param_arrays[0]) if param._stype == 'default': self._kvstore.pull(idx, param_arrays, priority=-idx) self._params_to_init = params_to_init
<SYSTEM_TASK:> Sets a new learning rate of the optimizer. <END_TASK> <USER_TASK:> Description: def set_learning_rate(self, lr): """Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer. """
if not isinstance(self._optimizer, opt.Optimizer): raise UserWarning("Optimizer has to be defined before its learning " "rate is mutated.") else: self._optimizer.set_learning_rate(lr)
<SYSTEM_TASK:> Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, <END_TASK> <USER_TASK:> Description: def _row_sparse_pull(self, parameter, out, row_id, full_idx=False): """Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, `kv.pull` is preferred instead of `kv.row_sparse_pull`. """
# initialize kv and params if not already if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() idx = self._param2idx[parameter.name] if full_idx and 'dist' not in self._kvstore.type: assert row_id.size == out.shape[0] self._kvstore.pull(idx, out=out, priority=-idx, ignore_sparse=False) else: self._kvstore.row_sparse_pull(idx, out=out, row_ids=row_id, priority=-idx)
<SYSTEM_TASK:> For each parameter, reduce the gradients from different contexts. <END_TASK> <USER_TASK:> Description: def allreduce_grads(self): """For each parameter, reduce the gradients from different contexts. Should be called after `autograd.backward()`, outside of `record()` scope, and before `trainer.update()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. """
if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() assert not (self._kvstore and self._update_on_kvstore), \ 'allreduce_grads() when parameters are updated on kvstore ' \ 'is not supported. Try setting `update_on_kvstore` ' \ 'to False when creating trainer.' self._allreduce_grads()
<SYSTEM_TASK:> Makes one step of parameter update. <END_TASK> <USER_TASK:> Description: def update(self, batch_size, ignore_stale_grad=False): """Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope, and after `trainer.allreduce_grads()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformations, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. Parameters ---------- batch_size : int Batch size of data processed. Gradient will be normalized by `1/batch_size`. Set this to 1 if you normalized loss manually with `loss = mean(loss)`. ignore_stale_grad : bool, optional, default=False If true, ignores Parameters with stale gradient (gradient that has not been updated by `backward` after last step) and skips the update. """
if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() assert not (self._kvstore and self._update_on_kvstore), \ 'update() when parameters are updated on kvstore ' \ 'is not supported. Try setting `update_on_kvstore` ' \ 'to False when creating trainer.' self._check_and_rescale_grad(self._scale / batch_size) self._update(ignore_stale_grad)
<SYSTEM_TASK:> Sample the file 10 times (about 1% of lines per pass) to estimate the density of the sparse dataset <END_TASK> <USER_TASK:> Description: def estimate_density(DATA_PATH, feature_size): """Sample the file 10 times (about 1% of lines per pass) to estimate the density of the sparse dataset"""
if not os.path.exists(DATA_PATH): raise Exception("Data is not there!") density = [] P = 0.01 for _ in range(10): num_non_zero = 0 num_sample = 0 with open(DATA_PATH) as f: for line in f: if (random.random() < P): num_non_zero += len(line.split(" ")) - 1 num_sample += 1 density.append(num_non_zero * 1.0 / (feature_size * num_sample)) return sum(density) / len(density)
<SYSTEM_TASK:> Execute the command line command. <END_TASK> <USER_TASK:> Description: def exec_cmd(cmd, role, taskid, pass_env): """Execute the command line command."""
if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt': cmd[0] = './' + cmd[0] cmd = ' '.join(cmd) env = os.environ.copy() for k, v in pass_env.items(): env[k] = str(v) env['DMLC_TASK_ID'] = str(taskid) env['DMLC_ROLE'] = role env['DMLC_JOB_CLUSTER'] = 'local' ntrial = 0 while True: if os.name == 'nt': env['DMLC_NUM_ATTEMPT'] = str(ntrial) ret = subprocess.call(cmd, shell=True, env=env) if ret != 0: ntrial += 1 continue else: bash = cmd ret = subprocess.call(bash, shell=True, executable='bash', env=env) if ret == 0: logging.debug('Thread %d exit with 0', taskid) return else: if os.name == 'nt': sys.exit(-1) else: raise RuntimeError('Get nonzero return code=%d' % ret)
<SYSTEM_TASK:> Truncates the list of integers at the first zero (the CTC blank) and returns a new list of integers <END_TASK> <USER_TASK:> Description: def _remove_blank(l): """ Truncates the list of integers at the first zero (the CTC blank) and returns a new list of integers"""
ret = [] for i, _ in enumerate(l): if l[i] == 0: break ret.append(l[i]) return ret
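Small examples of the helper above:
print(_remove_blank([3, 5, 2, 0, 0, 0]))  # [3, 5, 2]
print(_remove_blank([0, 4, 1]))           # []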
<SYSTEM_TASK:> Not particularly fast code to parse the text file and load into NDArrays. <END_TASK> <USER_TASK:> Description: def get_movielens_iter(filename, batch_size): """Not particularly fast code to parse the text file and load into NDArrays. Returns a prefetching data iterator over the parsed ratings. """
logging.info("Preparing data iterators for " + filename + " ... ") user = [] item = [] score = [] with open(filename, 'r') as f: num_samples = 0 for line in f: tks = line.strip().split('::') if len(tks) != 4: continue num_samples += 1 user.append((tks[0])) item.append((tks[1])) score.append((tks[2])) # convert to ndarrays user = mx.nd.array(user, dtype='int32') item = mx.nd.array(item) score = mx.nd.array(score) # prepare data iters data_train = {'user': user, 'item': item} label_train = {'score': score} iter_train = mx.io.NDArrayIter(data=data_train,label=label_train, batch_size=batch_size, shuffle=True) return mx.io.PrefetchingIter(iter_train)
<SYSTEM_TASK:> Decode image from str buffer. <END_TASK> <USER_TASK:> Description: def imdecode(str_img, flag=1): """Decode image from str buffer. Wrapper for cv2.imdecode that uses mx.nd.NDArray Parameters ---------- str_img : str str buffer read from image file flag : int same as flag for cv2.imdecode Returns ------- img : NDArray decoded image in (width, height, channels) with BGR color channel order """
hdl = NDArrayHandle() check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img), mx_uint(len(str_img)), flag, ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
<SYSTEM_TASK:> Resize image. <END_TASK> <USER_TASK:> Description: def resize(src, size, interpolation=cv2.INTER_LINEAR): """Resize image. Wrapper for cv2.resize that uses mx.nd.NDArray Parameters ---------- src : NDArray image in (width, height, channels) size : tuple target size in (width, height) interpolation : int same as interpolation for cv2.resize Returns ------- img : NDArray resized image """
hdl = NDArrayHandle() check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]), interpolation, ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
<SYSTEM_TASK:> Pad image border <END_TASK> <USER_TASK:> Description: def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0): """Pad image border Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray Parameters ---------- src : NDArray Image in (width, height, channels). Others are the same with cv2.copyMakeBorder Returns ------- img : NDArray padded image """
hdl = NDArrayHandle() check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot), ctypes.c_int(left), ctypes.c_int(right), ctypes.c_int(border_type), ctypes.c_double(value), ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
<SYSTEM_TASK:> Randomly crop src with size. Upsample result if src is smaller than size <END_TASK> <USER_TASK:> Description: def random_crop(src, size): """Randomly crop src with size. Upsample result if src is smaller than size"""
h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h)
<SYSTEM_TASK:> Randomly crop src with size. Randomize area and aspect ratio <END_TASK> <USER_TASK:> Description: def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)): """Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape area = w*h for _ in range(10): new_area = random.uniform(min_area, 1.0) * area new_ratio = random.uniform(*ratio) new_w = int((new_area*new_ratio)**0.5) new_h = int((new_area/new_ratio)**0.5) if random.uniform(0., 1.) < 0.5: new_w, new_h = new_h, new_w if new_w > w or new_h > h: continue x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h) return random_crop(src, size)
<SYSTEM_TASK:> Check to see if the two arrays are the same size. <END_TASK> <USER_TASK:> Description: def check_label_shapes(labels, preds, shape=0): """Check to see if the two arrays are the same size."""
if shape == 0: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label_shape != pred_shape: raise ValueError("Shape of labels {} does not match shape of " "predictions {}".format(label_shape, pred_shape))
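A quick sketch of how the check behaves, assuming numpy and the function above are available:
import numpy as np
labels, preds = np.zeros((4, 10)), np.zeros((4, 10))
check_label_shapes(labels, preds)           # default shape=0 compares len() only
check_label_shapes(labels, preds, shape=1)  # any non-zero value compares full shapes
# check_label_shapes(labels, np.zeros((4, 5)), shape=1)  # would raise ValueError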
<SYSTEM_TASK:> Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object. <END_TASK> <USER_TASK:> Description: def import_to_gluon(model_file, ctx): """ Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object. Parameters ---------- model_file : str ONNX model file name ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block : :class:`~mxnet.gluon.SymbolBlock` A SymbolBlock object representing the given model file. Notes ----- This method is available when you ``import mxnet.contrib.onnx`` """
graph = GraphProto() try: import onnx except ImportError: raise ImportError("Onnx and protobuf need to be installed. Instructions to" + " install - https://github.com/onnx/onnx#installation") model_proto = onnx.load_model(model_file) net = graph.graph_to_gluon(model_proto.graph, ctx) return net
<SYSTEM_TASK:> Set the learning rate to the initial value decayed by ratio every N epochs. <END_TASK> <USER_TASK:> Description: def update_learning_rate(lr, trainer, epoch, ratio, steps): """Set the learning rate to the initial value decayed by ratio every N epochs."""
new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch))) trainer.set_learning_rate(new_lr) return trainer
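The decay arithmetic it applies, worked through with illustrative numbers (no trainer needed):
import numpy as np
lr, ratio, steps, epoch = 0.1, 0.5, [30, 60], 45
new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch)))  # one boundary passed
assert abs(new_lr - 0.05) < 1e-12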
<SYSTEM_TASK:> Seeds the random number generators in MXNet. <END_TASK> <USER_TASK:> Description: def seed(seed_state, ctx="all"): """Seeds the random number generators in MXNet. This affects the behavior of modules in MXNet that uses random number generators, like the dropout operator and `NDArray`'s random sampling operators. Parameters ---------- seed_state : int The random number seed. ctx : Context The device context of the generator. The default is "all" which means seeding random number generators of all devices. Notes ----- Random number generators in MXNet are device specific. `mx.random.seed(seed_state)` sets the state of each generator using `seed_state` and the device id. Therefore, random numbers generated from different devices can be different even if they are seeded using the same seed. To produce identical random number sequences independent of the device id, set optional `ctx` argument. This produces the same sequence of random numbers independent of the device id, but the sequence can be different on different kind of devices as MXNet's random number generators for CPU and GPU use different algorithms. Example ------- >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.36481571 -0.62203991] [-1.4962182 -0.08511394]] >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.09544981 -0.20014545] [-0.20808885 0.2527658 ]] # Same results on the same device with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] # Different results on gpu(0) and gpu(1) with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 0.24336822 -1.664805 ] [-1.0223296 1.253198 ]] # Seeding with `ctx` argument produces identical results on gpu(0) and gpu(1) >>> mx.random.seed(128, ctx=mx.gpu(0)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128, ctx=mx.gpu(1)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] """
if not isinstance(seed_state, integer_types): raise ValueError('seed_state must be int') seed_state = ctypes.c_int(int(seed_state)) if ctx == "all": check_call(_LIB.MXRandomSeed(seed_state)) else: ctx = Context(ctx) check_call(_LIB.MXRandomSeedContext(seed_state, ctx.device_typeid, ctx.device_id))
<SYSTEM_TASK:> Draw random samples from a uniform distribution. <END_TASK> <USER_TASK:> Description: def random_uniform(attrs, inputs, proto_obj): """Draw random samples from a uniform distribution."""
try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._remove_attributes(attrs, ['seed']) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))] return 'random_uniform', new_attrs, inputs
<SYSTEM_TASK:> Draw random samples from a Gaussian distribution. <END_TASK> <USER_TASK:> Description: def random_normal(attrs, inputs, proto_obj): """Draw random samples from a Gaussian distribution."""
try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attr = translation_utils._remove_attributes(attrs, ['seed']) new_attr = translation_utils._fix_attribute_names(new_attr, {'mean': 'loc'}) new_attr['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attr.get('dtype', 1))] return 'random_normal', new_attr, inputs
<SYSTEM_TASK:> Adding two tensors <END_TASK> <USER_TASK:> Description: def add(attrs, inputs, proto_obj): """Adding two tensors"""
new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_add', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_add', new_attr, inputs
<SYSTEM_TASK:> Mean of all the input tensors. <END_TASK> <USER_TASK:> Description: def mean(attrs, inputs, proto_obj): """Mean of all the input tensors."""
concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs] concat_sym = symbol.concat(*concat_input, dim=0) mean_sym = symbol.mean(concat_sym, axis=0) return mean_sym, attrs, inputs
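A minimal eager sketch of the stack-then-average idea above, using mx.nd arrays rather than symbols (assumes an MXNet install; the values are illustrative):

import mxnet as mx

a = mx.nd.array([[1, 2], [3, 4]])
b = mx.nd.array([[5, 6], [7, 8]])

# Expand each input to (1, 2, 2), concatenate along the new axis,
# then average over that axis to get the elementwise mean.
stacked = mx.nd.concat(*[x.expand_dims(axis=0) for x in (a, b)], dim=0)
elementwise_mean = stacked.mean(axis=0)   # [[3. 4.] [5. 6.]]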
<SYSTEM_TASK:> Returns indices of the maximum values along an axis <END_TASK> <USER_TASK:> Description: def argmax(attrs, inputs, proto_obj): """Returns indices of the maximum values along an axis"""
axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmax_op
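An eager illustration of argmax followed by the int64 cast that ONNX expects (a sketch assuming an MXNet install; the input is made up):

import mxnet as mx

x = mx.nd.array([[1, 9, 3], [7, 2, 5]])
# Index of the maximum along axis 1, kept as a column, then cast to int64.
idx = mx.nd.argmax(x, axis=1, keepdims=True).astype('int64')   # [[1], [0]]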
<SYSTEM_TASK:> Returns indices of the minimum values along an axis. <END_TASK> <USER_TASK:> Description: def argmin(attrs, inputs, proto_obj): """Returns indices of the minimum values along an axis."""
axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims) # onnx argmin operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmin_op
<SYSTEM_TASK:> Elementwise maximum of arrays. <END_TASK> <USER_TASK:> Description: def maximum(attrs, inputs, proto_obj): """ Elementwise maximum of arrays. MXNet maximum compares only two symbols at a time. ONNX can send more than two to compare. Breaking into multiple mxnet ops to compare two symbols at a time """
if len(inputs) > 1: mxnet_op = symbol.maximum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.maximum(mxnet_op, op_input) else: mxnet_op = symbol.maximum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs
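The pairwise folding above can be written compactly with functools.reduce; a sketch on eager NDArrays (assumes an MXNet install):

from functools import reduce
import mxnet as mx

inputs = [mx.nd.array([1, 5, 2]), mx.nd.array([4, 0, 3]), mx.nd.array([2, 2, 9])]
# Fold the elementwise maximum over the list, two operands at a time.
running_max = reduce(mx.nd.maximum, inputs)   # [4. 5. 9.]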
<SYSTEM_TASK:> Joins input arrays along a given axis. <END_TASK> <USER_TASK:> Description: def concat(attrs, inputs, proto_obj): """ Joins input arrays along a given axis. """
new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'}) return 'concat', new_attrs, inputs
<SYSTEM_TASK:> Add padding to input tensor <END_TASK> <USER_TASK:> Description: def pad(attrs, inputs, proto_obj): """ Add padding to input tensor"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width', 'value' : 'constant_value' }) new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width')) return 'pad', new_attrs, inputs
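For intuition only, a standalone sketch of the kind of reordering a helper like _pad_sequence_fix would have to perform; this is an assumption about its behavior, not the actual utility. ONNX lists all begin-pads first and all end-pads last, while MXNet's pad operator interleaves them per axis:

def onnx_pads_to_mxnet_pad_width(pads):
    # [b1, b2, ..., e1, e2, ...]  ->  (b1, e1, b2, e2, ...)
    half = len(pads) // 2
    begins, ends = pads[:half], pads[half:]
    pad_width = []
    for b, e in zip(begins, ends):
        pad_width.extend([b, e])
    return tuple(pad_width)

print(onnx_pads_to_mxnet_pad_width([0, 1, 0, 2]))   # (0, 0, 1, 2)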
<SYSTEM_TASK:> Applies the softplus activation function element-wise to the input. <END_TASK> <USER_TASK:> Description: def softplus(attrs, inputs, proto_obj): """Applies the softplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'}) return 'Activation', new_attrs, inputs
<SYSTEM_TASK:> Computes transposed convolution of the input tensor. <END_TASK> <USER_TASK:> Description: def deconv(attrs, inputs, proto_obj): """Computes transposed convolution of the input tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel', 'strides' : 'stride', 'pads': 'pad', 'dilations': 'dilate', 'group': 'num_group'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1}) new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs)) new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj) kernel = new_attrs['kernel'] stride = new_attrs['stride'] if 'stride' in new_attrs else [] padding = new_attrs['pad'] if 'pad' in new_attrs else [] dilations = new_attrs['dilate'] if 'dilate' in new_attrs else [] num_filter = new_attrs['num_filter'] num_group = new_attrs['num_group'] no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False bias = None if no_bias is True else inputs[2] # Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first # use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator. pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel)) pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width) deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias, kernel=kernel, stride=stride, dilate=dilations, num_filter=num_filter, num_group=num_group, no_bias=no_bias) return deconv_op, new_attrs, inputs
<SYSTEM_TASK:> Performs max pooling on the input. <END_TASK> <USER_TASK:> Description: def global_maxpooling(attrs, inputs, proto_obj): """Performs max pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True, 'kernel': (1, 1), 'pool_type': 'max'}) return 'Pooling', new_attrs, inputs
<SYSTEM_TASK:> Performs avg pooling on the input. <END_TASK> <USER_TASK:> Description: def global_avgpooling(attrs, inputs, proto_obj): """Performs avg pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True, 'kernel': (1, 1), 'pool_type': 'avg'}) return 'Pooling', new_attrs, inputs
<SYSTEM_TASK:> Performs general matrix multiplication and accumulation <END_TASK> <USER_TASK:> Description: def linalg_gemm(attrs, inputs, proto_obj): """Performs general matrix multiplication and accumulation"""
trans_a = 0 trans_b = 0 alpha = 1 beta = 1 if 'transA' in attrs: trans_a = attrs['transA'] if 'transB' in attrs: trans_b = attrs['transB'] if 'alpha' in attrs: alpha = attrs['alpha'] if 'beta' in attrs: beta = attrs['beta'] flatten_a = symbol.flatten(inputs[0]) matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1], transpose_a=trans_a, transpose_b=trans_b, alpha=alpha) gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2]) new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a', 'transB': 'transpose_b'}) new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast']) return gemm_op, new_attrs, inputs
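A NumPy reference for what ONNX Gemm computes, Y = alpha * op(A) @ op(B) + beta * C, to make the flatten / linalg_gemm2 / broadcast_add mapping above concrete (a sketch with made-up shapes, not the translator itself):

import numpy as np

A = np.arange(6, dtype=np.float32).reshape(2, 3)
B = np.arange(12, dtype=np.float32).reshape(3, 4)
C = np.ones((1, 4), dtype=np.float32)
alpha, beta = 1.0, 1.0

# C broadcasts over the rows of A @ B, mirroring the broadcast_add above.
Y = alpha * (A @ B) + beta * C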
<SYSTEM_TASK:> Reshape the given array by the shape attribute. <END_TASK> <USER_TASK:> Description: def reshape(attrs, inputs, proto_obj): """Reshape the given array by the shape attribute."""
if len(inputs) == 1: return 'reshape', attrs, inputs[0] reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy()) reshape_shape = [int(i) for i in reshape_shape] new_attrs = {'shape': reshape_shape} return 'reshape', new_attrs, inputs[:1]
<SYSTEM_TASK:> Cast input to a given dtype <END_TASK> <USER_TASK:> Description: def cast(attrs, inputs, proto_obj): """ Cast input to a given dtype"""
try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " + "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'}) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])] return 'cast', new_attrs, inputs
<SYSTEM_TASK:> Splits an array along a particular axis into multiple sub-arrays. <END_TASK> <USER_TASK:> Description: def split(attrs, inputs, proto_obj): """Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else [] new_attrs = translation_utils._fix_attribute_names(attrs, {'split' : 'num_outputs'}) if 'axis' not in attrs: new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0}) if not split_list: num_outputs = len(proto_obj.model_metadata.get('output_tensor_data')) else: if len(set(split_list)) == 1: num_outputs = len(split_list) else: raise NotImplementedError("Operator {} in MXNet does not support variable splits." "Tracking the issue to support variable split here: " "https://github.com/apache/incubator-mxnet/issues/11594" .format('split')) new_attrs['num_outputs'] = num_outputs return 'split', new_attrs, inputs
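An eager example of the equal-split case that the translation above supports (assumes an MXNet install; shapes are illustrative):

import mxnet as mx

x = mx.nd.arange(12).reshape((4, 3))
# num_outputs must divide the axis evenly; here 4 rows -> two (2, 3) pieces.
parts = mx.nd.split(x, num_outputs=2, axis=0)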
<SYSTEM_TASK:> Returns a slice of the input tensor along multiple axes. <END_TASK> <USER_TASK:> Description: def _slice(attrs, inputs, proto_obj): """Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes' : 'axis', 'ends' : 'end', 'starts' : 'begin'}) # onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator # for multiple axes from mxnet begin = new_attrs.get('begin') end = new_attrs.get('end') axes = new_attrs.get('axis', tuple(range(len(begin)))) slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0]) if len(axes) > 1: for i, axis in enumerate(axes): slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i]) return slice_op, new_attrs, inputs
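Slicing along several axes by chaining slice_axis, shown eagerly on an NDArray (a sketch assuming an MXNet install):

import mxnet as mx

x = mx.nd.arange(24).reshape((2, 3, 4))
starts, ends, axes = (0, 1), (2, 3), (0, 2)

out = x
for axis, begin, end in zip(axes, starts, ends):
    out = mx.nd.slice_axis(out, axis=axis, begin=begin, end=end)
# out.shape == (2, 3, 2)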
<SYSTEM_TASK:> Remove single-dimensional entries from the shape of a tensor. <END_TASK> <USER_TASK:> Description: def squeeze(attrs, inputs, proto_obj): """Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes' : 'axis'}) return 'squeeze', new_attrs, inputs
<SYSTEM_TASK:> Inserts a new axis of size 1 into the array shape <END_TASK> <USER_TASK:> Description: def unsqueeze(attrs, inputs, cls): """Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time. mxnet_op = inputs[0] for axis in attrs["axes"]: mxnet_op = symbol.expand_dims(mxnet_op, axis=axis) return mxnet_op, attrs, inputs
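The one-axis-at-a-time expansion above, shown eagerly (assumes an MXNet install):

import mxnet as mx

x = mx.nd.zeros((3, 4))
for axis in (0, 3):                 # an ONNX-style 'axes' attribute
    x = mx.nd.expand_dims(x, axis=axis)
# x.shape == (1, 3, 4, 1)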
<SYSTEM_TASK:> Flattens the input array into a 2-D array by collapsing the higher dimensions. <END_TASK> <USER_TASK:> Description: def flatten(attrs, inputs, proto_obj): """Flattens the input array into a 2-D array by collapsing the higher dimensions."""
#Mxnet does not have axis support. By default uses axis=1 if 'axis' in attrs and attrs['axis'] != 1: raise RuntimeError("Flatten operator only supports axis=1") new_attrs = translation_utils._remove_attributes(attrs, ['axis']) return 'Flatten', new_attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by maximum value <END_TASK> <USER_TASK:> Description: def reduce_max(attrs, inputs, proto_obj): """Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'max', new_attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by mean value <END_TASK> <USER_TASK:> Description: def reduce_mean(attrs, inputs, proto_obj): """Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'mean', new_attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by minimum value <END_TASK> <USER_TASK:> Description: def reduce_min(attrs, inputs, proto_obj): """Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'min', new_attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by sum value <END_TASK> <USER_TASK:> Description: def reduce_sum(attrs, inputs, proto_obj): """Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'sum', new_attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by product value <END_TASK> <USER_TASK:> Description: def reduce_prod(attrs, inputs, proto_obj): """Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'prod', new_attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by log sum value <END_TASK> <USER_TASK:> Description: def reduce_log_sum(attrs, inputs, proto_obj): """Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims') sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'), keepdims=keep_dims) log_sym = symbol.log(sum_op) return log_sym, attrs, inputs
<SYSTEM_TASK:> Reduce the array along a given axis by log sum exp value <END_TASK> <USER_TASK:> Description: def reduce_log_sum_exp(attrs, inputs, proto_obj): """Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims') exp_op = symbol.exp(inputs[0]) sum_op = symbol.sum(exp_op, axis=attrs.get('axes'), keepdims=keep_dims) log_sym = symbol.log(sum_op) return log_sym, attrs, inputs
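A NumPy cross-check of the exp -> sum -> log composition used above (values are illustrative; note this direct form is not numerically stabilised):

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])
log_sum_exp = np.log(np.exp(x).sum(axis=1, keepdims=True))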
<SYSTEM_TASK:> Reduce the array along a given axis by sum square value <END_TASK> <USER_TASK:> Description: def reduce_sum_square(attrs, inputs, proto_obj): """Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0]) sum_op = symbol.sum(square_op, axis=attrs.get('axes'), keepdims=attrs.get('keepdims')) return sum_op, attrs, inputs
<SYSTEM_TASK:> Rearranges data from depth into blocks of spatial data. <END_TASK> <USER_TASK:> Description: def depthtospace(attrs, inputs, proto_obj): """Rearranges data from depth into blocks of spatial data."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'}) return "depth_to_space", new_attrs, inputs
<SYSTEM_TASK:> Rearranges blocks of spatial data into depth. <END_TASK> <USER_TASK:> Description: def spacetodepth(attrs, inputs, proto_obj): """Rearranges blocks of spatial data into depth."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'}) return "space_to_depth", new_attrs, inputs
<SYSTEM_TASK:> this runs inside the vm <END_TASK> <USER_TASK:> Description: def run_ut_python3_qemu_internal(): """this runs inside the vm"""
pkg = glob.glob('mxnet_dist/*.whl')[0] logging.info("=== NOW Running inside QEMU ===") logging.info("PIP Installing %s", pkg) check_call(['sudo', 'pip3', 'install', pkg]) logging.info("PIP Installing mxnet/test_requirements.txt") check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt']) logging.info("Running tests in mxnet/tests/python/unittest/") check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py'])
<SYSTEM_TASK:> Return a new handle with specified shape and context. <END_TASK> <USER_TASK:> Description: def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t): """Return a new handle with specified shape and context. Empty handle is only used to hold results. Returns ------- handle A new empty `NDArray` handle. """
hdl = NDArrayHandle() check_call(_LIB.MXNDArrayCreateEx( c_array_buf(mx_uint, native_array('I', shape)), mx_uint(len(shape)), ctypes.c_int(ctx.device_typeid), ctypes.c_int(ctx.device_id), ctypes.c_int(int(delay_alloc)), ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])), ctypes.byref(hdl))) return hdl
<SYSTEM_TASK:> Returns a dispatch code for calling basic or advanced indexing functions. <END_TASK> <USER_TASK:> Description: def _get_indexing_dispatch_code(key): """Returns a dispatch code for calling basic or advanced indexing functions."""
if isinstance(key, (NDArray, np.ndarray)): return _NDARRAY_ADVANCED_INDEXING elif isinstance(key, list): # TODO(junwu): Add support for nested lists besides integer list for i in key: if not isinstance(i, integer_types): raise TypeError('Indexing NDArray only supports a list of integers as index' ' when key is of list type, received element=%s of type=%s' % (str(i), str(type(i)))) return _NDARRAY_ADVANCED_INDEXING elif isinstance(key, (integer_types, py_slice)): return _NDARRAY_BASIC_INDEXING elif isinstance(key, tuple): for idx in key: if isinstance(idx, (NDArray, np.ndarray, list, tuple)): return _NDARRAY_ADVANCED_INDEXING elif not isinstance(idx, (py_slice, integer_types)): raise ValueError("NDArray does not support slicing with key %s of type %s." % (str(idx), str(type(idx)))) return _NDARRAY_BASIC_INDEXING else: return _NDARRAY_UNSUPPORTED_INDEXING
<SYSTEM_TASK:> Given data and index shapes, get the output `NDArray` shape. <END_TASK> <USER_TASK:> Description: def _get_oshape_of_gather_nd_op(dshape, ishape): """Given data and index shapes, get the output `NDArray` shape. This basically implements the infer shape logic of op gather_nd."""
assert len(dshape) > 0 and len(ishape) > 0 oshape = list(ishape[1:]) if ishape[0] < len(dshape): oshape.extend(dshape[ishape[0]:]) return tuple(oshape)
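A worked example of the shape rule above: the output is ishape[1:] followed by the data dimensions that the first ishape[0] axes do not index.

dshape, ishape = (6, 7, 8, 9), (2, 5)
oshape = tuple(ishape[1:]) + dshape[ishape[0]:]   # (5, 8, 9)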
<SYSTEM_TASK:> Given start, stop, and step, calculate the number of elements <END_TASK> <USER_TASK:> Description: def _get_dim_size(start, stop, step): """Given start, stop, and step, calculate the number of elements of this slice."""
assert step != 0 if step > 0: assert start < stop dim_size = (stop - start - 1) // step + 1 else: assert stop < start dim_size = (start - stop - 1) // (-step) + 1 return dim_size
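A quick check of the slice-length formula against Python's own range semantics (pure Python, no MXNet needed):

start, stop, step = 2, 11, 3
dim_size = (stop - start - 1) // step + 1            # 3
assert dim_size == len(range(start, stop, step))     # elements 2, 5, 8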
<SYSTEM_TASK:> Given two shapes that are not identical, find the shape <END_TASK> <USER_TASK:> Description: def _get_broadcast_shape(shape1, shape2): """Given two shapes that are not identical, find the shape that both input shapes can broadcast to."""
if shape1 == shape2: return shape1 length1 = len(shape1) length2 = len(shape2) if length1 > length2: shape = list(shape1) else: shape = list(shape2) i = max(length1, length2) - 1 for a, b in zip(shape1[::-1], shape2[::-1]): if a != 1 and b != 1 and a != b: raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2)) shape[i] = max(a, b) i -= 1 return tuple(shape)
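The right-aligned, max-of-sizes rule above matches NumPy broadcasting; a quick cross-check with made-up shapes:

import numpy as np

shape1, shape2 = (3, 1, 5), (4, 5)
out = np.broadcast(np.empty(shape1), np.empty(shape2)).shape   # (3, 4, 5)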
<SYSTEM_TASK:> Returns a new array filled with all ones, with the given shape and type. <END_TASK> <USER_TASK:> Description: def ones(shape, ctx=None, dtype=None, **kwargs): """Returns a new array filled with all ones, with the given shape and type. Parameters ---------- shape : int or tuple of int or list of int The shape of the empty array. ctx : Context, optional An optional device context. Defaults to the current default context (``mxnet.context.current_context()``). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). out : NDArray, optional The output NDArray (default is `None`). Returns ------- NDArray A new array of the specified shape filled with all ones. Examples -------- >>> mx.nd.ones(1).asnumpy() array([ 1.], dtype=float32) >>> mx.nd.ones((1,2), mx.gpu(0)) <NDArray 1x2 @gpu(0)> >>> mx.nd.ones((1,2), dtype='float16').asnumpy() array([[ 1., 1.]], dtype=float16) """
# pylint: disable= unused-argument if ctx is None: ctx = current_context() dtype = mx_real_t if dtype is None else dtype # pylint: disable= no-member, protected-access return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
<SYSTEM_TASK:> Moves the `source` axis into the `destination` position <END_TASK> <USER_TASK:> Description: def moveaxis(tensor, source, destination): """Moves the `source` axis into the `destination` position while leaving the other axes in their original order. Parameters ---------- tensor : mx.nd.array The array whose axes should be reordered source : int or sequence of int Original position of the axes to move. Can be negative but must be unique. destination : int or sequence of int Destination position for each of the original axes. Can be negative but must be unique. Returns ------- result : mx.nd.array Array with moved axes. Examples -------- >>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]]) >>> mx.nd.moveaxis(X, 0, 1).shape (3L, 2L) >>> X = mx.nd.zeros((3, 4, 5)) >>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape (5, 4, 3) """
try: source = np.core.numeric.normalize_axis_tuple(source, tensor.ndim) except IndexError: raise ValueError('Source should verify 0 <= source < tensor.ndim' 'Got %d' % source) try: destination = np.core.numeric.normalize_axis_tuple(destination, tensor.ndim) except IndexError: raise ValueError('Destination should verify 0 <= destination < tensor.ndim (%d).' % tensor.ndim, 'Got %d' % destination) if len(source) != len(destination): raise ValueError('`source` and `destination` arguments must have ' 'the same number of elements') order = [n for n in range(tensor.ndim) if n not in source] for dest, src in sorted(zip(destination, source)): order.insert(dest, src) return op.transpose(tensor, order)
<SYSTEM_TASK:> Helper function for element-wise operation. <END_TASK> <USER_TASK:> Description: def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None): """ Helper function for element-wise operation. The function will perform numpy-like broadcasting if needed and call different functions. Parameters -------- lhs : NDArray or numeric value Left-hand side operand. rhs : NDArray or numeric value Right-hand operand, fn_array : function Function to be called if both lhs and rhs are of ``NDArray`` type. fn_scalar : function Function to be called if both lhs and rhs are numeric values. lfn_scalar : function Function to be called if lhs is ``NDArray`` while rhs is numeric value rfn_scalar : function Function to be called if lhs is numeric value while rhs is ``NDArray``; if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar Returns -------- NDArray result array """
if isinstance(lhs, numeric_types): if isinstance(rhs, numeric_types): return fn_scalar(lhs, rhs) else: if rfn_scalar is None: # commutative function return lfn_scalar(rhs, float(lhs)) else: return rfn_scalar(rhs, float(lhs)) elif isinstance(rhs, numeric_types): return lfn_scalar(lhs, float(rhs)) elif isinstance(rhs, NDArray): return fn_array(lhs, rhs) else: raise TypeError('type %s not supported' % str(type(rhs)))
<SYSTEM_TASK:> Returns element-wise modulo of the input arrays with broadcasting. <END_TASK> <USER_TASK:> Description: def modulo(lhs, rhs): """Returns element-wise modulo of the input arrays with broadcasting. Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array in modulo. rhs : scalar or mxnet.ndarray.array Second array in modulo. The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise modulo of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3))*6 >>> y = mx.nd.ones((2,1))*4 >>> x.asnumpy() array([[ 6., 6., 6.], [ 6., 6., 6.]], dtype=float32) >>> y.asnumpy() array([[ 4.], [ 4.]], dtype=float32) >>> x%5 <NDArray 2x3 @cpu(0)> >>> (x%5).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (x%y).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> mx.nd.modulo(x,y).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) """
# pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_mod, operator.mod, _internal._mod_scalar, _internal._rmod_scalar)
<SYSTEM_TASK:> Returns result of first array elements raised to powers from second array, element-wise <END_TASK> <USER_TASK:> Description: def power(base, exp): """Returns result of first array elements raised to powers from second array, element-wise with broadcasting. Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- base : scalar or NDArray The base array exp : scalar or NDArray The exponent array. If ``base.shape != exp.shape``, they must be broadcastable to a common shape. Returns -------- NDArray The bases in x raised to the exponents in y. Examples -------- >>> x = mx.nd.ones((2,3))*2 >>> y = mx.nd.arange(1,3).reshape((2,1)) >>> z = mx.nd.arange(1,3).reshape((2,1)) >>> x.asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> y.asnumpy() array([[ 1.], [ 2.]], dtype=float32) >>> z.asnumpy() array([[ 1.], [ 2.]], dtype=float32) >>> (x**2).asnumpy() array([[ 4., 4., 4.], [ 4., 4., 4.]], dtype=float32) >>> (x**y).asnumpy() array([[ 2., 2., 2.], [ 4., 4., 4.]], dtype=float32) >>> mx.nd.power(x,y).asnumpy() array([[ 2., 2., 2.], [ 4., 4., 4.]], dtype=float32) >>> (z**y).asnumpy() array([[ 1.], [ 4.]], dtype=float32) """
# pylint: disable= no-member, protected-access return _ufunc_helper( base, exp, op.broadcast_power, operator.pow, _internal._power_scalar, _internal._rpower_scalar)
<SYSTEM_TASK:> Returns element-wise maximum of the input arrays with broadcasting. <END_TASK> <USER_TASK:> Description: def maximum(lhs, rhs): """Returns element-wise maximum of the input arrays with broadcasting. Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise maximum of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.maximum(x, 2).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> mx.nd.maximum(x, y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.maximum(y, z).asnumpy() array([[ 0., 1.], [ 1., 1.]], dtype=float32) """
# pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_maximum, lambda x, y: x if x > y else y, _internal._maximum_scalar, None)
<SYSTEM_TASK:> Returns element-wise minimum of the input arrays with broadcasting. <END_TASK> <USER_TASK:> Description: def minimum(lhs, rhs): """Returns element-wise minimum of the input arrays with broadcasting. Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise minimum of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.minimum(x, 2).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(z, y).asnumpy() array([[ 0., 0.], [ 0., 1.]], dtype=float32) """
# pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_minimum, lambda x, y: x if x < y else y, _internal._minimum_scalar, None)
<SYSTEM_TASK:> DEPRECATED, use ``concat`` instead <END_TASK> <USER_TASK:> Description: def concatenate(arrays, axis=0, always_copy=True): """DEPRECATED, use ``concat`` instead Parameters ---------- arrays : list of `NDArray` Arrays to be concatenated. They must have identical shapes except along the axis of concatenation. They also must have the same data type. axis : int The axis along which to concatenate. always_copy : bool Default `True`. When not `True`, if the arrays only contain one `NDArray`, that element will be returned directly, avoiding a copy. Returns ------- NDArray An `NDArray` that lives on the same context as `arrays[0].context`. """
assert isinstance(arrays, list) assert len(arrays) > 0 assert isinstance(arrays[0], NDArray) if not always_copy and len(arrays) == 1: return arrays[0] shape_axis = arrays[0].shape[axis] shape_rest1 = arrays[0].shape[0:axis] shape_rest2 = arrays[0].shape[axis+1:] dtype = arrays[0].dtype for arr in arrays[1:]: shape_axis += arr.shape[axis] assert shape_rest1 == arr.shape[0:axis] assert shape_rest2 == arr.shape[axis+1:] assert dtype == arr.dtype ret_shape = shape_rest1 + (shape_axis,) + shape_rest2 ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype) idx = 0 begin = [0 for _ in ret_shape] end = list(ret_shape) for arr in arrays: if axis == 0: ret[idx:idx+arr.shape[0]] = arr else: begin[axis] = idx end[axis] = idx+arr.shape[axis] # pylint: disable=no-member,protected-access _internal._crop_assign(ret, arr, out=ret, begin=tuple(begin), end=tuple(end)) # pylint: enable=no-member,protected-access idx += arr.shape[axis] return ret
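A usage sketch of the deprecated helper next to its recommended replacement (assumes an MXNet install):

import mxnet as mx

a = mx.nd.ones((2, 3))
b = mx.nd.zeros((2, 3))
old_style = mx.nd.concatenate([a, b], axis=0)   # shape (4, 3)
new_style = mx.nd.concat(a, b, dim=0)           # preferred going forward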