Code | Summary
---|---
Please provide a description of the function:def retry(target_exception, tries=4, delay_s=1, backoff=2):
    import logging  # used below; imported locally to keep the snippet self-contained
    import time
    from functools import wraps

    def decorated_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay_s
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except target_exception as e:
                    logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay)
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return decorated_retry | [
"Retry calling the decorated function using an exponential backoff.\n\n http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/\n original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry\n\n :param target_exception: the exception to check. may be a tuple of\n exceptions to check\n :type target_exception: Exception or tuple\n :param tries: number of times to try (not retry) before giving up\n :type tries: int\n :param delay_s: initial delay between retries in seconds\n :type delay_s: int\n :param backoff: backoff multiplier e.g. value of 2 will double the delay\n each retry\n :type backoff: int\n "
] |
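A minimal usage sketch (the flaky_fetch function is hypothetical, added only to illustrate the decorator):

import random

@retry(ValueError, tries=3, delay_s=1, backoff=2)
def flaky_fetch():
    # Fails on roughly half the calls; retry() waits 1s, then 2s, before the final attempt.
    if random.random() < 0.5:
        raise ValueError("transient failure")
    return "ok"

print(flaky_fetch())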
Please provide a description of the function:def load_model(model_name, epoch_num, data_shapes, label_shapes, label_names, gpus=''):
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, epoch_num)
    mod = create_module(sym, data_shapes, label_shapes, label_names, gpus)
    mod.set_params(
        arg_params=arg_params,
        aux_params=aux_params,
        allow_missing=True
    )
    return mod | [
"Returns a module loaded with the provided model.\n\n Parameters\n ----------\n model_name: str\n Prefix of the MXNet model name as stored on the local directory.\n\n epoch_num : int\n Epoch number of model we would like to load.\n\n input_shape: tuple\n The shape of the input data in the form of (batch_size, channels, height, width)\n\n files: list of strings\n List of URLs pertaining to files that need to be downloaded in order to use the model.\n\n data_shapes: list of tuples.\n List of tuples where each tuple is a pair of input variable name and its shape.\n\n label_shapes: list of (str, tuple)\n Typically is ``data_iter.provide_label``.\n\n label_names: list of str\n Name of the output labels in the MXNet symbolic graph.\n\n gpus: str\n Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.\n If empty, we use CPU.\n\n Returns\n -------\n MXNet module\n "
] |
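A hedged usage sketch (assumes a checkpoint saved under the hypothetical prefix 'resnet-18' in the working directory):

mod = load_model(
    model_name='resnet-18',                    # loads resnet-18-symbol.json / resnet-18-0000.params
    epoch_num=0,
    data_shapes=[('data', (1, 3, 224, 224))],
    label_shapes=None,
    label_names=None,
    gpus='')                                   # empty string runs on CPU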
Please provide a description of the function:def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
    if gpus == '':
        devices = mx.cpu()
    else:
        devices = [mx.gpu(int(i)) for i in gpus.split(',')]
    data_names = [data_shape[0] for data_shape in data_shapes]
    mod = mx.mod.Module(
        symbol=sym,
        data_names=data_names,
        context=devices,
        label_names=label_names
    )
    mod.bind(
        for_training=False,
        data_shapes=data_shapes,
        label_shapes=label_shapes
    )
    return mod | [
"Creates a new MXNet module.\n\n Parameters\n ----------\n sym : Symbol\n An MXNet symbol.\n\n input_shape: tuple\n The shape of the input data in the form of (batch_size, channels, height, width)\n\n files: list of strings\n List of URLs pertaining to files that need to be downloaded in order to use the model.\n\n data_shapes: list of tuples.\n List of tuples where each tuple is a pair of input variable name and its shape.\n\n label_shapes: list of (str, tuple)\n Typically is ``data_iter.provide_label``.\n\n label_names: list of str\n Name of the output labels in the MXNet symbolic graph.\n\n gpus: str\n Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.\n If empty, we use CPU.\n\n Returns\n -------\n MXNet module\n "
] |
Please provide a description of the function:def evaluate_net(net, path_imgrec, num_classes, num_batch, mean_pixels, data_shape,
                 model_prefix, epoch, ctx=mx.cpu(), batch_size=32,
                 path_imglist="", nms_thresh=0.45, force_nms=False,
                 ovp_thresh=0.5, use_difficult=False, class_names=None,
                 voc07_metric=False):
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # args
    if isinstance(data_shape, int):
        data_shape = (3, data_shape, data_shape)
    assert len(data_shape) == 3 and data_shape[0] == 3
    model_prefix += '_' + str(data_shape[1])
    # iterator
    eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape, mean_pixels=mean_pixels,
                              path_imglist=path_imglist, **cfg.valid)
    # model params
    load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
    # network
    if net is None:
        net = load_net
    else:
        net = get_symbol(net, data_shape[1], num_classes=num_classes,
                         nms_thresh=nms_thresh, force_suppress=force_nms)
    if 'label' not in net.list_arguments():
        label = mx.sym.Variable(name='label')
        net = mx.sym.Group([net, label])
    # init module
    mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
                        fixed_param_names=net.list_arguments())
    mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
    mod.set_params(args, auxs, allow_missing=False, force_init=True)
    # run evaluation
    if voc07_metric:
        metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names)
    else:
        metric = MApMetric(ovp_thresh, use_difficult, class_names)
    num = num_batch * batch_size
    data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=ctx) for _, shape in mod.data_shapes]
    batch = mx.io.DataBatch(data, [])  # empty label
    dry_run = 5  # use 5 iterations to warm up
    for i in range(dry_run):
        mod.forward(batch, is_train=False)
        for output in mod.get_outputs():
            output.wait_to_read()
    tic = time.time()
    results = mod.score(eval_iter, metric, num_batch=num_batch)
    speed = num / (time.time() - tic)
    if logger is not None:
        logger.info('Finished inference with %d images' % num)
        logger.info('Finished with %f images per second', speed)
    for k, v in results:
        print("{}: {}".format(k, v)) | [
"\n evalute network given validation record file\n\n Parameters:\n ----------\n net : str or None\n Network name or use None to load from json without modifying\n path_imgrec : str\n path to the record validation file\n path_imglist : str\n path to the list file to replace labels in record file, optional\n num_classes : int\n number of classes, not including background\n mean_pixels : tuple\n (mean_r, mean_g, mean_b)\n data_shape : tuple or int\n (3, height, width) or height/width\n model_prefix : str\n model prefix of saved checkpoint\n epoch : int\n load model epoch\n ctx : mx.ctx\n mx.gpu() or mx.cpu()\n batch_size : int\n validation batch size\n nms_thresh : float\n non-maximum suppression threshold\n force_nms : boolean\n whether suppress different class objects\n ovp_thresh : float\n AP overlap threshold for true/false postives\n use_difficult : boolean\n whether to use difficult objects in evaluation if applicable\n class_names : comma separated str\n class names in string, must correspond to num_classes if set\n voc07_metric : boolean\n whether to use 11-point evluation as in VOC07 competition\n "
] |
Please provide a description of the function:def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    pass | [
"Initializes the parameters and auxiliary states. By default this function\n does nothing. Subclass should override this method if contains parameters.\n\n Parameters\n ----------\n initializer : Initializer\n Called to initialize parameters if needed.\n arg_params : dict\n If not ``None``, should be a dictionary of existing `arg_params`. Initialization\n will be copied from that.\n aux_params : dict\n If not ``None``, should be a dictionary of existing `aux_params`. Initialization\n will be copied from that.\n allow_missing : bool\n If ``True``, params could contain missing values, and the initializer will be\n called to fill those missing params.\n force_init : bool\n If ``True``, will force re-initialize even if already initialized.\n allow_extra : boolean, optional\n Whether allow extra parameters that are not needed by symbol.\n If this is True, no error will be thrown when arg_params or aux_params\n contain extra parameters that is not needed by the executor.\n "
] |
Please provide a description of the function:def update_metric(self, eval_metric, labels, pre_sliced=False):
    if self._label_shapes is None:
        # since we do not need labels, we are probably not a module with a loss
        # function or predictions, so just ignore this call
        return
    if pre_sliced:
        raise RuntimeError("PythonModule does not support presliced labels")
    # by default we expect our outputs are some scores that could be evaluated
    eval_metric.update(labels, self.get_outputs()) | [
"Evaluates and accumulates evaluation metric on outputs of the last forward computation.\n Subclass should override this method if needed.\n\n Parameters\n ----------\n eval_metric : EvalMetric\n labels : list of NDArray\n Typically ``data_batch.label``.\n "
] |
Please provide a description of the function:def bind(self, data_shapes, label_shapes=None, for_training=True,
         inputs_need_grad=False, force_rebind=False, shared_module=None,
         grad_req='write'):
    if self.binded and not force_rebind:
        self.logger.warning('Already bound, ignoring bind()')
        return
    assert grad_req == 'write', "Python module only supports write gradient"
    self.for_training = for_training
    self.inputs_need_grad = inputs_need_grad
    assert len(data_shapes) == len(self._data_names)
    assert [x[0] for x in data_shapes] == self._data_names
    self._data_shapes = data_shapes
    self._label_shapes = label_shapes
    if label_shapes is not None:
        assert self._label_names is not None
        assert len(self._label_names) == len(label_shapes)
        assert [x[0] for x in label_shapes] == self._label_names
    self._output_shapes = self._compute_output_shapes() | [
"Binds the symbols to construct executors. This is necessary before one\n can perform computation with the module.\n\n Parameters\n ----------\n data_shapes : list of (str, tuple)\n Typically is ``data_iter.provide_data``.\n label_shapes : list of (str, tuple)\n Typically is ``data_iter.provide_label``.\n for_training : bool\n Default is ``True``. Whether the executors should be bind for training.\n inputs_need_grad : bool\n Default is ``False``. Whether the gradients to the input data need to be computed.\n Typically this is not needed. But this might be needed when implementing composition\n of modules.\n force_rebind : bool\n Default is ``False``. This function does nothing if the executors are already\n bound. But with this ``True``, the executors will be forced to rebind.\n shared_module : Module\n Default is ``None``. This is used in bucketing. When not ``None``, the shared module\n essentially corresponds to a different bucket -- a module with different symbol\n but with the same sets of parameters (e.g. unrolled RNNs with different lengths).\n grad_req : str, list of str, dict of str to str\n Requirement for gradient accumulation. Can be 'write', 'add', or 'null'\n (default to 'write').\n Can be specified globally (str) or for each argument (list, dict).\n "
] |
Please provide a description of the function:def forward(self, data_batch, is_train=None):
    self._scores = data_batch.data[0]
    if is_train is None:
        is_train = self.for_training
    if is_train:
        self._labels = data_batch.label[0] | [
"Forward computation. Here we do nothing but to keep a reference to\n the scores and the labels so that we can do backward computation.\n\n Parameters\n ----------\n data_batch : DataBatch\n Could be anything with similar API implemented.\n is_train : bool\n Default is ``None``, which means `is_train` takes the value of ``self.for_training``.\n "
] |
Please provide a description of the function:def _backward_impl(self):
    if self._grad_func is not None:
        grad = self._grad_func(self._scores, self._labels)
        if not isinstance(grad, nd.NDArray):
            grad = nd.array(grad)
        self._scores_grad = grad
    else:
        raise NotImplementedError() | [
"Actual implementation of the backward computation. The computation\n should take ``self._scores`` and ``self._labels`` and then compute the\n gradients with respect to the scores, store it as an `NDArray` in\n ``self._scores_grad``.\n\n Instead of defining a subclass and overriding this function,\n a more convenient way is to pass in a `grad_func` when constructing\n the module object. Then it will be called to compute the gradients.\n "
] |
Please provide a description of the function:def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n',
                     start_label=0, unknown_token=None):
    idx = start_label
    if vocab is None:
        vocab = {invalid_key: invalid_label}
        new_vocab = True
    else:
        new_vocab = False
    res = []
    for sent in sentences:
        coded = []
        for word in sent:
            if word not in vocab:
                assert (new_vocab or unknown_token), "Unknown token %s" % word
                if idx == invalid_label:
                    idx += 1
                if unknown_token:
                    word = unknown_token
                vocab[word] = idx
                idx += 1
            coded.append(vocab[word])
        res.append(coded)
    return res, vocab | [
"Encode sentences and (optionally) build a mapping\n from string tokens to integer indices. Unknown keys\n will be added to vocabulary.\n\n Parameters\n ----------\n sentences : list of list of str\n A list of sentences to encode. Each sentence\n should be a list of string tokens.\n vocab : None or dict of str -> int\n Optional input Vocabulary\n invalid_label : int, default -1\n Index for invalid token, like <end-of-sentence>\n invalid_key : str, default '\\\\n'\n Key for invalid token. Use '\\\\n' for end\n of sentence by default.\n start_label : int\n lowest index.\n unknown_token: str\n Symbol to represent unknown token.\n If not specified, unknown token will be skipped.\n\n Returns\n -------\n result : list of list of int\n encoded sentences\n vocab : dict of str -> int\n result vocabulary\n "
] |
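A quick usage sketch (plain Python, no MXNet objects involved):

sentences = [['the', 'cat', 'sat'], ['the', 'dog']]
coded, vocab = encode_sentences(sentences)
# coded -> [[0, 1, 2], [0, 3]]
# vocab -> {'\n': -1, 'the': 0, 'cat': 1, 'sat': 2, 'dog': 3}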
Please provide a description of the function:def reset(self):
    self.curr_idx = 0
    random.shuffle(self.idx)
    for buck in self.data:
        np.random.shuffle(buck)
    self.nddata = []
    self.ndlabel = []
    for buck in self.data:
        label = np.empty_like(buck)
        label[:, :-1] = buck[:, 1:]
        label[:, -1] = self.invalid_label
        self.nddata.append(ndarray.array(buck, dtype=self.dtype))
        self.ndlabel.append(ndarray.array(label, dtype=self.dtype)) | [
"Resets the iterator to the beginning of the data."
] |
Please provide a description of the function:def next(self):
    if self.curr_idx == len(self.idx):
        raise StopIteration
    i, j = self.idx[self.curr_idx]
    self.curr_idx += 1
    if self.major_axis == 1:
        data = self.nddata[i][j:j+self.batch_size].T
        label = self.ndlabel[i][j:j+self.batch_size].T
    else:
        data = self.nddata[i][j:j+self.batch_size]
        label = self.ndlabel[i][j:j+self.batch_size]
    return DataBatch([data], [label], pad=0,
                     bucket_key=self.buckets[i],
                     provide_data=[DataDesc(
                         name=self.data_name, shape=data.shape,
                         layout=self.layout)],
                     provide_label=[DataDesc(
                         name=self.label_name, shape=label.shape,
                         layout=self.layout)]) | [
"Returns the next batch of data."
] |
Please provide a description of the function:def getInstance(self):
    try:
        return self._instance
    except AttributeError:
        self._instance = self._decorated()
        return self._instance | [
"\n Returns the singleton instance. Upon its first call, it creates a\n new instance of the decorated class and calls its `__init__` method.\n On all subsequent calls, the already created instance is returned.\n\n "
] |
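A usage sketch, assuming getInstance lives on a Singleton decorator class roughly like the one below (the wrapper class itself is an assumption, reconstructed from the method and its docstring):

class Singleton:
    # assumed wrapper: stores the decorated class for lazy instantiation
    def __init__(self, decorated):
        self._decorated = decorated
    def getInstance(self):
        try:
            return self._instance
        except AttributeError:
            self._instance = self._decorated()
            return self._instance

@Singleton
class AppConfig:
    def __init__(self):
        self.debug = False

assert AppConfig.getInstance() is AppConfig.getInstance()  # same instance on every call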
Please provide a description of the function:def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--image_path', type=str, default='./data/datasets/')
    parser.add_argument('--align_path', type=str, default='./data/align/')
    parser.add_argument('--num_gpus', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--data_type', type=str, default='valid')
    parser.add_argument('--model_path', type=str, default=None)
    config = parser.parse_args()
    trainer = Train(config)
    trainer.build_model(path=config.model_path)
    trainer.load_dataloader()
    if config.data_type == 'train':
        data_loader = trainer.train_dataloader
    elif config.data_type == 'valid':
        data_loader = trainer.valid_dataloader
    trainer.infer_batch(data_loader) | [
"\n Description : run lipnet training code using argument info\n "
] |
Please provide a description of the function:def get(self, name, **kwargs):
    name = self._prefix + name
    if name not in self._params:
        self._params[name] = symbol.Variable(name, **kwargs)
    return self._params[name] | [
"Get the variable given a name if one exists or create a new one if missing.\n\n Parameters\n ----------\n name : str\n name of the variable\n **kwargs :\n more arguments that's passed to symbol.Variable\n "
] |
Please provide a description of the function:def reset(self):
    self._init_counter = -1
    self._counter = -1
    if hasattr(self, '_cells'):
        for cell in self._cells:
            cell.reset() | [
"Reset before re-using the cell for another graph."
] |
Please provide a description of the function:def begin_state(self, func=symbol.zeros, **kwargs):
    assert not self._modified, \
        "After applying modifier cells (e.g. DropoutCell) the base " \
        "cell cannot be called directly. Call the modifier cell instead."
    states = []
    for info in self.state_info:
        self._init_counter += 1
        if info is None:
            state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
                         **kwargs)
        else:
            kwargs.update(info)
            state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
                         **kwargs)
        states.append(state)
    return states | [
"Initial state for this cell.\n\n Parameters\n ----------\n func : callable, default symbol.zeros\n Function for creating initial state. Can be symbol.zeros,\n symbol.uniform, symbol.Variable etc.\n Use symbol.Variable if you want to directly\n feed input as states.\n **kwargs :\n more keyword arguments passed to func. For example\n mean, std, dtype, etc.\n\n Returns\n -------\n states : nested list of Symbol\n Starting states for the first RNN step.\n "
] |
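A short sketch of requesting initial states from a symbolic cell (assumes the legacy mx.rnn API; the prefix is illustrative):

cell = mx.rnn.LSTMCell(num_hidden=50, prefix='lstm_')
states = cell.begin_state()
# -> [<Symbol lstm_begin_state_0>, <Symbol lstm_begin_state_1>]  (the h and c states)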
Please provide a description of the function:def unpack_weights(self, args):
    args = args.copy()
    if not self._gate_names:
        return args
    h = self._num_hidden
    for group_name in ['i2h', 'h2h']:
        weight = args.pop('%s%s_weight'%(self._prefix, group_name))
        bias = args.pop('%s%s_bias' % (self._prefix, group_name))
        for j, gate in enumerate(self._gate_names):
            wname = '%s%s%s_weight' % (self._prefix, group_name, gate)
            args[wname] = weight[j*h:(j+1)*h].copy()
            bname = '%s%s%s_bias' % (self._prefix, group_name, gate)
            args[bname] = bias[j*h:(j+1)*h].copy()
    return args | [
"Unpack fused weight matrices into separate\n weight matrices.\n\n For example, say you use a module object `mod` to run a network that has an lstm cell.\n In `mod.get_params()[0]`, the lstm parameters are all represented as a single big vector.\n `cell.unpack_weights(mod.get_params()[0])` will unpack this vector into a dictionary of\n more readable lstm parameters - c, f, i, o gates for i2h (input to hidden) and\n h2h (hidden to hidden) weights.\n\n Parameters\n ----------\n args : dict of str -> NDArray\n Dictionary containing packed weights.\n usually from `Module.get_params()[0]`.\n\n Returns\n -------\n args : dict of str -> NDArray\n Dictionary with unpacked weights associated with\n this cell.\n\n See Also\n --------\n pack_weights: Performs the reverse operation of this function.\n "
] |
Please provide a description of the function:def pack_weights(self, args):
    args = args.copy()
    if not self._gate_names:
        return args
    for group_name in ['i2h', 'h2h']:
        weight = []
        bias = []
        for gate in self._gate_names:
            wname = '%s%s%s_weight'%(self._prefix, group_name, gate)
            weight.append(args.pop(wname))
            bname = '%s%s%s_bias'%(self._prefix, group_name, gate)
            bias.append(args.pop(bname))
        args['%s%s_weight'%(self._prefix, group_name)] = ndarray.concatenate(weight)
        args['%s%s_bias'%(self._prefix, group_name)] = ndarray.concatenate(bias)
    return args | [
"Pack separate weight matrices into a single packed\n weight.\n\n Parameters\n ----------\n args : dict of str -> NDArray\n Dictionary containing unpacked weights.\n\n Returns\n -------\n args : dict of str -> NDArray\n Dictionary with packed weights associated with\n this cell.\n "
] |
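A hedged round-trip sketch (assumes `mod` is a fitted Module and `cell` is the LSTMCell used to build it, as in the unpack_weights docstring above):

packed = mod.get_params()[0]            # fused i2h/h2h weights
readable = cell.unpack_weights(packed)  # per-gate matrices, e.g. 'lstm_i2h_i_weight'
restored = cell.pack_weights(readable)  # fuses them back; restored matches packed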
Please provide a description of the function:def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
    self.reset()
    inputs, _ = _normalize_sequence(length, inputs, layout, False)
    if begin_state is None:
        begin_state = self.begin_state()
    states = begin_state
    outputs = []
    for i in range(length):
        output, states = self(inputs[i], states)
        outputs.append(output)
    outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
    return outputs, states | [
"Unroll an RNN cell across time steps.\n\n Parameters\n ----------\n length : int\n Number of steps to unroll.\n inputs : Symbol, list of Symbol, or None\n If `inputs` is a single Symbol (usually the output\n of Embedding symbol), it should have shape\n (batch_size, length, ...) if layout == 'NTC',\n or (length, batch_size, ...) if layout == 'TNC'.\n\n If `inputs` is a list of symbols (usually output of\n previous unroll), they should all have shape\n (batch_size, ...).\n begin_state : nested list of Symbol, default None\n Input states created by `begin_state()`\n or output state of another cell.\n Created from `begin_state()` if None.\n layout : str, optional\n `layout` of input symbol. Only used if inputs\n is a single Symbol.\n merge_outputs : bool, optional\n If False, return outputs as a list of Symbols.\n If True, concatenate output across time steps\n and return a single symbol with shape\n (batch_size, length, ...) if layout == 'NTC',\n or (length, batch_size, ...) if layout == 'TNC'.\n If None, output whatever is faster.\n\n Returns\n -------\n outputs : list of Symbol or Symbol\n Symbol (if `merge_outputs` is True) or list of Symbols\n (if `merge_outputs` is False) corresponding to the output from\n the RNN from this unrolling.\n\n states : nested list of Symbol\n The new state of this RNN after this unrolling.\n The type of this symbol is same as the output of begin_state().\n "
] |
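A minimal unrolling sketch (legacy symbolic API; shapes and the prefix are illustrative):

cell = mx.rnn.LSTMCell(num_hidden=50, prefix='lstm_')
data = mx.sym.Variable('data')  # expected shape (batch_size, 3, feat_dim) for layout 'NTC'
outputs, states = cell.unroll(length=3, inputs=data, layout='NTC', merge_outputs=True)
# outputs is a single Symbol of shape (batch_size, 3, 50); states holds the final h and c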
Please provide a description of the function:def _get_activation(self, inputs, activation, **kwargs):
    if isinstance(activation, string_types):
        return symbol.Activation(inputs, act_type=activation, **kwargs)
    else:
        return activation(inputs, **kwargs) | [
"Get activation function. Convert if is string"
] |
Please provide a description of the function:def _slice_weights(self, arr, li, lh):
    args = {}
    gate_names = self._gate_names
    directions = self._directions
    b = len(directions)
    p = 0
    for layer in range(self._num_layers):
        for direction in directions:
            for gate in gate_names:
                name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
                if layer > 0:
                    size = b*lh*lh
                    args[name] = arr[p:p+size].reshape((lh, b*lh))
                else:
                    size = li*lh
                    args[name] = arr[p:p+size].reshape((lh, li))
                p += size
            for gate in gate_names:
                name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
                size = lh**2
                args[name] = arr[p:p+size].reshape((lh, lh))
                p += size
    for layer in range(self._num_layers):
        for direction in directions:
            for gate in gate_names:
                name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
                args[name] = arr[p:p+lh]
                p += lh
            for gate in gate_names:
                name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
                args[name] = arr[p:p+lh]
                p += lh
    assert p == arr.size, "Invalid parameters size for FusedRNNCell"
    return args | [
"slice fused rnn weights"
] |
Please provide a description of the function:def unfuse(self):
    stack = SequentialRNNCell()
    get_cell = {'rnn_relu': lambda cell_prefix: RNNCell(self._num_hidden,
                                                        activation='relu',
                                                        prefix=cell_prefix),
                'rnn_tanh': lambda cell_prefix: RNNCell(self._num_hidden,
                                                        activation='tanh',
                                                        prefix=cell_prefix),
                'lstm': lambda cell_prefix: LSTMCell(self._num_hidden,
                                                     prefix=cell_prefix),
                'gru': lambda cell_prefix: GRUCell(self._num_hidden,
                                                   prefix=cell_prefix)}[self._mode]
    for i in range(self._num_layers):
        if self._bidirectional:
            stack.add(BidirectionalCell(
                get_cell('%sl%d_'%(self._prefix, i)),
                get_cell('%sr%d_'%(self._prefix, i)),
                output_prefix='%sbi_l%d_'%(self._prefix, i)))
        else:
            stack.add(get_cell('%sl%d_'%(self._prefix, i)))
        if self._dropout > 0 and i != self._num_layers - 1:
            stack.add(DropoutCell(self._dropout, prefix='%s_dropout%d_'%(self._prefix, i)))
    return stack | [
"Unfuse the fused RNN in to a stack of rnn cells.\n\n Returns\n -------\n cell : mxnet.rnn.SequentialRNNCell\n unfused cell that can be used for stepping, and can run on CPU.\n "
] |
Please provide a description of the function:def add(self, cell):
    self._cells.append(cell)
    if self._override_cell_params:
        assert cell._own_params, \
            "Either specify params for SequentialRNNCell " \
            "or child cells, not both."
        cell.params._params.update(self.params._params)
    self.params._params.update(cell.params._params) | [
"Append a cell into the stack.\n\n Parameters\n ----------\n cell : BaseRNNCell\n The cell to be appended. During unroll, previous cell's output (or raw inputs if\n no previous cell) is used as the input to this cell.\n "
] |
Please provide a description of the function:def read_image(img_path, image_dims=None, mean=None):
    import urllib.request  # Python 3; the original Python 2 code used urllib.urlretrieve
    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urllib.request.urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2)  # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean  # subtract mean
    return img | [
"\n Reads an image from file path or URL, optionally resizing to given image dimensions and\n subtracting mean.\n :param img_path: path to file, or url to download\n :param image_dims: image dimensions to resize to, or None\n :param mean: mean file to subtract, or None\n :return: loaded image, in RGB format\n "
] |
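A usage sketch (the file name and the ImageNet-style RGB mean values are illustrative):

img = read_image('cat.jpg', image_dims=(224, 224), mean=(123.68, 116.78, 103.94))
print(img.shape)  # (1, 3, 224, 224): RGB, mean-subtracted float32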
Please provide a description of the function:def _ch_dev(arg_params, aux_params, ctx):
    new_args = dict()
    new_auxs = dict()
    for k, v in arg_params.items():
        new_args[k] = v.as_in_context(ctx)
    for k, v in aux_params.items():
        new_auxs[k] = v.as_in_context(ctx)
    return new_args, new_auxs | [
"\n Changes device of given mxnet arguments\n :param arg_params: arguments\n :param aux_params: auxiliary parameters\n :param ctx: new device context\n :return: arguments and auxiliary parameters on new device\n "
] |
Please provide a description of the function:def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
                                       caffe_mean, mean_diff_allowed, max_diff_allowed):
    import caffe
    from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
    from convert_model import convert_model
    if isinstance(caffe_mean, str):
        caffe_mean = read_caffe_mean(caffe_mean)
    elif caffe_mean is None:
        pass
    elif len(caffe_mean) == 3:
        # swap channels from Caffe BGR to RGB
        caffe_mean = caffe_mean[::-1]
    # get caffe root location, this is needed to run the upgrade network utility, so we only need
    # to support parsing of latest caffe
    caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
    caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
    _, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
    caffe.set_mode_cpu()
    caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
    image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
    logging.info('getting image %s', image_url)
    img_rgb = read_image(image_url, image_dims, caffe_mean)
    img_bgr = img_rgb[:, ::-1, :, :]
    caffe_net.blobs['data'].reshape(*img_bgr.shape)
    caffe_net.blobs['data'].data[...] = img_bgr
    _ = caffe_net.forward()
    # read sym and add all outputs
    sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
    sym = sym.get_internals()
    # now mxnet
    if gpu < 0:
        ctx = mx.cpu(0)
    else:
        ctx = mx.gpu(gpu)
    arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
    arg_params["data"] = mx.nd.array(img_rgb, ctx)
    arg_params["prob_label"] = mx.nd.empty((1,), ctx)
    exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
    exe.forward(is_train=False)
    compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
                             top_to_layers, mean_diff_allowed, max_diff_allowed)
    return | [
"\n Run the layer comparison on a caffe model, given its prototxt, weights and mean.\n The comparison is done by inferring on a given image using both caffe and mxnet model\n :param image_url: image file or url to run inference on\n :param gpu: gpu to use, -1 for cpu\n :param caffe_prototxt_path: path to caffe prototxt\n :param caffe_model_path: path to caffe weights\n :param caffe_mean: path to caffe mean file\n "
] |
Please provide a description of the function:def _bfs(root_node, process_node):
    from collections import deque
    seen_nodes = set()
    next_nodes = deque()
    seen_nodes.add(root_node)
    next_nodes.append(root_node)
    while next_nodes:
        current_node = next_nodes.popleft()
        # process current node
        process_node(current_node)
        for child_node in current_node.children:
            if child_node not in seen_nodes:
                seen_nodes.add(child_node)
                next_nodes.append(child_node) | [
"\n Implementation of Breadth-first search (BFS) on caffe network DAG\n :param root_node: root node of caffe network DAG\n :param process_node: function to run on each node\n "
] |
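A self-contained sketch of how _bfs traverses a DAG (the tiny Node class stands in for the real caffe layer records, which only need a .children attribute):

class Node:
    def __init__(self, name):
        self.name = name
        self.children = []

data, conv, relu = Node('data'), Node('conv1'), Node('relu1')
data.children.append(conv)
conv.children.append(relu)
_bfs(data, lambda node: print(node.name))  # prints: data, conv1, relu1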
Please provide a description of the function:def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
                             top_to_layers, mean_diff_allowed, max_diff_allowed):
    import re
    log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
    compare_layers_from_nets.is_first_convolution = True

    def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
        diff = np.abs(mx_blob - caf_blob)
        diff_mean = diff.mean()
        diff_max = diff.max()
        logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
                                       '%4.5f' % diff_max, note))
        assert diff_mean < mean_diff_allowed
        assert diff_max < max_diff_allowed

    def _process_layer_parameters(layer):
        logging.debug('processing layer %s of type %s', layer.name, layer.type)
        normalized_layer_name = re.sub('[-/]', '_', layer.name)
        # handle weight and bias of convolution and fully-connected layers
        if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
                                                             'Deconvolution']:
            has_bias = len(caffe_net.params[layer.name]) > 1
            mx_name_weight = '{}_weight'.format(normalized_layer_name)
            mx_beta = arg_params[mx_name_weight].asnumpy()
            # first convolution should change from BGR to RGB
            if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
                compare_layers_from_nets.is_first_convolution = False
                # if RGB or RGBA
                if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4:
                    # Swapping BGR of caffe into RGB in mxnet
                    mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :]
            caf_beta = caffe_net.params[layer.name][0].data
            _compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '')
            if has_bias:
                mx_name_bias = '{}_bias'.format(normalized_layer_name)
                mx_gamma = arg_params[mx_name_bias].asnumpy()
                caf_gamma = caffe_net.params[layer.name][1].data
                _compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '')
        elif layer.name in caffe_net.params and layer.type == 'Scale':
            if 'scale' in normalized_layer_name:
                bn_name = normalized_layer_name.replace('scale', 'bn')
            elif 'sc' in normalized_layer_name:
                bn_name = normalized_layer_name.replace('sc', 'bn')
            else:
                assert False, 'Unknown name convention for bn/scale'
            beta_name = '{}_beta'.format(bn_name)
            gamma_name = '{}_gamma'.format(bn_name)
            mx_beta = arg_params[beta_name].asnumpy()
            caf_beta = caffe_net.params[layer.name][1].data
            _compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'mov_mean', '')
            mx_gamma = arg_params[gamma_name].asnumpy()
            caf_gamma = caffe_net.params[layer.name][0].data
            _compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'mov_var', '')
        elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
            mean_name = '{}_moving_mean'.format(normalized_layer_name)
            var_name = '{}_moving_var'.format(normalized_layer_name)
            caf_rescale_factor = caffe_net.params[layer.name][2].data
            mx_mean = aux_params[mean_name].asnumpy()
            caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
            _compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
            mx_var = aux_params[var_name].asnumpy()
            caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
            _compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
                          'expect 1e-04 change due to cudnn eps')
        elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
                            'Dropout', 'Crop']:
            # no parameters to check for these layers
            pass
        else:
            # warnings.warn does not do printf-style substitution, so format the message first
            warnings.warn('No handling for layer %s of type %s, should we ignore it?'
                          % (layer.name, layer.type))
        return

    def _process_layer_output(caffe_blob_name):
        logging.debug('processing blob %s', caffe_blob_name)
        # skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
        if caffe_blob_name not in top_to_layers:
            return
        caf_blob = caffe_net.blobs[caffe_blob_name].data
        # data should change from BGR to RGB
        if caffe_blob_name == 'data':
            # if RGB or RGBA
            if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
                # Swapping BGR of caffe into RGB in mxnet
                caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
            mx_name = 'data'
        else:
            # get last layer name which outputs this blob name
            last_layer_name = top_to_layers[caffe_blob_name][-1]
            normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
            mx_name = '{}_output'.format(normalized_last_layer_name)
            if 'scale' in mx_name:
                mx_name = mx_name.replace('scale', 'bn')
            elif 'sc' in mx_name:
                mx_name = mx_name.replace('sc', 'bn')
        if mx_name not in exe.output_dict:
            logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
            return
        mx_blob = exe.output_dict[mx_name].asnumpy()
        _compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
        return

    # check layer parameters
    logging.info('\n***** Network Parameters '.ljust(140, '*'))
    logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
    first_layer_name = list(layer_name_to_record.keys())[0]  # list() needed on Python 3
    _bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
    # check layer output
    logging.info('\n***** Network Outputs '.ljust(140, '*'))
    logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
    for caffe_blob_name in caffe_net.blobs.keys():
        _process_layer_output(caffe_blob_name)
    return | [
"\n Compare layer by layer of a caffe network with mxnet network\n :param caffe_net: loaded caffe network\n :param arg_params: arguments\n :param aux_params: auxiliary parameters\n :param exe: mxnet model\n :param layer_name_to_record: map between caffe layer and information record\n :param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)\n :param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob\n :param max_diff_allowed: max difference allowed between caffe blob and mxnet blob\n "
] |
Please provide a description of the function:def main():
    parser = argparse.ArgumentParser(
        description='Tool for testing caffe to mxnet conversion layer by layer')
    parser.add_argument('--image_url', type=str,
                        default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'\
                                'tutorials/python/predict_image/cat.jpg',
                        help='input image to test inference, can be either file path or url')
    parser.add_argument('--caffe_prototxt_path', type=str,
                        default='./model.prototxt',
                        help='path to caffe prototxt')
    parser.add_argument('--caffe_model_path', type=str,
                        default='./model.caffemodel',
                        help='path to caffe weights')
    parser.add_argument('--caffe_mean', type=str,
                        default='./model_mean.binaryproto',
                        help='path to caffe mean file')
    # the thresholds are fractional, so parse them as float (was erroneously int)
    parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                        help='mean difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                        help='max difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
    args = parser.parse_args()
    convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
                                       args.caffe_model_path, args.caffe_mean,
                                       args.mean_diff_allowed, args.max_diff_allowed) | [
"Entrypoint for compare_layers"
] |
Please provide a description of the function:def get_executor(sym, ctx, data_inputs, initializer=None):
    data_shapes = {k: v.shape for k, v in data_inputs.items()}
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    param_names = list(set(arg_names) - set(data_inputs.keys()))
    arg_shapes, output_shapes, aux_shapes = sym.infer_shape(**data_shapes)
    arg_name_shape = {k: s for k, s in zip(arg_names, arg_shapes)}
    params = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    params_grad = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    aux_states = {k: nd.empty(s, ctx=ctx) for k, s in zip(aux_names, aux_shapes)}
    exe = sym.bind(ctx=ctx, args=dict(params, **data_inputs),
                   args_grad=params_grad,
                   aux_states=aux_states)
    if initializer is not None:
        for k, v in params.items():
            initializer(k, v)
    return exe, params, params_grad, aux_states | [
"Get executor to Stochastic Gradient Langevin Dynamics and/or Bayesian Dark Knowledge"
] |
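A hedged usage sketch (the two-layer symbol is illustrative; assumes the legacy symbolic API where an initializer can be called with a parameter name and an array):

net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, num_hidden=10)
net = mx.sym.SoftmaxOutput(data=net, name='softmax')
data_inputs = {'data': nd.zeros((32, 784), ctx=mx.cpu()),
               'softmax_label': nd.zeros((32,), ctx=mx.cpu())}
exe, params, params_grad, aux = get_executor(net, mx.cpu(), data_inputs, mx.init.Uniform(0.07))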
Please provide a description of the function:def copy_param(exe, new_param=None):
    if new_param is None:
        new_param = {k: nd.empty(v.shape, ctx=mx.cpu()) for k, v in exe.arg_dict.items()}
    for k, v in new_param.items():
        exe.arg_dict[k].copyto(v)
    return new_param | [
"Create copy of parameters"
] |
Please provide a description of the function:def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("font_path", help="Path to ttf font file or directory containing ttf files")
    parser.add_argument("--loss", help="'ctc' or 'warpctc' loss [Default 'ctc']", default='ctc')
    parser.add_argument("--cpu",
                        help="Number of CPUs for training [Default 8]. Ignored if --gpu is specified.",
                        type=int, default=8)
    parser.add_argument("--gpu", help="Number of GPUs for training [Default 0]", type=int)
    parser.add_argument("--num_proc", help="Number of CAPTCHA generating processes [Default 4]", type=int, default=4)
    parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr')
    return parser.parse_args() | [
"Parse command line arguments"
] |
Please provide a description of the function:def main():
    args = parse_args()
    if not any(args.loss == s for s in ['ctc', 'warpctc']):
        raise ValueError("Invalid loss '{}' (must be 'ctc' or 'warpctc')".format(args.loss))
    hp = Hyperparams()
    # Start a multiprocessor captcha image generator
    mp_captcha = MPDigitCaptcha(
        font_paths=get_fonts(args.font_path), h=hp.seq_length, w=30,
        num_digit_min=3, num_digit_max=4, num_processes=args.num_proc, max_queue_size=hp.batch_size * 2)
    try:
        # Must call start() before any call to mxnet module (https://github.com/apache/incubator-mxnet/issues/9213)
        mp_captcha.start()
        if args.gpu:
            contexts = [mx.context.gpu(i) for i in range(args.gpu)]
        else:
            contexts = [mx.context.cpu(i) for i in range(args.cpu)]
        init_states = lstm.init_states(hp.batch_size, hp.num_lstm_layer, hp.num_hidden)
        data_train = OCRIter(
            hp.train_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='train')
        data_val = OCRIter(
            hp.eval_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='val')
        symbol = lstm.lstm_unroll(
            num_lstm_layer=hp.num_lstm_layer,
            seq_len=hp.seq_length,
            num_hidden=hp.num_hidden,
            num_label=hp.num_label,
            loss_type=args.loss)
        head = '%(asctime)-15s %(message)s'
        logging.basicConfig(level=logging.DEBUG, format=head)
        module = mx.mod.Module(
            symbol,
            data_names=['data', 'l0_init_c', 'l0_init_h', 'l1_init_c', 'l1_init_h'],
            label_names=['label'],
            context=contexts)
        metrics = CtcMetrics(hp.seq_length)
        module.fit(train_data=data_train,
                   eval_data=data_val,
                   # use metrics.accuracy or metrics.accuracy_lcs
                   eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
                   optimizer='sgd',
                   optimizer_params={'learning_rate': hp.learning_rate,
                                     'momentum': hp.momentum,
                                     'wd': 0.00001,
                                     },
                   initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
                   num_epoch=hp.num_epoch,
                   batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50),
                   epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
                   )
    except KeyboardInterrupt:
        print("W: interrupt received, stopping...")
    finally:
        # Reset multiprocessing captcha generator to stop processes
        mp_captcha.reset() | [
"Program entry point"
] |
Please provide a description of the function:def optimize(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # load the content and style target
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    content_image = utils.subtract_imagenet_mean_preprocess_batch(content_image)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.subtract_imagenet_mean_preprocess_batch(style_image)
    # load the pre-trained vgg-16 and extract features
    vgg = net.Vgg16()
    utils.init_vgg_params(vgg, 'models', ctx=ctx)
    # content feature
    f_xc_c = vgg(content_image)[1]
    # style feature
    features_style = vgg(style_image)
    gram_style = [net.gram_matrix(y) for y in features_style]
    # output
    output = Parameter('output', shape=content_image.shape)
    output.initialize(ctx=ctx)
    output.set_data(content_image)
    # optimizer
    trainer = gluon.Trainer([output], 'adam',
                            {'learning_rate': args.lr})
    mse_loss = gluon.loss.L2Loss()
    # optimizing the images
    for e in range(args.iters):
        utils.imagenet_clamp_batch(output.data(), 0, 255)
        # fix BN for pre-trained vgg
        with autograd.record():
            features_y = vgg(output.data())
            content_loss = 2 * args.content_weight * mse_loss(features_y[1], f_xc_c)
            style_loss = 0.
            for m in range(len(features_y)):
                gram_y = net.gram_matrix(features_y[m])
                gram_s = gram_style[m]
                style_loss = style_loss + 2 * args.style_weight * mse_loss(gram_y, gram_s)
            total_loss = content_loss + style_loss
            total_loss.backward()
        trainer.step(1)
        if (e + 1) % args.log_interval == 0:
            print('loss:{:.2f}'.format(total_loss.asnumpy()[0]))
    # save the image
    output = utils.add_imagenet_mean_batch(output.data())
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) | [
" Gatys et al. CVPR 2017\n ref: Image Style Transfer Using Convolutional Neural Networks\n "
] |
Please provide a description of the function:def get_mnist_sym(output_op=None, num_hidden=400):
    net = mx.symbol.Variable('data')
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
    net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
    net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
    if output_op is None:
        net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
    else:
        net = output_op(data=net, name='softmax')
    return net | [
"Get symbol of mnist"
] |
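A quick inspection sketch (pure symbol construction, no training needed):

sym = get_mnist_sym(num_hidden=400)
print(sym.list_arguments())
# ['data', 'mnist_fc1_weight', 'mnist_fc1_bias', 'mnist_fc2_weight', 'mnist_fc2_bias',
#  'mnist_fc3_weight', 'mnist_fc3_bias', 'softmax_label']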
Please provide a description of the function:def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2
    grad[:] = grad_npy
    return grad | [
"Get synthetic gradient value"
] |
Please provide a description of the function:def get_toy_sym(teacher=True, teacher_noise_precision=None):
    if teacher:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
        net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
                                               grad_scale=teacher_noise_precision)
    else:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
        student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
        student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
        net = mx.symbol.Group([student_mean, student_var])
    return net | [
"Get toy symbol"
] |
Please provide a description of the function:def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
    X, Y, X_test, Y_test = load_mnist(num_training)
    minibatch_size = 100
    if num_training >= 10000:
        num_hidden = 800
        total_iter_num = 1000000
        teacher_learning_rate = 1E-6
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.1
    else:
        num_hidden = 400
        total_iter_num = 20000
        teacher_learning_rate = 4E-5
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.001
    teacher_net = get_mnist_sym(num_hidden=num_hidden)
    logsoftmax = LogSoftmax()
    student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
                      student_initializer=student_initializer,
                      teacher_initializer=teacher_initializer,
                      student_optimizing_algorithm="adam",
                      teacher_learning_rate=teacher_learning_rate,
                      student_learning_rate=student_learning_rate,
                      teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
                      perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id)) | [
"Run DistilledSGLD on mnist dataset"
] |
Please provide a description of the function:def run_toy_SGLD(gpu_id=None):
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0 / 9.0
    net = get_toy_sym(True, teacher_noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    initializer = mx.init.Uniform(0.07)
    exe, params, _ = SGLD(sym=net,
                          data_inputs=data_inputs,
                          X=X,
                          Y=Y,
                          X_test=X_test,
                          Y_test=Y_test,
                          total_iter_num=50000,
                          initializer=initializer,
                          learning_rate=1E-4,
                          # lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                          prior_precision=0.1,
                          burn_in_iter_num=1000,
                          thin_interval=10,
                          task='regression',
                          minibatch_size=minibatch_size,
                          dev=dev(gpu_id)) | [
"Run SGLD on toy dataset"
] |
Please provide a description of the function:def run_toy_DistilledSGLD(gpu_id):
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0
    teacher_net = get_toy_sym(True, teacher_noise_precision)
    student_net = get_toy_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}
    teacher_initializer = mx.init.Uniform(0.07)
    student_initializer = mx.init.Uniform(0.07)
    student_grad_f = lambda student_outputs, teacher_pred: \
        regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=1E-4, student_learning_rate=0.01,
                      # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=0.1, student_prior_precision=0.001,
                      perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
                      dev=dev(gpu_id)) | [
"Run DistilledSGLD on toy dataset"
] |
Please provide a description of the function:def run_toy_HMC(gpu_id=None):
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = Y.shape[0]
    noise_precision = 1 / 9.0
    net = get_toy_sym(True, noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    initializer = mx.init.Uniform(0.07)
    sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
                      sample_num=300000, initializer=initializer, prior_precision=1.0,
                      learning_rate=1E-3, L=10, dev=dev(gpu_id)) | [
"Run HMC on toy dataset"
] |
Please provide a description of the function:def run_synthetic_SGLD():
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
                                 factor=0.55)
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show() | [
"Run synthetic SGLD"
] |
Please provide a description of the function:def load_pascal(image_set, year, devkit_path, shuffle=False):
    image_set = [y.strip() for y in image_set.split(',')]
    assert image_set, "No image_set specified"
    year = [y.strip() for y in year.split(',')]
    assert year, "No year specified"
    # make sure (# sets == # years)
    if len(image_set) > 1 and len(year) == 1:
        year = year * len(image_set)
    if len(image_set) == 1 and len(year) > 1:
        image_set = image_set * len(year)
    assert len(image_set) == len(year), "Number of sets and year mismatch"
    imdbs = []
    for s, y in zip(image_set, year):
        imdbs.append(PascalVoc(s, y, devkit_path, shuffle, is_train=True))
    if len(imdbs) > 1:
        return ConcatDB(imdbs, shuffle)
    else:
        return imdbs[0] | [
"\n wrapper function for loading pascal voc dataset\n\n Parameters:\n ----------\n image_set : str\n train, trainval...\n year : str\n 2007, 2012 or combinations splitted by comma\n devkit_path : str\n root directory of dataset\n shuffle : bool\n whether to shuffle initial list\n\n Returns:\n ----------\n Imdb\n "
] |
Please provide a description of the function:def load_coco(image_set, dirname, shuffle=False):
    anno_files = ['instances_' + y.strip() + '.json' for y in image_set.split(',')]
    assert anno_files, "No image set specified"
    imdbs = []
    for af in anno_files:
        af_path = os.path.join(dirname, 'annotations', af)
        imdbs.append(Coco(af_path, dirname, shuffle=shuffle))
    if len(imdbs) > 1:
        return ConcatDB(imdbs, shuffle)
    else:
        return imdbs[0] | [
"\n wrapper function for loading ms coco dataset\n\n Parameters:\n ----------\n image_set : str\n train2014, val2014, valminusminival2014, minival2014\n dirname: str\n root dir for coco\n shuffle: boolean\n initial shuffle\n "
] |
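A usage sketch (the dataset root is a hypothetical path; the loader expects annotations/instances_<set>.json beneath it):

imdb = load_coco('train2014,val2014', '/data/coco', shuffle=True)  # returns a ConcatDB over both sets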
Please provide a description of the function:def reset(self):
    self.curr_idx = 0
    # shuffle data in each bucket
    random.shuffle(self.idx)
    for i, buck in enumerate(self.sentences):
        self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
                                                                                        self.sentences[i],
                                                                                        self.characters[i],
                                                                                        self.label[i])
    self.ndindex = []
    self.ndsent = []
    self.ndchar = []
    self.ndlabel = []
    # for each bucket of data
    for i, buck in enumerate(self.sentences):
        # append the lists with an array
        self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
        self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
        self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
        self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) | [
"Resets the iterator to the beginning of the data."
] |
Please provide a description of the function:def next(self):
    if self.curr_idx == len(self.idx):
        raise StopIteration
    # i = batches index, j = starting record
    i, j = self.idx[self.curr_idx]
    self.curr_idx += 1
    indices = self.ndindex[i][j:j + self.batch_size]
    sentences = self.ndsent[i][j:j + self.batch_size]
    characters = self.ndchar[i][j:j + self.batch_size]
    label = self.ndlabel[i][j:j + self.batch_size]
    return DataBatch([sentences, characters], [label], pad=0, index=indices, bucket_key=self.buckets[i],
                     provide_data=[DataDesc(name=self.data_names[0], shape=sentences.shape, layout=self.layout),
                                   DataDesc(name=self.data_names[1], shape=characters.shape, layout=self.layout)],
                     provide_label=[DataDesc(name=self.label_name, shape=label.shape, layout=self.layout)]) | [
"Returns the next batch of data."
] |
Please provide a description of the function:def convert_reshape(net, node, module, builder):
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    target_shape = node['shape']
    if any(item <= 0 for item in target_shape):
        raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet. '
                                  'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
    if 'reverse' in node and node['reverse'] == 'True':
        raise NotImplementedError('"reverse" parameter is not supported yet. '
                                  'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
    mode = 0  # CHANNEL_FIRST
    builder.add_reshape(name, input_name, output_name, target_shape, mode) | [
"Converts a reshape layer from mxnet to coreml.\n\n This doesn't currently handle the deprecated parameters for the reshape layer.\n\n Parameters\n ----------\n network: net\n An mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n A module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_transpose(net, node, module, builder):
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    param = _get_attrs(node)
    axes = literal_eval(param['axes'])
    builder.add_permute(name, axes, input_name, output_name) | [
"Convert a transpose layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_flatten(net, node, module, builder):
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    mode = 0  # CHANNEL_FIRST
    builder.add_flatten(name, mode, input_name, output_name) | [
"Convert a flatten layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_softmax(net, node, module, builder):
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    builder.add_softmax(name=name,
                        input_name=input_name,
                        output_name=output_name) | [
"Convert a softmax layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_activation(net, node, module, builder):
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mx_non_linearity = _get_attrs(node)['act_type']
#TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR
if mx_non_linearity == 'relu':
non_linearity = 'RELU'
elif mx_non_linearity == 'tanh':
non_linearity = 'TANH'
elif mx_non_linearity == 'sigmoid':
non_linearity = 'SIGMOID'
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name = name,
non_linearity = non_linearity,
input_name = input_name,
output_name = output_name) | [
"Convert an activation layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_leakyrelu(net, node, module, builder):
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
inputs = node['inputs']
args, _ = module.get_params()
mx_non_linearity = _get_attrs(node)['act_type']
if mx_non_linearity == 'elu':
non_linearity = 'ELU'
slope = _get_attrs(node)['slope'] if 'slope' in _get_attrs(node) else 0.25
params = slope
elif mx_non_linearity == 'leaky':
non_linearity = 'LEAKYRELU'
slope = _get_attrs(node)['slope'] if 'slope' in _get_attrs(node) else 0.25
params = [slope]
elif mx_non_linearity == 'prelu':
non_linearity = 'PRELU'
params = args[_get_node_name(net, inputs[1][0])].asnumpy()
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name = name,
non_linearity = non_linearity,
input_name = input_name,
output_name = output_name,
params = params) | [
"Convert a leakyrelu layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_elementwise_add(net, node, module, builder):
input_names, output_name = _get_input_output_name(net, node, [0, 1])
name = node['name']
builder.add_elementwise(name, input_names, output_name, 'ADD') | [
"Convert an elementwise add layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_convolution(net, node, module, builder):
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
inputs = node['inputs']
args, _ = module.get_params()
if 'no_bias' in param.keys():
has_bias = not literal_eval(param['no_bias'])
else:
has_bias = True
if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0):
pad = literal_eval(param['pad'])
builder.add_padding(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
value=0,
input_name=input_name,
output_name=name+"_pad_output")
input_name = name+"_pad_output"
border_mode = "valid"
n_filters = int(param['num_filter'])
n_groups = int(param['num_group']) if 'num_group' in param else 1
W = args[_get_node_name(net, inputs[1][0])].asnumpy()
if has_bias:
Wb = args[_get_node_name(net, inputs[2][0])].asnumpy()
else:
Wb = None
channels = W.shape[1]
stride_height = 1
stride_width = 1
if 'stride' in param.keys():
stride_height, stride_width = literal_eval(param['stride'])
kernel_height, kernel_width = literal_eval(param['kernel'])
W = W.transpose((2, 3, 1, 0))
builder.add_convolution(
name=name,
kernel_channels=channels,
output_channels=n_filters,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
border_mode=border_mode,
groups=n_groups,
W=W,
b=Wb,
has_bias=has_bias,
is_deconv=False,
output_shape=None,
input_name=input_name,
output_name=output_name) | [
"Convert a convolution layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
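The `W.transpose((2, 3, 1, 0))` call above converts MXNet's weight layout (num_filter, channels, height, width) into the (height, width, channels, num_filter) layout the CoreML builder expects. A quick shape check, using numpy only (the sizes are illustrative):
import numpy as np
W = np.zeros((64, 3, 5, 5))           # MXNet layout: (out_channels, in_channels, h, w)
W_coreml = W.transpose((2, 3, 1, 0))
print(W_coreml.shape)                 # (5, 5, 3, 64), i.e. (h, w, in_channels, out_channels)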
Please provide a description of the function:def convert_pooling(net, node, module, builder):
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
layer_type_mx = param['pool_type']
if layer_type_mx == 'max':
layer_type = 'MAX'
elif layer_type_mx == 'avg':
layer_type = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % layer_type_mx)
# Add padding if there is any
if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0):
pad = literal_eval(param['pad'])
builder.add_padding(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
value=0,
input_name=input_name,
output_name=name+"_pad_output")
input_name = name+"_pad_output"
stride_height = 1
stride_width = 1
if 'stride' in param.keys():
stride_height, stride_width = literal_eval(param['stride'])
kernel_height, kernel_width = literal_eval(param['kernel'])  # MXNet 'kernel' attr is (h, w), as in convolution
type_map = {'valid': 'VALID', 'full': 'INCLUDE_LAST_PIXEL'}
padding_type = param['pooling_convention'] if 'pooling_convention' in param else 'valid'
if padding_type not in type_map:
raise KeyError("%s type is not supported in this converter. It is a Github issue.")
padding_type = type_map[padding_type]
if 'global_pool' in param.keys():
is_global = literal_eval(param['global_pool'])
else:
is_global = False
# For reasons why we are not using the standard builder but having our own implementation,
# see the function documentation.
_add_pooling.add_pooling_with_padding_types(
builder=builder,
name=name,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
layer_type=layer_type,
padding_type=padding_type,
exclude_pad_area=False,
is_global=is_global,
input_name=input_name,
output_name=output_name
) | [
"Convert a pooling layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_batchnorm(net, node, module, builder):
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
inputs = node['inputs']
eps = 1e-3 # Default value of eps for MXNet.
use_global_stats = False # Default value of use_global_stats for MXNet.
fix_gamma = True # Default value of fix_gamma for MXNet.
attrs = _get_attrs(node)
if 'eps' in attrs:
eps = literal_eval(attrs['eps'])
if 'fix_gamma' in attrs:
fix_gamma = literal_eval(attrs['fix_gamma'])
args, aux = module.get_params()
gamma = args[_get_node_name(net, inputs[1][0])].asnumpy()
beta = args[_get_node_name(net, inputs[2][0])].asnumpy()
mean = aux[_get_node_name(net, inputs[3][0])].asnumpy()
variance = aux[_get_node_name(net, inputs[4][0])].asnumpy()
nb_channels = gamma.shape[0]
if fix_gamma:
gamma.fill(1.)
builder.add_batchnorm(
name=name,
channels=nb_channels,
gamma=gamma,
beta=beta,
mean=mean,
variance=variance,
input_name=input_name,
output_name=output_name,
epsilon=eps) | [
"Convert a batchnorm layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def convert_concat(net, node, module, builder):
# Get input and output names
input_names, output_name = _get_input_output_name(net, node, 'all')
name = node['name']
mode = 'CONCAT'
builder.add_elementwise(name = name, input_names = input_names,
output_name = output_name, mode = mode) | [
"Convert concat layer from mxnet to coreml.\n\n Parameters\n ----------\n network: net\n A mxnet network object.\n\n layer: node\n Node to convert.\n\n module: module\n An module for MXNet\n\n builder: NeuralNetworkBuilder\n A neural network builder object.\n "
] |
Please provide a description of the function:def dmlc_opts(opts):
args = ['--num-workers', str(opts.num_workers),
'--num-servers', str(opts.num_servers),
'--cluster', opts.launcher,
'--host-file', opts.hostfile,
'--sync-dst-dir', opts.sync_dst_dir]
# convert to dictionary
dopts = vars(opts)
for key in ['env_server', 'env_worker', 'env']:
for v in dopts[key]:
args.append('--' + key.replace("_","-"))
args.append(v)
args += opts.command
try:
from dmlc_tracker import opts
except ImportError:
print("Can't load dmlc_tracker package. Perhaps you need to run")
print(" git submodule update --init --recursive")
raise
dmlc_opts = opts.get_opts(args)
return dmlc_opts | [
"convert from mxnet's opts to dmlc's opts\n "
] |
Please provide a description of the function:def _unfuse(self):
assert not self._projection_size, "_unfuse does not support projection layer yet!"
assert not self._lstm_state_clip_min and not self._lstm_state_clip_max, \
"_unfuse does not support state clipping yet!"
get_cell = {'rnn_relu': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
activation='relu',
**kwargs),
'rnn_tanh': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
activation='tanh',
**kwargs),
'lstm': lambda **kwargs: rnn_cell.LSTMCell(self._hidden_size,
**kwargs),
'gru': lambda **kwargs: rnn_cell.GRUCell(self._hidden_size,
**kwargs)}[self._mode]
stack = rnn_cell.HybridSequentialRNNCell(prefix=self.prefix, params=self.params)
with stack.name_scope():
ni = self._input_size
for i in range(self._num_layers):
kwargs = {'input_size': ni,
'i2h_weight_initializer': self._i2h_weight_initializer,
'h2h_weight_initializer': self._h2h_weight_initializer,
'i2h_bias_initializer': self._i2h_bias_initializer,
'h2h_bias_initializer': self._h2h_bias_initializer}
if self._dir == 2:
stack.add(rnn_cell.BidirectionalCell(
get_cell(prefix='l%d_'%i, **kwargs),
get_cell(prefix='r%d_'%i, **kwargs)))
else:
stack.add(get_cell(prefix='l%d_'%i, **kwargs))
if self._dropout > 0 and i != self._num_layers - 1:
stack.add(rnn_cell.DropoutCell(self._dropout))
ni = self._hidden_size * self._dir
return stack | [
"Unfuses the fused RNN in to a stack of rnn cells."
] |
Please provide a description of the function:def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
states = []
for i, info in enumerate(self.state_info(batch_size)):
if info is not None:
info.update(kwargs)
else:
info = kwargs
states.append(func(name='%sh0_%d'%(self.prefix, i), **info))
return states | [
"Initial state for this cell.\n\n Parameters\n ----------\n batch_size: int\n Only required for `NDArray` API. Size of the batch ('N' in layout).\n Dimension of the input.\n func : callable, default `ndarray.zeros`\n Function for creating initial state.\n\n For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,\n `symbol.var` etc. Use `symbol.var` if you want to directly\n feed input as states.\n\n For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.\n\n **kwargs :\n Additional keyword arguments passed to func. For example\n `mean`, `std`, `dtype`, etc.\n\n Returns\n -------\n states : nested list of Symbol\n Starting states for the first RNN step.\n "
] |
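For example, with a Gluon LSTM layer (a sketch; the hidden size and batch size are arbitrary):
import mxnet as mx
lstm = mx.gluon.rnn.LSTM(hidden_size=100, num_layers=2)
states = lstm.begin_state(batch_size=32)   # an LSTM keeps two states, h and c
print([s.shape for s in states])           # [(2, 32, 100), (2, 32, 100)]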
Please provide a description of the function:def _forward_kernel(self, F, inputs, states, **kwargs):
if self._layout == 'NTC':
inputs = F.swapaxes(inputs, dim1=0, dim2=1)
if self._projection_size is None:
params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1)
for t in ['weight', 'bias']
for l in range(self._num_layers)
for d in ['l', 'r'][:self._dir]
for g in ['i2h', 'h2h'])
else:
params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1)
for t in ['weight', 'bias']
for l in range(self._num_layers)
for d in ['l', 'r'][:self._dir]
for g in ['i2h', 'h2h', 'h2r']
if g != 'h2r' or t != 'bias')
params = F._internal._rnn_param_concat(*params, dim=0)
rnn = F.RNN(inputs, params, *states, state_size=self._hidden_size,
projection_size=self._projection_size,
num_layers=self._num_layers, bidirectional=self._dir == 2,
p=self._dropout, state_outputs=True, mode=self._mode,
lstm_state_clip_min=self._lstm_state_clip_min,
lstm_state_clip_max=self._lstm_state_clip_max,
lstm_state_clip_nan=self._lstm_state_clip_nan)
if self._mode == 'lstm':
outputs, states = rnn[0], [rnn[1], rnn[2]]
else:
outputs, states = rnn[0], [rnn[1]]
if self._layout == 'NTC':
outputs = F.swapaxes(outputs, dim1=0, dim2=1)
return outputs, states | [
" forward using CUDNN or CPU kenrel"
] |
Please provide a description of the function:def wait_ssh_open(server, port, keep_waiting=None, timeout=None):
import socket
import errno
import time
log = logging.getLogger('wait_ssh_open')
sleep_s = 1
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
log.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if keep_waiting and not keep_waiting():
log.debug("keep_waiting() is set and evaluates to False")
return False
if timeout:
next_timeout = end - now()
if next_timeout < 0:
log.debug("connect time out")
return False
else:
log.debug("connect timeout %d s", next_timeout)
s.settimeout(next_timeout)
log.debug("connect %s:%d", server, port)
s.connect((server, port))
ret = s.recv(1024).decode()
if ret and ret.startswith('SSH'):
s.close()
log.info("wait_ssh_open: port %s:%s is open and ssh is ready", server, port)
return True
else:
log.debug("Didn't get the SSH banner")
s.close()
except ConnectionError as err:
log.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
else:
sleep_s *= 2
except socket.gaierror as err:
log.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise | [
" Wait for network service to appear\n @param server: host to connect to (str)\n @param port: port (int)\n @param timeout: in seconds, if None or 0 wait forever\n @return: True of False, if timeout is None may return only True or\n throw unhandled network exception\n "
] |
Please provide a description of the function:def wait_port_open(server, port, timeout=None):
import socket
import errno
import time
sleep_s = 0
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
logging.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
logging.info("connect %s %d", server, port)
s.connect((server, port))
except ConnectionError as err:
logging.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
except socket.gaierror as err:
logging.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise
else:
s.close()
logging.info("wait_port_open: port %s:%s is open", server, port)
return True | [
" Wait for network service to appear\n @param server: host to connect to (str)\n @param port: port (int)\n @param timeout: in seconds, if None or 0 wait forever\n @return: True of False, if timeout is None may return only True or\n throw unhandled network exception\n "
] |
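Typical usage (the host and port are only illustrative):
if wait_port_open('localhost', 22, timeout=60):
    print('service is up')
else:
    print('timed out, or the host name did not resolve')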
Please provide a description of the function:def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]):
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be Symbol")
show_shape = False
if shape is not None:
show_shape = True
interals = symbol.get_internals()
_, out_shapes, _ = interals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(interals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
heads = set(conf["heads"][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
line = ''
for i, field in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
print_row(to_display, positions)
print('=' * line_length)
def print_layer_summary(node, out_shape):
op = node["op"]
pre_node = []
pre_filter = 0
if op != "null":
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_node["op"] != "null" or item[0] in heads:
# add precede
pre_node.append(input_name)
if show_shape:
if input_node["op"] != "null":
key = input_name + "_output"
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
else:
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
cur_param += int(node["attrs"]["num_filter"])
elif op == 'FullyConnected':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
cur_param = pre_filter * int(node["attrs"]["num_hidden"])
else:
cur_param = (pre_filter+1) * int(node["attrs"]["num_hidden"])
elif op == 'BatchNorm':
key = node["name"] + "_output"
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
elif op == 'Embedding':
cur_param = int(node["attrs"]['input_dim']) * int(node["attrs"]['output_dim'])
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')',
"x".join([str(x) for x in out_shape]),
cur_param,
first_connection]
print_row(fields, positions)
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
print_row(fields, positions)
return cur_param
total_params = 0
for i, node in enumerate(nodes):
out_shape = []
op = node["op"]
if op == "null" and i > 0:
continue
if op != "null" or i in heads:
if show_shape:
if op != "null":
key = node["name"] + "_output"
else:
key = node["name"]
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print("Total params: {params}".format(params=total_params))
print('_' * line_length) | [
"Convert symbol for detail information.\n\n Parameters\n ----------\n symbol: Symbol\n Symbol to be visualized.\n shape: dict\n A dict of shapes, str->shape (tuple), given input shapes.\n line_length: int\n Rotal length of printed lines\n positions: list\n Relative or absolute positions of log elements in each line.\n\n Returns\n ------\n None\n\n Notes\n -----\n If ``mxnet`` is imported, the visualization module can be used in its short-form.\n For example, if we ``import mxnet`` as follows::\n\n import mxnet\n\n this method in visualization module can be used in its short-form as::\n\n mxnet.viz.print_summary(...)\n\n ",
"Print format row.\n\n Parameters\n ----------\n fields: list\n Information field.\n positions: list\n Field length ratio.\n Returns\n ------\n None\n ",
"print layer information\n\n Parameters\n ----------\n node: dict\n Node information.\n out_shape: dict\n Node shape information.\n Returns\n ------\n Node total parameters.\n "
] |
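A small usage sketch (the two-layer network is illustrative):
import mxnet as mx
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
net = mx.sym.Activation(data=net, name='relu1', act_type='relu')
net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
net = mx.sym.SoftmaxOutput(data=net, name='out')
mx.viz.print_summary(net, shape={'data': (1, 100)})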
Please provide a description of the function:def plot_network(symbol, title="plot", save_format='pdf', shape=None, dtype=None, node_attrs={},
hide_weights=True):
# todo add shape support
try:
from graphviz import Digraph
except:
raise ImportError("Draw network requires graphviz library")
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be a Symbol")
internals = symbol.get_internals()
draw_shape = shape is not None
if draw_shape:
_, out_shapes, _ = internals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(internals.list_outputs(), out_shapes))
draw_type = dtype is not None
if draw_type:
_, out_types, _ = internals.infer_type(**dtype)
if out_types is None:
raise ValueError("Input type is incomplete")
type_dict = dict(zip(internals.list_outputs(), out_types))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
# check if multiple nodes have the same name
if len(nodes) != len(set([node["name"] for node in nodes])):
seen_nodes = set()
# find all repeated names
repeated = set(node['name'] for node in nodes if node['name'] in seen_nodes
or seen_nodes.add(node['name']))
warning_message = "There are multiple variables with the same name in your graph, " \
"this may result in cyclic graph. Repeated names: " + ','.join(repeated)
warnings.warn(warning_message, RuntimeWarning)
# default attributes of node
node_attr = {"shape": "box", "fixedsize": "true",
"width": "1.3", "height": "0.8034", "style": "filled"}
# merge the dict provided by user and the default one
node_attr.update(node_attrs)
dot = Digraph(name=title, format=save_format)
# color map
cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
"#fdb462", "#b3de69", "#fccde5")
def looks_like_weight(name):
weight_like = ('_weight', '_bias', '_beta', '_gamma',
'_moving_var', '_moving_mean', '_running_var', '_running_mean')
return name.endswith(weight_like)
# make nodes
hidden_nodes = set()
for node in nodes:
op = node["op"]
name = node["name"]
# input data
attr = copy.deepcopy(node_attr)
label = name
if op == "null":
if looks_like_weight(node["name"]):
if hide_weights:
hidden_nodes.add(node["name"])
# else we don't render a node, but
# don't add it to the hidden_nodes set
# so it gets rendered as an empty oval
continue
attr["shape"] = "oval" # inputs get their own shape
label = node["name"]
attr["fillcolor"] = cm[0]
elif op == "Convolution":
label = "Convolution\n{kernel}/{stride}, {filter}".format(
kernel="x".join(_str2tuple(node["attrs"]["kernel"])),
stride="x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1",
filter=node["attrs"]["num_filter"]
)
attr["fillcolor"] = cm[1]
elif op == "FullyConnected":
label = "FullyConnected\n{hidden}".format(hidden=node["attrs"]["num_hidden"])
attr["fillcolor"] = cm[1]
elif op == "BatchNorm":
attr["fillcolor"] = cm[3]
elif op == 'Activation':
act_type = node["attrs"]["act_type"]
label = 'Activation\n{activation}'.format(activation=act_type)
attr["fillcolor"] = cm[2]
elif op == 'LeakyReLU':
attrs = node.get("attrs")
act_type = attrs.get("act_type", "Leaky") if attrs else "Leaky"
label = 'LeakyReLU\n{activation}'.format(activation=act_type)
attr["fillcolor"] = cm[2]
elif op == "Pooling":
label = "Pooling\n{pooltype}, {kernel}/{stride}".format(pooltype=node["attrs"]["pool_type"],
kernel="x".join(_str2tuple(node["attrs"]["kernel"]))
if "kernel" in node["attrs"] else "[]",
stride="x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1")
attr["fillcolor"] = cm[4]
elif op in ("Concat", "Flatten", "Reshape"):
attr["fillcolor"] = cm[5]
elif op == "Softmax":
attr["fillcolor"] = cm[6]
else:
attr["fillcolor"] = cm[7]
if op == "Custom":
label = node["attrs"]["op_type"]
dot.node(name=name, label=label, **attr)
# add edges
for node in nodes: # pylint: disable=too-many-nested-blocks
op = node["op"]
name = node["name"]
if op == "null":
continue
else:
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_name not in hidden_nodes:
attr = {"dir": "back", 'arrowtail':'open', 'label': ''}
# add shapes
if draw_shape:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
else:
key = input_name
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
if draw_type:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
dtype = type_dict[key]
attr["label"] += '(' + dtype.__name__ + ')'
else:
key = input_name
dtype = type_dict[key]
attr["label"] += '(' + dtype.__name__ + ')'
dot.edge(tail_name=name, head_name=input_name, **attr)
return dot | [
"Creates a visualization (Graphviz digraph object) of the given computation graph.\n Graphviz must be installed for this function to work.\n\n Parameters\n ----------\n title: str, optional\n Title of the generated visualization.\n symbol: Symbol\n A symbol from the computation graph. The generated digraph will visualize the part\n of the computation graph required to compute `symbol`.\n shape: dict, optional\n Specifies the shape of the input tensors. If specified, the visualization will include\n the shape of the tensors between the nodes. `shape` is a dictionary mapping\n input symbol names (str) to the corresponding tensor shape (tuple).\n dtype: dict, optional\n Specifies the type of the input tensors. If specified, the visualization will include\n the type of the tensors between the nodes. `dtype` is a dictionary mapping\n input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`).\n node_attrs: dict, optional\n Specifies the attributes for nodes in the generated visualization. `node_attrs` is\n a dictionary of Graphviz attribute names and values. For example::\n\n node_attrs={\"shape\":\"oval\",\"fixedsize\":\"false\"}\n\n will use oval shape for nodes and allow variable sized nodes in the visualization.\n hide_weights: bool, optional\n If True (default), then inputs with names of form *_weight* (corresponding to weight\n tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner\n visualization.\n\n Returns\n -------\n dot: Digraph\n A Graphviz digraph object visualizing the computation graph to compute `symbol`.\n\n Example\n -------\n >>> net = mx.sym.Variable('data')\n >>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)\n >>> net = mx.sym.Activation(data=net, name='relu1', act_type=\"relu\")\n >>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)\n >>> net = mx.sym.SoftmaxOutput(data=net, name='out')\n >>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},\n ... node_attrs={\"fixedsize\":\"false\"})\n >>> digraph.view()\n\n Notes\n -----\n If ``mxnet`` is imported, the visualization module can be used in its short-form.\n For example, if we ``import mxnet`` as follows::\n\n import mxnet\n\n this method in visualization module can be used in its short-form as::\n\n mxnet.viz.plot_network(...)\n\n ",
"Internal helper to figure out if node should be hidden with `hide_weights`.\n "
] |
Please provide a description of the function:def evaluate_accuracy(data_iterator, network):
acc = mx.metric.Accuracy()
# Iterate through data and label
for i, (data, label) in enumerate(data_iterator):
# Get the data and label into the GPU
data = data.as_in_context(ctx[0])
label = label.as_in_context(ctx[0])
# Get network's output which is a probability distribution
# Apply argmax on the probability distribution to get network's classification.
output = network(data)
predictions = nd.argmax(output, axis=1)
# Give network's prediction and the correct label to update the metric
acc.update(preds=predictions, labels=label)
# Return the accuracy
return acc.get()[1] | [
" Measure the accuracy of ResNet\n\n Parameters\n ----------\n data_iterator: Iter\n examples of dataset\n network:\n ResNet\n\n Returns\n ----------\n tuple of array element\n "
] |
Please provide a description of the function:def train_batch(batch_list, context, network, gluon_trainer):
# Split and load data into multiple GPUs
data = batch_list[0]
data = gluon.utils.split_and_load(data, context)
# Split and load label into multiple GPUs
label = batch_list[1]
label = gluon.utils.split_and_load(label, context)
# Run the forward and backward pass
forward_backward(network, data, label)
# Update the parameters
this_batch_size = batch_list[0].shape[0]
gluon_trainer.step(this_batch_size) | [
" Training with multiple GPUs\n\n Parameters\n ----------\n batch_list: List\n list of dataset\n context: List\n a list of all GPUs to be used for training\n network:\n ResNet\n gluon_trainer:\n rain module of gluon\n "
] |
Please provide a description of the function:def get_optimized_symbol(executor):
handle = SymbolHandle()
try:
check_call(_LIB.MXExecutorGetOptimizedSymbol(executor.handle, ctypes.byref(handle)))
result = sym.Symbol(handle=handle)
return result
except MXNetError:
logging.error('Error while trying to fetch TRT optimized symbol for graph. Please ensure '
'build was compiled with MXNET_USE_TENSORRT enabled.')
raise | [
"\n Take an executor's underlying symbol graph and return its generated optimized version.\n\n Parameters\n ----------\n executor :\n An executor for which you want to see an optimized symbol. Getting an optimized symbol\n is useful to compare and verify the work TensorRT has done against a legacy behaviour.\n\n Returns\n -------\n symbol : nnvm::Symbol\n The nnvm symbol optimized.\n "
] |
Please provide a description of the function:def tensorrt_bind(symbol, ctx, all_params, type_dict=None, stype_dict=None, group2ctx=None,
**kwargs):
kwargs['shared_buffer'] = all_params
return symbol.simple_bind(ctx, type_dict=type_dict, stype_dict=stype_dict,
group2ctx=group2ctx, **kwargs) | [
"Bind current symbol to get an optimized trt executor.\n\n Parameters\n ----------\n symbol : Symbol\n The symbol you wish to bind, and optimize with TensorRT.\n\n ctx : Context\n The device context the generated executor to run on.\n\n all_params : Dict of str->ndarray\n A dictionary of mappings from parameter names to parameter NDArrays.\n\n type_dict : Dict of str->numpy.dtype\n Input type dictionary, name->dtype\n\n stype_dict : Dict of str->str\n Input storage type dictionary, name->storage_type\n\n group2ctx : Dict of string to mx.Context\n The dict mapping the `ctx_group` attribute to the context assignment.\n\n kwargs : Dict of str->shape\n Input shape dictionary, name->shape\n\n Returns\n -------\n executor : mxnet.Executor\n An optimized TensorRT executor.\n "
] |
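A hedged usage sketch; it assumes an MXNet build with MXNET_USE_TENSORRT enabled, and `sym`, `arg_params`, `aux_params` and `input_batch` stand for a previously trained model and its input:
all_params = {k: v.as_in_context(mx.gpu(0)) for k, v in arg_params.items()}
all_params.update({k: v.as_in_context(mx.gpu(0)) for k, v in aux_params.items()})
executor = tensorrt_bind(sym, ctx=mx.gpu(0), all_params=all_params,
                         grad_req='null', data=(1, 3, 224, 224))
outputs = executor.forward(is_train=False, data=input_batch)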
Please provide a description of the function:def get_symbol(num_classes, num_layers=11, batch_norm=False, dtype='float32', **kwargs):
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
if num_layers not in vgg_spec:
raise ValueError("Invalid num_layers {}. Possible choices are 11, 13, 16, 19.".format(num_layers))
layers, filters = vgg_spec[num_layers]
data = mx.sym.Variable(name="data")
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
feature = get_feature(data, layers, filters, batch_norm)
classifier = get_classifier(feature, num_classes)
if dtype == 'float16':
classifier = mx.sym.Cast(data=classifier, dtype=np.float32)
symbol = mx.sym.SoftmaxOutput(data=classifier, name='softmax')
return symbol | [
"\n Parameters\n ----------\n num_classes : int, default 1000\n Number of classification classes.\n num_layers : int\n Number of layers for the variant of densenet. Options are 11, 13, 16, 19.\n batch_norm : bool, default False\n Use batch normalization.\n dtype: str, float32 or float16\n Data precision.\n "
] |
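For instance, building a 16-layer VGG with batch normalization (a sketch; the symbol still needs to be bound and trained, or loaded with pretrained weights):
sym = get_symbol(num_classes=1000, num_layers=16, batch_norm=True)
print(sym.list_outputs())   # ['softmax_output']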
Please provide a description of the function:def create_batch(self, frame):
frame_resize = mx.nd.array(cv2.resize(frame, (self.data_shape[0], self.data_shape[1])))
#frame_resize = mx.img.imresize(frame, self.data_shape[0], self.data_shape[1], cv2.INTER_LINEAR)
# Change dimensions from (w,h,channels) to (channels, w, h)
frame_t = mx.nd.transpose(frame_resize, axes=(2,0,1))
frame_norm = frame_t - self.mean_pixels_nd
# Add dimension for batch, results in (1,channels,w,h)
batch_frame = [mx.nd.expand_dims(frame_norm, axis=0)]
batch_shape = [DataDesc('data', batch_frame[0].shape)]
batch = DataBatch(data=batch_frame, provide_data=batch_shape)
return batch | [
"\n :param frame: an (w,h,channels) numpy array (image)\n :return: DataBatch of (1,channels,data_shape,data_shape)\n "
] |
Please provide a description of the function:def detect_iter(self, det_iter, show_timer=False):
num_images = det_iter._size
if not isinstance(det_iter, mx.io.PrefetchingIter):
det_iter = mx.io.PrefetchingIter(det_iter)
start = timer()
detections = self.mod.predict(det_iter).asnumpy()
time_elapsed = timer() - start
if show_timer:
logging.info("Detection time for {} images: {:.4f} sec".format(
num_images, time_elapsed))
result = Detector.filter_positive_detections(detections)
return result | [
"\n detect all images in iterator\n\n Parameters:\n ----------\n det_iter : DetIter\n iterator for all testing images\n show_timer : Boolean\n whether to print out detection exec time\n\n Returns:\n ----------\n list of detection results\n "
] |
Please provide a description of the function:def detect_batch(self, batch):
self.mod.forward(batch, is_train=False)
detections = self.mod.get_outputs()[0]
positive_detections = Detector.filter_positive_detections(detections)
return positive_detections | [
"\n Return detections for batch\n :param batch:\n :return:\n "
] |
Please provide a description of the function:def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
is_train=False)
return self.detect_iter(test_iter, show_timer) | [
"\n wrapper for detecting multiple images\n\n Parameters:\n ----------\n im_list : list of str\n image path or list of image paths\n root_dir : str\n directory of input images, optional if image path already\n has full directory information\n extension : str\n image extension, eg. \".jpg\", optional\n\n Returns:\n ----------\n list of detection results in format [det0, det1...], det is in\n format np.array([id, score, xmin, ymin, xmax, ymax]...)\n "
] |
Please provide a description of the function:def visualize_detection(self, img, dets, classes=[], thresh=0.6):
import matplotlib.pyplot as plt
import random
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
for det in dets:
(klass, score, x0, y0, x1, y1) = det
if score < thresh:
continue
cls_id = int(klass)
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(x0 * width)
ymin = int(y0 * height)
xmax = int(x1 * width)
ymax = int(y1 * height)
rect = plt.Rectangle((xmin, ymin), xmax - xmin,
ymax - ymin, fill=False,
edgecolor=colors[cls_id],
linewidth=3.5)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
plt.gca().text(xmin, ymin - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12, color='white')
plt.show() | [
"\n visualize detections in one image\n\n Parameters:\n ----------\n img : numpy.array\n image, in bgr format\n dets : numpy.array\n ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])\n each row is one object\n classes : tuple or list of str\n class names\n thresh : float\n score threshold\n "
] |
Please provide a description of the function:def filter_positive_detections(detections):
class_idx = 0
assert isinstance(detections, (mx.nd.NDArray, np.ndarray))
detections_per_image = []
# for each image
for i in range(detections.shape[0]):
result = []
det = detections[i, :, :]
for obj in det:
if obj[class_idx] >= 0:
result.append(obj)
detections_per_image.append(result)
logging.info("%d positive detections", len(result))
return detections_per_image | [
"\n First column (class id) is -1 for negative detections\n :param detections:\n :return:\n "
] |
Please provide a description of the function:def detect_and_visualize(self, im_list, root_dir=None, extension=None,
classes=[], thresh=0.6, show_timer=False):
dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
if not isinstance(im_list, list):
im_list = [im_list]
assert len(dets) == len(im_list)
for k, det in enumerate(dets):
img = cv2.imread(im_list[k])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.visualize_detection(img, det, classes, thresh) | [
"\n wrapper for im_detect and visualize_detection\n\n Parameters:\n ----------\n im_list : list of str or str\n image path or list of image paths\n root_dir : str or None\n directory of input images, optional if image path already\n has full directory information\n extension : str or None\n image extension, eg. \".jpg\", optional\n\n Returns:\n ----------\n\n "
] |
Please provide a description of the function:def process_network_proto(caffe_root, deploy_proto):
processed_deploy_proto = deploy_proto + ".processed"
from shutil import copyfile
copyfile(deploy_proto, processed_deploy_proto)
# run upgrade tool on new file name (same output file)
import os
upgrade_tool_command_line = caffe_root + '/build/tools/upgrade_net_proto_text.bin ' \
+ processed_deploy_proto + ' ' + processed_deploy_proto
os.system(upgrade_tool_command_line)
return processed_deploy_proto | [
"\n Runs the caffe upgrade tool on the prototxt to create a prototxt in the latest format.\n This enable us to work just with latest structures, instead of supporting all the variants\n\n :param caffe_root: link to caffe root folder, where the upgrade tool is located\n :param deploy_proto: name of the original prototxt file\n :return: name of new processed prototxt file\n "
] |
Please provide a description of the function:def read_network_dag(processed_deploy_prototxt):
from caffe.proto import caffe_pb2
from google.protobuf import text_format # pylint: disable=relative-import
from collections import OrderedDict
# load prototxt file
network_def = caffe_pb2.NetParameter()
with open(processed_deploy_prototxt, 'r') as proto_file:
text_format.Merge(str(proto_file.read()), network_def)
# map layer name to layer record
layer_name_to_record = OrderedDict()
for layer_def in network_def.layer:
if (len(layer_def.include) == 0) or \
(caffe_pb2.TEST in [item.phase for item in layer_def.include]):
layer_name_to_record[layer_def.name] = LayerRecord(layer_def)
top_to_layers = dict()
for layer in network_def.layer:
# no specific phase, or TEST phase is specifically asked for
if (len(layer.include) == 0) or (caffe_pb2.TEST in [item.phase for item in layer.include]):
for top in layer.top:
if top not in top_to_layers:
top_to_layers[top] = list()
top_to_layers[top].append(layer.name)
# find parents and children of all layers
for child_layer_name in layer_name_to_record.keys(): # pylint: disable=too-many-nested-blocks
child_layer_def = layer_name_to_record[child_layer_name]
for bottom in child_layer_def.bottoms:
if bottom in top_to_layers:
for parent_layer_name in top_to_layers[bottom]:
if parent_layer_name in layer_name_to_record:
parent_layer_def = layer_name_to_record[parent_layer_name]
if parent_layer_def not in child_layer_def.parents:
child_layer_def.parents.append(parent_layer_def)
if child_layer_def not in parent_layer_def.children:
parent_layer_def.children.append(child_layer_def)
# update filter, stride, pad for maxout "structures"
for layer_name in layer_name_to_record.keys():
layer_def = layer_name_to_record[layer_name]
if layer_def.type == 'Eltwise' and \
len(layer_def.parents) == 1 and \
layer_def.parents[0].type == 'Slice' and \
len(layer_def.parents[0].parents) == 1 and \
layer_def.parents[0].parents[0].type in ['Convolution', 'InnerProduct']:
layer_def.filter = layer_def.parents[0].parents[0].filter
layer_def.stride = layer_def.parents[0].parents[0].stride
layer_def.pad = layer_def.parents[0].parents[0].pad
return network_def, layer_name_to_record, top_to_layers | [
"\n Reads from the caffe prototxt the network structure\n :param processed_deploy_prototxt: name of prototxt to load, preferably the prototxt should\n be processed before using a call to process_network_proto()\n :return: network_def, layer_name_to_record, top_to_layers\n network_def: caffe network structure, gives access to *all* the network information\n layer_name_to_record: *ordered* dictionary which maps between layer name and a structure which\n describes in a simple form the layer parameters\n top_to_layers: dictionary which maps a blob name to an ordered list of layers which output it\n when a top is used several times, like in inplace layhers, the list will contain all the layers\n by order of appearance\n "
] |
Please provide a description of the function:def read_caffe_mean(caffe_mean_file):
import caffe_parser
import numpy as np
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(caffe_mean_file, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(mean_blob.channels, mean_blob.height, mean_blob.width)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
return img_mean_np | [
"\n Reads caffe formatted mean file\n :param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix\n :return: mean image, converted from BGR to RGB format\n "
] |
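Hypothetical usage; the file name and the `img_chw` input are assumptions:
mean_rgb = read_caffe_mean('imagenet_mean.binaryproto')   # (channels, height, width), RGB order
normalized = img_chw - mean_rgb                           # img_chw: an RGB image in (C, H, W) layout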
Please provide a description of the function:def get_distance(F, x):
n = x.shape[0]
square = F.sum(x ** 2.0, axis=1, keepdims=True)
distance_square = square + square.transpose() - (2.0 * F.dot(x, x.transpose()))
# Adding identity to make sqrt work.
return F.sqrt(distance_square + F.array(np.identity(n))) | [
"Helper function for margin-based loss. Return a distance matrix given a matrix."
] |
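A tiny worked example; note that the added identity matrix turns the zero diagonal into ones so that the square root stays differentiable:
import numpy as np
import mxnet as mx
x = mx.nd.array([[0., 0.], [3., 4.]])
print(get_distance(mx.nd, x).asnumpy())
# [[ 1.  5.]
#  [ 5.  1.]]   off-diagonals are Euclidean distances; the diagonal is sqrt(0 + 1)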
Please provide a description of the function:def cross_entropy_loss(inputs, labels, rescale_loss=1):
criterion = mx.gluon.loss.SoftmaxCrossEntropyLoss(weight=rescale_loss)
loss = criterion(inputs, labels)
mask = S.var('mask')
loss = loss * S.reshape(mask, shape=(-1,))
return S.make_loss(loss.mean()) | [
" cross entropy loss with a mask "
] |
Please provide a description of the function:def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size):
state_names = []
data = S.var('data')
weight = S.var("encoder_weight", stype='row_sparse')
embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size,
output_dim=num_embed, name='embed', sparse_grad=True)
states = []
outputs = S.Dropout(embed, p=dropout)
for i in range(num_layers):
prefix = 'lstmp%d_' % i
init_h = S.var(prefix + 'init_h', shape=(batch_size, num_proj), init=mx.init.Zero())
init_c = S.var(prefix + 'init_c', shape=(batch_size, nhid), init=mx.init.Zero())
state_names += [prefix + 'init_h', prefix + 'init_c']
lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix)
outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c], \
layout='NTC', merge_outputs=True)
outputs = S.Dropout(outputs, p=dropout)
states += [S.stop_gradient(s) for s in next_states]
outputs = S.reshape(outputs, shape=(-1, num_proj))
trainable_lstm_args = []
for arg in outputs.list_arguments():
if 'lstmp' in arg and 'init' not in arg:
trainable_lstm_args.append(arg)
return outputs, states, trainable_lstm_args, state_names | [
" word embedding + LSTM Projected "
] |
Please provide a description of the function:def sampled_softmax(num_classes, num_samples, in_dim, inputs, weight, bias,
sampled_values, remove_accidental_hits=True):
# inputs = (n, in_dim)
sample, prob_sample, prob_target = sampled_values
# (num_samples, )
sample = S.var('sample', shape=(num_samples,), dtype='float32')
# (n, )
label = S.var('label')
label = S.reshape(label, shape=(-1,), name="label_reshape")
# (num_samples+n, )
sample_label = S.concat(sample, label, dim=0)
# lookup weights and biases
# (num_samples+n, dim)
sample_target_w = S.sparse.Embedding(data=sample_label, weight=weight,
input_dim=num_classes, output_dim=in_dim,
sparse_grad=True)
# (num_samples+n, 1)
sample_target_b = S.sparse.Embedding(data=sample_label, weight=bias,
input_dim=num_classes, output_dim=1,
sparse_grad=True)
# (num_samples, dim)
sample_w = S.slice(sample_target_w, begin=(0, 0), end=(num_samples, None))
target_w = S.slice(sample_target_w, begin=(num_samples, 0), end=(None, None))
sample_b = S.slice(sample_target_b, begin=(0, 0), end=(num_samples, None))
target_b = S.slice(sample_target_b, begin=(num_samples, 0), end=(None, None))
# target
# (n, 1)
true_pred = S.sum(target_w * inputs, axis=1, keepdims=True) + target_b
# samples
# (n, num_samples)
sample_b = S.reshape(sample_b, (-1,))
sample_pred = S.FullyConnected(inputs, weight=sample_w, bias=sample_b,
num_hidden=num_samples)
# remove accidental hits
if remove_accidental_hits:
label_v = S.reshape(label, (-1, 1))
sample_v = S.reshape(sample, (1, -1))
neg = S.broadcast_equal(label_v, sample_v) * -1e37
sample_pred = sample_pred + neg
prob_sample = S.reshape(prob_sample, shape=(1, num_samples))
p_target = true_pred - S.log(prob_target)
p_sample = S.broadcast_sub(sample_pred, S.log(prob_sample))
# return logits and new_labels
# (n, 1+num_samples)
logits = S.concat(p_target, p_sample, dim=1)
new_targets = S.zeros_like(label)
return logits, new_targets | [
" Sampled softmax via importance sampling.\n This under-estimates the full softmax and is only used for training.\n "
] |
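The two `S.log` subtractions implement the usual importance-sampling correction for sampled softmax: if q(c) is the expected count of class c under the sampler, each raw logit s(x, c) is adjusted to s(x, c) - log q(c), compensating for the non-uniform proposal distribution. Because the true class is concatenated at column 0 of `logits`, the returned `new_targets` are all zeros.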
Please provide a description of the function:def generate_samples(label, num_splits, sampler):
def listify(x):
return x if isinstance(x, list) else [x]
label_splits = listify(label.split(num_splits, axis=0))
prob_samples = []
prob_targets = []
samples = []
for label_split in label_splits:
label_split_2d = label_split.reshape((-1,1))
sampled_value = sampler.draw(label_split_2d)
sampled_classes, exp_cnt_true, exp_cnt_sampled = sampled_value
samples.append(sampled_classes.astype(np.float32))
prob_targets.append(exp_cnt_true.astype(np.float32).reshape((-1,1)))
prob_samples.append(exp_cnt_sampled.astype(np.float32))
return samples, prob_samples, prob_targets | [
" Split labels into `num_splits` and\n generate candidates based on log-uniform distribution.\n "
] |
Please provide a description of the function:def get_model(name, **kwargs):
models = {'resnet18_v1': resnet18_v1,
'resnet34_v1': resnet34_v1,
'resnet50_v1': resnet50_v1,
'resnet101_v1': resnet101_v1,
'resnet152_v1': resnet152_v1,
'resnet18_v2': resnet18_v2,
'resnet34_v2': resnet34_v2,
'resnet50_v2': resnet50_v2,
'resnet101_v2': resnet101_v2,
'resnet152_v2': resnet152_v2,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'vgg11_bn': vgg11_bn,
'vgg13_bn': vgg13_bn,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'alexnet': alexnet,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'squeezenet1.0': squeezenet1_0,
'squeezenet1.1': squeezenet1_1,
'inceptionv3': inception_v3,
'mobilenet1.0': mobilenet1_0,
'mobilenet0.75': mobilenet0_75,
'mobilenet0.5': mobilenet0_5,
'mobilenet0.25': mobilenet0_25,
'mobilenetv2_1.0': mobilenet_v2_1_0,
'mobilenetv2_0.75': mobilenet_v2_0_75,
'mobilenetv2_0.5': mobilenet_v2_0_5,
'mobilenetv2_0.25': mobilenet_v2_0_25
}
name = name.lower()
if name not in models:
raise ValueError(
'Model %s is not supported. Available options are\n\t%s' % (
name, '\n\t'.join(sorted(models.keys()))))
return models[name](**kwargs) | [
"Returns a pre-defined model by name\n\n Parameters\n ----------\n name : str\n Name of the model.\n pretrained : bool\n Whether to load the pretrained weights for model.\n classes : int\n Number of classes for the output layer.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '$MXNET_HOME/models'\n Location for keeping the model parameters.\n\n Returns\n -------\n HybridBlock\n The model.\n "
] |
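For example, loading a pretrained ResNet-50 v1 on CPU (the weights are downloaded on first use):
net = get_model('resnet50_v1', pretrained=True, ctx=mx.cpu())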
Please provide a description of the function:def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl | [
"Return a new handle with specified storage type, shape, dtype and context.\n\n Empty handle is only used to hold results\n\n Returns\n -------\n handle\n A new empty ndarray handle\n "
] |
Please provide a description of the function:def _prepare_src_array(source_array, dtype):
if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=dtype)
except:
raise TypeError('values must be array like object')
return source_array | [
"Prepare `source_array` so that it can be used to construct NDArray.\n `source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \\\n nor an `np.ndarray`.\n "
] |
Please provide a description of the function:def _prepare_default_dtype(src_array, dtype):
if dtype is None:
if isinstance(src_array, (NDArray, np.ndarray)):
dtype = src_array.dtype
elif spsp and isinstance(src_array, spsp.csr.csr_matrix):
dtype = src_array.dtype
else:
dtype = mx_real_t
return dtype | [
"Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray\n or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise."
] |
Please provide a description of the function:def _check_shape(s1, s2):
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2)) | [
"check s1 == s2 if both are not None"
] |
Please provide a description of the function:def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
# construct a csr matrix from (M, N) or (data, indices, indptr)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len == 2:
# construct a sparse csr matrix from
# scipy coo matrix if input format is coo
if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
data, (row, col) = arg1
if isinstance(data, NDArray):
data = data.asnumpy()
if isinstance(row, NDArray):
row = row.asnumpy()
if isinstance(col, NDArray):
col = col.asnumpy()
coo = spsp.coo_matrix((data, (row, col)), shape=shape)
_check_shape(coo.shape, shape)
csr = coo.tocsr()
return array(csr, ctx=ctx, dtype=dtype)
else:
# empty matrix with shape
_check_shape(arg1, shape)
return empty('csr', arg1, ctx=ctx, dtype=dtype)
elif arg_len == 3:
# data, indices, indptr
return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
ctx=ctx, dtype=dtype)
else:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
else:
# construct a csr matrix from a sparse / dense one
if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
# construct a csr matrix from scipy or CSRNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, RowSparseNDArray):
raise ValueError("Unexpected input type: RowSparseNDArray")
else:
# construct a csr matrix from a dense one
# prepare default ctx and dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('csr') | [
"Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format.\n\n The CSRNDArray can be instantiated in several ways:\n\n - csr_matrix(D):\n to construct a CSRNDArray with a dense 2D array ``D``\n - **D** (*array_like*) - An object exposing the array interface, an object whose \\\n `__array__` method returns an array, or any (nested) sequence.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \\\n float32 otherwise.\n\n - csr_matrix(S)\n to construct a CSRNDArray with a sparse 2D array ``S``\n - **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is ``S.dtype``.\n\n - csr_matrix((M, N))\n to construct an empty CSRNDArray with shape ``(M, N)``\n - **M** (*int*) - Number of rows in the matrix\n - **N** (*int*) - Number of columns in the matrix\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is float32.\n\n - csr_matrix((data, indices, indptr))\n to construct a CSRNDArray based on the definition of compressed sparse row format \\\n using three separate arrays, \\\n where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \\\n and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \\\n The column indices for a given row are expected to be **sorted in ascending order.** \\\n Duplicate column entries for the same row are not allowed.\n - **data** (*array_like*) - An object exposing the array interface, which \\\n holds all the non-zero entries of the matrix in row-major order.\n - **indices** (*array_like*) - An object exposing the array interface, which \\\n stores the column index for each non-zero element in ``data``.\n - **indptr** (*array_like*) - An object exposing the array interface, which \\\n stores the offset into ``data`` of the first non-zero element number of each \\\n row of the matrix.\n - **shape** (*tuple of int, optional*) - The shape of the array. The default \\\n shape is inferred from the indices and indptr arrays.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \\\n float32 otherwise.\n\n - csr_matrix((data, (row, col)))\n to construct a CSRNDArray based on the COOrdinate format \\\n using three seperate arrays, \\\n where ``row[i]`` is the row index of the element, \\\n ``col[i]`` is the column index of the element \\\n and ``data[i]`` is the data corresponding to the element. 
All the missing \\\n elements in the input are taken to be zeroes.\n - **data** (*array_like*) - An object exposing the array interface, which \\\n holds all the non-zero entries of the matrix in COO format.\n - **row** (*array_like*) - An object exposing the array interface, which \\\n stores the row index for each non zero element in ``data``.\n - **col** (*array_like*) - An object exposing the array interface, which \\\n stores the col index for each non zero element in ``data``.\n - **shape** (*tuple of int, optional*) - The shape of the array. The default \\\n shape is inferred from the ``row`` and ``col`` arrays.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is float32.\n\n Parameters\n ----------\n arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \\\n scipy.sparse.coo_matrix, tuple of int or tuple of array_like\n The argument to help instantiate the csr matrix. See above for further details.\n shape : tuple of int, optional\n The shape of the csr matrix.\n ctx: Context, optional\n Device context (default is the current default context).\n dtype: str or numpy.dtype, optional\n The data type of the output array.\n\n Returns\n -------\n CSRNDArray\n A `CSRNDArray` with the `csr` storage representation.\n\n Example\n -------\n >>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))\n >>> a.asnumpy()\n array([[ 0., 1., 0.],\n [ 2., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 3.]], dtype=float32)\n\n See Also\n --------\n CSRNDArray : MXNet NDArray in compressed sparse row format.\n "
] |
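For quick reference, here is a minimal sketch of two of the construction modes described above (variable names are illustrative; the COO path may require SciPy in some MXNet versions):

import mxnet as mx
import numpy as np

# From a dense 2D array: zero entries are compressed away automatically.
dense = np.array([[0., 1., 0.], [2., 0., 0.]])
a = mx.nd.sparse.csr_matrix(dense)

# From COO-style (data, (row, col)) triplets with an explicit shape.
b = mx.nd.sparse.csr_matrix(([1, 2], ([0, 1], [1, 0])), shape=(2, 3))

print(a.asnumpy())  # both hold the same values
print(b.asnumpy())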
Please provide a description of the function:def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
# pylint: disable= no-member, protected-access
storage_type = 'csr'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indptr = _prepare_src_array(indptr, indptr_type)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
if indices.shape[0] == 0:
raise ValueError('invalid shape')
shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
indptr.shape[0] == 0 or len(shape) != 2:
raise ValueError('invalid shape')
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result | [
"Create a `CSRNDArray` based on data, indices and indptr"
] |
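When `shape` is omitted, the helper above infers it as `(len(indptr) - 1, max(indices) + 1)`. A small sketch through the public `csr_matrix` wrapper (illustrative values):

import mxnet as mx

# rows = len(indptr) - 1 = 2, cols = max(indices) + 1 = 3
a = mx.nd.sparse.csr_matrix(([1, 2, 3], [0, 2, 1], [0, 2, 3]))
print(a.shape)  # (2, 3)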
Please provide a description of the function:def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
# construct a row sparse array from (D0, D1 ..) or (data, indices)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len < 2:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
elif arg_len > 2:
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# len(arg1) = 2, is either shape or (data, indices)
if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
                # data, indices
return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
ctx=ctx, dtype=dtype)
else:
# construct a row sparse ndarray from a dense / sparse array
if isinstance(arg1, RowSparseNDArray):
# construct a row sparse ndarray from RowSparseNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, CSRNDArray):
raise ValueError("Unexpected input type: CSRNDArray")
else:
            # construct a row sparse ndarray from a dense one
# prepare default dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('row_sparse') | [
"Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \\\n tensor slices at given indices.\n\n The RowSparseNDArray can be instantiated in several ways:\n\n - row_sparse_array(D):\n to construct a RowSparseNDArray with a dense ndarray ``D``\n - **D** (*array_like*) - An object exposing the array interface, an object whose \\\n `__array__` method returns an array, or any (nested) sequence.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \\\n float32 otherwise.\n\n - row_sparse_array(S)\n to construct a RowSparseNDArray with a sparse ndarray ``S``\n - **S** (*RowSparseNDArray*) - A sparse ndarray.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is ``S.dtype``.\n\n - row_sparse_array((D0, D1 .. Dn))\n to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``\n - **D0, D1 .. Dn** (*int*) - The shape of the ndarray\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is float32.\n\n - row_sparse_array((data, indices))\n to construct a RowSparseNDArray based on the definition of row sparse format \\\n using two separate arrays, \\\n where the `indices` stores the indices of the row slices with non-zeros,\n while the values are stored in `data`. The corresponding NDArray ``dense``\n represented by RowSparseNDArray ``rsp`` has \\\n ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``\n The row indices for are expected to be **sorted in ascending order.** \\\n - **data** (*array_like*) - An object exposing the array interface, which \\\n holds all the non-zero row slices of the array.\n - **indices** (*array_like*) - An object exposing the array interface, which \\\n stores the row index for each row slice with non-zero elements.\n - **shape** (*tuple of int, optional*) - The shape of the array. The default \\\n shape is inferred from the indices and indptr arrays.\n - **ctx** (*Context, optional*) - Device context \\\n (default is the current default context).\n - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \\\n The default dtype is float32.\n\n Parameters\n ----------\n arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like\n The argument to help instantiate the row sparse ndarray. See above for further details.\n shape : tuple of int, optional\n The shape of the row sparse ndarray. (Default value = None)\n ctx : Context, optional\n Device context (default is the current default context).\n dtype : str or numpy.dtype, optional\n The data type of the output array. (Default value = None)\n\n Returns\n -------\n RowSparseNDArray\n An `RowSparseNDArray` with the `row_sparse` storage representation.\n\n Examples\n --------\n >>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))\n >>> a.asnumpy()\n array([[ 0., 0.],\n [ 1., 2.],\n [ 0., 0.],\n [ 0., 0.],\n [ 3., 4.],\n [ 0., 0.]], dtype=float32)\n\n See Also\n --------\n RowSparseNDArray : MXNet NDArray in row sparse format.\n "
] |
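A short sketch of the argument forms handled by the dispatch logic above (names are illustrative):

import mxnet as mx

# (data, indices): rows 1 and 4 hold the two slices, the rest are zero.
rsp = mx.nd.sparse.row_sparse_array(([[1., 2.], [3., 4.]], [1, 4]), shape=(6, 2))
print(rsp.indices.asnumpy())  # [1 4]

# Dense input: zero rows are compressed away via tostype('row_sparse').
dense = mx.nd.array([[0., 0.], [5., 6.], [0., 0.]])
print(mx.nd.sparse.row_sparse_array(dense).data.asnumpy())  # [[5. 6.]]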
Please provide a description of the function:def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
storage_type = 'row_sparse'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
num_indices = indices.shape[0]
if num_indices == 0:
raise ValueError('invalid shape')
dim0 = indices[num_indices - 1].asscalar() + 1
shape = (dim0, ) + data.shape[1:]
# verify shapes
if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
raise ValueError("invalid shape")
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result | [
"Create a `RowSparseNDArray` based on data and indices"
] |
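Note that when `shape` is omitted, the leading dimension is inferred from the last (largest) row index, as in this sketch:

import mxnet as mx

# dim0 = indices[-1] + 1 = 5; trailing dims come from data.shape[1:].
rsp = mx.nd.sparse.row_sparse_array(([[1., 1.], [2., 2.]], [0, 4]))
print(rsp.shape)  # (5, 2)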
Please provide a description of the function:def add(lhs, rhs):
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_add,
operator.add,
_internal._plus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_add,
operator.add,
_internal._plus_scalar,
None) | [
"Returns element-wise sum of the input arrays with broadcasting.\n\n Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and\n ``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not\n match. If lhs.shape == rhs.shape, this is equivalent to\n ``mx.nd.elemwise_add(lhs, rhs)``\n\n .. note::\n\n If the corresponding dimensions of two arrays have the same size or one of them has size 1,\n then the arrays are broadcastable to a common shape.abs\n\n Parameters\n ----------\n lhs : scalar or mxnet.ndarray.sparse.array\n First array to be added.\n rhs : scalar or mxnet.ndarray.sparse.array\n Second array to be added.\n If ``lhs.shape != rhs.shape``, they must be\n broadcastable to a common shape.\n\n Returns\n -------\n NDArray\n The element-wise sum of the input arrays.\n\n Examples\n --------\n >>> a = mx.nd.ones((2,3)).tostype('csr')\n >>> b = mx.nd.ones((2,3)).tostype('csr')\n >>> a.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> b.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> (a+b).asnumpy()\n array([[ 2., 2., 2.],\n [ 2., 2., 2.]], dtype=float32)\n >>> c = mx.nd.ones((2,3)).tostype('row_sparse')\n >>> d = mx.nd.ones((2,3)).tostype('row_sparse')\n >>> c.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> d.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> (c+d).asnumpy()\n array([[ 2., 2., 2.],\n [ 2., 2., 2.]], dtype=float32)\n "
] |
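The wrapper above picks `elemwise_add` when operand shapes match, `broadcast_add` when they differ, and `_plus_scalar` for scalar operands; the same dispatch pattern applies to `subtract`, `multiply`, and `divide` below. A hedged sketch of the three paths (depending on the MXNet version, mixed sparse/dense or scalar operations may fall back to dense storage):

import mxnet as mx

a = mx.nd.ones((2, 3)).tostype('csr')
b = mx.nd.ones((2, 3)).tostype('csr')
row = mx.nd.arange(3).reshape((1, 3))

print((a + b).asnumpy())    # same shapes    -> elemwise_add
print((a + row).asnumpy())  # shape mismatch -> broadcast_add
print((a + 1).asnumpy())    # scalar operand -> _plus_scalar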
Please provide a description of the function:def subtract(lhs, rhs):
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_sub,
operator.sub,
_internal._minus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_sub,
operator.sub,
_internal._minus_scalar,
None) | [
"Returns element-wise difference of the input arrays with broadcasting.\n\n Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and\n ``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not\n match. If lhs.shape == rhs.shape, this is equivalent to\n ``mx.nd.elemwise_sub(lhs, rhs)``\n\n .. note::\n\n If the corresponding dimensions of two arrays have the same size or one of them has size 1,\n then the arrays are broadcastable to a common shape.\n\n Parameters\n ----------\n lhs : scalar or mxnet.ndarray.sparse.array\n First array to be subtracted.\n rhs : scalar or mxnet.ndarray.sparse.array\n Second array to be subtracted.\n If ``lhs.shape != rhs.shape``, they must be\n broadcastable to a common shape.__spec__\n\n Returns\n -------\n NDArray\n The element-wise difference of the input arrays.\n\n Examples\n --------\n >>> a = mx.nd.ones((2,3)).tostype('csr')\n >>> b = mx.nd.ones((2,3)).tostype('csr')\n >>> a.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> b.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> (a-b).asnumpy()\n array([[ 0., 0., 0.],\n [ 0., 0., 0.]], dtype=float32)\n >>> c = mx.nd.ones((2,3)).tostype('row_sparse')\n >>> d = mx.nd.ones((2,3)).tostype('row_sparse')\n >>> c.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> d.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> (c-d).asnumpy()\n array([[ 0., 0., 0.],\n [ 0., 0., 0.]], dtype=float32)\n "
] |
Please provide a description of the function:def multiply(lhs, rhs):
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_mul,
operator.mul,
_internal._mul_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mul,
operator.mul,
_internal._mul_scalar,
None) | [
"Returns element-wise product of the input arrays with broadcasting.\n\n Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``\n when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,\n this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``\n\n .. note::\n\n If the corresponding dimensions of two arrays have the same size or one of them has size 1,\n then the arrays are broadcastable to a common shape.\n\n Parameters\n ----------\n lhs : scalar or mxnet.ndarray.sparse.array\n First array to be multiplied.\n rhs : scalar or mxnet.ndarray.sparse.array\n Second array to be multiplied.\n If ``lhs.shape != rhs.shape``, they must be\n broadcastable to a common shape.\n\n Returns\n -------\n NDArray\n The element-wise multiplication of the input arrays.\n\n Examples\n --------\n >>> x = mx.nd.ones((2,3)).tostype('csr')\n >>> y = mx.nd.arange(2).reshape((2,1))\n >>> z = mx.nd.arange(3)\n >>> x.asnumpy()\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]], dtype=float32)\n >>> y.asnumpy()\n array([[ 0.],\n [ 1.]], dtype=float32)\n >>> z.asnumpy()\n array([ 0., 1., 2.], dtype=float32)\n >>> (x*2).asnumpy()\n array([[ 2., 2., 2.],\n [ 2., 2., 2.]], dtype=float32)\n >>> (x*y).asnumpy()\n array([[ 0., 0., 0.],\n [ 1., 1., 1.]], dtype=float32)\n >>> mx.nd.sparse.multiply(x, y).asnumpy()\n array([[ 0., 0., 0.],\n [ 1., 1., 1.]], dtype=float32)\n >>> (x*z).asnumpy()\n array([[ 0., 1., 2.],\n [ 0., 1., 2.]], dtype=float32)\n >>> mx.nd.sparse.multiply(x, z).asnumpy()\n array([[ 0., 1., 2.],\n [ 0., 1., 2.]], dtype=float32)\n >>> z = z.reshape((1, 3))\n >>> z.asnumpy()\n array([[ 0., 1., 2.]], dtype=float32)\n >>> (x*z).asnumpy()\n array([[ 0., 1., 2.],\n [ 0., 1., 2.]], dtype=float32)\n >>> mx.nd.sparse.multiply(x, z).asnumpy()\n array([[ 0., 1., 2.],\n [ 0., 1., 2.]], dtype=float32)\n "
] |
Please provide a description of the function:def divide(lhs, rhs):
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_div,
operator.truediv,
_internal._div_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_div,
operator.truediv,
_internal._div_scalar,
None) | [
"Returns element-wise division of the input arrays with broadcasting.\n\n Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``\n when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,\n this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``\n\n .. note::\n\n If the corresponding dimensions of two arrays have the same size or one of them has size 1,\n then the arrays are broadcastable to a common shape.\n\n Parameters\n ----------\n lhs : scalar or mxnet.ndarray.sparse.array\n First array in division.\n rhs : scalar or mxnet.ndarray.sparse.array\n Second array in division.\n The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be\n broadcastable to a common shape.\n\n Returns\n -------\n NDArray\n The element-wise division of the input arrays.\n\n Examples\n --------\n >>> x = (mx.nd.ones((2,3))*6).tostype('csr')\n >>> y = mx.nd.arange(2).reshape((2,1)) + 1\n >>> z = mx.nd.arange(3) + 1\n >>> x.asnumpy()\n array([[ 6., 6., 6.],\n [ 6., 6., 6.]], dtype=float32)\n >>> y.asnumpy()\n array([[ 1.],\n [ 2.]], dtype=float32)\n >>> z.asnumpy()\n array([ 1., 2., 3.], dtype=float32)\n >>> x/2\n <NDArray 2x3 @cpu(0)>\n >>> (x/3).asnumpy()\n array([[ 2., 2., 2.],\n [ 2., 2., 2.]], dtype=float32)\n >>> (x/y).asnumpy()\n array([[ 6., 6., 6.],\n [ 3., 3., 3.]], dtype=float32)\n >>> mx.nd.sparse.divide(x,y).asnumpy()\n array([[ 6., 6., 6.],\n [ 3., 3., 3.]], dtype=float32)\n >>> (x/z).asnumpy()\n array([[ 6., 3., 2.],\n [ 6., 3., 2.]], dtype=float32)\n >>> mx.nd.sprase.divide(x,z).asnumpy()\n array([[ 6., 3., 2.],\n [ 6., 3., 2.]], dtype=float32)\n >>> z = z.reshape((1,3))\n >>> z.asnumpy()\n array([[ 1., 2., 3.]], dtype=float32)\n >>> (x/z).asnumpy()\n array([[ 6., 3., 2.],\n [ 6., 3., 2.]], dtype=float32)\n >>> mx.nd.sparse.divide(x,z).asnumpy()\n array([[ 6., 3., 2.],\n [ 6., 3., 2.]], dtype=float32)\n "
] |
Please provide a description of the function:def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
# pylint: disable= no-member, protected-access
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
if stype in ('row_sparse', 'csr'):
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise ValueError("unknown storage type" + stype)
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs) | [
"Return a new array of given shape and type, filled with zeros.\n\n Parameters\n ----------\n stype: string\n The storage type of the empty array, such as 'row_sparse', 'csr', etc\n shape : int or tuple of int\n The shape of the empty array\n ctx : Context, optional\n An optional device context (default is the current default context)\n dtype : str or numpy.dtype, optional\n An optional value type (default is `float32`)\n\n Returns\n -------\n RowSparseNDArray or CSRNDArray\n A created array\n Examples\n --------\n >>> mx.nd.sparse.zeros('csr', (1,2))\n <CSRNDArray 1x2 @cpu(0)>\n >>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()\n array([[ 0., 0.]], dtype=float16)\n "
] |
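As a quick sketch, the `stype` argument selects between the dense fast path and the sparse allocation path shown above:

import mxnet as mx

z_dense = mx.nd.sparse.zeros('default', (2, 3))    # plain dense NDArray
z_csr = mx.nd.sparse.zeros('csr', (2, 3))
z_rsp = mx.nd.sparse.zeros('row_sparse', (2, 3), dtype='float16')
print(z_dense.stype, z_csr.stype, z_rsp.stype)     # default csr row_sparse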